power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00614237,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.207513,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0303174,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.144989,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.251068,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.143995,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.540052,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.138668,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.18734,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00572761,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00525596,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.040423,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.038871,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0461506,
'Execution Unit/Register Files/Runtime Dynamic': 0.044127,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0993613,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.246254,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.44332,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00141613,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00141613,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00124171,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000485201,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000558386,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00463235,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0132827,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0373677,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.37691,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.117954,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.126918,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.71254,
'Instruction Fetch Unit/Runtime Dynamic': 0.300154,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0873171,
'L2/Runtime Dynamic': 0.0261277,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.17728,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.491529,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0304163,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0304164,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.32149,
'Load Store Unit/Runtime Dynamic': 0.671949,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0750014,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.150003,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0266182,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0279272,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.147787,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0193444,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.352303,
'Memory Management Unit/Runtime Dynamic': 0.0472716,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 17.2227,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0199827,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00765439,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0750745,
'Renaming Unit/Int Front End RAT/Subthreshold
<filename>fsleyes/gl/shaders/arbp/parse.py
#!/usr/bin/env python
#
# parse.py - Very simple parser for ARB assembly shader programs.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides functions for use with OpenGL ``ARB_vertex_program``
and ``ARB_fragment_program`` assembly source code. It defines a simple
templating system, allowing ARB assembly programs to be written in such a way
that input parameters, vertex attributes, texture locations, and texture
coordinates do not have to be hard coded in the source.
.. note:: This module is used by the :class:`.ARBPShader` class - if you use
the :class:`.ARBPShader` class, you will not need to use this module
at all.
Instead, place holder expressions can be used in the source code. These
expressions may be parsed (using ``jinja2``) by the :func:`parseARBP`
function. Values can then be assigned to the place holders using the
:func:`fillARBP` function.
An example
----------
As an example, consider the following vertex program, for drawing a slice
from a 3D image texture::
!!ARBvp1.0
PARAM imageShape = program.local[0];
TEMP voxCoord;
# Transform the vertex position into display coordinates
DP4 result.position.x, state.matrix.mvp.row[0], vertex.position;
DP4 result.position.y, state.matrix.mvp.row[1], vertex.position;
DP4 result.position.z, state.matrix.mvp.row[2], vertex.position;
DP4 result.position.w, state.matrix.mvp.row[3], vertex.position;
# Transform the texture coordinates (which are
# between 0 and 1) into voxel coordinates (which
# are within the image voxel dimensions).
MOV voxCoord, vertex.texcoord[0];
MUL voxCoord, voxCoord, imageShape;
# Pass the texture coordinates and
# corresponding voxel coordinates
# through to the fragment program.
MOV result.texcoord[0], vertex.texcoord[0];
MOV result.texcoord[1], voxCoord;
END
And the corresponding fragment program, which looks up the voxel value
and colours the fragment accordingly::
!!ARBfp1.0
TEMP voxValue;
# A transformation matrix (encoding a linear
# offset/scale) which transforms a voxel value
# from the image texture data range to the
# colour map texture input coordinate range.
PARAM voxValXform[4] = { program.local[0],
program.local[1],
program.local[2],
program.local[3] };
# Get the voxel value
TEX voxValue.x, fragment.texcoord[0], texture[0], 3D;
# Transform the voxel value
MAD voxValue, voxValue, voxValXform[0].x, voxValXform[3].x;
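# (MAD is a multiply-add: voxValue = voxValue * scale + offset, with the
# scale and offset taken from voxValXform[0].x and voxValXform[3].x)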
# Get the colour that corresponds to the voxel value
TEX result.color, voxValue.x, texture[1], 1D;
END
This program requires:
- The image shape to be specified as a program parameter at index 0.
- Image texture coordinates to be passed as coordinates on texture unit 0.
- Both the vertex and fragment programs to know which texture units the
texture and voxel coordinates are passed through on.
- The image texture to be bound to texture unit 0.
- The colour map texture to be bound to texture unit 1.
By using this module, all of these requirements can be removed by re-writing
the vertex program as follows::
!!ARBvp1.0
PARAM imageShape = {{ param_imageShape }};
TEMP voxCoord;
# Transform the vertex position into display coordinates
DP4 result.position.x, state.matrix.mvp.row[0], vertex.position;
DP4 result.position.y, state.matrix.mvp.row[1], vertex.position;
DP4 result.position.z, state.matrix.mvp.row[2], vertex.position;
DP4 result.position.w, state.matrix.mvp.row[3], vertex.position;
# Transform the texture coordinates (which are
# between 0 and 1) into voxel coordinates (which
# are within the image voxel dimensions).
MOV voxCoord, {{ attr_texCoord }};
MUL voxCoord, voxCoord, imageShape;
# Pass the texture coordinates and
# corresponding voxel coordinates
# through to the fragment program.
MOV {{ varying_texCoord }}, {{ attr_texCoord }};
MOV {{ varying_voxCoord }}, voxCoord;
END
And the fragment program::
!!ARBfp1.0
TEMP voxValue;
# A transformation matrix (encoding a linear
# offset/scale) which transforms a voxel value
# from the image texture data range to the
# colour map texture input coordinate range.
PARAM voxValXform[4] = {{ param4_voxValXform }};
# Get the voxel value
TEX voxValue.x, {{ varying_texCoord }}, {{ texture_imageTexture }}, 3D;
# Transform the voxel value
MAD voxValue, voxValue, voxValXform[0].x, voxValXform[3].x;
# Get the colour that corresponds to the voxel value
TEX result.color, voxValue.x, {{ texture_colourMapTexture }}, 1D;
END
The :func:`parseARBP` function parses the source code and returns information
about all declared items. The :func:`fillARBP` function can then be used to
assign explicit values to each of the items::
vertSrc = '!!ARBvp1.0 vertex shader source'
fragSrc = '!!ARBfp1.0 fragment shader source'
# Get information about all parameters,
# attributes, textures, varyings, and
# constants.
items = parse.parseARBP(vertSrc, fragSrc)
# ...
# You have to calculate positions for
# parameters, attributes and textures.
# Positions for varying items are
# automatically calculated for you.
# ...
vertParams = {'imageShape' : 0}
vertParamLens = {'imageShape' : 1}
fragParams = {'voxValXform' : 0}
fragParamLens = {'voxValXform' : 4}
textures = {'imageTexture' : 0,
'colourMapTexture' : 1}
attrs = {'texCoord' : 0}
constants = {}
# Fill in the template
vertSrc, fragSrc = parse.fillARBP(vertSrc,
fragSrc,
vertParams,
vertParamLens,
fragParams,
fragParamLens,
constants,
textures,
attrs)
# Now you can compile the source
# code and run your program!
Template expressions
--------------------
The following items may be specified as template expressions. As depicted in
the example above, an expression is specified in the following manner (with
the exception of constant values, which are described below)::
{{ tokenPrefix_itemName }}
Prefixes for each item type are as follows:
=====================  =================
Item                   Expression prefix
=====================  =================
*Parameters*           ``param``
*Vertex attributes*    ``attr``
*Textures*             ``texture``
*Varying attributes*   ``varying``
=====================  =================
Parameters
==========
*Parameters* are constant values which are passed to every instantiation of a
shader program - they are equivalent to ``uniform`` values in a GLSL program.
In a normal ARB assembly program, parameters are accessed as follows::
PARAM imageShape = program.local[0];
When using this module, you may instead access parameters in this way::
PARAM imageShape = {{ param_imageShape }};
Parameters with a length greater than 1 (e.g. matrix parameters) are
traditionally accessed in this way::
PARAM xform[4] = { program.local[0],
program.local[1],
program.local[2],
program.local[3] };
When using this module, you may access matrix parameters in this way::
PARAM xform[4] = {{ param4_xform }};
Vertex attributes
=================
*Vertex attributes* are values which are associated with every rendered
vertex. They are equivalent to ``attribute`` values in a GLSL program.
In a normal ARB assembly program, one would typically pass vertex
attributes as texture coordinates bound to a specified texture unit::
ATTRIB texCoord = vertex.texcoord[0];
When using this module, you may access vertex attributes as follows::
ATTRIB texCoord = {{ attr_texCoord }};
Textures
========
In a typical ARB assembly program, the texture unit to which each texture is
bound must be hard coded::
TEX voxelValue, texCoord, texture[0], 3D;
This can be avoided by using texture expressions::
TEX voxelValue, texCoord, {{ texture_imageTexture }}, 3D;
Varying attributes
==================
Varying attributes are attributes which are generated in the vertex program,
and passed through to the fragment program. They are equivalent to ``varying``
values in a GLSL program. In an ARB assembly program, they are typically
passed and accessed as texture coordinates::
!!ARBvp1.0
# In the vertex program, we pass varying
# attribute through as texture coordinates:
MOV result.texcoord[0], texCoord;
MOV result.texcoord[1], voxCoord;
# ...
!!ARBfp1.0
# ...
# In the fragment program, we access varying
# attributes as texture coordinates
TEMP texCoord;
TEMP voxCoord;
MOV texCoord, fragment.texcoord[0];
MOV voxCoord, fragment.texcoord[1];
# ...
This can be avoided by using the :func:`fillARBP` function, which will
automatically assign texture coordinate positions to each varying attribute.
The assembly code can thus be re-written as follows::
!!ARBvp1.0
# ...
MOV {{ varying_texCoord }}, texCoord;
MOV {{ varying_voxCoord }}, voxCoord;
# ...
!!ARBfp1.0
# ...
TEMP texCoord;
TEMP voxCoord;
MOV texCoord, {{ varying_texCoord }};
MOV voxCoord, {{ varying_voxCoord }};
# ...
Constants
=========
All expressions in the source which do not fit into any of the above
categories are treated as "constant" values. These can be used to specify any
values which will not change across multiple executions of the program. As a
silly example, let's say you want to apply a fixed offset to some texture
coordinates. You could do this::
!!ARBfp1.0
# ...
TEMP texCoord;
MOV texCoord, {{ varying_texCoord }};
ADD texCoord, texCoord, {{ my_fixed_offset }};
Then, when calling :func:`fillARBP` to generate the source code, add
``my_fixed_offset`` as a constant::
vertSrc = '!!ARBvp1.0 vertex shader source'
fragSrc = '!!ARBfp1.0 fragment shader source'
items = parse.parseARBP(vertSrc, fragSrc)
vertParams = {}
vertParamLens = {}
fragParams = {}
fragParamLens = {}
textures = {}
attrs = {'texCoord' : 0}
constants = {'my_fixed_offset' : '{0.1, 0.2, 0.3, 0}'}
# Fill in the template
vertSrc, fragSrc = parse.fillARBP(vertSrc,
fragSrc,
vertParams,
vertParamLens,
fragParams,
fragParamLens,
constants,
textures,
attrs)
Constant values can also be used in ``jinja2`` ``if`` and ``for`` statements.
For example, to unroll a
one output node are not processed
h = np.zeros(len(self.output_nodes))
for i, out_node in enumerate(self.output_nodes):
h[i] = self.process_node(out_node)
return h
def nodes(self) -> List["ne.neat.genes.NodeGene"]:
"""
Returns all the genome's node genes. Order: inputs, bias, outputs and
hidden.
"""
return (self.input_nodes +
([self.bias_node] if self.bias_node is not None else []) +
self.output_nodes +
self.hidden_nodes)
def valid_out_nodes(self) -> bool:
""" Checks if all the genome's output nodes are valid.
An output node is considered to be valid if it receives, during its
processing, at least one input, i.e., the node has at least one enabled
incoming connection. Invalid output nodes simply output a fixed
default value and are, in many cases, undesirable.
Returns:
`True` if all the genome's output nodes have at least one enabled
incoming connection and `False` otherwise. Self-connecting
connections are not considered.
"""
for out_node in self.output_nodes:
valid = False
for in_con in out_node.in_connections:
if in_con.enabled and not in_con.self_connecting():
valid = True
break
if not valid:
return False
return True
def valid_in_nodes(self) -> bool:
""" Checks if all the genome's input nodes are valid.
An input node is considered to be valid if it has at least one enabled
connection leaving it, i.e., its activation is used as input by at least
one other node.
Returns:
`True` if all the genome's input nodes are valid and `False`
otherwise.
"""
for in_node in self.input_nodes:
valid = False
for out_con in in_node.out_connections:
if out_con.enabled:
valid = True
break
if not valid:
return False
return True
def mate(self, other: "NeatGenome") -> "NeatGenome":
""" Mates two genomes to produce a new genome (offspring).
Sexual reproduction. Follows the idea described in the original paper of
the NEAT algorithm:
"When crossing over, the genes in both genomes with the same innovation
numbers are lined up. These genes are called matching genes. (...).
Matching genes are inherited randomly, whereas disjoint genes (those
that do not match in the middle) and excess genes (those that do not
match in the end) are inherited from the more fit parent. (...) [If the
parents fitness are equal] the disjoint and excess genes are also
inherited randomly. (...) there’s a preset chance that an inherited gene
is disabled if it is disabled in either parent." - :cite:`stanley:ec02`
Args:
other (NeatGenome): The second genome. Currently,
:class:`.NeatGenome` is only compatible for mating with
instances of :class:`.NeatGenome` or of one of its subclasses.
Returns:
A new genome (the offspring born from the sexual reproduction
between the current genome and the genome passed as argument).
Raises:
IncompatibleGenomesError: If the genome passed as argument to
``other`` is incompatible with the current genome (`self`).
"""
if not issubclass(type(other), NeatGenome):
raise ne.IncompatibleGenomesError(
"Instances of `NeatGenome` are currently only compatible for "
"sexual reproduction with instances of `NeatGenome or one of "
"its subclasses!"
)
# aligning matching genes
genes = ne.neat.align_connections(self.connections, other.connections)
# new genome
new_gen = self.simple_copy()
copied_nodes = {n.id: n for n in new_gen.nodes()}
# mate (choose new genome's connections)
chosen_connections = []
for c1, c2 in zip(*genes):
if c1 is None and self.adj_fitness > other.adj_fitness:
# case 1: the gene is missing on self and self is dominant
# (higher fitness); action: ignore the gene
continue
if c2 is None and other.adj_fitness > self.adj_fitness:
# case 2: the gene is missing on other and other is dominant
# (higher fitness); action: ignore the gene
continue
# case 3: the gene is missing either on self or on other and their
# fitness are equal; action: random choice
# case 4: the gene is present both on self and on other; action:
# random choice
c = np.random.choice((c1, c2))
if c is not None:
# if the gene is disabled in either parent, it has a chance to
# also be disabled in the new genome
enabled = True
if ((c1 is not None and not c1.enabled)
or (c2 is not None and not c2.enabled)):
enabled = not ne.utils.chance(
self.config.disable_inherited_connection_chance)
chosen_connections.append((c, enabled))
# adding the hidden nodes of the connection (if needed)
for node in (c.from_node, c.to_node):
if (node.type == ne.neat.NodeGene.Type.HIDDEN
and node.id not in copied_nodes):
new_node = node.simple_copy()
new_gen.hidden_nodes.append(new_node)
copied_nodes[node.id] = new_node
# adding inherited connections
for c, enabled in chosen_connections:
src_node = copied_nodes[c.from_node.id]
dest_node = copied_nodes[c.to_node.id]
try:
new_gen.add_connection(cid=c.id,
src_node=src_node, dest_node=dest_node,
enabled=enabled, weight=c.weight)
except ConnectionExistsError:
# if this exception is raised, it means that the connection was
# already inherited from the other parent; this is possible
# because, in some cases, a connection between the same two
# nodes appears in different generations and are assigned,
# because of that, different IDs.
pass
# _debug_mating(genes, c, self, other, new_gen)
# raise ConnectionExistsError()
return new_gen
def info(self) -> str:
"""
Returns a string with the genome's node activations and connections.
Used mostly for debugging purposes.
"""
txt = ">> NODES ACTIVATIONS\n"
for n in self.nodes():
txt += f"[{n.id}][{str(n.type).split('.')[1][0]}] {n.activation}\n"
txt += "\n>> CONNECTIONS\n"
for c in self.connections:
txt += f"[{'ON' if c.enabled else 'OFF'}][{c.id}]" \
f"[{c.from_node.id}->{c.to_node.id}] {c.weight}\n"
return txt
def visualize(self, **kwargs) -> None:
""" Simple wrapper for the
:func:`nevopy.neat.visualization.visualize_genome` function. Please
refer to its documentation for more information.
"""
ne.neat.visualize_genome(genome=self, **kwargs)
def visualize_activations(self, **kwargs) -> Any:
""" Simple wrapper for the
:func:`nevopy.neat.visualization.visualize_activations` function. Please
refer to its documentation for more information.
"""
return ne.neat.visualize_activations(genome=self, **kwargs)
def _debug_mating(genes, c, gen1, gen2, new_gen):
""" Used to debug the "mate_genomes" function. """
alignment_info = ""
for gene1, gene2 in zip(*genes):
alignment_info += (
" " + (f"[cid={gene1.id}, src={gene1.from_node.id}, "
f"dest={gene1.to_node.id}]"
if gene1 is not None
else 11 * " " + "-" + 10 * " ") +
" | " + (f"[cid={gene2.id}, src={gene2.from_node.id}, "
f"dest={gene2.to_node.id}]"
if gene2 is not None
else 11 * " " + "-" + 11 * " ") +
"\n"
)
p1_cons = [(con.from_node.id, con.to_node.id, con.enabled)
for con in gen1.connections]
p2_cons = [(con.from_node.id, con.to_node.id, con.enabled)
for con in gen2.connections]
child_cons = [(con.from_node.id, con.to_node.id, con.enabled)
for con in new_gen.connections]
print(
"\n\n" + 50 * "#" + "\n\n"
f"Error while adding the connection {c.from_node.id, c.to_node.id} "
f"to a new child node generated by mating.\n"
f"Parent 1's connections: {p1_cons}\n"
f"Parent 2's connections: {p2_cons}\n"
f"Child's connections: {child_cons}\n"
f"Genes alignment: \n{alignment_info}\n"
)
gen1.visualize(block_thread=False)
gen2.visualize()
class ConnectionExistsError(Exception):
"""
Exception that indicates that a connection between two given nodes already
exists.
"""
pass
class ConnectionToBiasNodeError(Exception):
"""
Exception that indicates that an attempt has been made to create a
connection containing a bias node as destination.
"""
pass
class FixTopNeatGenome(NeatGenome):
""" Integration of a NEAT genome with a fixed topology genome.
This class defines a new type of NEAT genome that integrates the default
:class:`.NeatGenome` with a :class:`.FixedTopologyGenome`. It can be used
with :class:`.NeatPopulation`.
When an input is received, it's first processed by the layers of the fixed
topology genome. The output is then processed using NEAT, which generates
the final output.
Note:
This class is useful when the inputs that will be fed to the genome have
high dimensions. Since NEAT doesn't scale well with such lengthy inputs
(like images), a fixed topology genome (that can contain, for instance,
convolutional layers) can be used to reduce the dimensionality of the
input before feeding it to NEAT's nodes.
Args:
fito_genome (FixedTopologyGenome): Instance of
:class:`.FixedTopologyGenome` to be used to pre-process the inputs.
It will also be evolved.
num_neat_inputs (int): Length of the flattened outputs of the fixed
topology genome. It's also the number of input nodes of the NEAT
genome.
num_neat_outputs (int): Number of output nodes of the NEAT genome.
config (NeatConfig): Settings of the current evolutionary session.
initial_neat_connections (bool): Whether to create connections
connecting each input node of the NEAT genome to each of its output
nodes.
"""
def __init__(self,
fito_genome: "ne.fixed_topology.FixedTopologyGenome",
num_neat_inputs: int,
num_neat_outputs: int,
config: "ne.neat.config.NeatConfig",
initial_neat_connections: bool = True) -> None:
super().__init__(num_inputs=num_neat_inputs,
num_outputs=num_neat_outputs,
config=config,
initial_connections=initial_neat_connections)
self.fito_genome = fito_genome
def distance(self, other: NeatGenome) -> float:
""" Sums, to the default distance calculated | |
<filename>nepc/nepc.py
"""This package provides the following functionality for NRL Evaluated Plasma
Chemistry (NEPC) style databases:
- create data files for building a NEPC database from common sources (e.g. LXCat)
- build a NEPC style database on a MySQL server
- establishing a connection to a local or remote database
- access cross section data via the CS class
- access pre-defined plasma chemistry models via the Model class
- curate, visualize, and use cross section data
- perform exploratory data analysis (EDA) of cross section data
- print statistics about a database (e.g. number of rows in various tables)
Examples
--------
Establish a connection to the database named `nepc` running on a
production server:
>>> cnx, cursor = nepc.connect()
Establish a connection to the `nepc_test` database
running on the local machine:
>>> cnx, cursor = nepc.connect(local=True, test=True)
Access the pre-defined plasma chemistry model, `fict`, in the `nepc_test` database:
>>> fict = nepc.Model(cursor, "fict")
Print a summary of the ``fict`` model, including a stylized Pandas dataframe:
>>> fict.summary()
Additional examples of EDA using nepc are in ``tests/data/eda``. Examples of methods for
building data files for the ``nepc_test`` database, including parsing
`LXCat <https://nl.lxcat.net/data/set_type.php>`_ formatted data,
are in ``tests/data/methods``.
"""
from typing import List
import numpy as np
from pandas import DataFrame
import pandas as pd
import mysql.connector
import matplotlib.pyplot as plt
from nepc.util import config
PRODUCTION = config.production()
def connect(local=False, DBUG=False, test=False, travis=False):
"""Establish a connection to a NEPC MySQL database
Parameters
----------
local : bool, optional
Access a database on localhost; otherwise use the production
server (default False).
DBUG : bool, optional
Print debug info (default False).
test : bool, optional
If true, access the `nepc_test` database; otherwise, connect to the `nepc` database.
travis : bool, optional
If true, connect to a database on TravisCI (default False).
Returns
-------
cnx : `connection.MySQLConnection <https://dev.mysql.com/doc/connectors/en/connector-python-api-mysqlconnection.html>`_
A connection to a NEPC MySQL database.
cursor : `cursor.MySQLCursor <https://dev.mysql.com/doc/connectors/en/connector-python-api-mysqlcursor.html>`_
A MySQLCursor object that can execute operations such as SQL statements. `cursor`
interacts with the NEPC server using the `cnx` connection.
"""
if local:
hostname = 'localhost'
else:
hostname = PRODUCTION
if DBUG: # pragma: no cover
print("\nUsing NEPC database on " + hostname)
if test:
database = 'nepc_test'
else:
database = 'nepc'
if travis:
config = {'user': 'root',
'host': hostname,
'database': database,
'raise_on_warnings': True}
else:
config = {'user': 'nepc',
'password': '<PASSWORD>',
'host': hostname,
'database': database,
'raise_on_warnings': True}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
return cnx, cursor
def count_table_rows(cursor, table: str):
"""Return the number of rows in a MySQL table.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
table : str
Name of a table in the NEPC database at ``cursor``.
Returns
-------
: int
Number of rows in ``table``.
"""
cursor.execute("select count(*) from " + table + ";")
table_rows = cursor.fetchall()
return table_rows[0][0]
def model_cs_id_list(cursor, model_name):
"""Get a list of ``cs_id``'s for a model in a NEPC database.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
model_name : str
Name of a model in the NEPC MySQL database
Returns
-------
cs_id_list : list of int
cs_id's corresponding to cross sections in the model
"""
cursor.execute("SELECT cs.cs_id as cs_id " +
"FROM cs " +
"JOIN models2cs m2cs ON (cs.cs_id = m2cs.cs_id) " +
"JOIN models m ON (m2cs.model_id = m.model_id) " +
"WHERE m.name LIKE '" + model_name + "'")
cs_id_list = cursor.fetchall()
cs_id_list = [cs_id[0] for cs_id in cs_id_list]
return cs_id_list
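# Illustrative usage sketch (assumes the `fict` model mentioned in the
# module docstring exists in the `nepc_test` database):
#
#   cnx, cursor = connect(local=True, test=True)
#   for cs_id in model_cs_id_list(cursor, 'fict'):
#       e_energy, sigma = cs_e_sigma(cursor, cs_id)  # defined below
#       plt.plot(e_energy, sigma)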
def cs_e_sigma(cursor, cs_id):
"""Get electron energy and cross section data for a given ``cs_id`` in a NEPC database.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
The ``cs_id`` for a cross section dataset in the NEPC database at ``cursor``.
Returns
-------
: list of float
Electron energies for the cross section dataset corresponding to ``cs_id``.
: list of float
Cross sections for the cross section dataset corresponding to ``cs_id``.
"""
cursor.execute("SELECT e, sigma FROM csdata WHERE cs_id = " +
str(cs_id))
cross_section = cursor.fetchall()
# print(cross_section)
e_energy = [i[0] for i in cross_section]
sigma = [i[1] for i in cross_section]
return e_energy, sigma
def cs_e(cursor, cs_id):
"""Get the electron energies for a cross section dataset in a NEPC database
corresponding to a given ``cs_id``.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
The ``cs_id`` for a cross section dataset in the NEPC database at ``cursor``.
Returns
-------
: list of float
Electron energies for the cross section dataset corresponding to ``cs_id``.
"""
cursor.execute("SELECT e FROM csdata WHERE cs_id = " +
str(cs_id))
cross_section = cursor.fetchall()
# print(cross_section)
return [i[0] for i in cross_section]
def cs_sigma(cursor, cs_id):
"""Get the cross sections for a cross section dataset in a NEPC database
corresponding to a given ``cs_id``.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
The ``cs_id`` for a cross section dataset in the NEPC database at ``cursor``.
Returns
-------
: list of float
Cross sections for the cross section dataset corresponding to ``cs_id``.
"""
cursor.execute("SELECT sigma FROM csdata WHERE cs_id = " +
str(cs_id))
sigma = cursor.fetchall()
# print(sigma)
return [i[0] for i in sigma]
def cs_metadata(cursor, cs_id):
"""Get metadata for a given ``cs_id`` in a NEPC database.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
The ``cs_id`` for a cross section dataset in the NEPC database at ``cursor``.
Returns
-------
list
See :attr:`CS.metadata`. List items are in same order as :attr:`.CS.metadata`.
"""
cursor.execute("SELECT A.`cs_id` , "
"C.`name` , "
"A.`units_e`, A.`units_sigma`, A.`ref`, "
"D.`name`, E.`name`, "
"F.`name`, G.`name`, "
"A.`threshold`, A.`wavelength`, A.`lhs_v`, A.`rhs_v`, "
"A.`lhs_j`, A.`rhs_j`, "
"A.`background`, A.`lpu`, A.`upu`, "
"D.`long_name`, E.`long_name`, "
"F.`long_name`, G.`long_name`, "
"C.`lhs_e`, C.`rhs_e`, "
"C.`lhs_hv`, C.`rhs_hv`, "
"C.`lhs_v`, C.`rhs_v`, "
"C.`lhs_j`, C.`rhs_j` "
"FROM `cs` AS A "
"LEFT JOIN `processes` AS C "
"ON C.`id` = A.`process_id` "
"LEFT JOIN `states` AS D "
"ON D.`id` = A.`lhsA_id` "
"LEFT JOIN `states` AS E "
"ON E.`id` = A.`lhsB_id` "
"LEFT JOIN `states` AS F "
"ON F.`id` = A.`rhsA_id` "
"LEFT JOIN `states` AS G "
"ON G.`id` = A.`rhsB_id` "
"WHERE A.`cs_id` = " + str(cs_id))
return list(cursor.fetchall()[0])
class CS:
r"""A cross section data set, including metadata and cross section data,
from a NEPC MySQL database.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
id of the cross section in `cs` and `csdata` tables
Attributes
----------
metadata : dict
cs_id : int
id of the cross section in `cs` and `csdata` tables
process : str
`name` of process from `processes` table
units_e : float
units of electron energy list e in eV
units_sigma : float
units of cross section list sigma in m^2
ref : str
`ref` from `cs` table corresponding to entry in
'[nepc]/models/ref.bib'
lhsA : str
`name` of lhsA state from `states` table
lhsB : str
`name` of lhsB state from `states` table
rhsA : str
`name` of rhsA state from `states` table
rhsB : str
`name` of rhsB state from `states` table
wavelength : float
wavelength of photon involved in process in nanometers (nm)
lhs_v : int
vibrational energy level of lhsA
rhs_v : int
vibrational energy level of rhsA
lhs_j : int
rotational energy level of lhsA
rhs_j : int
rotational energy level of rhsA
background : str
background text describing origin of data and other important info
lpu : float
lower percent uncertainty
upu : float
upper percent uncertainty
lhsA_long : str
`long_name` of lhsA state from `states` table
lhsB_long : str
`long_name` of lhsB state from `states` table
rhsA_long : str
`long_name` of rhsA state from `states` table
rhsB_long : str
`long_name` of rhsB state from `states` table
e_on_lhs : int
number of electrons on lhs
e_on_rhs : int
number of electrons on rhs
hv_on_lhs : int
photon on lhs? (0 or 1)
hv_on_rhs : int
photon on rhs? (0 or 1)
#!/usr/bin/env python
from translate.convert import html2po
from translate.convert import po2html
from translate.convert import test_convert
from translate.misc import wStringIO
class TestHTML2PO:
def html2po(self, markup, includeuntagged=False, duplicatestyle="msgctxt", keepcomments=False):
"""Helper to convert html to po without a file."""
inputfile = wStringIO.StringIO(markup)
convertor = html2po.html2po()
outputpo = convertor.convertfile(inputfile, "test", False, includeuntagged, duplicatestyle, keepcomments)
return outputpo
def po2html(self, posource, htmltemplate):
"""Helper to convert po to html without a file."""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
templatefile = wStringIO.StringIO(htmltemplate)
assert po2html.converthtml(inputfile, outputfile, templatefile)
return outputfile.getvalue()
def countunits(self, pofile, expected):
"""helper to check that we got the expected number of messages"""
actual = len(pofile.units)
if actual > 0:
if pofile.units[0].isheader():
actual = actual - 1
print pofile
assert actual == expected
def compareunit(self, pofile, unitnumber, expected):
"""helper to validate a PO message"""
if not pofile.units[0].isheader():
unitnumber = unitnumber - 1
print 'unit source: ' + pofile.units[unitnumber].source.encode('utf-8') + '|'
print 'expected: ' + expected.encode('utf-8') + '|'
assert unicode(pofile.units[unitnumber].source) == unicode(expected)
def check_single(self, markup, itemtext):
"""checks that converting this markup produces a single element with value itemtext"""
pofile = self.html2po(markup)
self.countunits(pofile, 1)
self.compareunit(pofile, 1, itemtext)
def check_null(self, markup):
"""checks that converting this markup produces no elements"""
pofile = self.html2po(markup)
self.countunits(pofile, 0)
def check_phpsnippet(self, php):
"""Given a snippet of php, put it into an HTML shell and see
if the results are as expected"""
self.check_single('<html><head></head><body><p><a href="'+php+'/site.html">Body text</a></p></body></html>', "Body text")
self.check_single('<html><head></head><body><p>More things in <a href="'+php+'/site.html">Body text</a></p></body></html>', 'More things in <a href="'+php+'/site.html">Body text</a>')
self.check_null('<html><head></head><body><p>'+php+'</p></body></html>')
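# Illustrative (hypothetical snippet): a test for PHP short tags could
# exercise this helper with, e.g.,
#   self.check_phpsnippet('''<?=$phpvariable?>''')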
def test_htmllang(self):
"""test to ensure that we no longer use the lang attribure"""
markup = '''<html lang="en"><head><title>My title</title></head><body></body></html>'''
pofile = self.html2po(markup)
self.countunits(pofile, 1)
# Check that the first item is the <title> not <head>
self.compareunit(pofile, 1, "My title")
def test_title(self):
"""test that we can extract the <title> tag"""
self.check_single("<html><head><title>My title</title></head><body></body></html>", "My title")
def test_title_with_linebreak(self):
"""Test a linebreak in the <title> tag"""
htmltext = '''<html>
<head>
<title>My
title</title>
</head>
<body>
</body>
</html>
'''
self.check_single(htmltext, "My title")
def test_meta(self):
"""Test that we can extract certain <meta> info from <head>."""
self.check_single('''<html><head><meta name="keywords" content="these are keywords"></head><body></body></html>''', "these are keywords")
def test_tag_p(self):
"""test that we can extract the <p> tag"""
self.check_single("<html><head></head><body><p>A paragraph.</p></body></html>", "A paragraph.")
markup = "<p>First line.<br>Second line.</p>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_p_with_linebreak(self):
"""Test newlines within the <p> tag."""
htmltext = '''<html>
<head>
</head>
<body>
<p>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</p>
</body>
</html>
'''
self.check_single(htmltext, "A paragraph is a section in a piece of writing, usually highlighting a particular point or topic. It always begins on a new line and usually with indentation, and it consists of at least one sentence.")
markup = "<p>First\nline.<br>Second\nline.</p>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_div(self):
"""test that we can extract the <div> tag"""
self.check_single("<html><head></head><body><div>A paragraph.</div></body></html>", "A paragraph.")
markup = "<div>First line.<br>Second line.</div>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_div_with_linebreaks(self):
"""Test linebreaks within a <div> tag."""
htmltext = '''<html>
<head>
</head>
<body>
<div>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</div>
</body>
</html>
'''
self.check_single(htmltext, "A paragraph is a section in a piece of writing, usually highlighting a particular point or topic. It always begins on a new line and usually with indentation, and it consists of at least one sentence.")
markup = "<div>First\nline.<br>Second\nline.</div>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_a(self):
"""test that we can extract the <a> tag"""
self.check_single('<html><head></head><body><p>A paragraph with <a href="http://translate.org.za/">hyperlink</a>.</p></body></html>', 'A paragraph with <a href="http://translate.org.za/">hyperlink</a>.')
def test_tag_a_with_linebreak(self):
"""Test that we can extract the <a> tag with newlines in it."""
htmltext = '''<html>
<head>
</head>
<body>
<p>A
paragraph
with <a
href="http://translate.org.za/">hyperlink</a>
and
newlines.</p></body></html>
'''
self.check_single(htmltext, 'A paragraph with <a href="http://translate.org.za/">hyperlink</a> and newlines.')
def test_tag_img(self):
"""Test that we can extract the alt attribute from the <img> tag."""
self.check_single('''<html><head></head><body><img src="picture.png" alt="A picture"></body></html>''', "A picture")
def test_img_empty(self):
"""Test that we can extract the alt attribute from the <img> tag."""
htmlsource = '''<html><head></head><body><img src="images/topbar.jpg" width="750" height="80"></body></html>'''
self.check_null(htmlsource)
def test_tag_table_summary(self):
"""Test that we can extract the summary attribute."""
self.check_single('''<html><head></head><body><table summary="Table summary"></table></body></html>''', "Table summary")
def test_table_simple(self):
"""Test that we can fully extract a simple table."""
markup = '''<html><head></head><body><table><tr><th>Heading One</th><th>Heading Two</th><tr><td>One</td><td>Two</td></tr></table></body></html>'''
pofile = self.html2po(markup)
self.countunits(pofile, 4)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "One")
self.compareunit(pofile, 4, "Two")
def test_table_complex(self):
markup = '''<table summary="This is the summary"><caption>A caption</caption><thead><tr><th abbr="Head 1">Heading One</th><th>Heading Two</th></thead><tfoot><tr><td>Foot One</td><td>Foot Two</td></tr></tfoot><tbody><tr><td>One</td><td>Two</td></tr></tbody></table>'''
pofile = self.html2po(markup)
self.countunits(pofile, 9)
self.compareunit(pofile, 1, "This is the summary")
self.compareunit(pofile, 2, "A caption")
self.compareunit(pofile, 3, "Head 1")
self.compareunit(pofile, 4, "Heading One")
self.compareunit(pofile, 5, "Heading Two")
self.compareunit(pofile, 6, "Foot One")
self.compareunit(pofile, 7, "Foot Two")
self.compareunit(pofile, 8, "One")
self.compareunit(pofile, 9, "Two")
def test_table_empty(self):
"""Test that we ignore tables that are empty.
A table is deemed empty if it has no translatable content.
"""
self.check_null('''<html><head></head><body><table><tr><td><img src="bob.png"></td></tr></table></body></html>''')
self.check_null('''<html><head></head><body><table><tr><td> </td></tr></table></body></html>''')
self.check_null('''<html><head></head><body><table><tr><td><strong></strong></td></tr></table></body></html>''')
def test_address(self):
"""Test to see if the address element is extracted"""
self.check_single("<body><address>My address</address></body>", "My address")
def test_headings(self):
"""Test to see if the h* elements are extracted"""
markup = "<html><head></head><body><h1>Heading One</h1><h2>Heading Two</h2><h3>Heading Three</h3><h4>Heading Four</h4><h5>Heading Five</h5><h6>Heading Six</h6></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 6)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "Heading Three")
self.compareunit(pofile, 4, "Heading Four")
self.compareunit(pofile, 5, "Heading Five")
self.compareunit(pofile, 6, "Heading Six")
def test_headings_with_linebreaks(self):
"""Test to see if h* elements with newlines can be extracted"""
markup = "<html><head></head><body><h1>Heading\nOne</h1><h2>Heading\nTwo</h2><h3>Heading\nThree</h3><h4>Heading\nFour</h4><h5>Heading\nFive</h5><h6>Heading\nSix</h6></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 6)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "Heading Three")
self.compareunit(pofile, 4, "Heading Four")
self.compareunit(pofile, 5, "Heading Five")
self.compareunit(pofile, 6, "Heading Six")
def test_dt(self):
"""Test to see if the definition list title (dt) element is extracted"""
self.check_single("<html><head></head><body><dl><dt>Definition List Item Title</dt></dl></body></html>", "Definition List Item Title")
def test_dd(self):
"""Test to see if the definition list description (dd) element is extracted"""
self.check_single("<html><head></head><body><dl><dd>Definition List Item Description</dd></dl></body></html>", "Definition List Item Description")
def test_span(self):
"""test to check that we don't double extract a span item"""
self.check_single("<html><head></head><body><p>You are a <span>Spanish</span> sentence.</p></body></html>", "You are a <span>Spanish</span> sentence.")
def test_ul(self):
"""Test to see if the list item <li> is exracted"""
markup = "<html><head></head><body><ul><li>Unordered One</li><li>Unordered Two</li></ul><ol><li>Ordered One</li><li>Ordered Two</li></ol></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 4)
self.compareunit(pofile, 1, "Unordered One")
self.compareunit(pofile, 2, "Unordered Two")
self.compareunit(pofile, 3, "Ordered One")
self.compareunit(pofile, 4, "Ordered Two")
def test_duplicates(self):
"""check that we use the default style of msgctxt to disambiguate duplicate messages"""
markup = "<html><head></head><body><p>Duplicate</p><p>Duplicate</p></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 2)
# FIXME change this so that we check that the msgctxt is correctly added
self.compareunit(pofile, 1, "Duplicate")
self.compareunit(pofile, 2, "Duplicate")
def wtest_multiline_reflow(self):
"""check that we reflow multiline content to make it more readable for translators"""
self.check_single('''<td valign="middle" width="96%"><font class="headingwhite">South
Africa</font></td>''', '''<font class="headingwhite">South Africa</font>''')
def wtest_nested_tags(self):
"""check that we can extract items within nested tags"""
markup = "<div><p>Extract this</p>And this</div>"
pofile = self.html2po(markup)
self.countunits(pofile, 2)
self.compareunit(pofile, 1, "Extract this")
self.compareunit(pofile, 2, "And this")
def test_carriage_return(self):
"""Remove carriage returns from files in dos format."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\r
<html><!-- InstanceBegin template="/Templates/masterpage.dwt" codeOutsideHTMLIsLocked="false" -->\r
<head>\r
<!-- InstanceBeginEditable name="doctitle" -->\r
<link href="fmfi.css" rel="stylesheet" type="text/css">\r
</head>\r
\r
<body>\r
<p>The rapid expansion of telecommunications infrastructure in recent\r
years has helped to bridge the digital divide to a limited extent.</p> \r
</body>\r
<!-- InstanceEnd --></html>\r
'''
self.check_single(htmlsource, 'The rapid expansion of telecommunications infrastructure in recent years has helped to bridge the digital divide to a limited extent.')
def test_encoding_latin1(self):
"""Convert HTML input in iso-8859-1 correctly to unicode."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><!-- InstanceBegin template="/Templates/masterpage.dwt" codeOutsideHTMLIsLocked="false" -->
<head>
<!-- InstanceBeginEditable name="doctitle" -->
<title>FMFI - South Africa - CSIR Openphone - Overview</title>
<!-- InstanceEndEditable -->
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<meta name="keywords" content="fmfi, first mile, first inch, wireless, rural development, access devices, mobile devices, wifi, connectivity, rural connectivty, ict, low cost, cheap, digital divide, csir, idrc, community">
<!-- InstanceBeginEditable name="head" -->
<!-- InstanceEndEditable -->
<link href="../../../fmfi.css" rel="stylesheet" type="text/css">
</head>
<body>
<p>We aim to please \x96 will you aim too, please?</p>
<p>South Africa\x92s language diversity can be challenging.</p>
</body>
</html>
'''
pofile = self.html2po(htmlsource)
self.countunits(pofile, 4)
self.compareunit(pofile, 3, u'We aim to please \x96 will you aim too, please?')
self.compareunit(pofile, 4, u'South Africa\x92s language diversity can be challenging.')
def test_strip_html(self):
"""Ensure that unnecessary html is stripped from the | |
import ast
import json
import math
import re
from copy import deepcopy
from functools import partial
from types import FunctionType
from typing import Type, Tuple, Sequence, Dict, Callable, Any, Optional, List
from django.apps import apps
from django.conf.urls import url
from django.db.models import QuerySet, Model
from rest_framework.authentication import BaseAuthentication
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.permissions import BasePermission
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from rest_framework_json_api.filters import QueryParameterValidationFilter, OrderingFilter
from rest_framework_json_api.metadata import JSONAPIMetadata
from rest_framework_json_api.parsers import JSONParser
from rest_framework_json_api.renderers import JSONRenderer
from rest_framework_json_api.views import RelationshipView, ModelViewSet
from . import json_api_spec_http_methods
from . import lookups as filter_lookups
from . import plugins
from .common import LimitedJsonApiPageNumberPagination, JsonApiSearchFilter, LOGGER
from .constructors import _construct_serializer, _construct_filter_backend
from .json_api_spec_http_methods import HTTP_GET, HTTP_POST, HTTP_PATCH, HTTP_DELETE
from .namespace import _append_to_namespace, _RESOURCE_NAME_TO_SPICE, _MODEL_TO_SERIALIZERS
from .types import CustomField, Filter, Relation, GenericRelation, ComputedFilter, RelatedResource
FILTER_REGEX = re.compile(r'filter\[(?P<field>[\w_\-]+)(?P<op>\.[\w_\-]+)?\]', re.IGNORECASE)
FILTER_MAP = {
'is_null': 'is_null',
'is_not_null': 'is_not_null',
'eq': '==',
'ne': '!=',
'gt': '>',
'lt': '<',
'gte': '>=',
'lte': '<=',
'contains': 'like',
'icontains': 'ilike',
'not_icontains': 'not_ilike',
'not_contains': 'not_like',
'in': 'in',
'not_in': 'not_in',
'any': 'any',
'not_any': 'not_any',
}
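# Illustrative sketch (not part of the original module): how FILTER_REGEX and
# FILTER_MAP cooperate to turn a JSON:API query parameter such as
# `filter[name.icontains]=x` into a (field, operator) pair. The helper name
# `_parse_filter_param` is hypothetical.
def _parse_filter_param(param_name):
    match = FILTER_REGEX.match(param_name)
    if match is None:
        return None
    field = match.group('field')
    op = (match.group('op') or '.eq').lstrip('.')  # default lookup is equality
    return field, FILTER_MAP.get(op, '==')

# _parse_filter_param('filter[name.icontains]') -> ('name', 'ilike')
# _parse_filter_param('filter[age]')            -> ('age', '==')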
def get_dict_by_methods(view_type, allowed_http_methods):
out = {}
if view_type == 'get':
if HTTP_GET in allowed_http_methods:
out['get'] = 'retrieve'
if HTTP_PATCH in allowed_http_methods:
out['patch'] = 'update'
if HTTP_DELETE in allowed_http_methods:
out['delete'] = 'destroy'
elif view_type == 'list':
if HTTP_GET in allowed_http_methods:
out['get'] = 'list'
if HTTP_POST in allowed_http_methods:
out['post'] = 'create'
elif view_type == 'relation':
if HTTP_GET in allowed_http_methods:
out['get'] = 'retrieve_related'
return out
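# Example outputs (HTTP_GET etc. come from json_api_spec_http_methods); the
# returned dict is the `actions` mapping that DRF's `ViewSet.as_view()` expects:
#   get_dict_by_methods('get', [HTTP_GET, HTTP_PATCH])  -> {'get': 'retrieve', 'patch': 'update'}
#   get_dict_by_methods('list', [HTTP_GET, HTTP_POST])  -> {'get': 'list', 'post': 'create'}
#   get_dict_by_methods('relation', [HTTP_GET])         -> {'get': 'retrieve_related'}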
class JsonApiModelViewBuilder:
DEFAULT_RELATED_LIMIT = 100
def __init__(self, model: Type[Model],
primary_key_name: Optional[str] = None,
resource_name: Optional[str] = None,
api_version: Optional[str] = '',
allowed_methods=json_api_spec_http_methods.HTTP_ALL,
permission_classes: Optional[Sequence[Type[BasePermission]]] = None,
authentication_classes: Optional[Sequence[Type[BaseAuthentication]]] = None,
queryset: Optional[QuerySet] = None,
permitted_objects: Optional[Callable[[Request, QuerySet], QuerySet]] = None,
include_plugins: Optional[Sequence[str]] = None,
plugin_options: Optional[Dict[str, Any]] = None,
expose_related_views: Optional[bool] = False):
self.__validate_http_methods(allowed_methods)
self._model = model
self._fields = {}
self._filters = {}
self._computed_filters = {}
self._relations = {}
self._generic_relations = {}
self._custom_fields = {}
self._api_version = api_version.replace('.', '').replace('-', '')
self._url_api_version = f'v{api_version}'
self._primary_key_name = primary_key_name or 'id'
self._allowed_methods = [*allowed_methods]
self._resource_name = resource_name or self._model.objects.model._meta.db_table.split('_')[-1]
self._related_limit = self.DEFAULT_RELATED_LIMIT
self._permission_classes = permission_classes or []
self._authentication_classes = authentication_classes or []
self._before_create_callback = None
self._after_create_callback = None
self._after_get_callback = None
self._before_update_callback = None
self._after_update_callback = None
self._before_delete_callback = None
self._after_delete_callback = None
self._before_list_callback = None
self._after_list_callback = None
self._before_raw_response = None
self._expose_related_views = expose_related_views
self._is_admin = False
if queryset is None:
self._queryset = self._model.objects
else:
self._queryset = queryset
self._spice_queryset = permitted_objects
self._include_plugins = include_plugins or []
self._plugin_options = plugin_options or {}
@staticmethod
def __validate_http_methods(limit_to_http_methods: Sequence[str] = json_api_spec_http_methods.HTTP_ALL):
if any(map(lambda method: method not in json_api_spec_http_methods.HTTP_ALL, limit_to_http_methods)):
raise Exception(
f'Cannot limit fields to HTTP Method of types: '
f'{list(filter(lambda method: method not in json_api_spec_http_methods.HTTP_ALL, limit_to_http_methods))}')
def __warn_if_method_not_available(self, method: str):
if method not in self._allowed_methods:
LOGGER.warning(
f'You\'ve set a lifecycle callback for resource {self._resource_name}, '
f'whose respective HTTP method is not allowed through `allowed_methods`.')
def fields(self, fields: Sequence[str],
limit_to_on_retrieve: bool = False) -> 'JsonApiModelViewBuilder':
if limit_to_on_retrieve not in self._fields:
self._fields[limit_to_on_retrieve] = []
self._fields[limit_to_on_retrieve].extend(fields)
return self
def dummy_fields(self, fields: Sequence[str]) -> 'JsonApiModelViewBuilder':
self.fields(fields=fields)
_f = lambda instance: ''
self.custom_fields(fields=[(field, _f,) for field in fields])
return self
def add_field(self, name: str, limit_to_on_retrieve: bool = False) -> 'JsonApiModelViewBuilder':
if limit_to_on_retrieve not in self._fields:
self._fields[limit_to_on_retrieve] = []
self._fields[limit_to_on_retrieve].append(name)
return self
def add_dummy_field(self, name: str) -> 'JsonApiModelViewBuilder':
self.add_field(name=name)
self.add_custom_field(name=name, instance_callback=lambda instance: '')
return self
def add_filter(self, name: str, field: str = None, lookups: Sequence[str] = None,
transform_value: Callable[
[str, QuerySet], Tuple[str, QuerySet]] = None) -> 'JsonApiModelViewBuilder':
if lookups is None:
lookups = (filter_lookups.EXACT,)
if any(map(lambda lookup: lookup not in filter_lookups.ALL, lookups)):
raise Exception(
f'Filter lookups are invalid: '
f'{list(filter(lambda lookup: lookup not in filter_lookups.ALL, lookups))}')
self._filters[name] = Filter(field=field or name, lookups=lookups, transform_value=transform_value)
return self
def add_computed_filter(self, name: str, filter_type: Filter,
filter_func: Callable[[QuerySet, str], Any],
field: str = None) -> 'JsonApiModelViewBuilder':
self._computed_filters[name] = ComputedFilter(field=field or name, filter_func=filter_func,
filter_type=filter_type)
return self
def add_relation(self, field: str, many: bool = False, resource_name: str = None,
primary_key_name: str = None,
limit_to_on_retrieve: bool = False,
required: bool = False, api_version: Optional[str] = '') -> 'JsonApiModelViewBuilder':
if limit_to_on_retrieve not in self._relations:
self._relations[limit_to_on_retrieve] = []
self._relations[limit_to_on_retrieve].append(
Relation(field=field, resource_name=resource_name or field, many=many,
primary_key_name=primary_key_name, required=required,
api_version=api_version.replace('.', '').replace('-', '')))
return self
def rl(self, field: str, many: bool = False, resource_name: str = None,
primary_key_name: str = None,
limit_to_on_retrieve: bool = False,
required: bool = False, api_version: Optional[str] = '') -> 'JsonApiModelViewBuilder':
return self.add_relation(field=field, many=many, resource_name=resource_name, primary_key_name=primary_key_name,
limit_to_on_retrieve=limit_to_on_retrieve, required=required,
api_version=api_version.replace('.', '').replace('-', ''))
def add_generic_relation(self, field: str,
related: Sequence[RelatedResource],
many: bool = False,
limit_to_on_retrieve: bool = False,
required: bool = False) -> 'JsonApiModelViewBuilder':
if limit_to_on_retrieve not in self._generic_relations:
self._generic_relations[limit_to_on_retrieve] = []
api_fixed_related = []
for rel in related:
rel.api_version = rel.api_version.replace('.', '').replace('-', '')
api_fixed_related.append(rel)
self._generic_relations[limit_to_on_retrieve].append(
GenericRelation(field=field, related=api_fixed_related, many=many, required=required))
return self
def add_custom_field(self, name: str, instance_callback: Callable[[Any], Any] = None,
limit_to_on_retrieve: bool = False) -> 'JsonApiModelViewBuilder':
if limit_to_on_retrieve not in self._custom_fields:
self._custom_fields[limit_to_on_retrieve] = []
self._custom_fields[limit_to_on_retrieve].append(CustomField(name=name, callback=instance_callback))
return self
def custom_fields(self, fields: Sequence[Tuple[str, Callable[[Any], Any]]] = None,
limit_to_on_retrieve: bool = False) -> 'JsonApiModelViewBuilder':
if limit_to_on_retrieve not in self._custom_fields:
self._custom_fields[limit_to_on_retrieve] = []
for name, instance_callback in fields:
self._custom_fields[limit_to_on_retrieve].append(CustomField(name=name, callback=instance_callback))
return self
def set_related_limit(self, limit: int = DEFAULT_RELATED_LIMIT) -> 'JsonApiModelViewBuilder':
self._related_limit = limit
return self
def before_create(self, before_create_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._before_create_callback = before_create_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_POST)
return self
def after_create(self, after_create_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._after_create_callback = after_create_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_POST)
return self
def after_get(self, after_get_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._after_get_callback = after_get_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_GET)
return self
def before_update(self, before_update_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._before_update_callback = before_update_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_PATCH)
return self
def after_update(self, after_update_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._after_update_callback = after_update_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_PATCH)
return self
def before_delete(self, before_delete_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._before_delete_callback = before_delete_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_DELETE)
return self
def after_delete(self, after_delete_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._after_delete_callback = after_delete_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_DELETE)
return self
def before_list(self,
before_list_callback: Callable[[Request, QuerySet], QuerySet] = None) -> 'JsonApiModelViewBuilder':
self._before_list_callback = before_list_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_GET)
return self
def after_list(self, after_list_callback: Callable[[Any], Any] = None) -> 'JsonApiModelViewBuilder':
self._after_list_callback = after_list_callback
self.__warn_if_method_not_available(json_api_spec_http_methods.HTTP_GET)
return self
def before_response(self, before_raw_response: Callable[[str], str] = None) -> 'JsonApiModelViewBuilder':
self._before_raw_response = before_raw_response
return self
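# Hedged usage sketch: one plausible way to wire the builder into urlpatterns.
# `Book` and the field/filter names are hypothetical; `get_urls` is defined
# later in this class.
#
# urlpatterns = (JsonApiModelViewBuilder(Book, resource_name='books', api_version='1.0')
#                .fields(['title', 'published'])
#                .add_filter(name='title', lookups=(filter_lookups.EXACT, filter_lookups.IN))
#                .add_relation(field='author', resource_name='authors')
#                .after_get(lambda data: data)
#                .get_urls())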
def _get_history_urls(self) -> Sequence[partial]:
history_builder = deepcopy(self)
history_urls = []  # default when the plugin is not enabled
if plugins.DJANGO_SIMPLE_HISTORY in self._include_plugins:
history_builder._include_plugins = []
history_builder._model = apps.get_model(self._model.objects.model._meta.db_table.split('_')[0],
f'Historical{self._model.__name__}')
history_builder._queryset = self._model.history
history_builder._resource_name = f'historical_{self._resource_name}'
history_builder._custom_fields = {}  # a dict keyed by limit_to_on_retrieve, as in __init__
history_urls = history_builder.fields(['history_date', 'history_change_reason', 'history_id', 'history_type']) \
.add_filter(name='history_date', lookups=(
filter_lookups.EXACT, filter_lookups.IN, filter_lookups.LT, filter_lookups.LTE, filter_lookups.GT,
filter_lookups.GTE)) \
.add_filter(name='history_id', lookups=(filter_lookups.EXACT, filter_lookups.IN)) \
.add_filter(name='history_change_reason', lookups=(filter_lookups.EXACT, filter_lookups.IN)) \
.add_filter(name='history_type', lookups=(filter_lookups.EXACT, filter_lookups.IN)) \
.get_urls(urls_prefix='history/', url_resource_name=self._resource_name, ignore_swagger=True)
return history_urls
def _get_admin_urls(self, ignore_swagger: bool = False) -> Sequence[partial]:
admin_builder = deepcopy(self)
admin_urls = []  # default when the plugin is not enabled
if plugins.AUTO_ADMIN_VIEWS in self._include_plugins:
admin_builder._include_plugins = []
admin_builder._spice_queryset = None
admin_permission_class = admin_builder._plugin_options.get(plugins.AUTO_ADMIN_VIEWS, {}).get(
'ADMIN_PERMISSION_CLASS')
admin_builder._is_admin = True
if admin_permission_class is not None:
admin_builder._permission_classes = [*admin_builder._permission_classes, admin_permission_class]
admin_urls = admin_builder._build(url_resource_name=self._resource_name, urls_prefix='admin/',
ignore_serializer=False, ignore_swagger=ignore_swagger)
return admin_urls
def _build(self, url_resource_name: str = '', urls_prefix: str = '', ignore_serializer: bool = False,
ignore_swagger: bool = False) -> Sequence[
partial]:
method_to_serializer = {}
if not ignore_serializer:
for limit_to_on_retrieve in [False, True]:
fields = self._fields[limit_to_on_retrieve] if limit_to_on_retrieve in self._fields else []
if limit_to_on_retrieve is True:
fields.extend(self._fields[False] if False in self._fields else [])
custom_fields = self._custom_fields[
limit_to_on_retrieve] if limit_to_on_retrieve in self._custom_fields else []
if limit_to_on_retrieve is True:
custom_fields.extend(self._custom_fields[False] if False in self._custom_fields else [])
relations = self._relations[limit_to_on_retrieve] if limit_to_on_retrieve in self._relations else []
if limit_to_on_retrieve is True:
relations.extend(self._relations[False] if False in self._relations else [])
generic_relations = self._generic_relations[
limit_to_on_retrieve] if limit_to_on_retrieve in self._generic_relations else []
if limit_to_on_retrieve is True:
generic_relations.extend(self._generic_relations[False] if False in self._generic_relations else [])
method_to_serializer[limit_to_on_retrieve] = \
_construct_serializer('Retrieve' if limit_to_on_retrieve else 'List',
self._api_version,
self._model,
self._resource_name,
fields,
custom_fields,
relations,
generic_relations,
self._related_limit,
self._primary_key_name,
self._before_update_callback if limit_to_on_retrieve else self._before_create_callback,
self._after_list_callback,
self._is_admin)
_append_to_namespace(method_to_serializer[limit_to_on_retrieve])
else:
method_to_serializer[False] = list(
filter(lambda serializer: serializer.__class__.__name__.startswith('List'),
_MODEL_TO_SERIALIZERS[self._model]))
method_to_serializer[True] = list(
filter(lambda serializer: serializer.__class__.__name__.startswith('Retrieve'),
_MODEL_TO_SERIALIZERS[self._model]))
filter_set, filter_backend = _construct_filter_backend(self._model, self._resource_name, self._filters,
self._computed_filters)
def perform_create(view, serializer):
instance = serializer.save()
if self._after_create_callback is not None:
self._after_create_callback(view.request, instance, serializer)
def perform_destroy(view, instance):
if self._before_delete_callback is not None:
self._before_delete_callback(instance, view.get_serializer())
instance.delete()
if self._after_delete_callback is not None:
self._after_delete_callback(instance, view.get_serializer())
def perform_get(view, instance, *args, **kwargs):
response = super(view.__class__, view).retrieve(instance, *args, **kwargs)
if self._after_get_callback is not None:
response.data = self._after_get_callback(response.data)
return response
def perform_update(view, serializer):
instance = | |
"rttMonApplNumCtrlAdminEntry": {},
"rttMonApplPreConfigedReset": {},
"rttMonApplPreConfigedValid": {},
"rttMonApplProbeCapacity": {},
"rttMonApplReset": {},
"rttMonApplResponder": {},
"rttMonApplSupportedProtocolsValid": {},
"rttMonApplSupportedRttTypesValid": {},
"rttMonApplTimeOfLastSet": {},
"rttMonApplVersion": {},
"rttMonControlEnableErrors": {},
"rttMonCtrlAdminFrequency": {},
"rttMonCtrlAdminGroupName": {},
"rttMonCtrlAdminLongTag": {},
"rttMonCtrlAdminNvgen": {},
"rttMonCtrlAdminOwner": {},
"rttMonCtrlAdminRttType": {},
"rttMonCtrlAdminStatus": {},
"rttMonCtrlAdminTag": {},
"rttMonCtrlAdminThreshold": {},
"rttMonCtrlAdminTimeout": {},
"rttMonCtrlAdminVerifyData": {},
"rttMonCtrlOperConnectionLostOccurred": {},
"rttMonCtrlOperDiagText": {},
"rttMonCtrlOperModificationTime": {},
"rttMonCtrlOperNumRtts": {},
"rttMonCtrlOperOctetsInUse": {},
"rttMonCtrlOperOverThresholdOccurred": {},
"rttMonCtrlOperResetTime": {},
"rttMonCtrlOperRttLife": {},
"rttMonCtrlOperState": {},
"rttMonCtrlOperTimeoutOccurred": {},
"rttMonCtrlOperVerifyErrorOccurred": {},
"rttMonEchoAdminAggBurstCycles": {},
"rttMonEchoAdminAvailNumFrames": {},
"rttMonEchoAdminCache": {},
"rttMonEchoAdminCallDuration": {},
"rttMonEchoAdminCalledNumber": {},
"rttMonEchoAdminCodecInterval": {},
"rttMonEchoAdminCodecNumPackets": {},
"rttMonEchoAdminCodecPayload": {},
"rttMonEchoAdminCodecType": {},
"rttMonEchoAdminControlEnable": {},
"rttMonEchoAdminControlRetry": {},
"rttMonEchoAdminControlTimeout": {},
"rttMonEchoAdminDetectPoint": {},
"rttMonEchoAdminDscp": {},
"rttMonEchoAdminEmulateSourceAddress": {},
"rttMonEchoAdminEmulateSourcePort": {},
"rttMonEchoAdminEmulateTargetAddress": {},
"rttMonEchoAdminEmulateTargetPort": {},
"rttMonEchoAdminEnableBurst": {},
"rttMonEchoAdminEndPointListName": {},
"rttMonEchoAdminEntry": {"77": {}, "78": {}, "79": {}},
"rttMonEchoAdminEthernetCOS": {},
"rttMonEchoAdminGKRegistration": {},
"rttMonEchoAdminHTTPVersion": {},
"rttMonEchoAdminICPIFAdvFactor": {},
"rttMonEchoAdminIgmpTreeInit": {},
"rttMonEchoAdminInputInterface": {},
"rttMonEchoAdminInterval": {},
"rttMonEchoAdminLSPExp": {},
"rttMonEchoAdminLSPFECType": {},
"rttMonEchoAdminLSPNullShim": {},
"rttMonEchoAdminLSPReplyDscp": {},
"rttMonEchoAdminLSPReplyMode": {},
"rttMonEchoAdminLSPSelector": {},
"rttMonEchoAdminLSPTTL": {},
"rttMonEchoAdminLSPVccvID": {},
"rttMonEchoAdminLSREnable": {},
"rttMonEchoAdminLossRatioNumFrames": {},
"rttMonEchoAdminMode": {},
"rttMonEchoAdminNameServer": {},
"rttMonEchoAdminNumPackets": {},
"rttMonEchoAdminOWNTPSyncTolAbs": {},
"rttMonEchoAdminOWNTPSyncTolPct": {},
"rttMonEchoAdminOWNTPSyncTolType": {},
"rttMonEchoAdminOperation": {},
"rttMonEchoAdminPktDataRequestSize": {},
"rttMonEchoAdminPktDataResponseSize": {},
"rttMonEchoAdminPrecision": {},
"rttMonEchoAdminProbePakPriority": {},
"rttMonEchoAdminProtocol": {},
"rttMonEchoAdminProxy": {},
"rttMonEchoAdminReserveDsp": {},
"rttMonEchoAdminSSM": {},
"rttMonEchoAdminSourceAddress": {},
"rttMonEchoAdminSourceMPID": {},
"rttMonEchoAdminSourceMacAddress": {},
"rttMonEchoAdminSourcePort": {},
"rttMonEchoAdminSourceVoicePort": {},
"rttMonEchoAdminString1": {},
"rttMonEchoAdminString2": {},
"rttMonEchoAdminString3": {},
"rttMonEchoAdminString4": {},
"rttMonEchoAdminString5": {},
"rttMonEchoAdminTOS": {},
"rttMonEchoAdminTargetAddress": {},
"rttMonEchoAdminTargetAddressString": {},
"rttMonEchoAdminTargetDomainName": {},
"rttMonEchoAdminTargetEVC": {},
"rttMonEchoAdminTargetMEPPort": {},
"rttMonEchoAdminTargetMPID": {},
"rttMonEchoAdminTargetMacAddress": {},
"rttMonEchoAdminTargetPort": {},
"rttMonEchoAdminTargetVLAN": {},
"rttMonEchoAdminTstampOptimization": {},
"rttMonEchoAdminURL": {},
"rttMonEchoAdminVideoTrafficProfile": {},
"rttMonEchoAdminVrfName": {},
"rttMonEchoPathAdminHopAddress": {},
"rttMonFileIOAdminAction": {},
"rttMonFileIOAdminFilePath": {},
"rttMonFileIOAdminSize": {},
"rttMonGeneratedOperCtrlAdminIndex": {},
"rttMonGrpScheduleAdminAdd": {},
"rttMonGrpScheduleAdminAgeout": {},
"rttMonGrpScheduleAdminDelete": {},
"rttMonGrpScheduleAdminFreqMax": {},
"rttMonGrpScheduleAdminFreqMin": {},
"rttMonGrpScheduleAdminFrequency": {},
"rttMonGrpScheduleAdminLife": {},
"rttMonGrpScheduleAdminPeriod": {},
"rttMonGrpScheduleAdminProbes": {},
"rttMonGrpScheduleAdminReset": {},
"rttMonGrpScheduleAdminStartDelay": {},
"rttMonGrpScheduleAdminStartTime": {},
"rttMonGrpScheduleAdminStartType": {},
"rttMonGrpScheduleAdminStatus": {},
"rttMonHTTPStatsBusies": {},
"rttMonHTTPStatsCompletions": {},
"rttMonHTTPStatsDNSQueryError": {},
"rttMonHTTPStatsDNSRTTSum": {},
"rttMonHTTPStatsDNSServerTimeout": {},
"rttMonHTTPStatsError": {},
"rttMonHTTPStatsHTTPError": {},
"rttMonHTTPStatsMessageBodyOctetsSum": {},
"rttMonHTTPStatsOverThresholds": {},
"rttMonHTTPStatsRTTMax": {},
"rttMonHTTPStatsRTTMin": {},
"rttMonHTTPStatsRTTSum": {},
"rttMonHTTPStatsRTTSum2High": {},
"rttMonHTTPStatsRTTSum2Low": {},
"rttMonHTTPStatsTCPConnectRTTSum": {},
"rttMonHTTPStatsTCPConnectTimeout": {},
"rttMonHTTPStatsTransactionRTTSum": {},
"rttMonHTTPStatsTransactionTimeout": {},
"rttMonHistoryAdminFilter": {},
"rttMonHistoryAdminNumBuckets": {},
"rttMonHistoryAdminNumLives": {},
"rttMonHistoryAdminNumSamples": {},
"rttMonHistoryCollectionAddress": {},
"rttMonHistoryCollectionApplSpecificSense": {},
"rttMonHistoryCollectionCompletionTime": {},
"rttMonHistoryCollectionSampleTime": {},
"rttMonHistoryCollectionSense": {},
"rttMonHistoryCollectionSenseDescription": {},
"rttMonIcmpJStatsOWSum2DSHighs": {},
"rttMonIcmpJStatsOWSum2DSLows": {},
"rttMonIcmpJStatsOWSum2SDHighs": {},
"rttMonIcmpJStatsOWSum2SDLows": {},
"rttMonIcmpJStatsOverThresholds": {},
"rttMonIcmpJStatsPktOutSeqBoth": {},
"rttMonIcmpJStatsPktOutSeqDSes": {},
"rttMonIcmpJStatsPktOutSeqSDs": {},
"rttMonIcmpJStatsRTTSum2Highs": {},
"rttMonIcmpJStatsRTTSum2Lows": {},
"rttMonIcmpJStatsSum2NegDSHighs": {},
"rttMonIcmpJStatsSum2NegDSLows": {},
"rttMonIcmpJStatsSum2NegSDHighs": {},
"rttMonIcmpJStatsSum2NegSDLows": {},
"rttMonIcmpJStatsSum2PosDSHighs": {},
"rttMonIcmpJStatsSum2PosDSLows": {},
"rttMonIcmpJStatsSum2PosSDHighs": {},
"rttMonIcmpJStatsSum2PosSDLows": {},
"rttMonIcmpJitterMaxSucPktLoss": {},
"rttMonIcmpJitterMinSucPktLoss": {},
"rttMonIcmpJitterStatsAvgJ": {},
"rttMonIcmpJitterStatsAvgJDS": {},
"rttMonIcmpJitterStatsAvgJSD": {},
"rttMonIcmpJitterStatsBusies": {},
"rttMonIcmpJitterStatsCompletions": {},
"rttMonIcmpJitterStatsErrors": {},
"rttMonIcmpJitterStatsIAJIn": {},
"rttMonIcmpJitterStatsIAJOut": {},
"rttMonIcmpJitterStatsMaxNegDS": {},
"rttMonIcmpJitterStatsMaxNegSD": {},
"rttMonIcmpJitterStatsMaxPosDS": {},
"rttMonIcmpJitterStatsMaxPosSD": {},
"rttMonIcmpJitterStatsMinNegDS": {},
"rttMonIcmpJitterStatsMinNegSD": {},
"rttMonIcmpJitterStatsMinPosDS": {},
"rttMonIcmpJitterStatsMinPosSD": {},
"rttMonIcmpJitterStatsNumNegDSes": {},
"rttMonIcmpJitterStatsNumNegSDs": {},
"rttMonIcmpJitterStatsNumOWs": {},
"rttMonIcmpJitterStatsNumOverThresh": {},
"rttMonIcmpJitterStatsNumPosDSes": {},
"rttMonIcmpJitterStatsNumPosSDs": {},
"rttMonIcmpJitterStatsNumRTTs": {},
"rttMonIcmpJitterStatsOWMaxDS": {},
"rttMonIcmpJitterStatsOWMaxSD": {},
"rttMonIcmpJitterStatsOWMinDS": {},
"rttMonIcmpJitterStatsOWMinSD": {},
"rttMonIcmpJitterStatsOWSumDSes": {},
"rttMonIcmpJitterStatsOWSumSDs": {},
"rttMonIcmpJitterStatsPktLateAs": {},
"rttMonIcmpJitterStatsPktLosses": {},
"rttMonIcmpJitterStatsPktSkippeds": {},
"rttMonIcmpJitterStatsRTTMax": {},
"rttMonIcmpJitterStatsRTTMin": {},
"rttMonIcmpJitterStatsRTTSums": {},
"rttMonIcmpJitterStatsSumNegDSes": {},
"rttMonIcmpJitterStatsSumNegSDs": {},
"rttMonIcmpJitterStatsSumPosDSes": {},
"rttMonIcmpJitterStatsSumPosSDs": {},
"rttMonJitterStatsAvgJitter": {},
"rttMonJitterStatsAvgJitterDS": {},
"rttMonJitterStatsAvgJitterSD": {},
"rttMonJitterStatsBusies": {},
"rttMonJitterStatsCompletions": {},
"rttMonJitterStatsError": {},
"rttMonJitterStatsIAJIn": {},
"rttMonJitterStatsIAJOut": {},
"rttMonJitterStatsMaxOfICPIF": {},
"rttMonJitterStatsMaxOfMOS": {},
"rttMonJitterStatsMaxOfNegativesDS": {},
"rttMonJitterStatsMaxOfNegativesSD": {},
"rttMonJitterStatsMaxOfPositivesDS": {},
"rttMonJitterStatsMaxOfPositivesSD": {},
"rttMonJitterStatsMinOfICPIF": {},
"rttMonJitterStatsMinOfMOS": {},
"rttMonJitterStatsMinOfNegativesDS": {},
"rttMonJitterStatsMinOfNegativesSD": {},
"rttMonJitterStatsMinOfPositivesDS": {},
"rttMonJitterStatsMinOfPositivesSD": {},
"rttMonJitterStatsNumOfNegativesDS": {},
"rttMonJitterStatsNumOfNegativesSD": {},
"rttMonJitterStatsNumOfOW": {},
"rttMonJitterStatsNumOfPositivesDS": {},
"rttMonJitterStatsNumOfPositivesSD": {},
"rttMonJitterStatsNumOfRTT": {},
"rttMonJitterStatsNumOverThresh": {},
"rttMonJitterStatsOWMaxDS": {},
"rttMonJitterStatsOWMaxDSNew": {},
"rttMonJitterStatsOWMaxSD": {},
"rttMonJitterStatsOWMaxSDNew": {},
"rttMonJitterStatsOWMinDS": {},
"rttMonJitterStatsOWMinDSNew": {},
"rttMonJitterStatsOWMinSD": {},
"rttMonJitterStatsOWMinSDNew": {},
"rttMonJitterStatsOWSum2DSHigh": {},
"rttMonJitterStatsOWSum2DSLow": {},
"rttMonJitterStatsOWSum2SDHigh": {},
"rttMonJitterStatsOWSum2SDLow": {},
"rttMonJitterStatsOWSumDS": {},
"rttMonJitterStatsOWSumDSHigh": {},
"rttMonJitterStatsOWSumSD": {},
"rttMonJitterStatsOWSumSDHigh": {},
"rttMonJitterStatsOverThresholds": {},
"rttMonJitterStatsPacketLateArrival": {},
"rttMonJitterStatsPacketLossDS": {},
"rttMonJitterStatsPacketLossSD": {},
"rttMonJitterStatsPacketMIA": {},
"rttMonJitterStatsPacketOutOfSequence": {},
"rttMonJitterStatsRTTMax": {},
"rttMonJitterStatsRTTMin": {},
"rttMonJitterStatsRTTSum": {},
"rttMonJitterStatsRTTSum2High": {},
"rttMonJitterStatsRTTSum2Low": {},
"rttMonJitterStatsRTTSumHigh": {},
"rttMonJitterStatsSum2NegativesDSHigh": {},
"rttMonJitterStatsSum2NegativesDSLow": {},
"rttMonJitterStatsSum2NegativesSDHigh": {},
"rttMonJitterStatsSum2NegativesSDLow": {},
"rttMonJitterStatsSum2PositivesDSHigh": {},
"rttMonJitterStatsSum2PositivesDSLow": {},
"rttMonJitterStatsSum2PositivesSDHigh": {},
"rttMonJitterStatsSum2PositivesSDLow": {},
"rttMonJitterStatsSumOfNegativesDS": {},
"rttMonJitterStatsSumOfNegativesSD": {},
"rttMonJitterStatsSumOfPositivesDS": {},
"rttMonJitterStatsSumOfPositivesSD": {},
"rttMonJitterStatsUnSyncRTs": {},
"rttMonLatestHTTPErrorSenseDescription": {},
"rttMonLatestHTTPOperDNSRTT": {},
"rttMonLatestHTTPOperMessageBodyOctets": {},
"rttMonLatestHTTPOperRTT": {},
"rttMonLatestHTTPOperSense": {},
"rttMonLatestHTTPOperTCPConnectRTT": {},
"rttMonLatestHTTPOperTransactionRTT": {},
"rttMonLatestIcmpJPktOutSeqBoth": {},
"rttMonLatestIcmpJPktOutSeqDS": {},
"rttMonLatestIcmpJPktOutSeqSD": {},
"rttMonLatestIcmpJitterAvgDSJ": {},
"rttMonLatestIcmpJitterAvgJitter": {},
"rttMonLatestIcmpJitterAvgSDJ": {},
"rttMonLatestIcmpJitterIAJIn": {},
"rttMonLatestIcmpJitterIAJOut": {},
"rttMonLatestIcmpJitterMaxNegDS": {},
"rttMonLatestIcmpJitterMaxNegSD": {},
"rttMonLatestIcmpJitterMaxPosDS": {},
"rttMonLatestIcmpJitterMaxPosSD": {},
"rttMonLatestIcmpJitterMaxSucPktL": {},
"rttMonLatestIcmpJitterMinNegDS": {},
"rttMonLatestIcmpJitterMinNegSD": {},
"rttMonLatestIcmpJitterMinPosDS": {},
"rttMonLatestIcmpJitterMinPosSD": {},
"rttMonLatestIcmpJitterMinSucPktL": {},
"rttMonLatestIcmpJitterNumNegDS": {},
"rttMonLatestIcmpJitterNumNegSD": {},
"rttMonLatestIcmpJitterNumOW": {},
"rttMonLatestIcmpJitterNumOverThresh": {},
"rttMonLatestIcmpJitterNumPosDS": {},
"rttMonLatestIcmpJitterNumPosSD": {},
"rttMonLatestIcmpJitterNumRTT": {},
"rttMonLatestIcmpJitterOWAvgDS": {},
"rttMonLatestIcmpJitterOWAvgSD": {},
"rttMonLatestIcmpJitterOWMaxDS": {},
"rttMonLatestIcmpJitterOWMaxSD": {},
"rttMonLatestIcmpJitterOWMinDS": {},
"rttMonLatestIcmpJitterOWMinSD": {},
"rttMonLatestIcmpJitterOWSum2DS": {},
"rttMonLatestIcmpJitterOWSum2SD": {},
"rttMonLatestIcmpJitterOWSumDS": {},
"rttMonLatestIcmpJitterOWSumSD": {},
"rttMonLatestIcmpJitterPktLateA": {},
"rttMonLatestIcmpJitterPktLoss": {},
"rttMonLatestIcmpJitterPktSkipped": {},
"rttMonLatestIcmpJitterRTTMax": {},
"rttMonLatestIcmpJitterRTTMin": {},
"rttMonLatestIcmpJitterRTTSum": {},
"rttMonLatestIcmpJitterRTTSum2": {},
"rttMonLatestIcmpJitterSense": {},
"rttMonLatestIcmpJitterSum2NegDS": {},
"rttMonLatestIcmpJitterSum2NegSD": {},
"rttMonLatestIcmpJitterSum2PosDS": {},
"rttMonLatestIcmpJitterSum2PosSD": {},
"rttMonLatestIcmpJitterSumNegDS": {},
"rttMonLatestIcmpJitterSumNegSD": {},
"rttMonLatestIcmpJitterSumPosDS": {},
"rttMonLatestIcmpJitterSumPosSD": {},
"rttMonLatestJitterErrorSenseDescription": {},
"rttMonLatestJitterOperAvgDSJ": {},
"rttMonLatestJitterOperAvgJitter": {},
"rttMonLatestJitterOperAvgSDJ": {},
"rttMonLatestJitterOperIAJIn": {},
"rttMonLatestJitterOperIAJOut": {},
"rttMonLatestJitterOperICPIF": {},
"rttMonLatestJitterOperMOS": {},
"rttMonLatestJitterOperMaxOfNegativesDS": {},
"rttMonLatestJitterOperMaxOfNegativesSD": {},
"rttMonLatestJitterOperMaxOfPositivesDS": {},
"rttMonLatestJitterOperMaxOfPositivesSD": {},
"rttMonLatestJitterOperMinOfNegativesDS": {},
"rttMonLatestJitterOperMinOfNegativesSD": {},
"rttMonLatestJitterOperMinOfPositivesDS": {},
"rttMonLatestJitterOperMinOfPositivesSD": {},
"rttMonLatestJitterOperNTPState": {},
"rttMonLatestJitterOperNumOfNegativesDS": {},
"rttMonLatestJitterOperNumOfNegativesSD": {},
"rttMonLatestJitterOperNumOfOW": {},
"rttMonLatestJitterOperNumOfPositivesDS": {},
"rttMonLatestJitterOperNumOfPositivesSD": {},
"rttMonLatestJitterOperNumOfRTT": {},
"rttMonLatestJitterOperNumOverThresh": {},
"rttMonLatestJitterOperOWAvgDS": {},
"rttMonLatestJitterOperOWAvgSD": {},
"rttMonLatestJitterOperOWMaxDS": {},
"rttMonLatestJitterOperOWMaxSD": {},
"rttMonLatestJitterOperOWMinDS": {},
"rttMonLatestJitterOperOWMinSD": {},
"rttMonLatestJitterOperOWSum2DS": {},
"rttMonLatestJitterOperOWSum2DSHigh": {},
"rttMonLatestJitterOperOWSum2SD": {},
"rttMonLatestJitterOperOWSum2SDHigh": {},
"rttMonLatestJitterOperOWSumDS": {},
"rttMonLatestJitterOperOWSumDSHigh": {},
"rttMonLatestJitterOperOWSumSD": {},
"rttMonLatestJitterOperOWSumSDHigh": {},
"rttMonLatestJitterOperPacketLateArrival": {},
"rttMonLatestJitterOperPacketLossDS": {},
"rttMonLatestJitterOperPacketLossSD": {},
"rttMonLatestJitterOperPacketMIA": {},
"rttMonLatestJitterOperPacketOutOfSequence": {},
"rttMonLatestJitterOperRTTMax": {},
"rttMonLatestJitterOperRTTMin": {},
"rttMonLatestJitterOperRTTSum": {},
"rttMonLatestJitterOperRTTSum2": {},
"rttMonLatestJitterOperRTTSum2High": {},
"rttMonLatestJitterOperRTTSumHigh": {},
"rttMonLatestJitterOperSense": {},
"rttMonLatestJitterOperSum2NegativesDS": {},
"rttMonLatestJitterOperSum2NegativesSD": {},
"rttMonLatestJitterOperSum2PositivesDS": {},
"rttMonLatestJitterOperSum2PositivesSD": {},
"rttMonLatestJitterOperSumOfNegativesDS": {},
"rttMonLatestJitterOperSumOfNegativesSD": {},
"rttMonLatestJitterOperSumOfPositivesDS": {},
"rttMonLatestJitterOperSumOfPositivesSD": {},
"rttMonLatestJitterOperUnSyncRTs": {},
"rttMonLatestRtpErrorSenseDescription": {},
"rttMonLatestRtpOperAvgOWDS": {},
"rttMonLatestRtpOperAvgOWSD": {},
"rttMonLatestRtpOperFrameLossDS": {},
"rttMonLatestRtpOperIAJitterDS": {},
"rttMonLatestRtpOperIAJitterSD": {},
"rttMonLatestRtpOperMOSCQDS": {},
"rttMonLatestRtpOperMOSCQSD": {},
"rttMonLatestRtpOperMOSLQDS": {},
"rttMonLatestRtpOperMaxOWDS": {},
"rttMonLatestRtpOperMaxOWSD": {},
"rttMonLatestRtpOperMinOWDS": {},
"rttMonLatestRtpOperMinOWSD": {},
"rttMonLatestRtpOperPacketEarlyDS": {},
"rttMonLatestRtpOperPacketLateDS": {},
"rttMonLatestRtpOperPacketLossDS": {},
"rttMonLatestRtpOperPacketLossSD": {},
"rttMonLatestRtpOperPacketOOSDS": {},
"rttMonLatestRtpOperPacketsMIA": {},
"rttMonLatestRtpOperRFactorDS": {},
"rttMonLatestRtpOperRFactorSD": {},
"rttMonLatestRtpOperRTT": {},
"rttMonLatestRtpOperSense": {},
"rttMonLatestRtpOperTotalPaksDS": {},
"rttMonLatestRtpOperTotalPaksSD": {},
"rttMonLatestRttOperAddress": {},
"rttMonLatestRttOperApplSpecificSense": {},
"rttMonLatestRttOperCompletionTime": {},
"rttMonLatestRttOperSense": {},
"rttMonLatestRttOperSenseDescription": {},
"rttMonLatestRttOperTime": {},
"rttMonLpdGrpStatsAvgRTT": {},
"rttMonLpdGrpStatsGroupProbeIndex": {},
"rttMonLpdGrpStatsGroupStatus": {},
"rttMonLpdGrpStatsLPDCompTime": {},
"rttMonLpdGrpStatsLPDFailCause": {},
"rttMonLpdGrpStatsLPDFailOccurred": {},
"rttMonLpdGrpStatsLPDStartTime": {},
"rttMonLpdGrpStatsMaxNumPaths": {},
"rttMonLpdGrpStatsMaxRTT": {},
"rttMonLpdGrpStatsMinNumPaths": {},
"rttMonLpdGrpStatsMinRTT": {},
"rttMonLpdGrpStatsNumOfFail": {},
"rttMonLpdGrpStatsNumOfPass": {},
"rttMonLpdGrpStatsNumOfTimeout": {},
"rttMonLpdGrpStatsPathIds": {},
"rttMonLpdGrpStatsProbeStatus": {},
"rttMonLpdGrpStatsResetTime": {},
"rttMonLpdGrpStatsTargetPE": {},
"rttMonReactActionType": {},
"rttMonReactAdminActionType": {},
"rttMonReactAdminConnectionEnable": {},
"rttMonReactAdminThresholdCount": {},
"rttMonReactAdminThresholdCount2": {},
"rttMonReactAdminThresholdFalling": {},
"rttMonReactAdminThresholdType": {},
"rttMonReactAdminTimeoutEnable": {},
"rttMonReactAdminVerifyErrorEnable": {},
"rttMonReactOccurred": {},
"rttMonReactStatus": {},
"rttMonReactThresholdCountX": {},
"rttMonReactThresholdCountY": {},
"rttMonReactThresholdFalling": {},
"rttMonReactThresholdRising": {},
"rttMonReactThresholdType": {},
"rttMonReactTriggerAdminStatus": {},
"rttMonReactTriggerOperState": {},
"rttMonReactValue": {},
"rttMonReactVar": {},
"rttMonRtpStatsFrameLossDSAvg": {},
"rttMonRtpStatsFrameLossDSMax": {},
"rttMonRtpStatsFrameLossDSMin": {},
"rttMonRtpStatsIAJitterDSAvg": {},
"rttMonRtpStatsIAJitterDSMax": {},
"rttMonRtpStatsIAJitterDSMin": {},
"rttMonRtpStatsIAJitterSDAvg": {},
"rttMonRtpStatsIAJitterSDMax": {},
"rttMonRtpStatsIAJitterSDMin": {},
"rttMonRtpStatsMOSCQDSAvg": {},
"rttMonRtpStatsMOSCQDSMax": {},
"rttMonRtpStatsMOSCQDSMin": {},
"rttMonRtpStatsMOSCQSDAvg": {},
"rttMonRtpStatsMOSCQSDMax": {},
"rttMonRtpStatsMOSCQSDMin": {},
"rttMonRtpStatsMOSLQDSAvg": {},
"rttMonRtpStatsMOSLQDSMax": {},
"rttMonRtpStatsMOSLQDSMin": {},
"rttMonRtpStatsOperAvgOWDS": {},
"rttMonRtpStatsOperAvgOWSD": {},
"rttMonRtpStatsOperMaxOWDS": {},
"rttMonRtpStatsOperMaxOWSD": {},
"rttMonRtpStatsOperMinOWDS": {},
"rttMonRtpStatsOperMinOWSD": {},
"rttMonRtpStatsPacketEarlyDSAvg": {},
"rttMonRtpStatsPacketLateDSAvg": {},
"rttMonRtpStatsPacketLossDSAvg": {},
"rttMonRtpStatsPacketLossDSMax": {},
"rttMonRtpStatsPacketLossDSMin": {},
"rttMonRtpStatsPacketLossSDAvg": {},
"rttMonRtpStatsPacketLossSDMax": {},
"rttMonRtpStatsPacketLossSDMin": {},
"rttMonRtpStatsPacketOOSDSAvg": {},
"rttMonRtpStatsPacketsMIAAvg": {},
"rttMonRtpStatsRFactorDSAvg": {},
"rttMonRtpStatsRFactorDSMax": {},
"rttMonRtpStatsRFactorDSMin": {},
"rttMonRtpStatsRFactorSDAvg": {},
"rttMonRtpStatsRFactorSDMax": {},
"rttMonRtpStatsRFactorSDMin": {},
"rttMonRtpStatsRTTAvg": {},
"rttMonRtpStatsRTTMax": {},
"rttMonRtpStatsRTTMin": {},
"rttMonRtpStatsTotalPacketsDSAvg": {},
"rttMonRtpStatsTotalPacketsDSMax": {},
"rttMonRtpStatsTotalPacketsDSMin": {},
"rttMonRtpStatsTotalPacketsSDAvg": {},
"rttMonRtpStatsTotalPacketsSDMax": {},
"rttMonRtpStatsTotalPacketsSDMin": {},
"rttMonScheduleAdminConceptRowAgeout": {},
"rttMonScheduleAdminConceptRowAgeoutV2": {},
"rttMonScheduleAdminRttLife": {},
"rttMonScheduleAdminRttRecurring": {},
"rttMonScheduleAdminRttStartTime": {},
"rttMonScheduleAdminStartDelay": {},
"rttMonScheduleAdminStartType": {},
"rttMonScriptAdminCmdLineParams": {},
"rttMonScriptAdminName": {},
"rttMonStatisticsAdminDistInterval": {},
"rttMonStatisticsAdminNumDistBuckets": {},
"rttMonStatisticsAdminNumHops": {},
"rttMonStatisticsAdminNumHourGroups": {},
"rttMonStatisticsAdminNumPaths": {},
"rttMonStatsCaptureCompletionTimeMax": {},
"rttMonStatsCaptureCompletionTimeMin": {},
"rttMonStatsCaptureCompletions": {},
"rttMonStatsCaptureOverThresholds": {},
"rttMonStatsCaptureSumCompletionTime": {},
"rttMonStatsCaptureSumCompletionTime2High": {},
"rttMonStatsCaptureSumCompletionTime2Low": {},
"rttMonStatsCollectAddress": {},
"rttMonStatsCollectBusies": {},
"rttMonStatsCollectCtrlEnErrors": {},
"rttMonStatsCollectDrops": {},
"rttMonStatsCollectNoConnections": {},
"rttMonStatsCollectNumDisconnects": {},
"rttMonStatsCollectRetrieveErrors": {},
"rttMonStatsCollectSequenceErrors": {},
"rttMonStatsCollectTimeouts": {},
"rttMonStatsCollectVerifyErrors": {},
"rttMonStatsRetrieveErrors": {},
"rttMonStatsTotalsElapsedTime": {},
"rttMonStatsTotalsInitiations": {},
"rttMplsVpnMonCtrlDelScanFactor": {},
"rttMplsVpnMonCtrlEXP": {},
"rttMplsVpnMonCtrlLpd": {},
"rttMplsVpnMonCtrlLpdCompTime": {},
"rttMplsVpnMonCtrlLpdGrpList": {},
"rttMplsVpnMonCtrlProbeList": {},
"rttMplsVpnMonCtrlRequestSize": {},
"rttMplsVpnMonCtrlRttType": {},
"rttMplsVpnMonCtrlScanInterval": {},
"rttMplsVpnMonCtrlStatus": {},
"rttMplsVpnMonCtrlStorageType": {},
"rttMplsVpnMonCtrlTag": {},
"rttMplsVpnMonCtrlThreshold": {},
"rttMplsVpnMonCtrlTimeout": {},
"rttMplsVpnMonCtrlVerifyData": {},
"rttMplsVpnMonCtrlVrfName": {},
"rttMplsVpnMonReactActionType": {},
"rttMplsVpnMonReactConnectionEnable": {},
"rttMplsVpnMonReactLpdNotifyType": {},
"rttMplsVpnMonReactLpdRetryCount": {},
"rttMplsVpnMonReactThresholdCount": {},
"rttMplsVpnMonReactThresholdType": {},
"rttMplsVpnMonReactTimeoutEnable": {},
"rttMplsVpnMonScheduleFrequency": {},
"rttMplsVpnMonSchedulePeriod": {},
"rttMplsVpnMonScheduleRttStartTime": {},
"rttMplsVpnMonTypeDestPort": {},
"rttMplsVpnMonTypeInterval": {},
"rttMplsVpnMonTypeLSPReplyDscp": {},
"rttMplsVpnMonTypeLSPReplyMode": {},
"rttMplsVpnMonTypeLSPTTL": {},
"rttMplsVpnMonTypeLpdEchoInterval": {},
"rttMplsVpnMonTypeLpdEchoNullShim": {},
"rttMplsVpnMonTypeLpdEchoTimeout": {},
"rttMplsVpnMonTypeLpdMaxSessions": {},
"rttMplsVpnMonTypeLpdScanPeriod": {},
"rttMplsVpnMonTypeLpdSessTimeout": {},
"rttMplsVpnMonTypeLpdStatHours": {},
"rttMplsVpnMonTypeLspSelector": {},
"rttMplsVpnMonTypeNumPackets": {},
"rttMplsVpnMonTypeSecFreqType": {},
"rttMplsVpnMonTypeSecFreqValue": {},
"sapCircEntry": {
"1": {},
"10": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"sapSysEntry": {"1": {}, "2": {}, "3": {}},
"sdlcLSAdminEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"sdlcLSOperEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
| |
pos in box]
return getCoords(x, y, w, h, pagesize)
def getFrameDimensions(data, page_width, page_height):
"""Calculate dimensions of a frame
Returns left, top, width and height of the frame in points.
"""
box = data.get("-pdf-frame-box", [])
if len(box) == 4:
return [getSize(x) for x in box]
top = getSize(data.get("top", 0))
left = getSize(data.get("left", 0))
bottom = getSize(data.get("bottom", 0))
right = getSize(data.get("right", 0))
if "height" in data:
height = getSize(data["height"])
if "top" in data:
top = getSize(data["top"])
bottom = page_height - (top + height)
elif "bottom" in data:
bottom = getSize(data["bottom"])
top = page_height - (bottom + height)
if "width" in data:
width = getSize(data["width"])
if "left" in data:
left = getSize(data["left"])
right = page_width - (left + width)
elif "right" in data:
right = getSize(data["right"])
left = page_width - (right + width)
top += getSize(data.get("margin-top", 0))
left += getSize(data.get("margin-left", 0))
bottom += getSize(data.get("margin-bottom", 0))
right += getSize(data.get("margin-right", 0))
width = page_width - (left + right)
height = page_height - (top + bottom)
return left, top, width, height
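# Worked example (assuming getSize converts CSS-style lengths to points): on a
# 595x842pt A4 page, a frame pinned 72pt from the top-left with an explicit
# width/height leaves symmetric 72pt gaps on the opposite edges.
#
# data = {"top": "72pt", "left": "72pt", "width": "451pt", "height": "698pt"}
# getFrameDimensions(data, 595, 842)  # -> (72, 72, 451, 698)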
@memoized
def getPos(position, pagesize):
"""
Pair of coordinates
"""
position = str(position).split()
if len(position) != 2:
raise Exception("position not defined right way")
x, y = [getSize(pos) for pos in position]
return getCoords(x, y, None, None, pagesize)
def getBool(s):
" Is it a boolean? "
return str(s).lower() in ("y", "yes", "1", "true")
_uid = 0
def getUID():
" Unique ID "
global _uid
_uid += 1
return str(_uid)
_alignments = {
"left": TA_LEFT,
"center": TA_CENTER,
"middle": TA_CENTER,
"right": TA_RIGHT,
"justify": TA_JUSTIFY,
}
def getAlign(value, default=TA_LEFT):
return _alignments.get(str(value).lower(), default)
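# e.g. getAlign("Center") -> TA_CENTER; unrecognised values fall back to `default`.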
GAE = "google.appengine" in sys.modules
if GAE:
STRATEGIES = (
six.BytesIO,
six.BytesIO)
else:
STRATEGIES = (
six.BytesIO,
tempfile.NamedTemporaryFile)
class pisaTempFile(object):
"""
A temporary file implementation that uses memory unless
either capacity is breached or fileno is requested, at which
point a real temporary file will be created and the relevant
details returned
If capacity is -1 the second strategy will never be used.
Inspired by:
http://code.activestate.com/recipes/496744/
"""
STRATEGIES = STRATEGIES
CAPACITY = 10 * 1024
def __init__(self, buffer="", capacity=CAPACITY):
"""Creates a TempFile object containing the specified buffer.
If capacity is specified, we use a real temporary file once the
file gets larger than that size. Otherwise, the data is stored
in memory.
"""
self.capacity = capacity
self.strategy = int(len(buffer) > self.capacity)
try:
self._delegate = self.STRATEGIES[self.strategy]()
except IndexError:
# Fallback for Google App Engine etc.
self._delegate = self.STRATEGIES[0]()
self.write(buffer)
# we must set the file's position for preparing to read
self.seek(0)
def makeTempFile(self):
"""
Switch to the next strategy. If an error occurs,
stay with the first strategy.
"""
if self.strategy == 0:
try:
new_delegate = self.STRATEGIES[1]()
new_delegate.write(self.getvalue())
self._delegate = new_delegate
self.strategy = 1
log.warn("Created temporary file %s", self.name)
except:
self.capacity = -1
def getFileName(self):
"""
Get a named temporary file
"""
self.makeTempFile()
return self.name
def fileno(self):
"""
Forces this buffer to use a temporary file as the underlying
object and returns the fileno associated with it.
"""
self.makeTempFile()
return self._delegate.fileno()
def getvalue(self):
"""
Get the value of the file. Works around the second strategy.
Always returns bytes.
"""
if self.strategy == 0:
return self._delegate.getvalue()
self._delegate.flush()
self._delegate.seek(0)
value = self._delegate.read()
if not isinstance(value, six.binary_type):
value = value.encode('utf-8')
return value
def write(self, value):
"""
If capacity != -1 and the file grows beyond capacity, switch to the file-backed strategy.
"""
if self.capacity > 0 and self.strategy == 0:
len_value = len(value)
if len_value >= self.capacity:
needs_new_strategy = True
else:
self.seek(0, 2) # find end of file
needs_new_strategy = \
(self.tell() + len_value) >= self.capacity
if needs_new_strategy:
self.makeTempFile()
if not isinstance(value, six.binary_type):
value = value.encode('utf-8')
self._delegate.write(value)
def __getattr__(self, name):
try:
return getattr(self._delegate, name)
except AttributeError:
# hide the delegation
e = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, name)
raise AttributeError(e)
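# Minimal usage sketch: data below CAPACITY stays in an in-memory BytesIO; a
# write that crosses CAPACITY transparently promotes the buffer to the second
# strategy (a named temporary file, except on GAE).
#
# buf = pisaTempFile(b"small payload")   # strategy 0: in memory
# buf.write(b"x" * (buf.capacity + 1))   # crosses CAPACITY -> makeTempFile()
# data = buf.getvalue()                  # always returns bytes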
_rx_datauri = re.compile(
"^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
"""
Resolves a URI (data:, file:, http(s), or a local path) to file data plus a MIME type.
"""
def __init__(self, uri, basepath=None):
self.basepath = basepath
self.mimetype = None
self.file = None
self.data = None
self.uri = None
self.local = None
self.tmp_file = None
uri = uri or str()
if not isinstance(uri, str):
uri = uri.decode("utf-8")
log.debug("FileObject %r, Basepath: %r", uri, basepath)
# Data URI
if uri.startswith("data:"):
m = _rx_datauri.match(uri)
self.mimetype = m.group("mime")
b64 = urllib_unquote(m.group("data")).encode("utf-8")
self.data = base64.b64decode(b64)
else:
# Check if we have an external scheme
if basepath and not urlparse.urlparse(uri).scheme:
urlParts = urlparse.urlparse(basepath)
else:
urlParts = urlparse.urlparse(uri)
log.debug("URLParts: {}".format((urlParts, urlParts.scheme)))
if urlParts.scheme == 'file':
if basepath and uri.startswith('/'):
uri = urlparse.urljoin(basepath, uri[1:])
urlResponse = urllib2.urlopen(uri)
self.mimetype = urlResponse.info().get(
"Content-Type", '').split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
# Drive letters have len==1 but we are looking
# for things like http:
elif urlParts.scheme in ('http', 'https'):
log.debug("Sending request for {} with httplib".format(uri))
# External data
if basepath:
uri = urlparse.urljoin(basepath, uri)
log.debug("Uri parsed: {}".format(uri))
#path = urlparse.urlsplit(url)[2]
#mimetype = getMimeType(path)
# Using HTTPLIB
server, path = urllib2.splithost(uri[uri.find("//"):])
if uri.startswith("https://"):
conn = httplib.HTTPSConnection(server, **httpConfig)
else:
conn = httplib.HTTPConnection(server)
conn.request("GET", path)
r1 = conn.getresponse()
# log.debug("HTTP %r %r %r %r", server, path, uri, r1)
if (r1.status, r1.reason) == (200, "OK"):
self.mimetype = r1.getheader(
"Content-Type", '').split(";")[0]
self.uri = uri
log.debug("here")
if r1.getheader("content-encoding") == "gzip":
import gzip
self.file = gzip.GzipFile(
mode="rb", fileobj=six.BytesIO(r1.read()))
else:
self.file = pisaTempFile(r1.read())
else:
log.debug(
"Received non-200 status: {}".format((r1.status, r1.reason)))
try:
urlResponse = urllib2.urlopen(uri)
except urllib2.HTTPError as e:
log.error("Could not process uri: {}".format(e))
return
self.mimetype = urlResponse.info().get(
"Content-Type", '').split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
else:
log.debug("Unrecognized scheme, assuming local file path")
# Local data
if basepath:
if sys.platform == 'win32' and os.path.isfile(basepath):
basepath = os.path.dirname(basepath)
uri = os.path.normpath(os.path.join(basepath, uri))
if os.path.isfile(uri):
self.uri = uri
self.local = uri
self.setMimeTypeByName(uri)
if self.mimetype and self.mimetype.startswith('text'):
self.file = open(uri, "r") #removed bytes... lets hope it goes ok :/
else:
# binary mode for non-text content
self.file = open(uri, "rb")
def getFile(self):
if self.file is not None:
return self.file
if self.data is not None:
return pisaTempFile(self.data)
return None
def getNamedFile(self):
if self.notFound():
return None
if self.local:
return str(self.local)
if not self.tmp_file:
self.tmp_file = tempfile.NamedTemporaryFile()
if self.file:
shutil.copyfileobj(self.file, self.tmp_file)
else:
self.tmp_file.write(self.getData())
self.tmp_file.flush()
return self.tmp_file.name
def getData(self):
if self.data is not None:
return self.data
if self.file is not None:
try:
self.data = self.file.read()
except:
if self.mimetype and self.mimetype.startswith('text'):
self.file = open(self.file.name, "rb") #removed bytes... lets hope it goes ok :/
self.data = self.file.read().decode('utf-8')
else:
raise
return self.data
return None
def notFound(self):
return (self.file is None) and (self.data is None)
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetype.split(";")[0]  # reuse the value guessed above
def getFile(*a, **kw):
file = pisaFileObject(*a, **kw)
if file.notFound():
return None
return file
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
| |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CNN definition helpers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lsi.nnutils import helpers as nn_helpers
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.layers.python.layers import utils
def encoder_simple(inp_img, nz=1000, is_training=True, reuse=False):
"""Creates a simple encoder CNN.
Args:
inp_img: TensorFlow node for input with size B X H X W X C
nz: number of units in last layer, default=1000
is_training: whether batch_norm should be in train mode
reuse: Whether to reuse weights from an already defined net
Returns:
An encoder CNN which computes a final representation with nz
units.
"""
batch_norm_params = {'is_training': is_training}
with tf.variable_scope('encoder', reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
weights_regularizer=slim.l2_regularizer(0.05),
activation_fn=tf.nn.relu,
outputs_collections=end_points_collection):
cnv1 = slim.conv2d(inp_img, 32, [7, 7], stride=2, scope='cnv1')
cnv1b = slim.conv2d(cnv1, 32, [7, 7], stride=1, scope='cnv1b')
cnv2 = slim.conv2d(cnv1b, 64, [5, 5], stride=2, scope='cnv2')
cnv2b = slim.conv2d(cnv2, 64, [5, 5], stride=1, scope='cnv2b')
cnv3 = slim.conv2d(cnv2b, 128, [3, 3], stride=2, scope='cnv3')
cnv3b = slim.conv2d(cnv3, 128, [3, 3], stride=1, scope='cnv3b')
cnv4 = slim.conv2d(cnv3b, 256, [3, 3], stride=2, scope='cnv4')
cnv4b = slim.conv2d(cnv4, 256, [3, 3], stride=1, scope='cnv4b')
cnv5 = slim.conv2d(cnv4b, 512, [3, 3], stride=2, scope='cnv5')
cnv5b = slim.conv2d(cnv5, 512, [3, 3], stride=1, scope='cnv5b')
cnv6 = slim.conv2d(cnv5b, 512, [3, 3], stride=2, scope='cnv6')
cnv6b = slim.conv2d(cnv6, 512, [3, 3], stride=1, scope='cnv6b')
cnv7 = slim.conv2d(cnv6b, 512, [3, 3], stride=2, scope='cnv7')
cnv7b = slim.conv2d(cnv7, 512, [3, 3], stride=1, scope='cnv7b')
cnv7b_flat = slim.flatten(cnv7b, scope='cnv7b_flat')
enc = slim.stack(
cnv7b_flat, slim.fully_connected, [2 * nz, nz, nz], scope='fc')
end_points = utils.convert_collection_to_dict(end_points_collection)
return enc, end_points
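# Hedged usage sketch (TF1/slim, as in this file): with seven stride-2 convs,
# input height/width that are multiples of 128 reduce to 1x1 before the fully
# connected stack; the shapes below are illustrative.
#
# images = tf.placeholder(tf.float32, [4, 128, 128, 3])
# feat, end_points = encoder_simple(images, nz=500, is_training=False)
# # feat: [4, 500]; end_points maps scopes such as 'encoder/cnv1' to tensors.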
def decoder_simple(feat, nconv=7, is_training=True, skip_feat=None,
reuse=False):
"""Creates a simple encoder CNN.
Args:
feat: Input geatures with size B X nz or B X H X W X nz
nconv: number of deconv layers
is_training: whether batch_norm should be in train mode
skip_feat: additional skip-features per upconv layer
reuse: Whether to reuse weights from an already defined net
Returns:
A decoder CNN which adds nconv upsampling layers.
"""
batch_norm_params = {'is_training': is_training}
n_filters = [32, 64, 128, 256]
if nconv > 4:
for _ in range(nconv - 4):
n_filters.append(512)
with tf.variable_scope('decoder', reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose],
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
weights_regularizer=slim.l2_regularizer(0.05),
activation_fn=tf.nn.relu,
outputs_collections=end_points_collection):
if feat.get_shape().ndims == 2:
feat = tf.expand_dims(tf.expand_dims(feat, 1), 1)
for nc in range(nconv, 0, -1):
n_filt = n_filters[nc - 1]
feat = slim.conv2d_transpose(
feat, n_filt, [4, 4], stride=2, scope='upcnv' + str(nc))
if (nc > 1) and (skip_feat is not None):
feat = tf.concat([feat, skip_feat[-nc + 1]], axis=3)
feat = slim.conv2d(
feat, n_filt, [3, 3], stride=1, scope='upcnv' + str(nc) + 'b')
end_points = utils.convert_collection_to_dict(end_points_collection)
return feat, end_points
def pixelwise_predictor(feat,
nc=3,
n_layers=1,
n_layerwise_steps=0,
skip_feat=None,
reuse=False,
is_training=True):
"""Predicts texture images and probilistic masks.
Args:
feat: B X H X W X C feature vectors
nc: number of output channels
n_layers: number of output layers to predict (denoted as L)
n_layerwise_steps: Number of independent per-layer up-conv steps
skip_feat: List of features useful for skip connections. Used if lws>0.
reuse: Whether to reuse weights from an already defined net
is_training: whether batch_norm should be in train mode
Returns:
textures : L X B X H X W X nc.
"""
with tf.variable_scope('pixelwise_pred', reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope(
[slim.conv2d],
normalizer_fn=None,
weights_regularizer=slim.l2_regularizer(0.05),
activation_fn=tf.nn.sigmoid,
outputs_collections=end_points_collection):
preds = []
for l in range(n_layers):
with tf.variable_scope('upsample_' + str(l), reuse=reuse):
feat_l, _ = decoder_simple(
feat,
nconv=n_layerwise_steps,
skip_feat=skip_feat,
reuse=reuse,
is_training=is_training)
pred = slim.conv2d(
feat_l, nc, [3, 3], stride=1, scope='pred_' + str(l))
preds.append(pred)
end_points = utils.convert_collection_to_dict(end_points_collection)
preds = tf.stack(preds, axis=0)
return preds, end_points
def ldi_predictor(feat,
n_layers=1,
reuse=False,
n_layerwise_steps=0,
skip_feat=None,
pred_masks=False,
is_training=True):
"""Predicts ldi : [textures, masks, disps].
Args:
feat: B X H X W X C feature vectors
n_layers: number of layers to predict (denoted as L)
reuse: Whether to reuse weights from an already defined net
n_layerwise_steps: Number of independent per-layer up-conv steps
skip_feat: List of features useful for skip connections. Used if lws>0.
pred_masks: Whether to predict masks or use all 1s
is_training: whether batch_norm should be in train mode
Returns:
ldi : [textures, masks, disps]
textures : L X B X H X W X nc
masks : L X B X H X W X 1 (all ones unless pred_masks is True)
disps : L X B X H X W X 1
"""
with tf.variable_scope('ldi_tex_disp', reuse=reuse):
nc = 3 + 1
if pred_masks:
nc += 1
tex_disp_pred, _ = pixelwise_predictor(
feat,
nc=nc,
n_layers=n_layers,
n_layerwise_steps=n_layerwise_steps,
skip_feat=skip_feat,
reuse=reuse,
is_training=is_training)
if pred_masks:
tex_pred, masks_ldi, disps_pred = tf.split(
tex_disp_pred, [3, 1, 1], axis=4)
masks_ldi = nn_helpers.enforce_bg_occupied(tf.nn.sigmoid(masks_ldi))
else:
tex_pred, disps_pred = tf.split(tex_disp_pred, [3, 1], axis=4)
masks_ldi = tf.ones(disps_pred.get_shape())
ldi = [tex_pred, masks_ldi, disps_pred]
return ldi
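# Hedged sketch of how these pieces compose (encoder_decoder_simple is defined
# just below; shapes assume the decoder returns features at input resolution):
#
# feat, feat_dec, skip_feat, _ = encoder_decoder_simple(images, nz=1000, nupconv=7)
# tex, masks, disps = ldi_predictor(feat_dec, n_layers=2, pred_masks=True)
# # tex: [2, B, H, W, 3]; masks and disps: [2, B, H, W, 1]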
def encoder_decoder_simple(inp_img,
nz=1000,
nupconv=8,
is_training=True,
reuse=False,
nl_diff_enc_dec=0):
"""Creates a simple encoder-decoder CNN.
Args:
inp_img: TensorFlow node for input with size B X H X W X C
nz: number of units in last layer, default=1000
nupconv: number of upconv layers in the decoder
is_training: whether batch_norm should be in train mode
reuse: Whether to reuse weights from an already defined net
nl_diff_enc_dec: the decoder uses nupconv - nl_diff_enc_dec layers
Returns:
feat: A bottleneck representation with nz units.
feat_dec: features of the same size as the image.
skip_feat: initial layer features useful for layerwise steps
end_points: intermediate activations
"""
feat, enc_intermediate = encoder_simple(
inp_img, is_training=is_training, nz=nz, reuse=reuse)
feat_dec, dec_intermediate = decoder_simple(
feat,
nconv=nupconv - nl_diff_enc_dec,
is_training=is_training,
reuse=reuse)
enc_dec_int = dict(enc_intermediate, **dec_intermediate)
skip_feat = None
return feat, feat_dec, skip_feat, enc_dec_int
def encoder_decoder_unet(inp_img,
nz=1000,
is_training=True,
reuse=False,
nl_diff_enc_dec=0):
"""Creates a Unet-like CNN with + features extracted from bottleneck.
Args:
inp_img: TensorFlow node for input with size B X H X W X C
nz: number of units in last layer, default=1000
is_training: whether batch_norm should be in train mode
reuse: Whether to reuse weights from an already defined net
nl_diff_enc_dec: the decoder uses num_enc_layers - nl_diff_enc_dec layers
Returns:
feat: A bottleneck representation with nz units.
icnv1: features of the same size as the image / 2^(nl_diff_enc_dec).
skip_feat: initial layer features useful for layerwise steps
end_points: intermediate activations
"""
batch_norm_params = {'is_training': is_training}
with tf.variable_scope('encoder_decoder_unet', reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose, slim.fully_connected],
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params,
weights_regularizer=slim.l2_regularizer(0.05),
activation_fn=tf.nn.relu,
outputs_collections=end_points_collection):
cnv1 = slim.conv2d(inp_img, 32, [7, 7], stride=2, scope='cnv1')
cnv1b = slim.conv2d(cnv1, 32, [7, 7], stride=1, scope='cnv1b')
cnv2 = slim.conv2d(cnv1b, 64, [5, 5], stride=2, scope='cnv2')
cnv2b = slim.conv2d(cnv2, 64, [5, 5], stride=1, scope='cnv2b')
cnv3 = slim.conv2d(cnv2b, 128, [3, 3], stride=2, scope='cnv3')
cnv3b = slim.conv2d(cnv3, 128, [3, 3], stride=1, scope='cnv3b')
cnv4 = slim.conv2d(cnv3b, 256, [3, 3], stride=2, scope='cnv4')
cnv4b = slim.conv2d(cnv4, 256, [3, 3], stride=1, scope='cnv4b')
cnv5 = slim.conv2d(cnv4b, 512, [3, 3], stride=2, scope='cnv5')
cnv5b = slim.conv2d(cnv5, 512, [3, 3], stride=1, scope='cnv5b')
cnv6 = slim.conv2d(cnv5b, 512, [3, 3], stride=2, scope='cnv6')
cnv6b = slim.conv2d(cnv6, 512, [3, 3], stride=1, scope='cnv6b')
cnv7 = slim.conv2d(cnv6b, 512, [3, 3], stride=2, scope='cnv7')
cnv7b = slim.conv2d(cnv7, 512, [3, 3], stride=1, scope='cnv7b')
## features via fc layers on bottleneck
cnv7b_flat = slim.flatten(cnv7b, scope='cnv7b_flat')
feat = slim.stack(
cnv7b_flat, slim.fully_connected, [2 * nz, nz, nz], scope='fc')
feats_dec = [] # decoded features at different layers
skip_feat = [] # initial layer features useful for layerwise steps
upcnv7 = slim.conv2d_transpose(
cnv7b, 512, [4, 4], stride=2, scope='upcnv7')
# There might be dimension mismatch due to uneven down/up-sampling
# upcnv7 = resize_like(upcnv7, cnv6b)
i7_in = tf.concat([upcnv7, cnv6b], axis=3)
icnv7 = slim.conv2d(i7_in, 512, [3, 3], stride=1, scope='icnv7')
feats_dec.append(icnv7)
skip_feat.append(cnv6b)
upcnv6 = slim.conv2d_transpose(
icnv7, 512, [4, 4], stride=2, scope='upcnv6')
# upcnv6 = resize_like(upcnv6, cnv5b)
self.multifile.addSignature(certificate, chain or '', pkey or '', password or '')
self.multifile.close()
if not multifileFilename.renameTo(self.packageFullpath):
self.notify.error("Cannot move %s to %s" % (multifileFilename, self.packageFullpath))
if self.p3dApplication:
# No patches for an application; just move it into place.
# Make the application file executable.
os.chmod(self.packageFullpath.toOsSpecific(), 0o755)
else:
self.readDescFile()
self.packageSeq += 1
self.perPlatform = True # always true on modern packages.
self.compressMultifile()
self.writeDescFile()
self.writeImportDescFile()
# Now that we've written out the desc file, we don't
# need to keep around the uncompressed archive
# anymore.
self.packageFullpath.unlink()
# Replace or add the entry in the contents.
pe = Packager.PackageEntry()
pe.fromFile(self.packageName, self.platform, self.version,
False, self.perPlatform, self.packager.installDir,
self.packageDesc, self.packageImportDesc)
pe.packageSeq = self.packageSeq
pe.packageSetVer = self.packageSetVer
self.packager.contents[pe.getKey()] = pe
self.packager.contentsChanged = True
self.cleanup()
return True
def installSolo(self):
""" Installs the package as a "solo", which means we
simply copy the one file into the install directory. This
is primarily intended for the "coreapi" plugin, which is
just a single dll and a jpg file; but it can support other
kinds of similar "solo" packages as well. """
self.considerPlatform()
self.perPlatform = False # Not true on "solo" packages.
packageDir = self.packageName
if self.platform:
packageDir += '/' + self.platform
if self.version:
packageDir += '/' + self.version
if not self.packager.allowPackages:
message = 'Cannot generate packages without an installDir; use -i'
raise PackagerError(message)
installPath = Filename(self.packager.installDir, packageDir)
# Remove any files already in the installPath.
origFiles = vfs.scanDirectory(installPath)
if origFiles:
for origFile in origFiles:
origFile.getFilename().unlink()
files = []
for file in self.files:
if file.isExcluded(self):
# Skip this file.
continue
files.append(file)
if not files:
# No files, never mind.
return
if len(files) != 1:
raise PackagerError('Multiple files in "solo" package %s' % (self.packageName))
Filename(installPath, '').makeDir()
file = files[0]
targetPath = Filename(installPath, file.newName)
targetPath.setBinary()
file.filename.setBinary()
if not file.filename.copyTo(targetPath):
self.notify.warning("Could not copy %s to %s" % (
file.filename, targetPath))
# Replace or add the entry in the contents.
pe = Packager.PackageEntry()
pe.fromFile(self.packageName, self.platform, self.version,
True, self.perPlatform, self.packager.installDir,
Filename(packageDir, file.newName), None)
peOrig = self.packager.contents.get(pe.getKey(), None)
if peOrig:
pe.packageSeq = peOrig.packageSeq + 1
pe.packageSetVer = peOrig.packageSetVer
if self.packageSetVer:
pe.packageSetVer = self.packageSetVer
self.packager.contents[pe.getKey()] = pe
self.packager.contentsChanged = True
# Hack for coreapi package, to preserve backward compatibility
# with old versions of the runtime, which still called the
# 32-bit Windows platform "win32".
if self.packageName == "coreapi" and self.platform == "win_i386":
pe2 = copy.copy(pe)
pe2.platform = "win32"
self.packager.contents[pe2.getKey()] = pe2
self.cleanup()
return True
def cleanup(self):
# Now that all the files have been packed, we can delete
# the temporary files.
for file in self.files:
if file.deleteTemp:
file.filename.unlink()
def addFile(self, *args, **kw):
""" Adds the named file to the package. Returns the file
object, or None if it was not added by this call. """
file = Packager.PackFile(self, *args, **kw)
if file.filename in self.sourceFilenames:
# Don't bother, it's already here.
return None
lowerName = file.newName.lower()
if lowerName in self.targetFilenames:
# Another file is already in the same place.
file2 = self.targetFilenames[lowerName]
self.packager.notify.warning(
"%s is shadowing %s" % (file2.filename, file.filename))
return None
self.sourceFilenames[file.filename] = file
if file.required:
self.requiredFilenames.append(file)
if file.text is None and not file.filename.exists():
if not file.isExcluded(self):
self.packager.notify.warning("No such file: %s" % (file.filename))
return None
self.files.append(file)
self.targetFilenames[lowerName] = file
return file
def excludeFile(self, filename):
""" Excludes the named file (or glob pattern) from the
package. """
xfile = Packager.ExcludeFilename(self.packager, filename, self.packager.caseSensitive)
self.excludedFilenames.append(xfile)
def __addImplicitDependenciesWindows(self):
""" Walks through the list of files, looking for dll's and
exe's that might include implicit dependencies on other
dll's and assembly manifests. Tries to determine those
dependencies, and adds them back into the filelist. """
# We walk through the list as we modify it. That's OK,
# because we want to follow the transitive closure of
# dependencies anyway.
for file in self.files:
if not file.executable:
continue
if file.isExcluded(self):
# Skip this file.
continue
if file.filename.getExtension().lower() == "manifest":
filenames = self.__parseManifest(file.filename)
if filenames is None:
self.notify.warning("Unable to determine dependent assemblies from %s" % (file.filename))
continue
else:
tempFile = Filename.temporary('', 'p3d_', '.txt')
command = 'dumpbin /dependents "%s" >"%s"' % (
file.filename.toOsSpecific(),
tempFile.toOsSpecific())
try:
os.system(command)
except:
pass
filenames = None
if tempFile.exists():
filenames = self.__parseDependenciesWindows(tempFile)
tempFile.unlink()
if filenames is None:
self.notify.warning("Unable to determine dependencies from %s" % (file.filename))
filenames = []
# Extract the manifest file so we can figure out
# the dependent assemblies.
tempFile = Filename.temporary('', 'p3d_', '.manifest')
resindex = 2
if file.filename.getExtension().lower() == "exe":
resindex = 1
command = 'mt -inputresource:"%s";#%d -out:"%s" > nul' % (
file.filename.toOsSpecific(),
resindex, tempFile.toOsSpecific())
                out = None
                try:
                    out = os.system(command)
except:
pass
afilenames = None
if tempFile.exists():
afilenames = self.__parseManifest(tempFile)
tempFile.unlink()
# Also check for an explicit private-assembly
# manifest file on disk.
mfile = file.filename + '.manifest'
if mfile.exists():
if afilenames is None:
afilenames = []
afilenames += self.__parseManifest(mfile)
# Since it's an explicit manifest file, it
# means we should include the manifest
# file itself in the package.
newName = Filename(file.dependencyDir, mfile.getBasename())
self.addFile(mfile, newName = str(newName),
explicit = False, executable = True)
if afilenames is None and out != 31:
self.notify.warning("Unable to determine dependent assemblies from %s" % (file.filename))
if afilenames is not None:
filenames += afilenames
# Attempt to resolve the dependent filename relative
# to the original filename, before we resolve it along
# the PATH.
path = DSearchPath(Filename(file.filename.getDirname()))
for filename in filenames:
filename = Filename.fromOsSpecific(filename)
filename.resolveFilename(path)
filename.makeTrueCase()
newName = Filename(file.dependencyDir, filename.getBasename())
self.addFile(filename, newName = str(newName),
explicit = False, executable = True)
def __parseDependenciesWindows(self, tempFile):
""" Reads the indicated temporary file, the output from
dumpbin /dependents, to determine the list of dll's this
executable file depends on. """
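        # The dump we parse looks roughly like this (illustrative):
        #
        #   Dump of file foo.exe
        #   File Type: EXECUTABLE IMAGE
        #
        #     Image has the following dependencies:
        #
        #       KERNEL32.dll
        #       USER32.dll
        #
        # so we scan for the "has the following dependencies" marker, skip one
        # blank line, then collect names until the next blank line.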
lines = open(tempFile.toOsSpecific(), 'rU').readlines()
li = 0
while li < len(lines):
line = lines[li]
li += 1
if line.find(' has the following dependencies') != -1:
break
if li < len(lines):
line = lines[li]
if line.strip() == '':
# Skip a blank line.
li += 1
# Now we're finding filenames, until the next blank line.
filenames = []
while li < len(lines):
line = lines[li]
li += 1
line = line.strip()
if line == '':
# We're done.
return filenames
filenames.append(line)
# Hmm, we ran out of data. Oh well.
if not filenames:
# Some parse error.
return None
# At least we got some data.
return filenames
def __parseManifest(self, tempFile):
""" Reads the indicated application manifest file, to
determine the list of dependent assemblies this
executable file depends on. """
doc = TiXmlDocument(tempFile.toOsSpecific())
if not doc.LoadFile():
return None
assembly = doc.FirstChildElement("assembly")
if not assembly:
return None
# Pick up assemblies that it depends on
filenames = []
dependency = assembly.FirstChildElement("dependency")
while dependency:
depassembly = dependency.FirstChildElement("dependentAssembly")
if depassembly:
ident = depassembly.FirstChildElement("assemblyIdentity")
if ident:
name = ident.Attribute("name")
if name:
filenames.append(name + ".manifest")
dependency = dependency.NextSiblingElement("dependency")
# Pick up direct dll dependencies that it lists
dfile = assembly.FirstChildElement("file")
while dfile:
name = dfile.Attribute("name")
if name:
filenames.append(name)
dfile = dfile.NextSiblingElement("file")
return filenames
def __locateFrameworkLibrary(self, library):
""" Locates the given library inside its framework on the
default framework paths, and returns its location as Filename. """
# If it's already a full existing path, we
# don't search for it anymore, of course.
if Filename.fromOsSpecific(library).exists():
return Filename.fromOsSpecific(library)
# DSearchPath appears not to work for directories.
fpath = []
fpath.append(Filename("/Library/Frameworks"))
fpath.append(Filename("/System/Library/Frameworks"))
fpath.append(Filename("/Developer/Library/Frameworks"))
fpath.append(Filename(os.path.expanduser("~"), "Library/Frameworks"))
if "HOME" in os.environ:
fpath.append(Filename(os.environ["HOME"], "Library/Frameworks"))
ffilename = Filename(library.split('.framework/', 1)[0].split('/')[-1] + '.framework')
ffilename = Filename(ffilename, library.split('.framework/', 1)[-1])
# Look under the system root first, if supplied.
if self.packager.systemRoot:
for i in fpath:
fw = Filename(self.packager.systemRoot, i)
if Filename(fw, ffilename).exists():
return Filename(fw, ffilename)
for i in fpath:
if Filename(i, ffilename).exists():
return Filename(i, ffilename)
# Not found? Well, let's just return the framework + file
# path, the user will be presented with a warning later.
return ffilename
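    # e.g. "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/Python"
    # (path illustrative) is normalized to the relative
    # "Python.framework/Versions/2.7/Python", which is then tried under each
    # entry of fpath (and under systemRoot first, when given).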
def __alterFrameworkDependencies(self, file, framework_deps):
""" Copies the given library file to a temporary directory,
and alters the dependencies so that it doesn't contain absolute
framework dependencies. """
if not file.deleteTemp:
            # Copy the file to a temporary location because we are
            # about to modify it.
# Veerappan_bnfo601_exam2/Veerappan_BLAST_prot.py
"""
<NAME>
BNFO 601 - Exam 2
Question 2. Protein BLAST
"""
import math
from PAM import PAM
class BLAST(object):
FORWARD = 1 # These are class variables shared by all instances of the BLAST class
BACKWARD = -1
ROW = (0, 1)
COLUMN = (1, 0)
def __init__(self, query=None, target=None, word_size=3, gap_open=-10, gap_extend=-4, threshold=10, PAM=None):
self.query = query # This is the string corresponding to the query sequence
self.target = target # This is the string corresponding to the target sequence
self.word_size = word_size # Size of the seed word for initiating extensions
self.word_score = None # something different required for PBLAST!
self.gap_open = gap_open
self.gap_extend = gap_extend
self.querylen = len(query)
self.targetlen = len(target)
self.blast_table = {} # Our main dynamic programming table containing scores
self.traceback_table = {} # A corresponding table for recording the tracebacks
self.target_index = {}
self.threshold = threshold # Neighborhood threshold value for scoring
self.PAM = PAM # PAM table
return
def score(self): # This method performs BLAST scoring and returns a string describing the resulting alignment
result_summary = [] # A list, for now, that will store results of the alignments
if not self.target_index: # if this is the first time scoring we should index the target
for i in xrange(len(self.target) - self.word_size + 1):
word = self.target[i: i + self.word_size]
if word in self.target_index:
self.target_index[word].append(i) # A dict of lists is an efficient structure for this index.
# The list items are word coordinates in the target.
else:
self.target_index[word] = [i]
# print self.target_index
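        # e.g. with word_size=3, an (illustrative) target "MKVLMKV" indexes as
        # {'MKV': [0, 4], 'KVL': [1], 'VLM': [2], 'LMK': [3]}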
## First we must iterate through words in the query:
query_position = 0
while query_position < self.querylen - self.word_size + 1:
# print "Query position is", query_position
query_word = self.query[query_position:query_position + self.word_size]
# lookup scores for each AA pair from PAM table
for target_word in self.target_index.keys():
score = 0
for i in range(len(target_word)):
score += self.PAM[target_word[i], query_word[i]]
# If the calculated score is higher than the neighborhood threshold value then extend the alignment
# and set the starting word score equal to the calculated score
if score > self.threshold:
self.word_score = score
for target_position in self.target_index[target_word]:
print "Searching for seed", query_word, "at target position", target_position
# print "Extending forward"
forward_score, forward_extension_q, forward_extension_t = \
self._extend_alignment(query_position, target_position, self.FORWARD)
# print "Extending backwards"
backward_score, backward_extension_q, backward_extension_t = \
self._extend_alignment(query_position, target_position, self.BACKWARD)
q_result = backward_extension_q[:-1] + query_word + forward_extension_q[1:]
t_result = backward_extension_t[:-1] + query_word + forward_extension_t[1:]
# Note that the last character of a backward extension, and the zeroth character of a forward
# extension overlap with the query word and should therefore be discarded - thus the slice notation.
score = forward_score + backward_score - self.word_score
# We need to make sure that we don't double count the seed score!
# calculate e-value
# e_value = self.querylen * self.targetlen * math.e ** (math.log(1 / 4) * score)
# calculate bit score
# bit_score = (-math.log(1 / 4) * score - math.log(1)) / math.log(2)
query_begin = query_position - len(backward_extension_q) + 2
target_begin = target_position - len(backward_extension_t) + 2
# result_summary.append((e_value, bit_score, score, q_result, t_result, query_begin, target_begin))
result_summary.append((score, q_result, t_result, query_begin, target_begin))
alignment_string = '\nAlignment had a score of ' + str(score) + ' and is:\n\nTarget:\t' + \
str(target_begin) + '\t' + str(t_result) + '\n\t\t\t'
for k in xrange(len(t_result)): # t and q alignments should be the same length!
if t_result[k] == q_result[k]:
alignment_string += '|'
# Only put a bar if the two characters are identical at this position
else:
alignment_string += ' ' # otherwise just insert a space
alignment_string += '\nQuery:\t' + str(query_begin) + '\t' + str(q_result) + '\n'
print alignment_string
# The above statements just concatenate together a multi-line string that will correctly display
# the best alignment when it is subsequently printed.
query_position += 1
return result_summary
def _extend_alignment(self, query_start, target_start, direction):
""" This private method attempts to extend an alignment in the forward and backward direction
depending on the value of the direction flag, which here takes the value 1 (for forward extension) or
-1 for backward.For clarity these constants are defined by the class variables self.FORWARD and self.BACKWARD
"""
self.high_score = self.word_score
        # the highest score encountered so far is initially just the seed word score
self.high_q_pos = self.high_t_pos = 0
if direction == self.FORWARD: # We start with the 0,0 position representing the last character
query_start += self.word_size - 1 # of the seed word for forward extensions.
target_start += self.word_size - 1 # For backward extensions, leave it as it is (i.e. zeroth character)
self.blast_table = dict()
# The BLAST table is a dict of tuples. Each tuple represents a (query, target) position
# this sparse representation will be much more efficient than using a 2D list
self.blast_table[0, 0] = self.high_score # initialize the top left corner with the word score
self.high_q_pos = 0
self.high_t_pos = 0
self.traceback_table[0, 0] = (1, 1)
# There is no traceback path for the origin, but the program logic elsewhere dictates that we provide one
cur_t_pos = 1 # we are going to score the edges first (top and left), which can *only* ever be gaps back
# to the origin. i.e. the question of matching or not matching is completely irrelevant here.
# We start by scoring the top edge, beginning with position 1..
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open
while cur_score: # only keep going as long as we have non-zero values
self.blast_table[(0, cur_t_pos)] = cur_score # only record non-zero values
self.traceback_table[(0, cur_t_pos)] = (0, 1) # record a target gap in the traceback table
cur_score = max(0, self.blast_table[(0, cur_t_pos)] + self.gap_extend) # any subsequent are extends
cur_t_pos += 1
cur_t_pos = 0 # Now we do the same thing for the left edge as we just did for the top edge
cur_q_pos = 1
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open
while cur_score: # only keep going as long as we have non-zero values
self.blast_table[(cur_q_pos, 0)] = cur_score # only record non-zero values
self.traceback_table[(cur_q_pos, 0)] = (1, 0) # record a query gap in the traceback table
cur_score = max(0, self.blast_table[(cur_q_pos, 0)] + self.gap_extend)
            cur_q_pos += 1
# print "blast table 0,0 is", self.blast_table[0, 0], "and high score is", self.high_score
# alright, finished with edges. Note that high scores can NEVER occur in an edge so these were not considered.
# Henceforth, however, we will need to think about this.
cur_t_pos = 0 # Start at the first position
cur_q_pos = 0
# Now we will score the table, proceeding according to the algorithm description: first incrementing along
# the diagonal, then scoring the adjacent row, then the column below
# Unlike <NAME>, the matrix is no longer of defined size, so we need to use while loops instead of for
while True: # I think it's cleaner to affirmatively break out of this main loop. Too bad Python has no do-while
cur_t_pos += 1 # Advance along the diagonal by incrementing
cur_q_pos += 1 # Remember, these refer to coordinates in our table, not in the actual target or query
# Probably we need to do some bounds checking here too with respect to absolute position in the query and
# target similar to what is done in the _fill_in_row_or_column method
# print "Beginning row starting at", cur_q_pos, cur_t_pos, "of the blast table"
max_in_row = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start, target_start,
direction, self.ROW)
# print "Max in row was ", max_in_row
# print "Beginning column starting at", cur_q_pos, cur_t_pos, "of the blast table"
max_in_column = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start,
target_start, direction, self.COLUMN)
# print "Max in column was ", max_in_column
if not max(max_in_row, max_in_column):
break # If the maximum value we encounter in both the rows and columns is zero, we are done building
# print "Finished building a | |
from sympy import (
acos,
acosh,
asinh,
atan,
cos,
Derivative,
diff,
Dummy,
Eq,
Ne,
erfi,
exp,
Function,
I,
Integral,
LambertW,
log,
O,
pi,
Rational,
rootof,
S,
sin,
sqrt,
Subs,
Symbol,
tan,
asin,
sinh,
Piecewise,
symbols,
Poly,
sec,
Ei,
re,
im,
atan2,
collect,
hyper,
simplify,
)
from sympy.solvers.ode import (
classify_ode,
homogeneous_order,
infinitesimals,
checkinfsol,
dsolve,
)
from sympy.solvers.ode.subscheck import checkodesol, checksysodesol
from sympy.solvers.ode.ode import (
_linear_coeff_match,
_ode_factorable_match,
_remove_redundant_solutions,
_undetermined_coefficients_match,
classify_sysode,
constant_renumber,
constantsimp,
get_numbered_constants,
solve_ics,
)
from sympy.functions import airyai, airybi, besselj, bessely
from sympy.solvers.deutils import ode_order
from sympy.testing.pytest import XFAIL, skip, raises, slow, ON_TRAVIS, SKIP
from sympy.utilities.misc import filldedent
C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10 = symbols("C0:11")
u, x, y, z = symbols("u,x:z", real=True)
f = Function("f")
g = Function("g")
h = Function("h")
# Note: the tests below may fail (but still be correct) if ODE solver,
# the integral engine, solve(), or even simplify() changes. Also, in
# differently formatted solutions, the arbitrary constants might not be
# equal. Using specific hints in tests can help to avoid this.
# Tests of order higher than 1 should run the solutions through
# constant_renumber because it will normalize it (constant_renumber causes
# dsolve() to return different results on different machines)
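# A hint-pinned sketch of that advice (illustrative; not one of the suite's
# tests): pinning the hint keeps the test stable if dsolve()'s default hint
# ordering changes.
#
#   def test_first_order_linear_pinned():
#       sol = dsolve(f(x).diff(x) - f(x), f(x), hint="1st_linear")
#       assert sol == Eq(f(x), C1*exp(x))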
def test_get_numbered_constants():
with raises(ValueError):
get_numbered_constants(None)
def test_dsolve_system():
eqs = [-f(x).diff(x), g(x).diff(x)]
sols = {Eq(f(x), C1), Eq(g(x), C2)}
assert set(dsolve(eqs)) == sols
eqs = [f(x).diff(x, 2), g(x).diff(x)]
with raises(ValueError):
dsolve(eqs) # NotImplementedError would be better
eqs = [f(x).diff(x) - x, f(x).diff(x) + x]
with raises(ValueError):
# Could also be NotImplementedError. f(x)=0 is a solution...
dsolve(eqs)
eqs = [f(x, y).diff(x)]
with raises(ValueError):
dsolve(eqs)
eqs = [f(x, y).diff(x) + g(x).diff(x), g(x).diff(x)]
with raises(ValueError):
dsolve(eqs)
def test_dsolve_all_hint():
eq = f(x).diff(x)
output = dsolve(eq, hint="all")
# Match the Dummy variables:
sol1 = output["separable_Integral"]
_y = sol1.lhs.args[1][0]
sol1 = output["1st_homogeneous_coeff_subs_dep_div_indep_Integral"]
_u1 = sol1.rhs.args[1].args[1][0]
expected = {
"1st_homogeneous_coeff_subs_indep_div_dep_Integral": Eq(f(x), C1),
"separable_Integral": Eq(Integral(1, (_y, f(x))), C1 + Integral(0, x)),
"separable": Eq(f(x), C1),
"lie_group": Eq(f(x), C1),
"nth_linear_constant_coeff_homogeneous": Eq(f(x), C1),
"nth_algebraic_Integral": Eq(f(x), C1),
"1st_power_series": Eq(f(x), C1),
"1st_homogeneous_coeff_subs_indep_div_dep": Eq(f(x), C1),
"1st_linear": Eq(f(x), C1),
"1st_homogeneous_coeff_subs_dep_div_indep": Eq(f(x), C1),
"1st_homogeneous_coeff_subs_dep_div_indep_Integral": Eq(
log(x), C1 + Integral(-1 / _u1, (_u1, f(x) / x))
),
"1st_homogeneous_coeff_best": Eq(f(x), C1),
"nth_linear_euler_eq_homogeneous": Eq(f(x), C1),
"nth_algebraic": Eq(f(x), C1),
"1st_linear_Integral": Eq(f(x), C1 + Integral(0, x)),
"best": Eq(f(x), C1),
"best_hint": "nth_algebraic",
"default": "nth_algebraic",
"order": 1,
}
assert output == expected
assert dsolve(eq, hint="best") == Eq(f(x), C1)
def test_dsolve_ics():
# Maybe this should just use one of the solutions instead of raising...
with raises(NotImplementedError):
dsolve(f(x).diff(x) - sqrt(f(x)), ics={f(1): 1})
@XFAIL
@slow
def test_nonlinear_3eq_order1_type1():
if ON_TRAVIS:
skip("Too slow for travis.")
a, b, c = symbols("a b c")
eqs = [
a * f(x).diff(x) - (b - c) * g(x) * h(x),
b * g(x).diff(x) - (c - a) * h(x) * f(x),
c * h(x).diff(x) - (a - b) * f(x) * g(x),
]
assert dsolve(eqs) # NotImplementedError
def test_dsolve_euler_rootof():
eq = x ** 6 * f(x).diff(x, 6) - x * f(x).diff(x) + f(x)
sol = Eq(
f(x),
C1 * x
+ C2
* x
** rootof(x ** 5 - 14 * x ** 4 + 71 * x ** 3 - 154 * x ** 2 + 120 * x - 1, 0)
+ C3
* x
** rootof(x ** 5 - 14 * x ** 4 + 71 * x ** 3 - 154 * x ** 2 + 120 * x - 1, 1)
+ C4
* x
** rootof(x ** 5 - 14 * x ** 4 + 71 * x ** 3 - 154 * x ** 2 + 120 * x - 1, 2)
+ C5
* x
** rootof(x ** 5 - 14 * x ** 4 + 71 * x ** 3 - 154 * x ** 2 + 120 * x - 1, 3)
+ C6
* x
** rootof(x ** 5 - 14 * x ** 4 + 71 * x ** 3 - 154 * x ** 2 + 120 * x - 1, 4),
)
assert dsolve(eq) == sol
def test_linear_2eq_order1_type2_noninvertible():
# a*d - b*c == 0
eqs = [Eq(diff(f(x), x), f(x) + g(x) + 5), Eq(diff(g(x), x), f(x) + g(x) + 7)]
sol = [
Eq(f(x), C1 * exp(2 * x) + C2 - x - 3),
Eq(g(x), C1 * exp(2 * x) - C2 + x - 3),
]
assert dsolve(eqs) == sol
assert checksysodesol(eqs, sol) == (True, [0, 0])
@XFAIL
def test_linear_2eq_order1_type2_fixme():
# There is a FIXME comment about this in the code that handles this case.
# The answer returned is currently incorrect as reported by checksysodesol
# below...
# a*d - b*c == 0 and a + b*c/a = 0
eqs = [Eq(diff(f(x), x), f(x) + g(x) + 5), Eq(diff(g(x), x), -f(x) - g(x) + 7)]
sol = [
Eq(f(x), C1 + C2 * (x + 1) + 12 * x ** 2 + 5 * x),
Eq(g(x), -C1 - C2 * x - 12 * x ** 2 + 7 * x),
]
assert dsolve(eqs) == sol
assert checksysodesol(eqs, sol) == (True, [0, 0])
def test_linear_2eq_order1_type4():
eqs = [Eq(diff(f(x), x), f(x) + x * g(x)), Eq(diff(g(x), x), -x * f(x) + g(x))]
sol = [
Eq(f(x), (C1 * cos(x ** 2 / 2) + C2 * sin(x ** 2 / 2)) * exp(x)),
Eq(g(x), (-C1 * sin(x ** 2 / 2) + C2 * cos(x ** 2 / 2)) * exp(x)),
]
# FIXME: This should probably be fixed so that this happens in the solver:
dsolve_sol = dsolve(eqs)
    dsolve_sol = [s.doit() for s in dsolve_sol]
assert dsolve_sol == sol
assert checksysodesol(eqs, sol) == (True, [0, 0])
@XFAIL
def test_linear_2eq_order1_type4_broken():
eqs = [Eq(f(x).diff(x), f(x) + x * g(x)), Eq(g(x).diff(x), x * f(x) - g(x))]
# FIXME: This is not the correct solution:
sol = [
Eq(f(x), (C1 * sin(x) + C2 * cos(x)) * exp(x ** 2 / 2)),
Eq(g(x), (C1 * cos(x) - C2 * sin(x)) * exp(x ** 2 / 2)),
]
dsolve_sol = dsolve(eqs)
    dsolve_sol = [s.doit() for s in dsolve_sol]
assert dsolve_sol == sol
assert checksysodesol(eqs, sol) == (True, [0, 0])
def test_linear_2eq_order1_type5():
eqs = [
Eq(diff(f(x), x), x * f(x) + x ** 2 * g(x)),
Eq(diff(g(x), x), 2 * x ** 2 * f(x) + (x + 3 * x ** 2) * g(x)),
]
sol = [
Eq(
f(x),
(
C1 * exp(x ** 3 * (S(3) / 2 + sqrt(17) / 2) / 3)
+ C2 * exp(x ** 3 * (-sqrt(17) / 2 + S(3) / 2) / 3)
)
* exp(x ** 2 / 2),
),
Eq(
g(x),
(
C1
* (S(3) / 2 + sqrt(17) / 2)
* exp(x ** 3 * (S(3) / 2 + sqrt(17) / 2) / 3)
+ C2
* (-sqrt(17) / 2 + S(3) / 2)
* exp(x ** 3 * (-sqrt(17) / 2 + S(3) / 2) / 3)
)
* exp(x ** 2 / 2),
),
]
dsolve_sol = dsolve(eqs)
# FIXME: This should probably be fixed so that this happens in the solver:
    dsolve_sol = [s.doit() for s in dsolve_sol]
assert dsolve_sol == sol
assert checksysodesol(eqs, sol) == (True, [0, 0])
@XFAIL
def test_linear_2eq_order1_type6_path1():
eqs = [
Eq(diff(f(x), x), f(x) + x * g(x)),
Eq(diff(g(x), x), 2 * (1 + 2 / x) * f(x) + 2 * (x - 1 / x) * g(x)),
]
# This solution is currently returned but is incorrect:
sol = [
Eq(
f(x),
(
C1
+ Integral(
C2
* x
* exp(-2 * Integral(1 / x, x))
* exp(Integral(-2 * x - 1, x)),
x,
)
)
* exp(-Integral(-2 * x - 1, x)),
),
Eq(
g(x),
C1 * exp(-2 * Integral(1 / x, x))
+ 2
* (
C1
+ Integral(
C2
* x
* exp(-2 * Integral(1 / x, x))
* exp(Integral(-2 * x - 1, x)),
x,
)
)
* exp(-Integral(-2 * x - 1, x)),
),
]
dsolve_sol = dsolve(eqs)
# Comparing solutions with == doesn't work in this case...
    assert [ds.lhs for ds in dsolve_sol] == [f(x), g(x)]
from enum import Enum, auto
from operator import attrgetter
import networkx as nx
import pendulum
import uuid
from django.apps import apps
from django.core.exceptions import FieldDoesNotExist
from django.db.models import DateTimeField
from share.exceptions import ShareException
from share.util import TopologicalSorter
class MutableGraphError(ShareException):
pass
class PrivateNodeAttrs(Enum):
TYPE = auto()
MODEL = auto()
CONCRETE_TYPE = auto()
CONCRETE_MODEL = auto()
class EdgeAttrs(Enum):
FROM_NAME = auto()
TO_NAME = auto()
# TODO get SHARE schema in a non-model form
def resolve_model(type):
return apps.get_model('share', type)
def resolve_field(model, key):
# TODO make this util more general; don't use Django stuff
try:
return model._meta.get_field(key)
except FieldDoesNotExist:
return None
class MutableGraph(nx.DiGraph):
"""NetworkX DiGraph with some SHARE-specific features.
Nodes in the DiGraph are string IDs. Uses MutableNode as a convenience interface to access/manipulate nodes.
Provides the abstraction of named edges:
* Each named edge has two names: `from_name` and `to_name`
* the "from" node knows the edge by its `from_name`
* the "to" node knows the edge by its `to_name`
* correspond to a foreign key and its related field
* All outgoing edges from a node must be unique on `from_name`
Example: Find all URIs identifying a work
```
work = graph.get_node(work_id)
uris = [identifier['uri'] for identifier in work['identifiers']]
```
Example: Remove all orphan nodes (no incoming or outgoing edges)
```
orphans = graph.filter_nodes(lambda n: not graph.degree(n))
for orphan in orphans:
graph.remove_node(orphan.id)
```
"""
@classmethod
def from_jsonld(cls, nodes):
"""Create a mutable graph from a list of JSON-LD-style dicts.
"""
if isinstance(nodes, dict):
nodes = nodes['@graph']
graph = cls()
for n in nodes:
id, type = None, None
attrs = {}
for k, v in n.items():
if k == '@id':
id = v
elif k == '@type':
type = v
elif isinstance(v, dict) and k != 'extra':
graph.add_node(v['@id'], v['@type'])
attrs[k] = v['@id']
elif isinstance(v, list):
pass # Don't bother with incoming edges, let the other node point here
else:
attrs[k] = v
if not id or not type:
raise MutableGraphError('Nodes must have id and type')
graph.add_node(id, type, attrs)
return graph
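    # e.g. (types and ids illustrative):
    #   MutableGraph.from_jsonld([
    #       {'@id': '_:w', '@type': 'creativework', 'title': 'A paper'},
    #       {'@id': '_:i', '@type': 'workidentifier', 'uri': 'http://example.com/x',
    #        'creative_work': {'@id': '_:w', '@type': 'creativework'}},
    #   ])
    # adds both nodes and records the dict-valued key as a reference
    # from '_:i' to '_:w'.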
def __init__(self):
super().__init__()
self.changed = False
def to_jsonld(self, in_edges=True):
"""Return a list of JSON-LD-style dicts.
in_edges (boolean): Include lists of incoming edges. Default True.
"""
return [
node.to_jsonld(in_edges=in_edges)
for node in self.topologically_sorted()
]
def add_node(self, id, type, attrs=None):
"""Create a node in the graph.
id (hashable): Unique node ID. If None, generate a random ID.
type (str): Name of the node's model
keyword args: Named attributes or relations corresponding to fields on the node's model
Returns a MutableNode wrapper for the new node.
"""
if type is None:
raise MutableGraphError('Must provide `type` to MutableGraph.add_node')
self.changed = True
if id is None:
id = '_:{}'.format(uuid.uuid4())
super().add_node(id)
return MutableNode(self, id, type, attrs)
def get_node(self, id):
"""Get a node by ID.
id (hashable): Unique node ID
Returns a MutableNode wrapper for the node, or None.
"""
if id in self:
return MutableNode(self, id)
return None
def remove_node(self, id, cascade=True):
"""Remove a node and its incoming/outgoing edges.
id (hashable): Unique node ID
cascade (boolean): Also remove nodes with edges which point to this node. Default True.
"""
self.changed = True
to_remove = list(self.predecessors(id)) if cascade else []
super().remove_node(id)
for from_id in to_remove:
self.remove_node(from_id, cascade)
def filter_nodes(self, filter):
"""Filter the nodes in the graph.
filter (callable): When called with a MutableNode argument, return something truthy to
include it in the filtered list, or something falsy to omit it.
Returns list of MutableNodes.
"""
# TODO figure out common sorts of filters, make kwargs for them and optimize
return [node for node in self if filter(node)]
def filter_type(self, type_name):
# TODO make a sort of index dict, mapping type to nodes
return self.filter_nodes(lambda n: n.type == type_name.lower())
def filter_by_concrete_model(self, model):
# TODO make a sort of index dict, mapping model to nodes
return self.filter_nodes(lambda n: n.concrete_model == model)
def add_named_edge(self, from_id, to_id, from_name, to_name):
"""Add a named edge.
from_id (hashable): Unique ID for the node this edge comes from
to_id (hashable): Unique ID for the node this edge points to
from_name (str): Name of the edge on its 'from' node (must be unique on the node)
to_name (str): Name of the edge on its 'to' node
"""
if any(data.get(EdgeAttrs.FROM_NAME) == from_name
for _, _, data in self.out_edges(from_id, data=True)):
raise MutableGraphError('Out-edge names must be unique on the node')
self.changed = True
self.add_edge(from_id, to_id)
self.edges[from_id, to_id][EdgeAttrs.FROM_NAME] = from_name
self.edges[from_id, to_id][EdgeAttrs.TO_NAME] = to_name
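    # e.g., pairing a foreign key with its related name (names illustrative):
    #   graph.add_named_edge(identifier_id, work_id,
    #                        'creative_work',   # FK name on the identifier
    #                        'identifiers')     # related name on the work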
def remove_named_edge(self, from_id, from_name):
"""Remove a named edge.
from_id (hashable): Unique ID for the node this edge comes from
from_name (str): Name of the edge on its 'from' node
"""
self.changed = True
try:
to_id = next(
to_id for _, to_id, data
in self.out_edges(from_id, data=True)
if data.get(EdgeAttrs.FROM_NAME) == from_name
)
self.remove_edge(from_id, to_id)
except StopIteration:
pass
def resolve_named_out_edge(self, from_id, from_name):
"""Get the node a named edge points to.
from_id (hashable): Unique ID for the node this edge comes from
from_name (str): Name of the edge on its 'from' node
Returns a MutableNode wrapper for the node the edge points to.
"""
try:
return next(
MutableNode(self, to_id) for _, to_id, data
in self.out_edges(from_id, data=True)
if data.get(EdgeAttrs.FROM_NAME) == from_name
)
except StopIteration:
return None
def resolve_named_in_edges(self, to_id, to_name):
"""Get all nodes which point to a node with the same named edges.
to_id (hashable): Unique ID for the node these edges point to
to_name (str): Name of the edges on their 'to' node
Returns list of MutableNode wrappers for the nodes these edges come from.
"""
return [
MutableNode(self, from_id) for from_id, _, data
in self.in_edges(to_id, data=True)
if data.get(EdgeAttrs.TO_NAME) == to_name
]
def named_out_edges(self, from_id):
"""Get all outgoing named edges from a node.
from_id (hashable): Unique node ID
Returns dict with:
keys: `from_name` of each outgoing edge
values: MutableNode wrapper for the node each edge points to
"""
return {
data[EdgeAttrs.FROM_NAME]: MutableNode(self, to_id) for _, to_id, data
in self.out_edges(from_id, data=True)
if data.get(EdgeAttrs.FROM_NAME) is not None
}
def named_in_edges(self, to_id):
"""Get all incoming named edges to a node.
to_id (hashable): Unique node ID
Returns dict of edges with:
keys: `to_name` of each incoming edge
values: list of MutableNode wrappers for the nodes each edge comes from
"""
in_edges = {}
for from_id, _, data in self.in_edges(to_id, data=True):
to_name = data.get(EdgeAttrs.TO_NAME)
if to_name is not None:
in_edges.setdefault(to_name, []).append(MutableNode(self, from_id))
return in_edges
def merge_nodes(self, from_node, into_node):
"""Merge a nodes attrs and edges into another node.
"""
if from_node.concrete_model is not into_node.concrete_model:
raise MutableGraphError('Cannot merge nodes of different types')
self.changed = True
        # Merged node will have the more specific type
if len(from_node.model.__mro__) >= len(into_node.model.__mro__):
from_node, into_node = into_node, from_node
self._merge_node_attrs(from_node, into_node)
self._merge_in_edges(from_node, into_node)
self._merge_out_edges(from_node, into_node)
from_node.delete(cascade=False)
def topologically_sorted(self):
return TopologicalSorter(
sorted(self, key=attrgetter('id')),
dependencies=lambda n: sorted(self.successors(n.id)),
key=attrgetter('id'),
).sorted()
def __iter__(self):
return (MutableNode(self, id) for id in super().__iter__())
def __contains__(self, n):
if isinstance(n, MutableNode):
n = n.id
return super().__contains__(n)
def __bool__(self):
return bool(len(self))
def _merge_node_attrs(self, from_node, into_node):
into_attrs = into_node.attrs()
for k, new_val in from_node.attrs().items():
if k in into_attrs:
old_val = into_attrs[k]
if new_val == old_val:
continue
field = resolve_field(into_node.model, k)
if isinstance(field, DateTimeField):
new_val = max(pendulum.parse(new_val), pendulum.parse(old_val)).isoformat()
else:
new_val = self._merge_value(new_val, old_val)
into_node[k] = new_val
def _merge_value(self, value_a, value_b):
# use the longer value, or the first alphabetically if they're the same length
return sorted([value_a, value_b], key=lambda x: (-len(str(x)), x))[0]
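    # e.g. (values illustrative): _merge_value('Jane A. Doe', 'Jane Doe')
    # -> 'Jane A. Doe' (longer value wins); _merge_value('b', 'a') -> 'a'
    # (alphabetical tiebreak at equal length).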
def _merge_in_edges(self, from_node, into_node):
for in_edge_name, source_nodes in self.named_in_edges(from_node.id).items():
source_field = resolve_field(from_node.model, in_edge_name).remote_field
for source_node in source_nodes:
source_node[source_field.name] = into_node
def _merge_out_edges(self, from_node, into_node):
into_edges = self.named_out_edges(into_node.id)
for edge_name, from_target in self.named_out_edges(from_node.id).items():
into_target = into_edges.get(edge_name)
if from_target != into_target:
self.merge_nodes(from_target, into_target)
class MutableNode:
"""Convenience wrapper around a node in a MutableGraph.
"""
def __new__(cls, graph, id, *args, **kwargs):
if id not in graph:
return graph.add_node(id, *args, **kwargs)
return super().__new__(cls)
def __init__(self, graph, id, type=None, attrs=None):
self.__graph = graph
self.__id = id
self.__attrs = graph.nodes[id]
if type:
self.type = type
if attrs:
self.update(attrs)
@property
def id(self):
return self.__id
@property
def graph(self):
return self.__graph
@property
def type(self):
return self.__attrs[PrivateNodeAttrs.TYPE]
@type.setter
def type(self, value):
self.graph.changed = True
model = resolve_model(value)
self.__attrs.update({
PrivateNodeAttrs.TYPE: model._meta.model_name,
PrivateNodeAttrs.MODEL: model,
PrivateNodeAttrs.CONCRETE_TYPE: model._meta.concrete_model._meta.model_name,
order can be arbitrarily high, but some RBFs,
such as Wendland and Matern, become numerically unstable when
the derivative order exceeds 2.
'''
x = np.asarray(x, dtype=float)
assert_shape(x, (None, None), 'x')
c = np.asarray(c, dtype=float)
assert_shape(c, (None, x.shape[1]), 'c')
# makes `eps` an array of constant values if it is a scalar
if np.isscalar(eps):
eps = np.full(c.shape[0], eps, dtype=float)
else:
eps = np.asarray(eps, dtype=float)
assert_shape(eps, (c.shape[0],), 'eps')
# if `diff` is not given then take no derivatives
if diff is None:
diff = (0,)*x.shape[1]
else:
# make sure diff is immutable
diff = tuple(diff)
assert_shape(diff, (x.shape[1],), 'diff')
# add numerical function to cache if not already
if diff not in self._cache:
self._add_diff_to_cache(diff)
# expand to allow for broadcasting
x = x.T[:, :, None]
c = c.T[:, None, :]
args = (tuple(x) + tuple(c) + (eps,))
        # evaluate the cached function for the given `x`, `c`, and `eps`
out = self._cache[diff](*args)
return out
def __repr__(self):
out = '<RBF : %s>' % str(self.expr)
return out
def _add_diff_to_cache(self, diff):
'''
Symbolically differentiates the RBF and then converts the
expression to a function which can be evaluated numerically.
'''
logger.debug('Creating a numerical function for the RBF %s with '
'the derivative %s ...' % (self,str(diff)))
dim = len(diff)
c_sym = sympy.symbols('c:%s' % dim)
x_sym = sympy.symbols('x:%s' % dim)
r_sym = sympy.sqrt(sum((xi-ci)**2 for xi, ci in zip(x_sym, c_sym)))
# substitute 'r' in the RBF expression with the cartesian spatial
# variables and differentiate the RBF with respect to them
expr = self.expr.subs(_R, r_sym)
for xi, order in zip(x_sym, diff):
if order == 0:
continue
expr = expr.diff(*(xi,)*order)
# if `tol` is given, form a separate expression for the RBF near
# its center
if self.tol is not None:
if diff in self.limits:
# use a user-specified limit if available
lim = self.limits[diff]
else:
logger.debug('Approximating the value at the RBF center ...')
# replace any numbers in `tol` with high precision floats
mapping = {n : sympy.Float(n, 50)
for n in self.tol.atoms(sympy.Number)}
tol = self.tol.xreplace(mapping)
# evaluate the RBF at the point (x0=tol+c0, x1=c1, x2=c2, ...)
subs_list = [(x_sym[0], tol + c_sym[0])]
subs_list += zip(x_sym[1:], c_sym[1:])
# evaluate the RBF and its derivative w.r.t. x0 at that point
a = expr.subs(subs_list)
b = expr.diff(x_sym[0]).subs(subs_list)
# form a linear polynomial and evaluate it at x=c
lim = a - tol*b
# try to simplify the expression to reduce numerical rounding
# error. Note that this should only be a function of `eps` now
# and the simplification should not take long
lim = sympy.cancel(lim)
# return any remaining numbers to regular precision floats
mapping = {n : float(n) for n in lim.atoms(sympy.Number)}
lim = sympy.sympify(lim.xreplace(mapping))
logger.debug('Approximate value at the RBF center: %s' % lim)
# create a piecewise symbolic function which is `lim` when
# `r_sym < tol` and `expr` otherwise
expr = sympy.Piecewise((lim, r_sym < self.tol), (expr, True))
if _SYMBOLIC_TO_NUMERIC_METHOD == 'ufuncify':
func = ufuncify(x_sym + c_sym + (_EPS,), expr, backend='numpy')
elif _SYMBOLIC_TO_NUMERIC_METHOD == 'lambdify':
func = lambdify(x_sym + c_sym + (_EPS,),
expr,
modules=['numpy'])
else:
raise ValueError()
self._cache[diff] = func
logger.debug('The numeric function has been created and cached')
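    # The pipeline above in miniature (illustrative, standalone; a Gaussian
    # in place of self.expr):
    #
    #   x0, c0, eps = sympy.symbols('x0 c0 eps')
    #   r = sympy.sqrt((x0 - c0)**2)
    #   dexpr = sympy.exp(-(eps*r)**2).diff(x0)      # differentiate symbolically
    #   func = lambdify((x0, c0, eps), dexpr, modules=['numpy'])
    #   func(np.linspace(0., 1., 5), 0.5, 2.0)       # fast vectorized evaluation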
def clear_cache(self):
'''
Clears the cache of numeric functions. Makes a cache dictionary
if it does not already exist
'''
self._cache = {}
def __getstate__(self):
# This method is needed for RBF instances to be picklable. The
# cached numerical functions are not picklable and so we need to
# remove them from the state dictionary.
# make a shallow copy of the instances __dict__ so that we do not
# mess with it
state = dict(self.__dict__)
state['_cache'] = {}
return state
class SparseRBF(RBF):
'''
Stores a symbolic expression of a compact Radial Basis Function
(RBF) and evaluates the expression numerically when called. Calling
a `SparseRBF` instance will return a csc sparse matrix.
Parameters
----------
expr : sympy expression
Sympy expression for the RBF. This must be a function of the
symbolic variable `r`, which can be obtained by calling `get_r()`
or `sympy.symbols('r')`. `r` is the radial distance to the RBF
center. The expression may optionally be a function of `eps`,
which is a shape parameter obtained by calling `get_eps()` or
`sympy.symbols('eps')`. If `eps` is not provided then `r` is
substituted with `r*eps`.
support : float or sympy expression
Indicates the support of the RBF. The RBF is set to zero for
radial distances greater than `support`, regardless of what `expr`
evaluates to. This can be a float or a sympy expression containing
`eps`.
tol : float or sympy expression, optional
This is for when an RBF or its derivatives contain a removable
singularity at the center. If `tol` is specified, then a numerical
estimate of the RBF value at its center will be made, using linear
extrapolation, and that estimate will be returned for all
evaluation points, `x`, that are within `tol` of the RBF center,
`c`. If the limit of the RBF at `x = c` is known, then it can be
manually specified with the `limits` arguments. `tol` can be a
float or a sympy expression containing `eps`.
limits : dict, optional
Contains the values of the RBF or its derivatives at the center.
For example, `{(0, 1):2*eps}` indicates that the derivative with
respect to the second spatial dimension is `2*eps` at `x = c`. If
this dictionary is provided and `tol` is not `None`, then it will
        be searched before estimating the limit with the method described
above.
'''
@property
def supp(self):
return self._supp
def __init__(self, expr, supp, **kwargs):
RBF.__init__(self, expr, **kwargs)
## SANITIZE `SUPP`
# make sure `supp` is a scalar or a sympy expression of `eps`
supp = sympy.sympify(supp)
other_symbols = supp.free_symbols.difference({_EPS})
if len(other_symbols) != 0:
raise ValueError(
'`supp` cannot contain any symbols other than `eps`')
self._supp = supp
def __call__(self, x, c, eps=1.0, diff=None):
'''
Numerically evaluates the RBF or its derivatives.
Parameters
----------
x : (N, D) float array
Evaluation points
c : (M, D) float array
RBF centers
eps : float, optional
Shape parameter
diff : (D,) int array, optional
Specifies the derivative order for each Cartesian direction. For
example, if there are three spatial dimensions then providing
(2, 0, 1) would cause this function to return the RBF after
differentiating it twice along the first axis and once along the
third axis.
Returns
-------
out : (N, M) csc sparse matrix
The RBFs with centers `c` evaluated at `x`
'''
x = np.asarray(x, dtype=float)
assert_shape(x, (None, None), 'x')
c = np.asarray(c, dtype=float)
assert_shape(c, (None, x.shape[1]), 'c')
if not np.isscalar(eps):
raise NotImplementedError(
'`eps` must be a scalar for `SparseRBF` instances')
# convert scalar to (1,) array
eps = np.array([eps], dtype=float)
if diff is None:
diff = (0,)*x.shape[1]
else:
# make sure diff is immutable
diff = tuple(diff)
assert_shape(diff, (x.shape[1],), 'diff')
# add numerical function to cache if not already
if diff not in self._cache:
self._add_diff_to_cache(diff)
# convert self.supp from a sympy expression to a float
supp = float(self.supp.subs(_EPS, eps[0]))
# find the nonzero entries based on distances between `x` and `c`
nx, nc = x.shape[0], c.shape[0]
xtree = cKDTree(x)
ctree = cKDTree(c)
# `idx` contains the indices of `x` which are within
# `supp` of each node in `c`
idx = ctree.query_ball_tree(xtree, supp)
# total nonzero entries in the output array
nnz = sum(len(i) for i in idx)
# allocate sparse matrix data
data = np.zeros(nnz, dtype=float)
rows = np.zeros(nnz, dtype=int)
cols = np.zeros(nnz, dtype=int)
# `n` is the total number of data entries thus far
n = 0
for i, idxi in enumerate(idx):
            # `m` is the number of evaluation points within `supp` of center `i`
# Repository: WONDER-project/GSAS-II-WONDER-OSX
# -*- coding: utf-8 -*-
#GSASII - phase data display routines
########### SVN repository information ###################
# $Date: 2019-08-07 11:35:30 -0500 (Wed, 07 Aug 2019) $
# $Author: vondreele $
# $Revision: 4080 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIddataGUI.py $
# $Id: GSASIIddataGUI.py 4080 2019-08-07 16:35:30Z vondreele $
########### SVN repository information ###################
'''
*GSASIIddataGUI: Phase Diffraction Data GUI*
--------------------------------------------
Module to create the GUI for display of diffraction data & phase
information that is shown in the data display window
(when a phase is selected).
'''
from __future__ import division, print_function
import wx
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4080 $")
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIplot as G2plt
import GSASIIpwd as G2pwd
import GSASIIphsGUI as G2phsGUI
import GSASIIctrlGUI as G2G
import numpy as np
import numpy.linalg as nl
WACV = wx.ALIGN_CENTER_VERTICAL
VERY_LIGHT_GREY = wx.Colour(235,235,235)
WHITE = wx.Colour(255,255,255)
BLACK = wx.Colour(0,0,0)
mapDefault = {'MapType':'','RefList':'','GridStep':0.25,'Show bonds':True,
'rho':[],'rhoMax':0.,'mapSize':10.0,'cutOff':50.,'Flip':False}
################################################################################
##### DData routines
################################################################################
def UpdateDData(G2frame,DData,data,hist='',Scroll=0):
'''Display the Diffraction Data associated with a phase
(items where there is a value for each histogram and phase)
:param wx.frame G2frame: the main GSAS-II frame object
:param wx.ScrolledWindow DData: notebook page to be used for the display
:param dict data: all the information on the phase in a dictionary
:param str hist: histogram name
:param int Scroll: previous scroll position
'''
def PlotSizer():
def OnPlotSel(event):
Obj = event.GetEventObject()
generalData['Data plot type'] = Obj.GetStringSelection()
G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
wx.CallLater(100,UpdateDData,G2frame,DData,data,G2frame.hist)
def OnPOhkl(event):
event.Skip()
Obj = event.GetEventObject()
Saxis = Obj.GetValue().split()
try:
hkl = [int(Saxis[i]) for i in range(3)]
except (ValueError,IndexError):
hkl = generalData['POhkl']
if not np.any(np.array(hkl)):
hkl = generalData['POhkl']
generalData['POhkl'] = hkl
h,k,l = hkl
Obj.SetValue('%3d %3d %3d'%(h,k,l))
G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
def OnProj(event):
Obj = event.GetEventObject()
generalData['3Dproj'] = Obj.GetValue()
G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
plotSizer = wx.BoxSizer(wx.VERTICAL)
choice = ['None','Mustrain','Size','Preferred orientation','St. proj. Inv. pole figure','Eq. area Inv. pole figure']
plotSel = wx.RadioBox(DData,wx.ID_ANY,'Select plot type:',choices=choice,
majorDimension=1,style=wx.RA_SPECIFY_COLS)
plotSel.SetStringSelection(generalData['Data plot type'])
plotSel.Bind(wx.EVT_RADIOBOX,OnPlotSel)
plotSizer.Add(plotSel)
if generalData['Data plot type'] == 'Preferred orientation':
POhklSizer = wx.BoxSizer(wx.HORIZONTAL)
POhklSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Plot preferred orientation for H K L: '),0,WACV)
h,k,l = generalData['POhkl']
poAxis = wx.TextCtrl(DData,wx.ID_ANY,'%3d %3d %3d'%(h,k,l),style=wx.TE_PROCESS_ENTER)
poAxis.Bind(wx.EVT_TEXT_ENTER,OnPOhkl)
poAxis.Bind(wx.EVT_KILL_FOCUS,OnPOhkl)
POhklSizer.Add(poAxis,0,WACV)
plotSizer.Add(POhklSizer)
elif generalData['Data plot type'] in ['Mustrain','Size']:
projSizer = wx.BoxSizer(wx.HORIZONTAL)
projSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Show projections for: '),0,WACV)
proj = ['','x','y','z','xy','xz','yz','xyz']
projType = wx.ComboBox(DData,wx.ID_ANY,value=generalData['3Dproj'],choices=proj,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
projType.Bind(wx.EVT_COMBOBOX, OnProj)
projSizer.Add(projType,0,WACV)
plotSizer.Add(projSizer)
return plotSizer
def ScaleSizer():
def OnScaleRef(event):
Obj = event.GetEventObject()
UseList[G2frame.hist]['Scale'][1] = Obj.GetValue()
def onChangeFraction(invalid,value,tc):
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
scaleSizer = wx.BoxSizer(wx.HORIZONTAL)
if 'PWDR' in G2frame.hist:
scaleRef = wx.CheckBox(DData,wx.ID_ANY,label=' Phase fraction: ')
elif 'HKLF' in G2frame.hist:
scaleRef = wx.CheckBox(DData,wx.ID_ANY,label=' Scale factor: ')
scaleRef.SetValue(UseList[G2frame.hist]['Scale'][1])
scaleRef.Bind(wx.EVT_CHECKBOX, OnScaleRef)
scaleSizer.Add(scaleRef,0,WACV|wx.LEFT,5)
scaleVal = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Scale'],0,
min=0.,nDig=(10,4),typeHint=float,OnLeave=onChangeFraction)
scaleSizer.Add(scaleVal,0,WACV)
if 'PWDR' in G2frame.hist and generalData['Type'] != 'magnetic':
wtSum = G2pwd.PhaseWtSum(G2frame,G2frame.hist)
if wtSum and UseList[G2frame.hist]['Use']:
weightFr = UseList[G2frame.hist]['Scale'][0]*generalData['Mass']/wtSum
scaleSizer.Add(wx.StaticText(DData,label=' Wt. fraction: %.3f'%(weightFr)),0,WACV)
return scaleSizer
def OnLGmixRef(event):
Obj = event.GetEventObject()
hist,name = Indx[Obj.GetId()]
UseList[G2frame.hist][name][2][2] = Obj.GetValue()
def OnLGmixVal(event):
event.Skip()
Obj = event.GetEventObject()
hist,name = Indx[Obj.GetId()]
try:
value = float(Obj.GetValue())
UseList[G2frame.hist][name][1][2] = value
# if 0 <= value <= 1:
# UseList[G2frame.hist][name][1][2] = value
# else:
# raise ValueError
except ValueError:
pass
Obj.SetValue("%.4f"%(UseList[G2frame.hist][name][1][2])) #reset in case of error
def OnSizeType(event):
Obj = event.GetEventObject()
UseList[G2frame.hist]['Size'][0] = Obj.GetValue()
G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
def OnSizeRef(event):
Obj = event.GetEventObject()
hist,pid = Indx[Obj.GetId()]
if UseList[G2frame.hist]['Size'][0] == 'ellipsoidal':
UseList[G2frame.hist]['Size'][5][pid] = Obj.GetValue()
else:
UseList[G2frame.hist]['Size'][2][pid] = Obj.GetValue()
def OnSizeVal(event):
event.Skip()
Obj = event.GetEventObject()
hist,pid = Indx[Obj.GetId()]
if UseList[G2frame.hist]['Size'][0] == 'ellipsoidal':
try:
size = float(Obj.GetValue())
if pid < 3 and size < 0.001: #10A lower limit!
raise ValueError
UseList[G2frame.hist]['Size'][4][pid] = size
except ValueError:
pass
Obj.SetValue("%.5f"%(UseList[G2frame.hist]['Size'][4][pid])) #reset in case of error
wx.CallAfter(UpdateDData,G2frame,DData,data,G2frame.hist)
else:
try:
size = float(Obj.GetValue())
if size < 0.001: #10A lower limit!
raise ValueError
UseList[G2frame.hist]['Size'][1][pid] = size
except ValueError:
pass
Obj.SetValue("%.5f"%(UseList[G2frame.hist]['Size'][1][pid])) #reset in case of error
wx.CallAfter(G2plt.PlotSizeStrainPO,G2frame,data,hist)
def OnStrainType(event):
Obj = event.GetEventObject()
UseList[G2frame.hist]['Mustrain'][0] = Obj.GetValue()
G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
def OnStrainRef(event):
Obj = event.GetEventObject()
hist,pid = Indx[Obj.GetId()]
if UseList[G2frame.hist]['Mustrain'][0] == 'generalized':
UseList[G2frame.hist]['Mustrain'][5][pid] = Obj.GetValue()
else:
UseList[G2frame.hist]['Mustrain'][2][pid] = Obj.GetValue()
def OnStrainVal(event):
event.Skip()
Snames = G2spc.MustrainNames(SGData)
Obj = event.GetEventObject()
hist,pid = Indx[Obj.GetId()]
try:
strain = float(Obj.GetValue())
if UseList[G2frame.hist]['Mustrain'][0] == 'generalized':
if '4' in Snames[pid] and strain < 0:
raise ValueError
UseList[G2frame.hist]['Mustrain'][4][pid] = strain
else:
if strain <= 0:
raise ValueError
UseList[G2frame.hist]['Mustrain'][1][pid] = strain
except ValueError:
pass
if UseList[G2frame.hist]['Mustrain'][0] == 'generalized':
Obj.SetValue("%.1f"%(UseList[G2frame.hist]['Mustrain'][4][pid])) #reset in case of error
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
else:
Obj.SetValue("%.1f"%(UseList[G2frame.hist]['Mustrain'][1][pid])) #reset in case of error
wx.CallAfter(G2plt.PlotSizeStrainPO,G2frame,data,hist)
def OnStrainAxis(event):
event.Skip()
Obj = event.GetEventObject()
Saxis = Obj.GetValue().split()
try:
hkl = [int(Saxis[i]) for i in range(3)]
except (ValueError,IndexError):
hkl = UseList[G2frame.hist]['Mustrain'][3]
if not np.any(np.array(hkl)):
hkl = UseList[G2frame.hist]['Mustrain'][3]
UseList[G2frame.hist]['Mustrain'][3] = hkl
h,k,l = hkl
Obj.SetValue('%3d %3d %3d'%(h,k,l))
G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
def OnResetStrain(event):
Obj = event.GetEventObject()
Obj.SetValue(False)
item,name = Indx[Obj.GetId()]
if name == 'isotropic':
UseList[item]['Mustrain'][1][0] = 1000.0
elif name == 'uniaxial':
UseList[item]['Mustrain'][1][0] = 1000.0
UseList[item]['Mustrain'][1][1] = 1000.0
elif name == 'generalized':
muiso = 1000.
cell = generalData['Cell'][1:7]
vals = G2spc.Muiso2Shkl(muiso,SGData,cell)
nTerm = len(UseList[item]['Mustrain'][4])
for i in range(nTerm):
UseList[item]['Mustrain'][4][i] = vals[i]
G2plt.PlotSizeStrainPO(G2frame,data,item)
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
def OnHstrainRef(event):
Obj = event.GetEventObject()
hist,pid = Indx[Obj.GetId()]
UseList[G2frame.hist]['HStrain'][1][pid] = Obj.GetValue()
def OnHstrainVal(event):
event.Skip()
Obj = event.GetEventObject()
hist,pid = Indx[Obj.GetId()]
try:
strain = float(Obj.GetValue())
UseList[G2frame.hist]['HStrain'][0][pid] = strain
except ValueError:
pass
Obj.SetValue("%.3g"%(UseList[G2frame.hist]['HStrain'][0][pid])) #reset in case of error
def OnPOAxis(event):
event.Skip()
Obj = event.GetEventObject()
Saxis = Obj.GetValue().split()
try:
hkl = [int(Saxis[i]) for i in range(3)]
except (ValueError,IndexError):
hkl = UseList[G2frame.hist]['Pref.Ori.'][3]
if not np.any(np.array(hkl)):
hkl = UseList[G2frame.hist]['Pref.Ori.'][3]
UseList[G2frame.hist]['Pref.Ori.'][3] = hkl
h,k,l = hkl
Obj.SetValue('%3d %3d %3d'%(h,k,l))
def OnPOOrder(event):
Obj = event.GetEventObject()
Order = int(Obj.GetValue())
UseList[G2frame.hist]['Pref.Ori.'][4] = Order
UseList[G2frame.hist]['Pref.Ori.'][5] = SetPOCoef(Order,G2frame.hist)
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
def OnPOType(event):
Obj = event.GetEventObject()
if 'March' in Obj.GetValue():
UseList[G2frame.hist]['Pref.Ori.'][0] = 'MD'
else:
UseList[G2frame.hist]['Pref.Ori.'][0] = 'SH'
wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
def OnPORef(event):
Obj = event.GetEventObject()
UseList[G2frame.hist]['Pref.Ori.'][2] = Obj.GetValue()
def SetPOCoef(Order,hist):
cofNames = G2lat.GenSHCoeff(SGData['SGLaue'],'0',Order,False) #cylindrical & no M
newPOCoef = dict(zip(cofNames,np.zeros(len(cofNames))))
POCoeff = UseList[G2frame.hist]['Pref.Ori.'][5]
for cofName in POCoeff:
if cofName in cofNames:
newPOCoef[cofName] = POCoeff[cofName]
return newPOCoef
def checkAxis(axis):
if not np.any(np.array(axis)):
return False
return axis
def TopSizer(name,choices,parm,OnType):
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(DData,wx.ID_ANY,name),0,WACV)
sizeType = wx.ComboBox(DData,wx.ID_ANY,value=UseList[G2frame.hist][parm][0],choices=choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
sizeType.Bind(wx.EVT_COMBOBOX, OnType)
topSizer.Add(sizeType,0,WACV|wx.BOTTOM,5)
return topSizer
def LGmixSizer(name,OnVal,OnRef):
lgmixSizer = wx.BoxSizer(wx.HORIZONTAL)
lgmixRef = wx.CheckBox(DData,wx.ID_ANY,label='LGmix')
lgmixRef.thisown = False
lgmixRef.SetValue(UseList[G2frame.hist][name][2][2])
Indx[lgmixRef.GetId()] = [G2frame.hist,name]
lgmixRef.Bind(wx.EVT_CHECKBOX, OnRef)
lgmixSizer.Add(lgmixRef,0,WACV|wx.LEFT,5)
# azmthOff = G2G.ValidatedTxtCtrl(G2frame.dataDisplay,data,'azmthOff',nDig=(10,2),typeHint=float,OnLeave=OnAzmthOff)
lgmixVal = wx.TextCtrl(DData,wx.ID_ANY,
'%.4f'%(UseList[G2frame.hist][name][1][2]),style=wx.TE_PROCESS_ENTER)
Indx[lgmixVal.GetId()] = [G2frame.hist,name]
lgmixVal.Bind(wx.EVT_TEXT_ENTER,OnVal)
lgmixVal.Bind(wx.EVT_KILL_FOCUS,OnVal)
lgmixSizer.Add(lgmixVal,0,WACV|wx.LEFT,5)
return lgmixSizer
def ResetSizer(name,OnReset):
resetSizer = wx.BoxSizer(wx.HORIZONTAL)
reset = wx.CheckBox(DData,wx.ID_ANY,label='Reset?')
reset.thisown = False
reset.SetValue(False)
Indx[reset.GetId()] = [G2frame.hist,name]
reset.Bind(wx.EVT_CHECKBOX,OnReset)
resetSizer.Add(reset,0,WACV|wx.TOP|wx.LEFT,5)
return resetSizer
def IsoSizer(name,parm,fmt,OnVal,OnRef):
isoSizer = wx.BoxSizer(wx.HORIZONTAL)
sizeRef = wx.CheckBox(DData,wx.ID_ANY,label=name)
sizeRef.thisown = False
sizeRef.SetValue(UseList[G2frame.hist][parm][2][0])
Indx[sizeRef.GetId()] = [G2frame.hist,0]
sizeRef.Bind(wx.EVT_CHECKBOX, OnRef)
isoSizer.Add(sizeRef,0,WACV|wx.LEFT,5)
# azmthOff = G2G.ValidatedTxtCtrl(G2frame.dataDisplay,data,'azmthOff',nDig=(10,2),typeHint=float,OnLeave=OnAzmthOff)
sizeVal = wx.TextCtrl(DData,wx.ID_ANY,
fmt%(UseList[G2frame.hist][parm][1][0]),style=wx.TE_PROCESS_ENTER)
Indx[sizeVal.GetId()] = [G2frame.hist,0]
sizeVal.Bind(wx.EVT_TEXT_ENTER,OnVal)
sizeVal.Bind(wx.EVT_KILL_FOCUS,OnVal)
isoSizer.Add(sizeVal,0,WACV)
return isoSizer
def UniSizer(parm,OnAxis):
uniSizer = wx.BoxSizer(wx.HORIZONTAL)
uniSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Unique axis, H K L: '),0,WACV)
h,k,l = UseList[G2frame.hist][parm][3]
Axis = wx.TextCtrl(DData,wx.ID_ANY,'%3d %3d %3d'%(h,k,l),style=wx.TE_PROCESS_ENTER)
Axis.Bind(wx.EVT_TEXT_ENTER,OnAxis)
Axis.Bind(wx.EVT_KILL_FOCUS,OnAxis)
uniSizer.Add(Axis,0,WACV|wx.LEFT,5)
return uniSizer
def UniDataSizer(parmName,parm,fmt,OnVal,OnRef):
dataSizer = wx.BoxSizer(wx.HORIZONTAL)
parms = zip([' Equatorial '+parmName,' Axial '+parmName],
UseList[G2frame.hist][parm][1],UseList[G2frame.hist][parm][2],range(2))
for Pa,val,ref,Id in parms:
sizeRef = wx.CheckBox(DData,wx.ID_ANY,label=Pa)
sizeRef.thisown = False
sizeRef.SetValue(ref)
Indx[sizeRef.GetId()] = [G2frame.hist,Id]
sizeRef.Bind(wx.EVT_CHECKBOX, OnRef)
dataSizer.Add(sizeRef,0,WACV|wx.LEFT,5)
# azmthOff = G2G.ValidatedTxtCtrl(G2frame.dataDisplay,data,'azmthOff',nDig=(10,2),typeHint=float,OnLeave=OnAzmthOff)
sizeVal = wx.TextCtrl(DData,wx.ID_ANY,fmt%(val),style=wx.TE_PROCESS_ENTER)
Indx[sizeVal.GetId()] = [G2frame.hist,Id]
sizeVal.Bind(wx.EVT_TEXT_ENTER,OnVal)
sizeVal.Bind(wx.EVT_KILL_FOCUS,OnVal)
dataSizer.Add(sizeVal,0,WACV|wx.BOTTOM,5)
return dataSizer
def EllSizeDataSizer():
parms = zip(['S11','S22','S33','S12','S13','S23'],UseList[G2frame.hist]['Size'][4],
UseList[G2frame.hist]['Size'][5],range(6))
dataSizer = wx.BoxSizer(wx.VERTICAL)
# dataSizer = wx.FlexGridSizer(0,6,5,5)
matrixSizer = wx.FlexGridSizer(0,6,5,5)
Sij = []
for Pa,val,ref,id in parms:
sizeRef = wx.CheckBox(DData,wx.ID_ANY,label=Pa)
sizeRef.thisown = False
sizeRef.SetValue(ref)
Indx[sizeRef.GetId()] = [G2frame.hist,id]
sizeRef.Bind(wx.EVT_CHECKBOX, OnSizeRef)
# dataSizer.Add(sizeRef,0,WACV)
matrixSizer.Add(sizeRef,0,WACV)
# azmthOff = G2G.ValidatedTxtCtrl(G2frame.dataDisplay,data,'azmthOff',nDig=(10,2),typeHint=float,OnLeave=OnAzmthOff)
sizeVal = wx.TextCtrl(DData,wx.ID_ANY,'%.3f'%(val),style=wx.TE_PROCESS_ENTER)
# Create Sij matrix
Sij += [val]
Indx[sizeVal.GetId()] = [G2frame.hist,id]
sizeVal.Bind(wx.EVT_TEXT_ENTER,OnSizeVal)
sizeVal.Bind(wx.EVT_KILL_FOCUS,OnSizeVal)
# dataSizer.Add(sizeVal,0,WACV)
matrixSizer.Add(sizeVal,0,WACV)
dataSizer.Add(matrixSizer, 0, WACV)
Esize,Rsize = nl.eigh(G2lat.U6toUij(np.asarray(Sij)))
lengths = Esize
G,g = G2lat.cell2Gmat(data['General']['Cell'][1:7]) #recip & real metric tensors
GA,GB = G2lat.Gmat2AB(G) #Orthogonalization matricies
hkls = [x/(sum(x**2)**0.5) for x in np.dot(Rsize, GA)]
Ids = np.argsort(lengths)
dataSizer.Add(wx.StaticText(DData,label=' Principal ellipsoid components:'),0,WACV)
compSizer = wx.FlexGridSizer(3,3,5,5)
Axes = [' Short Axis:',' Middle Axis:',' Long Axis:']
for Id in Ids:
compSizer.Add(wx.StaticText(DData,label=Axes[Id]),0,WACV)
compSizer.Add(wx.StaticText(DData,label='(%.3f, %.3f, %.3f) '%(hkls[Id][0], hkls[Id][1], hkls[Id][2])),0,WACV)
compSizer.Add(wx.StaticText(DData,label='Length: %.3f'%lengths[Id]),0,WACV)
dataSizer.Add(compSizer)
return dataSizer
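# Hedged numpy-only sketch (not GSAS-II code): illustrates the eigen-analysis
# used in EllSizeDataSizer above. A symmetric tensor is assembled from the six
# components (S11,S22,S33,S12,S13,S23), diagonalized with eigh, and the
# eigenvalues sorted short-to-long; G2lat.U6toUij is assumed to do the
# equivalent 6-component-to-3x3 packing.
def _demoPrincipalAxes():
    import numpy as np
    import numpy.linalg as nl
    S11,S22,S33,S12,S13,S23 = 1.0,2.0,3.0,0.1,0.0,0.0
    Sij = np.array([[S11,S12,S13],[S12,S22,S23],[S13,S23,S33]])
    lengths,axes = nl.eigh(Sij)     # eigh returns eigenvalues in ascending order
    order = np.argsort(lengths)     # short, middle, long axis
    return lengths[order],axes[:,order]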
def GenStrainDataSizer():
Snames = G2spc.MustrainNames(SGData)
numb = len(Snames)
onumb = len(UseList[G2frame.hist]['Mustrain'][4])
while onumb < numb:
UseList[G2frame.hist]['Mustrain'][4].append(0.0)
UseList[G2frame.hist]['Mustrain'][5].append(False)
onumb += 1
muMean = G2spc.MuShklMean(SGData,Amat,UseList[G2frame.hist]['Mustrain'][4])
parms = zip(Snames,UseList[G2frame.hist]['Mustrain'][4],UseList[G2frame.hist]['Mustrain'][5],range(numb))
dataSizer = wx.FlexGridSizer(0,6,5,5)
for Pa,val,ref,Id in parms:
strainRef = wx.CheckBox(DData,wx.ID_ANY,label=Pa)
strainRef.thisown = False
strainRef.SetValue(ref)
Indx[strainRef.GetId()] = [G2frame.hist,Id]
strainRef.Bind(wx.EVT_CHECKBOX, OnStrainRef)
dataSizer.Add(strainRef,0,WACV)
# azmthOff = G2G.ValidatedTxtCtrl(G2frame.dataDisplay,data,'azmthOff',nDig=(10,2),typeHint=float,OnLeave=OnAzmthOff)
strainVal = wx.TextCtrl(DData,wx.ID_ANY,'%.1f'%(val),style=wx.TE_PROCESS_ENTER)
Indx[strainVal.GetId()] = [G2frame.hist,Id]
strainVal.Bind(wx.EVT_TEXT_ENTER,OnStrainVal)
strainVal.Bind(wx.EVT_KILL_FOCUS,OnStrainVal)
dataSizer.Add(strainVal,0,WACV)
dataSizer.Add(wx.StaticText(DData,label=' Mean mustrain %.1f'%muMean),0,WACV)
return dataSizer
def HstrainSizer():
hstrainSizer = wx.FlexGridSizer(0,6,5,5)
Hsnames = G2spc.HStrainNames(SGData)
parms = zip(Hsnames,UseList[G2frame.hist]['HStrain'][0],UseList[G2frame.hist]['HStrain'][1],range(len(Hsnames)))
for Pa,val,ref,Id in parms:
m.c934 = Constraint(expr= - m.b405 + m.b406 - m.b886 <= 0)
m.c935 = Constraint(expr= - m.b406 + m.b407 - m.b887 <= 0)
m.c936 = Constraint(expr= - m.b407 + m.b408 - m.b888 <= 0)
m.c937 = Constraint(expr= - m.b408 + m.b409 - m.b889 <= 0)
m.c938 = Constraint(expr= m.b410 - m.b890 <= 0)
m.c939 = Constraint(expr= - m.b410 + m.b411 - m.b891 <= 0)
m.c940 = Constraint(expr= - m.b411 + m.b412 - m.b892 <= 0)
m.c941 = Constraint(expr= - m.b412 + m.b413 - m.b893 <= 0)
m.c942 = Constraint(expr= - m.b413 + m.b414 - m.b894 <= 0)
m.c943 = Constraint(expr= - m.b414 + m.b415 - m.b895 <= 0)
m.c944 = Constraint(expr= - m.b415 + m.b416 - m.b896 <= 0)
m.c945 = Constraint(expr= - m.b416 + m.b417 - m.b897 <= 0)
m.c946 = Constraint(expr= - m.b417 + m.b418 - m.b898 <= 0)
m.c947 = Constraint(expr= - m.b418 + m.b419 - m.b899 <= 0)
m.c948 = Constraint(expr= - m.b419 + m.b420 - m.b900 <= 0)
m.c949 = Constraint(expr= - m.b420 + m.b421 - m.b901 <= 0)
m.c950 = Constraint(expr= - m.b421 + m.b422 - m.b902 <= 0)
m.c951 = Constraint(expr= - m.b422 + m.b423 - m.b903 <= 0)
m.c952 = Constraint(expr= - m.b423 + m.b424 - m.b904 <= 0)
m.c953 = Constraint(expr= - m.b424 + m.b425 - m.b905 <= 0)
m.c954 = Constraint(expr= - m.b425 + m.b426 - m.b906 <= 0)
m.c955 = Constraint(expr= - m.b426 + m.b427 - m.b907 <= 0)
m.c956 = Constraint(expr= - m.b427 + m.b428 - m.b908 <= 0)
m.c957 = Constraint(expr= - m.b428 + m.b429 - m.b909 <= 0)
m.c958 = Constraint(expr= - m.b429 + m.b430 - m.b910 <= 0)
m.c959 = Constraint(expr= - m.b430 + m.b431 - m.b911 <= 0)
m.c960 = Constraint(expr= - m.b431 + m.b432 - m.b912 <= 0)
m.c961 = Constraint(expr= - m.b432 + m.b433 - m.b913 <= 0)
m.c962 = Constraint(expr= m.b434 - m.b914 <= 0)
m.c963 = Constraint(expr= - m.b434 + m.b435 - m.b915 <= 0)
m.c964 = Constraint(expr= - m.b435 + m.b436 - m.b916 <= 0)
m.c965 = Constraint(expr= - m.b436 + m.b437 - m.b917 <= 0)
m.c966 = Constraint(expr= - m.b437 + m.b438 - m.b918 <= 0)
m.c967 = Constraint(expr= - m.b438 + m.b439 - m.b919 <= 0)
m.c968 = Constraint(expr= - m.b439 + m.b440 - m.b920 <= 0)
m.c969 = Constraint(expr= - m.b440 + m.b441 - m.b921 <= 0)
m.c970 = Constraint(expr= - m.b441 + m.b442 - m.b922 <= 0)
m.c971 = Constraint(expr= - m.b442 + m.b443 - m.b923 <= 0)
m.c972 = Constraint(expr= - m.b443 + m.b444 - m.b924 <= 0)
m.c973 = Constraint(expr= - m.b444 + m.b445 - m.b925 <= 0)
m.c974 = Constraint(expr= - m.b445 + m.b446 - m.b926 <= 0)
m.c975 = Constraint(expr= - m.b446 + m.b447 - m.b927 <= 0)
m.c976 = Constraint(expr= - m.b447 + m.b448 - m.b928 <= 0)
m.c977 = Constraint(expr= - m.b448 + m.b449 - m.b929 <= 0)
m.c978 = Constraint(expr= - m.b449 + m.b450 - m.b930 <= 0)
m.c979 = Constraint(expr= - m.b450 + m.b451 - m.b931 <= 0)
m.c980 = Constraint(expr= - m.b451 + m.b452 - m.b932 <= 0)
m.c981 = Constraint(expr= - m.b452 + m.b453 - m.b933 <= 0)
m.c982 = Constraint(expr= - m.b453 + m.b454 - m.b934 <= 0)
m.c983 = Constraint(expr= - m.b454 + m.b455 - m.b935 <= 0)
m.c984 = Constraint(expr= - m.b455 + m.b456 - m.b936 <= 0)
m.c985 = Constraint(expr= - m.b456 + m.b457 - m.b937 <= 0)
m.c986 = Constraint(expr= m.b458 - m.b938 <= 0)
m.c987 = Constraint(expr= - m.b458 + m.b459 - m.b939 <= 0)
m.c988 = Constraint(expr= - m.b459 + m.b460 - m.b940 <= 0)
m.c989 = Constraint(expr= - m.b460 + m.b461 - m.b941 <= 0)
m.c990 = Constraint(expr= - m.b461 + m.b462 - m.b942 <= 0)
m.c991 = Constraint(expr= - m.b462 + m.b463 - m.b943 <= 0)
m.c992 = Constraint(expr= - m.b463 + m.b464 - m.b944 <= 0)
m.c993 = Constraint(expr= - m.b464 + m.b465 - m.b945 <= 0)
m.c994 = Constraint(expr= - m.b465 + m.b466 - m.b946 <= 0)
m.c995 = Constraint(expr= - m.b466 + m.b467 - m.b947 <= 0)
m.c996 = Constraint(expr= - m.b467 + m.b468 - m.b948 <= 0)
m.c997 = Constraint(expr= - m.b468 + m.b469 - m.b949 <= 0)
m.c998 = Constraint(expr= - m.b469 + m.b470 - m.b950 <= 0)
m.c999 = Constraint(expr= - m.b470 + m.b471 - m.b951 <= 0)
m.c1000 = Constraint(expr= - m.b471 + m.b472 - m.b952 <= 0)
m.c1001 = Constraint(expr= - m.b472 + m.b473 - m.b953 <= 0)
m.c1002 = Constraint(expr= - m.b473 + m.b474 - m.b954 <= 0)
m.c1003 = Constraint(expr= - m.b474 + m.b475 - m.b955 <= 0)
m.c1004 = Constraint(expr= - m.b475 + m.b476 - m.b956 <= 0)
m.c1005 = Constraint(expr= - m.b476 + m.b477 - m.b957 <= 0)
m.c1006 = Constraint(expr= - m.b477 + m.b478 - m.b958 <= 0)
m.c1007 = Constraint(expr= - m.b478 + m.b479 - m.b959 <= 0)
m.c1008 = Constraint(expr= - m.b479 + m.b480 - m.b960 <= 0)
m.c1009 = Constraint(expr= - m.b480 + m.b481 - m.b961 <= 0)
m.c1010 = Constraint(expr= m.x2 <= 200)
m.c1011 = Constraint(expr= - m.x2 + m.x3 <= 200)
m.c1012 = Constraint(expr= - m.x3 + m.x4 <= 200)
m.c1013 = Constraint(expr= - m.x4 + m.x5 <= 200)
m.c1014 = Constraint(expr= - m.x5 + m.x6 <= 200)
m.c1015 = Constraint(expr= - m.x6 + m.x7 <= 200)
m.c1016 = Constraint(expr= - m.x7 + m.x8 <= 200)
m.c1017 = Constraint(expr= - m.x8 + m.x9 <= 200)
m.c1018 = Constraint(expr= - m.x9 + m.x10 <= 200)
m.c1019 = Constraint(expr= - m.x10 + m.x11 <= 200)
m.c1020 = Constraint(expr= - m.x11 + m.x12 <= 200)
m.c1021 = Constraint(expr= - m.x12 + m.x13 <= 200)
m.c1022 = Constraint(expr= - m.x13 + m.x14 <= 200)
m.c1023 = Constraint(expr= - m.x14 + m.x15 <= 200)
m.c1024 = Constraint(expr= - m.x15 + m.x16 <= 200)
m.c1025 = Constraint(expr= - m.x16 + m.x17 <= 200)
m.c1026 = Constraint(expr= - m.x17 + m.x18 <= 200)
m.c1027 = Constraint(expr= - m.x18 + m.x19 <= 200)
m.c1028 = Constraint(expr= - m.x19 + m.x20 <= 200)
m.c1029 = Constraint(expr= - m.x20 + m.x21 <= 200)
m.c1030 = Constraint(expr= - m.x21 + m.x22 <= 200)
m.c1031 = Constraint(expr= - m.x22 + m.x23 <= 200)
m.c1032 = Constraint(expr= - m.x23 + m.x24 <= 200)
m.c1033 = Constraint(expr= - m.x24 + m.x25 <= 200)
m.c1034 = Constraint(expr= m.x26 <= 200)
m.c1035 = Constraint(expr= - m.x26 + m.x27 <= 200)
m.c1036 = Constraint(expr= - m.x27 + m.x28 <= 200)
m.c1037 = Constraint(expr= - m.x28 + m.x29 <= 200)
m.c1038 = Constraint(expr= - m.x29 + m.x30 <= 200)
m.c1039 = Constraint(expr= - m.x30 + m.x31 <= 200)
m.c1040 = Constraint(expr= - m.x31 + m.x32 <= 200)
m.c1041 = Constraint(expr= - m.x32 + m.x33 <= 200)
m.c1042 = Constraint(expr= - m.x33 + m.x34 <= 200)
m.c1043 = Constraint(expr= - m.x34 + m.x35 <= 200)
m.c1044 = Constraint(expr= - m.x35 + m.x36 <= 200)
m.c1045 = Constraint(expr= - m.x36 + m.x37 <= 200)
m.c1046 = Constraint(expr= - m.x37 + m.x38 <= 200)
m.c1047 = Constraint(expr= - m.x38 + m.x39 <= 200)
m.c1048 = Constraint(expr= - m.x39 + m.x40 <= 200)
m.c1049 = Constraint(expr= - m.x40 + m.x41 <= 200)
m.c1050 = Constraint(expr= - m.x41 + m.x42 <= 200)
m.c1051 = Constraint(expr= - m.x42 + m.x43 <= 200)
m.c1052 = Constraint(expr= - m.x43 + m.x44 <= 200)
m.c1053 = Constraint(expr= - m.x44 + m.x45 <= 200)
m.c1054 = Constraint(expr= - m.x45 + m.x46 <= 200)
m.c1055 = Constraint(expr= - m.x46 + m.x47 <= 200)
m.c1056 = Constraint(expr= - m.x47 + m.x48 <= 200)
m.c1057 = Constraint(expr= - m.x48 + m.x49 <= 200)
m.c1058 = Constraint(expr= m.x50 <= 100)
m.c1059 = Constraint(expr= - m.x50 + m.x51 <= 100)
m.c1060 = Constraint(expr= - m.x51 + m.x52 <= 100)
m.c1061 = Constraint(expr= - m.x52 + m.x53 <= 100)
m.c1062 = Constraint(expr= - m.x53 + m.x54 <= 100)
m.c1063 = Constraint(expr= - m.x54 + m.x55 <= 100)
m.c1064 = Constraint(expr= - m.x55 + m.x56 <= 100)
m.c1065 = Constraint(expr= - m.x56 + m.x57 <= 100)
m.c1066 = Constraint(expr= - m.x57 + m.x58 <= 100)
m.c1067 = Constraint(expr= - m.x58 + m.x59 <= 100)
m.c1068 = Constraint(expr= - m.x59 + m.x60 <= 100)
m.c1069 = Constraint(expr= - m.x60 + m.x61 <= 100)
m.c1070 = Constraint(expr= - m.x61 + m.x62 <= 100)
m.c1071 = Constraint(expr= - m.x62 + m.x63 <= 100)
m.c1072 = Constraint(expr= - m.x63 + m.x64 <= 100)
m.c1073 = Constraint(expr= - m.x64 + m.x65 <= 100)
m.c1074 = Constraint(expr= - m.x65 + m.x66 <= 100)
m.c1075 = Constraint(expr= - m.x66 + m.x67 <= 100)
m.c1076 = Constraint(expr= - m.x67 + m.x68 <= 100)
m.c1077 = Constraint(expr= - m.x67 + m.x68 <= 100)
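# Hedged illustration (not part of the generated model): constraint chains
# like c1010-c1033 follow the pattern x[k] - x[k-1] <= 200 and could be built
# with an indexed Constraint and a rule on a fresh model. All names below are
# hypothetical.
def _demo_difference_chain():
    from pyomo.environ import ConcreteModel, Var, Constraint, NonNegativeReals
    demo = ConcreteModel()
    demo.x = Var(range(2, 26), domain=NonNegativeReals)
    def chain_rule(mdl, k):
        # the first link bounds x2 directly; later links bound the increments
        if k == 2:
            return mdl.x[2] <= 200
        return mdl.x[k] - mdl.x[k - 1] <= 200
    demo.chain = Constraint(range(2, 26), rule=chain_rule)
    return demo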
"690.7(A)(3)-P100 + {:1.1%} SF: {:.3f} V<br>".format(
voc_summary['safety_factor'][
'690.7(A)(3)-P100'],
max_module_voltage_with_safety_factor[
'690.7(A)(3)-P100']) + \
'Maximum String Length: {:.0f}<br>'.format(
voc_summary['string_length'][
'690.7(A)(3)-P100']) + \
'Conservative 690.7(A)(3) value for string length.',
'690.7(A)(1)-DAY': 'Traditional daytime Voc, using 1 sun irradiance and<br>' + \
'mean yearly minimum daytime (GHI>150 W/m^2) dry bulb temperature of {:.1f} C.<br>'.format(
mean_yearly_min_day_temp) + \
'Day Voc: {:.3f} V<br>'.format(
voc_values['690.7(A)(1)-DAY']) + \
'Maximum String Length: {:.0f}<br>'.format(
voc_summary['string_length'][
'690.7(A)(1)-DAY']) + \
'Recommended 690.7(A)(1) Value',
'690.7(A)(1)-NSRDB': 'Traditional 690.7(A)(1) value, using 1 sun irradiance and<br>' + \
'mean yearly minimum dry bulb temperature of {:.1f} C.<br>'.format(
mean_yearly_min_temp) + \
'690.7(A)(1)-NSRDB: {:.3f}<br>'.format(
voc_values['690.7(A)(1)-NSRDB']) + \
'Maximum String Length: {:.0f}'.format(
voc_summary['string_length'][
'690.7(A)(1)-NSRDB']),
'690.7(A)(1)-ASHRAE': 'Traditional 690.7(A)(1) value<br>' + \
'using 1 sun irradiance and<br>' + \
'mean yearly minimum dry bulb temperature of {:.1f} C.<br>'.format(
lowest_expected_temperature_ashrae) + \
'Trad-ASHRAE-690.7a1 Voc: {:.3f}<br>'.format(
voc_values['690.7(A)(1)-ASHRAE']) + \
'Maximum String Length: {:.0f}'.format(
voc_summary['string_length'][
'690.7(A)(1)-ASHRAE']),
'690.7(A)(2)-ASHRAE': 'Traditional 690.7(A)(2) value<br>' + \
'using NEC derating table and<br>' + \
'mean yearly minimum dry bulb temperature of {:.1f} C.<br>'.format(
lowest_expected_temperature_ashrae) + \
'Trad-ASHRAE-690.7(A)(2) Voc: {:.3f}<br>'.format(
voc_values['690.7(A)(2)-ASHRAE']) + \
'Maximum String Length: {:.0f}'.format(
voc_summary['string_length'][
'690.7(A)(2)-ASHRAE']),
# 'Norm_P99.5': "Normal Voc, 99.5 percentile Voc value<br>".format(voc_values['Norm_P99.5']) +\
# "assuming array always oriented normal to sun.<br>" +\
# "Norm_P99.5 Voc: {:.3f}<br>".format(voc_values['Norm_P99.5']) +\
# "Maximum String Length: {:.0f}".format(voc_summary['string_length']['Norm_P99.5'])
}
short_note = {
'690.7(A)(3)-P99.5': "Recommended 690.7(A)(3) value for string length.",
'690.7(A)(3)-P100': 'Conservative 690.7(A)(3) value for string length.',
'690.7(A)(1)-DAY': 'Traditional design using daytime temp (GHI>150 W/m^2)',
'690.7(A)(1)-ASHRAE': 'Traditional design using ASHRAE and temperature coefficient',
'690.7(A)(1)-NSRDB': 'Traditional design using NSRDB and temperature coefficient',
'690.7(A)(2)-ASHRAE': 'Traditional design using ASHRAE and standard derating.',
# 'Norm_P99.5': ""
}
voc_summary['long_note'] = voc_summary.index.map(long_note)
voc_summary['short_note'] = voc_summary.index.map(short_note)
return voc_summary
def get_s3_csv(filename):
"""
"""
import boto3
# filename = '2017DesignConditions_s.xlsx.csv'
bucket = 'pvtools-nsrdb-pickle'
# connect to AWS S3
s3 = boto3.resource('s3')
obj = s3.Object(bucket, filename)
df = pd.read_csv(obj.get()['Body'])
return df
def scale_to_hours_per_year(y, info):
return y / info['timedelta_in_years'] * info['interval_in_hours']
def make_voc_histogram(df, info, number_bins=400):
# Voc histogram
voc_hist_y_raw, voc_hist_x_raw = np.histogram(df['v_oc'],
bins=np.linspace(
df['v_oc'].max() * 0.6,
df['v_oc'].max() + 1,
number_bins))
voc_hist_y = scale_to_hours_per_year(voc_hist_y_raw, info)[1:]
voc_hist_x = voc_hist_x_raw[1:-1]
return voc_hist_x, voc_hist_y
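# Hedged usage sketch for the histogram helpers above; the synthetic inputs
# only need a 'v_oc' column and the two time-scaling keys, everything else is
# illustrative.
def _demo_make_voc_histogram():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'v_oc': np.random.uniform(40, 50, 8760)})
    info = {'timedelta_in_years': 1.0, 'interval_in_hours': 1.0}
    voc_x, voc_y = make_voc_histogram(df, info, number_bins=100)
    # with 1-hour intervals over 1 year, the counts are already hours/year
    assert abs(voc_y.sum() - len(df)) / len(df) < 0.05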
def make_simulation_summary(df, info, module_parameters, racking_parameters,
thermal_model, string_design_voltage, safety_factor,
ashrae='local_load'):
"""
Makes a text summary of the simulation.
Parameters
----------
df
info
module_parameters
racking_parameters
thermal_model
string_design_voltage
safety_factor
ashrae
Returns
-------
"""
voc_summary = make_voc_summary(df, info, module_parameters,
string_design_voltage=string_design_voltage,
safety_factor=safety_factor,
ashrae=ashrae)
if isinstance(thermal_model, str):
thermal_model = {'Model parameters': thermal_model}
if 'Location ID' in info:
info['Location_ID'] = info['Location ID']
if 'Time Zone' in info:
info['local_time_zone'] = info['Time Zone']
# extra_parameters = calculate_extra_module_parameters(module_parameters)
voc_hist_x, voc_hist_y = make_voc_histogram(df, info, number_bins=200)
pd.DataFrame({'Voc': voc_hist_x, 'hours per year': voc_hist_y}).to_csv(
index=False)
summary = \
'Simulation Run Date,' + str(datetime.datetime.now()) + '\n\n' + \
'Weather data,\n' + \
pd.Series(info)[
['Source', 'Latitude', 'Longitude', 'Location_ID',
'local_time_zone',
'Elevation', 'Version', 'interval_in_hours',
'timedelta_in_years']].to_csv(header=False) + '\n' + \
'Module Parameters\n' + \
pd.Series(module_parameters).to_csv(header=False) + '\n' + \
'Racking Parameters\n' + \
pd.Series(racking_parameters).to_csv(header=False) + '\n' + \
'Thermal model\n' + \
'model type, Sandia\n' + \
pd.Series(thermal_model).to_csv(header=False) + '\n' + \
'String Design Voltage,' + str(string_design_voltage) + '\n' + \
'vocmaxlib Version,' + vocmax.__version__ + '\n' + \
'\nKey Voc Values\n' + \
voc_summary.to_csv() + \
'\nVoc Histogram\n' + \
pd.DataFrame(
{'Voc': voc_hist_x,
'hours per year': voc_hist_y}
).to_csv(index=False)
return summary
def calculate_normal_voc(poa_direct, poa_diffuse, temp_cell, module_parameters,
spectral_loss=1, aoi_loss=1, FD=1):
"""
Parameters
----------
poa_direct
poa_diffuse
temp_cell
module_parameters
spectral_loss
aoi_loss
FD
Returns
-------
"""
effective_irradiance = calculate_effective_irradiance(
poa_direct,
poa_diffuse,
spectral_loss=spectral_loss,
aoi_loss=aoi_loss,
FD=FD
)
v_oc = calculate_voc(effective_irradiance, temp_cell,
module_parameters)
return v_oc
# def calculate_effective_irradiance_bifacial(poa_direct_front,
# poa_diffuse_front,
# poa_direct_back,
# spectral_loss_front=1,
# spectral_loss_back=1,
# aoi_loss_front=1,
# FD_front=1):
# """
#
# Parameters
# ----------
# poa_direct
# poa_diffuse
# spectral_loss
# aoi_loss
# FD
#
# Returns
# -------
# effective_irradiance in W/m^2
#
# """
# # See pvlib.pvsystem.sapm_effective_irradiance for source of this line:
# effective_irradiance = spectral_loss_front * (
# poa_direct_front * aoi_loss_front + FD_front * poa_diffuse_front) + \
# spectral_loss_back*poa_back
#
# return effective_irradiance
def calculate_effective_irradiance(poa_direct, poa_diffuse, spectral_loss=1,
aoi_loss=1, FD=1):
"""
Parameters
----------
poa_direct
poa_diffuse
spectral_loss
aoi_loss
FD
Returns
-------
effective_irradiance in W/m^2
"""
# See pvlib.pvsystem.sapm_effective_irradiance for source of this line:
effective_irradiance = spectral_loss * (
poa_direct * aoi_loss + FD * poa_diffuse)
return effective_irradiance
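# Hedged worked example of the SAPM effective-irradiance formula above: with
# spectral_loss=1, aoi_loss=0.99 and FD=1,
# Ee = 1 * (800 * 0.99 + 1 * 100) = 892 W/m^2. Numbers are illustrative only.
def _demo_effective_irradiance():
    ee = calculate_effective_irradiance(800, 100, spectral_loss=1,
                                        aoi_loss=0.99, FD=1)
    assert abs(ee - 892.0) < 1e-9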
def calculate_voc(effective_irradiance, temp_cell, module,
reference_temperature=25,
reference_irradiance=1000):
"""
Standard reference conditions are 1000 W/m2 and 25 C.
Parameters
----------
effective_irradiance
Irradiance in W/m^2
temp_cell
Cell temperature in C
module
Dict or Series containing the fields:
'alpha_sc': The short-circuit current temperature coefficient of the
module in units of A/C.
'a_ref': The product of the usual diode ideality factor (n,
unitless), number of cells in series (Ns), and cell thermal voltage
at reference conditions, in units of V
'I_L_ref': The light-generated current (or photocurrent) at reference
conditions, in amperes.
'I_o_ref': The dark or diode reverse saturation current at reference
conditions, in amperes.
'R_sh_ref': The shunt resistance at reference conditions, in ohms.
'R_s': The series resistance at reference conditions, in ohms.
'Adjust': The adjustment to the temperature coefficient for short
circuit current, in percent.
The IV model is selected by the 'iv_model' key of `module`, which may be
'sapm' (default when absent), 'cec' or 'desoto'.
Returns
-------
References
----------
[1] <NAME>, “An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model”, Journal of
Solar Energy Engineering, vol 134, 2012.
"""
if ('iv_model' not in module) or module['iv_model'] == 'sapm':
v_oc = sapm_voc(effective_irradiance, temp_cell, module,
reference_temperature=reference_temperature,
reference_irradiance=reference_irradiance)
elif module['iv_model'] in ['cec', 'desoto']:
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance, temp_cell, module)
# out = pvlib.pvsystem.singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth,
# method='newton')
v_oc = pvlib.singlediode.bishop88_v_from_i(0,
photocurrent,
saturation_current,
resistance_series,
resistance_shunt,
nNsVth,
method='newton')
else:
raise Exception('iv_model not recognized')
return v_oc
def singlediode_voc(effective_irradiance, temp_cell, module_parameters):
"""
Calculate voc using the singlediode model.
Parameters
----------
effective_irradiance
temp_cell
module_parameters
Returns
-------
"""
photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth = \
calcparams_singlediode(effective_irradiance, temp_cell,
module_parameters)
# out = pvlib.pvsystem.singlediode(photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth,
# method='newton')
v_oc = pvlib.singlediode.bishop88_v_from_i(0,
photocurrent,
saturation_current,
resistance_series,
resistance_shunt,
nNsVth,
method='newton')
return v_oc
def sapm_voc(effective_irradiance, temp_cell, module, reference_temperature=25,
reference_irradiance=1000):
"""
This function differs from the PVLIB version in that the effective
irradiance is in W/m2.
Parameters
----------
effective_irradiance : numeric
Effective irradiance in W/m^2
temp_cell : numeric
module : dict
parameters are:
'Voco'
'cells_in_series'
'Bvoco'
'Mbvoc'
reference_temperature : float
reference_irradiance : float
Returns
-------
"""
T0 = reference_temperature
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
# avoid problem with integer input
Ee = np.array(effective_irradiance, dtype='float64')
# set up masking for 0, positive, and nan inputs
Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
notnan = ~np.isnan(Ee)
np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
# Bvmpo = module['Bvmpo'] + module['Mbvmp'] * (1 - Ee)
if 'Mbvoc' in module:
Bvoco = module['Bvoco'] + module['Mbvoc'] * (1 - Ee)
else:
Bvoco = module['Bvoco']
delta = module['n_diode'] * kb * (temp_cell + 273.15) / q
# avoid repeated computation
logEe = np.full_like(Ee, np.nan)
np.log(Ee / reference_irradiance, where=Ee_gt_0, out=logEe)
logEe = np.where(Ee_eq_0, -np.inf, logEe)
# avoid repeated __getitem__
cells_in_series = module['cells_in_series']
v_oc = np.maximum(0, (
module['Voco'] + cells_in_series * delta * logEe +
Bvoco * (temp_cell - T0)))
return v_oc
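# Hedged usage sketch for sapm_voc with a hypothetical module dict; at the
# reference conditions (1000 W/m^2, 25 C) the log and temperature terms
# vanish, so Voc equals Voco, and Voc rises as the cell cools.
def _demo_sapm_voc():
    import numpy as np
    module = {'Voco': 46.0, 'cells_in_series': 72, 'Bvoco': -0.16,
              'n_diode': 1.1}
    v_ref = sapm_voc(1000, 25, module)
    v_cold = sapm_voc(1000, -10, module)
    assert np.isclose(v_ref, 46.0) and v_cold > v_ref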
def sapm_temperature_to_get_voc(effective_irradiance,
Voc,
Voco,
Bvoco,
diode_factor,
cells_in_series,
Mbvoc=0,
reference_temperature=25,
reference_irradiance=1000
):
"""
Calculate the cell temperature to achieve a certain Voc at a value of
effective irradiance.
Parameters
----------
effective_irradiance
Voc
Voco
Bvoco
diode_factor
cells_in_series
Mbvoc
reference_temperature
reference_irradiance
Returns
-------
"""
T0 = reference_temperature
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
# avoid problem with integer input
Ee = np.array(effective_irradiance, dtype='float64')
# set up masking for 0, positive, and nan inputs
Ee_gt_0 = np.full_like(Ee, False, dtype='bool')
Ee_eq_0 = np.full_like(Ee, False, dtype='bool')
notnan = ~np.isnan(Ee)
np.greater(Ee, 0, where=notnan, out=Ee_gt_0)
np.equal(Ee, 0, where=notnan, out=Ee_eq_0)
# avoid repeated computation
logEe = np.full_like(Ee, np.nan)
np.log(Ee / reference_irradiance, where=Ee_gt_0, out=logEe)
logEe = np.where(Ee_eq_0, -np.inf, logEe)
Bvoco = Bvoco + Mbvoc * (1 - Ee)
delta_ref = diode_factor * kb * (reference_temperature + 273.15) / q
delta_prime = diode_factor * kb / q
temperature_cell = reference_temperature + (
Voc - Voco - cells_in_series * delta_ref * logEe) / (
cells_in_series * delta_prime * logEe + Bvoco)
return temperature_cell
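# Hedged consistency check: the inversion above should recover the cell
# temperature that produced a given Voc under sapm_voc. Module values are
# hypothetical.
def _demo_temperature_voc_round_trip():
    import numpy as np
    module = {'Voco': 46.0, 'cells_in_series': 72, 'Bvoco': -0.16,
              'n_diode': 1.1}
    v_oc = sapm_voc(600.0, 40.0, module)
    temp = sapm_temperature_to_get_voc(600.0, v_oc, module['Voco'],
                                       module['Bvoco'], module['n_diode'],
                                       module['cells_in_series'])
    assert np.isclose(temp, 40.0)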
def sapm_mpp(effective_irradiance, temperature, module_parameters):
@staticmethod
def _merge_biased_variance(match_count1, biased_variance1, mean1,
match_count2, biased_variance2, mean2):
"""
Calculate the combined variance of two data chunks
:param match_count1: number of samples in new chunk 1
:param mean1: mean of chunk 1
:param biased_variance1: variance of chunk 1 without bias correction
:param match_count2: number of samples in new chunk 2
:param mean2: mean of chunk 2
:param biased_variance2: variance of chunk 2 without bias correction
:return: combined variance
:rtype: float
"""
if match_count1 < 1:
return biased_variance2
elif match_count2 < 1:
return biased_variance1
elif np.isnan(biased_variance1) or np.isnan(biased_variance2):
return np.nan
curr_count = match_count1
delta = mean2 - mean1
m_curr = biased_variance1 * curr_count
m_batch = biased_variance2 * match_count2
M2 = m_curr + m_batch + delta ** 2 * curr_count * match_count2 / \
(curr_count + match_count2)
new_variance = M2 / (curr_count + match_count2)
return new_variance
@staticmethod
def _correct_bias_variance(match_count, biased_variance):
if match_count is None or biased_variance is None or match_count < 2:
warnings.warn("Insufficient match count to correct bias in variance. Bias correction"
"can be manually disabled by setting bias_correction.is_enabled to"
"False in ProfilerOptions.", RuntimeWarning)
return np.nan
variance = match_count / (match_count - 1) * biased_variance
return variance
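# Hedged sketch (not part of the original mixin): checks the pairwise merge
# and Bessel correction above against numpy on the pooled sample. The two
# staticmethods are called as plain functions here; in the library they live
# on the numeric stats mixin class.
def _demo_merge_variance():
    import numpy as np
    a, b = np.array([1.0, 2.0, 4.0]), np.array([3.0, 5.0, 8.0, 13.0])
    merged = _merge_biased_variance(len(a), a.var(), a.mean(),
                                    len(b), b.var(), b.mean())
    pooled = np.concatenate([a, b])
    assert np.isclose(merged, pooled.var())
    assert np.isclose(_correct_bias_variance(len(pooled), merged),
                      pooled.var(ddof=1))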
@staticmethod
def _merge_biased_skewness(match_count1, biased_skewness1, biased_variance1, mean1,
match_count2, biased_skewness2, biased_variance2, mean2):
"""
Calculate the combined skewness of two data chunks
:param match_count1: # of samples in 1st chunk
:param biased_skewness1: skewness of 1st chunk without bias correction
:param biased_variance1: variance of 1st chunk without bias correction
:param mean1: mean of 1st chunk
:param match_count2: # of samples in 2nd chunk
:param biased_skewness2: skewness of 2nd chunk without bias correction
:param biased_variance2: variance of 2nd chunk without bias correction
:param mean2: mean of 2nd chunk
:return: combined skewness
:rtype: float
"""
if match_count1 < 1:
return biased_skewness2
elif match_count2 < 1:
return biased_skewness1
elif np.isnan(biased_skewness1) or np.isnan(biased_skewness2):
return np.nan
delta = mean2 - mean1
N = match_count1 + match_count2
M2_1 = match_count1 * biased_variance1
M2_2 = match_count2 * biased_variance2
M2 = M2_1 + M2_2 + delta**2 * match_count1 * match_count2 / N
if not M2:
return 0.0
M3_1 = biased_skewness1 * np.sqrt(M2_1**3) / np.sqrt(match_count1)
M3_2 = biased_skewness2 * np.sqrt(M2_2**3) / np.sqrt(match_count2)
first_term = M3_1 + M3_2
second_term = delta**3 * match_count1 * match_count2 \
* (match_count1 - match_count2) / N**2
third_term = 3 * delta * (match_count1 * M2_2
- match_count2 * M2_1) / N
M3 = first_term + second_term + third_term
biased_skewness = np.sqrt(N) * M3 / np.sqrt(M2**3)
return biased_skewness
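# Hedged check (illustrative only): merging the per-chunk biased skewness
# should match scipy's biased skewness of the pooled data; called as a plain
# function here for brevity.
def _demo_merge_skewness():
    import numpy as np
    from scipy import stats
    a = np.array([1.0, 2.0, 4.0, 8.0])
    b = np.array([3.0, 5.0, 6.0, 13.0, 21.0])
    merged = _merge_biased_skewness(
        len(a), stats.skew(a, bias=True), a.var(), a.mean(),
        len(b), stats.skew(b, bias=True), b.var(), b.mean())
    assert np.isclose(merged, stats.skew(np.concatenate([a, b]), bias=True))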
@staticmethod
def _correct_bias_skewness(match_count, biased_skewness):
"""
Apply bias correction to skewness
:param match_count: number of samples
:param biased_skewness: skewness without bias correction
:return: unbiased estimator of skewness
:rtype: NaN if sample size is too small, float otherwise
"""
if np.isnan(biased_skewness) or match_count < 3:
warnings.warn("Insufficient match count to correct bias in skewness. Bias correction"
"can be manually disabled by setting bias_correction.is_enabled to"
"False in ProfilerOptions.", RuntimeWarning)
return np.nan
skewness = np.sqrt(match_count * (match_count - 1)) \
* biased_skewness / (match_count - 2)
return skewness
@staticmethod
def _merge_biased_kurtosis(match_count1, biased_kurtosis1, biased_skewness1,
biased_variance1, mean1, match_count2, biased_kurtosis2,
biased_skewness2, biased_variance2, mean2):
"""
Calculate the combined kurtosis of two sets of data
:param match_count1: # of samples in 1st chunk
:param biased_kurtosis1: kurtosis of 1st chunk without bias correction
:param biased_skewness1: skewness of 1st chunk without bias correction
:param biased_variance1: variance of 1st chunk without bias correction
:param mean1: mean of 1st chunk
:param match_count2: # of samples in 2nd chunk
:param biased_kurtosis2: kurtosis of 2nd chunk without bias correction
:param biased_skewness2: skewness of 2nd chunk without bias correction
:param biased_variance2: variance of 2nd chunk without bias correction
:param mean2: mean of 2nd chunk
:return: combined kurtosis
:rtype: float
"""
if match_count1 < 1:
return biased_kurtosis2
elif match_count2 < 1:
return biased_kurtosis1
elif np.isnan(biased_kurtosis1) or np.isnan(biased_kurtosis2):
return np.nan
delta = mean2 - mean1
N = match_count1 + match_count2
M2_1 = match_count1 * biased_variance1
M2_2 = match_count2 * biased_variance2
M2 = M2_1 + M2_2 + delta ** 2 * match_count1 * match_count2 / N
if not M2:
return 0
M3_1 = biased_skewness1 * np.sqrt(M2_1**3) / np.sqrt(match_count1)
M3_2 = biased_skewness2 * np.sqrt(M2_2**3) / np.sqrt(match_count2)
M4_1 = (biased_kurtosis1 + 3) * M2_1**2 / match_count1
M4_2 = (biased_kurtosis2 + 3) * M2_2**2 / match_count2
first_term = M4_1 + M4_2
second_term = delta**4 * (match_count1 * match_count2 *
(match_count1**2 - match_count1 * match_count2 +
match_count2**2)) / N**3
third_term = 6 * delta**2 * (match_count1**2 * M2_2 +
match_count2**2 * M2_1) / N**2
fourth_term = 4 * delta * (match_count1 * M3_2 - match_count2
* M3_1) / N
M4 = first_term + second_term + third_term + fourth_term
biased_kurtosis = N * M4 / M2**2 - 3
return biased_kurtosis
@staticmethod
def _correct_bias_kurtosis(match_count, biased_kurtosis):
"""
Apply bias correction to kurtosis
:param match_count: number of samples
:param biased_kurtosis: skewness without bias correction
:return: unbiased estimator of kurtosis
:rtype: NaN if sample size is too small, float otherwise
"""
if np.isnan(biased_kurtosis) or match_count < 4:
warnings.warn("Insufficient match count to correct bias in kurtosis. Bias correction"
"can be manually disabled by setting bias_correction.is_enabled to"
"False in ProfilerOptions.", RuntimeWarning)
return np.nan
kurtosis = (match_count - 1) / ((match_count - 2) *
(match_count - 3)) * ((match_count + 1) *
(biased_kurtosis + 3) - 3 * (match_count - 1))
return kurtosis
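# Hedged check: the merged biased kurtosis, after bias correction, should
# agree with scipy's unbiased Fisher kurtosis of the pooled data; again
# called as plain functions for brevity.
def _demo_merge_kurtosis():
    import numpy as np
    from scipy import stats
    a = np.array([1.0, 2.0, 4.0, 8.0])
    b = np.array([3.0, 5.0, 6.0, 13.0, 21.0])
    merged = _merge_biased_kurtosis(
        len(a), stats.kurtosis(a, bias=True), stats.skew(a, bias=True),
        a.var(), a.mean(),
        len(b), stats.kurtosis(b, bias=True), stats.skew(b, bias=True),
        b.var(), b.mean())
    pooled = np.concatenate([a, b])
    assert np.isclose(_correct_bias_kurtosis(len(pooled), merged),
                      stats.kurtosis(pooled, bias=False))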
def _estimate_mode_from_histogram(self):
"""
Estimates the mode of the current data using the
histogram. If there are multiple modes, returns
K of them (where K is defined in options given, but
5 by default)
:return: The estimated mode of the histogram
:rtype: list(float)
"""
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
# Get the K bin(s) with the highest frequency (one-pass):
cur_max = -1
highest_idxs = []
count = 0
for i in range(0, len(bin_counts)):
if bin_counts[i] > cur_max:
# If a new maximum frequency is found, reset the mode counts
highest_idxs = [i]
cur_max = bin_counts[i]
count = 1
elif bin_counts[i] == cur_max and count < self._top_k_modes:
highest_idxs.append(i)
count += 1
highest_idxs = np.array(highest_idxs)
mode = (bin_edges[highest_idxs] + bin_edges[highest_idxs + 1]) / 2
return mode.tolist()
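# Standalone hedged sketch of the one-pass top-K scan used above: ties at the
# maximum count are kept, up to K bins, and each mode is reported as a bin
# midpoint.
def _demo_top_k_modes():
    bin_counts = [1, 5, 2, 5, 5, 0]
    bin_edges = [0, 1, 2, 3, 4, 5, 6]
    top_k = 2
    cur_max, highest_idxs = -1, []
    for i, count in enumerate(bin_counts):
        if count > cur_max:
            highest_idxs, cur_max = [i], count
        elif count == cur_max and len(highest_idxs) < top_k:
            highest_idxs.append(i)
    modes = [(bin_edges[i] + bin_edges[i + 1]) / 2 for i in highest_idxs]
    assert modes == [1.5, 3.5]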
def _estimate_stats_from_histogram(self):
# estimate the variance from histogram bin midpoints and counts
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
mids = 0.5 * (bin_edges[1:] + bin_edges[:-1])
mean = np.average(mids, weights=bin_counts)
var = np.average((mids - mean) ** 2, weights=bin_counts)
return var
def _total_histogram_bin_variance(self, input_array):
# calculate total variance over all bins of a histogram
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
# account for np.digitize, whose last edge is exclusive
bin_edges = bin_edges.copy()
bin_edges[-1] += 1e-3
inds = np.digitize(input_array, bin_edges)
sum_var = 0
non_zero_bins = np.where(bin_counts)[0] + 1
for i in non_zero_bins:
elements_in_bin = input_array[inds == i]
bin_var = elements_in_bin.var()
sum_var += bin_var
return sum_var
def _histogram_bin_error(self, input_array):
"""
Calculate the error of each value from the bin of the histogram it
falls within.
:param input_array: input data used to calculate the histogram
:type input_array: Union[np.array, pd.Series]
:return: binning error
:rtype: float
"""
bin_edges = self._stored_histogram['histogram']['bin_edges']
# account for np.digitize, whose last edge is exclusive
bin_edges = bin_edges.copy()
temp_last_edge = bin_edges[-1]
bin_edges[-1] = np.inf
inds = np.digitize(input_array, bin_edges)
if temp_last_edge == np.inf:
inds = np.minimum(inds, len(bin_edges) - 1)
# reset the edge
bin_edges[-1] = temp_last_edge
sum_error = sum(
(input_array - (bin_edges[inds] + bin_edges[inds - 1])/2) ** 2
)
return sum_error
@staticmethod
def _histogram_loss(diff_var, avg_diffvar, total_var,
avg_totalvar, run_time, avg_runtime):
norm_diff_var, norm_total_var, norm_runtime = 0, 0, 0
if avg_diffvar > 0:
norm_diff_var = float(diff_var - avg_diffvar) / avg_diffvar
if avg_totalvar > 0:
norm_total_var = float(total_var - avg_totalvar) / avg_totalvar
penalized_time = 1 # currently set as 1s
if (run_time - avg_runtime) >= penalized_time:
norm_runtime = float(run_time - avg_runtime) / avg_runtime
return norm_diff_var + norm_total_var + norm_runtime
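# Hedged worked example of the loss above: diff_var at twice its average
# contributes 1, total_var at its average contributes 0, and a runtime within
# 1 s of the average adds no penalty; called here as a plain function.
def _demo_histogram_loss():
    loss = _histogram_loss(diff_var=2.0, avg_diffvar=1.0,
                           total_var=3.0, avg_totalvar=3.0,
                           run_time=0.5, avg_runtime=0.4)
    assert loss == 1.0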
def _select_method_for_histogram(self, current_exact_var, current_est_var,
current_total_var, current_run_time):
current_diff_var = np.abs(current_exact_var - current_est_var)
current_avg_diff_var = current_diff_var.mean()
current_avg_total_var = current_total_var.mean()
current_avg_run_time = current_run_time.mean()
min_total_loss = np.inf
selected_method = ''
selected_suggested_bin_count = 0
for method_id, method in enumerate(self.histogram_bin_method_names):
self.histogram_methods[method]['current_loss'] = \
self._histogram_loss(current_diff_var[method_id],
current_avg_diff_var,
current_total_var[method_id],
current_avg_total_var,
current_run_time[method_id],
current_avg_run_time)
self.histogram_methods[method]['total_loss'] += \
self.histogram_methods[method]['current_loss']
if min_total_loss >= self.histogram_methods[method]['total_loss']:
# on a tied loss, keep the method with more bins (higher resolution)
if (self.histogram_methods[method]['suggested_bin_count']
<= selected_suggested_bin_count
and min_total_loss ==
self.histogram_methods[method]['total_loss']):
continue
min_total_loss = self.histogram_methods[method]['total_loss']
selected_method = method
selected_suggested_bin_count = \
self.histogram_methods[method]['suggested_bin_count']
return selected_method
def _histogram_to_array(self):
# Extend histogram to array format
bin_counts = self._stored_histogram['histogram']['bin_counts']
from bw2data import methods, Method
from . import get_biosphere_database
from .config import Config
from copy import copy
from .categories.resources import FossilResourceScarcityEndpoint
from collections import defaultdict
NAME_MAPPING = {
("Acidification - Terrestrial ecosystems", "Egalitarian"): (
"Terrestrial ecosystems",
("Terrestrial Acidification",),
),
("Acidification - Terrestrial ecosystems", "Hierarchist"): (
"Terrestrial ecosystems",
("Terrestrial Acidification",),
),
("Acidification - Terrestrial ecosystems", "Individualist"): (
"Terrestrial ecosystems",
("Terrestrial Acidification",),
),
("Eutrophication - Freshwater ecosystems", "Egalitarian"): (
"Freshwater ecosystems",
("Freshwater Eutrophication",),
),
("Eutrophication - Freshwater ecosystems", "Hierarchist"): (
"Freshwater ecosystems",
("Freshwater Eutrophication",),
),
("Eutrophication - Freshwater ecosystems", "Individualist"): (
"Freshwater ecosystems",
("Freshwater Eutrophication",),
),
("Eutrophication - Marine ecosystems", "Egalitarian"): (
"Marine ecosystems",
("Marine eutrophication",),
),
("Eutrophication - Marine ecosystems", "Hierarchist"): (
"Marine ecosystems",
("Marine eutrophication",),
),
("Eutrophication - Marine ecosystems", "Individualist"): (
"Marine ecosystems",
("Marine eutrophication",),
),
("Fine particulate matter formation - Human health", "Egalitarian"): (
"Human health",
("Particulate Matter Formation", "Egalitarian"),
),
("Fine particulate matter formation - Human health", "Hierarchist"): (
"Human health",
("Particulate Matter Formation", "Hierarchist"),
),
("Fine particulate matter formation - Human health", "Individualist"): (
"Human health",
("Particulate Matter Formation", "Individualist"),
),
("Global Warming - Freshwater ecosystems", "Egalitarian"): (
"Freshwater ecosystems",
("Global Warming", "1000 year timescale", "Egalitarian"),
),
("Global Warming - Freshwater ecosystems", "Hierarchist"): (
"Freshwater ecosystems",
("Global Warming", "100 year timescale", "Hierarchist"),
),
("Global Warming - Freshwater ecosystems", "Individualist"): (
"Freshwater ecosystems",
("Global Warming", "20 year timescale", "Individualist"),
),
("Global Warming - Human health", "Egalitarian"): (
"Human health",
("Global Warming", "1000 year timescale", "Egalitarian"),
),
("Global Warming - Human health", "Hierarchist"): (
"Human health",
("Global Warming", "100 year timescale", "Hierarchist"),
),
("Global Warming - Human health", "Individualist"): (
"Human health",
("Global Warming", "20 year timescale", "Individualist"),
),
("Global Warming - Terrestrial ecosystems", "Egalitarian"): (
"Terrestrial ecosystems",
("Global Warming", "1000 year timescale", "Egalitarian"),
),
("Global Warming - Terrestrial ecosystems", "Hierarchist"): (
"Terrestrial ecosystems",
("Global Warming", "100 year timescale", "Hierarchist"),
),
("Global Warming - Terrestrial ecosystems", "Individualist"): (
"Terrestrial ecosystems",
("Global Warming", "20 year timescale", "Individualist"),
),
("Ionzing Radiation - Human health", "Egalitarian"): (
"Human health",
("Ionizing Radiation", "Egalitarian"),
),
("Ionzing Radiation - Human health", "Hierarchist"): (
"Human health",
("Ionizing Radiation", "Hierarchist"),
),
("Ionzing Radiation - Human health", "Individualist"): (
"Human health",
("Ionizing Radiation", "Individualist"),
),
("Land use - occupation", "Egalitarian"): (
"Terrestrial ecosystems",
("Land occupation",),
),
("Land use - occupation", "Hierarchist"): (
"Terrestrial ecosystems",
("Land occupation",),
),
("Land use - occupation", "Individualist"): (
"Terrestrial ecosystems",
("Land occupation",),
),
("Land use - transformation", "Egalitarian"): (
"Terrestrial ecosystems",
("Land transformation",),
),
("Land use - transformation", "Hierarchist"): (
"Terrestrial ecosystems",
("Land transformation",),
),
("Land use - transformation", "Individualist"): (
"Terrestrial ecosystems",
("Land transformation",),
),
("Photochemical ozone formation - Human health", "Egalitarian"): (
"Human health",
("Ozone Formation", "Damage to Humans"),
),
("Photochemical ozone formation - Human health", "Hierarchist"): (
"Human health",
("Ozone Formation", "Damage to Humans"),
),
("Photochemical ozone formation - Human health", "Individualist"): (
"Human health",
("Ozone Formation", "Damage to Humans"),
),
("Photochemical ozone formation - Terrestrial ecosystems", "Egalitarian"): (
"Terrestrial ecosystems",
("Ozone Formation", "Damage to Ecosystems"),
),
("Photochemical ozone formation - Terrestrial ecosystems", "Hierarchist"): (
"Terrestrial ecosystems",
("Ozone Formation", "Damage to Ecosystems"),
),
("Photochemical ozone formation - Terrestrial ecosystems", "Individualist"): (
"Terrestrial ecosystems",
("Ozone Formation", "Damage to Ecosystems"),
),
("Stratospheric ozone depletion - Human health", "Egalitarian"): (
"Human health",
("Stratospheric Ozone Depletion", "Infinite timescale", "Egalitarian"),
),
("Stratospheric ozone depletion - Human health", "Hierarchist"): (
"Human health",
("Stratospheric Ozone Depletion", "100 year timescale", "Hierarchist"),
),
("Stratospheric ozone depletion - Human health", "Individualist"): (
"Human health",
("Stratospheric Ozone Depletion", "20 year timescale", "Individualist"),
),
("Toxicity - Freshwater ecosystems", "Egalitarian"): (
"Freshwater ecosystems",
("Ecotoxicity", "Freshwater", "Egalitarian"),
),
("Toxicity - Freshwater ecosystems", "Hierarchist"): (
"Freshwater ecosystems",
("Ecotoxicity", "Freshwater", "Hierarchist"),
),
("Toxicity - Freshwater ecosystems", "Individualist"): (
"Freshwater ecosystems",
("Ecotoxicity", "Freshwater", "Individualist"),
),
("Toxicity - Human health (cancer)", "Egalitarian"): (
"Human health",
("Toxicity", "Carcinogenic", "Egalitarian"),
),
("Toxicity - Human health (cancer)", "Hierarchist"): (
"Human health",
("Toxicity", "Carcinogenic", "Hierarchist"),
),
("Toxicity - Human health (cancer)", "Individualist"): (
"Human health",
("Toxicity", "Carcinogenic", "Individualist"),
),
("Toxicity - Human health (non-cancer)", "Egalitarian"): (
"Human health",
("Toxicity", "Non-carcinogenic", "Egalitarian"),
),
("Toxicity - Human health (non-cancer)", "Hierarchist"): (
"Human health",
("Toxicity", "Non-carcinogenic", "Hierarchist"),
),
("Toxicity - Human health (non-cancer)", "Individualist"): (
"Human health",
("Toxicity", "Non-carcinogenic", "Individualist"),
),
("Toxicity - Marine ecosystems", "Egalitarian"): (
"Marine ecosystems",
("Ecotoxicity", "Marine", "Egalitarian"),
),
("Toxicity - Marine ecosystems", "Hierarchist"): (
"Marine ecosystems",
("Ecotoxicity", "Marine", "Hierarchist"),
),
("Toxicity - Marine ecosystems", "Individualist"): (
"Marine ecosystems",
("Ecotoxicity", "Marine", "Individualist"),
),
("Toxicity - Terrestrial ecosystems", "Egalitarian"): (
"Terrestrial ecosystems",
("Ecotoxicity", "Terrestrial", "Egalitarian"),
),
("Toxicity - Terrestrial ecosystems", "Hierarchist"): (
"Terrestrial ecosystems",
("Ecotoxicity", "Terrestrial", "Hierarchist"),
),
("Toxicity - Terrestrial ecosystems", "Individualist"): (
"Terrestrial ecosystems",
("Ecotoxicity", "Terrestrial", "Individualist"),
),
("Water consumption - human health", "Egalitarian"): (
"Human health",
("Water consumption",),
),
("Water consumption - human health", "Hierarchist"): (
"Human health",
("Water consumption",),
),
("Water consumption - human health", "Individualist"): (
"Human health",
("Water consumption",),
),
("Water consumption - terrestrial ecosystems", "Egalitarian"): (
"Terrestrial ecosystems",
("Water consumption",),
),
("Water consumption - terrestrial ecosystems", "Hierarchist"): (
"Terrestrial ecosystems",
("Water consumption",),
),
("Water consumption - terrestrial ecosystems", "Individualist"): (
"Terrestrial ecosystems",
("Water consumption",),
),
("Water consumption -aquatic ecosystems", "Egalitarian"): (
"Freshwater ecosystems",
("Water consumption",),
),
("Water consumption -aquatic ecosystems", "Hierarchist"): (
"Freshwater ecosystems",
("Water consumption",),
),
("Water consumption -aquatic ecosystems", "Individualist"): (
"Freshwater ecosystems",
("Water consumption",),
),
("Mineral resource scarcity", "Egalitarian"): (
"Resources",
("Mineral Resource Scarcity", "Egalitarian"),
),
("Mineral resource scarcity", "Hierarchist"): (
"Resources",
("Mineral Resource Scarcity", "Hierarchist"),
),
("Mineral resource scarcity", "Individualist"): (
"Resources",
("Mineral Resource Scarcity", "Individualist"),
),
}
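# Hedged lookup example: NAME_MAPPING keys combine the spreadsheet's
# midpoint-to-endpoint label with a cultural perspective, and the value gives
# the endpoint section plus the midpoint method suffix.
def _demo_name_mapping():
    section, ending = NAME_MAPPING[
        ("Global Warming - Human health", "Hierarchist")]
    assert section == "Human health"
    assert ending == ("Global Warming", "100 year timescale", "Hierarchist")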
def create_single_endpoints(data, version=2):
end_line = {
0: 32,
1: 33,
2: 33,
}
config = Config(version)
formatted = [
{label: value for label, value in zip(data[1][0], row[:5])}
for row in data[1][1 : end_line[version]]
if row[1]
]
# Split land occupation and transformation for easier processing
new = []
for elem in formatted:
if (
elem["Midpoint to endpoint conversion factor"]
== "Land use - occupation and transformation"
):
elem["Midpoint to endpoint conversion factor"] = "Land use - occupation"
new_elem = copy(elem)
new_elem[
"Midpoint to endpoint conversion factor"
] = "Land use - transformation"
new.append(new_elem)
formatted.extend(new)
# Correct inconsistent labels
for elem in formatted:
elem["Individualist"] = elem.pop("Individualistic")
elem["Hierarchist"] = elem.pop("Hierarchic")
for elem in formatted:
for perspective in ("Individualist", "Hierarchist", "Egalitarian"):
section, ending = NAME_MAPPING[
(elem["Midpoint to endpoint conversion factor"]), perspective
]
midpoint = config.base_midpoint_name + ending
assert midpoint in methods
endpoint = config.base_endpoint_name + (section,) + ending
if endpoint[-1] != perspective:
endpoint += (perspective,)
endpoint_method = Method(endpoint)
endpoint_method.register(
unit=elem["unit"], description="", filename=config.filename
)
endpoint_method.write(
[(flow, cf * elem[perspective]) for flow, cf in Method(midpoint).load()]
)
frse = FossilResourceScarcityEndpoint(get_biosphere_database(), version)
frse.apply_strategies(verbose=False)
try:
frse.drop_unlinked(verbose=False)
frse.write_methods(overwrite=True, verbose=False)
except TypeError:
frse.drop_unlinked()
frse.write_methods(overwrite=True)
def create_aggregated_endpoints(version):
config = Config(version)
UNIT_MAPPING = {
"Freshwater ecosystems": "Species∙yr",
"Resources": "USD (2013)",
"Human health": "DALY",
"Marine ecosystems": "Species∙yr",
"Terrestrial ecosystems": "Species∙yr",
}
sections = [
"Freshwater ecosystems",
"Resources",
"Human health",
"Marine ecosystems",
"Terrestrial ecosystems",
]
perspectives = ["Egalitarian", "Hierarchist", "Individualist"]
for section in sections:
for perspective in perspectives:
name = config.base_endpoint_name + (section, "Aggregated", perspective)
metadata = {
"unit": UNIT_MAPPING[section],
"filename": config.filename,
"description": "",
}
children = [
m
for m in methods
if m[: len(config.base_endpoint_name)] == config.base_endpoint_name
and perspective in m
and section in m
and "Aggregated" not in m
]
combine_methods(name, children, metadata)
for perspective in perspectives:
name = config.base_endpoint_name + ("Ecosystems", "Aggregated", perspective)
metadata = {
"unit": "Species∙yr",
"filename": config.filename,
"description": "",
}
children = [
m
for m in methods
if m[: len(config.base_endpoint_name)] == config.base_endpoint_name
and perspective in m
and m[3]
in ("Freshwater ecosystems", "Marine ecosystems", "Terrestrial ecosystems")
and m[4] == "Aggregated"
]
combine_methods(name, children, metadata)
for perspective in perspectives:
name = config.base_endpoint_name + (
"Weighted single score",
"Aggregated",
perspective,
)
metadata = {"unit": "Monetary", "filename": config.filename, "description": ""}
children = [
config.base_name
+ ("Endpoint", "Human health", "Aggregated", "Hierarchist"),
config.base_name + ("Endpoint", "Resources", "Aggregated", "Hierarchist"),
config.base_name + ("Endpoint", "Ecosystems", "Aggregated", "Hierarchist"),
]
weights = [7.4e4, 1, 3.08e7]
combine_methods(name, children, metadata, weights)
def
# Repo: VinAIResearch/Point-Unet, file: SaliencyAttention/custom_ops.py
# -*- coding: utf-8 -*-
# File: custom_ops.py
###
# Code are borrowed from tensorpack modified to support 5d input for batchnorm.
# https://github.com/tensorpack/tensorpack/blob/master/tensorpack/models/batch_norm.py
###
import tensorflow as tf
from tensorflow.contrib.framework import add_model_variable
from tensorflow.python.training import moving_averages
import re
import six
import functools
from tensorpack.utils import logger
from tensorpack.utils.argtools import get_data_format
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.tfutils.common import get_tf_version_number
from tensorpack.tfutils.collection import backup_collection, restore_collection
from tensorpack import layer_register, VariableHolder
from tensorpack.tfutils.varreplace import custom_getter_scope
__all__ = ['BatchNorm', 'BatchRenorm']
# decay: being too close to 1 leads to slow start-up. torch use 0.9.
# eps: torch: 1e-5. Lasagne: 1e-4
@layer_register()
def InstanceNorm5d(x, epsilon=1e-5, use_affine=True, gamma_init=None, data_format='channels_last'):
"""
Instance Normalization, as in the paper:
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_.
Args:
x (tf.Tensor): a 4D or 5D tensor.
epsilon (float): avoid divide-by-zero
use_affine (bool): whether to apply learnable affine transformation
"""
data_format = get_data_format(data_format, tfmode=False)
shape = x.get_shape().as_list()
# assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"
if len(shape) == 5:
if data_format == 'NHWC':
axis = [1, 2, 3]
ch = shape[4]
new_shape = [1, 1, 1, 1, ch]
else:
axis = [2, 3, 4]
ch = shape[1]
new_shape = [1, ch, 1, 1, 1]
else:
if data_format == 'NHWC':
axis = [1, 2]
ch = shape[3]
new_shape = [1, 1, 1, ch]
else:
axis = [2, 3]
ch = shape[1]
new_shape = [1, ch, 1, 1]
assert ch is not None, "Input of InstanceNorm require known channel!"
mean, var = tf.nn.moments(x, axis, keep_dims=True)
if not use_affine:
return tf.divide(x - mean, tf.sqrt(var + epsilon), name='output')
beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
if gamma_init is None:
gamma_init = tf.constant_initializer(1.0)
gamma = tf.get_variable('gamma', [ch], initializer=gamma_init)
gamma = tf.reshape(gamma, new_shape)
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if use_affine:
vh.gamma = gamma
vh.beta = beta
return ret
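# Hedged usage sketch (TF1 graph mode): for a 5D channels_last input the
# reduction runs over the three spatial axes and the output keeps the input
# shape; the layer name 'inorm' is illustrative.
def _demo_instance_norm_5d():
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, 8, 8, 8, 16])
        y = InstanceNorm5d('inorm', x)
        assert y.get_shape().as_list() == [None, 8, 8, 8, 16]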
def rename_get_variable(mapping):
"""
Args:
mapping(dict): an old -> new mapping for variable basename. e.g. {'kernel': 'W'}
Returns:
A context where the variables are renamed.
"""
def custom_getter(getter, name, *args, **kwargs):
splits = name.split('/')
basename = splits[-1]
if basename in mapping:
basename = mapping[basename]
splits[-1] = basename
name = '/'.join(splits)
return getter(name, *args, **kwargs)
return custom_getter_scope(custom_getter)
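# Hedged usage sketch: inside the scope, variables created by tf.layers with
# basename 'kernel'/'bias' are registered under 'W'/'b' instead. The exact
# basenames depend on tf.layers internals (TF1 assumed).
def _demo_rename_get_variable():
    with tf.Graph().as_default():
        with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
            x = tf.placeholder(tf.float32, [None, 4])
            tf.layers.dense(x, 8, name='fc')
        names = {v.name for v in tf.global_variables()}
        assert 'fc/W:0' in names and 'fc/b:0' in names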
def map_common_tfargs(kwargs):
df = kwargs.pop('data_format', None)
if df is not None:
df = get_data_format(df, tfmode=True)
kwargs['data_format'] = df
old_nl = kwargs.pop('nl', None)
if old_nl is not None:
kwargs['activation'] = lambda x, name=None: old_nl(x, name=name)
if 'W_init' in kwargs:
kwargs['kernel_initializer'] = kwargs.pop('W_init')
if 'b_init' in kwargs:
kwargs['bias_initializer'] = kwargs.pop('b_init')
return kwargs
def convert_to_tflayer_args(args_names, name_mapping):
"""
After applying this decorator:
1. data_format becomes tf.layers style
2. nl becomes activation
3. initializers are renamed
4. positional args are transformed to corresponding kwargs, according to args_names
5. kwargs are mapped to tf.layers names if needed, by name_mapping
"""
def decorator(func):
@functools.wraps(func)
def decorated_func(inputs, *args, **kwargs):
kwargs = map_common_tfargs(kwargs)
posarg_dic = {}
assert len(args) <= len(args_names), \
"Please use kwargs instead of positional args to call this model, " \
"except for the following arguments: {}".format(', '.join(args_names))
for pos_arg, name in zip(args, args_names):
posarg_dic[name] = pos_arg
ret = {}
for name, arg in six.iteritems(kwargs):
newname = name_mapping.get(name, None)
if newname is not None:
assert newname not in kwargs, \
"Argument {} and {} conflicts!".format(name, newname)
else:
newname = name
ret[newname] = arg
ret.update(posarg_dic) # Let pos arg overwrite kw arg, for argscope to work
return func(inputs, **ret)
return decorated_func
return decorator
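# Hedged usage sketch for the decorator above: a tensorpack-style keyword
# ('out_channel') is remapped to the tf.layers name ('filters') before the
# wrapped function is called; the layer itself is a stand-in.
def _demo_convert_to_tflayer_args():
    @convert_to_tflayer_args(args_names=['filters'],
                             name_mapping={'out_channel': 'filters'})
    def toy_layer(inputs, filters=1):
        return inputs, filters
    _, filters = toy_layer('x', out_channel=32)
    assert filters == 32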
def get_bn_variables(n_out, use_scale, use_bias, beta_init, gamma_init):
if use_bias:
beta = tf.get_variable('beta', [n_out], initializer=beta_init)
else:
beta = tf.zeros([n_out], name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [n_out], initializer=gamma_init)
else:
gamma = tf.ones([n_out], name='gamma')
# x * gamma + beta
moving_mean = tf.get_variable('mean/EMA', [n_out],
initializer=tf.constant_initializer(), trainable=False)
moving_var = tf.get_variable('variance/EMA', [n_out],
initializer=tf.constant_initializer(1.0), trainable=False)
return beta, gamma, moving_mean, moving_var
def update_bn_ema(xn, batch_mean, batch_var,
moving_mean, moving_var, decay, internal_update):
update_op1 = moving_averages.assign_moving_average(
moving_mean, batch_mean, decay, zero_debias=False,
name='mean_ema_op')
update_op2 = moving_averages.assign_moving_average(
moving_var, batch_var, decay, zero_debias=False,
name='var_ema_op')
if internal_update:
with tf.control_dependencies([update_op1, update_op2]):
return tf.identity(xn, name='output')
else:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op1)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op2)
return tf.identity(xn, name='output')
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum',
'use_local_stat': 'training'
})
def BatchNorm3d(inputs, axis=None, training=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
virtual_batch_size=None,
data_format='channels_last',
internal_update=False,
sync_statistics=None):
"""
Almost equivalent to `tf.layers.batch_normalization`, but different (and more powerful)
in the following:
1. Accepts an alternative `data_format` option when `axis` is None. For 2D input, this argument will be ignored.
2. Default value for `momentum` and `epsilon` is different.
3. Default value for `training` is automatically obtained from tensorpack's `TowerContext`, but can be overwritten.
4. Supports the `internal_update` option, which enables the use of BatchNorm layer inside conditionals.
5. Supports the `sync_statistics` option, which is very useful in small-batch models.
Args:
internal_update (bool): if False, add EMA update ops to
`tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer by control dependencies.
They are very similar in speed, but `internal_update=True` can be used
when you have conditionals in your model, or when you have multiple networks to train.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/14699
sync_statistics: either None, "nccl", or "horovod". By default (None), it uses statistics of the input tensor to normalize.
When set to "nccl", this layer must be used under tensorpack multi-gpu trainers,
and it then uses per-machine (multiple GPU) statistics to normalize.
Note that this implementation averages the per-tower E[x] and E[x^2] among towers to compute
global mean&variance. The result is the global mean&variance only if each tower has the same batch size.
This option has no effect when not training.
This option is also known as "Cross-GPU BatchNorm" as mentioned in https://arxiv.org/abs/1711.07240.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/18222
Variable Names:
* ``beta``: the bias term. Will be zero-inited by default.
* ``gamma``: the scale term. Will be one-inited by default.
* ``mean/EMA``: the moving average of mean.
* ``variance/EMA``: the moving average of variance.
Note:
Combinations of ``training`` and ``ctx.is_training``:
* ``training == ctx.is_training``: standard BN, EMA are maintained during training
and used during inference. This is the default.
* ``training and not ctx.is_training``: still use batch statistics in inference.
* ``not training and ctx.is_training``: use EMA to normalize in
training. This is useful when you load a pre-trained BN and
don't want to fine tune the EMA. EMA will not be updated in
this case.
"""
# parse shapes
data_format = get_data_format(data_format, tfmode=False)
shape = inputs.get_shape().as_list()
ndims = len(shape)
# in 3d conv, we have 5d dim [batch, c, d, h, w]
# assert ndims in [2, 4], ndims
if sync_statistics is not None:
sync_statistics = sync_statistics.lower()
assert sync_statistics in [None, 'nccl', 'horovod'], sync_statistics
if axis is None:
if ndims == 2:
data_format = 'NHWC'
axis = 1
elif ndims == 5:
axis = 1 if data_format == 'NCHW' else 4
else:
axis = 1 if data_format == 'NCHW' else 3
else:
data_format = 'NCHW' if axis == 1 else 'NHWC'
num_chan = shape[axis]
# parse training/ctx
ctx = get_current_tower_context()
if training is None:
training = ctx.is_training
training = bool(training)
TF_version = get_tf_version_number()
if not training and ctx.is_training:
assert TF_version >= 1.4, \
"Fine tuning a BatchNorm model with fixed statistics is only " \
"supported after https://github.com/tensorflow/tensorflow/pull/12580 "
if ctx.is_main_training_tower: # only warn in first tower
logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.")
# Using moving_mean/moving_variance in training, which means we
# loaded a pre-trained BN and only fine-tuning the affine part.
if sync_statistics is None or not (training and ctx.is_training):
coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])
with rename_get_variable(
{'moving_mean': 'mean/EMA',
'moving_variance': 'variance/EMA'}):
tf_args = dict(
axis=axis,
momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
fused=True,
_reuse=tf.get_variable_scope().reuse)
if TF_version >= 1.5:
tf_args['virtual_batch_size'] = virtual_batch_size
else:
assert virtual_batch_size is None, "Feature not supported in this version of TF!"
layer = tf.layers.BatchNormalization(**tf_args)
xn = layer.apply(inputs, training=training, scope=tf.get_variable_scope())
# maintain EMA only on one GPU is OK, even in replicated mode.
# because during training, EMA isn't used
if ctx.is_main_training_tower:
for v in layer.non_trainable_variables:
add_model_variable(v)
if not ctx.is_main_training_tower or internal_update:
restore_collection(coll_bk)
if training and internal_update:
assert layer.updates
with tf.control_dependencies(layer.updates):
ret = tf.identity(xn, name='output')
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(
moving_mean=layer.moving_mean,
mean=layer.moving_mean, # for backward-compatibility
moving_variance=layer.moving_variance,
variance=layer.moving_variance)  # for backward-compatibility
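# Usage sketch (assumed, mirroring the docstring): inside a tensorpack model,
#
#   with argscope(BatchNorm3d, sync_statistics='nccl'):
#       x = BatchNorm3d('bn', conv_output)
#
# `training` is normally inferred from the surrounding TowerContext, so it
# rarely needs to be passed explicitly.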
# Source file: Cantera-data-examples/onedim.py (repo: vincentrobin/conda, 1-10 GitHub stars)
# This file is part of Cantera. See License.txt in the top-level directory or
# at http://www.cantera.org/license.txt for license and copyright information.
import numpy as np
from ._cantera import *
from .composite import Solution
import csv as _csv
try:
# Python 2.7 or 3.2+
from math import erf
except ImportError:
from scipy.special import erf
class FlameBase(Sim1D):
""" Base class for flames with a single flow domain """
__slots__ = ('gas',)
def __init__(self, domains, gas, grid=None):
"""
:param gas:
object to use to evaluate all gas properties and reaction rates
:param grid:
array of initial grid points
"""
if grid is None:
grid = np.linspace(0.0, 0.1, 6)
self.flame.grid = grid
super(FlameBase, self).__init__(domains)
self.gas = gas
self.flame.P = gas.P
def set_refine_criteria(self, ratio=10.0, slope=0.8, curve=0.8, prune=0.0):
"""
Set the criteria used for grid refinement.
:param ratio:
additional points will be added if the ratio of the spacing on
either side of a grid point exceeds this value
:param slope:
maximum difference in value between two adjacent points, scaled by
the maximum difference in the profile (0.0 < slope < 1.0). Adds
points in regions of high slope.
:param curve:
maximum difference in slope between two adjacent intervals, scaled
by the maximum difference in the profile (0.0 < curve < 1.0). Adds
points in regions of high curvature.
:param prune:
if the slope or curve criteria are satisfied to the level of
'prune', the grid point is assumed not to be needed and is removed.
Set prune significantly smaller than 'slope' and 'curve'. Set to
zero to disable pruning the grid.
>>> f.set_refine_criteria(ratio=3.0, slope=0.1, curve=0.2, prune=0)
"""
super(FlameBase, self).set_refine_criteria(self.flame, ratio, slope,
curve, prune)
def get_refine_criteria(self):
"""
Get a dictionary of the criteria used for grid refinement. The items in
the dictionary are the ``ratio``, ``slope``, ``curve``, and ``prune``,
as defined in `~FlameBase.set_refine_criteria`.
>>> f.set_refine_criteria(ratio=3.0, slope=0.1, curve=0.2, prune=0)
>>> f.get_refine_criteria()
{'ratio': 3.0, 'slope': 0.1, 'curve': 0.2, 'prune': 0.0}
"""
return super(FlameBase, self).get_refine_criteria(self.flame)
def set_profile(self, component, locations, values):
"""
Set an initial estimate for a profile of one component.
:param component:
component name or index
:param locations:
sequence of relative positions, from 0 on the left to 1 on the right
:param values:
sequence of values at the relative positions specified in *locations*
>>> f.set_profile('T', [0.0, 0.2, 1.0], [400.0, 800.0, 1500.0])
"""
super(FlameBase, self).set_profile(self.flame, component, locations,
values)
@property
def max_grid_points(self):
"""
Get/Set the maximum number of grid points used in the solution of
this flame.
"""
return super(FlameBase, self).get_max_grid_points(self.flame)
@max_grid_points.setter
def max_grid_points(self, npmax):
super(FlameBase, self).set_max_grid_points(self.flame, npmax)
@property
def transport_model(self):
"""
Get/Set the transport model used by the `Solution` object used for this
simulation.
"""
return self.gas.transport_model
@transport_model.setter
def transport_model(self, model):
self.gas.transport_model = model
self.flame.set_transport(self.gas)
@property
def energy_enabled(self):
""" Get/Set whether or not to solve the energy equation."""
return self.flame.energy_enabled
@energy_enabled.setter
def energy_enabled(self, enable):
self.flame.energy_enabled = enable
@property
def soret_enabled(self):
"""
Get/Set whether or not to include diffusive mass fluxes due to the
Soret effect. Enabling this option works only when using the
multicomponent transport model.
"""
return self.flame.soret_enabled
@soret_enabled.setter
def soret_enabled(self, enable):
self.flame.soret_enabled = enable
@property
def radiation_enabled(self):
"""
Get/Set whether or not to include radiative heat transfer
"""
return self.flame.radiation_enabled
@radiation_enabled.setter
def radiation_enabled(self, enable):
self.flame.radiation_enabled = enable
def set_boundary_emissivities(self, e_left, e_right):
self.flame.set_boundary_emissivities(e_left, e_right)
@property
def grid(self):
""" Array of grid point positions along the flame. """
return self.flame.grid
@property
def P(self):
""" Get/Set the pressure of the flame [Pa] """
return self.flame.P
@P.setter
def P(self, P):
self.flame.P = P
@property
def T(self):
""" Array containing the temperature [K] at each grid point. """
return self.profile(self.flame, 'T')
@property
def u(self):
"""
Array containing the velocity [m/s] normal to the flame at each point.
"""
return self.profile(self.flame, 'u')
@property
def V(self):
"""
Array containing the tangential velocity gradient [1/s] at each point.
"""
return self.profile(self.flame, 'V')
@property
def L(self):
"""
Array containing the radial pressure gradient (1/r)(dP/dr) [N/m^4] at
each point. Note: This value is named 'lambda' in the C++ code.
"""
return self.profile(self.flame, 'lambda')
def elemental_mass_fraction(self, m):
r"""
Get the elemental mass fraction :math:`Z_{\mathrm{mass},m}` of element
:math:`m` at each grid point, which is defined as:
.. math:: Z_{\mathrm{mass},m} = \sum_k \frac{a_{m,k} M_m}{M_k} Y_k
with :math:`a_{m,k}` being the number of atoms of element :math:`m` in
species :math:`k`, :math:`M_m` the atomic weight of element :math:`m`,
:math:`M_k` the molecular weight of species :math:`k`, and :math:`Y_k`
the mass fraction of species :math:`k`.
:param m:
Base element, may be specified by name or by index.
>>> phase.elemental_mass_fraction('H')
[1.0, ..., 0.0]
"""
vals = np.empty(self.flame.n_points)
for i in range(self.flame.n_points):
self.set_gas_state(i)
vals[i] = self.gas.elemental_mass_fraction(m)
return vals
def elemental_mole_fraction(self, m):
r"""
Get the elemental mole fraction :math:`Z_{\mathrm{mole},m}` of element
:math:`m` at each grid point, which is defined as:
.. math:: Z_{\mathrm{mole},m} = \sum_k \frac{a_{m,k}}{\sum_j a_{j,k}} X_k
with :math:`a_{m,k}` being the number of atoms of element :math:`m` in
species :math:`k` and :math:`X_k` the mole fraction of species
:math:`k`.
:param m:
Base element, may be specified by name or by index.
>>> phase.elemental_mole_fraction('H')
[1.0, ..., 0.0]
"""
vals = np.empty(self.flame.n_points)
for i in range(self.flame.n_points):
self.set_gas_state(i)
vals[i] = self.gas.elemental_mole_fraction(m)
return vals
def solution(self, component, point=None):
"""
Get the solution at one point or for the full flame domain (if
`point=None`) for the specified *component*. The *component* can be
specified by name or index.
"""
if point is None:
return self.profile(self.flame, component)
else:
return self.value(self.flame, component, point)
def set_gas_state(self, point):
"""
Set the state of the Solution object used for calculations,
`self.gas`, to the temperature and composition at the point with index
*point*.
"""
k0 = self.flame.component_index(self.gas.species_name(0))
Y = [self.solution(k, point)
for k in range(k0, k0 + self.gas.n_species)]
self.gas.set_unnormalized_mass_fractions(Y)
self.gas.TP = self.value(self.flame, 'T', point), self.P
@property
def heat_release_rate(self):
"""
Get the total volumetric heat release rate [W/m^3].
"""
return - np.sum(self.partial_molar_enthalpies *
self.net_production_rates, 0)
@property
def heat_production_rates(self):
"""
Get the volumetric heat production rates [W/m^3] on a per-reaction
basis. The sum over all reactions results in the total volumetric heat
release rate.
Example: <NAME>: Combustion Physics (2006), Fig. 7.8.6
>>> f.heat_production_rates[2] # heat production rate of reaction index 2
"""
return - self.net_rates_of_progress * self.delta_standard_enthalpy
def write_csv(self, filename, species='X', quiet=True):
"""
Write the velocity, temperature, density, and species profiles
to a CSV file.
:param filename:
Output file name
:param species:
Attribute to use obtaining species profiles, e.g. ``X`` for
mole fractions or ``Y`` for mass fractions.
"""
z = self.grid
T = self.T
u = self.u
V = self.V
csvfile = open(filename, 'w')
writer = _csv.writer(csvfile)
writer.writerow(['z (m)', 'u (m/s)', 'V (1/s)',
'T (K)', 'rho (kg/m3)'] + self.gas.species_names)
for n in range(self.flame.n_points):
self.set_gas_state(n)
writer.writerow([z[n], u[n], V[n], T[n], self.gas.density] +
list(getattr(self.gas, species)))
csvfile.close()
if not quiet:
print("Solution saved to '{0}'.".format(filename))
def _trim(docstring):
"""Remove block indentation from a docstring."""
if not docstring:
return ''
lines = docstring.splitlines()
# Determine minimum indentation (first line doesn't count):
indent = 999
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < 999:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Return a single string, with trailing and leading blank lines stripped
return '\n'.join(trimmed).strip('\n')
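# For instance (illustrative):
#   _trim("line one\n    line two\n    line three")
#   -> 'line one\nline two\nline three'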
def _array_property(attr, size=None):
"""
Generate a property that retrieves values at each point in the flame. The
'size' argument is the attribute name of the gas object used to set the
leading dimension of the resulting array.
"""
def getter(self):
if size is None:
# 1D array for scalar property
vals = np.empty(self.flame.n_points)
else:
# 2D array
vals = np.empty((getattr(self.gas, size), self.flame.n_points))
for i in range(self.flame.n_points):
self.set_gas_state(i)
vals[...,i] = getattr(self.gas, attr)
return vals
if size is None:
extradoc = "\nReturns an array of length `n_points`."
else:
extradoc = "\nReturns an array of size `%s` x `n_points`." % size
doc = _trim(getattr(Solution, attr).__doc__) +'\n' + extradoc
return property(getter, doc=doc)
# Add scalar properties to FlameBase
for _attr in ['density', 'density_mass', 'density_mole', 'volume_mass',
'volume_mole', 'int_energy_mole', 'int_energy_mass', 'h',
'enthalpy_mole', 'enthalpy_mass', 's', 'entropy_mole',
'entropy_mass', 'g', 'gibbs_mole', 'gibbs_mass', 'cv',
'cv_mole',
isWeaponDrawn:
DistributedPlayerPirate.sendRequestRemoveEffects(self, self.stickyTargets)
self.setStickyTargets([])
taskMgr.remove(self.uniqueName('runAuraDetection'))
subtype = ItemGlobals.getSubtype(currentWeaponId)
if WeaponGlobals.getWeaponCategory(currentWeaponId) == WeaponGlobals.VOODOO and isWeaponDrawn:
self.guiMgr.attuneSelection.show()
else:
self.guiMgr.attuneSelection.hide()
specialAttack = ItemGlobals.getSpecialAttack(self.currentWeaponId)
if self.curAttackAnim:
if specialAttack == EnemySkills.CUTLASS_ROLLTHRUST:
self.curAttackAnim.pause()
else:
self.curAttackAnim.finish()
self.curAttackAnim = None
if self.secondWeapon:
self.secondWeapon.removeNode()
self.secondWeapon = None
if ItemGlobals.getSubtype(currentWeaponId) == ItemGlobals.QUEST_PROP_POWDER_KEG and not isWeaponDrawn:
currentWeaponId = 0
self.checkWeaponSwitch(currentWeaponId, isWeaponDrawn)
self.guiMgr.setCurrentWeapon(currentWeaponId, isWeaponDrawn, slotId)
specialAttack = ItemGlobals.getSpecialAttack(currentWeaponId)
if specialAttack and isWeaponDrawn:
if WeaponGlobals.getSkillTrack(specialAttack) == WeaponGlobals.BREAK_ATTACK_SKILL_INDEX:
self.skillDiary.clearHits(specialAttack)
self.guiMgr.combatTray.clearSkillCharge(specialAttack)
else:
self.skillDiary.startRecharging(specialAttack, 0)
self.guiMgr.combatTray.startSkillRecharge(specialAttack)
def d_requestCurrentWeapon(self, currentWeaponId, isWeaponDrawn):
self.sendUpdate('requestCurrentWeapon', [
currentWeaponId,
isWeaponDrawn])
def d_requestCurrentAmmo(self, currentAmmoId):
self.sendUpdate('requestCurrentAmmo', [
currentAmmoId])
def d_requestCurrentCharm(self, currentCharmId):
self.sendUpdate('requestCurrentCharm', [
currentCharmId])
def setCurrentCharm(self, currentCharm):
DistributedPlayerPirate.setCurrentCharm(self, currentCharm)
self.guiMgr.combatTray.skillTray.updateCharmSkills()
def _LocalPirate__drawWeapon(self):
self.guiMgr.combatTray.toggleWeapon(self.currentWeaponId, self.currentWeaponSlotId)
def _LocalPirate__drawWeaponIfTarget(self):
if self.isWeaponDrawn:
return None
if self.cr.targetMgr:
target = self.cr.targetMgr.pickObject()
if target and TeamUtils.damageAllowed(target, self):
self.guiMgr.combatTray.toggleWeapon(self.currentWeaponId, self.currentWeaponSlotId)
def enableMouseWeaponDraw(self):
self.accept('control', self._LocalPirate__drawWeapon)
self.accept('mouse1', self._LocalPirate__drawWeaponIfTarget)
self.accept('mouse2', self._LocalPirate__drawWeapon)
def disableMouseWeaponDraw(self):
self.ignore('control')
self.ignore('mouse1')
self.ignore('mouse2')
def runAuraDetection(self, task):
targets = []
self.areaAuraSphere.reparentTo(self)
self.areaAuraTrav.addCollider(self.areaAuraSphere, self.areaAuraQueue)
self.areaAuraTrav.traverse(self.getRender())
self.areaAuraTrav.removeCollider(self.areaAuraSphere)
self.areaAuraSphere.detachNode()
numEntries = self.areaAuraQueue.getNumEntries()
if numEntries == 0:
pass
for i in range(numEntries):
entry = self.areaAuraQueue.getEntry(i)
potentialTargetColl = entry.getIntoNodePath()
potentialTarget = self.repository.targetMgr.getObjectFromNodepath(potentialTargetColl)
if potentialTarget:
if not TeamUtils.damageAllowed(potentialTarget, self):
potentialTargetId = potentialTarget.getDoId()
targets.append(potentialTargetId)
DistributedPlayerPirate.sendRequestAuraDetection(self, targets)
return Task.again
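# Scheduling sketch (assumed task name, matching the taskMgr.remove call
# above): this method returns Task.again, so it is meant to be re-run
# periodically, e.g.
#   taskMgr.doMethodLater(1.0, self.runAuraDetection,
#                         self.uniqueName('runAuraDetection'))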
def setMoney(self, money, quiet = 0):
if money == None:
inv = self.getInventory()
if inv:
money = inv.getGoldInPocket()
else:
return None
self.guiMgr.setMoney(money)
if money != 0:
gain = money - self.money
if gain > 0 and self._LocalPirate__lootUIEnabled:
if quiet and self.firstMoneyQuieted == 0 and self.gameFSM.getCurrentOrNextState() == 'ParlorGame' and localAvatar.guiMgr.scoreboard and not localAvatar.guiMgr.scoreboard.isEmpty():
self.firstMoneyQuieted = 1
else:
self.guiMgr.messageStack.showLoot([], gold = gain)
self.money = money
inv = self.getInventory()
if inv:
# Reconstructed from mangled decompiler output: request the BuyNewShip
# context once the player's gold passes the threshold for the next ship tier.
shipGoldThresholds = [300, 800, 1000, 3500, 5000, 20000, 40000, 60000]
shipsBought = inv.getStackQuantity(InventoryType.BuyNewShip)
if shipsBought < len(shipGoldThresholds) and self.money >= shipGoldThresholds[shipsBought]:
    self.sendRequestContext(InventoryType.BuyNewShip)
def _setCrewShip(self, ship):
crewShip = self.crewShip
if crewShip is not None and crewShip != ship:
crewShip.hideStatusDisplay()
if self.guiMgr and self.guiMgr.mapPage:
self.guiMgr.mapPage.removeShip(crewShip.doId)
mapObj = crewShip.getMinimapObject()
if mapObj:
mapObj.setAsLocalAvShip(False)
DistributedPlayerPirate._setCrewShip(self, ship)
if ship:
ship.showStatusDisplay()
self.d_requestCurrentIsland(0)
if self.guiMgr and self.guiMgr.mapPage:
pos = base.cr.activeWorld.getWorldPos(ship)
self.guiMgr.mapPage.addShip(ship.getShipInfo(), pos)
mapObj = ship.getMinimapObject()
if mapObj:
mapObj.setAsLocalAvShip(True)
else:
self.b_clearTeleportFlag(PiratesGlobals.TFOnShip)
self.b_clearTeleportFlag(PiratesGlobals.TFNotSameCrew)
self.b_clearTeleportFlag(PiratesGlobals.TFSiegeCaptain)
_setCrewShip = report(types = [
'args',
'deltaStamp',
'module'], dConfigParam = 'shipboard')(_setCrewShip)
def setActiveShipId(self, shipId):
DistributedPlayerPirate.setActiveShipId(self, shipId)
messenger.send('activeShipChange', sentArgs = [
shipId])
setActiveShipId = report(types = [
'args',
'deltaStamp',
'module'], dConfigParam = 'shipboard')(setActiveShipId)
def setReturnLocation(self, returnLocation):
if returnLocation == '1142018473.22dxschafe':
returnLocation = LocationIds.DEL_FUEGO_ISLAND
DistributedPlayerPirate.setReturnLocation(self, returnLocation)
def setIt(inventory, returnLocation = returnLocation):
if inventory:
if __dev__ and not getBase().config.GetBool('login-location-used-setIt', False):
bp.loginCfg()
ConfigVariableBool('login-location-used-setIt').setValue(True)
config_location = getBase().config.GetString('login-location', '').lower()
config_location_uid = PLocalizer.LocationUids.get(config_location)
if config_location and config_location_uid:
self.guiMgr.mapPage.setReturnIsland(config_location_uid)
return None
if inventory.getShipDoIdList():
self.guiMgr.mapPage.setReturnIsland(returnLocation)
else:
self.guiMgr.mapPage.setReturnIsland(LocationIds.PORT_ROYAL_ISLAND)
else:
DistributedInventoryBase.getInventory(self.inventoryId, setIt)
DistributedInventoryBase.getInventory(self.inventoryId, setIt)
def setCurrentIsland(self, islandUid):
DistributedPlayerPirate.setCurrentIsland(self, islandUid)
if self.guiMgr:
if self.guiMgr.mapPage:
self.guiMgr.mapPage.setCurrentIsland(islandUid)
setCurrentIsland = report(types = [
'frameCount',
'args'], dConfigParam = 'map')(setCurrentIsland)
def setJailCellIndex(self, index):
DistributedPlayerPirate.setJailCellIndex(self, index)
messenger.send('localAvatar-setJailCellIndex', [
index])
def setCurrentTarget(self, targetId):
target = self.cr.doId2do.get(targetId)
if target == self.currentTarget:
if TeamUtils.damageAllowed(target, self):
self.requestCombatMusic()
return None
if self.currentTarget:
self.currentTarget.setLocalTarget(0)
if self.currentTarget.state == 'Use':
self.currentTarget.request('Idle')
self.currentTarget = target
if target:
if (not hasattr(target, 'currentDialogMovie') or target.currentDialogMovie == None) and target.hideHpMeterFlag == 0:
target.showHpMeter()
target.setLocalTarget(1)
target.request('Use')
self.cr.interactionMgr.start()
if self.currentTarget and TeamUtils.damageAllowed(self.currentTarget, self):
self.requestCombatMusic()
DistributedPlayerPirate.setCurrentTarget(self, targetId)
def delete(self):
try:
    # delete-once guard (reconstructed from decompiled output):
    # bail out if delete() has already run for this avatar
    self.LocalPirate_deleted
    return
except AttributeError:
    self.LocalPirate_deleted = 1
self.guiMgr.delete()
del self.guiMgr
self.cameraFSM.cleanup()
del self.cameraFSM
del self.currentMouseOver
self.currentAimOver = None
del self.currentSelection
del self.skillDiary
self.ignore('shipRemoved')
self.cr.avatarFriendsManager.reset()
DistributedPlayerPirate.delete(self)
taskMgr.remove(self.uniqueName('questShow'))
taskMgr.remove(self.uniqueName('oceanCheck'))
taskMgr.remove(self.uniqueName('runAuraDetection'))
self.currentStoryQuests = []
LocalAvatar.delete(self)
self.stopAllDefenceEffects()
if self.cloudScudEffect:
self.cloudScudEffect.stopLoop()
self.cloudScudEffect = None
self.questStatus.delete()
del self.questStatus
self._LocalPirate__cleanupGuildDialog()
self._LocalPirate__cleanupMoraleDialog()
del base.localAvatar
del __builtins__['localAvatar']
if __dev__:
del __builtins__['av']
def generateHuman(self, *args, **kwargs):
DistributedPlayerPirate.generateHuman(self, *args, **kwargs)
self.deleteWeaponJoints()
lod2000 = self.getLOD('2000')
if lod2000:
lod2000.flattenStrong()
lod1000 = self.getLOD('1000')
if lod1000:
lod1000.flattenStrong()
self.getWeaponJoints()
self.setLODAnimation(1000, 1000, 0.001)
def generate(self):
base.localAvatar = self
__builtins__['localAvatar'] = self
if __dev__:
__builtins__['av'] = self
DistributedPlayerPirate.generate(self)
def addInvInterest(self):
self.invInterest = self.cr.addTaggedInterest(self.doId, PiratesGlobals.InventoryZone, self.cr.ITAG_AVATAR, 'inventory')
def announceGenerate(self):
base.loadingScreen.tick()
invInterestDelay = base.config.GetInt('delay-inv-interest', 10)
if invInterestDelay > 0:
DelayedCall(self.addInvInterest, delay = invInterestDelay)
else:
self.addInvInterest()
if self.guildId:
self.guildInterest = self.cr.addTaggedInterest(self.cr.guildManager.doId, self.guildId, self.cr.ITAG_AVATAR, 'guild')
else:
self.guildInterest = None
self.nametag.manage(base.marginManager)
self.controlManager.setTag('avId', str(self.getDoId()))
pe = PolylightEffect.make()
brightness = 1.25
darkness = 0.8
pe.setWeight(brightness)
self.node().setEffect(pe)
DistributedPlayerPirate.announceGenerate(self)
self.questStatus = QuestStatus.QuestStatus(self)
posHpr = (0, 0, 0, 0, 0, 0)
self.setPosHpr(*posHpr)
if base.config.GetBool('osd-anim-blends', 0):
self.toggleOsdAnimBlends(True)
self.acceptOnce('generate-%s' % self.getInventoryId(), self.initInventoryGui)
for weaponId in WeaponGlobals.getHumanWeaponTypes():
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), weaponId), self.refreshInventoryWeapons)
for skillId in range(InventoryType.begin_WeaponSkillMelee, InventoryType.end_WeaponSkillMelee):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillCutlass, InventoryType.end_WeaponSkillCutlass):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillPistol, InventoryType.end_WeaponSkillPistol):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillMusket, InventoryType.end_WeaponSkillMusket):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillBayonet, InventoryType.end_WeaponSkillBayonet):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillDagger, InventoryType.end_WeaponSkillDagger):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_SkillSailing, InventoryType.end_SkillSailing):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillCannon, InventoryType.end_ExtendedWeaponSkillCannon):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillDoll, InventoryType.end_WeaponSkillDoll):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for skillId in range(InventoryType.begin_WeaponSkillWand, InventoryType.end_WeaponSkillWand):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), skillId), self.guiMgr.updateSkillUnlock, extraArgs = [
skillId])
for teleportTokenId in range(InventoryType.begin_TeleportToken, InventoryType.end_TeleportToken):
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), teleportTokenId), self.guiMgr.mapPage.updateTeleportIsland, extraArgs = [
teleportTokenId])
self.accept('inventoryAccumulator-%s-%s' % (self.getInventoryId(), InventoryType.OverallRep), self.updateReputation, extraArgs = [
InventoryType.OverallRep])
for repCategory in ReputationGlobals.getReputationCategories():
self.accept('inventoryAccumulator-%s-%s' % (self.getInventoryId(), repCategory), self.updateReputation, extraArgs = [
repCategory])
for unCat in ReputationGlobals.getUnspentCategories():
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), unCat), self.guiMgr.updateUnspent, extraArgs = [
unCat])
self.accept(InventoryGlobals.getCategoryQuantChangeMsg(self.getInventoryId(), InventoryType.ItemTypeConsumable), self.guiMgr.updateTonic)
self.guiMgr.combatTray.updateBestTonic()
self.accept('inventoryQuantity-%s-%s' % (self.getInventoryId(), InventoryType.ShipRepairKit), self.guiMgr.updateShipRepairKit)
self.guiMgr.combatTray.updateShipRepairKits()
taskMgr.add(self.shadowReach, 'shadowReach', priority = 40)
self.accept('enterWater', self.handleWaterIn)
self.accept('againWater', self.handleWaterAgain)
self.accept('exitWater', self.handleWaterOut)
if self.style.getTutorial() < PiratesGlobals.TUT_GOT_COMPASS and not base.config.GetBool('teleport-all', 0):
self.b_setTeleportFlag(PiratesGlobals.TFNoCompass)
if self.style.getTutorial() == PiratesGlobals.TUT_CHAPTER3_STARTED:
if self.chatMgr.noChat:
ct = ChatTutorialAlt.ChatTutorialAlt()
else:
ct = ChatTutorial.ChatTutorial()
if not (self.inPvp):
if self.style.getTutorial() >= PiratesGlobals.TUT_MET_JOLLY_ROGER or self.guiMgr.forceLookout:
self.guiMgr.crewHUD.setHUDOn()
self.guiMgr.crewHUDTurnedOff = False
if not base.launcher.getPhaseComplete(5):
self.b_setTeleportFlag(PiratesGlobals.TFPhaseIncomplete)
self.accept('phaseComplete-5', self.handlePhaseComplete, extraArgs = [
5])
self.accept('InputState-forward', self.checkInputState)
self.accept('InputState-reverse', self.checkInputState)
self.accept('InputState-turnLeft', self.checkInputState)
self.accept('InputState-turnRight', self.checkInputState)
self.accept(WeaponGlobals.LocalAvatarUseItem, self.checkAction)
self.accept(WeaponGlobals.LocalAvatarUseProjectileSkill, self.checkAction)
self.accept(WeaponGlobals.LocalAvatarUseShipSkill, self.checkAction)
self.accept(WeaponGlobals.LocalAvatarUseTargetedSkill, self.checkAction)
self.accept('action', self.checkAction)
self.accept('moustacheFlip', self.handleMoustache)
self.bindAnim([
'idle',
'run',
'walk',
'spin_right',
'spin_left'])
self.ignore('localAvatarVisZoneChanged')
if base.options.getCharacterDetailSetting() in (0, 1):
self.getLODNode().forceSwitch(1)
messenger.send('localPirate-created', [])
DistributedInventoryBase.getInventory(base.localAvatar.inventoryId, self.inventoryArrived)
self.guiMgr.initQuestPage()
def disable(self):
if base.config.GetBool('want-pstats', 0):
taskMgr.remove('avatarPstats')
self.ignore('generate-%s' % self.getInventoryId())
self.ignore(InventoryGlobals.getCategoryQuantChangeMsg(self.getInventoryId(), InventoryType.ItemTypeMoney))
self.ignore('inventoryQuantity-%s-%s' % (self.getInventoryId(), InventoryType.Dinghy))
self.ignore('inventoryAddDoId-%s-%s' % (self.getInventoryId(), InventoryCategory.SHIPS))
self.ignore('inventoryRemoveDoId-%s-%s' % (self.getInventoryId(), InventoryCategory.SHIPS))
self.ignore('control-f3')
self.ignore('shift-f12')
self.ignore('enterWater')
self.ignore('againWater')
self.ignore('exitWater')
self.ignore('phaseComplete-5')
self.ignore(self.cr.getAllInterestsCompleteEvent())
self.ignore('moustacheFlip')
self.cr.removeTaggedInterest(self.invInterest)
self.invInterest = None
if self.guildInterest:
self.cr.removeTaggedInterest(self.guildInterest)
self.guildInterest = None
taskMgr.remove(self.taskName('irisIn'))
self.stopCombatMusic()
self.clearBattleTeleportFlag(send = False)
self.shipList = set()
self.nametag.unmanage(base.marginManager)
del self.invInterest
if self.pendingInitQuest:
DistributedInventoryBase.cancelGetInventory(self.pendingInitQuest)
self.pendingInitQuest = None
if self.openJailDoorTrack:
self.openJailDoorTrack.pause()
self.openJailDoorTrack = None
taskMgr.remove(self.uniqueName('monitorStickyTargets'))
taskMgr.remove('localAvLookAtTarget')
taskMgr.remove(self.uniqueName('setZombie'))
base.talkAssistant.clearHistory()
base.chatPanel.updateDisplay()
self.ignore('InputState-forward')
self.ignore('InputState-backward')
self.ignore('uber-enter')
taskMgr.remove('autoAFK')
self.cleanupLocalProjectiles()
messenger.send('localPirateDisabled')
DistributedPlayerPirate.disable(self)
def inventoryArrived(self, inventory):
self.accept(InventoryGlobals.getCategoryQuantChangeMsg(localAvatar.getInventoryId(), InventoryType.PVPTotalInfamyLand), self.infamyUpdate)
self.accept(InventoryGlobals.getCategoryQuantChangeMsg(localAvatar.getInventoryId(), InventoryType.PVPTotalInfamySea), self.infamyUpdate)
def setBadgeIcon(self, titleId, rank):
DistributedPlayerPirate.setBadgeIcon(self, titleId, rank)
messenger.send('LocalBadgeChanged')
def setShipBadgeIcon(self, titleId, rank):
DistributedPlayerPirate.setShipBadgeIcon(self, titleId, rank)
messenger.send('LocalShipBadgeChanged')
def infamyUpdate(self, task = None):
if localAvatar.badge and len(localAvatar.badge) == 2:
titleId = localAvatar.badge[0]
inventoryType = TitleGlobals.getInventoryType(titleId)
if inventoryType:
exp = localAvatar.getInventory().getStackQuantity(TitleGlobals.getInventoryType(titleId))
realRank = TitleGlobals.getRank(titleId, exp)
if realRank != localAvatar.badge[1]:
localAvatar.sendRequestSetBadgeIcon(titleId, realRank)
if localAvatar.shipBadge and len(localAvatar.shipBadge) == 2:
titleId = localAvatar.shipBadge[0]
inventoryType = TitleGlobals.getInventoryType(titleId)
if inventoryType:
exp = localAvatar.getInventory().getStackQuantity(TitleGlobals.getInventoryType(titleId))
realRank = TitleGlobals.getRank(titleId, exp)
if realRank != localAvatar.shipBadge[1]:
localAvatar.sendRequestSetShipBadgeIcon(titleId, realRank)
messenger.send('LocalAvatarInfamyUpdated')
def clearInventoryInterest(self):
self.removeInterest(self.invInterest, event = self.uniqueName('localAvatar-close-inventory'))
def handlePhaseComplete(self, phase):
DistributedPlayerPirate.handlePhaseComplete(self, phase)
if phase == 5:
self.b_clearTeleportFlag(PiratesGlobals.TFPhaseIncomplete)
from random import *
import numpy as np
import time
import logging
import tkinter as tk
from PIL import Image, ImageTk
import pygame as pygame
#*************************************************************************
# Carte class:
# A card is represented by its value, its suit and the image file
# depicting it.
# The class offers various methods for comparing two cards.
#*************************************************************************
class Carte:
"""Une classe carte rudimentaire définie par \n
- sa valeur : 1 à 10, Valet, Dame, Roi\n
- sa couleur : Carreau, Coeur, Pique, Trèfle\n
- sa figure (le nom du fichier image correspondant)"""
__valeur = 0
__couleur = 0
__figure = ""
def __init__(self, valeur, couleur):
"""String*String->Carte
Construit l'objet Carte avec la valeur et la couleur fournclearie"""
self.Attribuer_Valeur(valeur)
self.Attribuer_Couleur(couleur)
self.__Attribuer_Figure(self.__valeur, self.__couleur)
def Obtenir_Valeur(self):
"""None->String
Retourne la valeur de la carte"""
if self.__valeur < 11:
return str(self.__valeur)
elif self.__valeur == 11:
return "Valet"
elif self.__valeur == 12:
return "Dame"
elif self.__valeur == 13:
return "Roi"
def Obtenir_Couleur(self):
"""None->String
retourne la couleur de la carte"""
if self.__couleur == 0:
return "Carreau"
elif self.__couleur == 1:
return "Coeur"
elif self.__couleur == 2:
return "Pique"
elif self.__couleur == 3:
return "Trèfle"
def Obtenir_Code_Couleur(self):
return self.__couleur
def Obtenir_Figure(self):
"""None->String
Retourne le nom du fichier image correspondant à la carte"""
return self.__figure
def Attribuer_Valeur(self, valeur):
"""String->None
Change la valeur de la carte"""
if valeur == "Valet":
self.__valeur = 11
elif valeur == "Dame":
self.__valeur = 12
elif valeur == "Roi":
self.__valeur = 13
else:
self.__valeur = int(valeur)
self.__Attribuer_Figure(self.__valeur, self.__couleur)
def Attribuer_Couleur(self, couleur):
"""String->None
Change la couleur de la carte"""
if couleur == "Carreau":
self.__couleur = 0
elif couleur == "Coeur":
self.__couleur = 1
elif couleur == "Pique":
self.__couleur = 2
elif couleur == "Trèfle":
self.__couleur = 3
self.__Attribuer_Figure(self.__valeur, self.__couleur)
def __Attribuer_Figure(self, valeur, couleur):
"""String*String->None
Attribue le fichier image en fonction de la valeur et de la couleur"""
#self.__figure = str(self.__valeur*10+self.__couleur)+".jpg"
self.__figure = f"{self.Obtenir_Valeur().lower()}-{self.Obtenir_Couleur().lower()}.png"
def __repr__(self):
"""None->None
Permet d'afficher la carte lors de l'appel par print"""
return "{0}-{1}".format(self.Obtenir_Valeur(), self.Obtenir_Couleur())
def __eq__(self, carte):
return ((self.Obtenir_Couleur() == carte.Obtenir_Couleur()) and (self.Obtenir_Valeur() == carte.Obtenir_Valeur()))
# Methods comparing this card's value (self) against another card passed as input parameter.
# The suit does not count in these comparisons.
def __valeur_eq__(self, carte):
return (self.__valeur == carte.__valeur)
def __valeur_gt__(self, carte):
if self.__valeur_eq__(carte):
return False
# The Ace beats any other card, except when tied with another Ace
if self.__valeur == 1:
return True
elif carte.__valeur == 1:
return False
else:
return (self.__valeur > carte.__valeur)
def __valeur_lt__(self, carte):
if self.__valeur_eq__(carte):
return False
return not (self.__valeur_gt__(carte))
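# Illustration of the Ace rule implemented above:
#   Carte("1", "Pique").__valeur_gt__(Carte("Roi", "Coeur"))  -> True
#   Carte("Roi", "Coeur").__valeur_gt__(Carte("1", "Pique"))  -> False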
#*************************************************************************
# Paquet class:
# A deck is made up of N cards. The constructor takes as input the
# number of cards the deck should contain.
#*************************************************************************
class Paquet:
def __init__(self, nb_cartes):
self.__deck = []
self.__nb_cartes = nb_cartes
couleurs = ["Carreau", "Coeur", "Pique", "Trèfle"]
valeurs = { 52: ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Valet", "Dame", "Roi"],
32: ["1", "7", "8", "9", "10", "Valet", "Dame", "Roi"] }
if self.__nb_cartes in [32, 52]:
for couleur in couleurs:
for valeur in valeurs[self.__nb_cartes]:
self.__deck.append(Carte(valeur, couleur))
else:
while len(self.__deck) < nb_cartes:
rand_couleur = randint(0,len(couleurs)-1)
rand_valeur = randint(0,len(valeurs[52])-1)
new_card = Carte(valeurs[52][rand_valeur], couleurs[rand_couleur])
if not self.Carte_Existe(new_card):
self.__deck.append(Carte(valeurs[52][rand_valeur], couleurs[rand_couleur]))
def Obtenir_nombre_cartes(self):
return self.__nb_cartes
def Obtenir_cartes(self):
return self.__deck
def afficher(self):
print(self.Obtenir_cartes())
def melanger(self):
shuffle(self.Obtenir_cartes())
def Carte_Existe(self, card):
if len(self.__deck) > 0:
for i in range(len(self.__deck)):
if card.__eq__(self.__deck[i]):
return True
return False
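# Quick usage sketch for Paquet:
#   p = Paquet(32)
#   p.melanger()
#   p.afficher()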
#*************************************************************************
# Bataille class:
# Class defining the game as well as all game-related actions
#*************************************************************************
class Bataille:
def __init__(self, nb_cartes):
self.__paquet = Paquet(nb_cartes)
self.__cartes_joueurs = {1:[], 2:[]}
#-----------------------------------------------------------------------
# Initializes a new game
#-----------------------------------------------------------------------
def initialiser_partie(self):
# In DEBUG mode, each player's hand is built by hand.
# Otherwise, half of the shuffled deck is dealt randomly to each player.
if DEBUG:
self.__cartes_joueurs[1] = []
self.__cartes_joueurs[2] = []
couleurs = ["Carreau", "Coeur", "Pique", "Trèfle"]
for valeur in ["5", "9", "Dame", "10", "4", "10"]:
rand_couleur = randint(0,len(couleurs)-1)
self.__cartes_joueurs[1].append(Carte(valeur, couleurs[rand_couleur]))
for valeur in ["8", "9", "Dame", "10", "7", "2"]:
rand_couleur = randint(0,len(couleurs)-1)
self.__cartes_joueurs[2].append(Carte(valeur, couleurs[rand_couleur]))
else:
self.__paquet.melanger()
paquet_complet = self.__paquet.Obtenir_cartes()
# Deal half of the deck to each player
index_milieu = len(paquet_complet) // 2
self.__cartes_joueurs[1] = paquet_complet[:index_milieu]
self.__cartes_joueurs[2] = paquet_complet[index_milieu:]
#-----------------------------------------------------------------------
# Input: none
# Output: the game's deck of cards
#-----------------------------------------------------------------------
def obtenir_paquet(self):
return self.__paquet
#-----------------------------------------------------------------------
# Input: a player number (1 or 2)
# Output: the list representing the given player's cards
#-----------------------------------------------------------------------
def obtenir_cartes_joueur(self, joueur):
return self.__cartes_joueurs[joueur]
#-----------------------------------------------------------------------
# One round of play: takes both players' card lists as input,
# compares the first cards and determines the winner.
# On output, both lists have been updated.
#-----------------------------------------------------------------------
def tour_de_jeu_imperatif(self, cartes_joueur_1, cartes_joueur_2):
if cartes_joueur_1[0].__valeur_gt__(cartes_joueur_2[0]):
cartes_joueur_1.append(cartes_joueur_1[0])
cartes_joueur_1.append(cartes_joueur_2[0])
del cartes_joueur_1[0]
del cartes_joueur_2[0]
elif cartes_joueur_1[0].__valeur_lt__(cartes_joueur_2[0]):
cartes_joueur_2.append(cartes_joueur_1[0])
cartes_joueur_2.append(cartes_joueur_2[0])
del cartes_joueur_1[0]
del cartes_joueur_2[0]
else:
# On a tie: war
index_egalite = 0
while cartes_joueur_1[index_egalite].__valeur_eq__(cartes_joueur_2[index_egalite]):
if len(cartes_joueur_1[index_egalite + 1:]) == 0:
break
elif len(cartes_joueur_2[index_egalite + 1:]) == 0:
break
else:
if cartes_joueur_1[index_egalite + 1].__valeur_eq__(cartes_joueur_2[index_egalite + 1]):
index_egalite +=1
else:
break
logging.debug(f"Indice Egalite: {index_egalite}")
if cartes_joueur_1[index_egalite + 1].__valeur_gt__(cartes_joueur_2[index_egalite + 1]):
for i in range(0, index_egalite + 1):
cartes_joueur_1.append(cartes_joueur_1[i])
cartes_joueur_1.append(cartes_joueur_2[i])
del cartes_joueur_1[i]
del cartes_joueur_2[i]
elif cartes_joueur_1[index_egalite + 1].__valeur_lt__(cartes_joueur_2[index_egalite + 1]):
for i in range(0, index_egalite +1):
cartes_joueur_2.append(cartes_joueur_1[i])
cartes_joueur_2.append(cartes_joueur_2[i])
del cartes_joueur_1[i]
del cartes_joueur_2[i]
#-----------------------------------------------------------------------
# One round of play: takes both players' card lists as input,
# compares the first cards and determines the winner.
# On a tie, the next card is examined, and so on, until the values no
# longer match. As soon as one player has a stronger card, they sweep
# up all the preceding tied cards.
# Recursion is used to keep the implementation of this algorithm simple.
#-----------------------------------------------------------------------
def tour_de_jeu(self, cartes_joueur_1, cartes_joueur_2, nb_batailles=0):
if cartes_joueur_1[0].__valeur_gt__(cartes_joueur_2[0]):
return 1 * (nb_batailles +1)
elif cartes_joueur_1[0].__valeur_lt__(cartes_joueur_2[0]):
return -1 * (nb_batailles +1)
else:
# On a tie: war. Make a recursive call
return self.tour_de_jeu(cartes_joueur_1[1:], cartes_joueur_2[1:], nb_batailles+1)
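# Return convention (as implemented above): a positive result means player 1
# wins the round, a negative one means player 2; the magnitude is the number
# of cards each player hands over (1 with no tie, plus 1 per extra tie).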
def tour_de_jeu2(self, cartes_joueur_1, cartes_joueur_2):
if cartes_joueur_1[0].__valeur_gt__(cartes_joueur_2[0]):
return 1, 1
elif cartes_joueur_1[0].__valeur_lt__(cartes_joueur_2[0]):
return 2, 1
else:
# On a tie: war. Make a recursive call
resultat = self.tour_de_jeu(cartes_joueur_1[1:], cartes_joueur_2[1:])
return resultat[0], 1 + resultat[1]
#------------------------------------------------------------------
# Returns the winning player (1 or 2).
# Returns 0 if there is no winner yet.
#------------------------------------------------------------------
def gagnant(self):
if len(self.__cartes_joueurs[1]) == 0:
return 2
elif len(self.__cartes_joueurs[2]) == 0:
return 1
else:
return 0
#------------------------------------------------------------------
# Returns True if the game is over, False otherwise.
#------------------------------------------------------------------
def partie_finie(self):
if self.gagnant() == 0:
return False
else:
return True
#------------------------------------------------------------------
# Start a game
#------------------------------------------------------------------
def commencer_partie(self):
self.initialiser_partie()
logging.info("Commencons le jeu")
# Commencer la bataille jusqu'à ce qu'un des joueurs n'ait plus de cartes
i = 1
while not self.partie_finie():
input(f"Appuyer sur une touche pour le round {i}")
logging.info(f"Joueur 1:{self.__cartes_joueurs[1]} -- Joueur 2:{self.__cartes_joueurs[2]}")
resultat = self.tour_de_jeu(self.obtenir_cartes_joueur(1), self.obtenir_cartes_joueur(2))
logging.info(f"Resultat: {resultat}")
if resultat > 0:
for j in range(0, resultat):
self.obtenir_cartes_joueur(1).append(self.obtenir_cartes_joueur(1)[0])
self.obtenir_cartes_joueur(1).append(self.obtenir_cartes_joueur(2)[0])
del self.obtenir_cartes_joueur(1)[0]
del self.obtenir_cartes_joueur(2)[0]
elif resultat < 0:
for j in range(0, (resultat * -1 )):
self.obtenir_cartes_joueur(2).append(self.obtenir_cartes_joueur(1)[0])
self.obtenir_cartes_joueur(2).append(self.obtenir_cartes_joueur(2)[0])
del self.obtenir_cartes_joueur(1)[0]
del self.obtenir_cartes_joueur(2)[0]
'''
joueur_gagnant = resultat[0]
nb_cartes_gagnees = resultat[1]
if joueur_gagnant == 1:
joueur_perdant = 2
else:
joueur_perdant = 1
for i in range(0, nb_cartes_gagnees):
self.obtenir_cartes_joueur(joueur_gagnant).append(self.obtenir_cartes_joueur(joueur_gagnant)[0])
self.obtenir_cartes_joueur(joueur_gagnant).append(self.obtenir_cartes_joueur(joueur_perdant)[0])
del self.obtenir_cartes_joueur(joueur_gagnant)[0]
del self.obtenir_cartes_joueur(joueur_perdant)[0]
'''
logging.info(f"Joueur 1:{self.__cartes_joueurs[1]} -- Joueur 2:{self.__cartes_joueurs[2]}")
i += 1
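# Minimal console run (sketch; assumes the DEBUG global referenced above is
# defined elsewhere in this module):
#   logging.basicConfig(level=logging.INFO)
#   jeu = Bataille(32)
#   jeu.commencer_partie()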
#*************************************************************************
# BatailleGraphique class:
# This class handles the entire graphical side of the game but uses the
# Bataille class as the game engine itself. All game rules and card
# initialization therefore live in the Bataille class.
#*************************************************************************
class BatailleGraphique():
# ------------------------------------------------------------------
# Initialization Functions:
# ------------------------------------------------------------------
def __init__(self):
self.jeu= Bataille(0)
self.window = tk.Tk()
self.debug_mode = tk.BooleanVar(value=DEBUG)
self.window.title('Bataille')
self.new_button = tk.Button()
self.exit_button = tk.Button()
self.width=1024
self.height=600
self.canvas = tk.Canvas(self.window, width=self.width, height=self.height)
self.command_canvas = tk.Canvas(self.window, width=self.width, height=50, bd=1, relief='groove')
self.canvas.pack()
self.command_canvas.pack()
self.initialise_gui()
def debug_changed(self):
print(f"Changed: {self.debug_mode.get()}")
global DEBUG
DEBUG = self.debug_mode.get()
if self.debug_mode.get():
self.canvas.create_text(self.width/2, 20, font="cmr 24 bold", fill="Black", text="Mode DEBUG", tags=["debug"])
else:
self.canvas.delete("debug")
def initialise_gui(self):
reduction = 0.65
bg_dimension = int(1500*reduction), int(791*reduction)
las_vegas
#
# The MIT License (MIT)
#
# This file is part of RLScore
#
# Copyright (c) 2008 - 2016 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from scipy import sparse
from numpy import float64, loadtxt
import numpy as np
def read_folds(fname):
""" Reads a list of fold index lists.
Format: let the training set indices range from 0... n_samples-1. Each line
in a fold file should contain a subset of these indices corresponding to a
single fold. For example, let n_samples = 11, then:
0 3 4 8
1 5 9 10
2 6 7
would correspond to a fold-file with three folds, with first and second fold
containing 4, and last one 3 instances. The reader would return the list
[[0,3,4,8],[1,5,9,10],[2,6,7]]
Parameters
----------
fname : string
input file name
Returns
-------
folds : a list of lists, each containing the indices corresponding to a single fold
"""
f = open(fname)
folds = []
for i, line in enumerate(f):
#We allow comments starting with #
cstart = line.find("#")
if cstart != -1:
line = line[:cstart]
fold = []
foldset = set([])
line = line.strip().split()
for x in line:
try:
index = int(x)
except ValueError:
raise Exception("Error when reading in fold file: malformed index on line %d in the fold file: %s" % (i + 1, x))
if index < 0:
raise Exception("Error when reading in fold file: negative index on line %d in the fold file: %d" % (i + 1, index))
if index in foldset:
raise Exception("Error when reading in fold file: duplicate index on line %d in the fold file: %d" % (i + 1, index + 1))
fold.append(index)
foldset.add(index)
folds.append(fold)
f.close()
return folds
def read_sparse(fname, fdim=None):
"""Reads in a sparse n x m matrix from a file with n rows.
Format is of the type 0:1.5 3:4.2 7:1.1 ...
with each line containing index:value pairs with indices
ranging from 0...n_features-1, and only indices with non-zero values
being present in the file.
Parameters
----------
fname : string
input file name
fdim: int
number of dimensions, if None estimated from data file
Returns
-------
X : sparse matrix (csr)
"""
#each row represents an instance, each column a feature
f = open(fname)
rows = []
columns = []
values = []
linecounter = 0
for line in f:
linecounter += 1
#Empty lines and commented lines are passed over
if len(line.strip()) == 0 or line[0] == '#':
print("Warning: no inputs on line %d" % linecounter)
continue
line = line.split("#",1)
attributes = line[0].split()
previous = -1
#Attributes indices must be positive integers in an ascending order,
#and the values must be real numbers.
for att_val in attributes:
if len(att_val.split(":")) != 2:
raise Exception("Error when reading in feature file: feature:value pair %s on line %d is not well-formed\n" % (att_val, linecounter))
index, value = att_val.split(":")
try:
index = int(index)
value = float(value)
if value != 0. and (fdim is None or index < fdim):
columns.append(index)
rows.append(linecounter-1)
values.append(value)
except ValueError:
raise Exception("Error when reading in feature file: feature:value pair %s on line %d is not well-formed\n" % (att_val, linecounter))
if not index > previous:
raise Exception("Error when reading in feature file: line %d features must be in ascending order\n" % (linecounter))
previous = index
#That's all folks
if fdim is None:
X = sparse.coo_matrix((values,(rows,columns)), dtype=float64)
else:
rdim = np.max(rows)+1
X = sparse.coo_matrix((values,(rows,columns)), (rdim, fdim), dtype=float64)
X = X.tocsr()
f.close()
return X
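# Example (illustrative): a file whose only line is "0:1.5 3:4.2" yields a
# 1 x 4 CSR matrix (or 1 x fdim when fdim is given):
#   X = read_sparse('data.txt')
#   X.toarray()  # -> [[1.5, 0., 0., 4.2]]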
def read_svmlight(fname, fdim=None):
""" Loads examples from an SVM-light format data file. The
file contains attributes, one label per example and optionally qids.
Parameters
----------
fname : string
input file name
fdim: int
number of dimensions, if None estimated from data file
Returns
-------
tuple : (X, Y, Q)
X : sparse matrix (csr), shape = [n_samples, n_features]
Y : ndarray, shape = [n_samples, n_labels]
Q : list of qid strings (one per sample), or None if the file has no qids
"""
f = open(fname)
#some interesting statistics are calculated
labelcount = None
linecounter = 0
feaspace_dim = 0
#Features, labels, comments and possibly qids are later returned to caller
#The indexing, with respect to the instances, is the same in all the lists.
qids = None
rows = []
columns = []
values = []
all_outputs = []
#Each line in the source represents an instance
for linenumber, line in enumerate(f):
if line[0] == "#" or line.strip() == "":
continue
linecounter += 1
line = line.split('#')
line = line[0].split()
labels = line.pop(0)
if line[0].startswith("qid:"):
qid = line.pop(0)[4:]
if qids is None:
if linecounter > 1:
raise Exception("Error when reading in SVMLight file: Line %d has a qid, previous lines did not have qids defined" % (linenumber))
else:
qids = [qid]
else:
qids.append(qid)
else:
if qids is not None:
raise Exception("Error when reading in SVMLight file: Line %d has no qid, previous lines had qids defined" % (linenumber))
attributes = line
#Multiple labels are allowed, but each instance must have the
#same amount of them. Labels must be real numbers.
labels = labels.split("|")
if labelcount is None:
labelcount = len(labels)
#Check that the number of labels is the same for all instances
#and that the labels are real valued numbers.
else:
if labelcount != len(labels):
raise Exception("Error when reading in SVMLight file: Number of labels assigned to instances differs.\n First instance had %d labels whereas instance on line %d has %d labels\n" % (labelcount, linenumber, len(labels)))
label_list = []
#We check that the labels are real numbers and gather them
for label in labels:
try:
label = float(label)
label_list.append(label)
except ValueError:
raise Exception("Error when reading in SVMLight file: label %s on line %d not a real number\n" % (label, linenumber))
all_outputs.append(label_list)
previous = 0
#Attributes indices must be positive integers in an ascending order,
#and the values must be real numbers.
for att_val in attributes:
if len(att_val.split(":")) != 2:
raise Exception("Error when reading in SVMLight file: feature:value pair %s on line %d is not well-formed\n" % (att_val, linenumber))
index, value = att_val.split(":")
try:
index = int(index)
value = float(value)
if value != 0. and (fdim is None or index < fdim):
#rows.append(index-1)
rows.append(linecounter-1)
#columns.append(linecounter-1)
columns.append(index-1)
values.append(value)
except ValueError:
raise Exception("Error when reading in SVMLight file: feature:value pair %s on line %d is not well-formed\n" % (att_val, linecounter))
if not index > previous:
raise Exception("Error when reading in SVMLight file: line %d features must be in ascending order\n" % (linecounter))
previous = index
if index > feaspace_dim:
feaspace_dim = index
if fdim is not None:
feaspace_dim = fdim
X = sparse.coo_matrix((values,(rows,columns)),(linecounter, feaspace_dim), dtype=float64)
X = X.tocsr()
Y = np.array(all_outputs)
return X, Y, qids
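# Example (illustrative) for SVM-light lines such as "1 qid:7 1:0.5 4:2.0":
#   X, Y, qids = read_svmlight('train.dat')
#   # X: csr matrix, Y: shape (n_samples, 1), qids: ['7', ...] or None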
def read_preferences(fname):
"""Reads a pairwise preferences file, used typically with ranking
Parameters
----------
fname : string
input file name
Returns
-------
data : n x 2 -dimensional numpy array containing pairwise preferences one pair per row, i.e. the data point
r = do p <= 5;
ASSERT(r, True, "High client proxy transaction errors", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal proxy transaction errors (> 5% client proxy transactions). Please run 'show statistics namespace like client_proxy' to see values.",
"High proxy transaction error check");
warning_breached = do p > 5;
r = do p <= error_pct_threshold;
r = do r || warning_breached;
ASSERT(r, True, "Non-zero client proxy transaction errors", "OPERATIONS", INFO,
"Listed namespace[s] show non-zero proxy transaction errors. Please run 'show statistics namespace like client_proxy' to see values.",
"Non-zero proxy transaction error check");
t = select "client_proxy_timeout" from NAMESPACE.STATISTICS save;
t = group by CLUSTER, NAMESPACE t;
r = do t/total_client_proxy;
r = do r * 100 save as "client_proxy_timeout % of total proxy transactions";
r = do r <= 5;
ASSERT(r, True, "High client proxy transaction timeouts", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal proxy transaction timeouts (> 5% client proxy transactions). Please run 'show statistics namespace like client_proxy' to see values.",
"High proxy transaction timeouts check");
// UDF Transaction statistics
s = select "client_udf_complete" as "cnt" from NAMESPACE.STATISTICS;
t = select "client_udf_timeout" as "cnt" from NAMESPACE.STATISTICS;
e = select "client_udf_error" as "cnt" from NAMESPACE.STATISTICS;
total_udf_transactions = do s + t;
total_udf_transactions = do total_udf_transactions + e save as "total udf transactions";
total_udf_transactions_per_sec = do total_udf_transactions/u;
total_udf_transactions = group by CLUSTER, NAMESPACE, NODE do MAX(total_udf_transactions);
total_udf_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_udf_transactions_per_sec);
e = select "client_udf_error" from NAMESPACE.STATISTICS save;
e = do e/u save as "errors per second (by using uptime)";
e = group by CLUSTER, NAMESPACE e;
p = do e/total_udf_transactions_per_sec;
p = do p * 100 save as "client_udf_error % of total udf transactions";
r = do p <= 5;
ASSERT(r, True, "High udf transaction errors", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal udf transaction errors (> 5% udf transactions). Please run 'show statistics namespace like client_udf' to see values.",
"High udf transaction error check");
warning_breached = do p > 5;
r = do p <= error_pct_threshold;
r = do r || warning_breached;
ASSERT(r, True, "Non-zero udf transaction errors", "OPERATIONS", INFO,
"Listed namespace[s] show non-zero udf transaction errors. Please run 'show statistics namespace like client_udf' to see values.",
"Non-zero udf transaction error check");
t = select "client_udf_timeout" from NAMESPACE.STATISTICS save;
t = group by CLUSTER, NAMESPACE t;
r = do t/total_udf_transactions;
r = do r * 100 save as "client_udf_timeout % of total udf transactions";
r = do r <= 5;
ASSERT(r, True, "High udf transaction timeouts", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal udf transaction timeouts (> 5% udf transaction). Please run 'show statistics namespace like client_udf' to see values.",
"High udf transaction timeouts check");
// UDF Sub-Transaction statistics
s = select "udf_sub_udf_complete" as "cnt" from NAMESPACE.STATISTICS;
t = select "udf_sub_udf_timeout" as "cnt" from NAMESPACE.STATISTICS;
e = select "udf_sub_udf_error" as "cnt" from NAMESPACE.STATISTICS;
total_udf_sub_transactions = do s + t;
total_udf_sub_transactions = do total_udf_sub_transactions + e save as "total udf sub-transactions";
total_udf_sub_transactions_per_sec = do total_udf_sub_transactions/u;
total_udf_sub_transactions = group by CLUSTER, NAMESPACE, NODE do MAX(total_udf_sub_transactions);
total_udf_sub_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_udf_sub_transactions_per_sec);
e = select "udf_sub_udf_error" from NAMESPACE.STATISTICS save;
e = do e/u save as "errors per second (by using uptime)";
e = group by CLUSTER, NAMESPACE e;
p = do e/total_udf_sub_transactions_per_sec;
p = do p * 100 save as "udf_sub_udf_error % of total udf sub-transactions";
r = do p <= 5;
ASSERT(r, True, "High udf sub-transaction errors", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal udf sub-transaction errors (> 5% udf sub-transactions). Please run 'show statistics namespace like udf_sub_udf' to see values.",
"High udf sub-transaction error check");
warning_breached = do p > 5;
r = do p <= error_pct_threshold;
r = do r || warning_breached;
ASSERT(r, True, "Non-zero udf sub-transaction errors", "OPERATIONS", INFO,
"Listed namespace[s] show non-zero udf sub-transaction errors. Please run 'show statistics namespace like udf_sub_udf' to see values.",
"Non-zero udf sub-transaction error check");
t = select "udf_sub_udf_timeout" from NAMESPACE.STATISTICS save;
t = group by CLUSTER, NAMESPACE t;
r = do t/total_udf_sub_transactions;
r = do r * 100 save as "udf_sub_udf_timeout % of total udf sub-transactions";
r = do r <= 5;
ASSERT(r, True, "High udf sub-transaction timeouts", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal udf sub-transaction timeouts (> 5% udf sub-transaction). Please run 'show statistics namespace like udf_sub_udf' to see values.",
"High udf sub-transaction timeouts check");
// Proxied Batch-index Sub-Transaction statistics
s = select "batch_sub_proxy_complete" as "cnt" from NAMESPACE.STATISTICS;
t = select "batch_sub_proxy_error" as "cnt" from NAMESPACE.STATISTICS;
e = select "batch_sub_proxy_timeout" as "cnt" from NAMESPACE.STATISTICS;
total_transactions = do s + t;
total_transactions = do total_transactions + e save as "total batch-index sub-transactions";
total_transactions_per_sec = do total_transactions/u;
total_transactions = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions);
total_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions_per_sec);
e = select "batch_sub_proxy_error" from NAMESPACE.STATISTICS save;
e = do e/u save as "errors per second (by using uptime)";
e = group by CLUSTER, NAMESPACE e;
p = do e/total_transactions_per_sec;
p = do p * 100 save as "batch_sub_proxy_error % of total batch-index sub-transactions";
r = do p <= 5;
ASSERT(r, True, "High batch-index sub-transaction errors", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal batch-index sub-transaction errors (> 5% batch-index sub-transactions). Please run 'show statistics namespace like batch_sub_proxy' to see values.",
"High batch-index sub-transaction error check");
warning_breached = do p > 5;
r = do p <= error_pct_threshold;
r = do r || warning_breached;
ASSERT(r, True, "Non-zero batch-index sub-transaction errors", "OPERATIONS", INFO,
"Listed namespace[s] show non-zero batch-index sub-transaction errors. Please run 'show statistics namespace like batch_sub_proxy' to see values.",
"Non-zero batch-index sub-transaction error check");
t = select "batch_sub_proxy_timeout" from NAMESPACE.STATISTICS save;
t = group by CLUSTER, NAMESPACE t;
r = do t/total_transactions;
r = do r * 100 save as "batch_sub_proxy_timeout % of total batch-index sub-transactions";
r = do r <= 5;
ASSERT(r, True, "High batch-index sub-transaction timeouts", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal batch-index sub-transaction timeouts (> 5% batch-index sub-transaction). Please run 'show statistics namespace like batch_sub_proxy' to see values.",
"High batch-index sub-transaction timeouts check");
// Batch-index read Sub-Transaction statistics
nf = select "batch_sub_read_not_found" as "cnt" from NAMESPACE.STATISTICS;
s = select "batch_sub_read_success" as "cnt" from NAMESPACE.STATISTICS;
t = select "batch_sub_read_timeout" as "cnt" from NAMESPACE.STATISTICS;
e = select "batch_sub_read_error" as "cnt" from NAMESPACE.STATISTICS;
total_transactions = do s + nf;
total_transactions = do total_transactions + t;
total_transactions = do total_transactions + e save as "total batch-index read sub-transactions";
total_transactions_per_sec = do total_transactions/u;
total_transactions = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions);
total_transactions_per_sec = group by CLUSTER, NAMESPACE, NODE do MAX(total_transactions_per_sec);
e = select "batch_sub_read_error" from NAMESPACE.STATISTICS save;
e = do e/u save as "errors per second (by using uptime)";
e = group by CLUSTER, NAMESPACE e;
p = do e/total_transactions_per_sec;
p = do p * 100 save as "batch_sub_read_error % of total reads";
r = do p <= 5;
ASSERT(r, True, "High batch-index read sub-transaction errors", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal batch-index read sub-transaction errors (> 5% batch-index read sub-transactions). Please run 'show statistics namespace like batch_sub_read' to see values.",
"High batch-index read sub-transaction error check");
warning_breached = do p > 5;
r = do p <= error_pct_threshold;
r = do r || warning_breached;
ASSERT(r, True, "Non-zero batch-index read sub-transaction errors", "OPERATIONS", INFO,
"Listed namespace[s] show non-zero batch-index read sub-transaction errors. Please run 'show statistics namespace like batch_sub_read' to see values.",
"Non-zero batch-index read sub-transaction error check");
t = select "batch_sub_read_timeout" from NAMESPACE.STATISTICS save;
t = group by CLUSTER, NAMESPACE t;
r = do t/total_transactions;
r = do r * 100 save as "batch_sub_read_timeout % of total batch-index read sub-transactions";
r = do r <= 5;
ASSERT(r, True, "High batch-index read sub-transaction timeouts", "OPERATIONS", WARNING,
"Listed namespace[s] show higher than normal batch-index read sub-transaction timeouts (> 5% batch-index read sub-transactions). Please run 'show statistics namespace like batch_sub_read' to see values.",
"High batch-index read sub-transaction timeouts check");
c = select "batch_sub_read_not_found" from NAMESPACE.STATISTICS save;
c = group by CLUSTER, NAMESPACE c;
r = do c / total_transactions;
r = do r * 100 save as "batch_sub_read_not_found % of total batch-index read sub-transactions";
r = do r <= 20;
ASSERT(r, True, "High batch-index read sub-transaction not found errors", "OPERATIONS", INFO,
"Listed namespace[s] show higher than normal batch-index read sub-transaction not found errors (> 20% batch-index read sub-transactions). Please run 'show statistics namespace like batch_sub_read' to see values.",
"High batch-index read sub-transaction not found error check");
// Client UDF Transaction statistics
rs = select "client_lang_read_success" as "cnt" from NAMESPACE.STATISTICS;
ds = select "client_lang_delete_success" as "cnt" from NAMESPACE.STATISTICS;
ws = select "client_lang_write_success" as "cnt" from NAMESPACE.STATISTICS;
e = select "client_lang_error" as "cnt" from NAMESPACE.STATISTICS;
total_client_udf_transactions = do rs + ds;
total_client_udf_transactions = do total_client_udf_transactions + ws;
total_client_udf_transactions = do total_client_udf_transactions + e save as "total client_lang";
total_client_udf_transactions_per_sec = do total_client_udf_transactions/u;
#
# VV_ORDER constants
#
# Specify whether data in the `GXVV <geosoft.gxapi.GXVV>` is increasing or
# decreasing.
#: There is no specific data size ordering in the `GXVV <geosoft.gxapi.GXVV>`.
VV_ORDER_NONE = 0
#: Every value is greater than or equal to the previous value.
VV_ORDER_INCREASING = 1
#: Every value is less than or equal to the previous value.
VV_ORDER_DECREASING = 2
#
# VV_SORT constants
#
# Sort order
#: Ascending
VV_SORT_ASCENDING = 0
#: Descending
VV_SORT_DESCENDING = 1
#
# VV_WINDOW constants
#
# How to handle `GXVV <geosoft.gxapi.GXVV>` limits
#: Dummy values outside the limits
VV_WINDOW_DUMMY = 0
#: Set values outside the limits to the limits
VV_WINDOW_LIMIT = 1
#
# GXWA Constants
#
#
# WA_ENCODE constants
#
# `GXWA <geosoft.gxapi.GXWA>` Encode defines
#: Current ANSI code page (conversion from UTF-8 data; if an existing BOM header is found with `WA_APPEND <geosoft.gxapi.WA_APPEND>`,
#: encoding will switch to `WA_ENCODE_UTF8 <geosoft.gxapi.WA_ENCODE_UTF8>`)
WA_ENCODE_ANSI = 0
#: Write all data without any conversion check
WA_ENCODE_RAW = 1
#: :ref:`UTF8` (If no existing BOM header is found with `WA_APPEND <geosoft.gxapi.WA_APPEND>`, encoding will switch to `WA_ENCODE_ANSI <geosoft.gxapi.WA_ENCODE_ANSI>`)
WA_ENCODE_UTF8 = 2
#: :ref:`UTF8` w.o. header (will assume :ref:`UTF8` encoding if `WA_APPEND <geosoft.gxapi.WA_APPEND>` is used)
WA_ENCODE_UTF8_NOHEADER = 3
#: UTF16 w.o. header (will assume UTF16 encoding if `WA_APPEND <geosoft.gxapi.WA_APPEND>` is used)
WA_ENCODE_UTF16_NOHEADER = 4
#
# WA_OPEN constants
#
# `GXWA <geosoft.gxapi.GXWA>` Open defines
#: Create new file
WA_NEW = 0
#: Append to existing file
WA_APPEND = 1
#
# GXACQUIRE Constants
#
#
# ACQUIRE_SEL constants
#
# Type of Selection
#: Holes
ACQUIRE_SEL_HOLES = 0
#: Point
ACQUIRE_SEL_POINT = 1
#
# GXARCDB Constants
#
#
# ARC_SELTBL_TYPE constants
#
# Describes what kind of table was selected
#: Standalone Table
ARC_SELTBL_STANDALONE = 0
#: Feature Layer
ARC_SELTBL_FEATURELAYER = 1
#: User Canceled
ARC_SELTBL_CANCELED = -1
#
# GXARCDH Constants
#
#
# GXARCMAP Constants
#
#
# ARCMAP_LOAD_FLAGS constants
#
# Flags that can be combined and passed to iLoadMap_ARCMAP (see the combination sketch after these constants)
#: If an existing frame is found delete it
ARCMAP_LOAD_DELFRAME = 1
#: If an existing layer is found delete it
ARCMAP_LOAD_DELLAYER = 2
#: If an existing frame is found add new layers to it
ARCMAP_LOAD_EXISTFRAME = 4
#: If an existing layer is found make a copy
ARCMAP_LOAD_COPYLAYER = 8
#: Hide all other existing layers in frame
ARCMAP_LOAD_HIDESIBLINGS = 16
#: Prefix the map filename part as part of the frame name
ARCMAP_LOAD_PREFIXMAPFRAME = 32
#: Prefix the map filename part as part of the layer name
ARCMAP_LOAD_PREFIXMAPLAYER = 64
#: Will render all views in single layer with the data view defining the coordinate system
ARCMAP_LOAD_MERGETOSINGLEVIEW = 128
#: Load everything into the current data frame
ARCMAP_LOAD_INTOCURRENTFRAME = 256
#: Use the map only for sizing data frames in layout, only load extra datasets.
ARCMAP_LOAD_NOMAPLAYERS = 512
#: Activates the main quickmap layer when done (e.g. 3D Viewer)
ARCMAP_LOAD_ACTIVATE = 1024
#: New method for loading maps introduced in 7.1. Will mimic what happens in montaj (i.e. base groups and 3D become graphics, and views get split into separate LYRs).
ARCMAP_LOAD_NEW = 2048
#: Use a provided name tag as prefix when naming a newly created map layer.
ARCMAP_LOAD_NAMETAGISPREFIX = 4096
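# Example (hypothetical usage, not part of the API reference): the flags are
# powers of two, so several can be OR-ed into the single flags argument of
# iLoadMap_ARCMAP and tested individually with bitwise AND:
#
#   flags = ARCMAP_LOAD_DELFRAME | ARCMAP_LOAD_HIDESIBLINGS   # 1 | 16 == 17
#   if flags & ARCMAP_LOAD_HIDESIBLINGS:
#       pass   # sibling layers will be hidden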
#
# GXARCPY Constants
#
#
# GXARCSYS Constants
#
#
# GXBIGRID Constants
#
#
# GXCHIMERA Constants
#
#
# CHIMERA_MAX_CHAN constants
#
# Maximum channels in Chimera database
#: Chimera max chan
CHIMERA_MAX_CHAN = 128
#
# CHIMERA_PLOT constants
#
# Chimera plot type
#: Rose
CHIMERA_PLOT_ROSE = 0
#: Pie
CHIMERA_PLOT_PIE = 1
#: Bar
CHIMERA_PLOT_BAR = 2
#
# GXCOM Constants
#
#
# COM_BAUD constants
#
# Connection Speed
#: 110
COM_BAUD_110 = 0
#: 300
COM_BAUD_300 = 1
#: 600
COM_BAUD_600 = 2
#: 1200
COM_BAUD_1200 = 3
#: 2400
COM_BAUD_2400 = 4
#: 4800
COM_BAUD_4800 = 5
#: 9600
COM_BAUD_9600 = 6
#: 14400
COM_BAUD_14400 = 7
#: 19200
COM_BAUD_19200 = 8
#: 56000
COM_BAUD_56000 = 9
#: 57600
COM_BAUD_57600 = 10
#: 115200
COM_BAUD_115200 = 11
#: 128000
COM_BAUD_128000 = 12
#: 256000
COM_BAUD_256000 = 13
#: 38400
COM_BAUD_38400 = 14
#
# COM_DATASIZE constants
#
# Data Bits
#: Five
COM_DATASIZE_FIVE = 5
#: Six
COM_DATASIZE_SIX = 6
#: Seven
COM_DATASIZE_SEVEN = 7
#: Eight
COM_DATASIZE_EIGHT = 8
#
# COM_FLOWCONTROL constants
#
# Flow Control Options
#: None
COM_FLOWCONTROL_NONE = 0
#: Rts cts
COM_FLOWCONTROL_RTS_CTS = 1
#: Dtr dsr
COM_FLOWCONTROL_DTR_DSR = 2
#: Xon xoff
COM_FLOWCONTROL_XON_XOFF = 3
#
# COM_PARITY constants
#
# Parity
#: Even
COM_PARITY_EVEN = 0
#: Nark
COM_PARITY_NARK = 1
#: None
COM_PARITY_NONE = 2
#: Odd
COM_PARITY_ODD = 3
#: Space
COM_PARITY_SPACE = 4
#
# COM_STOPBITS constants
#
# Stop Bits
#: One
COM_STOPBITS_ONE = 0
#: One5
COM_STOPBITS_ONE5 = 1
#: Two
COM_STOPBITS_TWO = 2
#
# GXDCOL Constants
#
#
# BRIGHT constants
#
# Brightness type
#: Set the brightness of all the layers
BRIGHT_ALL = 0
#: Set the brightness of the current layer
BRIGHT_LAYER = 1
#
# BRIGHTNESS_TYPES constants
#
# Brightness support types
#: Can set the brightness only for object as a whole
BRIGHTNESS_ALL = 0
#: Can set the brightness for object as a whole and for individual layers
BRIGHTNESS_ALL_AND_LAYERS = 1
#
# DCOL_TYPE constants
#
# Layer type
#: Unknown
DCOL_TYPE_UNKNOWN = 0
#: Grid
DCOL_TYPE_GRID = 1
#: Symbols
DCOL_TYPE_SYMBOLS = 2
#: Voxel
DCOL_TYPE_VOXEL = 3
#: Vector voxel
DCOL_TYPE_VECTOR_VOXEL = 4
#
# GXDGW Constants
#
#
# DGW_OBJECT constants
#
# Dialog object defines
#   INFO TYPE    EDIT   FEDIT  LEDIT  CEDIT  EBUT
#   =========    =====  =====  =====  =====  =====
#   LABEL        RW     RW     RW     RW     RW     R - can use GetInfo_DGW
#   TEXT         RW     RW     RW     RW     .      W - can use `set_info <geosoft.gxapi.GXDGW.set_info>`
#   PATH         .      RW     .      .      .
#   FILEPATH     .      RW     .      .      .
#   LISTVAL      .      .      R      .      .
#   LISTALIAS    .      .      RW     .      .
#: The text label tied to each Dialog component.
DGW_LABEL = 0
#: The edit field text.
DGW_TEXT = 1
#: The file edit path.
DGW_PATH = 2
#: The complete file name, path included.
DGW_FILEPATH = 3
#: The alias value associated with the list entry.
DGW_LISTVAL = 4
#: The text value associated with the list entry.
DGW_LISTALIAS = 5
#: The extension associated with a filename entry.
DGW_EXT = 7
#: Hide the button or entry and its label, if string is not "0"
DGW_HIDE = 8
#
# GXDH Constants
#
#
# DH_DEFAULT_FILENAMES constants
#
# Default filenames
#: Dh default rockcode file
DH_DEFAULT_ROCKCODE_FILE = "agso.csv"
#: Dh default structurecode file
DH_DEFAULT_STRUCTURECODE_FILE = "structcodes.csv"
#
# STR_DH_HOLES constants
#
# This declares the size of the string used in various
# `GXDH <geosoft.gxapi.GXDH>` GXs to store all the currently selected holes, as input to the two-panel
# selection tool. This should be big enough for 65,000 16-character hole names!
#: Str dh holes
STR_DH_HOLES = 1048576
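# (Arithmetic behind the size above: 65,000 names x 16 characters each is
# 1,040,000 characters, which fits within 1048576 = 2**20.)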
#
# DH_COMP_CHOICE constants
#
# Composition
#: User is done
DH_COMP_DONE = 0
#: User canceled
DH_COMP_CANCEL = -1
#: User chose to select an interval interactively
DH_COMP_SELECT = 1
#: User chose to refresh
DH_COMP_REFRESH = 2
#
# DH_COMPSTDB_HOLSEL constants
#
# Composite Hole Selection
#: All
DH_COMPSTDB_HOLSEL_ALL = 0
#: Selected
DH_COMPSTDB_HOLSEL_SELECTED = 1
#
# DH_COMPSTDB_INTSEL constants
#
# Composite Interval
#: Fixed
DH_COMPSTDB_INTSEL_FIXED = 0
#: Lithology
DH_COMPSTDB_INTSEL_LITHOLOGY = 1
#: Bestfitlith
DH_COMPSTDB_INTSEL_BESTFITLITH = 2
#: Intfile
DH_COMPSTDB_INTSEL_INTFILE = 3
#
# DH_DATA constants
#
# What to import
#: Dipazimuth
DH_DATA_DIPAZIMUTH = 0
#: Eastnorth
DH_DATA_EASTNORTH = 1
#: Fromto
DH_DATA_FROMTO = 2
#: Point
DH_DATA_POINT = 3
#: Collar
DH_DATA_COLLAR = 4
#: The type is not known
DH_DATA_UNKNOWN = 100
#
# DH_DEFINE_PLAN constants
#
# Plans
#: Dh define plan
DH_DEFINE_PLAN = 1
#
# DH_DEFINE_SECT constants
#
# Types of Sections
#: Ns
DH_DEFINE_SECT_NS = 1
#: Ew
DH_DEFINE_SECT_EW = 2
#: Angled
DH_DEFINE_SECT_ANGLED = 3
#
# DH_EXP constants
#
# Type of Export
#: Csv
DH_EXP_CSV = 0
#: Ascii
DH_EXP_ASCII = 1
#: Access
DH_EXP_ACCESS = 2
#: Collars as points
DH_EXP_SHP = 3
#: To Surpac Geological database (special format ACCESS)
DH_EXP_SURPAC = 4
#: Hole traces as polylines
DH_EXP_SHP_TRACES = 5
#
# DH_HOLES constants
#
# Holes to select
#: All
DH_HOLES_ALL = 0
#: Selected
DH_HOLES_SELECTED = 1
#
# DH_MASK constants
#
# Masks
#: Append
DH_MASK_APPEND = 0
#: New
DH_MASK_NEW = 1
#
# DH_PLOT constants
#
# Type of Plot
#: Plan
DH_PLOT_PLAN = 0
#: Section
DH_PLOT_SECTION = 1
#: Striplog
DH_PLOT_STRIPLOG = 2
#: Hole traces
DH_PLOT_HOLE_TRACES = 3
#: 3d
DH_PLOT_3D = 4
#: Section stack
DH_PLOT_SECTION_STACK = 5
#: Section fence
DH_PLOT_SECTION_FENCE = 6
#: Section crooked
DH_PLOT_SECTION_CROOKED = 7
#
# DH_SECT_PAGE constants
#
# Sections
#: Section
DH_SECT_PAGE_SECTION = 1
#
# DH_SURFACE constants
#
# Surface selection for creation of geological
# top or bottom surfaces.
#: First layer from
DH_SURFACE_FIRST_LAYER_FROM = 0
#: First layer to
DH_SURFACE_FIRST_LAYER_TO = 1
#: Second layer from
DH_SURFACE_SECOND_LAYER_FROM = 2
#: Second layer to
DH_SURFACE_SECOND_LAYER_TO = 3
#: Last layer from
DH_SURFACE_LAST_LAYER_FROM = 4
#: Last layer to
DH_SURFACE_LAST_LAYER_TO = 5
#
# DIP_CONVENTION constants
#
# Dip convention
#: Negative
DIP_CONVENTION_NEGATIVE = -1
#: Positive
DIP_CONVENTION_POSITIVE = 1
#
# DH_DESURVEY constants
#
# Desurvey method
#: Rad curve
DH_DESURVEY_RAD_CURVE = 0
#: Polynomial
DH_DESURVEY_POLYNOMIAL = 1
#: Straight seg
DH_DESURVEY_STRAIGHT_SEG = 2
#
# GXDMPPLY Constants
#
#
# GXDOCU Constants
#
#
# DOCU_OPEN constants
#
# How to open document
#: View
DOCU_OPEN_VIEW = 0
#: Edit
DOCU_OPEN_EDIT = 1
#
# GXDU Constants
#
#
# DB_DUP constants
#
# Duplicate Types
#: First
DB_DUP_FIRST = 1
#: Average
DB_DUP_AVERAGE = 2
#: Minimum
DB_DUP_MINIMUM = 3
#: Maximum
DB_DUP_MAXIMUM = 4
#: Median
DB_DUP_MEDIAN = 5
#: Last
DB_DUP_LAST = 6
#
# DB_DUPEDIT constants
#
# Duplicate Edit Flags
#: Single
DB_DUPEDIT_SINGLE = 0
#: All
DB_DUPEDIT_ALL = 1
#
# DU_CHANNELS constants
#
# Channels to Display
#: Displayed
DU_CHANNELS_DISPLAYED = 0
#: All
DU_CHANNELS_ALL = 1
#
# DU_EXPORT constants
#
# Export Type
#: Csv
DU_EXPORT_CSV = 0
#: Oddf
DU_EXPORT_ODDF = 1
#: Post pc
DU_EXPORT_POST_PC = 2
#: Post unix
DU_EXPORT_POST_UNIX = 3
#
# DU_FILL constants
#
# Filling Options
#: Inside
DU_FILL_INSIDE = 0
#: Outside
DU_FILL_OUTSIDE = 1
#
# DU_IMPORT constants
#
# Import Mode
#: Append
DU_IMPORT_APPEND = 0
#: Replace
DU_IMPORT_REPLACE = 1
#: Merge
DU_IMPORT_MERGE = 2
#: Merge append
DU_IMPORT_MERGE_APPEND = 3
#
# DU_INTERP constants
#
# Inside Interpolation Method
#: Nearest
DU_INTERP_NEAREST = 1
#: Linear
DU_INTERP_LINEAR = 2
#: Cubic
DU_INTERP_CUBIC = 3
#: Akima
DU_INTERP_AKIMA = 4
#: Predict
DU_INTERP_PREDICT = 5
#
# DU_INTERP_EDGE constants
#
# Edge Interpolation Method
#: None
DU_INTERP_EDGE_NONE = 0
#: Same
DU_INTERP_EDGE_SAME = 1
#: Nearest
DU_INTERP_EDGE_NEAREST = 2
#: Linear
DU_INTERP_EDGE_LINEAR = 3
#
# DU_LAB_TYPE constants
#
# File Types
#: The delimiter string identifies
#: characters to be used as delimiters. Use C style
'\ue331', '&udotbl;': 'ụ', '&Udotbl;': 'Ụ',
'&ubrevinvbl;': '\ue727', '&udot;': '\ue715', '&Udot;': '\ue315',
'&uuml;': 'ü', '&Uuml;': 'Ü', '&uacute;': 'ú', '&Uacute;': 'Ú',
'&udblac;': 'ű', '&Udblac;': 'Ű', '&udotacute;': '\uebff',
'&Udotacute;': '\uebfe', '&ugrave;': 'ù', '&Ugrave;': 'Ù',
'&uvertline;': '\ue724', '&Uvertline;': '\ue324', '&ucirc;': 'û',
'&Ucirc;': 'Û', '&uumlcirc;': '\ue717', '&Uumlcirc;': '\ue317',
'&ucar;': 'ǔ', '&Ucar;': 'Ǔ', '&uring;': 'ů', '&Uring;': 'Ů',
'&uhook;': 'ủ', '&Uhook;': 'Ủ', '&ucurlbar;': '\uebbf',
'&ubreve;': 'ŭ', '&Ubreve;': 'Ŭ', '&umacr;': 'ū', '&Umacr;': 'Ū',
'&umacrbreve;': '\ue70b', '&Umacrbreve;': '\ue30b',
'&umacracute;': '\ue709', '&Umacracute;': '\ue309', '&uumlmacr;': 'ǖ',
'&Uumlmacr;': 'Ǖ', '&uelig;': '\ue8c9', '&UElig;': '\ue8c8',
'&uulig;': '\ue8c7', '&UUlig;': '\ue8c6', '&uuligdblac;': '\uefd8',
'&UUligdblac;': '\uefd9', '&uasup;': '\ue8eb', '&uesup;': '\ue72b',
'&Uesup;': '\ue32b', '&uisup;': '\ue72c', '&uosup;': '\ue72d',
'&Uosup;': '\ue32d', '&uvsup;': '\ue8ec', '&uwsup;': '\ue8ed',
'&venl;': '\ueef8', '&vscap;': 'ᴠ', '&vbar;': '\ue74e',
'&vslash;': '\ue8ba', '&vslashura;': '\ue8bb',
'&vslashuradbl;': '\ue8bc', '&vdiagstrok;': 'ꝟ', '&Vdiagstrok;': 'Ꝟ',
'&Vslstrok;': '℣', '&vdotbl;': 'ṿ', '&Vdotbl;': 'Ṿ',
'&vdot;': '\ue74c', '&Vdot;': '\ue34c', '&vuml;': '\ue742',
'&Vuml;': '\ue342', '&vacute;': '\ue73a', '&Vacute;': '\ue33a',
'&vvertline;': '\ue74f', '&Vvertline;': '\ue34e',
'&vdblac;': '\ue74b', '&Vdblac;': '\ue34b', '&vcirc;': '\ue73b',
'&Vcirc;': '\ue33b', '&vring;': '\ue743', '&vmacr;': '\ue74d',
'&Vmacr;': '\ue34d', '&Vovlhigh;': '\uf7b2', '&wynn;': 'ƿ',
'&WYNN;': 'Ƿ', '&vins;': 'ꝩ', '&Vins;': 'Ꝩ', '&vinsdotbl;': '\ue7e6',
'&Vinsdotbl;': '\ue3e6', '&vinsdot;': '\ue7e7', '&Vinsdot;': '\ue3e7',
'&vinsacute;': '\uebbb', '&Vinsacute;': '\uebba', '&vwelsh;': 'ỽ',
'&Vwelsh;': 'Ỽ', '&wenl;': '\ueef9', '&wscap;': 'ᴡ', '&wdotbl;': 'ẉ',
'&Wdotbl;': 'Ẉ', '&wdot;': 'ẇ', '&Wdot;': 'Ẇ', '&wuml;': 'ẅ',
'&Wuml;': 'Ẅ', '&wacute;': 'ẃ', '&Wacute;': 'Ẃ',
'&wdblac;': '\ue750', '&Wdblac;': '\ue350', '&wgrave;': 'ẁ',
'&Wgrave;': 'Ẁ', 'ŵ': 'ŵ', 'Ŵ': 'Ŵ', '&wring;': 'ẘ',
'&wmacr;': '\ue757', '&Wmacr;': '\ue357', '&wasup;': '\ue8f0',
'&wesup;': '\ue753', '&Wesup;': '\ue353', '&wisup;': '\ue8f1',
'&wosup;': '\ue754', '&wusup;': '\ue8f2', '&wvsup;': '\ue8f3',
'&xenl;': '\ueefa', '&xscap;': '\uef11', '&xmod;': 'ˣ', '&xdes;': 'ꭗ',
'&xslashula;': '\ue8bd', '&xslashlra;': '\ue8be',
'&xslashlradbl;': '\ue8ce', '&Xovlhigh;': '\uf7b3',
'&yenl;': '\ueefb', '&yscap;': 'ʏ', '&ybar;': '\ue77b',
'&ycurl;': '\ue785', '&Ycurl;': '\ue385', '&ydotbl;': 'ỵ',
'&Ydotbl;': 'Ỵ', '&ydot;': 'ẏ', '&Ydot;': 'Ẏ', 'ÿ': 'ÿ',
'Ÿ': 'Ÿ', 'ý': 'ý', 'Ý': 'Ý', '&ydblac;': '\ue77c',
'&Ydblac;': '\ue37c', '&ydotacute;': '\ue784',
'&Ydotacute;': '\ue384', '&ygrave;': 'ỳ', '&Ygrave;': 'Ỳ',
'&ycirc;': 'ŷ', '&Ycirc;': 'Ŷ', '&yring;': 'ẙ', '&yhook;': 'ỷ',
'&Yhook;': 'Ỷ', '&ybreve;': '\ue776', '&Ybreve;': '\ue376',
'&ymacr;': 'ȳ', '&Ymacr;': 'Ȳ', '&ymacrbreve;': '\ue775',
'&Ymacrbreve;': '\ue375', '&ymacracute;': '\ue773',
'&Ymacracute;': '\ue373', '&yylig;': 'ꝡ', '&YYlig;': 'Ꝡ',
'&yyliguml;': '\uebe9', '&YYliguml;': '\uebe8',
'&yyligdblac;': '\uebcb', '&YYligdblac;': '\uebca',
'&yesup;': '\ue781', '&yrgmainstrok;': '\uf233', '&yloop;': 'ỿ',
'&Yloop;': 'Ỿ', '&zenl;': '\ueefc', '&zscap;': 'ᴢ', '&zstrok;': 'ƶ',
'&Zstrok;': 'Ƶ', '&zdotbl;': 'ẓ', '&Zdotbl;': 'Ẓ', 'ż': 'ż',
'Ż': 'Ż', '&zvisigot;': 'ꝣ', '&Zvisigot;': 'Ꝣ', '&ezh;': 'ʒ',
'&EZH;': 'Ʒ', '&yogh;': 'ȝ', '&YOGH;': 'Ȝ', 'þ': 'þ',
'Þ': 'Þ', 'þenl;': '\ueef6', 'þscap;': '\uef15',
'þbar;': 'ꝥ', 'Þbar;': 'Ꝥ', 'þovlmed;': '\ue7a2',
'þbarslash;': '\uf149', 'Þbarslash;': '\ue337',
'þbardes;': 'ꝧ', 'Þbardes;': 'Ꝧ', 'þdotbl;': '\ue79f',
'Þdotbl;': '\ue39f', 'þacute;': '\ue737',
'þslonglig;': '\ue734', 'þslongligbar;': '\ue735',
'þrarmlig;': '\ue8c1', '¼': '¼', '½': '½',
'¾': '¾', '&sup0;': '⁰', '¹': '¹', '²': '²',
'³': '³', '&sup4;': '⁴', '&sup5;': '⁵', '&sup6;': '⁶',
'&sup7;': '⁷', '&sup8;': '⁸', '&sup9;': '⁹', '&sub0;': '₀',
'&sub1;': '₁', '&sub2;': '₂', '&sub3;': '₃', '&sub4;': '₄',
'&sub5;': '₅', '&sub6;': '₆', '&sub7;': '₇', '&sub8;': '₈',
'&sub9;': '₉', '&romnumCDlig;': 'ↀ', '&romnumDDlig;': 'ↁ',
'&romnumDDdbllig;': 'ↂ', '&romnumCrev;': 'Ↄ',
'&romnumCrevovl;': '\uf23f', '&romnumCdblbar;': '\uf2fe',
'&romnumcdblbar;': '\uf2ff', '&Imod;': 'ᴵ', '&Vmod;': 'ⱽ',
'&Xmod;': '\uf1bf', '&asup;': 'ͣ', 'æsup;': 'ᷔ',
'&anligsup;': '\uf036', '&anscapligsup;': '\uf03a', '&aoligsup;': 'ᷕ',
'&arligsup;': '\uf038', '&arscapligsup;': '\uf130', '&avligsup;': 'ᷖ',
'&bsup;': '\uf012', '&bscapsup;': '\uf013', '⫐': 'ͨ',
'çsup;': 'ᷗ', '&dsup;': 'ͩ', '&drotsup;': 'ᷘ', 'ðsup;': 'ᷙ',
'&dscapsup;': '\uf016', '&esup;': 'ͤ', '&eogonsup;': '\uf135',
'&emacrsup;': '\uf136', '&fsup;': '\uf017', '&gsup;': 'ᷚ',
'&gscapsup;': 'ᷛ', '&hsup;': 'ͪ', '&isup;': 'ͥ',
'&inodotsup;': '\uf02f', '&jsup;': '\uf030', '&jnodotsup;': '\uf031',
'&ksup;': 'ᷜ', '&kscapsup;': '\uf01c', '&lsup;': 'ᷝ',
'&lscapsup;': 'ᷞ', '&msup;': 'ͫ', '&mscapsup;': 'ᷟ', '⊅': 'ᷠ',
'&nscapsup;': 'ᷡ', '&osup;': 'ͦ', '&omacrsup;': '\uf13f',
'&oslashsup;': '\uf032', '&oogonsup;': '\uf13e',
'&orrotsup;': '\uf03e', '&orumsup;': '\uf03f', '&psup;': '\uf025',
'&qsup;': '\uf033', '&rsup;': 'ͬ', '&rrotsup;': 'ᷣ',
'&rumsup;': '\uf040', '&rscapsup;': 'ᷢ', '&ssup;': 'ᷤ',
'&slongsup;': 'ᷥ', '&tsup;': 'ͭ', '&trotsup;': '\uf03b',
'&tscapsup;': '\uf02a', '&usup;': 'ͧ', '&vsup;': 'ͮ',
'&wsup;': '\uf03c', '&xsup;': 'ͯ', '&ysup;': '\uf02b', '&zsup;': 'ᷦ',
'&thornsup;': '\uf03d', '&combgrave;': '̀', '&combacute;': '́',
'&combcirc;': '̂', '&combcircdbl;': '᷍', '&combtilde;': '̃',
'&combmacr;': '̄', '&combbreve;': '̆', '&combdot;': '̇',
'&combuml;': '̈', '&combhook;': '̉', '&combring;': '̊',
'&combdblac;': '̋', '&combsgvertl;': '̍', '&combdbvertl;': '̎',
'&combdotbl;': '̣', '&combced;': '̧', '&dblbarbl;': '̳',
'&dblovl;': '̿', '&combogon;': '̨', '&combastbl;': '͙',
'&combdblbrevebl;': '͜', '&combtripbrevebl;': '\uf1fc',
'&combcurl;': '᷎', '&combcurlhigh;': '\uf1c5',
'&combdothigh;': '\uf1ca', '&combcurlbar;': '\uf1cc', '&bar;': '̅',
'&macrhigh;': '\uf00a', '&macrmed;': '\uf00b', '&ovlhigh;': '\uf00c',
'&ovlmed;': '\uf00d', '&barbl;': '̲', '&baracr;': '̶',
'&arbar;': '\uf1c0', '&combcomma;': '̕', '&combtildevert;': '̾',
'&er;': '͛', '&erang;': '\uf1c7', '&ercurl;': '\uf1c8',
'&ersub;': '᷏', '&ra;': 'ᷓ', '&rabar;': '\uf1c1', '&urrot;': '\uf153',
'&urlemn;': '\uf1c2', '&ur;': '᷑', '&us;': '᷒', '&combisbelow;': '᷐',
'&period;': '.', '&semi;': ';', '&amp;': '&', '&Theta;': 'Θ',
'&theta;': 'θ', '&obiit;': 'ꝋ', '&OBIIT;': 'Ꝋ', '&et;': '⁊',
'&etslash;': '\uf158', '&ET;': '\uf142', '&ETslash;': '\uf1a7',
'&apomod;': 'ʼ', '&esse;': '≈', '&est;': '∻', '&condes;': 'ꝯ',
'&CONdes;': 'Ꝯ', '&condot;': 'ꜿ', '&CONdot;': 'Ꜿ',
'&usbase;': '\uf1a6', '&USbase;': '\uf1a5', '&usmod;': 'ꝰ',
'&autem;': '\ue8a3', '&rum;': 'ꝝ', '&RUM;': 'Ꝝ', '&de;': '\uf159',
'&is;': 'ꝭ', '&IS;': 'Ꝭ', '&sstrok;': 'ꝸ', '&etfin;': 'ꝫ',
'&ETfin;': 'Ꝫ', '&sem;': '\uf1ac', '&fMedrun;': 'ᚠ', '&mMedrun;': 'ᛘ',
'&lbbar;': '℔', 'ˆ': '^', '´': '´', '`': '`',
'¨': '¨', '&tld;': '~', '¯': '¯', '˘': '˘',
'˙': '˙', '˚': '˚', '¸': '¸', '˛': '˛',
'˜': '˜', '˝': '˝', '&verbarup;': 'ˈ', '·': '·',
'&hyphpoint;': '‧', '&sgldr;': '․', '&dblldr;': '‥', '…': '…',
':': ':', ',': ',', '&tridotright;': '჻',
'&tridotupw;': '∴', '&tridotdw;': '∵', '&quaddot;': '∷',
'&tridotleft;': '⁖', '&lozengedot;': '⁘', '&midring;': '\uf1da',
'|': '|', '¦': '¦', '‖': '‖', '/': '/',
'&fracsol;': '⁄', '&dblsol;': '⫽', '\': '\\', '&luslst;': '⸌',
'&ruslst;': '⸍', '&rlslst;': '⸜', '&llslst;': '⸝', '_': '_',
'‐': '-', '‐': '‐', '&nbhy;': '‑', '&dblhyph;': '⹀',
'&dbloblhyph;': '⸗', '&numdash;': '‒', '–': '–', '—': '—',
'―': '―', '!': '!', '¡': '¡', '?': '?',
'¿': '¿', '&ramus;': '\uf1db', '(': '(', ')': ')',
'&lUbrack;': '⸦', '&rUbrack;': '⸧', '&ldblpar;': '⸨',
'&rdblpar;': '⸩', '[': '[', ']': ']', '{': '{',
'}': '}', '&lsqbqu;': '⁅', '&rsqbqu;': '⁆', '&lwhsqb;': '⟦',
'&rwhsqb;': '⟧', '&verbarql;': '⸡', '&verbarqr;': '⸠',
'&luhsqb;': '⸢', '&ruhsqb;': '⸣', '&llhsqb;': '⸤', '&rlhsqb;': '⸥',
''': "'", '′': '′', '"': '"', '″': '″',
'‘': '‘', '’': '’', '&lsquolow;': '‚', '&rsquorev;': '‛',
'“': '“', '”': '”', '&ldquolow;': '„', '&rdquorev;': '‟',
'‹': '‹', '«': '«', '<': '<', '&langb;': '⟨',
'›': '›', '>': '>', '»': '»', '&rangb;': '⟩',
'&hidot;': '\uf1f8', '&posit;': '\uf1e2', '&ductsimpl;': '\uf1e3',
'&punctvers;': '\uf1ea', '&punctposit;': '\uf1e4',
'&colmidcomposit;': '\uf1e5', '&bidotscomposit;': '\uf1f2',
'&tridotscomposit;': '\uf1e6', '&punctelev;': '\uf161',
'&punctelevdiag;': '\uf1f0', '&punctelevhiback;': '\uf1fa',
'&punctelevhack;': '\uf1fb', '&punctflex;': '\uf1f5',
'&punctexclam;': '\uf1e7', '&punctinter;': '\uf160',
'&punctintertilde;': '\uf1e8', '&punctinterlemn;': '\uf1f1',
'&punctpercont;': '⸮', '&wavylin;': '\uf1f9', '&medcom;': '\uf1e0',
'&parag;': '\uf1e1', '&renvoi;': '\uf1ec', '&tridotsdownw;': '⸪',
'&tridotsupw;': '⸫', '&quaddots;': '⸬', '&fivedots;': '⸭',
'&virgsusp;': '\uf1f4', '&virgmin;': '\uf1f7', '&dipledot;': '⋗',
'&sp;': ' ', ' ': '\xa0', '&nnbsp;': '\u202f',
'&enqd;': '\u2000', '&emqd;': '\u2001', ' ': '\u2002',
' ': '\u2003', ' ': '\u2004', ' ': '\u2005',
'&emsp16;': '\u2006', ' ': '\u2007', ' ': '\u2008',
' ': '\u2009', ' ': '\u200a', '&zerosp;': '\u200b',
'&del;': '\x7f', '­': '\xad', '#': '#', '§': '§',
'*': '*', '&triast;': '⁂', '@': '@', '©': '©',
'®': '®', '¬': '¬', '&logand;': '∧', '¶': '¶',
'&revpara;': '⁋', '✗': '✝', '†': '†', '‡': '‡',
'&tridagger;': '\uf1d2', '&refmark;': '※', '&dotcross;': '⁜',
'&hedera;': '❦', '&hederarot;': '❧', '$': '$', '¢': '¢',
'£': '£', '¤': '¤', '¥': '¥', '&pennygerm;': '₰',
'&scruple;': '℈', '&romaslibr;': '\uf2e0', '&romXbar;': '\uf2e1',
'&romscapxbar;': '\uf2e2', '&romscapybar;': '\uf2e3',
'&romscapdslash;': '\uf2e4', '&drotbar;': '\uf159', '&ecu;': '\uf2e7',
'&florloop;': '\uf2e8', '&grosch;': '\uf2e9', '&helbing;': '\uf2fb',
'&krone;': '\uf2fa', '&libradut;': '\uf2ea', '&librafren;': '\uf2eb',
'&libraital;': '\uf2ec', '&libraflem;': '\uf2ed',
'&liranuov;': '\uf2ee', '&lirasterl;': '\uf2ef',
'&markold;': '\uf2f0', '&markflour;': '\uf2f1', '&msign;': '\uf2f2',
'&msignflour;': '\uf2f3', '&penningar;': '\uf2f5',
'&reichtalold;': '\uf2f6', '&schillgerm;': '\uf2f7',
'&schillgermscript;': '\uf2f8', '&scudi;': '\uf2f9', '&ounce;': '℥',
'&sestert;': '\uf2fa', '&romas;': '\uf2d8', '&romunc;': '\uf2d9',
'&romsemunc;': '\uf2da', '&romsext;': '\uf2db',
'&romdimsext;': '\uf2dc', '&romsiliq;': '\uf2dd',
'&romquin;': '\uf2de', '&romdupond;': '\uf2df', '+': '+',
'−': '−', '±': '±', '×': '×', '÷': '÷',
'=': '=', '∞': '∞', '¬equals;': '≠', '%': '%',
'‰': '‰', '°': '°', '&smallzero;': '\uf1bd',
'µ': 'µ', '&dram;': '\uf2e6', '&obol;': '\uf2f4',
'&sextans;': '\uf2fb', '&ouncescript;': '\uf2fd', '&arrsgllw;': '←',
'&arrsglupw;': '↑', '&arrsglrw;': '→', '&arrsgldw;': '↓',
'&squareblsm;': '▪', '&squarewhsm;': '▫', '•': '•',
'&circledot;': '◌', '&tribull;': '‣', '&trirightwh;': '▹',
'&trileftwh;': '◃', '&metrshort;': '⏑', '&metrshortlong;': '⏒',
'&metrlongshort;': '⏓', '&metrdblshortlong;': '⏔',
'&metranc;': '\uf70a', '&metrancacute;': '\uf70b',
'&metrancdblac;': '\uf719', '&metrancgrave;': '\uf70c',
'&metrancdblgrave;': '\uf71a', '&metrbreve;': '\uf701',
'&metrbreveacute;': '\uf706', '&metrbrevedblac;': '\uf717',
'&metrbrevegrave;': '\uf707', '&metrbrevedblgrave;': '\uf718',
'&metrmacr;': '\uf700', '&metrmacracute;': '\uf704',
'&metrmacrdblac;': '\uf715', '&metrmacrgrave;': '\uf705',
'&metrmacrdblgrave;': '\uf716', '&metrmacrbreve;': '\uf702',
'&metrbrevemacr;': '\uf703', '&metrmacrbreveacute;': '\uf708',
'&metrmacrbrevegrave;': '\uf709', '&metrdblbrevemacr;': '\uf72e',
'&metrdblbrevemacracute;': '\uf71b',
'&metrdblbrevemacrdblac;': '\uf71c', '&metrpause;': '\uf714'}
MUFI4_KEYS = "&aenl;&ascap;ªąĄ&acurl;&Acurl;&adotbl;" \
"&Adotbl;&adot;&Adot;äÄ&adiaguml;&adotbluml;á" \
"Á&aenlacute;&aogonacute;&Aogonacute;&adblac;&Adblac;" \
"&adotacute;&Adotacute;àÀâÂäcirc;" \
"åcirc;ãÃåÅ&ahook;&Ahook;" \
"ăĂāĀ&amacrbreve;&Amacrbreve;" \
"&abreveacute;&Abreveacute;&amacracute;&Amacracute;&aalig;" \
"&aacloselig;&AAlig;&aaligenl;&aaligdotbl;&AAligdotbl;" \
"&aaligdot;&AAligdot;&aaliguml;&AAliguml;&aaligacute;" \
"&AAligacute;&aaligdblac;&AAligdblac;æÆæenl;" \
"æscap;æred;æcurl;Æcurl;æogon;" | |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_csr', [dirname(__file__)])
except ImportError:
import _csr
return _csr
if fp is not None:
try:
_mod = imp.load_module('_csr', fp, pathname, description)
finally:
fp.close()
return _mod
_csr = swig_import_helper()
del swig_import_helper
else:
import _csr
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def expandptr(*args):
"""
expandptr(npy_int32 const n_row, npy_int32 const [] Ap, npy_int32 [] Bi)
expandptr(npy_int64 const n_row, npy_int64 const [] Ap, npy_int64 [] Bi)
"""
return _csr.expandptr(*args)
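# A usage sketch (the signature is from the docstring above; the semantics
# -- expanding a CSR index pointer Ap into per-nonzero COO row indices Bi
# -- are an assumption about this internal routine):
#
#   import numpy as np
#   Ap = np.array([0, 2, 3], dtype=np.int32)   # 2 rows, 3 stored values
#   Bi = np.empty(Ap[-1], dtype=np.int32)
#   expandptr(len(Ap) - 1, Ap, Bi)             # Bi becomes [0, 0, 1]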
def csr_matmat_pass1(*args):
"""
csr_matmat_pass1(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_int32 const [] Bp, npy_int32 const [] Bj, npy_int32 [] Cp)
csr_matmat_pass1(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_int64 const [] Bp, npy_int64 const [] Bj, npy_int64 [] Cp)
"""
return _csr.csr_matmat_pass1(*args)
def csr_count_blocks(*args):
"""
csr_count_blocks(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const R, npy_int32 const C,
npy_int32 const [] Ap, npy_int32 const [] Aj) -> npy_int32
csr_count_blocks(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj) -> npy_int64
"""
return _csr.csr_count_blocks(*args)
def csr_has_sorted_indices(*args):
"""
csr_has_sorted_indices(npy_int32 const n_row, npy_int32 const [] Ap, npy_int32 const [] Aj) -> bool
csr_has_sorted_indices(npy_int64 const n_row, npy_int64 const [] Ap, npy_int64 const [] Aj) -> bool
"""
return _csr.csr_has_sorted_indices(*args)
def csr_sample_offsets(*args):
"""
csr_sample_offsets(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_int32 const n_samples, npy_int32 const [] Bi, npy_int32 const [] Bj,
npy_int32 [] Bp) -> int
csr_sample_offsets(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_int64 const n_samples, npy_int64 const [] Bi, npy_int64 const [] Bj,
npy_int64 [] Bp) -> int
"""
return _csr.csr_sample_offsets(*args)
def csr_diagonal(*args):
"""
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_bool_wrapper const [] Ax, npy_bool_wrapper [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
signed char const [] Ax, signed char [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned char const [] Ax, unsigned char [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
short const [] Ax, short [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned short const [] Ax, unsigned short [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
int const [] Ax, int [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned int const [] Ax, unsigned int [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long long const [] Ax, long long [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned long long const [] Ax, unsigned long long [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
float const [] Ax, float [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
double const [] Ax, double [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long double const [] Ax, long double [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cfloat_wrapper const [] Ax, npy_cfloat_wrapper [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cdouble_wrapper const [] Ax, npy_cdouble_wrapper [] Yx)
csr_diagonal(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_clongdouble_wrapper const [] Ax, npy_clongdouble_wrapper [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_bool_wrapper const [] Ax, npy_bool_wrapper [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
signed char const [] Ax, signed char [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned char const [] Ax, unsigned char [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
short const [] Ax, short [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned short const [] Ax, unsigned short [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
int const [] Ax, int [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned int const [] Ax, unsigned int [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
long long const [] Ax, long long [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
unsigned long long const [] Ax, unsigned long long [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
float const [] Ax, float [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
double const [] Ax, double [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
long double const [] Ax, long double [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_cfloat_wrapper const [] Ax, npy_cfloat_wrapper [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_cdouble_wrapper const [] Ax, npy_cdouble_wrapper [] Yx)
csr_diagonal(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const [] Ap, npy_int64 const [] Aj,
npy_clongdouble_wrapper const [] Ax, npy_clongdouble_wrapper [] Yx)
"""
return _csr.csr_diagonal(*args)
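# A usage sketch (argument dtypes must match one overload above exactly; the
# semantics -- writing the main diagonal of the CSR matrix into Yx -- are an
# assumption about this internal routine):
#
#   import numpy as np
#   Ap = np.array([0, 1, 2], dtype=np.int32)   # 2x2 CSR structure
#   Aj = np.array([0, 1], dtype=np.int32)
#   Ax = np.array([5.0, 7.0])
#   Yx = np.zeros(2)
#   csr_diagonal(2, 2, Ap, Aj, Ax, Yx)         # Yx becomes [5.0, 7.0]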
def csr_scale_rows(*args):
"""
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_bool_wrapper [] Ax, npy_bool_wrapper const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
signed char [] Ax, signed char const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned char [] Ax, unsigned char const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
short [] Ax, short const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned short [] Ax, unsigned short const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
int [] Ax, int const [] Xx)
csr_scale_rows(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned int [] Ax, unsigned int const [] Xx)
CPUs')
argp.add_option('--no-chop-difference',
dest="chop_diff", action='store_false',
help='print full difference')
argp.add_option('--chop-difference',
dest="chop_diff", action='store_true', default=True,
help='print chopped difference')
argp.add_option('--ui-checksum-length',
default=None, type='int',
help='number of checksum characters to show in output')
argp.add_option('--no-auto-loading', '--no-automatic-loading',
dest="auto_load", action='store_false',
help='No automatic loading when snapshotting to dirs.')
argp.add_option('--auto-loading', '--automatic-loading',
dest="auto_load", action='store_true', default=True,
help='automatic loading when snapshotting to dirs.')
argp.add_option('-c',
'--checksums', default="default",
help='what checksums to use')
argp.add_option('-V',
'--verify-cached', default="nsm",
help='what data do we verify for using cached checksums')
argp.add_option('-l',
'--load-mtree', default=[], action="append",
help='file to load old metadata tree from')
argp.add_option('--info-fields',
'--information-fields', dest="info_fields", default="default",
help='fields to show in info command')
argp.add_option('--data-only',
action='store_true', default=False,
help='use only the data fields (info=data,mt); do not save intermediate dirs.')
argp.add_option('--info', dest="info_fields",
help=optparse.SUPPRESS_HELP)
return argp, argp.parse_args()
def _setup_arg_mp(opts):
global mp_worker_num
global mp_workers
if mp_workers is not None:
return
if opts.mp is None:
mp_worker_num = _num_cpus_online()
if opts.verbose:
print "Workers:", mp_worker_num
elif opts.mp > 0:
mp_worker_num = opts.mp
if mp_worker_num:
mp_workers = multiprocessing.Pool(mp_worker_num)
def _setup_arg_checksum(opts):
global _available_checksums
global primary_checksum
global primary_checksum_ui_len
if opts.ui_checksum_length == 0:
primary_checksum_ui_len = None
if opts.ui_checksum_length > 0:
primary_checksum_ui_len = opts.ui_checksum_length
if not opts.checksums:
if 'md5' in _available_checksums:
primary_checksum = 'md5'
else:
primary_checksum = 'sha1'
else:
chks = opts.checksums.split(',')
if 'all' in chks:
chks.extend(list(_available_checksums))
nchks = []
for chk in chks:
if chk in ('sm', 'small'):
if 'md5' in _available_checksums:
chk = 'md5'
else:
chk = 'sha1'
if chk in ('def', 'default'):
if 'md5' in _available_checksums:
chk = 'md5'
nchks.extend(['md5', 'sha256', 'sha512'])
else:
chk = 'sha1'
nchks.extend(['sha1', 'sha256', 'sha512'])
if chk not in _available_checksums:
print >>sys.stderr, 'Invalid checksum:', chk, 'Supported:', \
", ".join(_available_checksums)
sys.exit(1)
nchks.append(chk)
_available_checksums = set(nchks)
primary_checksum = nchks[0]
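# Example of the parsing above: '--checksums default' expands to
# ['md5', 'sha256', 'sha512'] when md5 is available (the sha1 set
# otherwise), so primary_checksum becomes 'md5' while the remaining
# checksums are still computed and stored alongside it.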
def _setup_arg_vc(opts):
_vc_valid = ("all", "data", "default", "ns", "ps", "nsm", "psm",
"ism", "nism", "pism")
if opts.verify_cached not in _vc_valid:
print >>sys.stderr, 'Invalid Verify:', opts.verify_cached, 'Supported:', ", ".join(_vc_valid)
sys.exit(1)
if opts.verify_cached == "all":
opts.verify_cached = "pism"
if opts.verify_cached == "data":
opts.verify_cached = "ns"
if opts.verify_cached == "default":
opts.verify_cached = "nsm"
def _setup_arg_info(opts):
_i_valid = set(['p', 'name', 'c', 's', 'num', 'mt',
'u', 'g', 'd', 'i', 'l', 'mo', 'at', 'ct'])
ifields = set()
# Allish -- no name, still has at. This is the save "format".
ifields_all = set(['p', 'c', 's', 'num', 'mt',
'u', 'g', 'd', 'i', 'l', 'mo', 'at', 'ct'])
if opts.data_only:
ifields.update(set(['p', 'c', 's', 'mt'])) # Includes mt, =data doesn't
for ifield in opts.info_fields.split(","):
if ifield == 'all':
ifields.update(ifields_all)
continue
if ifield in ('def', 'default'):
ifields.update(set(['p', 'c', 's', 'num', 'mt', 'mo', 'ct']))
continue
if ifield in ('sm', 'small'):
ifields.update(set(['p', 'c', 's', 'num', 'mt']))
continue
if ifield == 'data':
ifields.update(set(['p', 'c', 's']))
continue
if ifield not in _i_valid:
print >>sys.stderr, 'Invalid Info. Field:', ifield, 'Supported:', ", ".join(_i_valid)
sys.exit(1)
ifields.add(ifield)
return ifields
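# Example of the parsing above: '--info-fields default' yields
# set(['p', 'c', 's', 'num', 'mt', 'mo', 'ct']); an unrecognized field
# name exits after printing the supported list.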
def _setup_arg_cache_load(opts):
_cached_roots = {}
if opts.load_mtree:
data_only = opts.verify_cached in ("ns", "ps", "nsm", "psm")
for fn in opts.load_mtree:
root = u_load(fn, data_only)
if root is None:
continue
rroot,root = root
_cached_roots[fn] = rroot
if opts.verbose:
print >>sys.stderr, "Loaded:", root.num, len(_cached_roots), len(_cached)
return _cached_roots
__jdbg_print__ = False
_top_beg = time.time()
def _jdbg(arg):
if not __jdbg_print__:
return
print >>sys.stderr, "JDBG: %.4f %s" % (time.time()-_top_beg, arg)
_last_beg = None
def _jdbgb(arg):
global _last_beg
if not __jdbg_print__:
return
_last_beg = time.time()
print >>sys.stderr, "JDBG: %6.2f BEG: %s" % (_last_beg-_top_beg, arg)
def _jdbge(arg):
if not __jdbg_print__:
return
print >>sys.stderr, "JDBG: %6.2f %s" % (time.time()-_last_beg, arg)
def main():
""" Command line UI to run commands. """
global prog
global last_matched_chop
global mp_worker_num
global mp_workers
remap_cmds = {'info' : 'information',
'ls' : 'list',
'diff' : 'difference',
'treediff' : 'tree-difference',
'tree-diff' : 'tree-difference',
'treedifference' : 'tree-difference',
'snap' : 'snapshot',
'dir' : 'directory-summary',
'dirsum' : 'directory-summary',
'dir-sum' : 'directory-summary',
'dirls' : 'directory-list',
'dir-ls' : 'directory-list',
'dirinfo' : 'directory-information',
'dir-info' : 'directory-information',
'dirlist' : 'directory-list',
'dir-list' : 'directory-list',
'dirtree' : 'directory-tree',
'dir-tree' : 'directory-tree',
'directory-ls' : 'directory-list',
'directory-info' : 'directory-information',
'directoryls' : 'directory-list',
'directorylist' : 'directory-list',
'directoryinfo' : 'directory-information',
'directorytree' : 'directory-tree',
}
all_cmds = ("summary", "list", "information", "difference",
"directory-list", "directory-information", "directory-tree",
"directory-summary",
"snapshot", "check", "tree", "tree-difference", "resave", "help")
(argp, (opts, cmds)) = _setup_argp(all_cmds)
_jdbg("args")
if argp.prog is not None:
prog = argp.prog
elif sys.argv:
prog = os.path.basename(sys.argv[0])
else:
prog = "mtree"
if opts.verbose:
global __show_checksum_work__
__show_checksum_work__ = True
global __show_stat_work__
__show_stat_work__ = True
global _prnt_diff_dir_mod
global _prnt_diff_reg_mod
_prnt_diff_dir_mod = False
_prnt_diff_reg_mod = False
last_matched_chop = opts.chop_diff
_setup_arg_checksum(opts)
_setup_arg_vc(opts)
ifields = _setup_arg_info(opts)
_cached_roots = _setup_arg_cache_load(opts)
cmd = 'unknown'
if len(cmds) >= 1:
cmd = remap_cmds.get(cmds[0], cmds[0])
if cmd not in all_cmds and not cmd.startswith("debug-test-"):
argp.print_help()
sys.exit(1)
_jdbg("cmds")
errcode = 0
done = False
if False: pass
elif cmd == 'snapshot':
if len(cmds) < 3:
print >>sys.stderr, "Format: %s %s <filename> <path> [...]" % (prog, cmds[0])
sys.exit(1)
info = set(['p', 'c', 's', 'num', 'mt',
'u', 'g', 'd', 'i', 'l', 'mo', 'at', 'ct'])
if opts.data_only:
info = set(['p', 'c', 's', 'mt'])
snap_fn = _real_snapfn(cmds[1], opts.auto_load, opts.verify_cached)
print "Creating snapshot:", snap_fn
try:
try:
out = open(snap_fn, "wb")
except IOError, e:
print >>sys.stderr, "open(%s): %s" % (snap_fn, e)
sys.exit(1)
if pylstat is None:
out.write("mtree-file-0.1\n")
else: # Can have NS data in atime/ctime/mtime
out.write("mtree-file-0.2\n")
roots = {}
for path in cmds[2:]:
if not os.path.isdir(path):
print >>sys.stderr, "Path is not a directory: %s" % path
continue
names = _path2names(path)
parent = _names2parent(roots, names)
name = names[-1]
vfs = _name2vfs(roots, parent, 'd', name)
_jdbgb("walk")
_walk(vfs, ui=opts.ui)
_jdbge("walk")
# _setup_arg_mp(opts) -- slower :(
_jdbgb("walk stat")
if mp_workers is None:
_walk_stat_vfsd(vfs, ui=opts.ui)
else:
_walk_stat_vfsd_mp(vfs, ui=opts.ui)
_jdbge("walk stat")
_jdbgb("cached read")
_cache_read(vfs, opts.verify_cached, opts.verbose, ui=opts.ui)
_jdbge("cached read")
_setup_arg_mp(opts)
_jdbgb("walk checksums")
if mp_workers is None:
_walk_checksum_vfsd(vfs, ui=opts.ui)
else:
_walk_checksum_vfsd_mp(vfs, ui=opts.ui)
_jdbge("walk checksums")
if len(roots) == 1:
_jdbgb("prnt vfsd")
vfs = _root2useful(roots[roots.keys()[0]])
_prnt_vfsd(out, vfs, info=info, leaf_only=opts.data_only)
_jdbge("prnt vfsd")
_jdbgb("prnt vfs")
_prnt_vfs(sys.stdout, vfs, ui=opts.ui)
_jdbge("prnt vfs")
else:
print "Multiple ROOTs FAIL!", len(roots), roots
except KeyboardInterrupt, e:
print >>sys.stderr, "\nRemoving: ", snap_fn
_unlink_f(snap_fn)
raise
elif cmd == 'summary':
if len(cmds) < 2:
print >>sys.stderr, "Format: %s %s <filename> [...]" % (prog, cmds[0])
sys.exit(1)
for fn in cmds[1:]:
root = u_load(fn, data_only=True)
if root is None:
errcode = 1
done = True
if done: print ''
continue
# Need to keep real root around due to GC.
rroot,root = root
_jdbg("post load")
if done: print ''
print "Filename:", fn
_ui_prnt_root(_root2useful(root), ui=opts.ui)
done = True
elif cmd in ('information', 'list', 'tree'):
if len(cmds) < 2:
print >>sys.stderr, "Format: %s %s <filename> [...]" % (prog, cmds[0])
sys.exit(1)
data_only = False
if cmd in ('list', 'tree'):
data_only = True
ifields = False
_jdbg("beg")
for fn in cmds[1:]:
_jdbgb("load")
root = u_load(fn, data_only)
_jdbge("loaded")
if root is None:
errcode = 1
done = True
if done: print ''
continue
# Need to keep real root around due to GC.
rroot,root = root
if done: print ''
done = True
_jdbg("pre prnt")
_prnt_vfsd(sys.stdout, _root2useful(root),
ifields, ui=opts.ui, tree=cmd=='tree')
_jdbg("end")
elif cmd in ('directory-information', 'directory-list', 'directory-tree',
'directory-summary'):
_jdbg("dir")
if len(cmds) < 2:
print >>sys.stderr, "Format: %s %s <path> [...]" % (prog, cmds[0])
sys.exit(1)
data_only = False
if cmd in ('directory-list', 'directory-tree', 'directory-summary'):
data_only = True
ifields = False
roots = {}
vfses = set()
for path in cmds[1:]:
names = _path2names(path)
parent = _names2parent(roots, names)
name = names[-1]
if not os.path.isdir(path):
_name2vfs(roots, parent, 'f', name)
vfs = parent
else:
vfs = _name2vfs(roots, parent, 'd', name)
_jdbgb("walk")
_walk(vfs, ui=opts.ui)
_jdbge("walk")
vfses.add(vfs)
for vfs in vfses:
# _setup_arg_mp(opts) -- slower :(
_jdbgb("walk stat")
if mp_workers is None:
_walk_stat_vfsd(vfs, ui=opts.ui)
else:
_walk_stat_vfsd_mp(vfs, ui=opts.ui)
_jdbge("walk stat")
_jdbgb("cached read")
_cache_read(vfs, opts.verify_cached, opts.verbose, ui=opts.ui)
_jdbge("cached read")
_setup_arg_mp(opts)
_jdbgb("walk checksums")
if mp_workers is None:
_walk_checksum_vfsd(vfs, ui=opts.ui)
else:
_walk_checksum_vfsd_mp(vfs, ui=opts.ui)
_jdbge("walk checksums")
if done: print ''
done = True
_jdbg("pre prnt")
if cmd == 'directory-summary':
_ui_prnt_root(_root2useful(vfs), ui=opts.ui)
else:
_prnt_vfsd(sys.stdout, _root2useful(vfs),
ifields, ui=opts.ui, tree=cmd=='directory-tree')
_jdbg("end")
elif cmd in ('difference', 'tree-difference'):
if len(cmds) == 2 and os.path.isdir(cmds[1]):
last_snaps = auto_load_fns(cmds[1])
if len(last_snaps) >= 2:
cmds[1:] = [cmds[1] + '/' + last_snaps[-2],
cmds[1] + '/' + last_snaps[-1]]
print "Loading snapshot:", cmds[1]
print "Loading snapshot:", cmds[2]
if len(cmds) != 3:
print >>sys.stderr, "Format: %s %s [<dir>|<filename>] <filename>" % (prog,
cmds[0])
sys.exit(1)
if False:
roots1 = _load(cmds[1])
if roots1 is None:
sys.exit(1)
roots2 = _load(cmds[2])
if roots2 is None:
sys.exit(1)
"i1xslq9jgp9b.ml",
"i1xslq9jgp9b.tk",
"i201zzf8x.com",
"i2pmail.org",
"i35t0a5.com",
"i3pv1hrpnytow.cf",
"i3pv1hrpnytow.ga",
"i3pv1hrpnytow.gq",
"i3pv1hrpnytow.ml",
"i3pv1hrpnytow.tk",
"i4j0j3iz0.com",
"i4racpzge8.cf",
"i4racpzge8.ga",
"i4racpzge8.gq",
"i4racpzge8.ml",
"i4racpzge8.tk",
"i537244.cf",
"i537244.ga",
"i537244.ml",
"i54o8oiqdr.cf",
"i54o8oiqdr.ga",
"i54o8oiqdr.gq",
"i54o8oiqdr.ml",
"i54o8oiqdr.tk",
"i6.cloudns.cc",
"i6.cloudns.cx",
"i66g2i2w.com",
"i6appears.com",
"i75rwe24vcdc.cf",
"i75rwe24vcdc.ga",
"i75rwe24vcdc.gq",
"i75rwe24vcdc.ml",
"i75rwe24vcdc.tk",
"i774uhrksolqvthjbr.cf",
"i774uhrksolqvthjbr.ga",
"i774uhrksolqvthjbr.gq",
"i774uhrksolqvthjbr.ml",
"i774uhrksolqvthjbr.tk",
"i8e2lnq34xjg.cf",
"i8e2lnq34xjg.ga",
"i8e2lnq34xjg.gq",
"i8e2lnq34xjg.ml",
"i8e2lnq34xjg.tk",
"i8tvebwrpgz.cf",
"i8tvebwrpgz.ga",
"i8tvebwrpgz.gq",
"i8tvebwrpgz.ml",
"i8tvebwrpgz.tk",
"ia4stypglismiks.cf",
"ia4stypglismiks.ga",
"ia4stypglismiks.gq",
"ia4stypglismiks.ml",
"ia4stypglismiks.tk",
"iamcoder.ru",
"iaptkapkl53.tk",
"ib5dy8b0tip3dd4qb.cf",
"ib5dy8b0tip3dd4qb.ga",
"ib5dy8b0tip3dd4qb.gq",
"ib5dy8b0tip3dd4qb.ml",
"ib5dy8b0tip3dd4qb.tk",
"ibaxdiqyauevzf9.cf",
"ibaxdiqyauevzf9.ga",
"ibaxdiqyauevzf9.gq",
"ibaxdiqyauevzf9.ml",
"ibaxdiqyauevzf9.tk",
"ibitcoini.ru",
"ibmpc.cf",
"ibmpc.ga",
"ibmpc.gq",
"ibmpc.ml",
"ibsats.com",
"ibt7tv8tv7.cf",
"ibt7tv8tv7.ga",
"ibt7tv8tv7.gq",
"ibt7tv8tv7.ml",
"ibt7tv8tv7.tk",
"iccmail.men",
"ice-rulet.ru",
"icemail.club",
"icetmail.ga",
"ichatz.ga",
"ichbinvollcool.de",
"ichehol.ru",
"ichichich.faith",
"ichigo.me",
"icloud.do",
"icloudbusiness.net",
"ico-decenturion.ru",
"iconsultant.me",
"icraftx.net",
"ict0crp6ocptyrplcr.cf",
"ict0crp6ocptyrplcr.ga",
"ict0crp6ocptyrplcr.gq",
"ict0crp6ocptyrplcr.ml",
"ict0crp6ocptyrplcr.tk",
"icunet.icu",
"id10tproof.com",
"idea-mail.com",
"ideepmind.pw",
"ideer.msk.ru",
"ideer.pro",
"idihgabo.cf",
"idihgabo.gq",
"idn.vn",
"idnkil.cf",
"idnkil.ga",
"idnkil.gq",
"idnkil.ml",
"idotem.cf",
"idotem.ga",
"idotem.gq",
"idotem.ml",
"idt8wwaohfiru7.cf",
"idt8wwaohfiru7.ga",
"idt8wwaohfiru7.gq",
"idt8wwaohfiru7.ml",
"idt8wwaohfiru7.tk",
"idtv.site",
"idx4.com",
"ieatspam.eu",
"ieatspam.info",
"iefbcieuf.cf",
"iefbcieuf.ml",
"iefbcieuf.tk",
"ieh-mail.de",
"ieit9sgwshbuvq9a.cf",
"ieit9sgwshbuvq9a.ga",
"ieit9sgwshbuvq9a.gq",
"ieit9sgwshbuvq9a.ml",
"ieit9sgwshbuvq9a.tk",
"iencm.com",
"ies76uhwpfly.cf",
"ies76uhwpfly.ga",
"ies76uhwpfly.gq",
"ies76uhwpfly.ml",
"ies76uhwpfly.tk",
"iexh1ybpbly8ky.cf",
"iexh1ybpbly8ky.ga",
"iexh1ybpbly8ky.gq",
"iexh1ybpbly8ky.ml",
"iexh1ybpbly8ky.tk",
"if-store.ru",
"if58.cf",
"if58.ga",
"if58.gq",
"if58.ml",
"if58.tk",
"ifans-electronics.ru",
"ifans-headphone.ru",
"ifans-i9s-tws.ru",
"ifans-mad.ru",
"ifansforyou.ru",
"ifansim.ru",
"ifd8tclgtg.cf",
"ifd8tclgtg.ga",
"ifd8tclgtg.gq",
"ifd8tclgtg.ml",
"ifd8tclgtg.tk",
"iffymedia.com",
"ifneick22qpbft.cf",
"ifneick22qpbft.ga",
"ifneick22qpbft.gq",
"ifneick22qpbft.ml",
"ifneick22qpbft.tk",
"ifoodpe19.ml",
"ig9kxv6omkmxsnw6rd.cf",
"ig9kxv6omkmxsnw6rd.ga",
"ig9kxv6omkmxsnw6rd.gq",
"ig9kxv6omkmxsnw6rd.ml",
"ig9kxv6omkmxsnw6rd.tk",
"igcl5axr9t7eduxkwm.cf",
"igcl5axr9t7eduxkwm.gq",
"igcl5axr9t7eduxkwm.ml",
"igcl5axr9t7eduxkwm.tk",
"igelonline.de",
"igg.biz",
"iggqnporwjz9k33o.ga",
"iggqnporwjz9k33o.ml",
"ighjbhdf890fg.cf",
"igintang.ga",
"iginting.cf",
"igiveu.win",
"ignoremail.com",
"igqtrustee.com",
"igvaku.cf",
"igvaku.ga",
"igvaku.gq",
"igvaku.ml",
"igvaku.tk",
"igxppre7xeqgp3.cf",
"igxppre7xeqgp3.ga",
"igxppre7xeqgp3.gq",
"igxppre7xeqgp3.ml",
"igxppre7xeqgp3.tk",
"ih2vvamet4sqoph.cf",
"ih2vvamet4sqoph.ga",
"ih2vvamet4sqoph.gq",
"ih2vvamet4sqoph.ml",
"ih2vvamet4sqoph.tk",
"ihateyoualot.info",
"ihavedildo.tk",
"ihazspam.ca",
"iheartspam.org",
"ihhjomblo.online",
"iigmail.com",
"iigtzic3kesgq8c8.cf",
"iigtzic3kesgq8c8.ga",
"iigtzic3kesgq8c8.gq",
"iigtzic3kesgq8c8.ml",
"iigtzic3kesgq8c8.tk",
"iiifans.ru",
"iiko-rf.ru",
"iitdmefoq9z6vswzzua.cf",
"iitdmefoq9z6vswzzua.ga",
"iitdmefoq9z6vswzzua.gq",
"iitdmefoq9z6vswzzua.ml",
"iitdmefoq9z6vswzzua.tk",
"ij3zvea4ctirtmr2.cf",
"ij3zvea4ctirtmr2.ga",
"ij3zvea4ctirtmr2.gq",
"ij3zvea4ctirtmr2.ml",
"ij3zvea4ctirtmr2.tk",
"ik7gzqu2gved2g5wr.cf",
"ik7gzqu2gved2g5wr.ga",
"ik7gzqu2gved2g5wr.gq",
"ik7gzqu2gved2g5wr.ml",
"ik7gzqu2gved2g5wr.tk",
"ikaren-suma.ru",
"ikaza.info",
"ikbenspamvrij.nl",
"ikelsik.cf",
"ikelsik.ga",
"ikelsik.gq",
"ikelsik.ml",
"iki.kr",
"ikke.win",
"ikkjacket.com",
"ikoplak.cf",
"ikoplak.ga",
"ikoplak.gq",
"ikoplak.ml",
"ikra-ufa.ru",
"iku.us",
"ikuzus.cf",
"ikuzus.ga",
"ikuzus.gq",
"ikuzus.ml",
"ikuzus.tk",
"ilikespam.com",
"illistnoise.com",
"ilove39.ru",
"ilovemyniggers.club",
"ilovespam.com",
"ilt.ctu.edu.gr",
"ilzida-ismagilovna.ru",
"im-irsyad.tech",
"imacpro.ml",
"imails.info",
"imamail1928.cf",
"imavex.ru",
"imd044u68tcc4.cf",
"imd044u68tcc4.ga",
"imd044u68tcc4.gq",
"imd044u68tcc4.ml",
"imd044u68tcc4.tk",
"imeil.tk",
"imfilons.ru",
"imgjar.com",
"imgof.com",
"imgrpost.xyz",
"imgv.de",
"imhtcut.xyz",
"iminimalm.com",
"immo-gerance.info",
"immortalcraft.ru",
"imouto.pro",
"imozmail.com",
"impastore.co",
"imperfectron.com",
"imperia-school.ru",
"imperiya1.ru",
"impostore.co",
"improvedtt.com",
"imstations.com",
"imul.info",
"in.mailsac.com",
"inaby.com",
"inapplicable.org",
"inarbicloud.ru",
"inarbicrm.ru",
"inarbisoft.ru",
"inbaca.com",
"inbax.ga",
"inbax.ml",
"inbax.tk",
"inbound.plus",
"inbox.comx.cf",
"inbox.loseyourip.com",
"inbox.si",
"inboxalias.com",
"inboxbear.com",
"inboxclean.com",
"inboxclean.org",
"inboxhub.net",
"inboxkitten.com",
"inboxmail.world",
"inboxproxy.com",
"incestry.co.uk",
"inclusiveprogress.com",
"incognitomail.com",
"incognitomail.net",
"incognitomail.org",
"incomego.ru",
"increase5f.com",
"incrediemail.com",
"indeecsam.ru",
"indeedlebeans.com",
"indeedtime.us",
"independentsucks.twilightparadox.com",
"india2in.com",
"indogame.site",
"indoliqueur.com",
"indomaed.pw",
"indomina.cf",
"indomovie21.me",
"indoserver.stream",
"indosukses.press",
"inf39.ru",
"infalled.com",
"infest.org",
"infideles.nu",
"infinityclippingpath.com",
"infitter.ru",
"info-radio.ml",
"info.tm",
"infoaccount-team.news",
"infokehilangan.com",
"infomarketer.ru",
"infomedia.ga",
"infoprice.tech",
"informasikuyuk.com",
"information-account.net",
"inibuatkhoirul.cf",
"inibuatsgb.cf",
"inibuatsgb.ga",
"inibuatsgb.gq",
"inibuatsgb.ml",
"inibuatsgb.tk",
"inikita.online",
"inipunyakitasemua.cf",
"inipunyakitasemua.ga",
"inipunyakitasemua.gq",
"inipunyakitasemua.ml",
"inipunyakitasemua.tk",
"inji4voqbbmr.cf",
"inji4voqbbmr.ga",
"inji4voqbbmr.gq",
"inji4voqbbmr.ml",
"inji4voqbbmr.tk",
"inmail.site",
"inmail.xyz",
"inmynetwork.tk",
"inretail.ru",
"inrim.cf",
"inrim.ga",
"inrim.gq",
"inrim.ml",
"inrim.tk",
"insanumingeniumhomebrew.com",
"inshapeactive.ru",
"insorg-mail.info",
"insta-ground.ru",
"instaforex-info.ru",
"instafun.men",
"instaindofree.com",
"instaku-media.com",
"instamaniya.ru",
"instant-mail.de",
"instantblingmail.info",
"instantemailaddress.com",
"instantmail.de",
"instantmail.fr",
"instaprice.co",
"insuranceonlinequotes.info",
"inter-dohod.ru",
"intercom1000.ru",
"interesno-prosto.ru",
"interesting-rus.ru",
"internet-v-astrakhani.ru",
"internet-v-belgorode.ru",
"internet-v-kaluge.ru",
"internet-v-krasnodare.ru",
"internet-v-kurske.ru",
"internet-v-moskve.ru",
"internet-v-orle.ru",
"internet-v-rostove.ru",
"internet-v-ryazani.ru",
"internet-v-samare.ru",
"internet-v-saratove.ru",
"internet-v-shakhti.ru",
"internet-v-stavropole.ru",
"internet-v-tule.ru",
"internet-v-volgograde.ru",
"interserver.ga",
"intersteller.com",
"intervesp-wood.ru",
"intim-dreams.ru",
"intimm-shop.ru",
"intrxi6ti6f0w1fm3.cf",
"intrxi6ti6f0w1fm3.ga",
"intrxi6ti6f0w1fm3.gq",
"intrxi6ti6f0w1fm3.ml",
"intrxi6ti6f0w1fm3.tk",
"inunglove.cf",
"invalidmarket.ru",
"investore.co",
"inzh-s.ru",
"iodizc3krahzsn.cf",
"iodizc3krahzsn.ga",
"iodizc3krahzsn.gq",
"iodizc3krahzsn.ml",
"iodizc3krahzsn.tk",
"ioemail.win",
"ioio.eu",
"iolkjk.cf",
"iolkjk.ga",
"iolkjk.gq",
"iolkjk.ml",
"iolokdi.ga",
"iolokdi.ml",
"ionb1ect2iark1ae1.cf",
"ionb1ect2iark1ae1.ga",
"ionb1ect2iark1ae1.gq",
"ionb1ect2iark1ae1.ml",
"ionb1ect2iark1ae1.tk",
"iordanii.ru",
"iot.aiphone.eu.org",
"iot.dmtc.dev",
"iot.ptcu.dev",
"iot.vuforia.us",
"iotrh5667.cf",
"iotrh5667.ga",
"iotrh5667.gq",
"iotrh5667.ml",
"iotu.creo.site",
"iotu.de.vipqq.eu.org",
"iotu.hstu.eu.org",
"iotu.nctu.me",
"iouy67cgfss.cf",
"iouy67cgfss.ga",
"iouy67cgfss.gq",
"iouy67cgfss.ml",
"iouy67cgfss.tk",
"ip4.pp.ua",
"ip6.pp.ua",
"iparts96.ru",
"ipdeer.com",
"ipemail.win",
"ipharchenko.ru",
"iphoneaccount.com",
"iphonees.info",
"iphonemail.cf",
"iphonemail.ga",
"iphonemail.gq",
"iphonemail.tk",
"iphonex-shop.ru",
"ipiranga.dynu.com",
"ipizza24.ru",
"ipmaximus.ru",
"ipoo.org",
"ippandansei.tk",
"ipswell.com",
"iq2kq5bfdw2a6.cf",
"iq2kq5bfdw2a6.ga",
"iq2kq5bfdw2a6.gq",
"iq2kq5bfdw2a6.ml",
"iqbaby-toys.ru",
"iqcfpcrdahtqrx7d.cf",
"iqcfpcrdahtqrx7d.ga",
"iqcfpcrdahtqrx7d.gq",
"iqcfpcrdahtqrx7d.ml",
"iqcfpcrdahtqrx7d.tk",
"iqemail.win",
"iqsfu65qbbkrioew.cf",
"iqsfu65qbbkrioew.ga",
"iqsfu65qbbkrioew.gq",
"iqsfu65qbbkrioew.ml",
"iqsfu65qbbkrioew.tk",
"irabops.com",
"ireccomend.ru",
"iren24.ru",
"irina-kusik.ru",
"irish2me.com",
"irknim.ru",
"irobotlab.ru",
"iroid.com",
"iroirorussia.ru",
"ironiebehindert.de",
"irpanenjin.com",
"irr.kr",
"irssi.tv",
"is-the-bestway.ru",
"isbjct4e.com",
"isdaq.com",
"ise4mqle13.o-r.kr",
"isf4e2tshuveu8vahhz.cf",
"isf4e2tshuveu8vahhz.ga",
"isf4e2tshuveu8vahhz.gq",
"isf4e2tshuveu8vahhz.ml",
"isf4e2tshuveu8vahhz.tk",
"ishkinn.ru",
"ishop-go.ru",
"iskus-elki.ru",
"islamm.cf",
"islamm.gq",
"ismem.ru",
"isosq.com",
"issthnu7p9rqzaew.cf",
"issthnu7p9rqzaew.ga",
"issthnu7p9rqzaew.gq",
"issthnu7p9rqzaew.ml",
"issthnu7p9rqzaew.tk",
"istlecker.de",
"istmail.tk",
"istoktepla.ru",
"istreamingtoday.com",
"isxuldi8gazx1.ga",
"isxuldi8gazx1.ml",
"isxuldi8gazx1.tk",
"it-italy.cf",
"it-italy.ga",
"it-italy.gq",
"it-italy.ml",
"it-italy.tk",
"it2-mail.tk",
"itclub-smanera.tech",
"itemp.email",
"itempmail.tk",
"itfilmes.ru",
"itks-it.ru",
"itksit.ru",
"itmtx.com",
"itnews-group.ru",
"itomo.ru",
"itoup.com",
"itoxwehnbpwgr.cf",
"itoxwehnbpwgr.ga",
"itoxwehnbpwgr.gq",
"itoxwehnbpwgr.ml",
"itoxwehnbpwgr.tk",
"its0k.com",
"itsme.edu.pl",
"itue33ubht.ga",
"itue33ubht.gq",
"itue33ubht.tk",
"iu54edgfh.cf",
"iu54edgfh.ga",
"iu54edgfh.gq",
"iu54edgfh.ml",
"iu54edgfh.tk",
"iu66sqrqprm.cf",
"iu66sqrqprm.ga",
"iu66sqrqprm.gq",
"iu66sqrqprm.ml",
"iu66sqrqprm.tk",
"iuemail.men",
"ivecotrucks.cf",
"ivecotrucks.ga",
"ivecotrucks.gq",
"ivecotrucks.ml",
"ivecotrucks.tk",
"iw409uttadn.cf",
"iw409uttadn.ga",
"iw409uttadn.gq",
"iw409uttadn.ml",
"iw409uttadn.tk",
"iwanbanjarworo.cf",
"iwancorp.cf",
"iwankopi.cf",
"iwantumake.us",
"iwi.net",
"iwin.ga",
"iwmfuldckw5rdew.cf",
"iwmfuldckw5rdew.ga",
"iwmfuldckw5rdew.gq",
"iwmfuldckw5rdew.ml",
"iwmfuldckw5rdew.tk",
"iwv06uutxic3r.cf",
"iwv06uutxic3r.ga",
"iwv06uutxic3r.gq",
"iwv06uutxic3r.ml",
"iwv06uutxic3r.tk",
"ixkxirzvu10sybu.cf",
"ixkxirzvu10sybu.ga",
"ixkxirzvu10sybu.gq",
"ixkxirzvu10sybu.ml",
"ixkxirzvu10sybu.tk",
"ixtwhjqz4a992xj.cf",
"ixtwhjqz4a992xj.ga",
"ixtwhjqz4a992xj.gq",
"ixtwhjqz4a992xj.ml",
"ixtwhjqz4a992xj.tk",
"ixvfhtq1f3uuadlas.cf",
"ixvfhtq1f3uuadlas.ga",
"ixvfhtq1f3uuadlas.gq",
"ixvfhtq1f3uuadlas.ml",
"ixvfhtq1f3uuadlas.tk",
"ixxycatmpklhnf6eo.cf",
"ixxycatmpklhnf6eo.ga",
"ixxycatmpklhnf6eo.gq",
"iy47wwmfi6rl5bargd.cf",
"iy47wwmfi6rl5bargd.ga",
"iy47wwmfi6rl5bargd.gq",
"iy47wwmfi6rl5bargd.ml",
"iy47wwmfi6rl5bargd.tk",
"iz0tvkxu43buk04rx.cf",
"iz0tvkxu43buk04rx.ga",
"iz0tvkxu43buk04rx.gq",
"iz0tvkxu43buk04rx.ml",
"iz0tvkxu43buk04rx.tk",
"iz3oht8hagzdp.cf",
"iz3oht8hagzdp.ga",
"iz3oht8hagzdp.gq",
"iz3oht8hagzdp.ml",
"iz3oht8hagzdp.tk",
"iz4acijhcxq9i30r.cf",
"iz4acijhcxq9i30r.ga",
"iz4acijhcxq9i30r.gq",
"iz4acijhcxq9i30r.ml",
"iz4acijhcxq9i30r.tk",
"izbashop.ru",
"izmoscowpo.ru",
"izobretateli59.ru",
"izoli9afsktfu4mmf1.cf",
"izoli9afsktfu4mmf1.ga",
"izoli9afsktfu4mmf1.gq",
"izoli9afsktfu4mmf1.ml",
"izoli9afsktfu4mmf1.tk",
"j-keats.cf",
"j-keats.ga",
"j-keats.gq",
"j-keats.ml",
"j-keats.tk",
"j-p.us",
"j.fairuse.org",
"j.rvb.ro",
"j2anellschild.ga",
"j3rqt89ez.com",
"j4rang0y4nk.ga",
"j5vhmmbdfl.cf",
"j5vhmmbdfl.ga",
"j5vhmmbdfl.gq",
"j5vhmmbdfl.ml",
"j5vhmmbdfl.tk",
"j8-freemail.cf",
"jaaj.cf",
"jabkagames.ru",
"jacckpot.site",
"jackmailer.com",
"jackymail.top",
"jacquelx.com",
"jad32.cf",
"jad32.ga",
"jad32.gq",
"jadopado.com",
"jafps.com",
"jagongan.ml",
"jaguar-landrover.cf",
"jaguar-landrover.ga",
"jaguar-landrover.gq",
"jaguar-landrover.ml",
"jaguar-landrover.tk",
"jaguar-xj.ml",
"jaguar-xj.tk",
"jajxz.com",
"jakjtavvtva8ob2.cf",
"jakjtavvtva8ob2.ga",
"jakjtavvtva8ob2.gq",
"jakjtavvtva8ob2.ml",
"jakjtavvtva8ob2.tk",
"jama.trenet.eu",
"jamaw-ewad.ru",
"jamesmaylovescabbage.xyz",
"jamieziggers.nl",
"jamikait.cf",
"jamikait.ga",
"jamikait.gq",
"jamikait.ml",
"jamit.com.au",
"jancok.in",
"jancokancene.cf",
"jancokancene.ga",
"jancokancene.gq",
"jancokancene.ml",
"jancuk.tech",
"janganjadiabu1.tk",
"janganjadiabu10.gq",
"janganjadiabu2.ml",
"janganjadiabu3.ga",
"janganjadiabu4.cf",
"janganjadiabu5.gq",
"janganjadiabu6.tk",
"janganjadiabu7.ml",
"janganjadiabu8.ga",
"janganjadiabu9.cf",
"janproz.com",
"janym.ru",
"japanyn7ys.com",
"jaqis.com",
"jaqueline1121.club",
"jasmierodgers.ga",
"jatmikav.top",
"jauhari.cf",
"jauhari.ga",
"jauhari.gq",
"javmail.tech",
"javmaniac.co",
"jb73bq0savfcp7kl8q0.ga",
"jb73bq0savfcp7kl8q0.ml",
"jb73bq0savfcp7kl8q0.tk",
"jblacust.ru",
"jbnote.com",
"jcdmail.men",
"jcpclothing.ga",
"jdasdhj.cf",
"jdasdhj.ga",
"jdasdhj.gq",
"jdasdhj.ml",
"jdasdhj.tk",
"jde53sfxxbbd.cf",
"jde53sfxxbbd.ga",
"jde53sfxxbbd.gq",
"jde53sfxxbbd.ml",
"jde53sfxxbbd.tk",
"jdl5wt6kptrwgqga.cf",
"jdl5wt6kptrwgqga.ga",
"jdl5wt6kptrwgqga.gq",
"jdl5wt6kptrwgqga.ml",
"jdl5wt6kptrwgqga.tk",
"jdmadventures.com",
"jdtfdf55ghd.ml",
"jdvmail.com",
"je-recycle.info",
"je7f7muegqi.ga",
"je7f7muegqi.gq",
"je7f7muegqi.ml",
"je7f7muegqi.tk",
"jeemboo.ru",
"jeep-official.cf",
"jeep-official.ga",
"jeep-official.gq",
"jeep-official.ml",
"jeep-official.tk",
"jellow.ml",
"jellyrolls.com",
"jembotbrodol.com",
"jembud.icu",
"jembulan.bounceme.net",
"jembut142.cf",
"jembut142.ga",
"jembut142.gq",
"jembut142.ml",
"jembut142.tk",
"jeodumifi.ns3.name",
"jepijopiijo.cf",
"jepijopiijo.ga",
"jepijopiijo.gq",
"jepijopiijo.ml",
"jepijopiijo.tk",
"jerapah993r.gq",
"jerusalem.fyxo.ml",
"jesus-shop.ru",
"jet-renovation.fr",
"jetable.com",
"jetable.fr.nf",
"jetable.net",
"jetable.org",
"jetable.pp.ua",
"jetableemail.com",
"jetableemails.com",
"jfgfgfgdfdder545yy.ml",
"jfiee.tk",
"jftruyrfghd8867.cf",
"jftruyrfghd8867.ga",
"jftruyrfghd8867.gq",
"jftruyrfghd8867.ml",
"jftruyrfghd8867.tk",
"jgerbn4576aq.cf",
"jgerbn4576aq.ga",
"jgerbn4576aq.gq",
"jgerbn4576aq.ml",
"jgerbn4576aq.tk",
"jgfhh10-0-0-1.defaultdomain.ml",
"jh24promo.ru",
"jhhgcv54367.cf",
"jhhgcv54367.ga",
"jhhgcv54367.ml",
"jhhgcv54367.tk",
"jhjty56rrdd.cf",
"jhjty56rrdd.ga",
"jhjty56rrdd.gq",
"jhjty56rrdd.ml",
"jhjty56rrdd.tk",
"jhow.cf",
"jhow.ga",
"jhow.gq",
"jhow.ml",
"jiancok.cf",
"jiancok.ga",
"jiancok.gq",
"jiancokowe.cf",
"jiancokowe.ga",
"jiancokowe.gq",
"jiancokowe.ml",
"jiaxin8736.com",
"jieber.net",
"jiez00veud9z.cf",
"jiez00veud9z.ga",
"jiez00veud9z.gq",
"jiez00veud9z.ml",
"jiez00veud9z.tk",
"jijixiaozhen.cn",
"jil.kr",
"jilossesq.com",
"jimboba.ru",
"jinggakop.ga",
"jinggakop.gq",
"jinggakq.ml",
"jirafikcraft.ru",
"jiskhdgbgsytre43vh.ga",
"jjkgrtteee098.cf",
"jjkgrtteee098.ga",
"jjkgrtteee098.gq",
"jjkgrtteee098.ml",
"jjkgrtteee098.tk",
"jjlink.cn",
"jjmsb.eu.org",
"jkcntadia.cf",
"jkcntadia.ga",
"jkcntadia.gq",
"jkcntadia.ml",
"jkcntadia.tk",
"jkjsrdtr35r67.cf",
"jkjsrdtr35r67.ga",
"jkjsrdtr35r67.gq",
"jkjsrdtr35r67.ml",
"jkjsrdtr35r67.tk",
"jklasdf.com",
"jkljkl.cf",
"jkljkl.ga",
"jkrowlg.cf",
"jkrowlg.ga",
"jkrowlg.gq",
"jkrowlg.ml",
"jkyvznnqlrc.gq",
"jkyvznnqlrc.ml",
"jkyvznnqlrc.tk",
"jmail.fr.nf",
"jmail.ovh",
"jnggachoc.cf",
"jnggachoc.gq",
"jnpayy.com",
"jnthn39vr4zlohuac.cf",
"jnthn39vr4zlohuac.ga",
"jnthn39vr4zlohuac.gq",
"jnthn39vr4zlohuac.ml",
"jnthn39vr4zlohuac.tk",
"jnxjn.com",
"jnyfyxdhrx85f0rrf.cf",
"jnyfyxdhrx85f0rrf.ga",
"jnyfyxdhrx85f0rrf.gq",
"jnyfyxdhrx85f0rrf.ml",
"jnyfyxdhrx85f0rrf.tk",
"jo-mail.com",
"jo8otki4rtnaf.cf",
"jo8otki4rtnaf.ga",
"jo8otki4rtnaf.gq",
"jo8otki4rtnaf.ml",
"jo8otki4rtnaf.tk",
"joakarond.tk",
"joaquinito01.servehttp.com",
"joasantos.ga",
"job.craigslist.org",
"jobbikszimpatizans.hu",
"jobku.id",
"jobposts.net",
"jobs-to-be-done.net",
"jobsfind.ru",
"jocerset.ru",
"joelpet.com",
"joetestalot.com",
"john-doe.cf",
"john-doe.ga",
"john-doe.gq",
"john-doe.ml",
"johnpo.cf",
"johnpo.ga",
"johnpo.gq",
"johnpo.ml",
"johnpo.tk",
"johnrisky4u.ga",
"johnrisky4u.gq",
"johnrisky4u.ml",
"johnrisky4u.tk",
"join-taxi.ru",
"jokenaka.press",
"joker-dostavka.ru",
"jokerkard.ru",
"jombase.com",
"jonrepoza.ml",
"joq7slph8uqu.cf",
"joq7slph8uqu.ga",
"joq7slph8uqu.gq",
"joq7slph8uqu.ml",
"joq7slph8uqu.tk",
"jorja344cc.tk",
"jorosc.cf",
"jorosc.ga",
"jorosc.gq",
"jorosc.ml",
"jorosc.tk",
"josadelia100.tk",
"josalita95.ml",
"josalyani102.ml",
"josamadea480.ga",
"josamanda777.tk",
"josangel381.ml",
"josasjari494.ml",
"josdita632.ml",
"joseihorumon.info",
"josfitrawati410.ga",
"josfrisca409.tk",
"josgishella681.cf",
"joshendriyawati219.tk",
"josivangkia341.tk",
"josjihaan541.cf",
"josnarendra746.tk",
"josnurul491.ga",
"josprayugo291.tk",
"josresa306.tk",
"josrustam128.cf",
"josse.ltd",
"josyahya751.tk",
"jotyaduolchaeol2fu.cf",
"jotyaduolchaeol2fu.ga",
"jotyaduolchaeol2fu.gq",
"jotyaduolchaeol2fu.ml",
"jotyaduolchaeol2fu.tk",
"jourrapide.com",
"joysclick.ru",
"jp-morgan.cf",
"jp-morgan.ga",
"jp-morgan.gq",
"jp-morgan.ml",
"jp.ftp.sh",
"jpggh76ygh0v5don1f.cf",
"jpggh76ygh0v5don1f.ga",
"jpggh76ygh0v5don1f.gq",
"jpggh76ygh0v5don1f.ml",
"jpggh76ygh0v5don1f.tk",
"jpinvest.ml",
"jptb2motzaoa30nsxjb.cf",
"jptb2motzaoa30nsxjb.ga",
"jptb2motzaoa30nsxjb.gq",
"jptb2motzaoa30nsxjb.ml",
"jptb2motzaoa30nsxjb.tk",
"jqweblogs.com",
"jqwgmzw73tnjjm.cf",
"jqwgmzw73tnjjm.ga",
"jqwgmzw73tnjjm.gq",
"jqwgmzw73tnjjm.ml",
"jqwgmzw73tnjjm.tk",
"jr46wqsdqdq.cf",
"jr46wqsdqdq.ga",
"jr46wqsdqdq.gq",
"jr46wqsdqdq.ml",
"jr46wqsdqdq.tk",
"jralalk263.tk",
"jrcs61ho6xiiktrfztl.cf",
"jrcs61ho6xiiktrfztl.ga",
"jrcs61ho6xiiktrfztl.gq",
"jrcs61ho6xiiktrfztl.ml",
"jrcs61ho6xiiktrfztl.tk",
"jredm.com",
"jrinkkang97oye.cf",
"jrjrj4551wqe.cf",
"jrjrj4551wqe.ga",
"jrjrj4551wqe.gq",
"jrjrj4551wqe.ml",
"jrjrj4551wqe.tk",
"jryt7555ou9m.cf",
"jryt7555ou9m.ga",
"jryt7555ou9m.gq",
"jryt7555ou9m.ml",
"jryt7555ou9m.tk",
"jsrsolutions.com",
"jstzamo.com",
"jswfdb48z.com",
"jtkgatwunk.cf",
"jtkgatwunk.ga",
"jtkgatwunk.gq",
"jtkgatwunk.ml",
"jtkgatwunk.tk",
"jtmalwkpcvpvo55.cf",
"jtmalwkpcvpvo55.ga",
"jtmalwkpcvpvo55.gq",
"jtmalwkpcvpvo55.ml",
"jtmalwkpcvpvo55.tk",
"jto.kr",
"jugglepile.com",
"juiupsnmgb4t09zy.cf",
"juiupsnmgb4t09zy.ga",
"juiupsnmgb4t09zy.gq",
"juiupsnmgb4t09zy.ml",
"juiupsnmgb4t09zy.tk",
"julnic.ru",
"jumaelda4846.ml",
"jumanindya8240.cf",
"jumaprilia4191.cf",
"jumbunga3502.cf",
"jumgita6884.tk",
"jumlatifani8910.tk",
"jummario7296.ml",
"jummayang1472.ml",
"jumnia4726.ga",
"jumnoor4036.ga",
"jumnugroho6243.cf",
"jumonji.tk",
"jumossi51.ml",
"jumpy5678.cf",
"jumpy5678.ga",
"jumpy5678.gq",
"jumpy5678.ml",
"jumpy5678.tk",
"jumrestia9994.ga",
"jumreynard5211.ml",
"jumreza258.tk",
"jumveronica8959.tk",
"jun8yt.cf",
"jun8yt.ga",
"jun8yt.gq",
"jun8yt.ml",
"jun8yt.tk",
"junctiondx.com",
"jungkamushukum.com",
"junk.beats.org",
"junk.ihmehl.com",
"junk1e.com",
"junkmail.ga",
"junkmail.gq",
"jur-likbez.ru",
"just-games.ru",
"justbegood.pw",
"justdoit132.cf",
"justdoit132.ga",
"justdoit132.gq",
"justdoit132.ml",
"justdoit132.tk",
"justemail.ml",
"justifans.ru",
"justnowmail.com",
"justshoes.gq",
"justtoy.ru",
"justyland.ru",
"juyouxi.com",
"jv6hgh1.com",
"jv7ykxi7t5383ntrhf.cf",
"jv7ykxi7t5383ntrhf.ga",
"jv7ykxi7t5383ntrhf.gq",
"jv7ykxi7t5383ntrhf.ml",
"jv7ykxi7t5383ntrhf.tk",
"jvhclpv42gvfjyup.cf",
"jvhclpv42gvfjyup.ml",
"jvhclpv42gvfjyup.tk",
"jwk4227ufn.com",
"jwl3uabanm0ypzpxsq.cf",
"jwl3uabanm0ypzpxsq.ga",
"jwl3uabanm0ypzpxsq.gq",
"jwom.ru",
"jwoug2rht98plm3ce.cf",
"jwoug2rht98plm3ce.ga",
"jwoug2rht98plm3ce.ml",
"jwoug2rht98plm3ce.tk",
"jwtukew1xb1q.cf",
"jwtukew1xb1q.ga",
"jwtukew1xb1q.gq",
"jwtukew1xb1q.ml",
"jwtukew1xb1q.tk",
"jyliananderik.com",
"jymfit.info",
"jziad5qrcege9.cf",
"jziad5qrcege9.ga",
"jziad5qrcege9.gq",
"jziad5qrcege9.ml",
"jziad5qrcege9.tk",
"k-l-k.ru",
"k-mail.top",
"k.fido.be",
"k17tcth11b.me",
"k2dfcgbld4.cf",
"k2dfcgbld4.ga",
"k2dfcgbld4.gq",
"k2dfcgbld4.ml",
"k2dfcgbld4.tk",
"k2eztto1yij4c.cf",
"k2eztto1yij4c.ga",
"k2eztto1yij4c.gq",
"k2eztto1yij4c.ml",
"k2eztto1yij4c.tk",
"k2idacuhgo3vzskgss.cf",
"k2idacuhgo3vzskgss.ga",
"k2idacuhgo3vzskgss.gq",
"k2idacuhgo3vzskgss.ml",
"k2idacuhgo3vzskgss.tk",
"k3663a40w.com",
"k3opticsf.com",
"k3zaraxg9t7e1f.cf",
"k3zaraxg9t7e1f.ga",
"k3zaraxg9t7e1f.gq",
"k3zaraxg9t7e1f.ml",
"k3zaraxg9t7e1f.tk",
"k4tbtqa7ag5m.cf",
"k4tbtqa7ag5m.ga",
"k4tbtqa7ag5m.gq",
"k4tbtqa7ag5m.ml",
"k4tbtqa7ag5m.tk",
"k9ifse3ueyx5zcvmqmw.cf",
"k9ifse3ueyx5zcvmqmw.ga",
"k9ifse3ueyx5zcvmqmw.ml",
"k9ifse3ueyx5zcvmqmw.tk",
"kaaw39hiawtiv1.ga",
"kaaw39hiawtiv1.gq",
"kaaw39hiawtiv1.ml",
"kaaw39hiawtiv1.tk",
"kademen.com",
"kadokawa.cf",
"kadokawa.ga",
"kadokawa.gq",
"kadokawa.ml",
"kadokawa.tk",
"kadokawa.top",
"kaguya.tk",
"kah.pw",
"kaj3goluy2q.cf",
"kaj3goluy2q.ga",
"kaj3goluy2q.gq",
"kaj3goluy2q.ml",
"kaj3goluy2q.tk",
"kak-gotovit-prosto.ru",
"kak-gotovit-vkusno.ru",
"kakashi1223e.cf",
"kakashi1223e.ga",
"kakashi1223e.ml",
"kakashi1223e.tk",
"kalemproje.com",
"kamen-market.ru",
"kamenrider.ru",
"kamin-hous.ru",
"kamx0-nacal.ru",
"kanciang.faith",
"kandymail.com",
"kangkunk44lur.cf",
"kangsmo.ru",
"kanker.website",
"kanzanishop.com",
"kapitalbuks.ru",
"kapitulin.ru",
"kapnik.ru",
"kappala.info",
"kapstroyservice.ru",
"karaokemike.ru",
"karatraman.ml",
"karel-bani.ru",
"karibu-piratai.ru",
"karitas.com.br",
"karta-kykyruza.ru",
"kartvelo.me",
"karusel-kard.ru",
"kasmail.com",
"kaspop.com",
"katcang.tk",
"katergizmo.de",
"katie11muramats.ga",
"katztube.com",
"kaufshop.ru",
"kauinginpergi.cf",
"kauinginpergi.ga",
"kauinginpergi.gq",
"kauinginpergi.ml",
"kavbc6fzisxzh.cf",
"kavbc6fzisxzh.ga",
"kavbc6fzisxzh.gq",
"kavbc6fzisxzh.ml",
"kavbc6fzisxzh.tk",
"kavisto.ru",
"kaxks55ofhkzt5245n.cf",
"kaxks55ofhkzt5245n.ga",
"kaxks55ofhkzt5245n.gq",
"kaxks55ofhkzt5245n.ml",
"kaxks55ofhkzt5245n.tk",
"kaye.ooo",
"kazan-elki.ru",
"kazelink.ml",
"kazper.net",
"kbakvkwvsu857.cf",
"kbbxowpdcpvkxmalz.cf",
"kbbxowpdcpvkxmalz.ga",
"kbbxowpdcpvkxmalz.gq",
"kbbxowpdcpvkxmalz.ml",
"kbbxowpdcpvkxmalz.tk",
"kbdjvgznhslz.ga",
"kbdjvgznhslz.ml",
"kbdjvgznhslz.tk",
"kchkch.com",
"kcrw.de",
"kdeos.ru",
"kdfgedrdf57mmj.ga",
"kdjngsdgsd.tk",
"kdl8zp0zdh33ltp.ga",
"kdl8zp0zdh33ltp.gq",
"kdl8zp0zdh33ltp.ml",
"kdl8zp0zdh33ltp.tk",
"kdublinstj.com",
"kebl0bogzma.ga",
"kecambahijo89klp.ml",
"kedrovskiy.ru",
"keeplucky.pw",
"keepmymail.com",
"kehangatan.ga",
"keinpardon.de",
"kejenx.com",
"kekita.com",
"kelasbelajar.web.id",
"keluruk.fun",
"kemampuan.me",
"kembangpasir.website",
"kemonkoreeitaholoto.tk",
"kemska.pw",
"kemulastalk.https443.org",
"kenal-saya.ga",
"kenecrehand.port25.biz",
"kennedy808.com",
"kenzo-official.ru",
"keraorganica.ru",
"kerupukmlempem.ml",
"kerupukmlempem.tk",
"kerupukmlempem1.cf",
"kerupukmlempem1.ga",
"kerupukmlempem2.cf",
"kerupukmlempem3.cf",
"kerupukmlempem3.ml",
"kerupukmlempem4.cf",
"kerupukmlempem4.ml",
"kerupukmlempem5.cf",
"kerupukmlempem6.cf",
"kerupukmlempem6.ml",
"kerupukmlempem7.cf",
"kerupukmlempem7.ga",
"kerupukmlempem8.ga",
"kerupukmlempem9.cf",
"kevintrankt.com",
"kewkece.com",
"kexukexu.xyz",
"key-mail.net",
"keyesrealtors.tk",
"keykeykelyns.cf",
"keykeykelyns.ga",
"keykeykelyns.gq",
"keykeykelyns.ml",
"keykeykelyns.tk",
"keykeykelynss.cf",
"keykeykelynss.ga",
"keykeykelynss.gq",
"keykeykelynss.ml",
"keykeykelynss.tk",
"keykeykelynsss.cf",
"keykeykelynsss.ga",
"keykeykelynsss.gq",
"keykeykelynsss.ml",
"keykeykelynsss.tk",
| |
<reponame>MilesCranmer/pyjulia
"""
Bridge Python and Julia by initializing the Julia runtime inside Python.
"""
# ----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython and Julia Development Teams.
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import atexit
import ctypes
import ctypes.util
import logging as _logging # see `.logger`
import os
import sys
import textwrap
import warnings
from ctypes import c_char_p, c_void_p
from logging import getLogger # see `.logger`
from types import ModuleType  # base class for JuliaModule below
from .find_libpython import find_libpython, linked_libpython
from .juliainfo import JuliaInfo
from .libjulia import UNBOXABLE_TYPES, LibJulia, get_inprocess_libjulia, get_libjulia
from .options import JuliaOptions, options_docs
from .release import __version__
from .utils import PYCALL_PKGID, is_windows
try:
from shutil import which
except ImportError:
    # For Python < 3.3; it behaves more or less like
    # shutil.which when used with a single argument.
from distutils.spawn import find_executable as which
try:
FutureWarning
except NameError:
# Python 2
FutureWarning = DeprecationWarning
try:
string_types = (basestring,)
except NameError:
string_types = (str,)
# ----------------------------------------------------------------------------
# Classes and functions
# ----------------------------------------------------------------------------
python_version = sys.version_info
logger = getLogger("julia")
"""
Implementation notes: We are not importing `logging` module at the top
level so that using `logging.debug` instead of `logger.debug` becomes
an error.
"""
_loghandler = None
def get_loghandler():
"""
Get `logging.StreamHandler` private to PyJulia.
"""
global _loghandler
if _loghandler is None:
formatter = _logging.Formatter("%(levelname)s %(message)s")
_loghandler = _logging.StreamHandler()
_loghandler.setFormatter(formatter)
logger.addHandler(_loghandler)
return _loghandler
def set_loglevel(level):
get_loghandler()
logger.setLevel(getattr(_logging, level, level))
def enable_debug():
set_loglevel("DEBUG")
handler = get_loghandler()
handler.setFormatter(_logging.Formatter("%(levelname)s (%(process)d) %(message)s"))
logger.debug("") # flush whatever in the line
logger.debug("Debug-level logging is enabled for PyJulia.")
logger.debug("PyJulia version: %s", __version__)
class JuliaError(Exception):
"""
Wrapper for Julia exceptions.
"""
# fmt: off
class JuliaNotFound(RuntimeError):
def __init__(self, executable, kwargname):
self.executable = executable
self.kwargname = kwargname
def __str__(self):
return """\
Julia executable `{}` cannot be found.
If you have installed Julia, make sure Julia executable is in the
system PATH. Alternatively, specify file path to the Julia executable
using `{}` keyword argument.
If you have not installed Julia, download Julia from
https://julialang.org/downloads/ and install it.
""".format(self.executable, self.kwargname)
def remove_prefix(string, prefix):
return string[len(prefix):] if string.startswith(prefix) else string
def jl_name(name):
if name.endswith('_b'):
return name[:-2] + '!'
return name
def py_name(name):
if name.endswith('!'):
return name[:-1] + '_b'
return name
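# The two helpers above implement PyJulia's name mangling: Julia's trailing
# `!` (the mutating-function convention) maps to a `_b` suffix in Python:
#
#     >>> jl_name('push_b'), py_name('push!')
#     ('push!', 'push_b')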
class JuliaModule(ModuleType):
def __init__(self, loader, *args, **kwargs):
super(JuliaModule, self).__init__(*args, **kwargs)
self._julia = loader.julia
self.__loader__ = loader
@property
def __all__(self):
juliapath = remove_prefix(self.__name__, "julia.")
names = set(self._julia.eval("names({})".format(juliapath)))
names.discard(juliapath.rsplit('.', 1)[-1])
return [py_name(n) for n in names if is_accessible_name(n)]
def __dir__(self):
if python_version.major == 2:
names = set()
else:
names = set(super(JuliaModule, self).__dir__())
names.update(self.__all__)
return list(names)
# Override __dir__ method so that completing member names work
# well in Python REPLs like IPython.
__path__ = ()
# Declare that `JuliaModule` is a Python module since any Julia
# module can have sub-modules.
# See: https://docs.python.org/3/reference/import.html#package-path-rules
def __getattr__(self, name):
try:
return self.__try_getattr(name)
except AttributeError:
if name.endswith("_b"):
try:
return self.__try_getattr(jl_name(name))
except AttributeError:
pass
raise
def __try_getattr(self, name):
jl_module = remove_prefix(self.__name__, "julia.")
jl_fullname = ".".join((jl_module, name))
if self._julia.isamodule(jl_fullname):
realname = self._julia.fullname(self._julia.eval(jl_fullname))
if self._julia.isdefined(realname):
return self.__loader__.load_module("julia." + realname)
# Otherwise, it may be, e.g., "Main.anonymous", created by
# Module().
if self._julia._isdefined(jl_module, name):
return self._julia.eval(jl_fullname)
raise AttributeError(name)
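    # Attribute lookup thus resolves in two steps: `julia.Base.sin` first
    # checks whether `Base.sin` is itself a Julia module (loading it as a
    # `JuliaModule` if so) and otherwise evaluates it as a plain Julia value.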
class JuliaMainModule(JuliaModule):
def __setattr__(self, name, value):
if name.startswith('_'):
super(JuliaMainModule, self).__setattr__(name, value)
else:
juliapath = remove_prefix(self.__name__, "julia.")
setter = '''
PyCall.pyfunctionret(
(x) -> Base.eval({}, :({} = $x)),
Any,
PyCall.PyAny)
'''.format(juliapath, jl_name(name))
self._julia.eval(setter)(value)
help = property(lambda self: self._julia.help)
eval = property(lambda self: self._julia.eval)
using = property(lambda self: self._julia.using)
# add custom import behavior for the julia "module"
class JuliaImporter(object):
    # find_module was deprecated in Python 3.4
def find_module(self, fullname, path=None):
if fullname.startswith("julia."):
filename = fullname.split(".", 2)[1]
filepath = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(filepath + ".py") or os.path.isdir(filepath):
return
return JuliaModuleLoader()
class JuliaModuleLoader(object):
@property
def julia(self):
self.__class__.julia = julia = Julia()
return julia
    # load_module was deprecated in Python 3.4
def load_module(self, fullname):
juliapath = remove_prefix(fullname, "julia.")
if juliapath == 'Main':
return sys.modules.setdefault(fullname,
JuliaMainModule(self, fullname))
elif self.julia.isafunction(juliapath):
return self.julia.eval(juliapath)
try:
self.julia.eval("import {}".format(juliapath.split(".", 1)[0]))
except JuliaError:
pass
else:
if self.julia.isamodule(juliapath):
return sys.modules.setdefault(fullname,
JuliaModule(self, fullname))
raise ImportError("{} not found".format(juliapath))
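# With the importer above registered on `sys.meta_path`, Julia modules can
# be imported like Python packages (a sketch, assuming a working PyCall
# setup):
#
#     >>> from julia import Main      # the special Main module
#     >>> Main.eval("1 + 1")
#     2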
def ismacro(name):
""" Is the name a macro?
>>> ismacro('@time')
True
>>> ismacro('sum')
False
"""
return name.startswith("@")
def isoperator(name):
return not name[0].isalpha()
def isprotected(name):
return name.startswith("_")
def notascii(name):
try:
name.encode("ascii")
return False
    except UnicodeEncodeError:
return True
def is_accessible_name(name):
"""
Check if a Julia variable `name` is (easily) accessible from Python.
Return `True` if `name` can be safely converted to a Python
identifier using `py_name` function. For example,
>>> is_accessible_name('A_mul_B!')
True
Since it can be accessed as `A_mul_B_b` in Python.
"""
return not (ismacro(name) or
isoperator(name) or
isprotected(name) or
notascii(name))
# fmt: on
def determine_if_statically_linked():
"""Determines if this python executable is statically linked"""
return linked_libpython() is None
_unsupported_error_common_header = """\
It seems your Julia and PyJulia setup are not supported.
Julia executable:
{runtime}
Python interpreter and libpython used by PyCall.jl:
{jlinfo.python}
{jl_libpython}
Python interpreter used to import PyJulia and its libpython:
{sys.executable}
{py_libpython}
"""
_unsupported_error_common_footer = """
For more information, see:
https://pyjulia.readthedocs.io/en/latest/troubleshooting.html
"""
_unsupported_error_statically_linked = """
Your Python interpreter "{sys.executable}"
is statically linked to libpython. Currently, PyJulia does not fully
support such a Python interpreter.
The easiest workaround is to pass `compiled_modules=False` to `Julia`
constructor. To do so, first *reboot* your Python REPL (if this happened
inside an interactive session) and then evaluate:
>>> from julia.api import Julia
>>> jl = Julia(compiled_modules=False)
Another workaround is to run your Python script with `python-jl`
command bundled in PyJulia. You can simply do:
$ python-jl PATH/TO/YOUR/SCRIPT.py
See `python-jl --help` for more information.
"""
_unsupported_error_incompatible_libpython = """
In Julia >= 0.7, the above two paths to `libpython` have to match exactly
in order for PyJulia to work out-of-the-box. To configure PyCall.jl to use
Python interpreter "{sys.executable}",
run the following code in the Python REPL:
>>> import julia
>>> julia.install()
"""
class UnsupportedPythonError(Exception):
def __init__(self, jlinfo):
self.jlinfo = jlinfo
self.statically_linked = determine_if_statically_linked()
def __str__(self):
template = _unsupported_error_common_header
if self.statically_linked:
template += _unsupported_error_statically_linked
else:
template += _unsupported_error_incompatible_libpython
template += _unsupported_error_common_footer
return template.format(
runtime=self.jlinfo.julia,
jlinfo=self.jlinfo,
py_libpython=find_libpython(),
jl_libpython=self.jlinfo.libpython_path,
sys=sys,
)
class Julia(object):
"""
Implements a bridge to the Julia runtime.
This uses the Julia PyCall module to perform type conversions and allow
full access to the entire Julia runtime.
"""
# fmt: off
def __init__(self, init_julia=True, jl_init_path=None, runtime=None,
jl_runtime_path=None, debug=False, **julia_options):
"""
Create a Python object that represents a live Julia runtime.
Note: Use `LibJulia` to fully control the initialization of
the Julia runtime.
Parameters
==========
init_julia : bool
If True, try to initialize the Julia runtime. If this code is
being called from inside an already running Julia, the flag should
be passed as False so the interpreter isn't re-initialized.
Note that it is safe to call this class constructor twice in the
same process with `init_julia` set to True, as a global reference
is kept to avoid re-initializing it. The purpose of the flag is
only to manage situations when Julia was initialized from outside
this code.
runtime : str
Custom Julia binary, e.g. "/usr/local/bin/julia" or "julia-1.0.0".
debug : bool
If True, print some debugging information to STDERR
"""
# Note: `options_docs` is appended below (top level)
if debug:
enable_debug()
if jl_runtime_path is not None:
warnings.warn(
"`jl_runtime_path` is deprecated. Please use `runtime`.", FutureWarning
)
if not init_julia and runtime is None and is_windows:
warnings.warn(
"It is recommended to pass `runtime` when `init_julia=False` in Windows"
)
if runtime is None:
if jl_runtime_path is None:
runtime = "julia"
else:
runtime = jl_runtime_path
else:
if jl_runtime_path is None:
jl_runtime_path = which(runtime)
if jl_runtime_path is None:
raise JuliaNotFound(runtime, kwargname="runtime")
else:
raise TypeError(
"Both `runtime` and `jl_runtime_path` are specified.")
if jl_init_path:
warnings.warn(
"`jl_init_path` is deprecated. Please use `bindir`.", FutureWarning
)
if "bindir" in julia_options:
raise TypeError("Both `jl_init_path` and `bindir` are specified.")
logger.debug("") # so that debug message is shown nicely w/ pytest
if get_libjulia():
# Use pre-existing `LibJulia`.
self.api = get_libjulia()
elif init_julia:
jlinfo = JuliaInfo.load(runtime)
if jlinfo.version_info < (0, 7):
raise RuntimeError("PyJulia does not support Julia < 0.7 anymore")
self.api = LibJulia.from_juliainfo(jlinfo)
if jl_init_path:
self.api.bindir = jl_init_path
options = JuliaOptions(**julia_options)
is_compatible_python = jlinfo.is_compatible_python()
logger.debug("is_compatible_python = %r", is_compatible_python)
use_custom_sysimage = options.sysimage is not None
logger.debug("use_custom_sysimage = %r", use_custom_sysimage)
logger.debug("compiled_modules = %r", options.compiled_modules)
if not (
options.compiled_modules == "no"
or is_compatible_python
or use_custom_sysimage
):
raise UnsupportedPythonError(jlinfo)
self.api.init_julia(options)
# We are assuming that `jl_is_initialized()` was true only
# if this process was a Julia process (hence PyCall had
# already | |
response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ComponentHub
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_component_hub_with_http_info(owner, name, **kwargs) # noqa: E501
def get_component_hub_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get hub component # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_hub_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
    :param str name: Component under namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ComponentHub, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_component_hub" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_component_hub`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_component_hub`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/hub/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ComponentHub', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
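    # A usage sketch; the api class and owner/name values below are
    # illustrative assumptions, since this is generated client code:
    #
    #     >>> api = ComponentHubV1Api(api_client)              # hypothetical
    #     >>> hub = api.get_component_hub("acme", "classifier")
    #     >>> thread = api.get_component_hub("acme", "classifier",
    #     ...                                async_req=True)   # non-blocking
    #     >>> hub = thread.get()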
def get_component_hub_activities(self, owner, name, **kwargs): # noqa: E501
"""Get hub activities # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_hub_activities(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListActivitiesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_component_hub_activities_with_http_info(owner, name, **kwargs) # noqa: E501
def get_component_hub_activities_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get hub activities # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_hub_activities_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListActivitiesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_component_hub_activities" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_component_hub_activities`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_component_hub_activities`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
if 'pins' in local_var_params and local_var_params['pins'] is not None: # noqa: E501
query_params.append(('pins', local_var_params['pins'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/hub/{name}/activities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListActivitiesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
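    # Pagination sketch for the activities endpoint (owner/name/sort values
    # are illustrative assumptions):
    #
    #     >>> page = api.get_component_hub_activities(
    #     ...     "acme", "classifier", offset=0, limit=20, sort="-created_at")
    #     >>> everything = api.get_component_hub_activities(
    #     ...     "acme", "classifier", no_page=True)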
def get_component_hub_settings(self, owner, name, **kwargs): # noqa: E501
"""Get hub component settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_hub_settings(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
    :param str name: Component under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ComponentHubSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_component_hub_settings_with_http_info(owner, name, **kwargs) # noqa: E501
def get_component_hub_settings_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get hub component settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_component_hub_settings_with_http_info(owner, name, async_req=True)
>>> result | |
import pytest
try:
from linebot import LineBotApi, WebhookParser
from linebot.models import (
BeaconEvent, FollowEvent, JoinEvent, LeaveEvent, MessageEvent,
PostbackEvent, UnfollowEvent, MemberJoinedEvent, MemberLeftEvent,
AccountLinkEvent,
TextSendMessage, ImageSendMessage, AudioSendMessage,
VideoSendMessage, LocationSendMessage, StickerSendMessage,
ImagemapSendMessage, TemplateSendMessage, FlexSendMessage,
ButtonsTemplate, FlexContainer,
)
from linebot.exceptions import LineBotApiError
from minette.adapter.lineadapter import LineAdapter
except Exception:
# Skip if import dependencies not found
pytestmark = pytest.mark.skip
from minette import (
DialogService,
Message,
Payload,
Config
)
lineconfig = Config("config/test_config_adapter.ini")
channel_secret = lineconfig.get("channel_secret", section="line_bot_api")
channel_access_token = lineconfig.get("channel_access_token", section="line_bot_api")
# Skip if channel_secret or channel_access_token is not provided
if not channel_secret or not channel_access_token:
pytestmark = pytest.mark.skip
class MyDialog(DialogService):
def compose_response(self, request, context, connection):
return "res:" + request.text
def test_init():
adapter = LineAdapter(
channel_secret=channel_secret,
channel_access_token=channel_access_token, prepare_table=True)
assert isinstance(adapter.parser, WebhookParser)
assert isinstance(adapter.api, LineBotApi)
def test_extract_token():
adapter = LineAdapter(
channel_secret=channel_secret,
channel_access_token=channel_access_token, prepare_table=True)
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "text",
"text": "Hello, world!"
}
})
token = adapter._extract_token(event)
assert token == "<KEY>"
def test_to_minette_message():
# setup adapter
adapter = LineAdapter(
channel_secret=channel_secret,
channel_access_token=channel_access_token, prepare_table=True)
# text
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "text",
"text": "Hello, world!"
}
})
message = adapter._to_minette_message(event)
assert message.id == "325708"
assert message.type == "text"
assert message.text == "Hello, world!"
assert message.channel_user_id == "U4af4980629..."
# image
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "image",
"contentProvider": {
"type": "line"
}
}
})
message = adapter._to_minette_message(event)
assert message.type == "image"
assert message.payloads[0].url == \
"https://api.line.me/v2/bot/message/325708/content"
assert message.payloads[0].thumb == \
"https://api.line.me/v2/bot/message/325708/content"
# video
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "video",
"duration": 60000,
"contentProvider": {
"type": "external",
"originalContentUrl": "https://example.com/original.mp4",
"previewImageUrl": "https://example.com/preview.jpg"
}
}
})
message = adapter._to_minette_message(event)
assert message.type == "video"
assert message.payloads[0].url == \
"https://api.line.me/v2/bot/message/325708/content"
assert message.payloads[0].thumb == \
"https://api.line.me/v2/bot/message/325708/content"
# audio
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "audio",
"duration": 60000,
"contentProvider": {
"type": "line"
}
}
})
message = adapter._to_minette_message(event)
assert message.type == "audio"
assert message.payloads[0].url == \
"https://api.line.me/v2/bot/message/325708/content"
# location
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "location",
"title": "my location",
"address": "〒150-0002 東京都渋谷区渋谷2丁目21−1",
"latitude": 35.65910807942215,
"longitude": 139.70372892916203
}
})
message = adapter._to_minette_message(event)
assert message.type == "location"
assert message.payloads[0].content["title"] == "my location"
assert message.payloads[0].content["address"] == \
"〒150-0002 東京都渋谷区渋谷2丁目21−1"
assert message.payloads[0].content["latitude"] == 35.65910807942215
assert message.payloads[0].content["longitude"] == 139.70372892916203
# sticker
event = MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "sticker",
"packageId": "1",
"stickerId": "2"
}
})
message = adapter._to_minette_message(event)
assert message.type == "sticker"
assert message.payloads[0].content["package_id"] == "1"
assert message.payloads[0].content["sticker_id"] == "2"
# postback
event = PostbackEvent.new_from_json_dict({
"type": "postback",
"replyToken": "<KEY>",
"source": {
"userId": "U91eeaf62d...",
"type": "user"
},
"timestamp": 1513669370317,
"postback": {
"data": "storeId=12345",
"params": {
"datetime": "2017-12-25T01:00"
}
}
})
message = adapter._to_minette_message(event)
assert message.type == "postback"
assert message.payloads[0].content["data"] == "storeId=12345"
assert message.payloads[0].content["params"] == \
{"datetime": "2017-12-25T01:00"}
# follow
event = FollowEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "follow",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
}
})
message = adapter._to_minette_message(event)
assert message.type == "follow"
# unfollow
event = UnfollowEvent.new_from_json_dict({
"type": "unfollow",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
}
})
message = adapter._to_minette_message(event)
assert message.type == "unfollow"
# join
event = JoinEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "join",
"timestamp": 1462629479859,
"source": {
"type": "group",
"groupId": "C4af4980629..."
}
})
message = adapter._to_minette_message(event)
assert message.type == "join"
# leave
event = LeaveEvent.new_from_json_dict({
"type": "leave",
"timestamp": 1462629479859,
"source": {
"type": "group",
"groupId": "C4af4980629..."
}
})
message = adapter._to_minette_message(event)
assert message.type == "leave"
# member join
event = MemberJoinedEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "memberJoined",
"timestamp": 1462629479859,
"source": {
"type": "room",
"roomId": "C4af4980629..."
},
"joined": {
"members": [
{
"type": "user",
"userId": "U4af4980629..."
},
{
"type": "user",
"userId": "U91eeaf62d9..."
}
]
}
})
message = adapter._to_minette_message(event)
assert message.type == "memberJoined"
assert message.group.type == "room"
# member leave
event = MemberLeftEvent.new_from_json_dict({
"type": "memberLeft",
"timestamp": 1462629479960,
"source": {
"type": "room",
"roomId": "C4af4980629..."
},
"left": {
"members": [
{
"type": "user",
"userId": "U4af4980629..."
},
{
"type": "user",
"userId": "U91eeaf62d9..."
}
]
}
})
message = adapter._to_minette_message(event)
assert message.type == "memberLeft"
assert message.group.type == "room"
# beacon
event = BeaconEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "beacon",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"beacon": {
"hwid": "d41d8cd98f",
"type": "enter"
}
})
message = adapter._to_minette_message(event)
assert message.type == "beacon"
# other(account link)
event = AccountLinkEvent.new_from_json_dict({
"type": "accountLink",
"replyToken": "<KEY>",
"source": {
"userId": "U91eeaf62d...",
"type": "user"
},
"timestamp": 1513669370317,
"link": {
"result": "ok",
"nonce": "xxxxxxxxxxxxxxx"
}
})
message = adapter._to_minette_message(event)
assert message.type == "accountLink"
def test_to_channel_message():
# payload = next(iter([p for p in message.payloads if p.content_type != "quick_reply"]), None)
# quick_reply = next(iter([p.content for p in message.payloads if p.content_type == "quick_reply"]), None)
message = LineAdapter._to_channel_message(Message(text="hello"))
assert isinstance(message, TextSendMessage)
assert message.text == "hello"
message = LineAdapter._to_channel_message(Message(type="image",
payloads=[Payload(url="https://image", thumb="https://thumb")]))
assert isinstance(message, ImageSendMessage)
assert message.original_content_url == "https://image"
assert message.preview_image_url == "https://thumb"
message = LineAdapter._to_channel_message(Message(type="audio",
payloads=[Payload(url="https://audio", content={"duration": 1.2})]))
assert isinstance(message, AudioSendMessage)
assert message.original_content_url == "https://audio"
assert message.duration == 1.2
message = LineAdapter._to_channel_message(Message(type="video",
payloads=[Payload(url="https://video", thumb="https://thumb")]))
assert isinstance(message, VideoSendMessage)
assert message.original_content_url == "https://video"
assert message.preview_image_url == "https://thumb"
message = LineAdapter._to_channel_message(
Message(type="location", payloads=[Payload(content={
"title": "Jiyugaoka",
"address": "1-2-3 Jiyugaoka, Meguro-ku, Tokyo",
"latitude": 35.607757, "longitude": 139.668411
})])
)
assert isinstance(message, LocationSendMessage)
assert message.title == "Jiyugaoka"
assert message.address == "1-2-3 Jiyugaoka, Meguro-ku, Tokyo"
assert message.latitude == 35.607757
assert message.longitude == 139.668411
message = LineAdapter._to_channel_message(Message(type="sticker",
payloads=[Payload(content={
"package_id": "11537", "sticker_id": "52002734"})]))
assert isinstance(message, StickerSendMessage)
assert message.package_id == "11537"
assert message.sticker_id == "52002734"
imagemap_action = {
"type": "uri",
"label": "https://example.com/",
"linkUri": "https://example.com/",
"area": {
"x": 0,
"y": 0,
"width": 520,
"height": 1040
}
}
message = LineAdapter._to_channel_message(Message(type="imagemap",
text="imagemap message",
payloads=[Payload(
url="https://imagemap",
content={
"base_size": {"width": 1040, "height": 585},
"actions": [imagemap_action]})]))
assert isinstance(message, ImagemapSendMessage)
assert message.alt_text == "imagemap message"
assert message.base_url == "https://imagemap"
assert message.base_size.width == 1040
assert message.base_size.height == 585
assert message.actions[0].type == "uri"
assert message.actions[0].link_uri == "https://example.com/"
assert message.actions[0].area.width == 520
template = {
"type": "buttons",
"thumbnailImageUrl": "https://example.com/bot/images/image.jpg",
"imageAspectRatio": "rectangle",
"imageSize": "cover",
"imageBackgroundColor": "#FFFFFF",
"title": "Menu",
"text": "Please select",
"defaultAction": {
"type": "uri",
"label": "View detail",
"uri": "http://example.com/page/123"
},
"actions": [
{
"type": "postback",
"label": "Buy",
"data": "action=buy&itemid=123"
},
{
"type": "postback",
"label": "Add to cart",
"data": "action=add&itemid=123"
},
{
"type": "uri",
"label": "View detail",
"uri": "http://example.com/page/123"
}
]
}
message = LineAdapter._to_channel_message(Message(type="template",
text="template message", payloads=[Payload(content=template)]))
assert isinstance(message, TemplateSendMessage)
assert message.alt_text == "template message"
assert isinstance(message.template, ButtonsTemplate)
flex = {
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "hello"
},
{
"type": "text",
"text": "world"
}
]
}
}
message = LineAdapter._to_channel_message(Message(type="flex",
text="flex message", payloads=[Payload(content=flex)]))
assert isinstance(message, FlexSendMessage)
assert message.alt_text == "flex message"
assert isinstance(message.contents, FlexContainer)
message = LineAdapter._to_channel_message(Message(type="unknown"))
assert message is None
def test_handle_event():
adapter = LineAdapter(
channel_secret=channel_secret,
channel_access_token=channel_access_token,
default_dialog_service=MyDialog, debug=True, prepare_table=True
)
with pytest.raises(LineBotApiError):
adapter.handle_event(MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "text",
"text": "Hello, world!"
}
}))
adapter.debug = False
with pytest.raises(LineBotApiError):
adapter.handle_event(MessageEvent.new_from_json_dict({
"replyToken": "<KEY>",
"type": "message",
"timestamp": 1462629479859,
"source": {
"type": "user",
"userId": "U4af4980629..."
},
"message": {
"id": "325708",
"type": "text",
"text": "Hello, world!"
}
}))
def test_handle_http_request():
request_data = '{"events":[{"type":"message","replyToken":"e60740718df849e396e93600254b28b5","source":{"userId":"U4bb389af09ad694ace414ce22d57ac0f","type":"user"},"timestamp":1569657170129,"message":{"type":"text","id":"10646756260763","text":"hello"}}],"destination":"U9e20741b688ed93c536adbe97acee31d"}'.encode(encoding="utf-8")
request_headers = {
"X-Line-Signature": "Kj+MIQWKb6gE/IO8c9+TydGF3o9qx8sjC1qiqiTfDao=",
"Content-Type": "application/json;charset=UTF-8",
"Content-Length": 288,
"Host": "host.name.local",
"Accept": "*/*",
"User-Agent": "LineBotWebhook/1.0",
"X-Forwarded-Proto": "https",
"X-Forwarded-For": "1.2.3.4"
}
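# The X-Line-Signature header is, per the LINE webhook spec, the base64-encoded
# HMAC-SHA256 digest of the raw request body keyed by the channel secret. A
# minimal sketch for computing a valid signature in tests (standard library
# only; reuses request_data and the module-level channel_secret):
#
#   import base64, hashlib, hmac
#   digest = hmac.new(channel_secret.encode("utf-8"), request_data, hashlib.sha256).digest()
#   valid_signature = base64.b64encode(digest).decode("utf-8")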
# multi thread
adapter = LineAdapter(
channel_secret=channel_secret,
channel_access_token=channel_access_token,
default_dialog_service=MyDialog, prepare_table=True
)
response = adapter.handle_http_request(request_data, request_headers)
# error will not be handled because the error occurs in a worker thread
assert response.messages[0].text == "done"
# main thread
adapter = LineAdapter(
channel_secret=channel_secret,
channel_access_token=channel_access_token,
threads=0,
default_dialog_service=MyDialog, prepare_table=True
)
response = adapter.handle_http_request(request_data, request_headers)
assert response.messages[0].text == "failure in parsing request"
# common error
adapter.parser = None
response = adapter.handle_http_request(request_data, request_headers)
assert response.messages[0].text == "failure in parsing request"
# signature error
request_headers = {
"X-Line-Signature": "invalid_signature",
import collections
import torch
from ctools.pysc2.lib import features
from ctools.pysc2.lib.static_data import NUM_ACTIONS, ACTIONS_REORDER, UPGRADES_REORDER_INV
import numpy as np
Avail_fn = collections.namedtuple(
'Avail_fn', ['func_id', 'func_name', 'units', 'upgrade', 'resource', 'mana', 'supply']
)
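# A minimal sketch of how these fields might be evaluated (an assumption for
# illustration, not this project's actual availability check): each inner list
# in `units` is an OR-group of unit type ids of which at least one must be
# owned, `resource` is [minerals, gas], and `upgrade`, `mana`, `supply` are
# single thresholds.
def _is_available(fn, owned_units, owned_upgrades, minerals, gas, energy, supply_left):
    if fn.units is not None:
        for group in fn.units:
            if not any(u in owned_units for u in group):
                return False  # a required unit group is not satisfied
    if fn.upgrade is not None and fn.upgrade not in owned_upgrades:
        return False  # required upgrade has not been researched
    if fn.resource and (minerals < fn.resource[0] or gas < fn.resource[1]):
        return False  # not enough minerals/vespene gas
    return energy >= fn.mana and supply_left >= fn.supply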
FUNCTION_LIST = [
Avail_fn(0, "no_op", None, None, [], 0, 0),
Avail_fn(1, "Smart_pt", None, None, [], 0, 0),
Avail_fn(2, "Attack_pt", None, None, [], 0, 0),
Avail_fn(3, "Attack_unit", None, None, [], 0, 0),
Avail_fn(12, "Smart_unit", None, None, [], 0, 0),
Avail_fn(13, "Move_pt", None, None, [], 0, 0),
Avail_fn(14, "Move_unit", None, None, [], 0, 0),
Avail_fn(15, "Patrol_pt", None, None, [], 0, 0),
Avail_fn(16, "Patrol_unit", None, None, [], 0, 0),
Avail_fn(17, "HoldPosition_quick", None, None, [], 0, 0),
#Avail_fn(18, "Research_InterceptorGravitonCatapult_quick", [[64]], None, [150, 150], 0, 0),
#Avail_fn(19, "Research_PhoenixAnionPulseCrystals_quick", [[64]], None, [150, 150], 0, 0),
#Avail_fn(20, "Effect_GuardianShield_quick", [[77]], None, [], 75, 0),
#Avail_fn(21, "Train_Mothership_quick", [[59], [64]], None, [400, 400], 0, 0),
#Avail_fn(22, "Hallucination_Archon_quick", [[77]], None, [], 75, 0),
#Avail_fn(23, "Hallucination_Colossus_quick", [[77]], None, [], 75, 0),
#Avail_fn(24, "Hallucination_HighTemplar_quick", [[77]], None, [], 75, 0),
#Avail_fn(25, "Hallucination_Immortal_quick", [[77]], None, [], 75, 0),
#Avail_fn(26, "Hallucination_Phoenix_quick", [[77]], None, [], 75, 0),
#Avail_fn(27, "Hallucination_Probe_quick", [[77]], None, [], 75, 0),
#Avail_fn(28, "Hallucination_Stalker_quick", [[77]], None, [], 75, 0),
#Avail_fn(29, "Hallucination_VoidRay_quick", [[77]], None, [], 75, 0),
#Avail_fn(30, "Hallucination_WarpPrism_quick", [[77]], None, [], 75, 0),
#Avail_fn(31, "Hallucination_Zealot_quick", [[77]], None, [], 75, 0),
#Avail_fn(32, "Effect_GravitonBeam_unit", [[78]], None, [], 50, 0),
#Avail_fn(33, "Effect_ChronoBoost_unit", [[59]], None, [], 50, 0),
#Avail_fn(34, "Build_Nexus_pt", [[84]], None, [400, 0], 0, 0),
#Avail_fn(35, "Build_Pylon_pt", [[84]], None, [100, 0], 0, 0),
#Avail_fn(36, "Build_Assimilator_unit", [[84]], None, [25, 0], 0, 0),
#Avail_fn(37, "Build_Gateway_pt", [[84], [59]], None, [150, 0], 0, 0),
#Avail_fn(38, "Build_Forge_pt", [[84], [59]], None, [150, 0], 0, 0),
#Avail_fn(39, "Build_FleetBeacon_pt", [[84], [67]], None, [300, 200], 0, 0),
#Avail_fn(40, "Build_TwilightCouncil_pt", [[84], [72]], None, [150, 100], 0, 0),
#Avail_fn(41, "Build_PhotonCannon_pt", [[84], [63]], None, [150, 0], 0, 0),
#Avail_fn(42, "Build_Stargate_pt", [[84], [72]], None, [150, 150], 0, 0),
#Avail_fn(43, "Build_TemplarArchive_pt", [[84], [65]], None, [150, 200], 0, 0),
#Avail_fn(44, "Build_DarkShrine_pt", [[84], [65]], None, [150, 150], 0, 0),
#Avail_fn(45, "Build_RoboticsBay_pt", [[84], [71]], None, [150, 150], 0, 0),
#Avail_fn(46, "Build_RoboticsFacility_pt", [[84], [72]], None, [200, 100], 0, 0),
#Avail_fn(47, "Build_CyberneticsCore_pt", [[84], [62, 133]], None, [150, 0], 0, 0),
#Avail_fn(48, "Build_ShieldBattery_pt", [[84], [72]], None, [100, 0], 0, 0),
#Avail_fn(49, "Train_Zealot_quick", [[62]], None, [100, 0], 0, 0),
#Avail_fn(50, "Train_Stalker_quick", [[62], [72]], None, [125, 50], 0, 0),
#Avail_fn(51, "Train_HighTemplar_quick", [[62], [68]], None, [50, 150], 0, 0),
#Avail_fn(52, "Train_DarkTemplar_quick", [[62], [69]], None, [125, 125], 0, 0),
#Avail_fn(53, "Train_Sentry_quick", [[62], [72]], None, [50, 100], 0, 0),
#Avail_fn(54, "Train_Adept_quick", [[62], [72]], None, [100, 25], 0, 0),
#Avail_fn(55, "Train_Phoenix_quick", [[67]], None, [150, 100], 0, 0),
#Avail_fn(56, "Train_Carrier_quick", [[67], [64]], None, [350, 250], 0, 0),
#Avail_fn(57, "Train_VoidRay_quick", [[67]], None, [250, 150], 0, 0),
#Avail_fn(58, "Train_Oracle_quick", [[67]], None, [150, 150], 0, 0),
#Avail_fn(59, "Train_Tempest_quick", [[67], [64]], None, [250, 175], 0, 0),
#Avail_fn(60, "Train_WarpPrism_quick", [[71]], None, [250, 0], 0, 0),
#Avail_fn(61, "Train_Observer_quick", [[71]], None, [25, 75], 0, 0),
#Avail_fn(62, "Train_Colossus_quick", [[71], [70]], None, [300, 200], 0, 0),
#Avail_fn(63, "Train_Immortal_quick", [[71]], None, [275, 100], 0, 0),
#Avail_fn(64, "Train_Probe_quick", [[59]], None, [50, 0], 0, 0),
#Avail_fn(65, "Effect_PsiStorm_pt", [[75]], 52, [], 75, 0),
#Avail_fn(66, "Build_Interceptors_quick", [[79]], None, [15, 0], 0, 0),
#Avail_fn(67, "Research_GraviticBooster_quick", [[70]], None, [100, 100], 0, 0),
#Avail_fn(68, "Research_GraviticDrive_quick", [[70]], None, [100, 100], 0, 0),
#Avail_fn(69, "Research_ExtendedThermalLance_quick", [[70]], None, [150, 150], 0, 0),
#Avail_fn(70, "Research_PsiStorm_quick", [[68]], None, [200, 200], 0, 0),
#Avail_fn(71, "TrainWarp_Zealot_pt", [[133]], None, [100, 0], 0, 2),
#Avail_fn(72, "TrainWarp_Stalker_pt", [[133]], None, [125, 50], 0, 2),
#Avail_fn(73, "TrainWarp_HighTemplar_pt", [[133]], None, [50, 150], 0, 2),
#Avail_fn(74, "TrainWarp_DarkTemplar_pt", [[133]], None, [125, 125], 0, 2),
#Avail_fn(75, "TrainWarp_Sentry_pt", [[133]], None, [50, 100], 0, 2),
#Avail_fn(76, "TrainWarp_Adept_pt", [[133]], None, [100, 25], 0, 2),
#Avail_fn(77, "Morph_WarpGate_quick", [[62]], 84, [], 0, 0),
#Avail_fn(78, "Morph_Gateway_quick", [[133]], None, [], 0, 0),
#Avail_fn(79, "Effect_ForceField_pt", [[77]], None, [], 50, 0),
#Avail_fn(80, "Morph_WarpPrismPhasingMode_quick", [[81]], None, [], 0, 0),
#Avail_fn(81, "Morph_WarpPrismTransportMode_quick", [[136]], None, [], 0, 0),
#Avail_fn(82, "Research_WarpGate_quick", [[72]], None, [50, 50], 0, 0),
#Avail_fn(83, "Research_Charge_quick", [[65]], None, [100, 100], 0, 0),
#Avail_fn(84, "Research_Blink_quick", [[65]], None, [150, 150], 0, 0),
#Avail_fn(85, "Research_AdeptResonatingGlaives_quick", [[65]], None, [100, 100], 0, 0),
#Avail_fn(86, "Morph_Archon_quick", [[75, 76]], None, [], 0, 0),
Avail_fn(87, "Behavior_BuildingAttackOn_quick", [[9]], None, [], 0, 0),
Avail_fn(88, "Behavior_BuildingAttackOff_quick", [[9]], None, [], 0, 0),
#Avail_fn(89, "Hallucination_Oracle_quick", [[77]], None, [], 75, 0),
#Avail_fn(90, "Effect_OracleRevelation_pt", [[495]], None, [], 50, 0),
#Avail_fn(91, "Effect_ImmortalBarrier_quick", [[83]], None, [], 0, 0),
#Avail_fn(92, "Hallucination_Disruptor_quick", [[77]], None, [], 75, 0),
#Avail_fn(93, "Hallucination_Adept_quick", [[77]], None, [], 75, 0),
#Avail_fn(94, "Effect_VoidRayPrismaticAlignment_quick", [[80]], None, [], 0, 0),
#Avail_fn(95, "Build_StasisTrap_pt", [[495]], None, [], 50, 0),
#Avail_fn(96, "Effect_AdeptPhaseShift_pt", [[311]], None, [], 0, 0),
#Avail_fn(97, "Research_ShadowStrike_quick", [[69]], None, [100, 100], 0, 0),
Avail_fn(98, "Cancel_quick", None, None, [], 0, 0),
Avail_fn(99, "Halt_quick", None, None, [], 0, 0),
Avail_fn(100, "UnloadAll_quick", [[81, 136, 24, 18, 54, 142, 95, 893]], None, [], 0, 0),
Avail_fn(101, "Stop_quick", None, None, [], 0, 0),
Avail_fn(102, "Harvest_Gather_unit", None, None, [], 0, 0),
Avail_fn(103, "Harvest_Return_quick", None, None, [], 0, 0),
Avail_fn(104, "Load_unit", [[81, 136, 24, 18, 54, 142, 95, 893]], None, [], 0, 0),
Avail_fn(105, "UnloadAllAt_pt", [[81, 136, 24, 18, 54, 142, 95, 893]], None, [], 0, 0),
Avail_fn(106, "Rally_Units_pt", None, None, [], 0, 0),
Avail_fn(107, "Rally_Units_unit", None, None, [], 0, 0),
#Avail_fn(108, "Effect_Repair_pt", [[45, 268]], None, [], 0, 0),
#Avail_fn(109, "Effect_Repair_unit", [[45, 268]], None, [], 0, 0),
#Avail_fn(110, "Effect_MassRecall_pt", [[59, 488]], None, [], 50, 0),
#Avail_fn(111, "Effect_Blink_pt", [[74]], 87, [], 0, 0),
#Avail_fn(112, "Effect_Blink_unit", [[74]], 87, [], 0, 0),
Avail_fn(114, "Rally_Workers_pt", None, None, [], 0, 0),
Avail_fn(115, "Rally_Workers_unit", None, None, [], 0, 0),
#Avail_fn(116, "Research_ProtossAirArmor_quick", [[72]], None, [150, 150], 0, 0),
#Avail_fn(117, "Research_ProtossAirWeapons_quick", [[72]], None, [100, 100], 0, 0),
#Avail_fn(118, "Research_ProtossGroundArmor_quick", [[63]], None, [100, 100], 0, 0),
#Avail_fn(119, "Research_ProtossGroundWeapons_quick", [[63]], None, [100, 100], 0, 0),
#Avail_fn(120, "Research_ProtossShields_quick", [[63]], None, [150, 150], 0, 0),
# Avail_fn(121, "Morph_ObserverMode_quick", [[1911]], None, [], 0, 0),
# Avail_fn(122, "Effect_ChronoBoostEnergyCost_unit", [[59]], None, [], 50, 0),
Avail_fn(129, "Cancel_Last_quick", None, None, [], 0, 0),
# Avail_fn(157, "Effect_Feedback_unit", [[75]], None, [], 50, 0),
# Avail_fn(158, "Behavior_PulsarBeamOff_quick", [[495]], None, [], 0, 0),
# Avail_fn(159, "Behavior_PulsarBeamOn_quick", [[495]], None, [], 25, 0),
Avail_fn(160, "Morph_SurveillanceMode_quick", [[82]], None, [], 0, 0),
Avail_fn(161, "Effect_Restore_unit", [[1910]], None, [], 0, 0),
Avail_fn(164, "UnloadAllAt_unit", [[81, 136, 24, 18, 54, 142, 95, 893]], None, [], 0, 0),
# Avail_fn(166, "Train_Disruptor_quick", [[70]], None, [150, 150], 0, 0),
# Avail_fn(167, "Effect_PurificationNova_pt", [[694]], None, [], 0, 0),
# Avail_fn(168, "raw_move_camera", None, None, [], 0, 0),
# Avail_fn(169, "Behavior_CloakOff_quick", [[50, 55, 144]], None, [], 0, 0),
# Avail_fn(172, "Behavior_CloakOn_quick", [[50, 55, 144]], None, [], 25, 0),
Avail_fn(175, "Behavior_GenerateCreepOff_quick", [[100, 101]], None, [], 0, 0),
Avail_fn(176, "Behavior_GenerateCreepOn_quick", [[100, 101]], None, [], 0, 0),
Avail_fn(177, "Behavior_HoldFireOff_quick", [[503, 50, 144]], None, [], 0, 0),
Avail_fn(180, "Behavior_HoldFireOn_quick", [[503, 50, 144]], None, [], 0, 0),
# Avail_fn(183, "Build_Armory_pt", [[45], [27, 43]], None, [150, 100], 0, 0),
Avail_fn(184, "Build_BanelingNest_pt", [[89], [104]], None, [100, 50], 0, 0),
# Avail_fn(185, "Build_Barracks_pt", [[45], [19, 47]], None, [150, 0], 0, 0),
# Avail_fn(186, "Build_Bunker_pt", [[45], [21, 46]], None, [100, 0], 0, 0),
# Avail_fn(187, "Build_CommandCenter_pt", [[45]], None, [400, 0], 0, 0),
Avail_fn(188, "Build_CreepTumor_pt", [[87, 126, 137]], None, [], 0, 0),
# Avail_fn(191, "Build_EngineeringBay_pt", [[45], [18, 36, 132, 134, 130]], None, [125, 0], 0, 0),
Avail_fn(192, "Build_EvolutionChamber_pt", [[86, 100, 101], [104]], None, [75, 0], 0, 0),
Avail_fn(193, "Build_Extractor_unit", [[104]], None, [25, 0], 0, 0),
# Avail_fn(194, "Build_Factory_pt", [[21, 46], [45]], None, [150, 100], 0, 0),
# Avail_fn(195, "Build_FusionCore_pt", [[28, 44], [45]], None, [150, 150], 0, 0),
# Avail_fn(196, "Build_GhostAcademy_pt", [[21, 46], [45]], None, [150, 50], 0, 0),
Avail_fn(197, "Build_Hatchery_pt", [[104]], None, [300, 0], 0, 0),
Avail_fn(198, "Build_HydraliskDen_pt", [[100, 101], [104]], None, [100, 100], 0, 0),
Avail_fn(199, "Build_InfestationPit_pt", [[100, 101], [104]], None, [100, 100], 0, 0),
Avail_fn(200, "Build_Interceptors_autocast", [[79]], None, [], 0, 0),
Avail_fn(201, "Build_LurkerDen_pt", [[91], [104], [100, 101]], None, [100, 150], 0, 0),
# Avail_fn(202, "Build_MissileTurret_pt", [[45], [22]], None, [100, 0], 0, 0),
# Avail_fn(203, "Build_Nuke_quick", [[26]], None, [100, 100], 0, 0),
Avail_fn(204, "Build_NydusNetwork_pt", [[100, 101], [104]], None, [150, 150], 0, 0),
Avail_fn(205, "Build_NydusWorm_pt", [[95]], None, [50, 50], 0, 0),
# Avail_fn(206, "Build_Reactor_quick", [[21, 46,
# -*- coding: ascii -*-
# $Id: CNCEditor.py,v 1.9 2014/10/15 15:04:38 bnv Exp $
#
# Author: <EMAIL>
# Date: 24-Aug-2014
from __future__ import print_function
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from Tkinter import *
import tkFont
except ImportError:
from tkinter import *
import tkinter.font as tkFont
from CNC import Block, CNC
import tkExtra
import re
#import tkDialogs
BLOCK_COLOR = "LightYellow"
COMMENT_COLOR = "Blue"
DISABLE_COLOR = "Gray"
from CNCCanvas import TAB_COLOR
MAXINT = 1000000000 # Python 3 has no sys.maxint; use a large sentinel value
#==============================================================================
# CNC Listbox
#==============================================================================
class CNCListbox(Listbox):
def __init__(self, master, app, *kw, **kwargs):
Listbox.__init__(self, master, *kw, **kwargs)
self.bind("<Button-1>", self.button1)
self.bind("<ButtonRelease-1>", self.release1)
self.bind("<Double-1>", self.double)
self.bind("<Return>", self.edit)
self.bind("<KP_Enter>", self.edit)
self.bind("<Insert>", self.insertItem)
self.bind("<Control-Key-Return>",self.insertItem)
self.bind("<Control-Key-space>",self.commandFocus)
self.bind("<Left>", self.toggleKey)
self.bind("<Right>", self.toggleKey)
self.bind("<Control-Key-d>", self.clone)
self.bind("<Control-Key-Up>", self.orderUp)
self.bind("<Control-Key-Prior>",self.orderUp)
self.bind("<Control-Key-Down>", self.orderDown)
self.bind("<Control-Key-Next>", self.orderDown)
self.bind("<Control-Key-p>", lambda e : "break")
self.bind("<Control-Key-n>", lambda e : "break")
self.bind("<Control-Key-D>", self.dump)
self.bind("<Delete>", self.deleteBlock)
self.bind("<BackSpace>", self.deleteBlock)
try:
self.bind("<KP_Delete>",self.deleteBlock)
except:
pass
self.bind("<Control-Key-b>", self.insertBlock)
self.bind("<Control-Key-r>", self.fill)
self._blockPos = [] # listbox position of each block
self._items = [] # which item (bid, lid) each listbox line shows
self.app = app
self.gcode = app.gcode
self.font = tkFont.nametofont(self.cget("font"))
self._ystart = 0
self._double = False # whether a double-click was just handled
self._hadfocus = False
self.filter = None
# ----------------------------------------------------------------------
def commandFocus(self, event=None):
self.app.commandFocus(event)
return "break"
# ----------------------------------------------------------------------
# Change the value of a list item in place, keeping its selection state
# ----------------------------------------------------------------------
def set(self, index, value):
"""Set/Change the value of a list item"""
try:
sel = self.selection_includes(index)
act = self.index(ACTIVE)
self.delete(index)
except TclError:
return
self.insert(index, value)
if sel: self.selection_set(index)
self.activate(act)
# ----------------------------------------------------------------------
# Fill listbox with enabled items
# ----------------------------------------------------------------------
def fill(self, event=None):
ypos = self.yview()[0]
act = self.index(ACTIVE)
#sel = self.curselection()
items = self.getSelection()
self.delete(0,END)
del self._blockPos[:]
del self._items[:]
y = 0
for bi,block in enumerate(self.gcode.blocks):
if self.filter is not None:
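# keep a block when the filter string occurs in its name, or when the
# special filters "enable"/"disable" match the block's enable state;
# note that `and` binds tighter than `or` in the condition below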
if not (self.filter in block.name() or \
self.filter=="enable" and block.enable or
self.filter=="disable" and not block.enable):
self._blockPos.append(None)
continue
self._blockPos.append(y)
self.insert(END, block.header())
self._items.append((bi, None))
self.itemconfig(END, background=BLOCK_COLOR)
y += 1
if not block.enable:
self.itemconfig(END, foreground=DISABLE_COLOR)
if not block.expand: continue
for lj,line in enumerate(block):
self.insert(END, line)
y += 1
if line and line[0] in ("(","%"):
self.itemconfig(END, foreground=COMMENT_COLOR)
self._items.append((bi, lj))
self.select(items)
#for i in sel: self.selection_set(i)
self.yview_moveto(ypos)
self.activate(act)
self.see(act)
# ----------------------------------------------------------------------
# Copy selected items to clipboard
# ----------------------------------------------------------------------
def copy(self, event=None):
sio = StringIO()
pickler = pickle.Pickler(sio)
#sio.write(_PLOT_CLIP)
for block,line in self.getCleanSelection():
if line is None:
pickler.dump(self.gcode.blocks[block].dump())
else:
pickler.dump(self.gcode.blocks[block][line])
self.clipboard_clear()
self.clipboard_append(sio.getvalue())
return "break"
# ----------------------------------------------------------------------
def cut(self, event=None):
self.copy()
self.deleteBlock()
return "break"
# ----------------------------------------------------------------------
def paste(self, event=None):
try: clipboard = self.selection_get(selection='CLIPBOARD')
except: return
ypos = self.yview()[0]
# paste them after the last selected item
# bid,lid are stored on self so they can be accessed from addLines();
# Python3-only code could use nonlocal instead
try:
self._bid, self._lid = self._items[self.curselection()[-1]]
except:
try:
self._bid, self._lid = self._items[-1]
except:
self._bid = 0
self._lid = None
selitems = []
undoinfo = []
def addLines(lines):
for line in lines.splitlines():
# Create a new block
if self._lid is None:
self._bid += 1
if self._bid > len(self.gcode.blocks):
self._bid = len(self.gcode.blocks)
self._lid = MAXINT
block = Block()
undoinfo.append(self.gcode.addBlockUndo(self._bid,block))
selitems.append((self._bid, None))
else:
block = self.gcode.blocks[self._bid]
if self._lid == MAXINT:
self._lid = len(block)
selitems.append((self._bid, len(block)))
else:
self._lid += 1
selitems.append((self._bid, self._lid))
undoinfo.append(self.gcode.insLineUndo(self._bid, self._lid, line))
try:
# try to unpickle it
unpickler = pickle.Unpickler(StringIO(clipboard))
try:
while True:
obj = unpickler.load()
if isinstance(obj,tuple):
block = Block.load(obj)
self._bid += 1
undoinfo.append(self.gcode.addBlockUndo(self._bid, block))
selitems.append((self._bid,None))
self._lid = None
else:
addLines(obj)
except EOFError:
pass
except pickle.UnpicklingError:
# Paste as text
addLines(clipboard)
if not undoinfo: return
self.gcode.addUndo(undoinfo)
self.selection_clear(0,END)
self.fill()
self.yview_moveto(ypos)
self.select(selitems, clear=True)
#self.selection_set(ACTIVE)
#self.see(ACTIVE)
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
# Clone selected blocks
# ----------------------------------------------------------------------
def clone(self, event=None):
sel = list(map(int,self.curselection()))
if not sel: return
ypos = self.yview()[0]
undoinfo = []
self.selection_clear(0,END)
pos = self._items[sel[-1]][0]+1
blocks = []
for i in reversed(sel):
bid, lid = self._items[i]
if lid is None:
undoinfo.append(self.gcode.cloneBlockUndo(bid, pos))
for i in range(len(blocks)): blocks[i] += 1
blocks.append(pos)
else:
undoinfo.append(self.gcode.cloneLineUndo(bid, lid))
self.gcode.addUndo(undoinfo)
self.fill()
self.yview_moveto(ypos)
if blocks:
self.selectBlocks(blocks)
self.activate(self._blockPos[blocks[-1]])
else:
self.selection_set(ACTIVE)
self.see(ACTIVE)
self.winfo_toplevel().event_generate("<<Modified>>")
return "break"
# ----------------------------------------------------------------------
# Delete selected blocks of code
# ----------------------------------------------------------------------
def deleteBlock(self, event=None):
sel = list(map(int,self.curselection()))
if not sel: return
ypos = self.yview()[0]
undoinfo = []
for i in reversed(sel):
bid, lid = self._items[i]
if isinstance(lid,int):
undoinfo.append(self.gcode.delLineUndo(bid, lid))
else:
undoinfo.append(self.gcode.delBlockUndo(bid))
self.gcode.addUndo(undoinfo)
self.selection_clear(0,END)
self.fill()
self.yview_moveto(ypos)
self.selection_set(ACTIVE)
self.see(ACTIVE)
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
# Edit active item
# ----------------------------------------------------------------------
def edit(self, event=None):
active = self.index(ACTIVE)
txt = self.get(active)
if event:
x = event.x
else:
x = 0
ypos = self.yview()[0]
bid, lid = self._items[active]
if lid is None:
txt0 = txt
txt = self.gcode[bid].name()
self.set(active, txt)
edit = tkExtra.InPlaceEdit(self, select=False, bg=self.cget("bg"))
else:
edit = tkExtra.InPlaceEdit(self,x=x, select=False, bg=self.cget("bg"))
if edit.value is None or edit.value==txt:
if lid is None:
self.set(active,txt0)
self.itemconfig(active, background=BLOCK_COLOR)
if not self.gcode[bid].enable:
self.itemconfig(active, foreground=DISABLE_COLOR)
return
if isinstance(lid,int):
self.gcode.addUndo(self.gcode.setLineUndo(bid, lid, edit.value))
self.set(active, edit.value)
if edit.value and edit.value[0] in ("(","%"):
self.itemconfig(active, foreground=COMMENT_COLOR)
else:
self.gcode.addUndo(self.gcode.setBlockNameUndo(bid, edit.value))
self.set(active, self.gcode[bid].header())
self.itemconfig(active, background=BLOCK_COLOR)
if not self.gcode[bid].enable:
self.itemconfig(active, foreground=DISABLE_COLOR)
self.yview_moveto(ypos)
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
# return active block id
# ----------------------------------------------------------------------
def activeBlock(self):
active = self.index(ACTIVE)
if self._items:
bid, lid = self._items[active]
else:
bid = 0
return bid
# ----------------------------------------------------------------------
# Insert a line or a block
# ----------------------------------------------------------------------
def insertItem(self, event=None):
active = self.index(ACTIVE)
if active is None: return
if len(self._items)==0 or self._items[active][1] is None:
self.insertBlock()
else:
self.insertLine()
# ----------------------------------------------------------------------
# Insert New Block
# ----------------------------------------------------------------------
def insertBlock(self, event=None):
active = self.index(ACTIVE)
if self._items:
bid, lid = self._items[active]
bid += 1
else:
bid = 0
block = Block()
block.expand = True
block.append("g0 x0 y0")
block.append("g1 z0")
block.append(CNC.zsafe())
self.gcode.addUndo(self.gcode.addBlockUndo(bid,block))
self.selection_clear(0,END)
self.fill()
# find location of new block
while active < self.size():
if self._items[active][0] == bid:
break
active += 1
self.selection_set(active)
self.see(active)
self.activate(active)
self.edit()
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
# Insert a new line below cursor
# ----------------------------------------------------------------------
def insertLine(self, event=None):
active = self.index(ACTIVE)
if active is None: return
if len(self._items)==0:
self.insertBlock()
return
bid, lid = self._items[active]
active += 1
self.insert(active,"")
self.selection_clear(0,END)
self.activate(active)
self.selection_set(active)
self.see(active)
edit = tkExtra.InPlaceEdit(self, bg=self.cget("bg"))
ypos = self.yview()[0]
self.delete(active)
if edit.value is None:
# Cancel and leave
active -= 1
self.activate(active)
self.selection_set(active)
self.see(active)
return
self.insert(active, edit.value)
self.selection_set(active)
self.activate(active)
if edit.value and edit.value[0] in ("(","%"):
self.itemconfig(active, foreground=COMMENT_COLOR)
self.yview_moveto(ypos)
# Add line into code
# Correct pointers
if lid is None:
lid = 0
else:
lid += 1
self.gcode.addUndo(self.gcode.insLineUndo(bid, lid, edit.value))
self._items.insert(active, (bid, lid))
for i in range(active+1,len(self._items)):
b,l = self._items[i]
if b != bid: break
if isinstance(l,int):
self._items[i] = (b,l+1)
for i in range(bid+1, len(self._blockPos)):
if self._blockPos[i] is not None:
self._blockPos[i] += 1 # shift all blocks below by one
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
def toggleKey(self,event=None):
if not self._items: return
active = self.index(ACTIVE)
bid,lid = self._items[active]
if lid is None:
self.toggleExpand()
else:
# Go to header
self.selection_clear(0,END)
self.activate(self._blockPos[bid])
self.selection_set(ACTIVE)
self.see(ACTIVE)
self.winfo_toplevel().event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
# Button1 clicked
# ----------------------------------------------------------------------
def button1(self, event):
if self._double: return
# Remember if we had the focus before clicking
# to be used later in editing
self._hadfocus = self.focus_get() == self
# from a single click
self._ystart = self.nearest(event.y)
selected = self.selection_includes(self._ystart)
loc = self._headerLocation(event)
if loc is None:
pass
elif self._headerLocation(event)<2 and selected:
return "break" # do not alter selection!
# ----------------------------------------------------------------------
# Release of button-1. Care is needed to distinguish a single click from
# a double click or a click-and-drag
# ----------------------------------------------------------------------
def release1(self, event):
if not self._items: return
if self._double:
self._double = False
return
self._double = False
active = self.index(ACTIVE)
# from a single click
y = self.nearest(event.y)
self.activate(y)
if y != self._ystart: return
loc = self._headerLocation(event)
if loc is None:
# Normal line
if active==y:
# In place edit if we had already the focus
if self._hadfocus:
self.edit(event)
elif loc == 0:
self.toggleExpand()
elif loc == 1:
self.toggleEnable()
return "break"
# ----------------------------------------------------------------------
def double(self, event):
if self._headerLocation(event) == 2:
self.edit()
self._double = True
else:
self._double = False
# ----------------------------------------------------------------------
# Return location where we clicked on header
# 0 = expand arrow
# 1 = enable ballot box
# 2 = name
# ----------------------------------------------------------------------
def _headerLocation(self, event):
if not self._items: return None
# from a single click
y = self.nearest(event.y)
block,line = self._items[y]
if line is not None: return None
txt = self.get(y)
if event.x <= self.font.measure(txt[:2]):
return 0
elif event.x <= self.font.measure(txt[:5]):
return 1
else:
return 2
# ----------------------------------------------------------------------
# Toggle expand selection
# ----------------------------------------------------------------------
def toggleExpand(self, event=None):
if not self._items: return None
items = list(map(int,self.curselection()))
expand = None
active = self.index(ACTIVE)
bactive,lactive = self._items[active]
blocks = []
undoinfo = []
for i in reversed(items):
bid,lid = self._items[i]
if lid is not None:
if bid in blocks: continue
blocks.append(bid)
if expand is None: expand = not self.gcode[bid].expand
undoinfo.append(self.gcode.setBlockExpandUndo(bid, expand))
if undoinfo:
self.gcode.addUndo(undoinfo)
self.selection_clear(0,END)
self.fill()
active = self._blockPos[bactive]
for bid in blocks:
self.selectBlock(bid)
self.activate(active)
self.see(active)
self.winfo_toplevel().event_generate("<<Status>>",data="Toggled Expand of selected objects")
# ----------------------------------------------------------------------
def _toggleEnable(self, enable=None):
if not self._items: return None
items = list(map(int,self.curselection()))
active = self.index(ACTIVE)
ypos = self.yview()[0]
undoinfo = []
blocks = []
for i in items:
bid,lid = self._items[i]
if lid is not None:
if bid in blocks: continue
pos = self._blockPos[bid]
else:
pos = i
blocks.append(bid)
block = self.gcode[bid]
if block.name() in ("Header", "Footer"): continue
if enable is None: enable = not block.enable
undoinfo.append(self.gcode.setBlockEnableUndo(bid, enable))
sel = self.selection_includes(pos)
self.delete(pos)
self.insert(pos, block.header())
self.itemconfig(pos, background=BLOCK_COLOR)
if not block.enable:
self.itemconfig(pos, foreground=DISABLE_COLOR)
if sel: self.selection_set(pos)
if undoinfo:
self.gcode.calculateEnableMargins()
self.gcode.addUndo(undoinfo)
self.activate(active)
self.yview_moveto(ypos)
self.winfo_toplevel().event_generate("<<ListboxSelect>>")
# ----------------------------------------------------------------------
def enable(self, event=None):
self._toggleEnable(True)
self.winfo_toplevel().event_generate("<<Status>>",data="Enabled selected objects")
# ----------------------------------------------------------------------
def disable(self, event=None):
self._toggleEnable(False)
self.winfo_toplevel().event_generate("<<Status>>",data="Disabled selected objects")
# ----------------------------------------------------------------------
# toggle state enable/disable
# ----------------------------------------------------------------------
def toggleEnable(self, event=None):
self._toggleEnable()
self.winfo_toplevel().event_generate("<<Status>>",data="Toggled Visibility of selected objects")
# ----------------------------------------------------------------------
# Comment/uncomment selected rows
# ----------------------------------------------------------------------
def commentRow(self, event=None):
if not self._items: return
all_items = self._items
sel_items = list(map(int,self.curselection()))
mreg = re.compile(r"^\((.*)\)$") # match a line fully wrapped in parentheses
change = False
for i in sel_items:
my_item = all_items[i]
if my_item[1] is not None:
change = True
#check for ()
line = self.gcode[my_item[0]][my_item[1]]
m = mreg.search(line)
if m is None:
self.gcode[my_item[0]][my_item[1]] = "("+line+")"
else:
self.gcode[my_item[0]][my_item[1]] = m.group(1)
if change: self.fill()
# ----------------------------------------------------------------------
# joinBlocks
# ----------------------------------------------------------------------
def joinBlocks(self, event=None):
if not self._items: return
all_items = self._items
sel_items = list(map(int,self.curselection()))
change = True
bl = Block(self.gcode[sel_items[0]].name())
for bid in sel_items:
for line in self.gcode[bid]:
bl.append(line)
bl.append("( ---------- cut-here ---------- )")
del bl[-1]
self.gcode.addUndo(self.gcode.addBlockUndo(bid+1,bl))
if change: self.fill()
self.deleteBlock()
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
# splitBlocks
# ----------------------------------------------------------------------
def splitBlocks(self, event=None):
if not self._items: return
all_items = self._items
sel_items = list(map(int,self.curselection()))
change = True
newblocks = []
for bid in sel_items:
bl = Block(self.gcode[bid].name())
for line in self.gcode[bid]:
if line == "( ---------- cut-here ---------- )":
#newblocks.append(bl)
#self.insertBlock(bl)
self.gcode.addUndo(self.gcode.addBlockUndo(bid+1,bl))
bl = Block(self.gcode[bid].name())
else:
bl.append(line)
self.gcode.addUndo(self.gcode.addBlockUndo(bid+1,bl))
#newblocks.append(bl)
#self.gcode.extend(newblocks)
if change: self.fill()
self.deleteBlock()
self.winfo_toplevel().event_generate("<<Modified>>")
# ----------------------------------------------------------------------
# change color of a block
# ----------------------------------------------------------------------
def changeColor(self, event=None):
items = list(map(int,self.curselection()))
if not items:
self.winfo_toplevel().event_generate("<<Status>>",data="Nothing is selected")
return
# Find initial color
bid,lid = self._items[items[0]]
try:
rgb, color = tkExtra.askcolor(
title=_("Color"),
initialcolor=self.gcode[bid].color,
parent=self)
except TclError:
color = None
if color is None: return
blocks = []
undoinfo = []
for i in reversed(items):
bid,lid = self._items[i]
if lid is not None:
if bid in blocks: continue
blocks.append(bid)
oldColor = self.gcode[bid].color
undoinfo.append(self.gcode.setBlockColorUndo(bid, oldColor))
if undoinfo:
self.gcode.addUndo(undoinfo)
for bid in blocks:
self.gcode[bid].color = color
self.winfo_toplevel().event_generate("<<Modified>>")
self.winfo_toplevel().event_generate("<<Status>>",data="Changed color of block")
# ----------------------------------------------------------------------
# Select items in the form of (block, item)
# ----------------------------------------------------------------------
def select(self, items, double=False, clear=False, toggle=True):
if clear:
self.selection_clear(0,END)
toggle = False
first = None
for bi in items:
bid,lid = bi
try:
block = self.gcode[bid]
except:
continue
if double:
if block.expand:
# select whole block
y = self._blockPos[bid]
else:
# select all blocks with the same name
name = block.nameNop()
for i,bl in enumerate(self.gcode.blocks):
if name == bl.nameNop():
self.selection_set(self._blockPos[i])
continue
elif not block.expand or lid is None:
# select whole block
y = self._blockPos[bid]
elif isinstance(lid,int):
# find line of block
y = self._blockPos[bid]+1 + lid
else:
raise
#continue
if y is None: continue
if toggle:
select = not self.selection_includes(y)
else:
select = True
if select:
self.selection_set(y)
if first is None: first = y
elif toggle:
self.selection_clear(y)
if first is not None:
self.activate(first)
self.see(first)
# ----------------------------------------------------------------------
# Select whole block lines if expanded
# ----------------------------------------------------------------------
def selectBlock(self, bid):
start = self._blockPos[bid]
while True:
bid += 1
if bid >= len(self._blockPos):
end = END
break
elif self._blockPos[bid] is not None:
end = self._blockPos[bid]-1
break
self.selection_set(start,end)
# ----------------------------------------------------------------------
def selectBlocks(self, blocks):
self.selection_clear(0,END)
for bid in blocks:
self.selectBlock(bid)
# ----------------------------------------------------------------------
def selectAll(self):
self.selection_set(0,END)
# ----------------------------------------------------------------------
def selectClear(self):
self.selection_clear(0,END)
# ----------------------------------------------------------------------
def selectInvert(self):
for i in range(self.size()):
if self.selection_includes(i):
self.selection_clear(i)
else:
self.selection_set(i)
# ----------------------------------------------------------------------
# Select all blocks with the same name as the selected
isRank):
'''
Update the entire row of locations on the sheets, where:
ports_row: list of length 6 with strings denoting the value for each cell.
timestamp: string representing the time for the time cell.
name: the name of the editor.
isRank: boolean value that denotes whether or not the editor is a rank.
'''
agc = await agcm.authorize()
ss = await agc.open(config['sheetName'])
sheet = await ss.worksheet('Home')
cell_list = [gspread.models.Cell(21, i+1, value=val) for i, val in enumerate(ports_row)]
mobile_cell_list = [gspread.models.Cell(32+i, 2, value=val) for i, val in enumerate(ports_row)]
await sheet.update_cells(cell_list, nowait=True)
await sheet.update_cells(mobile_cell_list, nowait=True)
await sheet.update_cell(22, 3, timestamp, nowait=True) # update time cell
if isRank:
await sheet.update_cell(22, 5, name, nowait=True) # update editor name
await sheet.update_cell(39, 2, name, nowait=True) # update mobile editor name
async def add_activity(agcm, name, date, sheet_activity=False):
'''
Note a player as active for a given date
'''
agc = await agcm.authorize()
ss = await agc.open(config['adminSheetName'])
sheet = await ss.worksheet('Rank Reports')
sheet_month_cell = await sheet.cell(3, 1)
sheet_month = sheet_month_cell.value
if sheet_month.upper() != date.strftime("%B").upper():
await write_error(agcm, name, date, f"Could not track {'fc' if not sheet_activity else 'sheet'} activity: month out of sync")
return
day = str(date.day)
ranks = await sheet.col_values(1)
for i, r in enumerate(ranks):
if r.upper() == name.upper():
row = i+1
if not sheet_activity:
dates = (await sheet.row_values(row) + [""]*100)[3:33]
else:
dates = (await sheet.row_values(row) + [""]*100)[34:64]
if not dates:
if not sheet_activity:
col = 4
else:
col = 35
await sheet.update_cell(row, col, day, nowait=True)
return
else:
for j, d in enumerate(dates):
if not sheet_activity:
col = j+1+3
else:
col = j+1+34
if d == day:
return
elif d == "":
await sheet.update_cell(row, col, day, nowait=True)
return
elif i == len(ranks) - 1:
await write_error(agcm, name, date, f"Could not track {'fc' if not sheet_activity else 'sheet'} activity: name not found")
return
async def write_error(agcm, name, date, msg):
'''
Write an error message to the error tab on the admin sheets.
'''
agc = await agcm.authorize()
ss = await agc.open(config['adminSheetName'])
sheet = await ss.worksheet('Errors')
values = [name, str(date), msg]
errors = await sheet.col_values(1)
for i, e in enumerate(errors):
if e == "":
row = i+1
cell_list = [gspread.models.Cell(row, col, value=values[col-1]) for col in range(1,4)]
await sheet.update_cells(cell_list, nowait=True)
return
elif i == len(errors)-1:
await sheet.insert_row(values, i+2)
async def getPortRow(agcm):
'''
Returns the current row of portable locations on the sheets.
'''
agc = await agcm.authorize()
ss = await agc.open(config['sheetName'])
sheet = await ss.worksheet('Home')
ports = await sheet.row_values(21)
ports = ports[:7]
return ports
def checkPorts(newPorts, ports):
'''
Checks the validity of a given set of new portable locations, given a set of current locations.
Returns a string with an error message, or an empty string if there is no error.
'''
for port in newPorts:
loc = port[1]
for world in port[0]:
if world < 1 or world > highestWorld:
return f'Sorry, **{str(world)}** is not a valid world. Please enter a number between 1 and 141.'
if world in forbiddenWorlds:
return f'Sorry, world **{str(world)}** is not called because it is either a pking world or a bounty hunter world, or it is not on the world list.'
for forbiddenLoc in forbiddenLocs:
if world == forbiddenLoc[0] and loc == forbiddenLoc[1]:
return f'Sorry, **{str(world)} {loc}** is a forbidden location.'
if loc == 'GE' and world not in f2pWorlds:
return 'Sorry, we only call the location **GE** in F2P worlds.'
portNames = []
count = 0
i = 0
for p in ports:
i += 1
for entry in p:
if loc != entry[1]:
continue
if world in entry[0]:
portNames.append(portablesNames[i-1])
count += 1
break
'''
if count >= 3 and not dxp_active:
msgPorts = ""
i = 0
for p in portNames:
i += 1
msgPorts += '**' + p + '**'
if i < len(portNames):
msgPorts += ", "
if i == len(portNames) - 1:
msgPorts += "and "
return f'Sorry, there cannot be more than 3 portables at the same location.\nThe location **{str(world)} {loc}** already has a {msgPorts}.'
'''
return ''
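# Usage sketch with hypothetical values: each entry of newPorts pairs a list of
# worlds with a location code, e.g.
#   checkPorts([([84, 100], 'CA')], current_ports)
# which returns '' when every world/location pair passes the checks above.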
def get_port_type(input, channel=None):
if 'FL' in input or input.startswith('F'):
return ['fletcher', 1]
elif 'CR' in input or (input.startswith('C') and not (input.startswith('CA') or input.startswith('CW'))):
return ['crafter', 2]
elif 'BR' in input or (input.startswith('B') and not (input.startswith('BE') or input.startswith('BA') or input.startswith('BU'))):
return ['brazier', 3]
elif 'SAW' in input or 'MIL' in input or (input.startswith('M') and not (input.startswith('MG') or input.startswith('MEI'))) or input.startswith('S'):
return ['sawmill', 4]
elif 'RAN' in input or input.startswith('R'):
return ['range', 5]
elif 'WEL' in input or input.startswith('WE'):
return ['well', 6]
elif 'WOR' in input or 'BEN' in input or input.startswith('WO') or input.startswith('WB'):
return ['workbench', 7]
else:
if channel.id in portables_channel_ids:
return [portablesNames[portables_channel_ids.index(channel.id)].lower(), portables_channel_ids.index(channel.id) + 1]
return ['', -1]
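# For example, get_port_type('FLETCHER') yields ['fletcher', 1]; input matching
# no prefix falls back to the portable type of the channel it was sent in, or
# ['', -1] outside the portables channels.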
last_ports = None
def get_last_ports():
return last_ports
def set_last_ports(ports):
global last_ports
last_ports = ports
def get_editors(credit):
'''
Get list of editor names from credit cell string
'''
separators = [',', '/', '&', '|', '+', ' - ']
names = split(credit, separators)
return names
def split(txt, seps):
# https://stackoverflow.com/questions/4697006/python-split-string-by-list-of-separators/4697047
default_sep = seps[0]
# we skip seps[0] because that's the default separator
for sep in seps[1:]:
txt = txt.replace(sep, default_sep)
return [i.strip() for i in txt.split(default_sep)]
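# Usage sketch (hypothetical credit string):
#   get_editors("Alice, Bob & Carol / Dave")  # -> ['Alice', 'Bob', 'Carol', 'Dave']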
class Sheets(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.track_location_updates.start()
def cog_unload(self):
self.track_location_updates.cancel()
@tasks.loop(seconds=10)
async def track_location_updates(self):
'''
Loop to track location update activity
'''
try:
agc = await self.bot.agcm.authorize()
ss = await agc.open(config['sheetName'])
home = await ss.worksheet('Home')
last_ports = get_last_ports()
if last_ports is None:
last_ports = await home.range('A20:I22')
set_last_ports(last_ports)
return
ports = await home.range('A20:I22')
if all(ports[i].value == l_p.value for i, l_p in enumerate(last_ports)):
return
else:
set_last_ports(ports)
top_row_old, mid_row_old, bot_row_old = last_ports[:9], last_ports[9:18], last_ports[18:]
top_row, mid_row, bot_row = ports[:9], ports[9:18], ports[18:]
role_ids = [config['fletcher_role'], config['crafter_role'], config['brazier_role'], config['sawmill_role'], config['range_role'], config['well_role'], config['workbench_role']]
port_server = self.bot.get_guild(config['portablesServer'])
if port_server:
roles = []
for role_id in role_ids:
role = port_server.get_role(role_id)
roles.append(role)
for i, cell in enumerate(mid_row[:7]):
old_cell = mid_row_old[i]
val = cell.value
old_val = old_cell.value
current_locs = getPorts(val)
old_locs = getPorts(old_val)
if only_f2p(old_locs):
if not only_f2p(current_locs):
role = roles[i]
if role:
channel_id = portables_channel_ids[i]
loc_channel = port_server.get_channel(channel_id)
if loc_channel:
await loc_channel.send(f'{role.mention} active at **{format(current_locs)}**')
except Exception as e:
error = f'Error encountered in portable locations tracking: {e}'
print(error)
logging.critical(error)
try:
channel = self.bot.get_channel(config['testChannel'])
await channel.send(error)
except:
pass
@commands.command(aliases=['box'])
async def boxes(self, ctx):
'''
Get portable bank deposit box locations.
Only available during DXP.
'''
addCommand()
if not dxp_active:
raise commands.CommandError(message='This command is only enabled during DXP.')
locChannel = self.bot.get_channel(config['locChannel'])
adminCommandsChannel = self.bot.get_channel(config['adminCommandsChannel'])
if ctx.guild == self.bot.get_guild(config['portablesServer']):
if ctx.channel != locChannel and ctx.channel != adminCommandsChannel:
raise commands.CommandError(message=f'Error: Incorrect channel. Use {locChannel.mention}.')
last_ports = get_last_ports()
boxes = last_ports[17].value
embed = discord.Embed(title='__Deposit boxes__', description=boxes, colour=0xff0000, url=config['publicSheets'], timestamp=datetime.utcnow())
embed.set_thumbnail(url='https://i.imgur.com/Hccdnts.png')
await ctx.send(embed=embed)
@commands.command(aliases=['p', 'portable'] + [item for sublist in portableAliases for item in sublist])
async def portables(self, ctx, portable='', *input):
'''
Get portable locations.
'''
addCommand()
if ctx.invoked_with in [item for sublist in portableAliases for item in sublist]:
input = (portable,) + input
portable = ctx.invoked_with
if any(thing for thing in input):
edit_command = commands.Bot.get_command(self.bot, 'edit')
try:
for check in edit_command.checks:
if not await check(ctx):
raise commands.CommandError(message=f'Insufficient permissions: `Portables helper`.')
await edit_command.callback(self, ctx, portable, *input)
return
except commands.CommandError as e:
raise e
adminCommandsChannel = self.bot.get_channel(config['adminCommandsChannel'])
if adminCommandsChannel:
if ctx.guild == self.bot.get_guild(config['portablesServer']):
if ctx.channel != adminCommandsChannel and not ctx.channel.id in portables_channel_ids and not ctx.author.id == config['owner']:
raise commands.CommandError(message=f'Error: `Incorrect channel`. Please use {portables_channel_mention_string}.')
last_ports = get_last_ports()
if last_ports is None:
return
top_row, mid_row, bot_row = last_ports[:9], last_ports[9:18], last_ports[18:]
now = datetime.utcnow()
time_val = str(now.year) + " " + bot_row[2].value + ":" + str(now.second)
time = datetime.strptime(time_val, '%Y %d %b, %H:%M:%S')
embed = discord.Embed(title='__Portables FC Locations__', colour=0xff0000, url=config['publicSheets'], timestamp=time)
if (not portable or not any(portable.upper() in port_name for port_name in portablesNamesUpper)) and not portable.upper() == 'WB':
for i in range(len(top_row)-2):
embed.add_field(name=top_row[i].value, value=mid_row[i].value.replace('*', '\*'), inline=True)
notes = mid_row[7].value
embed.add_field(name='Notes', value=notes, inline=False)
else:
index = 0
if portable.upper() == 'WB':
index = 6
else:
for i, port_name in enumerate(portablesNamesUpper):
if port_name.startswith(portable.upper()):
index = i
break
if not index:
for i, port_name in
is not None:
foundCycle.insert(0, rel)
return foundCycle
path.discard(relTo)
return None
''' this may be unused now, if common stock class members are only taken from observed facts, not defined members, per Dean R
def getDimMembers(self, dim, default=None, rels=None, members=None, visited=None):
hasDefinedRelationship = False
if rels is None:
visited = set()
members = set()
for rel in self.modelXbrl.relationshipSet(XbrlConst.dimensionDefault).fromModelObject(dim):
default = rel.toModelObject
rels = self.modelXbrl.relationshipSet(XbrlConst.dimensionDomain).fromModelObject(dim)
for rel in rels:
hasDefinedRelationship = True
relTo = rel.toModelObject
if rel.isUsable and relTo != default:
# HF: bug, if not usable, then not usable in any other place in network, fix
members.add(relTo.qname)
if relTo not in visited:
visited.add(relTo)
domMbrRels = self.modelXbrl.relationshipSet(XbrlConst.domainMember, rel.consecutiveLinkrole).fromModelObject(relTo)
self.getDimMembers(dim, default, domMbrRels, members, visited)
visited.discard(relTo)
return (members,hasDefinedRelationship)
'''
def checkConceptLabels(self, modelXbrl, labelsRelationshipSet, disclosureSystem, concept):
hasDefaultLangStandardLabel = False
dupLabels = {}
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
if modelLabel is not None and modelLabel.xmlLang:
if modelLabel.xmlLang.startswith(disclosureSystem.defaultXmlLang) and \
modelLabel.role == XbrlConst.standardLabel:
hasDefaultLangStandardLabel = True
dupDetectKey = ( (modelLabel.role or ''), modelLabel.xmlLang)
if dupDetectKey in dupLabels:
modelXbrl.error(("EFM.6.10.02", "GFM.1.5.2", "SBR.NL.2.2.1.05"),
_("Concept %(concept)s has duplicated labels for role %(role)s lang %(lang)s."),
modelObject=(modelLabel, dupLabels[dupDetectKey]), # removed concept from modelObjects
concept=concept.qname, role=dupDetectKey[0], lang=dupDetectKey[1])
else:
dupLabels[dupDetectKey] = modelLabel
if modelLabel.role in (XbrlConst.periodStartLabel, XbrlConst.periodEndLabel):
modelXbrl.error("SBR.NL.2.3.8.03",
_("Concept %(concept)s has label for semantical role %(role)s."),
modelObject=modelLabel, concept=concept.qname, role=modelLabel.role)
if self.validateSBRNL: # check for missing nl labels
for role, lang in dupLabels.keys():
if role and lang != disclosureSystem.defaultXmlLang and (role,disclosureSystem.defaultXmlLang) not in dupLabels:
modelXbrl.error("SBR.NL.2.3.8.05",
_("Concept %(concept)s has en but no nl label in role %(role)s."),
modelObject=(concept,dupLabels[(role,lang)]), concept=concept.qname, role=role)
#6 10.1 en-US standard label
if not hasDefaultLangStandardLabel:
modelXbrl.error(("EFM.6.10.01", "GFM.1.05.01"),
_("Concept used in facts %(concept)s is missing an %(lang)s standard label."),
# concept must be the first referenced modelObject
modelObject=[concept] + list(modelXbrl.factsByQname[concept.qname]), concept=concept.qname,
lang=disclosureSystem.defaultLanguage)
#6 10.3 default lang label for every role
try:
dupLabels[("zzzz",disclosureSystem.defaultXmlLang)] = None #to allow following loop
priorRole = None
priorLang = None
hasDefaultLang = True
for role, lang in sorted(dupLabels.keys()):
if role != priorRole:
if not hasDefaultLang:
modelXbrl.error(("EFM.6.10.03", "GFM.1.5.3"),
_("Concept %(concept)s is missing an %(lang)s label for role %(role)s."),
modelObject=list(modelXbrl.factsByQname[concept.qname]) + [dupLabels[(priorRole,priorLang)]],
concept=concept.qname,
lang=disclosureSystem.defaultLanguage, role=priorRole)
hasDefaultLang = False
priorLang = lang
priorRole = role
if lang is not None and lang.startswith(disclosureSystem.defaultXmlLang):
hasDefaultLang = True
except Exception as err:
pass
# check if concept is behaving as a total based on role, deed, or circumstances
def presumptionOfTotal(self, rel, siblingRels, iSibling, isStatementSheet, nestedInTotal, checkLabelRoleOnly):
"""
A numeric concept target of a parent-child relationship is presumed total if:
(i) its preferredLabel role is a total role (pre XbrlConst static function of
current such total roles) or
(ii) if not in a nested total (abstract child relationship to a known total's
contributing siblings):
the parent is not SupplementalCashFlowInformationAbstract and the preceding
sibling relationship is monetary and it's on a statement sheet and it's the
last of more than one monetary item
(a) Last monetary parented by an abstract or non-monetary and not in a nested
(breakdown) total, or
(b) effective label (en-US of preferred role) has "Total" in its wording.
(c) (commented out for now due to false positives: Concept name has "Total"
in its name)
(d) last monetary (may be sub level) whose immediate sibling is a calc LB child
"""
concept = rel.toModelObject
if isinstance(concept, ModelConcept) and concept.isNumeric:
preferredLabel = rel.preferredLabel
if XbrlConst.isTotalRole(preferredLabel):
return _("preferredLabel {0}").format(os.path.basename(preferredLabel))
if concept.isMonetary and not checkLabelRoleOnly:
effectiveLabel = concept.label(lang="en-US", fallbackToQname=False, preferredLabel=preferredLabel)
''' word total in label/name does not seem to be a good indicator,
e.g., Google Total in label for ShareBasedCompensationArrangementByShareBasedPaymentAwardGrantDateFairValueOfOptionsVested followed by
label with Aggregate but name has Total
... so only perform this test on last monetary in a Note
if 'Total' in effectiveLabel: # also check for Net ???
return _("word 'Total' in effective label {0}").format(effectiveLabel)
if 'Total' in concept.name: # also check for Net ???
return _("word 'Total' in concept name {0}").format(concept.name)
'''
parent = rel.fromModelObject
if (len(siblingRels) > 1 and
iSibling == len(siblingRels) - 1 and
parent is not None and
parent.name not in {
"SupplementalCashFlowInformationAbstract"
}):
preceedingSibling = siblingRels[iSibling - 1].toModelObject
if preceedingSibling is not None and preceedingSibling.isMonetary:
# last fact, may be total
if isStatementSheet:
# check if facts add up??
if (parent.isAbstract or not parent.isMonetary) and not nestedInTotal:
return _("last monetary item in statement sheet monetary line items parented by nonMonetary concept")
elif effectiveLabel and 'Total' in effectiveLabel:
return _("last monetary item in statement sheet monetary line items with word 'Total' in effective label {0}").format(effectiveLabel)
elif 'Total' in concept.name:
return _("last monetary item in statement sheet monetary line items with word 'Total' in concept name {0}").format(concept.name)
elif self.summationItemRelsSetAllELRs.isRelated(concept, "child", preceedingSibling):
return _("last monetary item in statement sheet monetary line items is calc sum of previous line item")
''' for now unreliable to use total words for notes
else:
if 'Total' in effectiveLabel: # also check for Net ???
return _("last monetary item in note with word 'Total' in effective label {0}").format(effectiveLabel)
if 'Total' in concept.name: # also check for Net ???
return _("last monetary item in note with word 'Total' in concept name {0}").format(concept.name)
'''
return None
# 6.15.02, 6.15.03
def checkCalcsTreeWalk(self, parentChildRels, concept, isStatementSheet, inNestedTotal, conceptsUsed, visited):
"""
- EFM-strict validation 6.15.2/3: finding presumed totals in presentation and inspecting for
equivalents in calculation (noted as error-semantic, in efm-strict mode).
- Best practice approach: inspecting for calculations in the UGT calculations that would hint
that like filing constructs should have presentation (noted as warning-semantic in best practices plug-in, when loaded and enabled)
EFM-strict missing-calcs
a. Presumption of total
The presentation linkbase is tree-walked to find items presumed to be totals and their contributing
items. (see description of presumptionOfTotal, above)
b. Finding calculation link roles with least mis-fit to presumed total and its contributing items
(presumptionOfTotal in ValidateFiling.py).
For each presumed total (checkForCalculations in ValidateFiling.py):
b.1 Contributing items are found for the presumed total as follows:
From the presumed total, walking back through its preceding siblings (with caution to avoid
looping on allowed direct cycles), a preceding sibling is a contributing item if it has facts,
same period type, and numeric. If a preceding sibling is abstract, the abstract's children are
likewise recursively checked (as they often represent a breakdown, and such children of an
abstract sibling to the total are also contributing items (except for such children preceding
a total at the child level).
If a preceding sibling is presumed total (on same level), it is a running subtotal (in subsequent
same-level total) unless it's independent in the calc LB (separate totaled stuff preceding these
siblings) or related to grandparent sum.
b.2 Finding the facts of these total/contributing item sets
Sets of total and compatible contributing facts that match the sets of total concept and
contributing concept must next be found, because each of these different sets (of total
and compatible contributing facts) may fit different calculation link roles (according to
which compatible contributing facts are present for each total). This is particularly
important when totals and contributing items exist both on face statements and notes, but
the contributing compatible fact population is different).
For each fact of the total concept, that has a specified end/instant datetime and unit, if
(i) it's not on a statement or
(ii) required context is absent or
(iii) the fact's end/instant is within the required context's duration, the contributing
item facts are those unit and context equivalent to such total fact.
b.3 Finding least-mis-matched calculation link role
Each link role in calculation produces a different set of summation-item arc-sets, and
           each set of presumed-total facts
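# Illustrative sketch of step b.1 above (hypothetical helper, not Arelle's
# actual implementation; the sib attributes below are stand-ins for Arelle's
# concept/relationship API): walk back through a presumed total's preceding
# siblings, collecting numeric siblings that have facts as contributing items
# and recursing into abstract siblings, whose children often represent a
# breakdown.
def find_contributing_items(preceding_siblings):
    contributing = []
    for sib in reversed(preceding_siblings):
        if sib.isAbstract:
            contributing.extend(find_contributing_items(sib.children))
        elif sib.hasFacts and sib.isNumeric and sib.samePeriodType:
            contributing.append(sib)
    return contributing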
'''Check the data for prediction'''
import os
from basis.file import downloadMatchingRelationship
existing_datasets = os.path.exists("haikou-experiments/matching_relationship")
if not existing_datasets:
print("Downloading datasets...")
print("If failed, you can download them from https://drive.google.com/file/d/1RNEmGBfnm-nIP32m3R1oj4pK8Oxmm7EO/view?usp=sharing")
downloadMatchingRelationship()
'''import necessary dependencies'''
import scipy
import pandas as pd
import json
import time
import random
import math
import datetime
import csv
from copy import deepcopy
import progressbar
from basis.schedule import Schedule
from basis.assistant import getID
from basis.setting import MAX_SEARCH_LAYERS,PERIODS,PLATFORM
from basis.setting import WAITING_TIME,SPEED,MINUTE,PERIODS_MINUTES
from basis.setting import CRITERION
from basis.edges import ALL_EDGES
from basis.vertexes import ALL_VERTEXES
from basis.neighbor import ALL_NEIGHBOR_EDGES
ALL_TAKERS, ALL_SEEKERS = {},{}
E = math.e  # Euler's number, used for the exponential matching probabilities
class InteratedSolver(object):
def __init__(self, max_OD_ID):
self.HOUR_INDEX = 0 # study period
self.MAX_ITERATE_TIMES = 10000
self.max_OD_ID = max_OD_ID
        self.min_samples = 15 # ODs with fewer than 15 samples during the study period will be excluded
        self.tendency = 1 # proportion of passengers who choose carpooling
self.loadODDic()
self.loadODs()
self.loadSeekerTaker()
self.initialVariables()
        self.iteratedSolver()
self.predictResults(final=True)
def loadODs(self):
lambda_df = pd.read_csv("haikou-experiments/network/combined_0.csv") # demand rates
ODs_df = pd.read_csv("haikou-experiments/matching_relationship/ODs.csv") # OD
self.all_ODs = {}
bar = progressbar.ProgressBar(widgets=["ODs Loading:", progressbar.Percentage(),' (', progressbar.SimpleProgress(), ') ',' (', progressbar.AbsoluteETA(), ') ',])
for j in bar(range(self.max_OD_ID)):
if j >= lambda_df.shape[0]: break
            if lambda_df["days"][j] <= self.min_samples: break # demand file is assumed sorted by days, so stop at the first OD below the threshold
combined_id = getID(lambda_df["start_ver"][j],lambda_df["end_ver"][j])
i = self.OD_dic[combined_id]["line_id"]
self.all_ODs[ODs_df["id"][i]] = {
"OD_id": ODs_df["id"][i],
"start_ver": ODs_df["start_ver"][i],
"end_ver": ODs_df["end_ver"][i],
"num": lambda_df["num"][j],
"taker_keys": json.loads(ODs_df["taker_keys"][i]),
"seeker_keys": json.loads(ODs_df["seeker_keys"][i]),
"lam_w": lambda_df["num"][j]*self.tendency/(PERIODS_MINUTES[self.HOUR_INDEX]*40)
}
print("#############Experiments Setting##############")
print("Experiments Period: %02s:%02s - %02s:%02s" % (PERIODS[self.HOUR_INDEX][0],PERIODS[self.HOUR_INDEX][1],PERIODS[self.HOUR_INDEX+1][0],PERIODS[self.HOUR_INDEX+1][1]))
print("Search Distance: %s " % (MAX_SEARCH_LAYERS*500))
print("MAX OD ID: %s" % self.max_OD_ID)
print("Feasible OD: %s" % len(self.all_ODs))
def loadSeekerTaker(self):
self.ALL_SEEKERS = {}
self.ALL_TAKERS = {}
seekers_df = pd.read_csv("haikou-experiments/matching_relationship/seekers.csv")
takers_df = pd.read_csv("haikou-experiments/matching_relationship/takers.csv")
bar = progressbar.ProgressBar(widgets=["Seeker Loading:", progressbar.Percentage(),' (', progressbar.SimpleProgress(), ') ',' (', progressbar.AbsoluteETA(), ') ',])
self.all_seeker_keys = []
for i in bar(range(seekers_df.shape[0])):
if seekers_df["OD_id"][i] not in self.all_ODs: continue
self.ALL_SEEKERS[i] = {
"seeker_id" : seekers_df["seeker_id"][i],
"vertex_id" : seekers_df["vertex_id"][i],
"OD_id" : seekers_df["OD_id"][i],
"type" : seekers_df["type"][i],
"sub_taker_key" : seekers_df["sub_taker_key"][i],
}
self.all_seeker_keys.append(i)
self.all_seeker_num = len(self.all_seeker_keys)
bar = progressbar.ProgressBar(widgets=["Taker Loading:", progressbar.Percentage(),' (', progressbar.SimpleProgress(), ') ',' (', progressbar.AbsoluteETA(), ') ',])
self.all_taker_keys = []
for i in bar(range(takers_df.shape[0])):
if takers_df["OD_id"][i] not in self.all_ODs: continue
original_matching_seekers = json.loads(takers_df["matching_seekers"][i])
original_shared_distance = json.loads(takers_df["shared_distance"][i])
original_detour = json.loads(takers_df["detour"][i])
matching_seekers, all_shared_distance,all_detour = self.getFeasibleSeekers(original_matching_seekers,original_shared_distance,original_detour)
self.ALL_TAKERS[i] = {
"taker_id" : takers_df["taker_id"][i],
"edge_id" : json.loads(takers_df["edge_id"][i]),
"OD_id" : takers_df["OD_id"][i],
"type" : takers_df["type"][i],
"length" : takers_df["length"][i],
"matching_seekers" : matching_seekers,
"all_shared_distance" : all_shared_distance,
"all_detour" : all_detour,
}
self.all_taker_keys.append(i)
self.all_taker_num = len(self.all_taker_keys)
for i in self.ALL_SEEKERS.keys():
matching_takers, all_shared_distance, all_detour = self.getFeasibleTakers(json.loads(seekers_df["matching_takers"][i]),\
json.loads(seekers_df["shared_distance"][i]),json.loads(seekers_df["detour"][i]))
self.ALL_SEEKERS[i]["matching_takers"] = matching_takers
self.ALL_SEEKERS[i]["all_shared_distance"] = all_shared_distance
self.ALL_SEEKERS[i]["all_detour"] = all_detour
print("Number of Takers: %s " % len(self.ALL_TAKERS))
print("Number of Seekers: %s" % len(self.ALL_SEEKERS))
def initialVariables(self):
self.lam_seeker, self.P_seeker = {},{}
for key in self.ALL_SEEKERS.keys():
self.lam_seeker[key] = [random.random()/5,0] # demand rates of seekers
self.P_seeker[key] = [random.random()/5,0] # matching probability of seekers
self.lam_taker_arrive, self.eta_taker, self.eta_taker_seeker = {},{},{}
self.P_taker, self.rho_taker = {},{}
for key in self.ALL_TAKERS.keys():
self.lam_taker_arrive[key] = [random.random()/5,0] # arrival rates of takers
self.eta_taker[key] = [random.random()/5,0] # aggregate arrival rate of matching opportunities for takers
self.eta_taker_seeker[key] = {} # mean arrival rate of matching opportunities
            matching_num = len(self.ALL_TAKERS[key]["matching_seekers"])
            for seeker_key in self.ALL_TAKERS[key]["matching_seekers"]:
                self.eta_taker_seeker[key][seeker_key] = [random.random()/(matching_num*5),random.random()/(matching_num*5)] # expected arrival rate of matching opportunities on this edge (split across all vertices)
            self.P_taker[key] = [random.random()/5,0] # matching probability of taker
            self.rho_taker[key] = [random.random()/5,0] # probability that at least one taker is present in the taker state
self.num_eta_st = self.getNumberEtaST()
    def iteratedSolver(self):
        '''Iteratively solve for all variables'''
self.iterate_time = 0
change = 99999
starttime = datetime.datetime.now()
while self.iterate_time < self.MAX_ITERATE_TIMES and (change > 0.1 or self.iterate_time < 20):
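            # Double buffering: every variable holds a two-slot list; _cur and
            # _last alternate between slots 0 and 1 each iteration, so updates
            # can read the previous iteration's values without copying.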
_cur,_last = 1 - self.iterate_time%2, self.iterate_time%2
self.obtainLamST(_cur,_last)
self.obtainEtaST(_cur,_last)
self.obtainEtaTaker(_cur,_last)
self.obtainPTaker(_cur,_last)
self.obtainPSeeker(_cur,_last)
all_change = [self.getRelativeChange(self.lam_taker_arrive),self.getRelativeChange(self.lam_seeker),self.getRelativeChange(self.eta_taker),self.getRelativeChange(self.P_taker),self.getRelativeChange(self.rho_taker),self.getRelativeChange(self.P_seeker)]
change = max(all_change)
print("iteration%s,%s,%s,%s,%s,%s,%s"%(self.iterate_time,all_change[0],all_change[1],all_change[2],all_change[3],all_change[4],all_change[5]))
self.iterate_time = self.iterate_time + 1
endtime = datetime.datetime.now()
print("Iteration Times: %s" % self.iterate_time)
print("Execution Time: %s second" % (endtime - starttime))
fo = open("haikou-experiments/results/experiments_log.txt", "a+")
fo.write("Study Period: %02s : %02s - %02s : %02s \n" % (PERIODS[self.HOUR_INDEX][0],PERIODS[self.HOUR_INDEX][1],PERIODS[self.HOUR_INDEX+1][0],PERIODS[self.HOUR_INDEX+1][1]))
fo.write("Platform: %s \n" % PLATFORM)
fo.write("Current Time: %s \n" % (time.asctime( time.localtime(time.time()))))
fo.write("Search Distance: %s m\n" % (MAX_SEARCH_LAYERS*500))
fo.write("Number of OD: %s 个\n" % len(self.all_ODs))
fo.write("Node: %s \n" % len(self.ALL_SEEKERS))
fo.write("Segment: %s \n" % len(self.ALL_TAKERS))
fo.write("Number of Variables: %s\n" % (len(self.lam_seeker) + len(self.lam_taker_arrive) + self.num_eta_st + len(self.eta_taker) + len(self.P_taker) + len(self.rho_taker) + len(self.P_seeker)))
fo.write("")
fo.write("Ieration Times: %s\n" % self.iterate_time)
fo.write("Ieration Time: %s second\n\n" % (endtime - starttime))
fo.close()
def obtainLamST(self,_cur,_last):
'''Get the arrival rates of seeker/taker'''
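        # Demand cascades down each OD's chain: a type-1 seeker arrives at the
        # OD's rate lam_w; seekers left unmatched become takers
        # (lam_taker = lam_seeker * (1 - P_seeker)), and takers left unmatched
        # re-enter as the next seeker in the chain.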
for i in range(len(self.all_seeker_keys)):
seeker_key = self.all_seeker_keys[i]
OD_id = self.ALL_SEEKERS[seeker_key]["OD_id"]
if self.ALL_SEEKERS[seeker_key]["type"] == 1:
self.lam_seeker[seeker_key][_cur] = self.all_ODs[OD_id]["lam_w"]
else:
last_seeker_key = self.all_seeker_keys[i-1]
sub_taker_key = self.ALL_SEEKERS[last_seeker_key]["sub_taker_key"]
self.lam_taker_arrive[sub_taker_key][_cur] = self.lam_seeker[last_seeker_key][_cur] * (1 - self.P_seeker[last_seeker_key][_last])
self.lam_seeker[seeker_key][_cur] =self.lam_taker_arrive[sub_taker_key][_cur] * (1 - self.P_taker[sub_taker_key][_last])
def obtainEtaST(self,_cur,_last):
'''Get the arrival rate of matching opportunities'''
for i in self.all_seeker_keys:
if self.ALL_SEEKERS[i]["matching_takers"] == []: continue
first_segment = self.ALL_SEEKERS[i]["matching_takers"][0]
s_n = {first_segment : self.lam_seeker[i][_last]}
self.eta_taker_seeker[first_segment][i][_cur] = self.lam_seeker[i][_last]
for j in range(len(self.ALL_SEEKERS[i]["matching_takers"])-1):
last_segment_key = self.ALL_SEEKERS[i]["matching_takers"][j]
current_segment_key = self.ALL_SEEKERS[i]["matching_takers"][j+1]
s_n[current_segment_key] = s_n[last_segment_key] * (1 - self.rho_taker[last_segment_key][_last])
self.eta_taker_seeker[current_segment_key][i][_cur] = s_n[current_segment_key]
def obtainEtaTaker(self,_cur,_last):
'''Get the aggregate arrival rate of matching opportunities'''
for i in self.all_taker_keys:
self.eta_taker[i][_cur] = 0
for j in self.ALL_TAKERS[i]["matching_seekers"]:
self.eta_taker[i][_cur] = self.eta_taker[i][_cur] + self.eta_taker_seeker[i][j][_cur]
def getNumberEtaST(self):
        '''Count the total number of eta_taker_seeker variables'''
overall_num = 0
for key in self.all_taker_keys:
overall_num = overall_num + len(self.eta_taker_seeker[key])
return overall_num
def obtainPTaker(self,_cur,_last):
'''Get the matching probability of taker and the probability of having at least one taker in state taker'''
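        # Assuming Poisson arrivals of matching opportunities at rate eta,
        # P_taker = 1 - exp(-eta * t_s) is the probability that at least one
        # opportunity arrives within the sojourn time t_s, and rho_taker is
        # the expected occupancy lam * E[sojourn], where
        # E[sojourn] = (1 - exp(-eta * t_s)) / eta (tending to t_s as eta -> 0).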
for i in self.all_taker_keys:
t_s = WAITING_TIME
if self.ALL_TAKERS[i]["type"] != 1:
t_s = self.ALL_TAKERS[i]["length"]/SPEED + 0.5
self.P_taker[i][_cur] = 1 - math.pow(E, -self.eta_taker[i][_last] * t_s)
if self.eta_taker[i][_last] == 0:
self.rho_taker[i][_cur] = t_s * self.lam_taker_arrive[i][_last]
else:
self.rho_taker[i][_cur] = (1 - math.pow(E, -self.eta_taker[i][_last] * t_s)) * self.lam_taker_arrive[i][_last]/self.eta_taker[i][_last]
def obtainPSeeker(self,_cur,_last):
'''Get the matching probability of seeker'''
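        # A seeker is matched iff at least one of its matching takers' states
        # is occupied; treating the occupancies rho_taker as independent gives
        # P_seeker = 1 - prod(1 - rho_taker).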
for i in self.all_seeker_keys:
product = 1
for taker_id in self.ALL_SEEKERS[i]["matching_takers"]:
product = product * (1 - self.rho_taker[taker_id][_last])
self.P_seeker[i][_cur] = 1 - product
def getRelativeChange(self,all_dic):
last_index = self.iterate_time%2
overall_change = []
for i in all_dic.keys():
            if all_dic[i][0] < 1e-4 and all_dic[i][1] < 1e-4: continue # skip numerically negligible values to avoid noisy relative changes
if all_dic[i][0] == 0 or all_dic[i][1] == 0: continue
overall_change.append(abs(all_dic[i][0] - all_dic[i][1])/all_dic[i][last_index])
return max(overall_change)
def predictResults(self,final):
index = self.iterate_time%2
starttime = datetime.datetime.now()
matching_probability = {}
G_n,all_P_w = {},{}
bar = progressbar.ProgressBar(widgets=[ 'Probability: ', progressbar.Percentage(),' (', progressbar.SimpleProgress(), ') ',' (', progressbar.AbsoluteETA(), ') ',])
for i in bar(self.all_ODs.keys()):
start_seeker = self.all_ODs[i]["seeker_keys"][0]
start_taker = self.ALL_SEEKERS[start_seeker]["sub_taker_key"]
P_A_w = self.P_seeker[start_seeker][index]
P_B_w = (1 - self.P_seeker[start_seeker][index]) * self.P_taker[start_taker][index]
G_n[start_seeker] = 1
last_seeker_key = start_seeker
last_segment_key = self.ALL_SEEKERS[last_seeker_key]["sub_taker_key"]
for j in self.all_ODs[i]["seeker_keys"][1:]:
G_n[j] = G_n[last_seeker_key] * (1 - self.P_seeker[last_seeker_key][index]) * (1 - self.P_taker[last_segment_key][index])
last_seeker_key,last_segment_key = j,self.ALL_SEEKERS[j]["sub_taker_key"]
P_w = 1 - G_n[j]
P_C_w = P_w - P_A_w - P_B_w
matching_probability[i] = [P_w, P_A_w, P_B_w, P_C_w]
all_P_w[i] = P_w
        # Predict ride distance and shared distance
all_l_w, all_e_w = {},{}
bar = progressbar.ProgressBar(widgets=[ 'Distance: ', progressbar.Percentage(),' (', progressbar.SimpleProgress(), ') ',' (', progressbar.AbsoluteETA(), ') ',])
for i in bar(self.all_ODs.keys()):
l_w_0 = Schedule.distanceByHistory(self.all_ODs[i]["start_ver"],self.all_ODs[i]["end_ver"])
all_l_n_0, all_e_n_0 = [], []
for seeker in self.all_ODs[i]["seeker_keys"]:
if self.ALL_SEEKERS[seeker]["matching_takers"] != []:
l_n_0, e_n_0 = 0,0
overall_denominator = 0
for j in range(len(self.ALL_SEEKERS[seeker]["matching_takers"])):
matching_takers = self.ALL_SEEKERS[seeker]["matching_takers"][j]
detour = self.ALL_SEEKERS[seeker]["all_detour"][j]
shared_distance = self.ALL_SEEKERS[seeker]["all_shared_distance"][j]
l_n_s,e_n_s= l_w_0 + detour,shared_distance
multiplier = self.eta_taker_seeker[matching_takers][seeker][index] * self.rho_taker[matching_takers][index]
overall_denominator = overall_denominator + multiplier
l_n_0, e_n_0 = l_n_0 + multiplier * l_n_s, e_n_0 + multiplier * e_n_s
all_l_n_0.append(l_n_0/overall_denominator), all_e_n_0.append(e_n_0/overall_denominator)
else:
all_l_n_0.append(l_w_0), all_e_n_0.append(0)
            # Distance computation within road segments
all_l_s_1, all_e_s_1 = [], []
for taker in self.all_ODs[i]["taker_keys"]:
if self.ALL_TAKERS[taker]["matching_seekers"] != []:
l_s_1, e_s_1 = 0,0
for j in range(len(self.ALL_TAKERS[taker]["matching_seekers"])):
matching_seekers = self.ALL_TAKERS[taker]["matching_seekers"][j]
detour = self.ALL_TAKERS[taker]["all_detour"][j]
shared_distance = self.ALL_TAKERS[taker]["all_shared_distance"][j]
l_n_s, e_n_s = l_w_0 + detour, shared_distance
l_s_1 = l_s_1 + self.eta_taker_seeker[taker][matching_seekers][index] * l_n_s
e_s_1 = e_s_1 + self.eta_taker_seeker[taker][matching_seekers][index] * e_n_s
all_l_s_1.append(l_s_1/self.eta_taker[taker][index]), all_e_s_1.append(e_s_1/self.eta_taker[taker][index])
else:
all_l_s_1.append(l_w_0), all_e_s_1.append(0)
            # Combined expected-value computation
l_w = l_w_0 * (1 - all_P_w[i])
e_w = 0
for j in range(len(self.all_ODs[i]["seeker_keys"]) - 1):
seeker_key = self.all_ODs[i]["seeker_keys"][j]
segment_key = self.ALL_SEEKERS[seeker_key]["sub_taker_key"]
l_w = l_w + all_l_n_0[j]*G_n[seeker_key]*self.P_seeker[seeker_key][index] + all_l_s_1[j]*G_n[seeker_key]*(1-self.P_seeker[seeker_key][index])*self.P_taker[segment_key][index]
e_w = e_w + all_e_n_0[j]*G_n[seeker_key]*self.P_seeker[seeker_key][index] + all_e_s_1[j]*G_n[seeker_key]*(1-self.P_seeker[seeker_key][index])*self.P_taker[segment_key][index]
all_l_w[i] = l_w
all_e_w[i] = e_w
endtime = datetime.datetime.now()
print("Execution Time: %s second" % (endtime - starttime))
fo = open("haikou-experiments/results/experiments_log.txt", "a+")
fo.write("Execution Time: %s second\n\n" % (endtime - starttime))
fo.close()
with open("haikou-experiments/results/PREDICTION_OD_%s_PERIOD_%s_SAMPLE_%s_TENDENCY_%.2f.csv"%(self.max_OD_ID,self.HOUR_INDEX,self.min_samples,self.tendency),"w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["OD_id", "start_ver", "end_ver", "num", "P_w", "l_w", "e_w"])
for i in self.all_ODs.keys():
writer.writerow([self.all_ODs[i]["OD_id"], self.all_ODs[i]["start_ver"], self.all_ODs[i]["end_ver"], self.all_ODs[i]["num"], all_P_w[i], all_l_w[i], all_e_w[i]])
def getFeasibleSeekers(self,seekers,all_shared_distance,all_detour):
new_seekers,new_shared_distance,new_detour = [],[],[]
for i in range(len(seekers)):
seeker,shared_distance,detour = seekers[i],all_shared_distance[i],all_detour[i]
if seeker in self.ALL_SEEKERS:
new_seekers.append(seeker)
new_shared_distance.append(shared_distance)
new_detour.append(detour)
return new_seekers,new_shared_distance,new_detour
def getFeasibleTakers(self,takers,all_shared_distance,all_detour):
new_takers,new_shared_distance,new_detour = [],[],[]
for i in range(len(takers)):
            taker,shared_distance,detour = takers[i],all_shared_distance[i],all_detour[i]
            if taker in self.ALL_TAKERS:
                new_takers.append(taker)
                new_shared_distance.append(shared_distance)
                new_detour.append(detour)
        return new_takers,new_shared_distance,new_detour
        # If the message contains new lines, just pass those through to print.
if message.find('\n') != -1:
sss = sss + prefix + message.replace('\n','\n'+' '*prelen)+'\n'
# Else if message is not empty, pass to wrapper.fill
elif message:
sss = sss + wrapper.fill(message) + '\n'
# Else just pass the empty message along with prefix
else:
sss = sss + prefix + '\n'
return sss
def print_img_summary(self):
"""Echo the object's __repr__ method."""
print(self.__repr__())
def __iter__(self):
'''Yield from default iter_window iterator.'''
for x in self.iter_window():
yield x
def iter_base(self, xoff, yoff, win_xsize, win_ysize, **kwargs):
'''
Base iterator function to yield data from array-like window parameters.
Parameters
----------
xoff : array_like
x offset(s) for the image regions to be read.
yoff : array_like
y offset(s) for the image regions to be read.
win_xsize : array_like
window x-dim size(s) for the image regions to be read.
win_ysize : array_like
window y-dim size(s) for the image regions to be read.
kwargs : optional
keyword arguments to be passed to get_data.
Yields
------
ndarray
Three dimensional numpy array of data from the requested region
of the image.
'''
logger.debug('*** begin iter_base ***')
# Broadcast array_like
windows = np.broadcast(xoff,yoff,win_xsize,win_ysize)
# Iterate through windows generated from input parameters
for w in windows:
logger.debug('window parameters: xoff %s, yoff %s, '
'win_xsize %s, win_ysize %s',
w[0], w[1], w[2], w[3])
yield self.get_data(window=w,**kwargs)
def iter_window(self, win_size=None, stride=None, **kwargs):
'''
Window iterator that yields data from the image based on win_size
and stride.
win_size and stride are both optional arguments. If neither are
passed, then the method pulls win_size from GDAL GetBlockSize() and
uses that to step through the image. If only win_size is provided,
the method yields adjoining windows of the size requested. If only
        stride is provided, an error is raised.
Parameters
----------
win_size : array-like, length 2, optional
The size of the requested image chip in x and y.
stride : array-like, length 2, optional
The size of the step between each yielded chip in x and y.
kwargs: optional
Arguments for get_data().
Yields
------
ndarray
Three dimensional numpy array of pixel values from the
requested region of the image.
'''
logger.debug('*** begin iter_window ***')
# Check input values
if win_size:
if any(x <= 0 for x in win_size):
raise ValueError('No value in win_size can be equal '
'to or less than zero.')
if stride:
if any(x <= 0 for x in stride):
raise ValueError('No value in stride can be equal '
'to or less than zero.')
# if NOT win_size and NOT stride
# use gdal to figure out block size and then continue on below.
if not win_size and not stride:
# Get block size from gdal
b = self._fobj.GetRasterBand(1)
win_size = b.GetBlockSize()
logger.debug('win_size is: %s, stride is: %s', win_size, stride)
# if win_size and NOT stride
# set stride to make windows adjoining
if win_size and not stride:
# Set vars for easy access below
xs = self.meta.shape[1]
ys = self.meta.shape[2]
xsize, ysize = win_size
            # Find starting offsets by identifying the pixels that don't fit in
            # the requested window blocks and then split the difference between
            # ends of the image using floor (int) to reduce fractions.
x_extra_pixels = xs % win_size[0]
xoff = int(x_extra_pixels / 2.0)
y_extra_pixels = ys % win_size[1]
yoff = int(y_extra_pixels / 2.0)
# Use while True to loop through get_data until outside the image
xoff_start = xoff
while True:
logger.debug(' xoff is %s,\tyoff is %s', xoff, yoff)
yield self.get_data(window=[xoff, yoff, xsize, ysize],**kwargs)
xoff += xsize
if xoff > self.meta.shape[1]:
xoff = xoff_start
yoff += ysize
if yoff > self.meta.shape[2]:
break
# if NOT win_size and stride, raise error
elif not win_size and stride:
            raise ValueError('Setting stride and not setting win_size is not '
                             'allowed because there is no reasonable value to '
                             'set win_size to. In this case stride can be '
                             'even or odd, which could result in varying-'
                             'size return blocks around the center pixel '
                             '(or fractional pixel).')
# if win_size and stride
# just do it
elif win_size and stride:
# Set vars for easy access below
xs = self.meta.shape[1]
ys = self.meta.shape[2]
xsize, ysize = win_size
xstride, ystride = stride
            # Find starting offset by identifying pixels that don't fit in
            # the requested size/stride and then split the difference between
            # ends of the image using floor (int) to reduce fractions.
x_extra_pixels = (xs - xsize) % xstride
xoff = int(x_extra_pixels/2.0)
y_extra_pixels = (ys - ysize) % ystride
yoff = int(y_extra_pixels/2.0)
# Start the yield loop
xoff_start = xoff
while True:
logger.debug(' xoff is %s,\tyoff is %s', xoff, yoff)
yield self.get_data(window=[xoff, yoff, xsize, ysize], **kwargs)
xoff += xstride
if xoff > self.meta.shape[1]:
xoff = xoff_start
yoff += ystride
if yoff > self.meta.shape[2]:
break
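    # Example usage (hypothetical `img` instance of this class):
    #     for chip in img.iter_window(win_size=(256, 256), stride=(128, 128)):
    #         process(chip)  # each chip is a 3-D ndarray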
def iter_window_random(self, win_size=None, no_chips=1000, **kwargs):
"""Random chip iterator.
Parameters
----------
win_size : array-like, length 2
The size of the requested image chip in x and y.
no_chips : int, optional
Number of chips to generate.
kwargs: optional
Arguments for get_data().
Yields
------
ndarray
Three dimensional numpy array of pixel values from the
requested region of the image.
"""
# Check input values
if win_size:
if any(x <= 0 for x in win_size):
raise ValueError('No value in win_size can be equal '
'to or less than zero.')
counter = no_chips
xs = self.meta.shape[1]
ys = self.meta.shape[2]
xsize, ysize = win_size
while True:
# select random offset
xoff = np.random.randint(xs-xsize+1)
yoff = np.random.randint(ys-ysize+1)
yield self.get_data(window=[xoff, yoff, xsize, ysize], **kwargs)
counter -= 1
if counter == 0: break
def iter_components(self, **kwargs):
"""This is a convenience method that iterataes (via yield) through
the components in the image object. Any kwargs valid for get_data
can be passed through.
kwargs can be any valid arugment for get_data
Parameters
----------
None
Yields
------
ndarray
Three dimensional numpy array of pixel values from the
requested region of the image.
"""
for c in xrange(len(self.files.dfile_tiles)):
yield self.get_data(component=c, **kwargs)
def iter_vector(self, vector=None, properties=False, filter=None, **kwargs):
"""This method iterates (via yeild) through a vector object or file.
Any kwargs valid for get_data can be passed through."""
if 'window' in kwargs.keys():
raise ValueError("The window argument is not valid for this " \
"method. They both define a retrieval " \
"geometry. Pass one or the other.")
if 'geom' in kwargs.keys():
raise ValueError("The geom argument is not valid for this " \
"method. The vector file passed in defines " \
"the retrieval geometry.")
# ToDo Test for overlap of geom and image data?
obj = ogr.Open(vector)
lyr = obj.GetLayer(0)
lyr_sr = lyr.GetSpatialRef()
img_proj = self.meta.projection_string
img_trans = self.meta.geo_transform
img_sr = osr.SpatialReference()
img_sr.ImportFromWkt(img_proj)
coord_trans = osr.CoordinateTransformation(lyr_sr, img_sr)
for feat in lyr:
            # Return all feature properties if properties is True
if properties is True:
prop_out = feat.items()
elif properties:
if isinstance(properties, (list, tuple, str)):
if isinstance(properties, str):
properties = [properties]
it = feat.items()
                    if not all(x in it for x in properties):
raise ValueError("One or more of the requested "
"properties are not in the vector "
"feature.")
prop_out = {x: it[x] for x in properties if x in it.keys()}
if not prop_out:
prop_out = None
warnings.warn("No properties value found matching "
"request.")
else:
raise ValueError("Invalid properties argument.")
# Determine if the feature should be returned based on value of
# filter and if the value exists in the feature properties.
if filter:
                # The filter should be either a dictionary holding a single
                # key/value pair, or a list of such key/value pairs. The
                # idea is that you can filter against more than one value
                # of a key by passing a list of pairs.
                if isinstance(filter, dict) and (len(filter) != 1):
raise ValueError("Filters should be passed in as a " \
"list of dictionaries that will " \
"be used to filter against the " \
"feature | |
# repo: bkille/bitarray
"""
Tests for bitarray.util module
"""
from __future__ import absolute_import
import os
import sys
import unittest
from string import hexdigits
from random import choice, randint, random
from collections import Counter
from bitarray import (bitarray, frozenbitarray, bits2bytes, decodetree,
get_default_endian, _set_default_endian)
from bitarray.test_bitarray import Util
from bitarray.util import (zeros, urandom, make_endian, rindex, strip,
count_n, count_and, count_or, count_xor, subset,
ba2hex, hex2ba, ba2int, int2ba, huffman_code)
if sys.version_info[0] == 3:
unicode = str
tests = []
# ---------------------------------------------------------------------------
class TestsZeros(unittest.TestCase):
def test_1(self):
for default_endian in 'big', 'little':
_set_default_endian(default_endian)
a = zeros(0)
self.assertEqual(a, bitarray())
self.assertEqual(a.endian(), default_endian)
b = zeros(0, endian=None)
self.assertEqual(b.endian(), default_endian)
for n in range(100):
a = zeros(n)
self.assertEqual(a, bitarray(n * '0'))
for endian in 'big', 'little':
a = zeros(3, endian)
self.assertEqual(a, bitarray('000'))
self.assertEqual(a.endian(), endian)
def test_wrong_args(self):
self.assertRaises(TypeError, zeros) # no argument
self.assertRaises(TypeError, zeros, '')
self.assertRaises(TypeError, zeros, bitarray())
self.assertRaises(TypeError, zeros, [])
self.assertRaises(TypeError, zeros, 1.0)
self.assertRaises(ValueError, zeros, -1)
self.assertRaises(TypeError, zeros, 0, 1) # endian not string
self.assertRaises(ValueError, zeros, 0, 'foo') # endian wrong string
tests.append(TestsZeros)
# ---------------------------------------------------------------------------
class TestsRandom(unittest.TestCase):
def test_1(self):
for default_endian in 'big', 'little':
_set_default_endian(default_endian)
a = urandom(0)
self.assertEqual(a, bitarray())
self.assertEqual(a.endian(), default_endian)
b = urandom(0, endian=None)
self.assertEqual(b.endian(), default_endian)
for n in range(100):
a = urandom(n)
self.assertEqual(len(a), n)
                self.assertEqual(a.endian(), default_endian)
a = urandom(1000)
b = urandom(1000)
self.assertNotEqual(a, b)
self.assertTrue(400 < a.count() < 600)
self.assertTrue(400 < b.count() < 600)
def test_wrong_args(self):
self.assertRaises(TypeError, urandom)
self.assertRaises(TypeError, urandom, '')
self.assertRaises(TypeError, urandom, bitarray())
self.assertRaises(TypeError, urandom, [])
self.assertRaises(TypeError, urandom, 1.0)
self.assertRaises(ValueError, urandom, -1)
self.assertRaises(TypeError, urandom, 0, 1)
self.assertRaises(ValueError, urandom, 0, 'foo')
tests.append(TestsRandom)
# ---------------------------------------------------------------------------
class TestsMakeEndian(unittest.TestCase, Util):
def test_simple(self):
a = bitarray('1110001', endian='big')
b = make_endian(a, 'big')
self.assertTrue(b is a)
c = make_endian(a, 'little')
self.assertTrue(c == a)
self.assertEqual(c.endian(), 'little')
# wrong arguments
self.assertRaises(TypeError, make_endian, '', 'big')
self.assertRaises(TypeError, make_endian, bitarray(), 1)
self.assertRaises(ValueError, make_endian, bitarray(), 'foo')
def test_empty(self):
a = bitarray(endian='little')
b = make_endian(a, 'big')
self.assertTrue(b == a)
self.assertEqual(len(b), 0)
self.assertEqual(b.endian(), 'big')
def test_from_frozen(self):
a = frozenbitarray('1101111', 'big')
b = make_endian(a, 'big')
self.assertTrue(b is a)
c = make_endian(a, 'little')
self.assertTrue(c == a)
self.assertEqual(c.endian(), 'little')
def test_random(self):
for a in self.randombitarrays():
aa = a.copy()
for endian in 'big', 'little':
b = make_endian(a, endian)
self.assertEqual(a, b)
self.assertEqual(b.endian(), endian)
if a.endian() == endian:
self.assertTrue(b is a)
self.assertEQUAL(a, aa)
tests.append(TestsMakeEndian)
# ---------------------------------------------------------------------------
class TestsRindex(unittest.TestCase, Util):
def test_simple(self):
self.assertRaises(TypeError, rindex)
self.assertRaises(TypeError, rindex, None)
self.assertRaises(TypeError, rindex, bitarray(), 1, 2)
for endian in 'big', 'little':
a = bitarray('00010110000', endian)
self.assertEqual(rindex(a), 6)
self.assertEqual(rindex(a, 1), 6)
self.assertEqual(rindex(a, 'A'), 6)
self.assertEqual(rindex(a, True), 6)
a = bitarray('00010110111', endian)
self.assertEqual(rindex(a, 0), 7)
self.assertEqual(rindex(a, None), 7)
self.assertEqual(rindex(a, False), 7)
a = frozenbitarray('00010110111', endian)
self.assertEqual(rindex(a, 0), 7)
self.assertEqual(rindex(a, None), 7)
self.assertEqual(rindex(a, False), 7)
for v in 0, 1:
self.assertRaises(ValueError, rindex,
bitarray(0, endian), v)
self.assertRaises(ValueError, rindex,
bitarray('000', endian), 1)
self.assertRaises(ValueError, rindex,
bitarray('11111', endian), 0)
def test_random(self):
for a in self.randombitarrays():
v = randint(0, 1)
try:
i = rindex(a, v)
except ValueError:
i = None
s = a.to01()
try:
j = s.rindex(str(v))
except ValueError:
j = None
self.assertEqual(i, j)
def test_3(self):
for _ in range(100):
n = randint(1, 100000)
v = randint(0, 1)
a = bitarray(n)
a.setall(1 - v)
lst = [randint(0, n - 1) for _ in range(100)]
for i in lst:
a[i] = v
self.assertEqual(rindex(a, v), max(lst))
def test_one_set(self):
for _ in range(100):
N = randint(1, 10000)
a = bitarray(N)
a.setall(0)
a[randint(0, N - 1)] = 1
self.assertEqual(rindex(a), a.index(1))
tests.append(TestsRindex)
# ---------------------------------------------------------------------------
class TestsStrip(unittest.TestCase, Util):
def test_simple(self):
self.assertRaises(TypeError, strip, '0110')
self.assertRaises(TypeError, strip, bitarray(), 123)
self.assertRaises(ValueError, strip, bitarray(), 'up')
for default_endian in 'big', 'little':
_set_default_endian(default_endian)
a = bitarray('00010110000')
self.assertEQUAL(strip(a), bitarray('0001011'))
self.assertEQUAL(strip(a, 'left'), bitarray('10110000'))
self.assertEQUAL(strip(a, 'both'), bitarray('1011'))
b = frozenbitarray('00010110000')
self.assertEqual(strip(b, 'both'), bitarray('1011'))
for mode in 'left', 'right', 'both':
self.assertEqual(strip(bitarray('000'), mode), bitarray())
self.assertEqual(strip(bitarray(), mode), bitarray())
def test_random(self):
for a in self.randombitarrays():
b = a.copy()
s = a.to01()
self.assertEqual(strip(a, 'left'), bitarray(s.lstrip('0')))
self.assertEqual(strip(a, 'right'), bitarray(s.rstrip('0')))
self.assertEqual(strip(a, 'both'), bitarray(s.strip('0')))
self.assertEQUAL(a, b)
def test_one_set(self):
for _ in range(100):
N = randint(1, 10000)
a = bitarray(N)
a.setall(0)
a[randint(0, N - 1)] = 1
self.assertEqual(strip(a, 'both'), bitarray('1'))
tests.append(TestsStrip)
# ---------------------------------------------------------------------------
class TestsCount_N(unittest.TestCase, Util):
@staticmethod
def count_n(a, n):
"return the index i for which a[:i].count() == n"
i, j = n, a.count(1, 0, n)
while j < n:
if a[i]:
j += 1
i += 1
return i
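    # For example, with a = bitarray('10110'), count_n(a, 3) == 4, since
    # a[:4] == bitarray('1011') is the shortest prefix containing three 1s.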
def check_result(self, a, n, i):
self.assertEqual(a.count(1, 0, i), n)
if i > 0:
self.assertTrue(a[i - 1])
def test_simple(self):
a = bitarray('111110111110111110111110011110111110111110111000')
b = a.copy()
self.assertEqual(len(a), 48)
self.assertEqual(a.count(), 37)
self.assertRaises(TypeError, count_n, '', 0)
self.assertEqual(count_n(a, 0), 0)
self.assertEqual(count_n(a, 20), 23)
self.assertEqual(count_n(a, 37), 45)
self.assertRaisesMessage(ValueError, "non-negative integer expected",
count_n, a, -1) # n < 0
self.assertRaisesMessage(ValueError, "n larger than bitarray size",
count_n, a, 49) # n > len(a)
self.assertRaisesMessage(ValueError, "n exceeds total count",
count_n, a, 38) # n > a.count()
self.assertRaises(TypeError, count_n, a, "7")
for n in range(0, 37):
i = count_n(a, n)
self.check_result(a, n, i)
self.assertEqual(a[:i].count(), n)
self.assertEqual(i, self.count_n(a, n))
self.assertEQUAL(a, b)
def test_frozen(self):
a = frozenbitarray('001111101111101111101111100111100')
self.assertEqual(len(a), 33)
self.assertEqual(a.count(), 24)
self.assertRaises(TypeError, count_n, '', 0)
self.assertEqual(count_n(a, 0), 0)
self.assertEqual(count_n(a, 10), 13)
self.assertEqual(count_n(a, 24), 31)
self.assertRaises(ValueError, count_n, a, -1) # n < 0
self.assertRaises(ValueError, count_n, a, 25) # n > a.count()
self.assertRaises(ValueError, count_n, a, 34) # n > len(a)
self.assertRaises(TypeError, count_n, a, "7")
def test_large(self):
for N in list(range(100)) + [1000, 10000, 100000]:
a = bitarray(N)
v = randint(0, 1)
a.setall(not v)
for _ in range(randint(0, min(N, 100))):
a[randint(0, N - 1)] = v
n = randint(0, a.count())
self.check_result(a, n, count_n(a, n))
# check for total count
tc = a.count()
self.assertTrue(count_n(a, tc) <= N)
self.assertRaises(ValueError, count_n, a, tc + 1)
def test_one_set(self):
N = 100000
for _ in range(10):
a = bitarray(N)
a.setall(0)
self.assertEqual(count_n(a, 0), 0)
self.assertRaises(ValueError, count_n, a, 1)
i = randint(0, N - 1)
a[i] = 1
self.assertEqual(count_n(a, 1), i + 1)
self.assertRaises(ValueError, count_n, a, 2)
def test_random(self):
for a in self.randombitarrays():
n = a.count() // 2
i = count_n(a, n)
self.check_result(a, n, i)
tests.append(TestsCount_N)
# ---------------------------------------------------------------------------
class TestsBitwiseCount(unittest.TestCase, Util):
def test_count_byte(self):
ones = bitarray(8)
ones.setall(1)
zeros = bitarray(8)
zeros.setall(0)
for i in range(0, 256):
a = bitarray()
a.frombytes(bytes(bytearray([i])))
cnt = a.count()
self.assertEqual(count_and(a, zeros), 0)
self.assertEqual(count_and(a, ones), cnt)
self.assertEqual(count_and(a, a), cnt)
self.assertEqual(count_or(a, zeros), cnt)
self.assertEqual(count_or(a, ones), 8)
self.assertEqual(count_or(a, a), cnt)
self.assertEqual(count_xor(a, zeros), cnt)
self.assertEqual(count_xor(a, ones), 8 - cnt)
self.assertEqual(count_xor(a, a), 0)
def test_bit_count1(self):
a = bitarray('001111')
aa = a.copy()
b = bitarray('010011')
bb = b.copy()
self.assertEqual(count_and(a, b), 2)
self.assertEqual(count_or(a, b), 5)
self.assertEqual(count_xor(a, b), 3)
for f in count_and, count_or, count_xor:
# not two arguments
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, f, a, b, 3)
# wrong argument types
self.assertRaises(TypeError, f, a, '')
self.assertRaises(TypeError, f, '1', b)
self.assertRaises(TypeError, f, a, 4)
self.assertEQUAL(a, aa)
self.assertEQUAL(b, bb)
b.append(1)
for f in count_and, count_or, count_xor:
self.assertRaises(ValueError, f, a, b)
self.assertRaises(ValueError, f,
bitarray('110', 'big'),
bitarray('101', 'little'))
def test_bit_count_frozen(self):
a = frozenbitarray('001111')
b = frozenbitarray('010011')
self.assertEqual(count_and(a, b), 2)
self.assertEqual(count_or(a, b), 5)
self.assertEqual(count_xor(a, b), 3)
def test_bit_count_random(self):
for n in list(range(50)) + [randint(1000, 2000)]:
a = urandom(n)
b = urandom(n)
self.assertEqual(count_and(a, b), (a & b).count())
self.assertEqual(count_or(a, b), (a | b).count())
self.assertEqual(count_xor(a, b), (a ^ b).count())
tests.append(TestsBitwiseCount)
# ---------------------------------------------------------------------------
class TestsSubset(unittest.TestCase, Util):
def test_basic(self):
a = frozenbitarray('0101')
b = bitarray('0111')
self.assertTrue(subset(a, b))
self.assertFalse(subset(b, a))
self.assertRaises(TypeError, subset)
self.assertRaises(TypeError, subset, a, '')
self.assertRaises(TypeError, subset, '1', b)
self.assertRaises(TypeError, subset, a, 4)
b.append(1)
self.assertRaises(ValueError, subset, a, b)
def subset_simple(self, a, b):
return (a & b).count() == a.count()
def test_True(self):
for a, b in [('', ''), ('0', '1'), ('0', '0'), ('1', '1'),
('000', '111'), ('0101', '0111'),
('000010111', '010011111')]:
a, b = bitarray(a), bitarray(b)
self.assertTrue(subset(a, b) is True)
self.assertTrue(self.subset_simple(a, b) is True)
def test_False(self):
for a, b in [('1', '0'), ('1101', '0111'),
('0000101111', '0100111011')]:
a, b = bitarray(a), bitarray(b)
self.assertTrue(subset(a, b) is False)
self.assertTrue(self.subset_simple(a, b) is False)
def test_random(self):
for a in self.randombitarrays(start=1):
b = a.copy()
# we set one random bit in b to 1, so a is always a subset of b
b[randint(0, len(a) - 1)] = 1
self.assertTrue(subset(a, b))
# but b in not always a subset of a
self.assertEqual(subset(b, a), self.subset_simple(b, a))
# we set all bits in a, which ensures that b is a subset of a
a.setall(1)
self.assertTrue(subset(b, a))
tests.append(TestsSubset)
# ---------------------------------------------------------------------------
CODEDICT = {'little': {}, 'big': {
'0': bitarray('0000'), '1': bitarray('0001'),
'2': bitarray('0010'), '3': bitarray('0011'),
'4': bitarray('0100'), '5': bitarray('0101'),
'6': bitarray('0110'), '7': bitarray('0111'),
    '8': bitarray('1000'), '9': bitarray('1001'),
in random.sample(bigElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
s = format_time("size 10000, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_average, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_average, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_average, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_average, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Average (unsafe):"
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(smallElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(mediumElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
s = format_time("size 10, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(bigElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
s = format_time("size 10000, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_average_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
s = "- " * 40
print(s)
f.write(s + "\n")
s = "<NAME>:"
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(smallElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(mediumElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
s = format_time("size 10, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(bigElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
s = format_time("size 10000, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dubois_prade, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Dubois Prade (unsafe):"
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(smallElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dubois_prade_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dubois_prade_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dubois_prade_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dubois_prade_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(mediumElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
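# A possible consolidation (hypothetical helper, not part of the original
# benchmark script): every cell above repeats the same
# format_time / print / f.write triple, which could be collapsed as follows,
# assuming format_time, time_function, nb_iterations and timeout as above.
def bench(f, label, method, *masses):
    s = format_time(label,
                    time_function(nb_iterations, method, *masses,
                                  timeout=timeout, verbose=False),
                    nb_iterations, timeout)
    print(s)
    f.write(s + "\n")
# e.g. bench(f, "size 10, focals %s, 3 bbas" % number,
#            m1.combination_dubois_prade_unsafe, m2, m3)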
for k in cData:
if 'Hailstone2' in k:
# Grab Data
data = cData[k]
flows = data['F']
ineq = data['IN']
fin = data['FIN']
eq = data['EQ']
irr = data['U']
equ = data['EQU']
fequ = data['FEQU']
spec = data['M']
print("H2", end=" , ")
print("{:.1f}".format(flows['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(flows['flakeOnly']*100) , end=" , ")
print(flows['AT'] , end=" , ")
print("{:.1f}".format(ineq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(ineq['flakeOnly']*100) , end=" , ")
print(ineq['AT'] , end=" , ")
print("{:.1f}".format(fin['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fin['flakeOnly']*100) , end=" , ")
print(fin['AT'] , end=" , ")
print("{:.1f}".format(eq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(eq['flakeOnly']*100) , end=" , ")
print(eq['AT'] , end=" , ")
print("{:.1f}".format(irr['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(irr['flakeOnly']*100) , end=" , ")
print(irr['AT'] , end=" , ")
print("{:.1f}".format(equ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(equ['flakeOnly']*100) , end=" , ")
print(equ['AT'] , end=" , ")
print("{:.1f}".format(fequ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fequ['flakeOnly']*100) , end=" , ")
print(fequ['AT'] , end=" , ")
print("{:.1f}".format(spec['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(spec['flakeOnly']*100) , end=" , ")
print("")
# # Grab MWT
for k in cData:
if 'Molecular' in k:
# Grab Data
data = cData[k]
flows = data['F']
ineq = data['IN']
fin = data['FIN']
eq = data['EQ']
irr = data['U']
equ = data['EQU']
fequ = data['FEQU']
spec = data['M']
print("MWT", end=" , ")
print("{:.1f}".format(flows['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(flows['flakeOnly']*100) , end=" , ")
print(flows['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print(0 , end=" , ")
print("{:.1f}".format(fin['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fin['flakeOnly']*100) , end=" , ")
print(fin['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print(0 , end=" , ")
print("{:.1f}".format(irr['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(irr['flakeOnly']*100) , end=" , ")
print(irr['AT'] , end=" , ")
print("{:.1f}".format(equ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(equ['flakeOnly']*100) , end=" , ")
print(equ['AT'] , end=" , ")
print("{:.1f}".format(fequ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fequ['flakeOnly']*100) , end=" , ")
print(fequ['AT'] , end=" , ")
print("{:.1f}".format(spec['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(spec['flakeOnly']*100) , end=" , ")
print("")
# # Grab PP
for k in cData:
if 'PredatorPrey' in k:
# Grab Data
data = cData[k]
flows = data['F']
ineq = data['IN']
fin = data['FIN']
eq = data['EQ']
irr = data['U']
equ = data['EQU']
fequ = data['FEQU']
spec = data['M']
print("PP", end=" , ")
print("{:.1f}".format(flows['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(flows['flakeOnly']*100) , end=" , ")
print(flows['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print(0 , end=" , ")
print("{:.1f}".format(fin['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fin['flakeOnly']*100) , end=" , ")
print(fin['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print(0 , end=" , ")
print("{:.1f}".format(irr['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(irr['flakeOnly']*100) , end=" , ")
print(irr['AT'] , end=" , ")
print("{:.1f}".format(equ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(equ['flakeOnly']*100) , end=" , ")
print(equ['AT'] , end=" , ")
print("{:.1f}".format(fequ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fequ['flakeOnly']*100) , end=" , ")
print(fequ['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print("")
# Grab ECG
for k in cData:
if 'EcoliGly' in k:
# Grab Data
data = cData[k]
flows = data['F']
ineq = data['IN']
fin = data['FIN']
eq = data['EQ']
irr = data['U']
equ = data['EQU']
fequ = data['FEQU']
spec = data['M']
print("ECG", end=" , ")
print("{:.1f}".format(flows['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(flows['flakeOnly']*100) , end=" , ")
print(flows['AT'] , end=" , ")
print("{:.1f}".format(ineq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(ineq['flakeOnly']*100) , end=" , ")
print(ineq['AT'] , end=" , ")
print("{:.1f}".format(fin['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fin['flakeOnly']*100) , end=" , ")
print(fin['AT'] , end=" , ")
print("{:.1f}".format(eq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(eq['flakeOnly']*100) , end=" , ")
print(eq['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print(0 , end=" , ")
print("{:.1f}".format(equ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(equ['flakeOnly']*100) , end=" , ")
print(equ['AT'] , end=" , ")
print("{:.1f}".format(fequ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fequ['flakeOnly']*100) , end=" , ")
print(fequ['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print("")
# # Grab H4
for k in cData:
if 'Hailstone4' in k:
# Grab Data
data = cData[k]
flows = data['F']
ineq = data['IN']
fin = data['FIN']
eq = data['EQ']
irr = data['U']
equ = data['EQU']
fequ = data['FEQU']
spec = data['M']
print("H4", end=" , ")
print("{:.1f}".format(flows['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(flows['flakeOnly']*100) , end=" , ")
print(flows['AT'] , end=" , ")
print("{:.1f}".format(ineq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(ineq['flakeOnly']*100) , end=" , ")
print(ineq['AT'] , end=" , ")
print("{:.1f}".format(fin['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fin['flakeOnly']*100) , end=" , ")
print(fin['AT'] , end=" , ")
print("{:.1f}".format(eq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(eq['flakeOnly']*100) , end=" , ")
print(eq['AT'] , end=" , ")
print("{:.1f}".format(irr['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(irr['flakeOnly']*100) , end=" , ")
print(irr['AT'] , end=" , ")
print("{:.1f}".format(equ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(equ['flakeOnly']*100) , end=" , ")
print(equ['AT'] , end=" , ")
print("{:.1f}".format(fequ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fequ['flakeOnly']*100) , end=" , ")
print(fequ['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print("")
# # Grab EC
for k in cData:
if 'Ecoli' == k:
# Grab Data
data = cData[k]
flows = data['F']
ineq = data['IN']
fin = data['FIN']
eq = data['EQ']
irr = data['U']
equ = data['EQU']
fequ = data['FEQU']
spec = data['M']
print("EC", end=" , ")
print("{:.1f}".format(flows['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(flows['flakeOnly']*100) , end=" , ")
print(flows['AT'] , end=" , ")
print("{:.1f}".format(ineq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(ineq['flakeOnly']*100) , end=" , ")
print(ineq['AT'] , end=" , ")
print("{:.1f}".format(fin['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fin['flakeOnly']*100) , end=" , ")
print(fin['AT'] , end=" , ")
print("{:.1f}".format(eq['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(eq['flakeOnly']*100) , end=" , ")
print(eq['AT'] , end=" , ")
print("{:.1f}".format(irr['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(irr['flakeOnly']*100) , end=" , ")
print(irr['AT'] , end=" , ")
print("{:.1f}".format(equ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(equ['flakeOnly']*100) , end=" , ")
print(equ['AT'] , end=" , ")
print("{:.1f}".format(fequ['mutantPercent']*100) , end=" , ")
print("{:.1f}".format(fequ['flakeOnly']*100) , end=" , ")
print(fequ['AT'] , end=" , ")
print("-" , end=" , ")
print("-" , end=" , ")
print("")
# Each Summary File is a column in the old Results.csv file
# Handle each subject independently
def loadSummaryFile(sumPath):
val = {}
try:
#print(sumPath)
with open(sumPath, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
# Create Dictionaries
abstractTests = {}
countCT = 0
countCTFail = 0
countCTFailDet = 0
countRun = 0
countRunNP = 0
for row in spamreader:
#print(', '.join(row)) # DEBUG
if row[0] == 'Test':
# Skip 1st row : Header
#print("Skipped Header")
pass
else:
# Grab 1st value (AT)
# Grab 2nd value (Input)
# Grab 3rd value (Runs)
# Grab last value (NP)
abstractTest = row[0]
inputName = row[1]
runNumber = int(row[2])
notPassNumber = int(row[-1])
# Check if AT exists in dictionary
exists = abstractTest in abstractTests
#
if exists:
# Number of Inputs for AT
tmpDict = abstractTests[abstractTest]
testCount = tmpDict["testCount"]
tmpDict["testCount"] = testCount + 1
# Runs
runCount = tmpDict["runCount"]
tmpDict["runCount"] = runCount + runNumber
# Failures
npCount = tmpDict["npCount"]
tmpDict["npCount"] = npCount + notPassNumber
# Count Concrete Tests
if notPassNumber > 0:
countCTFail = countCTFail + 1
countRunNP = countRunNP + notPassNumber
# Flaky Failure
fail = tmpDict["fail"]
tmpDict["fail"] = fail + 1
# Count Deterministic Failures
if runNumber == notPassNumber:
countCTFailDet = countCTFailDet + 1
# Deterministic Failure
detFail = tmpDict["detFail"]
tmpDict["detFail"] = detFail + 1
else:
# First Row case to initialize dictionaries
# Number of Inputs for AT
tmpDict = {}
tmpDict["testCount"] = 1
# Runs
tmpDict["runCount"] = runNumber
# Failures
tmpDict["npCount"] = notPassNumber
# Count Concrete Tests
if notPassNumber > 0:
countCTFail = countCTFail + 1
countRunNP = countRunNP + notPassNumber
# Flaky Failure
tmpDict["fail"] = 1
# Count Deterministic Failures
if runNumber == notPassNumber:
countCTFailDet = countCTFailDet + 1
# Deterministic Failure
tmpDict["detFail"] = 1
else:
# If first test was flaky, there is no deterministic failure yet
tmpDict["detFail"] = 0
else:
# First test passed: initialize failure counters
tmpDict["fail"] = 0
tmpDict["detFail"] = 0
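# For reference, each abstractTests[<abstract test>] entry built above accumulates:
#   testCount - number of concrete inputs seen for the abstract test
#   runCount  - total runs across those inputs
#   npCount   - total non-passing runs
#   fail      - number of inputs with at least one failing run
#   detFail   - number of inputs whose runs all failed (deterministic failures)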
<filename>src/conformal_methods/utils.py
import numpy as np
import pandas as pd
from src.config import SRC
from numba import jit
from scipy.stats import norm
from scipy.stats import skewnorm
from sklearn.preprocessing import StandardScaler
def init_scoring_object(method, quantile=0.9):
def scoring_object(estimator, X, y):
if method in ("mean-based", "weighted-mean-based"):
y_pred = estimator.predict(X)
loss = np.mean((y - y_pred)**2)
return -loss.item()
if method == "quantile-based":
y_pred = estimator.predict(X)
if (quantile > 0) and (quantile < 1):
residual = y - y_pred
return -np.sum(residual * (quantile - (residual<0)))
else:
return np.nan
return scoring_object
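# Worked example for the quantile-based scorer (values are illustrative): the
# returned score is the negative pinball loss -sum(r * (q - 1{r < 0})) with
# r = y - y_pred, so for residuals [1.0, -1.0] and quantile 0.9 the loss is
#   1.0 * 0.9 + (-1.0) * (0.9 - 1) = 0.9 + 0.1 = 1.0, i.e. a score of -1.0.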
def CQR_conformity_score(lower_quant_hat, upper_quant_hat, y_conf):
first_arg = lower_quant_hat.flatten() - y_conf.flatten()
second_arg = y_conf.flatten() - upper_quant_hat.flatten()
conf_args = np.column_stack((first_arg, second_arg))
return np.max(conf_args, axis=1)
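# Worked example (illustrative values): with predicted bands lower = 0.0,
# upper = 2.0 and a calibration point y = 3.0, the score is
# max(0.0 - 3.0, 3.0 - 2.0) = 1.0, the distance by which y falls outside the
# band; points inside the band get a negative score.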
def extract_intervals(conf_set_list):
# preallocate interval boundary matrix
intervals = np.zeros((len(conf_set_list), 2))
for i in range(len(conf_set_list)):
intervals[i, 0] = np.min(conf_set_list[i])
intervals[i, 1] = np.max(conf_set_list[i])
return intervals
def flatten(l):
new_l = []
for tup in l:
sublist = []
for i, subelement in enumerate(tup):
if isinstance(subelement, tuple):
for j in subelement:
sublist.append(j)
else:
sublist.append(subelement)
new_l.append(tuple(sublist))
return new_l
def cond_variance(X_mat, error_type, linear_part=None):
if error_type == "simple_linear":
cond_variance = (X_mat.flatten()) ** 2
elif error_type == "varying_squared_linear_part":
cond_variance = 1 + linear_part ** 2
# print(np.histogram(cond_variance))
elif error_type == "varying_third_moment_mu":
t_dist_part = 3.0 / (3 - 2)
cond_variance = (
t_dist_part
* (1 + 2 * np.abs(linear_part) ** 3 / np.mean(np.abs(linear_part) ** 3))
** 2
)
else:
raise ValueError("Please specify regular error_type.")
return cond_variance
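# Note: t_dist_part = 3.0 / (3 - 2) = 3 matches the variance df / (df - 2) of a
# t-distribution with df = 3, which appears to be the heavy-tailed error term
# this branch is scaled for.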
def x_scale(X_mat, error_type, linear_part=None):
if error_type == "simple_linear":
scale = X_mat.flatten()
elif error_type == "varying_squared_linear_part":
scale = linear_part
elif error_type == "varying_third_moment_mu":
scale = linear_part
else:
raise ValueError("Please specify regular error_type.")
return scale
def construc_cond_metric_df(cond_variance, result_pred_bands, y_predict):
interval_lengths = result_pred_bands[:, 1] - result_pred_bands[:, 0]
covered = (y_predict.flatten() >= result_pred_bands[:, 0]) & (
y_predict.flatten() <= result_pred_bands[:, 1]
)
# df = pd.DataFrame(np.stack((cond_variance, interval_lengths, covered), axis=1))
df = np.stack((cond_variance, interval_lengths, covered), axis=1)
return df
def construc_cond_metric_df_simulation(x_scale, result_pred_bands, y_predict):
interval_lengths = result_pred_bands[:, 1] - result_pred_bands[:, 0]
covered = (y_predict.flatten() >= result_pred_bands[:, 0]) & (
y_predict.flatten() <= result_pred_bands[:, 1]
)
df = np.stack((x_scale, interval_lengths, covered), axis=1)
return df
@jit(nopython=True)
def conditional_cdf_hat(y_grid, y_vec, q_hat_conf_mat, q_hat_pred_mat):
# preallocate matrix for the predicted cdf values
f_hat_y_mat = np.zeros((q_hat_pred_mat.shape[0], len(y_grid.flatten())))
###
q_hat_conf_less_y_mat = q_hat_conf_mat <= y_vec.reshape(-1, 1)
f_hat_conf = (1.0 / q_hat_conf_less_y_mat.shape[1]) * np.sum(
q_hat_conf_less_y_mat, axis=1
)
###
for i, y in enumerate(y_grid):
q_hat_pred_less_y = q_hat_pred_mat <= y
f_hat_y = (1.0 / q_hat_pred_less_y.shape[1]) * np.sum(q_hat_pred_less_y, axis=1)
f_hat_y_mat[:, i] = f_hat_y
return f_hat_conf, f_hat_y_mat
@jit(nopython=True)
def p_y_func(alpha, y_grid, f_hat_conf, f_hat_y_mat):
f_hat_conf_abs_dev = np.abs(f_hat_conf.flatten() - 0.5)
f_hat_y_mat_abs_dev = np.abs(f_hat_y_mat - 0.5)
conf_set_list = []
# fix the X_n+1 prediction point:
for i in range(f_hat_y_mat.shape[0]):
conf_set = []
# fix the y grid value:
for j, y in enumerate(y_grid):
val = (
1
/ (len(f_hat_conf_abs_dev) + 1)
* np.sum(f_hat_y_mat_abs_dev[i, j] <= f_hat_conf_abs_dev)
)
if val > alpha:
conf_set.append(y)
conf_set_list.append(conf_set)
return conf_set_list
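# Minimal end-to-end sketch for the two jitted functions above (assumes
# q_hat_conf_mat / q_hat_pred_mat hold per-row quantile predictions on the
# calibration and test sets, which is how they are consumed here; all names
# below are illustrative):
# y_grid = np.linspace(y_conf.min(), y_conf.max(), 200)
# f_hat_conf, f_hat_y_mat = conditional_cdf_hat(y_grid, y_conf, q_hat_conf_mat, q_hat_pred_mat)
# conf_sets = p_y_func(0.1, y_grid, f_hat_conf, f_hat_y_mat)
# intervals = extract_intervals(conf_sets)  # (n_test, 2) lower/upper bounds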
def calc_normal_params(mu_1, mu_0, X, heteroscedastic):
means = mu_1 - mu_0
if heteroscedastic:
variances = X[:,0]**2 + np.ones(len(means))
else:
variances = np.ones(len(means)) * 2
return means, variances
def get_oracle_interval(lower, upper):
def oracle_interval(mean, var):
std = np.sqrt(var)
norm_obj = norm(loc=mean,scale=std)
quantiles = norm_obj.ppf([lower, upper])
return quantiles
return oracle_interval
def get_oracle_intervals(means, variances):
oracle_interval_fun = get_oracle_interval(0.05, 0.95)
result = list(map(oracle_interval_fun, means, variances))
return result
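# Example: a unit-variance effect centred at 1.0 gives the central 90% band
# get_oracle_interval(0.05, 0.95)(1.0, 1.0) ~= [-0.645, 2.645], i.e.
# 1.0 +/- 1.645 standard deviations.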
def share_signif_fun(oracle_intervals, ite_pred_intervals):
which_oracle_ints_signif = np.logical_not((oracle_intervals[:,0] <= 0) & (oracle_intervals[:,1] >= 0))
which_predicted_ints_signif = np.logical_not((ite_pred_intervals[:,0] <= 0) & (ite_pred_intervals[:,1] >= 0))
oracle_signif_signs = np.sign(np.mean(oracle_intervals, axis=1))
predicted_signif_signs = np.sign(np.mean(ite_pred_intervals, axis=1))
same_sign = (oracle_signif_signs == predicted_signif_signs)
correctly_signif_given_oracle_signif = which_oracle_ints_signif & which_predicted_ints_signif & same_sign
if np.sum(which_oracle_ints_signif) == 0:
return -1.0
else:
return np.sum(correctly_signif_given_oracle_signif) / np.sum(which_oracle_ints_signif)
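# Example: an oracle interval [0.5, 1.5] is significant (excludes zero) with a
# positive midpoint; a predicted interval [0.2, 1.1] then counts as correctly
# significant, while [-0.3, 1.1] straddles zero and does not.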
def share_signif_oracles(oracle_intervals, ite_vals):
which_oracle_ints_signif = np.logical_not((oracle_intervals[:,0] <= 0) & (oracle_intervals[:,1] >= 0))
which_ites_not_zero = (ite_vals != 0)
signif_oracles_given_ite_not_zero = which_oracle_ints_signif & which_ites_not_zero
return np.sum(signif_oracles_given_ite_not_zero) / len(oracle_intervals)
def share_signif_intervals_given_ite_not_zero(ite_pred_intervals, ite_vals):
which_predicted_ints_signif = np.logical_not((ite_pred_intervals[:,0] <= 0) & (ite_pred_intervals[:,1] >= 0))
which_ites_not_zero = (ite_vals != 0)
signif_intervals_given_ite_not_zero = which_predicted_ints_signif & which_ites_not_zero
return np.sum(signif_intervals_given_ite_not_zero) / len(ite_pred_intervals)
def generate_treatment_effects_helper(X, treatment_case):
n, p = X.shape
if treatment_case == "binary":
condition = 1 * (X[:,0] > 0.0)
treat = np.where(condition == 0, -1.0, condition)
tau_x = treat
elif treatment_case == "gaussian":
beta_treat = np.ones(p)
half_point = round(p/2)
beta_treat[:half_point] = 1.0
beta_treat[half_point:] = 0.0
# division by true standard deviation of the sum to yield variance 1
tau_x = (X @ beta_treat) / np.sqrt(half_point)
else:
raise ValueError("Please specify a valid main effect type.")
return tau_x
def dgp_ate_zero(n, p, effect_size, main_effect_case="const", treatment_case="binary"):
X = generate_X_fixed_positions(n = n, p=p, X_dist="normal", cor="none", standardize=False, rho=0.5)
tau_x = generate_treatment_effects_helper(X=X, treatment_case=treatment_case)
if main_effect_case == "const":
mu_1 = np.ones(n) + effect_size * tau_x
mu_0 = np.ones(n)
elif main_effect_case == "linear":
beta = np.ones(p)
beta[::2] = 0.0
mu_1 = X @ beta + effect_size * tau_x
mu_0 = X @ beta
elif main_effect_case == "non-linear":
beta = np.ones(p)
beta[::2] = 0.0
linear_part = X @ beta
base_fun = 2 * np.log(1 + np.exp(linear_part))
mu_1 = base_fun + effect_size * tau_x
mu_0 = base_fun
else:
raise ValueError("Please specify a valid main effect type.")
# noise:
eps_1 = np.random.normal(0, 1, n)
eps_0 = np.random.normal(0, 1, n)
# draw treatment assignment variable:
W = np.random.binomial(n=1, p=0.5, size=(n,))
# calculate other quantities of interest:
ite = mu_1 - mu_0 + eps_1 - eps_0
# observed y_obs depends on W:
y_obs = W * (mu_1 + eps_1) + (1 - W) * (mu_0 + eps_0)
return ite, mu_1, mu_0, eps_1, eps_0, y_obs, X, W
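# Usage sketch (argument values are illustrative):
# ite, mu_1, mu_0, eps_1, eps_0, y_obs, X, W = dgp_ate_zero(
#     n=1000, p=10, effect_size=0.5, main_effect_case="linear", treatment_case="binary"
# )
# Under "binary" treatment, tau_x is +1 or -1 depending on sign(X[:, 0]), so the
# average treatment effect is ~0 while individual effects are not.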
def generate_X_fixed_positions(
n,
p,
X_dist="normal",
cor="none",
standardize=False,
rho=0.15,
k=5,
alpha=5,
uniform_lower=0,
uniform_upper=1,
):
# Generate X matrix
if X_dist == "normal":
X = np.random.normal(0, 1, n * p).reshape((n, p))
if X_dist == "binom":
X = np.random.binomial(n=1, p=0.5, size=(n, p))
if X_dist == "uniform":
X = np.random.uniform(uniform_lower, uniform_upper, n * p).reshape((n, p))
if X_dist == "skewed_normal":
X = skewnorm.rvs(alpha, size=n * p).reshape((n, p))
if X_dist == "mixture":
X = np.zeros(n * p).reshape((n, p))
x1 = np.random.normal(0, 1, n * p).reshape((n, p))
x2 = np.random.binomial(n=1, p=0.5, size=(n, p))
x3 = skewnorm.rvs(5, size=n * p).reshape((n, p))
u = np.random.uniform(0, 1, p)
i1 = u <= 1 / 3
i2 = (1 / 3 < u) & (u <= 2 / 3)
i3 = u > 2 / 3
X[:, i1] = x1[:, i1]
X[:, i2] = x2[:, i2]
X[:, i3] = x3[:, i3]
# setting the decisive 5 covariates to a fixed distribution for later purposes
X[:, 0] = np.random.normal(0, 1, n)
X[:, 4] = np.random.binomial(n=1, p=0.5, size=n)
X[:, 6] = skewnorm.rvs(5, size=n)
X[:, 8] = skewnorm.rvs(5, size=n)
X[:, 9] = np.random.binomial(n=1, p=0.5, size=n)
# Pairwise correlation
if cor == "pair":
b = (-2 * np.sqrt(1 - rho) + 2 * np.sqrt((1 - rho) + p * rho)) / (2 * p)
a = b + np.sqrt(1 - rho)
# calculate symmetric square root of p x p matrix whose diagonals are 1 and off diagonals are rho:
sig_half = np.full(shape=(p, p), fill_value=b)
np.fill_diagonal(sig_half, a)
X = X @ sig_half
# Auto-correlation
if cor == "auto":
for j in range(p):
mat = X[:, max(0, j - k) : j + 1]
wts = np.random.uniform(0, 1, mat.shape[1]).flatten()
wts = wts / np.sum(wts)
tmp = mat * wts
X[:, j] = np.array(np.mean(tmp, axis=1))
# Standardize, if necessary
if standardize:
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
return X
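# Sanity check for the "pair" branch: a and b are chosen so that sig_half is a
# symmetric square root of the equicorrelation matrix (unit diagonal,
# off-diagonal rho). Illustrative verification:
# p, rho = 4, 0.5
# b = (-2 * np.sqrt(1 - rho) + 2 * np.sqrt((1 - rho) + p * rho)) / (2 * p)
# a = b + np.sqrt(1 - rho)
# sig_half = np.full((p, p), b)
# np.fill_diagonal(sig_half, a)
# np.allclose(sig_half @ sig_half, (1 - rho) * np.eye(p) + rho)  # True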
def generate_y_fixed_positions(
X_mat,
eps_dist="normal",
error_type="const",
functional_form="linear",
sigma=1,
force_beta_positive=True,
non_zero_beta_count=None,
magnitude_nonzero_coeffs=1,
signal_noise_ratio=None,
alpha=5,
df=4,
):
n, p = X_mat.shape
if non_zero_beta_count is None:
non_zero_beta_count = int(np.ceil(p / 10))
elif non_zero_beta_count > p:
raise ValueError(
"Number of non-zero coefficients cannot exceed the number of covariates in X."
)
else:
non_zero_beta_count = int(non_zero_beta_count)
# calculate the linear part of the conditional expectation function, or the error multiplicator:
# Sample s variables uniformly at random, define true coefficients
if eps_dist == "t":
non_zero_coeffs = np.array([0, 4, 6, 8, 9])
beta = np.zeros(p)
<gh_stars>10-100
import datetime as dt
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import deactivate_all
from workbench import factories
from workbench.invoices.models import Invoice
from workbench.tools.formats import local_date_format
from workbench.tools.forms import WarningsForm
from workbench.tools.testing import check_code, messages
from workbench.tools.validation import in_days
def invoice_to_dict(invoice, **kwargs):
return {
"customer": invoice.customer_id or "",
"contact": invoice.contact_id or "",
"title": invoice.title,
"description": invoice.description,
"owned_by": invoice.owned_by_id,
"subtotal": invoice.subtotal,
"discount": invoice.discount,
"third_party_costs": invoice.third_party_costs,
"liable_to_vat": invoice.liable_to_vat,
"postal_address": invoice.postal_address,
"status": invoice.status,
"type": invoice.type,
"closed_on": invoice.closed_on and invoice.closed_on.isoformat() or "",
"invoiced_on": invoice.invoiced_on and invoice.invoiced_on.isoformat() or "",
"due_on": invoice.due_on and invoice.due_on.isoformat() or "",
"show_service_details": invoice.show_service_details,
**kwargs,
}
class InvoicesTest(TestCase):
fixtures = ["exchangerates.json"]
def setUp(self):
deactivate_all()
def test_down_payment_invoice(self):
"""Down payment invoices do not have service details and validate the
postal address (as any other invoice too)"""
project = factories.ProjectFactory.create()
self.client.force_login(project.owned_by)
url = project.urls["createinvoice"] + "?type=down-payment"
response = self.client.get(url)
self.assertContains(response, "Down payment")
self.assertNotContains(response, "id_show_service_details")
response = self.client.post(
url,
{
"contact": project.contact_id,
"title": project.title,
"owned_by": project.owned_by_id,
"discount": 0,
"liable_to_vat": 1,
"postal_address": "Street\nCity",
"subtotal": 2500,
"third_party_costs": 0,
},
)
self.assertContains(response, 'value="short-postal-address"')
response = self.client.post(
url,
{
"contact": project.contact_id,
"title": project.title,
"owned_by": project.owned_by_id,
"discount": 0,
"liable_to_vat": 1,
"postal_address": "Anything\nStreet\nCity",
"subtotal": 2500,
"third_party_costs": 0,
},
)
invoice = Invoice.objects.get()
self.assertRedirects(response, invoice.urls["detail"])
self.assertAlmostEqual(invoice.subtotal, Decimal("2500"))
def test_create_service_invoice_from_offer(self):
"""Service invoice creation from offers (services) works and results in
the expected invoice total"""
service = factories.ServiceFactory.create(cost=100, allow_logging=True)
url = service.project.urls["createinvoice"] + "?type=services&source=offer"
self.client.force_login(service.project.owned_by)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "id_show_service_details")
response = self.client.post(
url,
{
"contact": service.project.contact_id,
"title": service.project.title,
"owned_by": service.project.owned_by_id,
"discount": "0",
"liable_to_vat": "1",
"postal_address": "Anything\nStreet\nCity",
"selected_services": [service.pk],
"disable_logging": 1,
},
)
# print(response, response.content.decode("utf-8"))
invoice = Invoice.objects.get()
self.assertRedirects(response, invoice.urls["detail"])
self.assertEqual(invoice.subtotal, 100)
self.assertEqual(invoice.service_period, None)
service.refresh_from_db()
self.assertFalse(service.allow_logging)
service = invoice.services.get()
response = self.client.post(
service.urls["update"],
{
"title": service.title,
"description": service.description,
"effort_type": service.effort_type,
"effort_rate": service.effort_rate or "",
"effort_hours": service.effort_hours or "",
"cost": service.cost or "",
"third_party_costs": service.third_party_costs or "",
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 202)
self.assertRedirects(
self.client.post(invoice.urls["delete"]), invoice.urls["list"]
)
self.assertEqual(Invoice.objects.count(), 0)
def test_create_service_invoice_from_logbook(self):
"""Service invoice creation from the logbook works and results in the
expected invoice total"""
project = factories.ProjectFactory.create()
service1 = factories.ServiceFactory.create(
project=project, title="cost-only", cost=100
)
service2 = factories.ServiceFactory.create(project=project, title="no-rate")
service3 = factories.ServiceFactory.create(
project=project,
title="with-rate",
effort_type="Consulting",
effort_rate=200,
)
service4 = factories.ServiceFactory.create(project=project, title="nothing")
cost = factories.LoggedCostFactory.create(
service=service1,
cost=10,
description="Test",
rendered_on=dt.date(2020, 3, 18),
)
hours = factories.LoggedHoursFactory.create(
service=service1,
hours=1,
description="Test",
rendered_on=dt.date(2020, 3, 20),
)
factories.LoggedHoursFactory.create(
service=service2, hours=2, rendered_on=dt.date(2020, 3, 20)
)
factories.LoggedHoursFactory.create(
service=service3, hours=3, rendered_on=dt.date(2020, 3, 22)
)
url = project.urls["createinvoice"] + "?type=services&source=logbook"
self.client.force_login(project.owned_by)
response = self.client.get(url)
# print(response, response.content.decode("utf-8"))
self.assertContains(response, "<strong>cost-only</strong><br>10.00")
self.assertContains(response, "1.0h logged but no hourly rate defined.")
self.assertContains(response, "<strong>no-rate</strong><br>0.00")
self.assertContains(response, "2.0h logged but no hourly rate defined.")
self.assertContains(response, "<strong>with-rate</strong><br>600.00")
self.assertContains(response, "id_show_service_details")
cost.service = service1
cost.save()
response = self.client.post(
url,
{
"contact": project.contact_id,
"title": project.title,
"owned_by": project.owned_by_id,
"discount": "0",
"liable_to_vat": "1",
"postal_address": "Anything\nStreet\nCity",
"selected_services": [
service1.pk,
service2.pk,
service3.pk,
service4.pk,
],
"disable_logging": 0,
},
)
invoice = Invoice.objects.get()
self.assertRedirects(response, invoice.urls["detail"])
self.assertEqual(invoice.subtotal, 610)
self.assertEqual(invoice.service_period, "18.03.2020 - 22.03.2020")
cost.refresh_from_db()
self.assertEqual(cost.invoice_service.invoice, invoice)
hours.refresh_from_db()
self.assertEqual(hours.invoice_service.invoice, invoice)
self.assertEqual(service1.invoice_services.get().invoice, invoice)
self.assertEqual(service2.invoice_services.get().invoice, invoice)
self.assertEqual(service3.invoice_services.get().invoice, invoice)
self.assertEqual(service4.invoice_services.count(), 0)
response = self.client.post(
cost.urls["update"],
{
"service": cost.service_id,
"rendered_on": cost.rendered_on.isoformat(),
"third_party_costs": cost.third_party_costs or "",
"cost": 2 * cost.cost,
"description": cost.description,
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "This entry is already part of an invoice.")
response = self.client.post(
hours.urls["update"],
{
"service": hours.service_id,
"rendered_on": hours.rendered_on.isoformat(),
"rendered_by": hours.rendered_by_id,
"hours": hours.hours,
"description": hours.description,
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "This entry is already part of an invoice.")
response = self.client.post(
cost.urls["update"],
{
"modal-service": cost.service_id,
"modal-rendered_by": cost.rendered_by_id,
"modal-rendered_on": cost.rendered_on.isoformat(),
"modal-third_party_costs": cost.third_party_costs or "",
"modal-cost": 2 * cost.cost,
"modal-description": cost.description,
WarningsForm.ignore_warnings_id: "part-of-invoice",
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 202)
self.assertContains(
self.client.get("/"),
"Logged cost 'Test' has been updated successfully.",
)
cost.refresh_from_db()
self.assertAlmostEqual(cost.cost, Decimal("20"))
invoice.refresh_from_db()
self.assertAlmostEqual(invoice.subtotal, 610) # unchanged
response = self.client.get(invoice.urls["pdf"])
self.assertEqual(response.status_code, 200) # No crash
response = self.client.get(invoice.urls["xlsx"])
self.assertEqual(response.status_code, 200) # No crash
response = self.client.post(
invoice.urls["delete"],
{WarningsForm.ignore_warnings_id: "release-logged-services"},
)
self.assertRedirects(response, invoice.urls["list"])
self.assertEqual(Invoice.objects.count(), 0)
self.assertEqual(
messages(response),
[f"Invoice '{invoice}' has been deleted successfully."],
)
def test_delete_service_invoice_with_logs(self):
"""Deleting service invoices with related logbook entries unarchives
those entries"""
service = factories.ServiceFactory.create()
cost = factories.LoggedCostFactory.create(
cost=150, service=service, description="this"
)
url = service.project.urls["createinvoice"] + "?type=services&source=logbook"
self.client.force_login(service.project.owned_by)
response = self.client.post(
url,
{
"contact": service.project.contact_id,
"title": service.project.title,
"owned_by": service.project.owned_by_id,
"discount": "0",
"liable_to_vat": "1",
"postal_address": "Anything\nStreet\nCity",
"selected_services": [service.pk],
"disable_logging": 0,
},
)
invoice = Invoice.objects.get()
self.assertRedirects(response, invoice.urls["detail"])
self.assertAlmostEqual(invoice.subtotal, Decimal(150))
cost.refresh_from_db()
self.assertEqual(cost.invoice_service.invoice, invoice)
self.assertEqual(cost.invoice_service.project_service, service)
response = self.client.get(invoice.urls["delete"])
self.assertContains(response, WarningsForm.ignore_warnings_id)
response = self.client.post(invoice.urls["delete"])
self.assertContains(response, "Logged services are linked with this invoice.")
self.assertEqual(Invoice.objects.count(), 1)
cost.refresh_from_db()
self.assertTrue(cost.invoice_service)
response = self.client.post(
invoice.urls["delete"],
{WarningsForm.ignore_warnings_id: "release-logged-services"},
)
self.assertRedirects(response, invoice.urls["list"])
self.assertEqual(
messages(response),
[f"Invoice '{invoice}' has been deleted successfully."],
)
cost.refresh_from_db()
self.assertEqual(cost.invoice_service, None)
# Creating the invoice again succeeds.
response = self.client.post(
url,
{
"contact": service.project.contact_id,
"title": service.project.title,
"owned_by": service.project.owned_by_id,
"discount": "0",
"liable_to_vat": "1",
"postal_address": "Anything\nStreet\nCity",
"selected_services": [service.pk],
"disable_logging": 0,
},
)
invoice = Invoice.objects.get()
self.assertRedirects(response, invoice.urls["detail"])
self.assertAlmostEqual(invoice.subtotal, Decimal(150))
def test_pre_form(self):
"""Creating invoices directly shows a pre-form allowing selection of
customer/contact combinations"""
self.client.force_login(factories.UserFactory.create())
# pre_form does not have these fields
response = self.client.get(Invoice.urls["create"])
self.assertContains(response, 'method="GET"')
self.assertNotContains(response, 'id="id_title"')
self.assertNotContains(response, 'id="id_description"')
# Nonexistent entries
response = self.client.get(Invoice.urls["create"] + "?contact=0")
self.assertContains(response, 'method="GET"')
self.assertNotContains(response, 'id="id_title"')
self.assertNotContains(response, 'id="id_description"')
response = self.client.get(Invoice.urls["create"] + "?customer=0")
self.assertContains(response, 'method="GET"')
self.assertNotContains(response, 'id="id_title"')
self.assertNotContains(response, 'id="id_description"')
def test_create_update_person_invoice(self):
"""Test creating and updating invoices not linked to projects"""
person = factories.PersonFactory.create(
organization=factories.OrganizationFactory.create()
)
self.client.force_login(person.primary_contact)
url = Invoice.urls["create"] + f"?contact={person.pk}"
response = self.client.get(url)
self.assertContains(response, 'method="POST"')
self.assertNotContains(response, 'data-field-value="')
postal_address = factories.PostalAddressFactory.create(person=person)
response = self.client.get(url)
self.assertContains(response, 'data-field-value="', 1)
person.organization.default_billing_address = "Default"
person.organization.save()
response = self.client.get(url)
self.assertContains(response, 'data-field-value="', 2)
response = self.client.post(
url,
{
"customer": person.organization_id,
"contact": person.id,
"title": "Stuff",
"owned_by": person.primary_contact_id,
"subtotal": "110",
"discount": "10",
"liable_to_vat": "1",
"postal_address": postal_address.postal_address,
"third_party_costs": 0,
},
)
invoice = Invoice.objects.get()
self.assertRedirects(response, invoice.urls["detail"])
self.assertAlmostEqual(invoice.total_excl_tax, Decimal("100"))
self.assertAlmostEqual(invoice.total, Decimal("107.7"))
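# Subtotal 110 minus discount 10 gives 100 excl. tax; the total adds VAT at
# what is apparently the configured 7.7% rate: 100 * 1.077 = 107.70.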
def test_customer_create_invoice(self):
"""Creating invoices for organizations shows the postal address selector too"""
person = factories.PersonFactory.create(
organization=factories.OrganizationFactory.create()
)
self.client.force_login(person.primary_contact)
response = self.client.get(
f"/invoices/create/?customer={person.organization.id}"
)
self.assertContains(
response, 'value="The Organization Ltd" placeholder="Organization"'
)
self.assertContains(response, 'id="id_postal_address"')
self.assertNotContains(response, 'data-field-value="')
self.assertNotContains(response, "id_show_service_details")
person.organization.default_billing_address = "Default"
person.organization.save()
response = self.client.get(
f"/invoices/create/?customer={person.organization.id}"
)
self.assertContains(response, 'id="id_postal_address"')
self.assertContains(response, 'data-field-value="')
def test_update_invoice(self):
"""Updating invoices produces a variety of errors and warnings"""
invoice = factories.InvoiceFactory.create(
contact=None, postal_address="Test\nStreet\nCity"
)
self.client.force_login(invoice.owned_by)
response = self.client.post(invoice.urls["update"], invoice_to_dict(invoice))
self.assertContains(response, "No contact selected.")
response = self.client.post(
invoice.urls["update"],
invoice_to_dict(invoice, **{WarningsForm.ignore_warnings_id: "no-contact"}),
)
self.assertRedirects(response, invoice.urls["detail"])
response = self.client.get(invoice.urls["delete"])
self.assertEqual(response.status_code, 200)
response = self.client.post(
invoice.urls["update"], invoice_to_dict(invoice, status=Invoice.SENT)
)
self.assertContains(
response, "Invoice and/or due date missing for selected state."
)
person = factories.PersonFactory.create(organization=invoice.customer)
response = self.client.post(
invoice.urls["update"],
invoice_to_dict(
invoice,
contact=person.id,
status=Invoice.SENT,
invoiced_on=dt.date.today().isoformat(),
due_on=dt.date.today().isoformat(),
),
)
self.assertRedirects(response, invoice.urls["detail"])
invoice.refresh_from_db()
response = self.client.post(
invoice.urls["update"],
invoice_to_dict(invoice, closed_on=dt.date.today().isoformat()),
)
self.assertContains(response, "Invalid status when closed on is already set.")
response = self.client.get(invoice.urls["delete"])
self.assertRedirects(response, invoice.urls["detail"])
self.assertEqual(
messages(response), ["Invoices in preparation may be deleted, others not."]
)
invoice.refresh_from_db()
response = self.client.post(
invoice.urls["update"],
invoice_to_dict(invoice, postal_address=invoice.postal_address + " hello"),
)
# print(response, response.content.decode("utf-8"))
self.assertContains(
response,
"You are attempting to change 'Postal address'."
" I am trying to prevent unintentional changes. Are you sure?",
)
response = self.client.post(
invoice.urls["update"], invoice_to_dict(invoice, status=Invoice.PAID)
)
self.assertRedirects(response, invoice.urls["detail"])
invoice.refresh_from_db()
self.assertEqual(invoice.closed_on, dt.date.today())
def test_list(self):
"""Filter form smoke test"""
factories.InvoiceFactory.create()
user = factories.UserFactory.create()
self.client.force_login(user)
code = check_code(self, "/invoices/")
code("")
code("q=test")
code("s=open")
code("s=40") # PAID
code(f"org={factories.OrganizationFactory.create().pk}")
code(f"owned_by={user.id}")
code("owned_by=-1") # mine
code("owned_by=0") # only inactive
code("export=xlsx")
@override_settings(BATCH_MAX_ITEMS=5)
def test_too_many_invoices(self):
"""Creating a PDF with too many invoices fails"""
invoice = factories.InvoiceFactory.create()
for i in range(5):
factories.InvoiceFactory.create(
customer=invoice.customer,
contact=invoice.contact,
owned_by=invoice.owned_by,
)
self.client.force_login(invoice.owned_by)
response = self.client.get("/invoices/?export=pdf")
self.assertRedirects(
response, "/invoices/?error=1", fetch_redirect_response=False
)
self.assertEqual(
messages(response), ["6 invoices in selection, that's too many."]
)
def test_list_pdfs(self):
"""Various checks when exporting PDFs of lists"""
user = factories.UserFactory.create()
self.client.force_login(user)
response = self.client.get("/invoices/?export=pdf")
self.assertEqual(response.status_code, 302)
self.assertEqual(messages(response), ["No invoices found."])
factories.InvoiceFactory.create(
invoiced_on=in_days(-60),
due_on=in_days(-45),
status=Invoice.SENT,
)
response = self.client.get("/invoices/?export=pdf")
self.assertEqual(response.status_code, 200)
self.assertEqual(response["content-type"], "application/pdf")
def test_model_validation(self):
"""Invoice model validation"""
invoice = Invoice(
title="Test",
customer=factories.OrganizationFactory.create(),
owned_by=factories.UserFactory.create(),
type=Invoice.FIXED,
_code=0,
status=Invoice.SENT,
postal_address="Test\nStreet\nCity",
)
msg = ["Invoice and/or due date missing for selected state."]
with self.assertRaises(ValidationError) as cm:
invoice.clean_fields(exclude=["status"])
self.assertEqual(list(cm.exception), msg)
with self.assertRaises(ValidationError) as cm:
invoice.clean_fields()
self.assertEqual(list(cm.exception), [("status", msg)])
with self.assertRaises(ValidationError) as cm:
Invoice(
title="Test",
customer=factories.OrganizationFactory.create(),
owned_by=factories.UserFactory.create(),
type=Invoice.FIXED,
_code=0,
status=Invoice.SENT,
postal_address="Test\nStreet\nCity",
invoiced_on=dt.date.today(),
due_on=in_days(-1),
).full_clean()
self.assertEqual(
list(cm.exception), [("due_on", ["Due date has to be after invoice date."])]
)
with self.assertRaises(ValidationError) as cm:
Invoice(
title="Test",
customer=factories.OrganizationFactory.create(),
owned_by=factories.UserFactory.create(),
import numpy as np
import eccodes
import netCDF4 as nc
import Ngl
import Nio
import os
import datetime
from contextlib import ExitStack
import sys
current_path = sys.path[0]
ex_op_str = current_path[current_path.index('scripts')+8 : current_path.index('w2w_ensembleplots')-1]
sys.path.append('/lsdfos/kit/imk-tro/projects/MOD/Gruppe_Knippertz/nw5893/scripts/{}'.format(ex_op_str))
from w2w_ensembleplots.core.download_forecast import get_timeshift
def triangle_contourplot(variable, run, domain, model, stat_processing, plot_type):
# set data path and hours list #
if model == 'icon-eu-eps':
model_path_deprec = 'icon-eu-eps'
hours = list(range(0,48,1)) + list(range(48,72,3)) + list(range(72,120+1,6))
elif model == 'icon-global-eps':
model_path_deprec = 'icon-eps'
hours = list(range(0,48,1)) + list(range(48,72,3)) + list(range(72,120,6)) + list(range(120,180+1,12))
path = dict(base = '/lsdfos/kit/imk-tro/projects/MOD/Gruppe_Knippertz/nw5893/',
data = 'forecast_archive/{}/raw_grib/'.format(model_path_deprec),
grid = 'forecast_archive/{}/grid/'.format(model_path_deprec),
plots = 'plots/operational/triangle_contourplots/',
colorpalette = 'additional_data/colorpalettes/',
shapefiles = 'additional_data/shapefiles/',
topo = 'forecast_archive/{}/invariant/'.format(model_path_deprec))
# set basic plot path #
if plot_type in ('small_map_only', 'labelBar1', 'labelBar2', 'text'):
prob_of_exc_variables = ('tot_prec_24h', 'tot_prec_48h', 'tot_prec_06h',
'tot_prec_03h', 'tot_prec_01h', 'acc_prec',
't_850hpa', 'mslp', 'wind_10m', 'wind_300hpa',
'gph_500hpa', 'gph_300hpa', 'tqv')
if variable['name'] in prob_of_exc_variables:
path['plots'] = 'plots/operational/prob_of_exc/forecast/{}/'.format(variable['name'])
else:
print('error: variable unknown')
# create plot subfolders #
subfolder = 'run_{:4d}{:02d}{:02d}{:02d}'.format(run['year'], run['month'], run['day'], run['hour'])
if not os.path.isdir(path['base'] + path['plots'] + subfolder):
os.mkdir(path['base'] + path['plots'] + subfolder)
path['plots'] += subfolder + '/'
subfolder = domain['name']
if not os.path.isdir(path['base'] + path['plots'] + subfolder):
os.mkdir(path['base'] + path['plots'] + subfolder)
path['plots'] += subfolder + '/'
if plot_type not in ('small_map_only', 'labelBar1', 'labelBar2', 'text'):
if stat_processing['method'] == 'prob_of_exc':
subfolder = 'prob_of_exc'
elif stat_processing['method'] == 'member_extract':
subfolder = 'members'
else:
subfolder = 'statistical'
if not os.path.isdir(path['base'] + path['plots'] + subfolder):
os.mkdir(path['base'] + path['plots'] + subfolder)
path['plots'] += subfolder + '/'
# set path and filenames for first grib file variable #
if variable['name'] in ('tot_prec_24h', 'tot_prec_48h', 'tot_prec_06h',
'tot_prec_03h', 'tot_prec_01h', 'acc_prec'):
if model == 'icon-eu-eps':
filename_beginning = 'icon-eu-eps_europe_icosahedral_single-level'
elif model == 'icon-global-eps':
filename_beginning = 'icon-eps_global_icosahedral_single-level'
dwd_varname = 'tot_prec'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/tot_prec/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 't_850hpa':
filename_beginning = 'icon-eu-eps_europe_icosahedral_pressure-level'
dwd_varname = '850_t'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/t_850hPa/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'mslp':
filename_beginning = 'icon-eu-eps_europe_icosahedral_single-level'
dwd_varname = 'ps'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/ps/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'wind_10m':
if model == 'icon-eu-eps':
filename_beginning = 'icon-eu-eps_europe_icosahedral_single-level'
elif model == 'icon-global-eps':
filename_beginning = 'icon-eps_global_icosahedral_single-level'
dwd_varname = 'u_10m'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/u_10m/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'wind_300hpa':
filename_beginning = 'icon-eu-eps_europe_icosahedral_pressure-level'
dwd_varname = '300_u'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/u_300hPa/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'gph_500hpa':
filename_beginning = 'icon-eu-eps_europe_icosahedral_pressure-level'
dwd_varname = '500_fi'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/fi_500hPa/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'gph_300hpa':
filename_beginning = 'icon-eu-eps_europe_icosahedral_pressure-level'
dwd_varname = '300_fi'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/fi_300hPa/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'tqv':
filename_beginning = 'icon-eu-eps_europe_icosahedral_single-level'
dwd_varname = 'tqv'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/tqv/'.format(\
run['year'], run['month'], run['day'], run['hour'])
# load first grib file variable #
filenames_all = []
for hour in hours:
filenames_all.append('{}_{:4d}{:02d}{:02d}{:02d}_{:03d}_{}.grib2'.format(\
filename_beginning, run['year'], run['month'], run['day'], run['hour'],\
hour, dwd_varname))
if model == 'icon-eu-eps':
data_array = np.empty((len(hours), 40, 75948), dtype='float32')
elif model == 'icon-global-eps':
data_array = np.empty((len(hours), 40, 327680), dtype='float32')
with ExitStack() as stack:
files_all = [stack.enter_context(open(path['base'] + path['data'] + path['data_subfolder'] + filename,'rb'))\
for filename in filenames_all]
for i, file in enumerate(files_all):
for j in range(40):
grib_id = eccodes.codes_grib_new_from_file(file)
data_array[i, j, :] = eccodes.codes_get_array(grib_id, 'values')
eccodes.codes_release(grib_id)
del files_all, grib_id, stack
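# data_array now holds one field per lead time and ensemble member:
# shape (len(hours), 40 members, number of grid cells).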
# set path and filenames for second grib file variable #
if variable['name'] == 'mslp' or variable['name'] == 'wind_10m' or variable['name'] == 'wind_300hpa':
if variable['name'] == 'mslp':
dwd_varname = 't_2m'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/t_2m/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'wind_10m':
dwd_varname = 'v_10m'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/v_10m/'.format(\
run['year'], run['month'], run['day'], run['hour'])
elif variable['name'] == 'wind_300hpa':
dwd_varname = '300_v'
path['data_subfolder'] = 'run_{:4d}{:02d}{:02d}{:02d}/v_300hPa/'.format(\
run['year'], run['month'], run['day'], run['hour'])
filenames_all = []
for hour in hours:
filenames_all.append('{}_{:4d}{:02d}{:02d}{:02d}_{:03d}_{}.grib2'.format(\
filename_beginning, run['year'], run['month'], run['day'], run['hour'],\
hour, dwd_varname))
# load second grib file variable #
if model == 'icon-eu-eps':
data_array2 = np.empty((len(hours), 40, 75948), dtype='float32')
elif model == 'icon-global-eps':
data_array2 = np.empty((len(hours), 40, 327680), dtype='float32')
with ExitStack() as stack:
files_all = [stack.enter_context(open(path['base'] + path['data'] + path['data_subfolder']\
+ filename,'rb')) for filename in filenames_all]
for i, file in enumerate(files_all):
for j in range(40):
grib_id = eccodes.codes_grib_new_from_file(file)
data_array2[i, j, :] = eccodes.codes_get_array(grib_id, 'values')
eccodes.codes_release(grib_id)
del files_all, grib_id, stack
else:
data_array2 = None
# call plotting function #
if stat_processing['method'] == 'member_extract':
if stat_processing['member'] == 'all':
for stat_processing['member'] in range(1,41):
plot_statistical_value_around_point(path, run, data_array, variable, domain, stat_processing)
return
else:
stat_processing['member'] = int(stat_processing['member'])
if stat_processing['method'] == 'prob_of_exc':
plot_prob_of_exc(path, run, hours, data_array, data_array2, variable, domain, model, stat_processing,
plot_type)
else:
plot_statistical_value_around_point(path, run, data_array, variable, domain, stat_processing)
del data_array, data_array2
return
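########################################################################
# The two grib-reading loops above differ only in the variable read; a shared
# helper (a sketch; read_ensemble_grib is an invented name, same assumptions as
# above: one file per lead time, 40 members per file) could look like this:
def read_ensemble_grib(filepaths, num_members, num_cells):
    # returns an array of shape (len(filepaths), num_members, num_cells)
    data = np.empty((len(filepaths), num_members, num_cells), dtype='float32')
    with ExitStack() as stack:
        files = [stack.enter_context(open(p, 'rb')) for p in filepaths]
        for i, f in enumerate(files):
            for j in range(num_members):
                grib_id = eccodes.codes_grib_new_from_file(f)
                data[i, j, :] = eccodes.codes_get_array(grib_id, 'values')
                eccodes.codes_release(grib_id)
    return data
# e.g.: data_array = read_ensemble_grib([path['base'] + path['data'] +
#     path['data_subfolder'] + f for f in filenames_all], 40, 75948)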
########################################################################
########################################################################
########################################################################
def plot_statistical_value_around_point(path, run, data_tot_prec, variable, domain, stat_processing):
hours = list(range(0,48,1)) + list(range(48,72,3)) + list(range(72,120+1,6))
data_tot_prec_timespan = data_tot_prec[hours.index(variable['hour_end']), :, :]\
- data_tot_prec[hours.index(variable['hour_start']), :, :]
if stat_processing['method'] == 'max':
data_processed = data_tot_prec_timespan.max(axis=0)
elif stat_processing['method'] == 'min':
data_processed = data_tot_prec_timespan.min(axis=0)
elif stat_processing['method'] == 'median':
data_processed = np.percentile(data_tot_prec_timespan, 50, axis=0)
elif stat_processing['method'] == '10p':
data_processed = np.percentile(data_tot_prec_timespan, 10, axis=0)
elif stat_processing['method'] == '90p':
data_processed = np.percentile(data_tot_prec_timespan, 90, axis=0)
elif stat_processing['method'] == 'spread':
data_processed = data_tot_prec_timespan.std(axis=0)
elif stat_processing['method'] == 'member_extract':
data_processed = data_tot_prec_timespan[stat_processing['member']-1, :]
else:
print('statistical method "{}" not implemented!'.format(stat_processing['method']))
exit()
########################################################################
mpi_file = nc.Dataset(path['base'] + path['grid'] + 'icon_grid_0028_R02B07_N02.nc', 'r')
vlat = mpi_file.variables['clat_vertices'][:].data * 180./np.pi
vlon = mpi_file.variables['clon_vertices'][:].data * 180./np.pi
clat = mpi_file.variables['clat'][:].data * 180./np.pi
clon = mpi_file.variables['clon'][:].data * 180./np.pi
mpi_file.close()
########################################################################
if stat_processing['method'] == 'member_extract':
plot_name = 'iconeueps_{}_{:03d}-{:03d}h_{}_m{:02d}'.format(\
variable['name'], variable['hour_start'], variable['hour_end'], domain['name'],\
stat_processing['member'])
else:
plot_name = 'iconeueps_{}_stats_{}_{:03d}-{:03d}h_{}'.format(\
variable['name'], stat_processing['method'], variable['hour_start'], variable['hour_end'],\
domain['name'])
########################################################################
colorpalette_source = 'hclwizard'
if colorpalette_source == 'tristenca':
filename = 'colorscale_tristenca_tot_prec_monohue_blues.txt'
with open(path['base'] + path['colorpalette'] + filename, 'r') as f:
line = f.read()
hex_colors = []
for i in range(40):
start = i * 8 + 1
end = start + 6
hex_colors.append(line[start:end])
custom_palette_list = [[255, 255, 255]]
for hex_color in hex_colors[:]:
rgb_color = [int(hex_str, 16) for hex_str in [hex_color[:2], hex_color[2:4], hex_color[4:]]]
custom_palette_list.append(rgb_color)
elif colorpalette_source == 'hclwizard':
filename = 'colorscale_hclwizard_tot_prec_stat_YlBl.txt'
with open(path['base'] + path['colorpalette'] + filename, 'r') as f:
lines = f.readlines()
hex_colors = []
for line in lines:
hex_colors.append(line[2:8])
custom_palette_list = [[255, 255, 255]] # extra color for correct LabelBar view
custom_palette_list.append([255, 255, 255]) # color for 1% category
for hex_color in hex_colors[:]:
rgb_color = [int(hex_str, 16) for hex_str in [hex_color[:2], hex_color[2:4], hex_color[4:]]]
custom_palette_list.append(rgb_color)
custom_palette_list.append(custom_palette_list[-1]) # extra color for correct LabelBar view
custom_palette = np.array(custom_palette_list) / 255
########################################################################
x_resolution = 800
y_resolution = 800
wks_res = Ngl.Resources()
wks_res.wkWidth = x_resolution
wks_res.wkHeight = y_resolution
wks_res.wkColorMap = custom_palette
wks_type = 'png'
wks = Ngl.open_wks(wks_type, path['base'] + path['plots'] + plot_name, wks_res)
resources = Ngl.Resources()
if domain['method'] == 'centerpoint':
resources.mpProjection = 'Hammer'
resources.mpCenterLonF = domain['lon']
resources.mpCenterLatF = domain['lat']
cutout_plot = dict(
lat_min = domain['lat'] - domain['radius'] / 111.2,
lat_max = domain['lat'] + domain['radius'] / 111.2,
lon_min = domain['lon'] - domain['radius'] / (111.2 * np.cos(domain['lat']*np.pi/180)),
lon_max = domain['lon'] + domain['radius'] / (111.2 * np.cos(domain['lat']*np.pi/180)),
)
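# 111.2 km per degree of latitude; the longitudinal half-width is widened by
# 1 / cos(lat) so the cutout spans roughly the same distance east-west
# (assuming domain['radius'] is given in kilometres).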
resources.mpLimitMode = 'latlon'
resources.mpMinLonF = cutout_plot['lon_min']
resources.mpMaxLonF = cutout_plot['lon_max']
resources.mpMinLatF = cutout_plot['lat_min']
resources.mpMaxLatF = cutout_plot['lat_max']
elif domain['method'] == 'deltalatlon':
resources.mpProjection = 'Hammer'
resources.mpCenterLonF = domain['lon']
resources.mpCenterLatF = domain['lat']
cutout_plot = dict(
lat_min = domain['lat'] - domain['deltalat'] / 111.2,
lat_max = domain['lat'] + domain['deltalat'] / 111.2,
lon_min = domain['lon'] - domain['deltalon'] / (111.2 * np.cos(domain['lat']*np.pi/180)),
lon_max = domain['lon'] + domain['deltalon'] / (111.2 * np.cos(domain['lat']*np.pi/180)),
)
resources.mpLimitMode = 'latlon'
resources.mpMinLonF = cutout_plot['lon_min']
resources.mpMaxLonF = cutout_plot['lon_max']
resources.mpMinLatF = cutout_plot['lat_min']
resources.mpMaxLatF = cutout_plot['lat_max']
else:
print('domain method "{}" not implemented!'.format(domain['method']))
exit()
resources.nglMaximize = False
resources.vpXF = 0.02
resources.vpYF = 0.92
resources.vpWidthF = 0.82
resources.vpHeightF = 0.70
########################################################################
# Turn on filled map areas:
resources.mpFillOn = True
# Set colors for [FillValue, Ocean, Land , InlandWater]:
resources.mpFillColors = ['pink','blue','white','blue']
resources.mpDataBaseVersion = 'MediumRes'
resources.mpDataSetName = 'Earth..4'
resources.mpOutlineBoundarySets = 'national'
resources.mpGeophysicalLineThicknessF = 3.0 * x_resolution / 1000
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.bm.v20180423 import bm_client as bm_client_v20180423
from tencentcloud.bm.v20180423 import models as models_v20180423
def doDescribeUserCmds(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUserCmdsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUserCmds(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAttachCamRole(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AttachCamRoleRequest()
model.from_json_string(json.dumps(args))
rsp = client.AttachCamRole(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyPsaRegulation(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyPsaRegulationRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyPsaRegulation(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePsaRegulations(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribePsaRegulationsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribePsaRegulations(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTaskOperationLog(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeTaskOperationLogRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeTaskOperationLog(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doOfflineDevices(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.OfflineDevicesRequest()
model.from_json_string(json.dumps(args))
rsp = client.OfflineDevices(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeOsInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeOsInfoRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeOsInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRunUserCmd(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RunUserCmdRequest()
model.from_json_string(json.dumps(args))
rsp = client.RunUserCmd(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCustomImageProcess(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeCustomImageProcessRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeCustomImageProcess(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doStartDevices(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.StartDevicesRequest()
model.from_json_string(json.dumps(args))
rsp = client.StartDevices(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doReloadDeviceOs(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ReloadDeviceOsRequest()
model.from_json_string(json.dumps(args))
rsp = client.ReloadDeviceOs(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDeviceHardwareInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDeviceHardwareInfoRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeDeviceHardwareInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUserCmdTasks(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUserCmdTasksRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUserCmdTasks(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreatePsaRegulation(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreatePsaRegulationRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreatePsaRegulation(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDeviceClass(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDeviceClassRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeDeviceClass(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBuyDevices(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BuyDevicesRequest()
model.from_json_string(json.dumps(args))
rsp = client.BuyDevices(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyUserCmd(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyUserCmdRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyUserCmd(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteUserCmds(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.BmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteUserCmdsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteUserCmds(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
| |
# .. coding: utf-8
# $Id: __init__.py 8854 2021-10-15 16:03:31Z milde $
# :Author: <NAME> <<EMAIL>>
# Based on the html4css1 writer by <NAME>.
# :Maintainer: <EMAIL>
# :Copyright: © 2005, 2009, 2015 <NAME>,
# portions from html4css1 © David Goodger.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: https://opensource.org/licenses/BSD-2-Clause
# Use "best practice" as recommended by the W3C:
# http://www.w3.org/2009/cheatsheet/
"""
Plain HyperText Markup Language document tree Writer.
The output conforms to the `HTML 5` specification.
The cascading style sheet "minimal.css" is required for proper viewing,
the style sheet "plain.css" improves reading experience.
"""
__docformat__ = 'reStructuredText'
import mimetypes
import os.path
import docutils
from docutils import frontend, nodes, writers, io
from docutils.transforms import writer_aux
from docutils.writers import _html_base
from docutils.writers._html_base import PIL, url2pathname
class Writer(writers._html_base.Writer):
supported = ('html', 'html5', 'xhtml')
"""Formats this writer supports."""
default_stylesheets = ['minimal.css', 'plain.css']
default_stylesheet_dirs = ['.', os.path.abspath(os.path.dirname(__file__))]
default_template = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'template.txt')
settings_spec = frontend.filter_settings_spec(
writers._html_base.Writer.settings_spec,
# update specs with changed defaults or help string
template =
('Template file. (UTF-8 encoded, default: "%s")' % default_template,
['--template'],
{'default': default_template, 'metavar': '<file>'}),
stylesheet_path =
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'(default: "%s")' % ','.join(default_stylesheets),
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheets}),
stylesheet_dirs =
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'(default: "%s")' % ','.join(default_stylesheet_dirs),
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
initial_header_level =
('Specify the initial header level. Does not affect document '
'title & subtitle (see --no-doc-title). (default: 2 for "<h2>")',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '2',
'metavar': '<level>'}),
no_xml_declaration =
('Omit the XML declaration.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'action': 'store_false'}),
)
settings_spec = settings_spec + (
'HTML5 Writer Options',
'',
(('Obsoleted by "--image-loading".',
['--embed-images'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Obsoleted by "--image-loading".',
['--link-images'],
{'dest': 'embed_images', 'action': 'store_false'}),
('Suggest at which point images should be loaded: '
'"embed", "link" (default), or "lazy".',
['--image-loading'],
{'choices': ('embed', 'link', 'lazy'),
'default': 'link'}),
('Append a self-link to section headings.',
['--section-self-link'],
{'default': 0, 'action': 'store_true'}),
('Do not append a self-link to section headings. (default)',
['--no-section-self-link'],
{'dest': 'section_self_link', 'action': 'store_false'}),
))
config_section = 'html5 writer'
def __init__(self):
self.parts = {}
self.translator_class = HTMLTranslator
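# A minimal usage sketch (assumes the standard docutils public API; the rst
# source string is made up for illustration; the settings names are the ones
# declared in settings_spec above):
#
#     from docutils.core import publish_string
#     html = publish_string(
#         source="Title\n=====\n\nSome *text*.",
#         writer_name="html5",
#         settings_overrides={"initial_header_level": "3",
#                             "image_loading": "lazy"},
#     )
#     print(html.decode("utf-8"))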
class HTMLTranslator(writers._html_base.HTMLTranslator):
"""
This writer generates `polyglot markup`: HTML5 that is also valid XML.
Safe subclassing: when overriding, treat ``visit_*`` and ``depart_*``
methods as a unit to prevent breaks due to internal changes. See the
docstring of docutils.writers._html_base.HTMLTranslator for details
and examples.
"""
# meta tag to fix rendering in mobile browsers
viewport = ('<meta name="viewport" '
'content="width=device-width, initial-scale=1" />\n')
# <acronym> tag obsolete in HTML5. Use the <abbr> tag instead.
def visit_acronym(self, node):
# @@@ implementation incomplete ("title" attribute)
self.body.append(self.starttag(node, 'abbr', ''))
def depart_acronym(self, node):
self.body.append('</abbr>')
# no standard meta tag name in HTML5, use separate "author" meta tags
# https://www.w3.org/TR/html5/document-metadata.html#standard-metadata-names
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors', meta=False)
for subnode in node:
self.add_meta('<meta name="author" content="%s" />\n' %
self.attval(subnode.astext()))
def depart_authors(self, node):
self.depart_docinfo_item()
# use the <figcaption> semantic tag.
def visit_caption(self, node):
if isinstance(node.parent, nodes.figure):
self.body.append('<figcaption>\n')
self.body.append(self.starttag(node, 'p', ''))
def depart_caption(self, node):
self.body.append('</p>\n')
# <figcaption> is closed in depart_figure(), as legend may follow.
# use HTML block-level tags if matching class value found
supported_block_tags = set(('ins', 'del'))
def visit_container(self, node):
# If there is exactly one of the "supported block tags" in
# the list of class values, use it as tag name:
classes = node['classes']
tags = [cls for cls in classes
if cls in self.supported_block_tags]
if len(tags) == 1:
node.html5tagname = tags[0]
classes.remove(tags[0])
else:
node.html5tagname = 'div'
self.body.append(self.starttag(node, node.html5tagname,
CLASS='docutils container'))
def depart_container(self, node):
self.body.append('</%s>\n' % node.html5tagname)
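# e.g. a container with classes ['del', 'note'] renders roughly as
# <del class="note docutils container">...</del>; with zero or several
# matching classes it falls back to a plain <div>.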
# no standard meta tag name in HTML5, use dcterms.rights
# see https://wiki.whatwg.org/wiki/MetaExtensions
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright', meta=False)
self.add_meta('<meta name="dcterms.rights" content="%s" />\n'
% self.attval(node.astext()))
def depart_copyright(self, node):
self.depart_docinfo_item()
# no standard meta tag name in HTML5, use dcterms.date
def visit_date(self, node):
self.visit_docinfo_item(node, 'date', meta=False)
self.add_meta('<meta name="dcterms.date" content="%s" />\n'
% self.attval(node.astext()))
def depart_date(self, node):
self.depart_docinfo_item()
def visit_document(self, node):
title = (node.get('title', '') or os.path.basename(node['source'])
or 'untitled Docutils document')
self.head.append('<title>%s</title>\n' % self.encode(title))
def depart_document(self, node):
self.head_prefix.extend([self.doctype,
self.head_prefix_template %
{'lang': self.settings.language_code}])
self.html_prolog.append(self.doctype)
self.meta.insert(0, self.viewport)
self.head.insert(0, self.viewport)
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if 'name="dcterms.' in ''.join(self.meta):
self.head.append(
'<link rel="schema.dcterms" href="http://purl.org/dc/terms/"/>')
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
else:
self.stylesheet.extend(self.math_header)
# skip content-type meta tag with interpolated charset value:
self.html_head.extend(self.head[1:])
self.body_prefix.append(self.starttag(node, 'main'))
self.body_suffix.insert(0, '</main>\n')
self.fragment.extend(self.body) # self.fragment is the "naked" body
self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ self.docinfo + self.body
+ self.body_suffix[:-1])
assert not self.context, 'len(context) = %s' % len(self.context)
# use new HTML5 <figure> and <figcaption> elements
def visit_figure(self, node):
atts = {}
if node.get('width'):
atts['style'] = 'width: %s' % node['width']
if node.get('align'):
atts['class'] = "align-" + node['align']
self.body.append(self.starttag(node, 'figure', **atts))
def depart_figure(self, node):
if len(node) > 1:
self.body.append('</figcaption>\n')
self.body.append('</figure>\n')
# use HTML5 <footer> element
def visit_footer(self, node):
self.context.append(len(self.body))
def depart_footer(self, node):
start = self.context.pop()
footer = [self.starttag(node, 'footer')]
footer.extend(self.body[start:])
footer.append('\n</footer>\n')
self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
# use HTML5 <header> element
def visit_header(self, node):
self.context.append(len(self.body))
def depart_header(self, node):
start = self.context.pop()
header = [self.starttag(node, 'header')]
header.extend(self.body[start:])
header.append('</header>\n')
self.body_prefix.extend(header)
self.header.extend(header)
del self.body[start:]
# MIME types supported by the HTML5 <video> element
videotypes = ('video/mp4', 'video/webm', 'video/ogg')
def visit_image(self, node):
atts = {}
uri = node['uri']
mimetype = mimetypes.guess_type(uri)[0]
if mimetype not in self.videotypes:
return super(HTMLTranslator, self).visit_image(node)
# image size
if 'width' in node:
atts['width'] = node['width'].replace('px', '')
if 'height' in node:
atts['height'] = node['height'].replace('px', '')
if 'align' in node:
atts['class'] = 'align-%s' % node['align']
if 'controls' in node['classes']:
atts['controls'] = 'controls'
atts['title'] = node.get('alt', uri)
if self.settings.image_loading == 'lazy':
atts['loading'] = 'lazy'
# No newline in inline context or if surrounded by <a>...</a>.
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
suffix = ''
else:
suffix = '\n'
self.body.append('%s<a href="%s">%s</a>%s</video>%s'
% (self.starttag(node, 'video', suffix, src=uri, **atts),
uri, node.get('alt', uri), suffix, suffix))
def depart_image(self, node):
pass
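# e.g. ``.. image:: movie.mp4`` with ``:class: controls`` becomes roughly
#   <video src="movie.mp4" controls="controls" title="movie.mp4">
#   <a href="movie.mp4">movie.mp4</a></video>
# i.e. a plain link is kept as fallback content for non-video user agents.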
# use HTML text-level tags if matching class value found
supported_inline_tags = set(('code', 'kbd', 'dfn', 'samp', 'var',
'bdi', 'del', 'ins', 'mark', 'small',
'b', 'i', 'q', 's', 'u'))
def visit_inline(self, node):
# Use `supported_inline_tags` if found in class values
classes = node['classes']
tags = [cls for cls in self.supported_inline_tags
if cls in classes]
if len(tags):
node.html5tagname = tags[0]
classes.remove(tags[0])
elif (classes == ['ln'] and isinstance(node.parent, nodes.literal_block)
and 'code' in node.parent.get('classes')):
if self.body[-1] == '<code>':
del(self.body[-1])
else:
self.body.append('</code>')
node.html5tagname = 'small'
else:
node.html5tagname = 'span'
self.body.append(self.starttag(node, node.html5tagname, ''))
def depart_inline(self, node):
self.body.append('</%s>' % node.html5tagname)
if (node.html5tagname == 'small' and node.get('classes') == ['ln']
and isinstance(node.parent, nodes.literal_block)):
self.body.append('<code data-lineno="%s">' % node.astext())
del(node.html5tagname)
# place inside HTML5 <figcaption> element (together with caption)
def visit_legend(self, node):
if not isinstance(node.parent[1], nodes.caption):
self.body.append('<figcaption>\n')
self.body.append(self.starttag(node, 'div', CLASS='legend'))
def depart_legend(self, node):
self.body.append('</div>\n')
# <figcaption> closed in visit_figure()
# use HTML text-level tags if matching class value found
def visit_literal(self, node):
classes = node['classes']
tags = [cls for cls in self.supported_inline_tags
if cls in classes]
if len(tags):
tagname = tags[0]
classes.remove(tags[0])
else:
tagname = 'span'
if tagname == 'code':
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, tagname, '', CLASS='docutils literal'))
text = node.astext()
# remove hard line breaks (except if in a parsed-literal block)
if not isinstance(node.parent, nodes.literal_block):
text = text.replace('\n', ' ')
# Protect text like ``--an-option`` and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
for token in self.words_and_spaces.findall(text):
if token.strip() and self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
self.body.append('</%s>' % tagname)
# Content already processed:
raise nodes.SkipNode
def depart_literal(self, node):
# skipped unless literal element is from "code" role:
self.body.append('</code>')
# Meta tags: 'lang' attribute replaced by 'xml:lang' in XHTML 1.1
# HTML5/polyglot recommends using both
def visit_meta(self, node):
| |
ca_climate_zone_metadata,
):
_compute_containment(
isd_station_metadata, "usaf_id", iecc_climate_zone_metadata, "iecc_climate_zone"
)
_compute_containment(
isd_station_metadata,
"usaf_id",
iecc_moisture_regime_metadata,
"iecc_moisture_regime",
)
_compute_containment(
isd_station_metadata, "usaf_id", ba_climate_zone_metadata, "ba_climate_zone"
)
_compute_containment(
isd_station_metadata, "usaf_id", ca_climate_zone_metadata, "ca_climate_zone"
)
def _find_zcta_closest_isd_stations(zcta_metadata, isd_station_metadata, limit=None):
if limit is None:
limit = 10
import pyproj
geod = pyproj.Geod(ellps="WGS84")
isd_usaf_ids, isd_lats, isd_lngs = zip(
*[
(
isd_station["usaf_id"],
float(isd_station["latitude"]),
float(isd_station["longitude"]),
)
for isd_station in isd_station_metadata.values()
]
)
isd_lats = np.array(isd_lats)
isd_lngs = np.array(isd_lngs)
for zcta in zcta_metadata.values():
zcta_lats = np.tile(zcta["latitude"], isd_lats.shape)
zcta_lngs = np.tile(zcta["longitude"], isd_lngs.shape)
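# pyproj's Geod.inv returns (forward azimuth, back azimuth, distance in
# meters); index [2] below keeps only the distances.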
dists = geod.inv(zcta_lngs, zcta_lats, isd_lngs, isd_lats)[2]
sorted_dists = np.argsort(dists)[:limit]
closest_isd_stations = []
for i, idx in enumerate(sorted_dists):
usaf_id = isd_usaf_ids[idx]
isd_station = isd_station_metadata[usaf_id]
closest_isd_stations.append(
{
"usaf_id": usaf_id,
"distance_meters": int(round(dists[idx])),
"rank": i + 1,
"iecc_climate_zone_match": (
zcta.get("iecc_climate_zone")
== isd_station.get("iecc_climate_zone")
),
"iecc_moisture_regime_match": (
zcta.get("iecc_moisture_regime")
== isd_station.get("iecc_moisture_regime")
),
"ba_climate_zone_match": (
zcta.get("ba_climate_zone")
== isd_station.get("ba_climate_zone")
),
"ca_climate_zone_match": (
zcta.get("ca_climate_zone")
== isd_station.get("ca_climate_zone")
),
}
)
zcta["closest_isd_stations"] = closest_isd_stations
def _create_table_structures(conn):
cur = conn.cursor()
cur.execute(
"""
create table isd_station_metadata (
usaf_id text not null
, wban_ids text not null
, recent_wban_id text not null
, name text not null
, icao_code text
, latitude text
, longitude text
, elevation text
, state text
, quality text default 'low'
, iecc_climate_zone text
, iecc_moisture_regime text
, ba_climate_zone text
, ca_climate_zone text
)
"""
)
cur.execute(
"""
create table isd_file_metadata (
usaf_id text not null
, year text not null
, wban_id text not null
)
"""
)
cur.execute(
"""
create table zcta_metadata (
zcta_id text not null
, geometry text
, latitude text not null
, longitude text not null
, state text
, iecc_climate_zone text
, iecc_moisture_regime text
, ba_climate_zone text
, ca_climate_zone text
)
"""
)
cur.execute(
"""
create table iecc_climate_zone_metadata (
iecc_climate_zone text not null
, geometry text
)
"""
)
cur.execute(
"""
create table iecc_moisture_regime_metadata (
iecc_moisture_regime text not null
, geometry text
)
"""
)
cur.execute(
"""
create table ba_climate_zone_metadata (
ba_climate_zone text not null
, geometry text
)
"""
)
cur.execute(
"""
create table ca_climate_zone_metadata (
ca_climate_zone text not null
, name text not null
, geometry text
)
"""
)
cur.execute(
"""
create table tmy3_station_metadata (
usaf_id text not null
, name text not null
, state text not null
, class text not null
)
"""
)
cur.execute(
"""
create table cz2010_station_metadata (
usaf_id text not null
)
"""
)
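# Once populated, these tables support plain SQL lookups, e.g.:
#   select usaf_id, name from isd_station_metadata where state = 'CA';
# (columns as created above; 'CA' is an illustrative state code)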
def _write_isd_station_metadata_table(conn, isd_station_metadata):
cur = conn.cursor()
rows = [
(
metadata["usaf_id"],
",".join(metadata["wban_ids"]),
metadata["recent_wban_id"],
metadata["name"],
metadata["icao_code"],
metadata["latitude"],
metadata["longitude"],
metadata["elevation"],
metadata["state"],
metadata["quality"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for station, metadata in sorted(isd_station_metadata.items())
]
cur.executemany(
"""
insert into isd_station_metadata(
usaf_id
, wban_ids
, recent_wban_id
, name
, icao_code
, latitude
, longitude
, elevation
, state
, quality
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_station_metadata_usaf_id on isd_station_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_station_metadata_state on isd_station_metadata(state)
"""
)
cur.execute(
"""
create index isd_station_metadata_iecc_climate_zone on
isd_station_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index isd_station_metadata_iecc_moisture_regime on
isd_station_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index isd_station_metadata_ba_climate_zone on
isd_station_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index isd_station_metadata_ca_climate_zone on
isd_station_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_isd_file_metadata_table(conn, isd_file_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], year, station_data["wban_id"])
for isd_station, metadata in sorted(isd_file_metadata.items())
for year, year_data in sorted(metadata["years"].items())
for station_data in year_data
]
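# The comprehension above assumes isd_file_metadata shaped like
#   {usaf_id: {"usaf_id": ..., "years": {year: [{"wban_id": ...}, ...]}}}
# and flattens it into one (usaf_id, year, wban_id) row per file.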
cur.executemany(
"""
insert into isd_file_metadata(
usaf_id
, year
, wban_id
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index isd_file_metadata_usaf_id on
isd_file_metadata(usaf_id)
"""
)
cur.execute(
"""
create index isd_file_metadata_year on
isd_file_metadata(year)
"""
)
cur.execute(
"""
create index isd_file_metadata_usaf_id_year on
isd_file_metadata(usaf_id, year)
"""
)
cur.execute(
"""
create index isd_file_metadata_wban_id on
isd_file_metadata(wban_id)
"""
)
cur.close()
conn.commit()
def _write_zcta_metadata_table(conn, zcta_metadata, geometry=False):
cur = conn.cursor()
rows = [
(
metadata["zcta"],
metadata["geometry"] if geometry else None,
metadata["latitude"],
metadata["longitude"],
metadata["state"],
metadata["iecc_climate_zone"],
metadata["iecc_moisture_regime"],
metadata["ba_climate_zone"],
metadata["ca_climate_zone"],
)
for zcta, metadata in sorted(zcta_metadata.items())
]
cur.executemany(
"""
insert into zcta_metadata(
zcta_id
, geometry
, latitude
, longitude
, state
, iecc_climate_zone
, iecc_moisture_regime
, ba_climate_zone
, ca_climate_zone
) values (?,?,?,?,?,?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index zcta_metadata_zcta_id on zcta_metadata(zcta_id)
"""
)
cur.execute(
"""
create index zcta_metadata_state on zcta_metadata(state)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_climate_zone on zcta_metadata(iecc_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_iecc_moisture_regime on zcta_metadata(iecc_moisture_regime)
"""
)
cur.execute(
"""
create index zcta_metadata_ba_climate_zone on zcta_metadata(ba_climate_zone)
"""
)
cur.execute(
"""
create index zcta_metadata_ca_climate_zone on zcta_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_climate_zone_metadata_table(
conn, iecc_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_climate_zone"], metadata["geometry"] if geometry else None)
for iecc_climate_zone, metadata in sorted(iecc_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into iecc_climate_zone_metadata(
iecc_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_climate_zone_metadata_iecc_climate_zone on
iecc_climate_zone_metadata(iecc_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_iecc_moisture_regime_metadata_table(
conn, iecc_moisture_regime_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["iecc_moisture_regime"], metadata["geometry"] if geometry else None)
for iecc_moisture_regime, metadata in sorted(
iecc_moisture_regime_metadata.items()
)
]
cur.executemany(
"""
insert into iecc_moisture_regime_metadata(
iecc_moisture_regime
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index iecc_moisture_regime_metadata_iecc_moisture_regime on
iecc_moisture_regime_metadata(iecc_moisture_regime)
"""
)
cur.close()
conn.commit()
def _write_ba_climate_zone_metadata_table(
conn, ba_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(metadata["ba_climate_zone"], metadata["geometry"] if geometry else None)
for ba_climate_zone, metadata in sorted(ba_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ba_climate_zone_metadata(
ba_climate_zone
, geometry
) values (?,?)
""",
rows,
)
cur.execute(
"""
create index ba_climate_zone_metadata_ba_climate_zone on
ba_climate_zone_metadata(ba_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_ca_climate_zone_metadata_table(
conn, ca_climate_zone_metadata, geometry=True
):
cur = conn.cursor()
rows = [
(
metadata["ca_climate_zone"],
metadata["name"],
metadata["geometry"] if geometry else None,
)
for ca_climate_zone, metadata in sorted(ca_climate_zone_metadata.items())
]
cur.executemany(
"""
insert into ca_climate_zone_metadata(
ca_climate_zone
, name
, geometry
) values (?,?,?)
""",
rows,
)
cur.execute(
"""
create index ca_climate_zone_metadata_ca_climate_zone on
ca_climate_zone_metadata(ca_climate_zone)
"""
)
cur.close()
conn.commit()
def _write_tmy3_station_metadata_table(conn, tmy3_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"], metadata["name"], metadata["state"], metadata["class"])
for tmy3_station, metadata in sorted(tmy3_station_metadata.items())
]
cur.executemany(
"""
insert into tmy3_station_metadata(
usaf_id
, name
, state
, class
) values (?,?,?,?)
""",
rows,
)
cur.execute(
"""
create index tmy3_station_metadata_usaf_id on
tmy3_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def _write_cz2010_station_metadata_table(conn, cz2010_station_metadata):
cur = conn.cursor()
rows = [
(metadata["usaf_id"],)
for cz2010_station, metadata in sorted(cz2010_station_metadata.items())
]
cur.executemany(
"""
insert into cz2010_station_metadata(
usaf_id
) values (?)
""",
rows,
)
cur.execute(
"""
create index cz2010_station_metadata_usaf_id on
cz2010_station_metadata(usaf_id)
"""
)
cur.close()
conn.commit()
def build_metadata_db(
zcta_geometry=False,
iecc_climate_zone_geometry=True,
iecc_moisture_regime_geometry=True,
ba_climate_zone_geometry=True,
ca_climate_zone_geometry=True,
):
""" Build database of metadata from primary sources.
Downloads primary sources, clears existing DB, and rebuilds from scratch.
Parameters
----------
zcta_geometry : bool, optional
Whether or not to include ZCTA geometry in database.
iecc_climate_zone_geometry : bool, optional
Whether or not to include IECC Climate Zone geometry in database.
iecc_moisture_regime_geometry : bool, optional
Whether or not to include IECC Moisture Regime geometry in database.
ba_climate_zone_geometry : bool, optional
Whether or not to include Building America Climate Zone geometry in database.
ca_climate_zone_geometry : bool, optional
Whether or not to include California Building Climate Zone Area geometry in database.
"""
try:
import shapely
except ImportError:
raise ImportError("Loading polygons requires shapely.")
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Scraping TMY3 station data requires beautifulsoup4.")
try:
import pyproj
except ImportError:
raise ImportError("Computing distances requires pyproj.")
try:
import simplejson
except ImportError:
raise ImportError("Writing geojson requires simplejson.")
download_path = _download_primary_sources()
conn = metadata_db_connection_proxy.reset_database()
# Load data into memory
print("Loading ZCTAs")
zcta_metadata = _load_zcta_metadata(download_path)
print("Loading counties")
county_metadata = _load_county_metadata(download_path)
print("Merging county climate zones")
(
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
) = _create_merged_climate_zones_metadata(county_metadata)
print("Loading CA climate zones")
ca_climate_zone_metadata = _load_CA_climate_zone_metadata(download_path)
print("Loading ISD station metadata")
isd_station_metadata = _load_isd_station_metadata(download_path)
print("Loading ISD station file metadata")
isd_file_metadata = _load_isd_file_metadata(download_path, isd_station_metadata)
print("Loading TMY3 station metadata")
tmy3_station_metadata = _load_tmy3_station_metadata(download_path)
print("Loading CZ2010 station metadata")
cz2010_station_metadata = _load_cz2010_station_metadata()
# Augment data in memory
print("Computing ISD station quality")
# add rough station quality to station metadata
# (all months in last 5 years have at least 600 points)
_compute_isd_station_quality(isd_station_metadata, isd_file_metadata)
print("Mapping ZCTAs to climate zones")
# add county and ca climate zone mappings
_map_zcta_to_climate_zones(
zcta_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
)
print("Mapping ISD stations to climate zones")
# add county and ca climate zone mappings
_map_isd_station_to_climate_zones(
isd_station_metadata,
iecc_climate_zone_metadata,
iecc_moisture_regime_metadata,
ba_climate_zone_metadata,
ca_climate_zone_metadata,
)
# Write tables
print("Creating table structures")
_create_table_structures(conn)
print("Writing ZCTA data")
_write_zcta_metadata_table(conn, zcta_metadata, geometry=zcta_geometry)
print("Writing IECC climate zone data")
| |
# change it now since some already trained models could not be loaded after
method_name_short = {
'Bernoulli': 'B',
'BernoulliPlusMinusOne': 'BPM1',
'OneHotCategorical': 'OHC',
# initializers
"glorot_normal_initializer": 'gn',
"glorot_uniform_initializer": 'gu',
"xavier_initializer": 'x',
"truncated_normal_initializer": 't',
"variance_scaling_initializer": 'v',
"constant_initializer": 'c',
"constant": 'c',
"random_normal": 'n',
# custom networks
"CIFAR10TutorialNetwork": "CIFAR10TutorialNetwork",
# regularizers
"l2_regularizer": "Ltwo",
"l1_regularizer": "Lone",
"sum_regularizer": "Sum",
#keras.regularizers
"l2": "Ltwo",
"l1": "Lone",
# covariance parameterization
"softplus": 'S',
"linear_softplus": 'LS',
"exp": 'E',
# zero one methods
"clip": 'C',
#"sigmoid": 's', #already defined sigmoid "S"
# clipping gradient
"clip_by_global_norm": "GN",
"clip_by_norm": "N",
"clip_by_value": "V",
"none": "No",
"no": "No",
# preprocess filtering
"FromAE": "A",
}
#listWithPoints = lambda x: ",".join(re.sub('[( )\[\]]', '', str(list(x))).replace(' ', '').split(","))
def listWithPoints(x):
if isinstance(x, int):
x = [x]
return ",".join(re.sub('[( )\[\]]', '', str(list(x))).replace(' ', '').split(","))
def get_method_id(method_tuple):
"""Creates the id for a method of tensorflow.
Args:
method_tuple (tuple): a tuple of (tensorflow method name, kwargs to pass to the method, bool_activation).
Returns:
string: the idname of the method that we want to concatenate in the output filenames.
"""
# ipdb.set_trace()
# the name could be articulated, since I might want to get the initializers or regularizers
# from different submodules in tf.
# e.g. tf.contrib.layers.xavier_initializer
# the method name I am interested in is the one after the last dot.
if method_tuple is None:
return "0"
method_name = method_tuple[0].split('.')[-1]
method_kwargs = method_tuple[1]
methodid = method_name_short[method_name]
if method_name == 'dense':
methodid += str(method_kwargs['units'])
elif method_name == 'conv2d':
methodid += str(method_kwargs['filters']) + 'x' + re.sub('[( )]', '', str(method_kwargs['kernel_size']))
elif method_name == 'max_pooling2d':
methodid += re.sub('[( )]', '', str(method_kwargs['pool_size']))
elif method_name=='AveragePooling2D':
methodid += re.sub('[( )]', '', str(method_kwargs['pool_size']))
elif method_name=='flatten':
pass
elif method_name == 'Linear':
#case when it is output layer
if "output_size" in method_kwargs:
methodid += str(method_kwargs["output_size"])
elif method_name == 'Concatenate':
methodid += str(method_kwargs['node_name'])
elif method_name=='LinearWN':
# case when it is output layer
if "output_size" in method_kwargs:
methodid += str(method_kwargs["output_size"])
methodid += 'wn'+str(int(method_kwargs['use_weight_norm']))
elif method_name=='Conv2D':
# case when it is output layer
if "output_channels" in method_kwargs:
methodid += str(method_kwargs["output_channels"])
methodid += 'k'+listWithPoints(method_kwargs['kernel_shape'])
elif method_name=='Conv2DTranspose':
# case when it is output layer
if "output_channels" in method_kwargs:
methodid += str(method_kwargs["output_channels"])
if "output_shape" in method_kwargs:
methodid += 'o'+listWithPoints(method_kwargs['output_shape'])
methodid += 'k'+listWithPoints(method_kwargs['kernel_shape'])
if "stride" in method_kwargs:
methodid += 's'+listWithPoints(method_kwargs['stride']) # if you need to change this to 'strides', talk to me (****)
elif method_name=='Conv2DWN':
methodid += str(method_kwargs['output_channels'])+'o'+listWithPoints(method_kwargs['kernel_shape'])+\
'wn'+str(int(method_kwargs['use_weight_norm']))
elif method_name=='ResUnit':
methodid += 'c' + str(method_kwargs['depth'])+'k'+listWithPoints(method_kwargs['kernel_shape'])+\
's' + str(method_kwargs['stride'])
elif method_name == 'VGGBlock':
methodid += 'c' + str(method_kwargs['channels']) + 'k' + listWithPoints(method_kwargs['kernel_shape']) + \
'd' + str(method_kwargs['prob_drop'])
if method_kwargs.get('logits_size', None) is not None:
methodid += "l" + listWithPoints(method_kwargs['logits_size'])
elif method_name=='ResNet18':
methodid += 'o'+str(method_kwargs['output_size'])+'wn'+str(int(method_kwargs['use_weight_norm']))
elif method_name=='ConvNet2D':
methodid += 'o' + listWithPoints(method_kwargs['output_channels']) + 'k' + listWithPoints(
method_kwargs['kernel_shapes']) + 's' + listWithPoints(method_kwargs['strides'])
elif method_name=='ConvNet2DTranspose':
methodid += 'o' + listWithPoints(method_kwargs['output_channels']) + 'k' + listWithPoints(
method_kwargs['kernel_shapes']) + 's' + listWithPoints(method_kwargs['strides'])
elif method_name=='ConvDec':
if method_kwargs.get('linear_first', None) is not None:
methodid += 'l' + listWithPoints(method_kwargs["linear_first"]["sizes"])
methodid += 'r' + listWithPoints(method_kwargs["linear_first"]["reshape"])
methodid += 'c' + listWithPoints(method_kwargs['channels']) \
+ 'k' + listWithPoints(method_kwargs['kernel_shape']) # + 's' + listWithPoints(method_kwargs['stride'])
elif method_name in ['ResEnc', 'ResDec']:
methodid += 'h' + str(method_kwargs['num_hiddens'])
methodid += 'rl' + str(method_kwargs['num_residual_layers'])
methodid += 'rh' + str(method_kwargs['num_residual_hiddens'])
methodid += 'd' + str(method_kwargs['prob_drop'])
if 'creg_scale' in method_kwargs and method_kwargs['creg_scale'] is not None:
methodid += 'cs' + str(method_kwargs['creg_scale'])
elif method_name == 'BatchFlatten':
pass
elif method_name == 'Identity':
pass
elif method_name == 'MaxPooling2D':
methodid += re.sub('[( )]', '', str(method_kwargs['pool_size'])) + 's' + listWithPoints(
method_kwargs['strides'])
elif method_name=='Dropout':
methodid += 'r' + str(method_kwargs['rate']) # tf.layers.dropout
elif method_name=='Sigmoid':
pass
elif method_name=='Tanh':
pass
elif method_name == 'RandomUniform':
methodid += 's' + str(method_kwargs['shape'])
methodid += 'min' + str(method_kwargs['minval'])
methodid += 'max' + str(method_kwargs['maxval'])
# for the moment we don't have mean and covariance as parameters
elif method_name == 'RandomGaussian':
methodid += 's' + str(method_kwargs['shape'])
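# NOTE: the next few branches ('Tanh', 'AveragePooling2D', 'Sigmoid',
# 'Identity') repeat cases already handled earlier in this elif chain and
# are therefore unreachable; the earlier branches always win.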
elif method_name == 'Tanh':
pass
elif method_name=='AveragePooling2D':
methodid += re.sub('[( )]', '', str(method_kwargs['pool_size'])) + 's' + listWithPoints(method_kwargs['strides'])
#elif method_name=='Dropout':
# methodid += 'k'+str(method_kwargs['keep'])
elif method_name=='Sigmoid':
raise Exception("why nothing in the name? talk to ****")
elif method_name=='Identity':
pass
elif method_name=='BatchReshape':
pass
elif method_name=='BatchNorm':
methodid += '' #+ str(method_kwargs['offset']) + 's' + str(method_kwargs['scale']) + 'd' + str(method_kwargs['decay_rate'])
elif method_name=='LayerNorm':
methodid += ''
elif method_name=='GaussianDiagonal' \
or method_name=='GaussianDiagonalZeroOne' \
or method_name=='GaussianDiagonalPlusMinusOne' \
or method_name=='vonMisesFisher' \
or method_name=='LogitNormalDiagonal' \
or method_name=='LogitNormalDiagonalPlusMinusOne' \
or method_name=='LogisticDiagonalZeroOne' \
or method_name=='LogisticDiagonalPlusMinusOne':
# wrapped_module_name, wrapped_module_kwargs = method_kwargs["module_tuple"]
# **** the lower case is done to increase readability
# methodid += "" + method_name_short[wrapped_module_name].lower()
module_tuple = copy.deepcopy(method_kwargs["module_tuple"])
if "output_size" in method_kwargs:
module_tuple[1]["output_size"] = method_kwargs["output_size"]
if "output_shape" in method_kwargs:
module_tuple[1]["output_channels"] = method_kwargs["output_shape"][-1]
methodid += "m"+get_method_id(module_tuple)
if "minimal_concentration" in method_kwargs or "minimal_covariance" in method_kwargs: # and method_kwargs["minimal_concentration"] != 1:
if method_name=='vonMisesFisher':
methodid += "mc" + str(method_kwargs["minimal_concentration"])
else:
methodid += "mc" + str(method_kwargs["minimal_covariance"])
if "zero_one_method" in method_kwargs and method_kwargs["zero_one_method"] != "sigmoid":
methodid += "zo" + str(method_name_short[method_kwargs["zero_one_method"]])
if "scalar_covariance" in method_kwargs:
scov = method_kwargs["scalar_covariance"]
if scov == True:
methodid += "scT"
elif isinstance(scov, float):
methodid += "sc" + str(scov)
if method_name=='LogitNormalDiagonal' or method_name=='LogitNormalDiagonalPlusMinusOne':
#import pdb;pdb.set_trace()
if "clip_value" in method_kwargs:
clip = method_kwargs["clip_value"]
methodid += "cv" + str(clip)
if check_key_in("creg_tuple", method_kwargs):
metric_name, cscale = method_kwargs["creg_tuple"]
methodid += 'cr' + metric_name[0].upper() + 'cs' + "{:.4g}".format(cscale)
#methodid += "_r" + regularization_info(method_kwargs)
elif method_name=='Bernoulli' or method_name=='BernoulliPlusMinusOne':
if "output_size" in method_kwargs:
methodid += str(method_kwargs["output_size"])
clip = method_kwargs["clip_value"]
methodid += "cv" + str(clip)
elif method_name=='OneHotCategorical':
if "output_size" in method_kwargs:
methodid += str(method_kwargs["output_size"])
clip = method_kwargs["clip_value"]
methodid += "cv" + str(clip)
elif method_name == 'CIFAR10TutorialNetwork':
pass
elif "variance_scaling_initializer" in method_name:
pass
elif "glorot_normal_initializer" in method_name or "glorot_uniform_initializer" in method_name:
pass
elif "truncated_normal_initializer" in method_name or "random_normal" in method_name:
methodid += str(method_kwargs['stddev'])
elif "constant_initializer" in method_name or "constant" in method_name:
methodid += str(method_kwargs['value'])
elif method_name in ["l1_regularizer", "l2_regularizer", "sum_regularizer"]:
methodid += str(method_kwargs["scale"])
elif method_name in ["l1", "l2"]:
methodid += str(method_kwargs["l"])
elif method_name == "softplus":
pass
elif method_name == "linear_softplus":
pass
elif method_name == "exp":
pass
# PREPROCESSING SECTION used to prefilter transform an image with some method
elif method_name == "FromAE":
methodid += hash_this(method_kwargs["filename"], trunc=3)
methodid += "t" + str(method_kwargs["transform_prob"])
methodid += "n" + str(method_kwargs["noisy_transform_prob"])
# Here implement your favourite method
# elif :
#
else:
print('----------------------')
print('ERROR ', method_name)
raise ValueError("id rule for `%s` has to be implemented." % method_name)
# support for contractive only in some layers for the moment, but it could be easily extended,
# just add your layer and test it
if "contractive_regularizer" in method_kwargs:
if method_name == "Linear" or method_name == "GaussianDiagonal" or method_name == "GaussianDiagonalPlusMinusOne":
methodid += regularization_info(method_kwargs)
else:
raise ValueError("contractive_regularizers not supported for `%s`." % method_name)
return methodid
def check_key_in(key, kwargs):
return (key in kwargs) and (kwargs[key] is not None)
def get_clipping_id(clipping_tuple):
method = clipping_tuple[0]
if not method:
return method_name_short["none"]
else:
value = clipping_tuple[1]["value"]
return method_name_short[method] + "{:.4g}".format(value)
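# e.g. get_clipping_id(("clip_by_norm", {"value": 5.0})) returns "N5"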
def eval_method_from_tuple(module, method_tuple, instantiate=True):
"""
Args:
module (python module): module from which take the method.
method_tuple (tuple): (method_path, method_kwargs).
Returns:
if method_tuple is None returns None
otherwise returns
module.method_path(**method_kwargs)
"""
if not method_tuple:
return None
method_fn = load_method_fn_from_method_path(module, method_tuple[0])
# import pdb;pdb.set_trace()
if instantiate:
return method_fn(**method_tuple[1])
else:
return method_fn
def load_method_fn_from_method_path(module, method_path):
"""
Args:
module (python module): module from which take the method.
method_path (string): path to the method.
Returns:
if method_path is None returns None
otherwise returns
module.method_path(**method_kwargs)
"""
if not method_path:
return None
mpathsplit = method_path.split(".")
method_name = mpathsplit[-1]
path = module.__name__
middle_path = '.'.join(mpathsplit[:-1])
if middle_path:
path += '.' + middle_path
last_module = importlib.import_module(path)
method_fn = getattr(last_module, method_name)
return method_fn
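# e.g. load_method_fn_from_method_path(tf, "contrib.layers.xavier_initializer")
# imports the submodule tf.contrib.layers and returns its xavier_initializer
# attribute (path taken from the comment in get_method_id above; assumes a
# TF 1.x 'tf' module is in scope).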
def try_load_class_from_modules(class_path, modules):
LayerClass = None
for module in modules:
try:
LayerClass = load_method_fn_from_method_path(module, class_path)
except Exception:
pass
if LayerClass is None:
raise Exception("problem loading class: {:}, not found in modules {:}".format(class_path, modules))
return LayerClass
def load_class(module_plus_class, relative=False, base_path=''):
# assemble class path
class_path = ''
# if class_base_path prepend this to the path
if base_path:
class_path = base_path
# if the prepended path does not finish with a dot add it
if class_path[-1] != '.':
class_path += '.'
class_path += module_plus_class
# split in | |
<filename>apps/apis/system_603/system_603.py
# @Copyright(C), OldFive, 2020.
# @Date : 2021/3/11 0011 10:48:20
# @Author : OldFive
# @Version : 0.1
# @Description :
# @History :
# @Other:
# ▒█████ ██▓ ▓█████▄ █████▒██▓ ██▒ █▓▓█████
# ▒██▒ ██▒▓██▒ ▒██▀ ██▌▓██ ▒▓██▒▓██░ █▒▓█ ▀
# ▒██░ ██▒▒██░ ░██ █▌▒████ ░▒██▒ ▓██ █▒░▒███
# ▒██ ██░▒██░ ░▓█▄ ▌░▓█▒ ░░██░ ▒██ █░░▒▓█ ▄
# ░ ████▓▒░░██████▒░▒████▓ ░▒█░ ░██░ ▒▀█░ ░▒████▒
# ░ ▒░▒░▒░ ░ ▒░▓ ░ ▒▒▓ ▒ ▒ ░ ░▓ ░ ▐░ ░░ ▒░ ░
# ░ ▒ ▒░ ░ ░ ▒ ░ ░ ▒ ▒ ░ ▒ ░ ░ ░░ ░ ░ ░
# ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░░ ░
# ░ ░ ░ ░ ░ ░ ░ ░ ░
# ░ ░
#
"""
APIs related to the 603 system.
"""
# Standard library imports
# Third party imports
from fastapi import APIRouter, Depends
from starlette.status import *
# Local application imports
from apps.utils import resp_code
from apps.utils.comm_ret import comm_ret
from apps.utils.mysql_conn_pool.mysql_helper import MySqLHelper
import datetime
import time
from loguru import logger
from apps.utils.tools import data_processing, get_before_date_time, get_now_date_time
from typing import Optional
from typing import Set
from fastapi import FastAPI
from pydantic import BaseModel
system_603 = APIRouter()
@system_603.get("/sms/rcv",summary = "短信接收数据量")
async def get_sms_rcv(start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 14:57:00",
"value": 75775
},
...
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
sql = """SELECT
d_time,
SUM( sjjs_1m )
FROM
t_603_sms_sjjs
WHERE
d_time BETWEEN '{}'
AND '{}'
GROUP BY
d_time
ORDER BY
d_time """.format(start_time, end_time)
print(sql)
result = get_msg_data(sql)
return comm_ret(data=result)
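# A parameterized alternative to str.format (sketch; assumes
# MySqLHelper.selectall can forward query params to the driver):
#   sql = "SELECT d_time, SUM(sjjs_1m) FROM t_603_sms_sjjs " \
#         "WHERE d_time BETWEEN %s AND %s GROUP BY d_time ORDER BY d_time"
#   rows = db.selectall(sql, (start_time, end_time))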
@system_603.get("/sms/load", summary = "短信加载数据量")
async def get_sms_load(start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 14:57:00",
"value": 75775
},
...
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
sql = """SELECT
d_time,
SUM( load_1m )
FROM
t_603_sms_load
WHERE
d_time BETWEEN '{}'
AND '{}'
GROUP BY
d_time
ORDER BY
d_time """.format(start_time, end_time)
print(5555, sql)
result = get_msg_data(sql)
return comm_ret(data=result)
@system_603.get('/sms/datas', summary="SMS front-end/back-end data comparison")
async def get_sms_datas(start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 15:07:00",
"rcv": 217,
"load": 384
},
...
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
sql = """SELECT
t1.d_time,
t1.rcv,
t2.lod
FROM
( SELECT d_time, sum( sjjs_1m ) AS rcv FROM t_603_sms_sjjs GROUP BY d_time ) AS t1,
( SELECT d_time, sum( load_1m ) AS lod FROM t_603_sms_load GROUP BY d_time ) AS t2
WHERE
t1.d_time = t2.d_time
AND t1.d_time BETWEEN '{}' AND '{}' """.format(start_time, end_time)
print(sql)
result = get_msg_data(sql)
return comm_ret(data=result)
@system_603.get("/mms/rcv", summary = "彩信接收数据量")
async def get_mms_rcv(start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 14:57:00",
"value": 75775
},
...
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
sql = """SELECT
d_time,
SUM( sjjs_1m )
FROM
t_603_mms_sjjs
WHERE
d_time BETWEEN '{}'
AND '{}'
GROUP BY
d_time
ORDER BY
d_time """.format(start_time, end_time)
print(sql)
result = get_msg_data(sql)
return comm_ret(data=result)
@system_603.get("/mms/load", summary = "彩信加载数据量")
async def get_mms_load(start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 14:57:00",
"value": 75775
},
...
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
sql = """SELECT
d_time,
SUM( load_1m )
FROM
t_603_mms_load
WHERE
d_time BETWEEN '{}'
AND '{}'
GROUP BY
d_time
ORDER BY
d_time """.format(start_time, end_time)
print(sql)
result = get_msg_data(sql)
return comm_ret(data=result)
@system_603.get('/mms/datas', summary="MMS front-end/back-end data comparison")
async def get_mms_datas(start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 15:07:00",
"rcv": 217,
"load": 384
},
...
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
sql = """SELECT
t3.d_time,
t3.rcv,
t4.lod
FROM
( SELECT d_time, sum( sjjs_1m ) AS rcv FROM t_603_mms_sjjs GROUP BY d_time ) AS t3,
( SELECT d_time, sum( load_1m ) AS lod FROM t_603_mms_load GROUP BY d_time ) AS t4
WHERE
t3.d_time = t4.d_time
AND t3.d_time BETWEEN '{}' AND '{}' """.format(start_time, end_time)
print(sql)
result = get_msg_data(sql)
return comm_ret(data=result)
def get_msg_data(sql: str):
"""
Read SMS/MMS data and return it in a uniform format
@param:
sql: SQL statement str
@return:
[
{
"date": "2021-03-01 14:57:00",
"value": 75775
},
or
{
"date": "2021-03-01 15:07:00",
"rcv": 217,
"load": 384
}
]
"""
db = MySqLHelper()
rows = db.selectall(sql=sql)
data_list = [list(row) for row in rows]
temp_data = data_processing(data_list, 2000)
ret = []
for item in temp_data:
if len(item) == 2:
temp_dict = {}
temp_dict["date"] = item[0]
temp_dict["value"] = item[1]
else:
temp_dict = {}
temp_dict["date"] = item[0]
temp_dict["rcv"] = item[1]
temp_dict["load"] = item[2]
ret.append(temp_dict)
return ret
@system_603.get('/relation/location/new', summary="Latest correlation-rate reading for every equipment room")
async def get_relation_new(end_time: Optional[str] = None):
"""
## **param**:
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"date": "2021-03-01 14:34:40",
"value": 665746.87,
"location": "lt_hld"
},
...
]
"""
if end_time == None:
end_time = get_now_date_time()
# start time: end_time minus 20 minutes
start_time = datetime.datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S") + datetime.timedelta(minutes=-20)
db = MySqLHelper()
sql = """SELECT
d_time,
AVG( relate_rate ),
location,
abbr
FROM
t_603_relate_rate,
t_603_relate_rate_base
WHERE
t_603_relate_rate_base.ip_addr = t_603_relate_rate.ip_addr
AND d_time BETWEEN '{}' AND '{}'
GROUP BY
location """.format(start_time, end_time)
print(sql)
rows = db.selectall(sql=sql)
data_list = [list(row) for row in rows]
temp_data = data_processing(data_list, 2000)
result = []
for item in temp_data:
temp_dict = {}
temp_dict["date"] = item[0]
temp_dict["value"] = item[1]
temp_dict["location"] = "_".join(item[3].split("_")[0:2])
result.append(temp_dict)
return comm_ret(data=result)
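# abbr values look like "lt_hld_0.1": the first two underscore fields name
# the equipment room ("lt_hld"), the third is the per-IP key used below.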
@system_603.get('/relation/location/ip', summary="Correlation rates for all IPs in one equipment room")
async def get_relation_ip(location: str, start_time: Optional[str] = None, end_time: Optional[str] = None):
"""
## **param**:
location: equipment-room code (required) str, format like dx_ds
start_time: start time (optional) str, default: one day before the current time
end_time: end time (optional) str, default: the current time
## **return**:
[
{
"0.1": [
{
"date": "2021-03-01 15:34:17",
"value": 84.39
},
...
],
...
}
]
"""
if start_time == None:
start_time = get_before_date_time()
if end_time == None:
end_time = get_now_date_time()
db = MySqLHelper()
sql = """SELECT
d_time,
relate_rate,
abbr
FROM
t_603_relate_rate,
t_603_relate_rate_base
WHERE
t_603_relate_rate.ip_addr = t_603_relate_rate_base.ip_addr
AND abbr LIKE '{}_%'
AND d_time BETWEEN '{}' AND '{}' """.format(location, start_time, end_time)
print(sql)
rows = db.selectall(sql=sql)
data_list = [list(row) for row in rows]
temp_data = data_processing(data_list, 2000)
result = {}
for item in temp_data:
temp = item[2].split('_')[2]
temp_dict = {}
temp_dict['date'] = item[0]
temp_dict['value'] = item[1]
result.setdefault(temp, []).append(temp_dict)
return comm_ret(data=result)
@system_603.get('/up_down/new', summary = "Get the latest batch of uplink/downlink rates for all metrics and all ISPs")
async def get_up_down_new(end_time:Optional[str] = None):
"""
## **param:**
end_time: end time (optional) str, default: now
## **return:**
"Gn/A11": {
"Gn1": {
"date": "2021-03-01 10:00:00",
"req": 0.9966,
"rsp": 1
},
...
},
...
"""
if end_time is None:
    end_time = get_now_date_time()
# start time: end_time minus 30 minutes
start_time = datetime.datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S") + datetime.timedelta(minutes=-30)
db = MySqLHelper()
sql = """SELECT
isp,
protocol,
d_time,
req_match_rate,
rsp_match_rate
FROM
t_603_req_rsp_match
WHERE
d_time BETWEEN '{}' AND '{}' """.format(start_time, end_time)
print(sql)
rows = db.selectall(sql=sql)
data_list = [list(row) for row in rows]
temp_data = data_processing(data_list, 2000)
result = {}
for item in temp_data:
temp_dict = {}
temp_dict['date'] = item[2]
temp_dict['req'] = item[3]
temp_dict['rsp'] = item[4]
if item[1] not in ('Gn', 'A11'):
    # keep only the first row seen per protocol/isp pair
    result.setdefault(item[1], {}).setdefault(item[0], temp_dict)
else:
    if not (item[1] == 'Gn' and item[0] == 3):
        # Gn and A11 are reported as one combined group; later rows overwrite earlier ones
        result.setdefault("Gn/A11", {})[item[1] + str(item[0])] = temp_dict
return comm_ret(data = result)
@system_603.get('/up_down/datas', summary = "Get uplink/downlink rates for a given metric and ISP")
async def get_up_down_datas(isp:str, protocol:str, start_time:Optional[str] = None, end_time:Optional[str] = None):
"""
## **param**:
protocol: metric name (required) str, format: MC or Gn/A11
isp: ISP (required) str, format: 1 or Gn1
start_time: start time (optional) str, default: one day before now
end_time: end time (optional) str, default: now
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAlarmResult',
'AwaitableGetAlarmResult',
'get_alarm',
]
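# Usage sketch (illustrative; assumes the pulumi_oci provider is configured
# and the alarm OCID is a placeholder):
#
#     import pulumi
#     import pulumi_oci as oci
#
#     alarm = oci.monitoring.get_alarm(alarm_id="ocid1.alarm.oc1..example")
#     pulumi.export("alarm_severity", alarm.severity)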
@pulumi.output_type
class GetAlarmResult:
"""
A collection of values returned by getAlarm.
"""
def __init__(__self__, alarm_id=None, body=None, compartment_id=None, defined_tags=None, destinations=None, display_name=None, freeform_tags=None, id=None, is_enabled=None, metric_compartment_id=None, metric_compartment_id_in_subtree=None, namespace=None, pending_duration=None, query=None, repeat_notification_duration=None, resolution=None, resource_group=None, severity=None, state=None, suppression=None, time_created=None, time_updated=None):
if alarm_id and not isinstance(alarm_id, str):
raise TypeError("Expected argument 'alarm_id' to be a str")
pulumi.set(__self__, "alarm_id", alarm_id)
if body and not isinstance(body, str):
raise TypeError("Expected argument 'body' to be a str")
pulumi.set(__self__, "body", body)
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if destinations and not isinstance(destinations, list):
raise TypeError("Expected argument 'destinations' to be a list")
pulumi.set(__self__, "destinations", destinations)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_enabled and not isinstance(is_enabled, bool):
raise TypeError("Expected argument 'is_enabled' to be a bool")
pulumi.set(__self__, "is_enabled", is_enabled)
if metric_compartment_id and not isinstance(metric_compartment_id, str):
raise TypeError("Expected argument 'metric_compartment_id' to be a str")
pulumi.set(__self__, "metric_compartment_id", metric_compartment_id)
if metric_compartment_id_in_subtree and not isinstance(metric_compartment_id_in_subtree, bool):
raise TypeError("Expected argument 'metric_compartment_id_in_subtree' to be a bool")
pulumi.set(__self__, "metric_compartment_id_in_subtree", metric_compartment_id_in_subtree)
if namespace and not isinstance(namespace, str):
raise TypeError("Expected argument 'namespace' to be a str")
pulumi.set(__self__, "namespace", namespace)
if pending_duration and not isinstance(pending_duration, str):
raise TypeError("Expected argument 'pending_duration' to be a str")
pulumi.set(__self__, "pending_duration", pending_duration)
if query and not isinstance(query, str):
raise TypeError("Expected argument 'query' to be a str")
pulumi.set(__self__, "query", query)
if repeat_notification_duration and not isinstance(repeat_notification_duration, str):
raise TypeError("Expected argument 'repeat_notification_duration' to be a str")
pulumi.set(__self__, "repeat_notification_duration", repeat_notification_duration)
if resolution and not isinstance(resolution, str):
raise TypeError("Expected argument 'resolution' to be a str")
pulumi.set(__self__, "resolution", resolution)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if severity and not isinstance(severity, str):
raise TypeError("Expected argument 'severity' to be a str")
pulumi.set(__self__, "severity", severity)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if suppression and not isinstance(suppression, dict):
raise TypeError("Expected argument 'suppression' to be a dict")
pulumi.set(__self__, "suppression", suppression)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_updated and not isinstance(time_updated, str):
raise TypeError("Expected argument 'time_updated' to be a str")
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="alarmId")
def alarm_id(self) -> str:
return pulumi.get(self, "alarm_id")
@property
@pulumi.getter
def body(self) -> str:
"""
The human-readable content of the notification delivered. Oracle recommends providing guidance to operators for resolving the alarm condition. Consider adding links to standard runbook practices. Avoid entering confidential information. Example: `High CPU usage alert. Follow runbook instructions for resolution.`
"""
return pulumi.get(self, "body")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the alarm.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def destinations(self) -> Sequence[str]:
"""
A list of destinations to which the notifications for this alarm will be delivered. Each destination is represented by an [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) related to the supported destination service. For example, a destination using the Notifications service is represented by a topic OCID. Supported destination services: Notifications Service. Limit: One destination per supported destination service.
"""
return pulumi.get(self, "destinations")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A user-friendly name for the alarm. It does not have to be unique, and it's changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the alarm.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> bool:
"""
Whether the alarm is enabled. Example: `true`
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter(name="metricCompartmentId")
def metric_compartment_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric being evaluated by the alarm.
"""
return pulumi.get(self, "metric_compartment_id")
@property
@pulumi.getter(name="metricCompartmentIdInSubtree")
def metric_compartment_id_in_subtree(self) -> bool:
"""
When true, the alarm evaluates metrics from all compartments and subcompartments. The parameter can only be set to true when metricCompartmentId is the tenancy OCID (the tenancy is the root compartment). A true value requires the user to have tenancy-level permissions. If this requirement is not met, then the call is rejected. When false, the alarm evaluates metrics from only the compartment specified in metricCompartmentId. Default is false. Example: `true`
"""
return pulumi.get(self, "metric_compartment_id_in_subtree")
@property
@pulumi.getter
def namespace(self) -> str:
"""
The source service or application emitting the metric that is evaluated by the alarm. Example: `oci_computeagent`
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="pendingDuration")
def pending_duration(self) -> str:
"""
The period of time that the condition defined in the alarm must persist before the alarm state changes from "OK" to "FIRING". For example, a value of 5 minutes means that the alarm must persist in breaching the condition for five minutes before the alarm updates its state to "FIRING".
"""
return pulumi.get(self, "pending_duration")
@property
@pulumi.getter
def query(self) -> str:
"""
The Monitoring Query Language (MQL) expression to evaluate for the alarm. The Alarms feature of the Monitoring service interprets results for each returned time series as Boolean values, where zero represents false and a non-zero value represents true. A true value means that the trigger rule condition has been met. The query must specify a metric, statistic, interval, and trigger rule (threshold or absence). Supported values for interval: `1m`-`60m` (also `1h`). You can optionally specify dimensions and grouping functions. Supported grouping functions: `grouping()`, `groupBy()`. For details about Monitoring Query Language (MQL), see [Monitoring Query Language (MQL) Reference](https://docs.cloud.oracle.com/iaas/Content/Monitoring/Reference/mql.htm). For available dimensions, review the metric definition for the supported service. See [Supported Services](https://docs.cloud.oracle.com/iaas/Content/Monitoring/Concepts/monitoringoverview.htm#SupportedServices).
"""
return pulumi.get(self, "query")
@property
@pulumi.getter(name="repeatNotificationDuration")
def repeat_notification_duration(self) -> str:
"""
The frequency at which notifications are re-submitted, if the alarm keeps firing without interruption. Format defined by ISO 8601. For example, `PT4H` indicates four hours. Minimum: PT1M. Maximum: P30D.
"""
return pulumi.get(self, "repeat_notification_duration")
@property
@pulumi.getter
def resolution(self) -> str:
"""
The time between calculated aggregation windows for the alarm. Supported value: `1m`
"""
return pulumi.get(self, "resolution")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Resource group specified as a filter for metric data retrieved by the alarm. A resource group is a custom string that can be used as a filter. Only one resource group can be applied per metric. A valid resourceGroup value starts with an alphabetical character and includes only alphanumeric characters, periods (.), underscores (_), hyphens (-), and dollar signs ($). Avoid entering confidential information. Example: `frontend-fleet`
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter
def severity(self) -> str:
"""
The perceived type of response required when the alarm is in the "FIRING" state. Example: `CRITICAL`
"""
return pulumi.get(self, "severity")
@property
@pulumi.getter
def state(self) -> str:
"""
The current lifecycle state of the alarm. Example: `DELETED`
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def suppression(self) -> 'outputs.GetAlarmSuppressionResult':
"""
The configuration details for suppressing an alarm.
"""
return pulumi.get(self, "suppression")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The date and time the alarm was created. Format defined by RFC3339. Example: `2019-02-01T01:02:29.600Z`
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
| |
import asyncio
import logging
import re
import socket
import warnings
from asyncio import AbstractEventLoop
from ipaddress import ip_address, IPv4Network
from abc import ABCMeta
from abc import abstractmethod
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
MAX_COMMAND_LINE_RESPONSE = 8*1024*1024 # 8 MiB
MessageCallback = Callable[[str], None]
class DeviceNotFoundError(Exception):
"""An exception that is raised when a device couldn't be found."""
class DeviceTimeoutError(Exception):
"""An exception that is raised when a timeout has occurred."""
class Connection(metaclass=ABCMeta):
@abstractmethod
async def open(self) -> None:
raise NotImplementedError
@abstractmethod
async def close(self) -> None:
raise NotImplementedError
@abstractmethod
async def write_command_line(self, message: str) -> None:
raise NotImplementedError
@abstractmethod
async def write_monitoring_line(self, message: str) -> None:
raise NotImplementedError
@abstractmethod
async def read_command_line(self) -> str:
raise NotImplementedError
@abstractmethod
def subscribe_monitoring_line(self, callback: MessageCallback) -> None:
raise NotImplementedError
@property
@abstractmethod
def monitoring_line_supported(self) -> bool:
raise NotImplementedError
@property
@abstractmethod
def loop(self) -> AbstractEventLoop:
raise NotImplementedError
class DiscoveryProtocol(asyncio.DatagramProtocol):
"""An implementation of a DatagramProtocol for device discovery.
Attributes:
device_name (str): The name of the DeCoP device.
"""
import ifaddr
def __init__(self, device_name: str) -> None:
self._device_name = device_name
self._regex = re.compile(r'\("(.*?)" "(.*?)" "(.*?)" "(.*?)" "(.*?)" "(.*?)" (\d+) "(.*?)" (\d+) (\d+)\)')
self._result = asyncio.Future() # type: asyncio.Future
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
sock = transport.get_extra_info('socket')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
for adapter in self.ifaddr.get_adapters():
for ip in adapter.ips:
if ip.is_IPv4:
net = IPv4Network("{}/{}".format(ip.ip, ip.network_prefix), strict=False)
if not net.is_link_local and not net.is_loopback:
transport.sendto(b'whoareyou?', (str(net.broadcast_address), 60010))
def datagram_received(self, data: bytes, addr: Tuple[str, int]) -> None:
match = self._regex.match(data.decode('utf-8', 'replace'))
if match:
ls = match.groups()
if len(ls) == 10 and (ls[0] == self._device_name or ls[5] == self._device_name):
self.result.set_result((ip_address(ls[7]), int(ls[8]), int(ls[9])))
@property
def result(self) -> asyncio.Future:
"""asyncio.Future: The result of the discovery process."""
return self._result
class MonitoringLineProtocol(asyncio.Protocol):
"""An implementation of an asyncio protocol that routes monitoring line updates to callbacks.
Attributes:
loop (AbstractEventLoop): An asyncio event loop.
callbacks (List[MessageCallback]): A list of callbacks.
host (str): A host identifier, can be an IP address, a hostname, or a system label.
monitoring_line_port (int): The network port of the monitoring line.
"""
def __init__(self, loop: AbstractEventLoop, callbacks: List[MessageCallback], host: str, monitoring_line_port: int) -> None:
self._logger = logging.getLogger(__name__)
self._loop = loop
self._callbacks = callbacks
self._host = host
self._monitoring_port = monitoring_line_port
self._regex = re.compile(r'\(.*? .*? ((\"(\\.|[^"\\])*\")|.*?)\)\r?\n')
self._stream_buffer = ''
def connection_made(self, transport):
pass
def data_received(self, data):
line, self._stream_buffer = self._readline(self._stream_buffer + data.decode('utf-8', 'replace'))
while line:
self._logger.debug("%s:%d - MON RX: %s", self._host, self._monitoring_port, repr(line))
for callback in self._callbacks:
callback(line)
line, self._stream_buffer = self._readline(self._stream_buffer)
def connection_lost(self, exc):
pass
def _readline(self, buffer: str) -> Tuple[str, str]:
"""Reads a line from a monitoring line buffer.
Args:
buffer: A monitoring line buffer.
Returns:
Tuple[str, str]: A tuple containing the extracted line and the remaining buffer.
"""
m = self._regex.match(buffer)
if m is not None:
start, end = m.span()
return buffer[start:end], buffer[end:]
else:
return '', buffer
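# Illustrative example (the message format shown is an assumption): given a
# buffer holding one complete update plus a partial fragment, _readline peels
# off the first newline-terminated parenthesised group and returns the rest:
#
#     >>> proto._readline("(1581949741 'laser1:enabled #t)\r\n(1581")
#     ("(1581949741 'laser1:enabled #t)\r\n", '(1581')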
class NetworkConnection(Connection):
"""A network connection for the command and monitoring lines.
Attributes:
host (str): A host identifier, can be an IP address, a hostname, or
a system label.
command_line_port (int): The network port of the command line.
monitoring_line_port (int): The network port of the monitoring line.
timeout (int): Timeout in seconds.
loop (AbstractEventLoop): The event loop.
"""
def __init__(self, host: str, command_line_port: int = 1998, monitoring_line_port: int = 1999, timeout: int = 5, loop: AbstractEventLoop = None) -> None:
self._logger = logging.getLogger(__name__)
self._host = host
self._command_port = command_line_port
self._monitoring_port = monitoring_line_port
self._timeout = timeout
self._loop = asyncio.get_event_loop() if loop is None else loop
self._command_line_reader = None # type: Optional[asyncio.StreamReader]
self._command_line_writer = None # type: Optional[asyncio.StreamWriter]
self._monitor_callbacks = [] # type: List[MessageCallback]
self._monitor_protocol = None # type: Optional[MonitoringLineProtocol]
self._monitor_transport = None # type: Optional[asyncio.WriteTransport]
if loop:
warnings.warn('"loop" parameter is deprecated and will be removed in a future version', DeprecationWarning, stacklevel=2)
async def open(self) -> None:
"""Opens a connection to the device.
Raises:
DeviceNotFoundError: If connecting to the device failed.
"""
try:
# Try to parse as IP address e.g. '192.168.1.32'
self._host = ip_address(self._host)
except ValueError:
try:
# Try to resolve DNS entry e.g. 'dlcpro.host.com'
self._host = ip_address(socket.gethostbyname(self._host))
except (ValueError, OSError):
# Try to find system-label via UDP broadcast
self._host, self._command_port, self._monitoring_port = await self.find_device(self._host)
self._logger.debug("Opening network connection to '%s:%d,%d'", self._host, self._command_port, self._monitoring_port)
if self._host is None:
raise DeviceNotFoundError()
# Open command line
self._command_line_reader, self._command_line_writer = await asyncio.open_connection(self._host.compressed, self._command_port, limit=MAX_COMMAND_LINE_RESPONSE, loop=self._loop)
try:
# Purge welcome message
await asyncio.wait_for(self._command_line_reader.readuntil(b'\n> '), self._timeout)
except asyncio.TimeoutError as exc:
raise DeviceTimeoutError('Timeout while waiting for command prompt') from exc
# Open monitoring line
if self._monitoring_port is not None and self._monitoring_port > 0:
protocol = MonitoringLineProtocol(self._loop, self._monitor_callbacks, self._host, self._monitoring_port)
self._monitor_transport, self._monitor_protocol = await self._loop.create_connection(lambda: protocol, self._host.compressed, self._monitoring_port)
async def close(self) -> None:
"""Closes the network connection."""
self._logger.debug("Closing network connection to '%s'", self._host)
if self._command_line_writer is not None:
self._command_line_writer.close()
self._command_line_writer = None
if self._monitor_transport is not None:
self._monitor_transport.close()
self._monitor_transport = None
self._monitor_callbacks = []
async def write_command_line(self, message: str) -> None:
"""Sends a message to the command line.
Args:
message (str): The message to send to the command line.
"""
self._logger.debug("%s:%d - CMD TX: %s", self._host, self._command_port, repr(message))
self._command_line_writer.write(message.encode())
async def read_command_line(self) -> str:
"""Reads a message from the command line of the device.
Returns:
str: The message read from the command line.
Raises:
DeviceTimeoutError: If there was a timeout while waiting for the message.
"""
try:
result = await asyncio.wait_for(self._command_line_reader.readuntil(b'\n> '), self._timeout)
except asyncio.TimeoutError as exc:
raise DeviceTimeoutError('Timeout while waiting for response') from exc
result = result.decode('utf-8', 'replace')
if result.endswith('\n> '):
self._logger.debug("%s:%d - CMD RX: %s", self._host, self._command_port, repr(result[:-3]))
return result[:-3]
return str()
async def write_monitoring_line(self, message: str) -> None:
"""Sends a message to the monitoring line.
Args:
message: The message to send to the monitoring line.
"""
self._logger.debug("%s:%d - MON TX: %s", self._host, self._monitoring_port, repr(message))
self._monitor_transport.write(message.encode())
def subscribe_monitoring_line(self, callback: MessageCallback) -> None:
self._monitor_callbacks.append(callback)
@property
def monitoring_line_supported(self) -> bool:
return self._monitor_transport is not None
@property
def loop(self) -> AbstractEventLoop:
return self._loop
async def find_device(self, device_name: str) -> Tuple[Optional[ip_address], int, int]:
"""Try to find a device in the network by name.
Args:
device_name (str): The name of the device.
Returns:
Tuple[Optional[ip_address], int, int]: A tuple containing the IP address and the command line and monitoring line ports.
"""
protocol = DiscoveryProtocol(device_name)
transport, _ = await self._loop.create_datagram_endpoint(lambda: protocol, local_addr=('0.0.0.0', 0))
try:
return await asyncio.wait_for(protocol.result, self._timeout, loop=self._loop)
except asyncio.TimeoutError:
return None, 0, 0
finally:
transport.close()
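# Usage sketch (illustrative; the device address and command syntax are
# assumptions, not part of this module's contract):
#
#     async def demo():
#         conn = NetworkConnection('192.168.1.32')
#         await conn.open()
#         await conn.write_command_line("(param-ref 'system-label)\n")
#         print(await conn.read_command_line())
#         await conn.close()
#
#     asyncio.get_event_loop().run_until_complete(demo())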
class SerialConnection(Connection):
"""A serial connection for the command line.
Attributes:
port (str): The name of the serial port (e.g. 'COM1' or '/dev/ttyUSB0').
baudrate (int): The number of transferred bits per second.
timeout (int): The communication timeout (in seconds).
loop (int): An asyncio event loop.
"""
import serial
def __init__(self, port: str, baudrate: int = 115200, timeout: int = 5, loop: AbstractEventLoop = None) -> None:
self._logger = logging.getLogger(__name__)
self._port = port
self._baudrate = baudrate
self._timeout = timeout
self._loop = asyncio.get_event_loop() if loop is None else loop
self._serial = None
if loop:
warnings.warn('"loop" parameter is deprecated and will be removed in a future version', DeprecationWarning, stacklevel=2)
async def open(self) -> None:
"""Opens a connection to the device.
Raises:
DeviceNotFoundError: If connecting to the device failed.
"""
try:
self._logger.debug("Opening serial connection to '%s' with %d baud", self._port, self._baudrate)
self._serial = self.serial.serial_for_url(self._port, baudrate=self._baudrate)
except self.serial.serialutil.SerialException as ex:
raise DeviceNotFoundError() from ex
# Temporarily set shorter timeout
self._serial.timeout = 0.5
# Disable serial echo (\x12) and cancel the device interpreter state (\x03)
await self.write_command_line('\x12\x03')
# Purge the input buffer by reading a possible welcome message and the
# prompt created by the cancel
await self._loop.run_in_executor(None, lambda: self._serial.read_until(b'\n> '))
await self._loop.run_in_executor(None, lambda: self._serial.read_until(b'\n> '))
# Restore the original timeout
self._serial.timeout = self._timeout
async def close(self) -> None:
"""Closes the serial connection."""
if self._serial is not None:
self._logger.debug("Closing serial connection to '%s'", self._port)
self._serial.close()
self._serial = None
async def write_command_line(self, message: str) -> None:
"""Sends a message to the command line.
Args:
message (str): The message to send to the command line.
"""
self._logger.debug("%s - CMD TX: %s", self._port, repr(message))
await self._loop.run_in_executor(None, lambda: self._serial.write(message.encode()))
async def write_monitoring_line(self, message: str) -> None:
raise NotImplementedError
else None
if not description:
raise errors.InvalidInput('Description is required when pending a listing for deletion')
listing = model_access.pending_delete_listing(user, listing, description)
return Response(data={"listing": {"id": listing.id}},
status=status.HTTP_201_CREATED)
except Exception as e:
logger.error('Exception: {}'.format(e), extra={'request': request})
raise errors.RequestException('Error pending listing for deletion')
class ListingRejectionViewSet(viewsets.ModelViewSet):
"""
ModelViewSet for getting all Listing Rejections
Access Control
===============
- AppsMallSteward can view
URIs
======
POST /api/listing/{pk}/rejection
Summary:
Add a ListingRejection
Request:
data: ListingRejectionSerializer Schema
Response:
200 - Successful operation - ListingActivitySerializer
GET /api/listing/{pk}/rejection
Summary:
Find a ListingRejection Entry by ID
Response:
200 - Successful operation - ListingActivitySerializer
"""
permission_classes = (permissions.IsOrgStewardOrReadOnly,)
serializer_class = serializers.ListingActivitySerializer
def get_queryset(self):
queryset = model_access.get_rejection_listings(
self.request.user.username)
return queryset
def list(self, request, listing_pk=None):
queryset = self.get_queryset().filter(listing__id=listing_pk)
serializer = serializers.ListingActivitySerializer(queryset,
context={'request': request}, many=True)
return Response(serializer.data)
def create(self, request, listing_pk=None):
try:
user = generic_model_access.get_profile(request.user.username)
listing = model_access.get_listing_by_id(request.user.username,
listing_pk)
rejection_description = request.data['description']
listing = model_access.reject_listing(user, listing,
rejection_description)
return Response(data={"listing": {"id": listing.id}},
status=status.HTTP_201_CREATED)
except Exception as e:
logger.error('Exception: {}'.format(e), extra={'request': request})
raise errors.RequestException('Error rejecting listing')
class ScreenshotViewSet(viewsets.ModelViewSet):
"""
ModelViewSet for getting all Screenshots for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/screenshot/
Summary:
Get a list of all system-wide Screenshot entries
Response:
200 - Successful operation - [ScreenshotSerializer]
POST /api/screenshot/
Summary:
Add a Screenshot
Request:
data: ScreenshotSerializer Schema
Response:
200 - Successful operation - ScreenshotSerializer
GET /api/screenshot/{pk}
Summary:
Find a Screenshot Entry by ID
Response:
200 - Successful operation - ScreenshotSerializer
PUT /api/screenshot/{pk}
Summary:
Update a Screenshot Entry by ID
PATCH /api/screenshot/{pk}
Summary:
Update (Partial) a Screenshot Entry by ID
DELETE /api/screenshot/{pk}
Summary:
Delete a Screenshot Entry by ID
"""
permission_classes = (permissions.IsUser,)
queryset = model_access.get_all_screenshots()
serializer_class = serializers.ScreenshotSerializer
class TagViewSet(viewsets.ModelViewSet):
"""
ModelViewSet for getting all Tags for a given listing
Access Control
===============
- All users can view
URIs
======
GET /api/tag/
Summary:
Get a list of all system-wide Tag entries
Response:
200 - Successful operation - [TagSerializer]
POST /api/tag/
Summary:
Add a Tag
Request:
data: TagSerializer Schema
Response:
200 - Successful operation - TagSerializer
GET /api/tag/{pk}
Summary:
Find a Tag Entry by ID
Response:
200 - Successful operation - TagSerializer
PUT /api/tag/{pk}
Summary:
Update a Tag Entry by ID
PATCH /api/tag/{pk}
Summary:
Update (Partial) a Tag Entry by ID
DELETE /api/tag/{pk}
Summary:
Delete a Tag Entry by ID
"""
permission_classes = (permissions.IsUser,)
queryset = model_access.get_all_tags()
serializer_class = serializers.TagSerializer
class ListingViewSet(viewsets.ModelViewSet):
"""
Get all listings this user can see
ModelViewSet for getting all Listings
Access Control
===============
- All users can view
URIs
======
GET /api/listing
Summary:
Get a list of all system-wide Listings
Response:
200 - Successful operation - [ListingSerializer]
POST /api/listing/
Summary:
Add a Listing
Request:
data: ListingSerializer Schema
Response:
200 - Successful operation - ListingSerializer
GET /api/listing/{pk}
Summary:
Find a Listing Entry by ID
Response:
200 - Successful operation - ListingSerializer
PUT /api/listing/{pk}
Summary:
Update a Listing Entry by ID
PATCH /api/listing/{pk}
Summary:
Update (Partial) a Listing Entry by ID
DELETE /api/listing/{pk}
Summary:
Delete a Listing Entry by ID
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.ListingSerializer
filter_backends = (filters.SearchFilter, filters.OrderingFilter)
search_fields = ('title', 'id', 'owners__display_name', 'agency__title', 'agency__short_name',)
ordering_fields = ('id', 'agency__title', 'agency__short_name', 'is_enabled', 'is_featured',
'edited_date', 'security_marking', 'is_private', 'approval_status', 'approved_date',
'avg_rate', 'total_votes')
case_insensitive_ordering_fields = ('title',)
ordering = ('is_deleted', '-edited_date')
def get_queryset(self):
approval_status = self.request.query_params.get('approval_status', None)
# org = self.request.query_params.get('org', None)
orgs = self.request.query_params.getlist('org', False)
enabled = self.request.query_params.get('enabled', None)
ordering = self.request.query_params.get('ordering', None)
owners_id = self.request.query_params.get('owners_id', None)
if enabled:
enabled = enabled.lower()
if enabled in ['true', '1']:
enabled = True
else:
enabled = False
if ordering:
ordering = [s.strip() for s in ordering.split(',')]
else:
# always default to last modified for consistency
ordering = ['-edited_date']
listings = model_access.get_listings(self.request.user.username)
if owners_id:
listings = listings.filter(owners__id=owners_id)
if approval_status:
listings = listings.filter(approval_status=approval_status)
if orgs:
listings = listings.filter(agency__short_name__in=orgs)
if enabled is not None:
listings = listings.filter(is_enabled=enabled)
# have to handle this case manually because the ordering includes an app multiple times
# if there are multiple owners. We instead do sorting by case insensitive compare of the
# app owner that comes first alphabetically
param = [s for s in ordering if 'owners__display_name' == s or '-owners__display_name' == s]
if ordering is not None and param:
orderby = 'min'
if param[0].startswith('-'):
orderby = '-min'
listings = listings.annotate(min=Min(Lower('owners__display_name'))).order_by(orderby)
self.ordering = None
# Django REST filters are case sensitive by default, so we handle case_insensitive fields
# manually. May want to abstract this functionality in an OrderingFilter sub-class
case_insensitive_ordering = [s for s in ordering if s in self.case_insensitive_ordering_fields or
s.startswith('-') and s[1:] in self.case_insensitive_ordering_fields]
if ordering is not None and case_insensitive_ordering:
for field in case_insensitive_ordering:
if field.startswith('-'):
listings = listings.order_by(Lower(field[1:])).reverse()
else:
listings = listings.order_by(Lower(field))
self.ordering = None
return listings
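# Usage sketch (illustrative): a request such as
#
#     GET /api/listing?ordering=-owners__display_name&org=TO&enabled=true
#
# filters by organization and enabled state, then sorts descending by the
# alphabetically-first owner name, compared case-insensitively.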
def list(self, request):
queryset = serializers.ListingSerializer.setup_eager_loading(self.get_queryset())
queryset = self.filter_queryset(queryset)
counts_data = model_access.put_counts_in_listings_endpoint(queryset)
# it appears that because we override the queryset here, we must
# manually invoke the pagination methods
page = self.paginate_queryset(queryset)
if page is not None:
serializer = serializers.ListingSerializer(page,
context={'request': request}, many=True)
r = self.get_paginated_response(serializer.data)
# add counts to response
r.data['counts'] = counts_data
return r
serializer = serializers.ListingSerializer(queryset,
context={'request': request}, many=True)
r = Response(serializer.data)
# add counts to response
counts = {'counts': counts_data}
r.data.append(counts)
return r
def create(self, request):
"""
Save a new Listing - only title is required
Sample Payload:
{
"title":"My Test App",
"description":"This is the full description of my app",
"descriptionShort":"short app description",
"contacts":[
{
"type":"Technical Support",
"name":"Tech Support Contact",
"organization":"ABC Inc",
"email":"<EMAIL>",
"securePhone":"555-555-5555",
"unsecurePhone":"111-222-3454"
}
],
"tags":[
"tag1",
"tag2"
],
"type":"Web Application",
"usage_requirements":"None",
"system_requirements":"None",
"versionName":"1.0.0",
"launchUrl":"http://www.google.com/myApp",
"whatIsNew":"Nothing is new",
"owners":[
{
"username":"alan"
}
],
"agency":"Test Organization",
"categories":[
"Entertainment",
"Media and Video"
],
"intents":[
"application/json/edit",
"application/json/view"
],
"docUrls":[
{
"name":"wiki",
"url":"http://www.wikipedia.com/myApp"
}
],
"smallIconId":"b0b54993-0668-4419-98e8-787e4c3a2dc2",
"largeIconId":"e94128ab-d32d-4241-8820-bd2c69a64a87",
"bannerIconId":"ecf79771-79a0-4884-a36d-5820c79c6d72",
"featuredBannerIconId":"c3e6a369-4773-485e-b369-5cebaa331b69",
"changeLogs":[
],
"screenshots":[
{
"smallImageId":"0b8db892-b669-4e86-af23-d899cb4d4d91",
"largeImageId":"80957d25-f34b-48bc-b860-b353cfd9e101"
}
]
}
---
parameters:
- name: body
required: true
paramType: body
parameters_strategy:
form: replace
query: replace
omit_serializer: true
"""
# logger.debug('inside ListingViewSet.create', extra={'request': request})
serializer = serializers.ListingSerializer(data=request.data,
context={'request': request}, partial=True)
if not serializer.is_valid():
logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
raise errors.ValidationException('{0}'.format(serializer.errors))
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def retrieve(self, request, pk=None):
"""
Get a Listing by id
"""
queryset = self.get_queryset().get(pk=pk)
serializer = serializers.ListingSerializer(queryset,
context={'request': request})
# TODO: Refactor in future to use django ordering (mlee)
temp = serializer.data.get('screenshots')
temp.sort(key=operator.itemgetter('order'))
return Response(serializer.data)
def destroy(self, request, pk=None):
"""
Delete a listing
"""
queryset = self.get_queryset()
listing = get_object_or_404(queryset, pk=pk)
description = request.data['description'] if 'description' in request.data else None
if not description:
raise errors.InvalidInput('Description is required when deleting a listing')
model_access.delete_listing(request.user.username, listing, description)
return Response(status=status.HTTP_204_NO_CONTENT)
def update(self, request, pk=None):
"""
Update a Listing
Sample payload:
{
"id":45,
"title":"My Test App",
"description":"This is the full description of my app",
"descriptionShort":"short app description",
"contacts":[
{
"securePhone":"555-555-5555",
"unsecurePhone":"111-222-3454",
"email":"<EMAIL>",
"organization":"ABC Inc",
"name":"Tech <NAME>",
"type":"Technical Support"
}
],
"totalReviews":0,
"avgRate":0,
"totalRate1":0,
"totalRate2":0,
"totalRate3":0,
"totalRate4":0,
"height":null,
"width":null,
"totalRate5":0,
"totalVotes":0,
"tags":[
"tag2",
"tag1"
],
"type":"Web Application",
"uuid":"e378c427-bba6-470c-b2f3-e550b9129504",
"usage_requirements":"None",
"system_requirements":"None",
"iframe_compatible":false,
"versionName":"1.0.0",
"launchUrl":"http://www.google.com/myApp",
"whatIsNew":"Nothing is new",
"owners":[
{
"displayName":"kevink",
"username":"kevink",
"id":5
}
],
"agency":"Test Organization",
"agencyShort":"TO",
"currentRejection":null,
"isEnabled":true,
"categories":[
"Media and Video",
"Entertainment"
],
"editedDate":"2015-08-12T10:53:47.036+0000",
"intents":[
"application/json/edit",
"application/json/view"
],
"docUrls":[
{
"url":"http://www.wikipedia.com/myApp",
"name":"wiki"
}
],
"approvalStatus":"IN_PROGRESS",
"isFeatured":false,
"smallIconId":"b0b54993-0668-4419-98e8-787e4c3a2dc2",
"largeIconId":"e94128ab-d32d-4241-8820-bd2c69a64a87",
"bannerIconId":"ecf79771-79a0-4884-a36d-5820c79c6d72",
"featuredBannerIconId":"c3e6a369-4773-485e-b369-5cebaa331b69",
"changeLogs":[
],
"screenshots":[
{
"largeImageId":"80957d25-f34b-48bc-b860-b353cfd9e101",
"smallImageId":"0b8db892-b669-4e86-af23-d899cb4d4d91"
}
]
}
"""
# logger.debug('inside ListingViewSet.update', extra={'request': request})
instance = self.get_queryset().get(pk=pk)
serializer = serializers.ListingSerializer(instance, data=request.data, context={'request': request}, partial=True)
# logger.debug('created ListingSerializer', extra={'request': request})
if not serializer.is_valid():
logger.error('{0!s}'.format(serializer.errors), extra={'request': request})
raise errors.ValidationException('{0}'.format(serializer.errors))
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
def partial_update(self, request, pk=None):
"""
TODO: Probably don't use this (PATCH)
"""
pass
class ListingUserViewSet(viewsets.ModelViewSet):
"""
Get all listings owned by this user
ModelViewSet for getting all ListingUserViewSets
Access Control
===============
- All users can view
URIs
======
GET /api/self/listing
Summary:
Get a list of all system-wide Listing User entries
Response:
200 - Successful operation - [ListingSerializer]
GET /api/self/listing/{pk}
Summary:
Find a ListingUserViewSet Entry by ID
Response:
200 - Successful operation - ListingSerializer
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.ListingSerializer
def get_queryset(self):
return model_access.get_self_listings(self.request.user.username)
def list(self, request):
return super(ListingUserViewSet, self).list(request)
class ListingSearchViewSet(viewsets.ModelViewSet):
"""
Search for listings
vaex.dataset.DatasetArrays):
merged = vaex.dataset.DatasetArrays({**self.dataset._columns, item: value})
else:
left = self.dataset
if item in self.dataset:
left = left.dropped(item)
right = vaex.dataset.DatasetArrays({item: value})
merged = left.merged(right)
self.df._dataset = merged
self.df._length = len(value)
if self.df._length_unfiltered is None:
self.df._length_unfiltered = self.df._length
self.df._length_original = self.df._length
self.df._index_end = self.df._length_unfiltered
def __iter__(self):
return iter(self.dataset)
def __getitem__(self, item):
return self.dataset[item]
class DataFrameLocal(DataFrame):
"""Base class for DataFrames that work with local file/data"""
def __init__(self, dataset=None, name=None):
if dataset is None:
dataset = vaex.dataset.DatasetArrays()
name = name or "no-name"
else:
name = name or dataset.name
super(DataFrameLocal, self).__init__(name)
self._dataset = dataset
if hasattr(dataset, 'units'):
self.units.update(dataset.units)
if hasattr(dataset, 'ucds'):
self.ucds.update(dataset.ucds)
self.column_names = list(self.dataset)
if len(self.dataset):
self._length = self.dataset.row_count
if self._length_unfiltered is None:
self._length_unfiltered = self._length
self._length_original = self._length
self._index_end = self._length_unfiltered
# self.path = dataset.path
self.mask = None
self.columns = ColumnProxy(self)
for column_name in self.column_names:
self._initialize_column(column_name)
def _fill_filter_mask(self):
if self.filtered and self._filter_filled is False:
task = vaex.tasks.TaskFilterFill(self)
# we also get the count, which is almost for free
@delayed
def set_length(count):
self._cached_filtered_length = int(count)
self._filter_filled = True
set_length(self.count(delay=True))
task = self.executor.schedule(task)
self.execute()
def __getstate__(self):
state = self.state_get(skip=[self.dataset])
return {
'state': state,
'dataset': self.dataset,
'_future_behaviour': self._future_behaviour,
}
def __setstate__(self, state):
self._init()
self.executor = get_main_executor()
self.columns = ColumnProxy(self)
dataset = state['dataset']
self._dataset = dataset
assert dataset.row_count is not None
self._length_original = dataset.row_count
self._length_unfiltered = self._length_original
self._cached_filtered_length = None
self._filter_filled = False
self._index_start = 0
self._index_end = self._length_original
self._future_behaviour = state['_future_behaviour']
self.state_set(state['state'], use_active_range=True, trusted=True)
@property
def dataset(self):
return self._dataset
@dataset.setter
def dataset(self, dataset):
if self._dataset.row_count != dataset.row_count:
self._length_original = dataset.row_count
self._length_unfiltered = self._length_original
self._cached_filtered_length = None
self._filter_filled = False
self._index_start = 0
self._index_end = self._length_original
self._dataset = dataset
self._invalidate_caches()
def hashed(self, inplace=False) -> DataFrame:
'''Return a DataFrame with a hashed dataset'''
df = self.copy() if not inplace else self
df.dataset = df.dataset.hashed()
return df
def _readonly(self, inplace=False):
# make arrays read only if possible
df = self if inplace else self.copy()
assert isinstance(self.dataset, vaex.dataset.DatasetArrays)
columns = {}
for key, ar in self.columns.items():
columns[key] = ar
if isinstance(ar, np.ndarray):
columns[key] = ar = ar.view() # make new object so we don't modify others
ar.flags['WRITEABLE'] = False
df._dataset = vaex.dataset.DatasetArrays(columns)
return df
_dict_mapping = {
pa.uint8(): pa.int16(),
pa.uint16(): pa.int32(),
pa.uint32(): pa.int64(),
pa.uint64(): pa.int64(),
}
def _auto_encode_type(self, expression, type):
if not self._future_behaviour:
return type
if self.is_category(expression):
if vaex.dtype(type).is_encoded:
return type # already encoded
value_type = vaex.array_types.to_arrow(self.category_labels(expression)).type
type = vaex.array_types.to_arrow_type(type)
type = self._dict_mapping.get(type, type)
type = pa.dictionary(type, value_type)
return type
def _auto_encode_data(self, expression, values):
if not self._future_behaviour:
return values
if vaex.array_types.is_arrow_array(values) and pa.types.is_dictionary(values.type):
return values
if self.is_category(expression):
dictionary = vaex.array_types.to_arrow(self.category_labels(expression))
offset = self.category_offset(expression)
if offset != 0:
values = values - offset
values = vaex.array_types.to_arrow(values)
if values.type in self._dict_mapping:
values = values.cast(self._dict_mapping[values.type])
if isinstance(values, pa.ChunkedArray):
chunks = [pa.DictionaryArray.from_arrays(k, dictionary) for k in values.chunks]
values = pa.chunked_array(chunks)
else:
values = pa.DictionaryArray.from_arrays(values, dictionary)
return values
@docsubst
def categorize(self, column, min_value=0, max_value=None, labels=None, inplace=False):
"""Mark column as categorical.
This may help speed up calculations using integer columns between a range of [min_value, max_value].
If max_value is not given, min_value and max_value are calculated from the data.
Example:
>>> import vaex
>>> df = vaex.from_arrays(year=[2012, 2015, 2019], weekday=[0, 4, 6])
>>> df = df.categorize('year', min_value=2012, max_value=2019)
>>> df = df.categorize('weekday', labels=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])
>>> df
# year weekday
0 2012 0
1 2015 4
2 2019 6
>>> df.is_category('year')
True
:param column: column to assume is categorical.
:param labels: labels to associate to each value, list(range(min_value, max_value+1)) by default
:param min_value: minimum integer value (if max_value is not given, this is calculated)
:param max_value: maximum integer value (if max_value is not given, this is calculated)
:param inplace: {inplace}
"""
df = self if inplace else self.copy()
column = _ensure_string_from_expression(column)
if df[column].dtype != int:
raise TypeError(f'Only integer columns can be marked as categorical, {column} is {df[column].dtype}')
if max_value is not None:
labels = list(range(min_value, max_value+1))
N = len(labels)
else:
vmin, vmax = df.minmax(column)
if labels is None:
N = int(vmax + 1)
labels = list(range(vmin, vmax+1))
min_value = vmin
else:
min_value = vmin
if (vmax - vmin) >= len(labels):
raise ValueError('value of {} found, which is larger than number of labels {}'.format(vmax, len(labels)))
df._categories[column] = dict(labels=labels, N=len(labels), min_value=min_value)
return df
def ordinal_encode(self, column, values=None, inplace=False, lazy=False):
"""Encode column as ordinal values and mark it as categorical.
The existing column is renamed to a hidden column and replaced by a numerical columns
with values between [0, len(values)-1].
:param lazy: When False, it will materialize the ordinal codes.
"""
column = _ensure_string_from_expression(column)
df = self if inplace else self.copy()
# for the codes, we need to work on the unfiltered dataset, since the filter
# may change, and we also cannot add an array that is smaller in length
df_unfiltered = df.copy()
# maybe we need some filter manipulation methods
df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME)
df_unfiltered._length_unfiltered = df._length_original
df_unfiltered.set_active_range(0, df._length_original)
expression = df_unfiltered[column]
if lazy:
if values is None:
found_values = df_unfiltered.unique(column, array_type='numpy-arrow')
minimal_type = vaex.utils.required_dtype_for_max(len(found_values), signed=True)
dtype = vaex.dtype_of(found_values)
if dtype == int:
min_value = found_values.min()
max_value = found_values.max()
if (max_value - min_value +1) == len(found_values):
warnings.warn(f'It seems your column {column} is already ordinal encoded (values between {min_value} and {max_value}), automatically switching to use df.categorize')
return df.categorize(column, min_value=min_value, max_value=max_value, inplace=inplace)
values = found_values
else:
values = expression.dtype.create_array(values)
fp = f'hash-map-unique-{expression.fingerprint()}'
hash_map_unique_name = fp.replace('-', '_')
hash_map_unique = vaex.hash.HashMapUnique.from_keys(values, fingerprint=fp)
df.add_variable(hash_map_unique_name, hash_map_unique)
expr = df._expr('hashmap_apply({}, {}, check_missing=True)'.format(column, hash_map_unique_name))
df[column] = expr
df._categories[column] = dict(labels=values, N=len(values), min_value=0)
return df # no else but return to avoid large diff
# codes point to the index of found_values
# meaning: found_values[codes[0]] == ds[column].values[0]
found_values, codes = df_unfiltered.unique(column, return_inverse=True, array_type='numpy-arrow')
max_code = codes.max()
minimal_type = vaex.utils.required_dtype_for_max(max_code, signed=True)
codes = codes.astype(minimal_type)
dtype = vaex.dtype_of(found_values)
if dtype == int:
min_value = found_values.min()
max_value = found_values.max()
if (max_value - min_value +1) == len(found_values):
warnings.warn(f'It seems your column {column} is already ordinal encoded (values between {min_value} and {max_value}), automatically switching to use df.categorize')
return df.categorize(column, min_value=min_value, max_value=max_value, inplace=inplace)
if isinstance(found_values, array_types.supported_arrow_array_types):
# elements of arrow arrays are not in arrow arrays, e.g. ar[0] in ar is False
# see tests/arrow/assumptions_test.py::test_in_pylist
found_values = found_values.to_pylist()
if values is None:
values = found_values
else:
# we have specified which values we should support, anything
# not found will be masked
translation = np.zeros(len(found_values), dtype=np.uint64)
# mark values that are in the column, but not in values with a special value
missing_value = len(found_values)
for i, found_value in enumerate(found_values):
try:
found_value = found_value.decode('ascii')
except:
pass
if found_value not in values: # not present, we need a missing value
translation[i] = missing_value
else:
translation[i] = values.index(found_value)
codes = translation[codes]
if missing_value in translation:
# all special values will be marked as missing
codes = np.ma.masked_array(codes, codes==missing_value)
original_column = df.rename(column, '__original_' + column, unique=True)
df.add_column(column, codes)
df._categories[column] = dict(labels=values, N=len(values), min_value=0)
return df
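# Usage sketch (illustrative):
#
#     df = vaex.from_arrays(color=['red', 'green', 'red', 'blue'])
#     df = df.ordinal_encode('color')   # column now holds small integer codes
#     df.is_category('color')           # True
#     df.category_labels('color')       # the original values, e.g. ['red', 'green', 'blue']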
# for backward compatibility
label_encode = _hidden(vaex.utils.deprecated('use ordinal_encode')(ordinal_encode))
@property
def data(self):
"""Gives direct access to the data as numpy arrays.
Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion.
Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use
DataFrame.evaluate(...).
Columns can be accessed by their names, which are attributes. The attributes are of type numpy.ndarray.
Example:
>>> df = vaex.example()
>>> r = np.sqrt(df.data.x**2 + df.data.y**2)
"""
class Datas(object):
pass
datas = Datas()
for name, array in self.columns.items():
setattr(datas, name, array[:])
return datas
def copy(self, column_names=None, treeshake=False):
'''Make a shallow copy of a DataFrame. One can also specify a subset of columns.
This is a fairly cheap operation, since no memory copies of the underlying data are made.
{note_copy}
:param list column_names: A subset of columns to use for the DataFrame copy. If None, all the columns are copied.
:param bool treeshake: Get rid of variables not used.
elif choice['which'] == 'os':
if choice['spool'] is not None:
self.gCommand.set_spool(choice['spool'])
useSudo = False
sudoUser = ""
if choice['SU'] is not None:
useSudo = True
sudoUser = choice['SU']
if choice['SH'] is None:
choice['SH'] = ""
if choice['repeat'] is None:
choice['repeat'] = [""]
if len(choice['cmd']) == 0:
os_parser.print_help()
else:
self.gCommand.os(cmd=" ".join(choice['cmd']), sudo=useSudo, sudoUser=sudoUser, shell=choice['SH'], repeat=" ".join(choice['repeat']))
elif choice['which'] == 'scp':
self.gCommand.scp(mode=choice['mode'], source=choice['src'], dest=choice['dest'], recursive=choice['recursive'], suffix=True, batch=choice['batch'])
elif choice['which'] == 'db2':
if choice['spool'] is not None:
self.gCommand.set_spool(choice['spool'])
if choice['USR'] is None:
choice['USR'] = 'current'
if choice['ENV'] is None:
choice['ENV'] = ''
else:
choice['ENV'] = ' '.join(choice['ENV']) + '; '
if choice['SH'] is None:
choice['SH'] = ""
if choice['IN']:
vlevel = "IN"
else:
vlevel = "DB"
if choice['repeat'] is None:
choice['repeat'] = [""]
if len(choice['cmd']) == 0:
db2_parser.print_help()
else:
self.gCommand.db2(command=" ".join(choice['cmd']), user=choice['USR'], env=choice['ENV'], shell=choice['SH'], level=vlevel, osmode=choice['OS'], repeat=" ".join(choice['repeat']))
elif choice['which'] == 'scan':
if choice['yes']:
self.gCommand.scan(dry=choice['dry'])
else:
parser.print_help()
except SystemExit:
pass
except Exception:
self.gLogging.error("cannot parse given arguments")
def do_exit(self, args):
"""
This method saves command history and exit program
"""
self.gLogging.debug("do_exit invoked")
atexit.register(readline.write_history_file, "{}/{}".format(gutils.gcpath(), self.gConfig['CMD']['histfile']))
self.gCommand.exit()
###Utilities
def header(self):
"""
This method gives a pretty header on start
"""
rows, columns = os.popen('stty size', 'r').read().split()
self.gLogging.show("".center(int(columns)))
self.gLogging.show("".center(int(columns)))
self.gLogging.show(Fore.RED + " ##### # ####### ###### # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# #### # # # ###### # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # # ####### # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + " ##### ####### ####### ###### # # #######".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + " ##### ####### # # ##### ####### # #######".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # ## # # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # ##### # # # ##### ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + "# # # # # ## # # # # # # ".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.RED + " ##### ####### # # ##### ####### ####### #######".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(" by <NAME>".center(int(columns)))
self.gLogging.show("".center(int(columns)))
self.gLogging.show("".center(int(columns)))
self.gLogging.show(Fore.LIGHTYELLOW_EX + "With Great Power Comes Great Responsibility".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.LIGHTWHITE_EX + self.gConfig['LOGGING']['gversion'].center(int(columns)) + Style.RESET_ALL)
self.gLogging.show("".center(int(columns)))
self.gLogging.show("".center(int(columns)))
if 'do_yaml' in [func for func in dir(GcConsole) if callable(getattr(GcConsole, func)) and not func.startswith("__")]:
self.gLogging.show("plugins:".center(int(columns)))
self.gLogging.show("".center(int(columns)))
self.gLogging.show(Fore.LIGHTYELLOW_EX + "Panaceum".center(int(columns)) + Style.RESET_ALL)
self.gLogging.show(Fore.LIGHTWHITE_EX + self.gConfig['PANACEUM']['pversion'].center(int(columns)) + Style.RESET_ALL)
try:
with urlopen("https://raw.githubusercontent.com/grzegorzcode/GlobalConsole/master/VERSION") as serv:
sversion = str(serv.readline()).split(".")
major = sversion[0].split("'")[1]
minor = sversion[1]
fix = sversion[2]
sversion = int("{major}{minor}{fix}".format(major=major, minor=minor, fix=fix))
sversionF = "{major}.{minor}.{fix}".format(major=major, minor=minor, fix=fix)
lversion = self.gConfig['LOGGING']['gversion'].split(".")
major = lversion[0]
minor = lversion[1]
fix = lversion[2]
lversion = int("{major}{minor}{fix}".format(major=major, minor=minor, fix=fix))
if sversion > lversion:
self.gLogging.show(Fore.LIGHTYELLOW_EX + "!! A NEW VERSION {new} available !!".format(new=sversionF).center(int(columns)) + Style.RESET_ALL)
except Exception:
self.gLogging.warning("cannot connect to github.com to check for a new version")
def emptyline(self):
"""
This method overrides the default behaviour of the ``cmd`` module so that pressing enter on an empty prompt does nothing instead of re-running the last command.
"""
pass
def postcmd(self, stop, line):
"""
This method shows number of active connections after every command
"""
if self.gConfig['CMD']['show_connections'] == 'YES':
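# \u001b[1000D moves the cursor back to column 0, \u001b[34m switches the
# colour to blue, and \u001b[0m resets the attributes afterwards.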
sys.stdout.write(u"\u001b[1000D" + u"\u001b[34m" + "active connections: " + str(len(self.gCommand.connections)) + " \n" + u"\u001b[0m")
sys.stdout.flush()
return stop
def do_shell(self, args):
"""
This method gives a possibility to run local os shell commands
"""
self.gLogging.debug("do_shell invoked")
description = "work with local shell"
try:
os.system(args)
except SystemExit:
pass
except Exception:
self.gLogging.error("cannot parse given arguments")
def complete_history(self, text, line, start_index, end_index):
"""
This method uses ``inlist`` variable to enable ``cmd`` module command autocompletion
"""
inlist = ['show', 'clear', 'run', 'find']
if text:
return [item for item in inlist if item.startswith(text)]
else:
return inlist
def do_history(self, args):
"""
This method handles all tasks related to management of history of commands
"""
self.gLogging.debug("do_history invoked")
description = "work with commands history"
try:
#import argcomplete
parser = argparse.ArgumentParser(prog="history", add_help=True, epilog=self.epilog, description=description, usage="history <command> [<args>]")
subparsers = parser.add_subparsers()
clear_parser = subparsers.add_parser('clear', description="clear history", usage="history clear <args>")
clear_parser.set_defaults(which='clear')
clear_parser.add_argument('-Y', '--yes', action='store_true', required=True, help="confirm")
show_parser = subparsers.add_parser('show', description="show history", usage="history show") #, aliases=['s']
show_parser.set_defaults(which='show')
rem_parser = subparsers.add_parser('run', description="run command again", usage="history run <args>")
rem_parser.set_defaults(which='run')
rem_parser.add_argument('-c', '--command', type=int, required=True, help="command number")
find_parser = subparsers.add_parser('find', description="find command", usage="history find <args>")
find_parser.set_defaults(which='find')
find_parser.add_argument('-c', '--command', type=str, required=True, help="command substring")
#completer = argcomplete.CompletionFinder(parser)
#readline.set_completer_delims("")
#readline.set_completer(completer.rl_complete)
#readline.parse_and_bind("tab: complete")
choice = vars(parser.parse_args(args.split()))
if len(args) == 0:
parser.print_help()
elif choice['which'] == 'clear':
if choice['yes']:
readline.clear_history()
else:
self.gLogging.show("skipped.. ")
elif choice['which'] == 'show':
for i in range(readline.get_current_history_length()):
print(i+1, readline.get_history_item(i + 1))
elif choice['which'] == 'run':
self.onecmd(readline.get_history_item(choice['command']))
elif choice['which'] == 'find':
for i in range(readline.get_current_history_length()):
if choice['command'] in readline.get_history_item(i + 1):
print(i+1, readline.get_history_item(i + 1))
else:
parser.print_help()
except SystemExit:
pass
except Exception:
self.gLogging.error("cannot parse given arguments")
def do_version(self, args):
"""
This method prints GC version
"""
try:
self.gLogging.show(self.gConfig['LOGGING']['gversion'])
except SystemExit:
pass
except Exception:
self.gLogging.error("cannot parse given arguments")
def complete_var(self, text, line, start_index, end_index):
"""
This method uses ``inlist`` variable to enable ``cmd`` module command autocompletion
"""
inlist = ['show', 'edit', 'rem', 'purge']
if text:
return [item for item in inlist if item.startswith(text)]
else:
return inlist
def do_var(self, args):
"""
This method handles all tasks related to management of variables
"""
self.gLogging.debug("do_var invoked")
description = "work with variables"
try:
parser = argparse.ArgumentParser(prog="var", add_help=True, epilog=self.epilog, description=description, usage="var <command> [<args>]")
subparsers = parser.add_subparsers()
show_parser = subparsers.add_parser('show', description="show variables", usage="var show <args>")
show_parser.set_defaults(which='show')
show_parser.add_argument('-r', '--reverse', action='store_true', help="reverse sort order")
edit_parser = subparsers.add_parser('edit', description="add or update variable", usage="var edit <args>")
edit_parser.set_defaults(which='edit')
edit_parser.add_argument('-V', '--varname', type=str, required=True, help="variable to add or update")
edit_parser.add_argument('-v', '--value', type=str, required=True, help="variable to add or update")
edit_parser.add_argument('-p', '--persistent', action='store_true', help="persistent variable if True")
rem_parser = subparsers.add_parser('rem', description="remove variable", usage="var rem <args>")
rem_parser.set_defaults(which='rem')
rem_parser.add_argument('-V', '--varname', type=str, required=True, help="variable to remove")
purge_parser = subparsers.add_parser('purge', description="purge variables", usage="var purge <args>")
purge_parser.set_defaults(which='purge')
purge_parser.add_argument('-Y', '--yes', action='store_true', required=True, help="confirm")
choice = vars(parser.parse_args(args.split()))
if len(args) == 0:
parser.print_help()
elif choice['which'] == 'show':
self.gCommand.gVars.getVars(sort_reverse=choice['reverse'])
elif choice['which'] == 'edit':
self.gCommand.gVars.updateVar(varname=choice['varname'], value=choice['value'], persistent=choice['persistent'])
elif choice['which'] == 'rem':
self.gCommand.gVars.removeVar(varname=choice['varname'])
elif choice['which'] == 'purge':
if choice['yes']:
self.gCommand.gVars.purgeVars()
else:
self.gLogging.show("skipped.. ")
else:
parser.print_help()
except SystemExit:
pass
except Exception:
self.gLogging.error("cannot parse given arguments")
def do_batch(self, args):
"""
This method gives a possibility to run commands being stored in a file
"""
self.gLogging.debug("do_batch invoked")
description = "work with batch files"
try:
parser = argparse.ArgumentParser(prog="batch", add_help=True, epilog=self.epilog, description=description, usage="batch -f <filename>")
parser.add_argument('-f', '--filename', type=str, required=True, help="batch file")
errLine = ""
errLineNr = 0
choice = parser.parse_args(args.split())
if choice.filename:
with open(choice.filename, 'r') as infile:
self.gCommand.chain_proceed = 1
self.gCommand.check = ([], True)
for x, line in enumerate(infile.readlines()):
line = line.strip("\n")
if len(line) > 0:
if not line.startswith("#") and not line.startswith("exit") and not line.startswith("quit"):
errLine = line
errLineNr = x+1
self.gLogging.show("command: %s" % line)
self.onecmd(line)
if self.gCommand.chain_proceed == 0:
raise AssertionError
self.gCommand.chain_proceed = 1
except AssertionError:
self.gLogging.info("--!--check failed on line:{}, command: {}".format(errLineNr, errLine))
#self.gLogging.info("--!--check condition: {}, is present: {}".format(" ".join(self.gCommand.check[0]), self.gCommand.check[1]))
self.gCommand.chain_proceed = 1
self.gCommand.check = ([], True)
except SystemExit:
pass
except Exception:
self.gLogging.error("cannot parse given arguments")
def complete_check(self, text, line, start_index, end_index):
"""
This method uses ``inlist`` variable to enable ``cmd`` module command autocompletion
"""
inlist = ['set', 'show']
if text:
return [item for item in inlist if item.startswith(text)]
else:
return inlist
def do_check(self, args):
"""
This method handles all tasks related to management of variables being used by ``batch`` method as a control points
"""
self.gLogging.debug("do_check invoked")
description = "check step in chain of commands"
try:
parser = argparse.ArgumentParser(prefix_chars='-', prog="check", add_help=True, epilog=self.epilog, description=description, usage="check <command> [<args>]")
subparsers = parser.add_subparsers()
set_parser = subparsers.add_parser('set', description="set text to check", usage="check set <args>")
set_parser.set_defaults(which='set')
set_parser.add_argument('cmd', | |
False, False, False, False],
[False, False, True, False, False, False, False],
[False, False, True, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_sp4(self):
table = self.table.clone
zone = "g1:g4"
table.set_span(zone)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 2, 3, 4, 5, 6, 7],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, True],
[False, False, False, False, False, False, True],
[False, False, False, False, False, False, True],
[False, False, False, False, False, False, True],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_sp4_merge(self):
table = self.table2.clone
zone = "g1:g4"
table.set_span(zone, merge=True)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
["a", "b", 1, "d", 3, 3, "3 3 3 7"],
[1, "", "C", "", 3, 3, None],
[1, 1, 1, 2, 3, 3, None],
[1, 2, 3, 4, 5, 6, None],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, True],
[False, False, False, False, False, False, True],
[False, False, False, False, False, False, True],
[False, False, False, False, False, False, True],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_sp5(self):
table = self.table.clone
zone = "a3:c4"
table.set_span(zone)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 2, 3, 4, 5, 6, 7],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[True, True, True, False, False, False, False],
[True, True, True, False, False, False, False],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_sp5_merge(self):
table = self.table2.clone
zone = "a3:c4"
table.set_span(zone, merge=True)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
["a", "b", 1, "d", 3, 3, 3],
[1, "", "C", "", 3, 3, 3],
["1 1 1 1 2 3", None, None, 2, 3, 3, 3],
[None, None, None, 4, 5, 6, 7],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[True, True, True, False, False, False, False],
[True, True, True, False, False, False, False],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_sp6(self):
table = self.table.clone
zone = "b3:f3"
table.set_span(zone)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 2, 3, 4, 5, 6, 7],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, True, True, True, True, True, False],
[False, False, False, False, False, False, False],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_sp6_2zone(self):
table = self.table.clone
zone = "b3:f3"
table.set_span(zone)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 2, 3, 4, 5, 6, 7],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, True, True, True, True, True, False],
[False, False, False, False, False, False, False],
],
)
zone2 = "a2:a4"
table.set_span(zone2)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3],
[1, 2, 3, 4, 5, 6, 7],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[True, True, True, True, True, True, False],
[True, False, False, False, False, False, False],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
],
)
self.assertEqual(table.del_span(zone2), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
],
)
def test_span_bigger(self):
table = self.table.clone
zone = "e2:i4"
table.set_span(zone)
# spanning changes only the display, not the values
self.assertEqual(
table.get_values(),
[
[1, 1, 1, 2, 3, 3, 3, None, None],
[1, 1, 1, 2, 3, 3, 3, None, None],
[1, 1, 1, 2, 3, 3, 3, None, None],
[1, 2, 3, 4, 5, 6, 7, None, None],
],
)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, True, True, True, True, True],
[False, False, False, False, True, True, True, True, True],
[False, False, False, False, True, True, True, True, True],
],
)
self.assertEqual(table.del_span(zone), True)
res = []
for r in table.get_cells():
test_row = []
for cell in r:
test_row.append(cell._is_spanned())
res.append(test_row)
self.assertEqual(
res,
[
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False, False],
],
)
def | |
"""
The module implements an abstraction for arrays used by the Zserio python extension.
"""
import typing
from zserio.bitposition import alignto
from zserio.bitsizeof import (bitsizeof_varuint16, bitsizeof_varuint32, bitsizeof_varuint64, bitsizeof_varuint,
bitsizeof_varint16, bitsizeof_varint32, bitsizeof_varint64, bitsizeof_varint,
bitsizeof_varsize, bitsizeof_string, bitsizeof_bitbuffer)
from zserio.bitreader import BitStreamReader
from zserio.bitwriter import BitStreamWriter
from zserio.bitbuffer import BitBuffer
from zserio.hashcode import calc_hashcode, HASH_SEED
from zserio.exception import PythonRuntimeException
class Array:
"""
Abstraction for arrays to which Zserio arrays are mapped in python.
"""
def __init__(self,
array_traits: typing.Any,
raw_array: typing.Optional[typing.List] = None,
*,
is_auto: bool = False,
is_implicit: bool = False,
set_offset_method: typing.Optional[typing.Callable[[int, int], None]] = None,
check_offset_method: typing.Optional[typing.Callable[[int, int], None]] = None) -> None:
"""
Constructor.
:param array_traits: Array traits which specify the array type.
:param raw_array: Native python list which will be held by this abstraction.
:param is_auto: True if mapped Zserio array is auto array.
:param is_implicit: True if mapped Zserio array is implicit array.
:param set_offset_method: Set offset method if mapped Zserio array is indexed offset array.
:param check_offset_method: Check offset method if mapped Zserio array is indexed offset array.
"""
self._raw_array: typing.List = [] if raw_array is None else raw_array
self._array_traits: typing.Any = array_traits
self._is_auto: bool = is_auto
self._is_implicit: bool = is_implicit
self._set_offset_method: typing.Optional[typing.Callable[[int, int], None]] = set_offset_method
self._check_offset_method: typing.Optional[typing.Callable[[int, int], None]] = check_offset_method
@classmethod
def from_reader(cls: typing.Type['Array'],
array_traits: typing.Any,
reader: BitStreamReader,
size: int = 0,
*,
is_auto: bool = False,
is_implicit: bool = False,
set_offset_method: typing.Optional[typing.Callable[[int, int], None]] = None,
check_offset_method: typing.Optional[typing.Callable[[int, int], None]] = None) -> 'Array':
"""
Constructs array and reads elements from the given bit stream reader.
:param array_traits: Array traits which specify the array type.
:param reader: Bit stream from which to read.
:param size: Number of elements to read (ignored in case of implicit or auto arrays).
:param is_auto: True if mapped Zserio array is auto array.
:param is_implicit: True if mapped Zserio array is implicit array.
:param set_offset_method: Set offset method if mapped Zserio array is indexed offset array.
:param check_offset_method: Check offset method if mapped Zserio array is indexed offset array.
:returns: Array instance filled using given bit stream reader.
"""
instance = cls(array_traits, is_auto=is_auto, is_implicit=is_implicit,
set_offset_method=set_offset_method, check_offset_method=check_offset_method)
instance.read(reader, size)
return instance
def __eq__(self, other: object) -> bool:
# it's enough to check only raw_array because compound types which call this are always the same type
if isinstance(other, Array):
return self._raw_array == other._raw_array
return False
def __hash__(self) -> int:
hashcode = HASH_SEED
for element in self._raw_array:
hashcode = calc_hashcode(hashcode, hash(element))
return hashcode
def __len__(self) -> int:
return len(self._raw_array)
def __getitem__(self, key: int) -> typing.Any:
return self._raw_array[key]
def __setitem__(self, key: int, value: typing.Any) -> None:
self._raw_array[key] = value
@property
def raw_array(self) -> typing.List:
"""
Gets raw array.
:returns: Native python list which is held by the array.
"""
return self._raw_array
def bitsizeof(self, bitposition: int) -> int:
"""
Returns length of array stored in the bit stream in bits.
:param bitposition: Current bit stream position.
:returns: Length of the array stored in the bit stream in bits.
"""
end_bitposition = bitposition
size = len(self._raw_array)
if self._is_auto:
end_bitposition += bitsizeof_varsize(size)
if self._array_traits.HAS_BITSIZEOF_CONSTANT and size > 0:
element_size = self._array_traits.bitsizeof()
if self._set_offset_method is None:
end_bitposition += size * element_size
else:
end_bitposition = alignto(8, end_bitposition)
end_bitposition += element_size + (size - 1) * alignto(8, element_size)
else:
for element in self._raw_array:
if self._set_offset_method is not None:
end_bitposition = alignto(8, end_bitposition)
end_bitposition += self._array_traits.bitsizeof(end_bitposition, element)
return end_bitposition - bitposition
def initialize_offsets(self, bitposition: int) -> int:
"""
Initializes indexed offsets for the array.
:param bitposition: Current bit stream position.
:returns: Updated bit stream position which points to the first bit after the array.
"""
end_bitposition = bitposition
size = len(self._raw_array)
if self._is_auto:
end_bitposition += bitsizeof_varsize(size)
for index in range(size):
if self._set_offset_method is not None:
end_bitposition = alignto(8, end_bitposition)
self._set_offset_method(index, end_bitposition)
end_bitposition = self._array_traits.initialize_offsets(end_bitposition, self._raw_array[index])
return end_bitposition
def read(self, reader: BitStreamReader, size: int = 0) -> None:
"""
Reads array from the bit stream.
:param reader: Bit stream from which to read.
:param size: Number of elements to read (ignored in case of implicit or auto arrays).
:raises PythonRuntimeException: If the array does not have elements with constant bit size.
"""
self._raw_array.clear()
if self._is_implicit:
if not self._array_traits.HAS_BITSIZEOF_CONSTANT:
raise PythonRuntimeException("Array: Implicit array elements must have constant bit size!")
element_size = self._array_traits.bitsizeof()
remaining_bits = reader.buffer_bitsize - reader.bitposition
read_size = remaining_bits // element_size
for index in range(read_size):
self._raw_array.append(self._array_traits.read(reader, index))
else:
if self._is_auto:
read_size = reader.read_varsize()
else:
read_size = size
for index in range(read_size):
if self._check_offset_method is not None:
reader.alignto(8)
self._check_offset_method(index, reader.bitposition)
self._raw_array.append(self._array_traits.read(reader, index))
def write(self, writer: BitStreamWriter) -> None:
"""
Writes array to the bit stream.
:param writer: Bit stream where to write.
"""
size = len(self._raw_array)
if self._is_auto:
writer.write_varsize(size)
for index in range(size):
if self._check_offset_method is not None:
writer.alignto(8)
self._check_offset_method(index, writer.bitposition)
self._array_traits.write(writer, self._raw_array[index])
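# A minimal round-trip sketch (illustration only, assuming the reader/writer API imported above):
#
#   traits = BitFieldArrayTraits(16)                # uint16 elements, defined below
#   array = Array(traits, [1, 2, 3], is_auto=True)  # auto array: a size prefix is written
#   writer = BitStreamWriter()
#   array.write(writer)                             # varsize(3) prefix + 3 x 16 bits
#   reader = BitStreamReader(writer.byte_array, writer.bitposition)
#   assert Array.from_reader(traits, reader, is_auto=True).raw_array == [1, 2, 3]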
class BitFieldArrayTraits:
"""
Array traits for unsigned fixed integer Zserio types (uint16, uint32, uint64, bit:5, etc...).
"""
HAS_BITSIZEOF_CONSTANT = True
def __init__(self, numbits: int) -> None:
"""
Constructor.
:param numbits: Number of bits for unsigned fixed integer Zserio type.
"""
self._numbits = numbits
def bitsizeof(self) -> int:
"""
Returns length of unsigned fixed integer Zserio type stored in the bit stream in bits.
:returns: Length of unsigned fixed integer Zserio type in bits.
"""
return self._numbits
def initialize_offsets(self, bitposition: int, _value: int) -> int:
"""
Initializes indexed offsets for unsigned fixed integer Zserio type.
:param bitposition: Current bit stream position.
:param _value: Not used.
:returns: Updated bit stream position which points to the first bit after unsigned fixed integer type.
"""
return bitposition + self.bitsizeof()
def read(self, reader: BitStreamReader, _index: int) -> int:
"""
Reads unsigned fixed integer Zserio type from the bit stream.
:param reader: Bit stream from which to read.
:param _index: Not used.
:returns: Unsigned fixed integer value read from the bit stream.
"""
return reader.read_bits(self._numbits)
def write(self, writer: BitStreamWriter, value: int) -> None:
"""
Writes unsigned fixed integer Zserio type to the bit stream.
:param writer: Bit stream where to write.
:param value: Unsigned fixed integer Zserio type to write.
"""
writer.write_bits(value, self._numbits)
class SignedBitFieldArrayTraits:
"""
Array traits for signed fixed integer Zserio types (int16, int32, int64, int:5, etc...).
"""
HAS_BITSIZEOF_CONSTANT = True
def __init__(self, numbits: int) -> None:
"""
Constructor.
:param numbits: Number of bits for signed fixed integer Zserio type.
"""
self._numbits = numbits
def bitsizeof(self) -> int:
"""
Returns length of signed fixed integer Zserio type stored in the bit stream in bits.
:returns: Length of signed fixed integer Zserio type in bits.
"""
return self._numbits
def initialize_offsets(self, bitposition: int, _value: int) -> int:
"""
Initializes indexed offsets for signed fixed integer Zserio type.
:param bitposition: Current bit stream position.
:param _value: Not used.
:returns: Updated bit stream position which points to the first bit after signed fixed integer type.
"""
return bitposition + self.bitsizeof()
def read(self, reader: BitStreamReader, _index: int) -> int:
"""
Reads signed fixed integer Zserio type from the bit stream.
:param reader: Bit stream from which to read.
:param _index: Not used.
:returns: Signed fixed integer value read from the bit stream.
"""
return reader.read_signed_bits(self._numbits)
def write(self, writer: BitStreamWriter, value: int) -> None:
"""
Writes signed fixed integer Zserio type to the bit stream.
:param writer: Bit stream where to write.
:param value: Signed fixed integer Zserio type to write.
"""
writer.write_signed_bits(value, self._numbits)
class VarUInt16ArrayTraits:
"""
Array traits for Zserio varuint16 type.
"""
HAS_BITSIZEOF_CONSTANT = False
@staticmethod
def bitsizeof(_bitposition: int, value: int) -> int:
"""
Returns length of Zserio varuint16 type stored in the bit stream in bits.
:param _bitposition: Not used.
:param value: Zserio varuint16 type value.
:returns: Length of given Zserio varuint16 type in bits.
"""
return bitsizeof_varuint16(value)
@staticmethod
def initialize_offsets(bitposition: int, value: int) -> int:
"""
Initializes indexed offsets for Zserio varuint16 type.
:param bitposition: Current bit stream position.
:param value: Zserio varuint16 type value.
:returns: Updated bit stream position which points to the first bit after Zserio varuint16 type.
"""
return bitposition + VarUInt16ArrayTraits.bitsizeof(bitposition, value)
@staticmethod
def read(reader: BitStreamReader, _index: int) -> int:
"""
Reads | |
import os
import os.path
import random
from operator import add
from datetime import datetime, date, timedelta
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import shutil
import ema_workbench
import time
## Step 2: Function for initiating the main dictionary of climate stations
def create_dic(a):
'''Function: creating an empty parameter dictionary for a climate station (the argument a is currently unused)'''
keys = ['fM', 'iPot', 'rSnow', 'dSnow', 'cPrec', 'dP', 'elev', 'lat', 'long', 'fileName']
a = {key: None for key in keys}
return a
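# e.g. create_dic('any_station') returns {'fM': None, 'iPot': None, 'rSnow': None, 'dSnow': None,
# 'cPrec': None, 'dP': None, 'elev': None, 'lat': None, 'long': None, 'fileName': None}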
def initialize_input_dict(mainFolderSki):
''' This function returns a list of station dictionaries and the paths of the input folders'''
'''Step 1'''
rootFolder = mainFolderSki
inputFolder = os.path.join(rootFolder,'input')
ablationFolder = os.path.join(inputFolder, 'Ablation')
accumulationFolder = os.path.join(inputFolder, 'Accumulation')
climate_ref_Folder = os.path.join(inputFolder, 'Climate_ref')
climate_Ref_Folder_org = os.path.join(inputFolder, 'Climate_ref_no_randomness_0')
climate_ref_Folder_rand_1 = os.path.join(inputFolder, 'Climate_ref_randomness_1')
climate_ref_Folder_rand_2 = os.path.join(inputFolder, 'Climate_ref_randomness_2')
'''Step 2: Reading all files names inside the Ablation, Accumulation, and Climate folders'''
ablationFiles = []
for filename in os.walk(ablationFolder):
ablationFiles = filename[2]
accumulationFiles = list()
for filename in os.walk(accumulationFolder):
accumulationFiles = filename[2]
climate_ref_Files = list()
for filename in os.walk(climate_ref_Folder):
climate_ref_Files = filename[2]
'''Step 3: Reading files inside ablation folder '''
os.chdir(ablationFolder)
with open(ablationFiles[0], 'r') as file:
FM1 = file.read()
with open(ablationFiles[1], 'r') as file:
Ipot1 = file.read()
with open(ablationFiles[2], 'r') as file:
Rsnow1 = file.read()
'''Step 4: Reading the lines of files inside ablation folder'''
FM1 = FM1.replace('\n', '\t')
FM1 = FM1.split('\t')
Ipot1 = Ipot1.replace('\n', '\t').split('\t')
Rsnow1 = Rsnow1.replace('\n', '\t').split('\t')
'''Step 5: Reading the lines of files inside accumulation folder'''
os.chdir(accumulationFolder)
with open(accumulationFiles[0], 'r') as file:
cPrec = file.read()
with open(accumulationFiles[1], 'r') as file:
dSnow1 = file.read()
cPrec = cPrec.replace('\n', '\t')
cPrec = cPrec.split('\t')
dSnow1 = dSnow1.replace('\n', '\t').split('\t')
'''Step 6: Reading the lines of files inside climate folder'''
os.chdir(climate_ref_Folder)
with open('pcp.txt', 'r') as file:
pcpData = file.read()
with open('tmp.txt', 'r') as file:
tmpData = file.read()
pcpData = pcpData.split('\n')
for i in range(len(pcpData)):
pcpData[i] = pcpData[i].split(',')
'''Step 7: Initializing the input dictionary of climate stations which holds the information of accumulation,
ablation, etc. of the stations'''
nameStn = []
for file in climate_ref_Files:
if 'p.csv' in file:
#nameStn.append('n_' + file[-25: -5])
nameStn.append(file[-25: -5])
stnDicts = []
for i in range(len(nameStn)):
stnDicts.append(create_dic(nameStn[i]))
'''Step 8: Assigning the file names to the dictionary'''
for i in range (len(nameStn)):
stnDicts[i]['fileName'] = nameStn[i]
'''Step 9: Assigning the accumulation and ablation values'''
for stnDict in stnDicts:
for i, element in enumerate(FM1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['fM'] = FM1[i+1]
for i, element in enumerate(Ipot1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['iPot'] = Ipot1[i+1]
for i, element in enumerate(Rsnow1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['rSnow'] = Rsnow1[i+1]
for i, element in enumerate(dSnow1):
if element == stnDict['fileName'][:]:
#if element == stnDict['fileName'][2:]:
stnDict['dSnow'] = dSnow1[i+1]
for i, element in enumerate(cPrec):
stnDict['cPrec'] = cPrec[1]
stnDict['dP'] = cPrec[3]
'''Step 10: Assigning the elevation, Lat and long to the dictionaries'''
for i in range(len(stnDicts)):
for j in range(1, len(pcpData)):
#if pcpData[j][1][2:-1] == stnDicts[i]['fileName'][2:]:
if pcpData[j][1][:-1] == stnDicts[i]['fileName'][:]:
stnDicts[i]['lat']= pcpData[j][2]
stnDicts[i]['long']= pcpData[j][3]
stnDicts[i]['elev']= pcpData[j][4]
return stnDicts, inputFolder, ablationFolder, accumulationFolder, climate_ref_Folder, climate_Ref_Folder_org, \
climate_ref_Folder_rand_1, climate_ref_Folder_rand_2
# Step 3 Snow Model
## S3.1 Initializing the main dictionary for a case study
caseStudyStns = {}
inputFolder = ''
ablationFolder = ''
accumulationFolder = ''
climateFolder = ''
climateFolder_org = ''
climateFolder1 = ''
climateFolder2 = ''
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case1_sattel-hochstuckli'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case2_Atzmaening'
root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case3_hoch-ybrig\setup1'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b1339'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b1822'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b2000'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b2500'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case5_champex'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b1564'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2141'
#root = r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2584'
## calling the function with multiple return values
caseStudyStns, inputFolder, ablationFolder, accumulationFolder, climateFolder, climateFolder_org, \
climateFolder1, climateFolder2 = initialize_input_dict(root)
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
## 1st column as index: making dates from 01 01 1981 to 2099 12 31
from datetime import timedelta, date
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date ).days + 1)):
yield start_date + timedelta(n)
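# e.g. list(daterange(date(1981, 1, 1), date(1981, 1, 3))) yields 1981-01-01, 1981-01-02 and 1981-01-03 (end date inclusive)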
### OR Let's make this function in a more OOP way:
class Policy_Ski:
def __init__(self, x1SnowThershold):
self.x1SnowThershold = x1SnowThershold
def policy_release2(self):
return(self.x1SnowThershold)
def policy_release3(self):
''' This function should build an evaluation matrix for the 100-day minimum operating condition'''
pass
class Economic_Model_Ski:
def __init__(self, xCostDay, xRevenueDay):
self.costDayFixed = xCostDay
self.revenueDayFixed = xRevenueDay
def economic_costDay(self):
return(self.costDayFixed)
def economic_revenueDay(self):
return(self.revenueDayFixed)
class RCP_Model:
def __init__(self, xRCP, xClimateModel):
self.input1 = round(xRCP)
#self.input1 = xRCP
self.input2 = xClimateModel
def rcpGenerator(self):
if self.input1 == 1:
RCP = str(2.6)
rcpInt = 1
if self.input1 == 2:
RCP = str(4.5)
rcpInt = 2
if self.input1 == 3:
RCP = str(8.5)
rcpInt = 3
return(RCP, rcpInt)
def climateModel(self):
a, b = RCP_Model.rcpGenerator(self)
if b == 1:
climateModel = round(self.input2*11)
elif b == 2:
climateModel = 11 + max(1,round(self.input2*25))
else:
climateModel = 36 + max(1, round(self.input2*31))
return (int(climateModel))
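# e.g. RCP_Model(1, 0.5).rcpGenerator() returns ('2.6', 1), and .climateModel() returns round(0.5 * 11) = 6,
# i.e. the 6th model index within the RCP 2.6 ensemble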
def tipping_points_freq(df, xGoodDays):
"""
This function calculates the frequency of tipping points for each individual resort
"""
dfColumns= df.columns
scenarios_length= len(dfColumns)
simulations_Length = len(df[dfColumns[1]])
tipping_freq = np.zeros(scenarios_length)
for i in range (1, scenarios_length, 1):
m = 0
for j in range (1 , simulations_Length, 1):
if float(df[dfColumns[i]].iloc[j]) < xGoodDays:
m += 1
if m == 3:
tipping_freq[i] += 1
m = 0
else:
m = 0
continue
#break
return tipping_freq
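# e.g. with xGoodDays = 100, a scenario column [120, 90, 80, 70, 130, ...] contains one run of three
# consecutive seasons below the threshold (90, 80, 70), so its tipping frequency is incremented once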
# XLR Framework
def snow_Model (xRCP=None, xClimateModel=None, Xfactor1 = None, X2fM = None, X3iPot = None, X4rSnow = None,
X5temp = None, X6tempArt = None, xCostDay = None, xRevenueDay = None, x1SnowThershold = None,
xGoodDays = None):
''' This function controls the ski resort model in an XLR framework'''
''' VERY IMPORTANT --- Controlling the randomness --- VERY IMPORTANT'''
xClimateRandomness = round(Xfactor1)
if (xClimateRandomness == 1):
os.chdir(climateFolder_org)
src = os.getcwd()
os.chdir(climateFolder)
dst = os.getcwd()
#copytree(src, dst)
print('Original CH2018 is being used')
elif (xClimateRandomness == 2) :
os.chdir(climateFolder1)
src = os.getcwd()
os.chdir(climateFolder)
dst = os.getcwd()
#copytree(src, dst)
print('Random Climate realization version 1 is being used')
else:
os.chdir(climateFolder2)
src = os.getcwd()
os.chdir(climateFolder)
dst = os.getcwd()
#copytree(src, dst)
print('Random Climate realization version 2 is being used')
os.chdir(climateFolder)
fnames = os.listdir()
#randomness_pcp_tmp(fnames, Xfactor1)
print('Snow_Model: Matching the station names values with CSV files!')
'''Matching the station names in the dictionary of stations with the CSV files in the Climate folder of the case study'''
pcpCaseStudy = []
tmpCaseStudy = []
if (xClimateRandomness == 1):
for i in range(len(caseStudyStns)):
pcpCaseStudy.append(os.path.join(climateFolder, caseStudyStns[i]['fileName'] + 'p.csv'))
tmpCaseStudy.append(os.path.join(climateFolder, caseStudyStns[i]['fileName'] + 't.csv'))
elif (xClimateRandomness == 2) :
for i in range(len(caseStudyStns)):
pcpCaseStudy.append(os.path.join(climateFolder1, caseStudyStns[i]['fileName'] + 'p.csv'))
tmpCaseStudy.append(os.path.join(climateFolder1, caseStudyStns[i]['fileName'] + 't.csv'))
else:
for i in range(len(caseStudyStns)):
pcpCaseStudy.append(os.path.join(climateFolder2, caseStudyStns[i]['fileName'] + 'p.csv'))
tmpCaseStudy.append(os.path.join(climateFolder2, caseStudyStns[i]['fileName'] + 't.csv'))
print('Snow_Model: Building a database for each csv file (tmp and pcp)!')
'''Step 6: building a database for each precipitation and temperature file in Climate folder and saving them in a list'''
'''6.1 reading the csv files as databases'''
dfpcp = [None for _ in range(len(pcpCaseStudy))]
dftmp = [None for _ in range(len(tmpCaseStudy))]
for i in range(len(pcpCaseStudy)):
dfpcp[i] = pd.read_csv(pcpCaseStudy[i])
dftmp[i] = pd.read_csv(tmpCaseStudy[i])
'''6.2 making a header for output files'''
dfpcpCol = dfpcp[0].columns
dftmpCol = dftmp[0].columns
'''6.3 defining the length of simulations and scenarios'''
scenariosLength = len(dfpcpCol)
simulationLength = len(dftmp[0][dftmpCol[0]]) - 1
'''Reading the beginning and end of the simulation'''
start_date = date(1981, 1, 1)
end_date = date(2099, 12, 31)
dateList = []
for single_date in daterange(start_date, end_date):
dateList.append(single_date.strftime("%m/%d/%Y"))
seasonList = []
for n in range (1981, 2100, 1):
seasonList.append(str(n))
print('Snow_Model: Part 1 Running the model, daily output!')
'''################################ PART1 ################################'''
'''Running the model for each climate station:'''
for k in range(len(caseStudyStns)):
'''making a header for output files'''
dfpcpCol = dfpcp[k].columns
dftmpCol = dftmp[k].columns
#X2fM = caseStudyStns[k].get("fM") # change 0 to i for all stations
#X3iPot = caseStudyStns[k].get("iPot")
#X4rSnow = caseStudyStns[k].get("rSnow")
'''defining the length | |
# -*- coding: utf-8 -*-
"""MixNet - S / M / L, MicroNet.
* Note: SHRINKING IS NOT SUPPORTED!
- Author: Curt-Park
- Email: <EMAIL>
- Paper: https://arxiv.org/abs/1907.09595
- Differences from the original model:
Every Mixblock has a skip connection
Swish function replaced with HSwish
Mixblock doesn't use group conv operation
Squeeze-and-Excitation is located behind projection
- Reference:
https://github.com/leaderj1001/Mixed-Depthwise-Convolutional-Kernels
https://github.com/Kthyeon/micronet_neurips_challenge
"""
from typing import Any, Dict, List, Tuple
import torch
import torch.nn as nn
from src.models.common_activations import HSwish
from src.models.common_layers import (
ConvBN,
ConvBNReLU,
Identity,
MDConvBlock,
SqueezeExcitation,
)
def round_filters(
n_filters: int, multiplier: float = 1.0, divisor: int = 8, min_depth: int = None
) -> int:
"""Get the number of channels."""
if not multiplier:
return n_filters
n_filters = int(n_filters * multiplier)
min_depth = min_depth or divisor
n_filters_new = max(min_depth, int(n_filters + divisor / 2) // divisor * divisor)
if n_filters_new < 0.9 * n_filters:
n_filters_new += divisor
return n_filters_new
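# e.g. round_filters(200, multiplier=0.75) -> max(8, int(150 + 4) // 8 * 8) = 152; since 152 >= 0.9 * 150,
# no divisor correction is applied, and round_filters(16) stays 16 with the default multiplier of 1.0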
class MixBlock(nn.Module):
"""MixBlock: Using different kernel sizes for each channel chunk."""
def __init__(
self,
in_channels: int,
out_channels: int,
n_chunks: int,
stride: int,
expand_ratio: float,
se_ratio: float,
hswish: bool,
) -> None:
"""Initialize."""
super(MixBlock, self).__init__()
self.in_channels = in_channels
self.exp_channels = int(in_channels * expand_ratio)
self.out_channels = out_channels
self.n_chunks = n_chunks
self.stride = stride
self.expand_ratio = expand_ratio
self.has_se = se_ratio is not None
self.se_ratio = se_ratio
self.hswish = hswish
self.expand_conv = Identity()
if self.in_channels != self.exp_channels:
self.expand_conv = (
nn.Sequential(
ConvBN(self.in_channels, self.exp_channels, kernel_size=1),
HSwish(inplace=True),
)
if self.hswish
else ConvBNReLU(self.in_channels, self.exp_channels, kernel_size=1)
)
self.mdconv = nn.Sequential(
MDConvBlock(
self.exp_channels,
n_chunks=self.n_chunks,
stride=self.stride,
with_relu=not self.hswish,
),
Identity() if not self.hswish else HSwish(inplace=True),
)
self.proj_conv = ConvBN(self.exp_channels, self.out_channels, kernel_size=1)
self.se = (
SqueezeExcitation(self.out_channels, self.se_ratio)
if self.has_se
else Identity()
)
self.downsample = (
ConvBN(
self.in_channels, self.out_channels, kernel_size=1, stride=self.stride
)
if self.stride != 1 or self.in_channels != self.out_channels
else Identity()
)
def _add(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""Sum two tensors (elementwise)."""
return x + y
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward."""
out = self.expand_conv(x)
out = self.mdconv(out)
out = self.proj_conv(out)
out = self.se(out)
out = self._add(out, self.downsample(x))
return out
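# A shape-level usage sketch (illustration only; assumes the custom layers imported above
# downsample by the given stride as their names suggest):
#
#   block = MixBlock(in_channels=16, out_channels=24, n_chunks=2, stride=2,
#                    expand_ratio=6, se_ratio=0.5, hswish=True)
#   out = block(torch.randn(1, 16, 32, 32))  # expected shape: torch.Size([1, 24, 16, 16])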
class MixNet(nn.Module):
"""MixNet architecture."""
def __init__(
self,
stem: int,
stem_stride: int,
head: int,
last_out_channels: int,
block_args: Tuple[List[Any], ...],
dropout: float = 0.2,
num_classes: int = 1000,
Block: "type" = MixBlock,
) -> None:
"""Initialize."""
super(MixNet, self).__init__()
self.block_args = block_args
self.stem = nn.Sequential(
ConvBN(
in_channels=3,
out_channels=stem,
kernel_size=3,
stride=stem_stride,
),
HSwish(inplace=True),
)
layers = []
for (
in_channels,
out_channels,
n_chunks,
stride,
expand_ratio,
se_ratio,
hswish,
) in block_args:
layers.append(
Block(
in_channels=in_channels,
out_channels=out_channels,
n_chunks=n_chunks,
stride=stride,
expand_ratio=expand_ratio,
se_ratio=se_ratio,
hswish=hswish,
)
)
self.layers = nn.Sequential(*layers)
if head:
self.head = nn.Sequential(
ConvBN(
in_channels=last_out_channels,
out_channels=head,
kernel_size=1,
),
HSwish(inplace=True),
)
else:
self.head = Identity()
head = last_out_channels
self.adapt_avg_pool2d = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(p=dropout)
self.fc = nn.Linear(head, num_classes)
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
"""Actual forward procedure."""
out = self.stem(x)
out = self.layers(out)
out = self.head(out)
out = self.adapt_avg_pool2d(out)
out = torch.flatten(out, 1)
out = self.fc(out)
return out
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward."""
return self._forward_impl(x)
def get_model_kwargs(model_type: str, num_classes: int, dataset: str) -> Dict[str, Any]:
"""Return the model kwargs according to the momdel type."""
if model_type == "MICRONET":
kwargs = micronet(num_classes=num_classes, dataset=dataset)
elif model_type == "S":
kwargs = mixnet_s(num_classes=num_classes, dataset=dataset)
elif model_type == "M":
kwargs = mixnet_m(num_classes=num_classes, dataset=dataset)
elif model_type == "L":
kwargs = mixnet_l(num_classes=num_classes, dataset=dataset)
else:
raise NotImplementedError
return kwargs
def get_model(model_type: str, num_classes: int, dataset: str) -> nn.Module:
"""Constructs a MixNet model."""
kwargs = get_model_kwargs(model_type, num_classes, dataset)
return MixNet(**kwargs)
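# e.g. get_model("S", num_classes=100, dataset="CIFAR100") builds MixNet-S with the stride-1 stem defined below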
def micronet(
num_classes: int = 100,
multiplier: float = 1.0,
divisor: int = 8,
min_depth: int = None,
dataset: str = "IMAGENET",
) -> Dict[str, Any]:
"""Build MixNet-SS."""
if dataset == "CIFAR100":
# in_channels, out_channels, n_chunks, stride, expand_ratio, se_ratio, hswish
small = (
[32, 16, 1, 1, 3, None, False],
[16, 16, 1, 1, 3, None, False],
[16, 32, 1, 2, 3, None, False],
[32, 32, 1, 1, 3, 0.25, True],
[32, 48, 1, 1, 3, 0.25, True],
[48, 48, 1, 1, 3, 0.25, True],
[48, 48, 1, 1, 3, 0.25, True],
[48, 72, 1, 2, 3, 0.25, True],
[72, 72, 1, 1, 3, 0.25, True],
[72, 72, 1, 1, 3, 0.25, True],
[72, 72, 1, 1, 3, 0.25, True],
[72, 72, 1, 1, 3, 0.25, True],
[72, 80, 1, 2, 3, 0.25, True],
[80, 88, 1, 1, 3, 0.25, True],
[88, 88, 1, 1, 3, 0.25, True],
[88, 106, 1, 1, 3, 0.25, True],
)
stem = 32
stem_stride = 1
last_out_channels = 106
dropout = 0.3
else:
raise NotImplementedError
return dict(
stem=stem,
stem_stride=stem_stride,
head=0, # head not used
last_out_channels=last_out_channels,
block_args=small,
num_classes=num_classes,
dropout=dropout,
)
def mixnet_s(
num_classes: int = 100,
multiplier: float = 1.0,
divisor: int = 8,
min_depth: int = None,
dataset: str = "IMAGENET",
) -> Dict[str, Any]:
"""Build MixNet-S."""
if dataset == "IMAGENET":
# in_channels, out_channels, n_chunks, stride, expand_ratio, se_ratio, hswish
small = (
[16, 16, 1, 1, 1, None, False],
[16, 24, 1, 2, 6, None, False],
[24, 24, 1, 1, 3, None, False],
[24, 40, 3, 2, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 80, 3, 2, 6, 0.25, True],
[80, 80, 2, 1, 6, 0.25, True],
[80, 80, 2, 1, 6, 0.25, True],
[80, 120, 3, 1, 6, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 2, 3, 0.5, True],
[120, 200, 5, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
)
stem = round_filters(16, multiplier)
stem_stride = 2
last_out_channels = round_filters(200, multiplier)
head = round_filters(1536, multiplier)
elif dataset == "CIFAR100":
small = (
[16, 16, 1, 1, 1, None, False],
[16, 24, 1, 1, 6, None, False],
[24, 24, 1, 1, 3, None, False],
[24, 40, 3, 2, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 80, 3, 2, 6, 0.25, True],
[80, 80, 2, 1, 6, 0.25, True],
[80, 80, 2, 1, 6, 0.25, True],
[80, 120, 3, 1, 6, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 2, 3, 0.5, True],
[120, 200, 5, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
)
stem = round_filters(16, multiplier)
stem_stride = 1
last_out_channels = round_filters(200, multiplier)
head = round_filters(1536, multiplier)
else:
raise NotImplementedError
return dict(
stem=stem,
stem_stride=stem_stride,
head=head,
last_out_channels=last_out_channels,
block_args=small,
num_classes=num_classes,
)
def mixnet_m(
num_classes: int = 1000,
multiplier: float = 1.0,
divisor: int = 8,
min_depth: int = None,
dataset: str = "IMAGENET",
) -> Dict[str, Any]:
"""Build MixNet-M."""
if dataset == "IMAGENET":
medium: Tuple[List[Any], ...] = (
[24, 24, 1, 1, 1, None, False],
[24, 32, 3, 2, 6, None, False],
[32, 32, 1, 1, 3, None, False],
[32, 40, 4, 2, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 80, 3, 2, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 120, 1, 1, 6, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 200, 4, 2, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
)
stem = round_filters(24, multiplier)
stem_stride = 2
last_out_channels = round_filters(200, multiplier)
head = round_filters(1536, multiplier=1.0)
elif dataset == "CIFAR100":
medium = (
[24, 24, 1, 1, 1, None, False],
[24, 32, 3, 1, 6, None, False],
[32, 32, 1, 1, 3, None, False],
[32, 40, 4, 2, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, | |
import argparse
import collections
import copy
import datetime
import enum
import json
import logging
import os.path
import re
import sys
import traceback
import typing
import uuid
import yaml
import dateparser
import git
import GPUtil
import d3m
from d3m import container, environment_variables, exceptions, utils, types
from d3m.metadata import base as metadata_base, hyperparams as hyperparams_module, pipeline as pipeline_module, problem
from d3m.primitive_interfaces import base
__all__ = ('PipelineRun', 'User', 'RuntimeEnvironment')
logger = logging.getLogger(__name__)
DOCKER_MAC_ADDRESS_MASK = 0x0242ac110000
PROC_INFO_RE = re.compile(r'^([^:]+?)\s*:\s*(.*)$')
PROC_MEMORY_PATH = '/proc/meminfo'
PROC_CPU_PATH = '/proc/cpuinfo'
PROC_CPU_MODEL_NAME_KEY = 'model name'
PROC_CPU_PHYSICAL_ID_KEY = 'physical id'
PROC_CPU_CORES_KEY = 'cpu cores'
PROC_TOTAL_MEMORY_KEY = 'MemTotal'
CGROUP_MEMORY_LIMIT_PATH = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
CGROUP_CPU_SHARES_PATH = '/sys/fs/cgroup/cpu/cpu.shares'
CGROUP_CPU_CFS_PERIOD_US_PATH = '/sys/fs/cgroup/cpu/cpu.cfs_period_us'
CGROUP_CPU_CFS_QUOTA_US_PATH = '/sys/fs/cgroup/cpu/cpu.cfs_quota_us'
WORKER_ID_NAMESPACE = uuid.UUID('2e4b9ab7-2207-4975-892b-0e01bf95babf')
# Comma because we unpack the list of validators returned from "load_schema_validators".
PIPELINE_RUN_SCHEMA_VALIDATOR, = utils.load_schema_validators(metadata_base.SCHEMAS, ('pipeline_run.json',))
PIPELINE_RUN_SCHEMA_VERSION = 'https://metadata.datadrivendiscovery.org/schemas/v0/pipeline_run.json'
class User(dict):
def __init__(self, id_: str, chosen: bool = False, rationale: str = None) -> None:
super().__init__()
self['id'] = id_
self['chosen'] = chosen
if rationale is not None:
self['rationale'] = rationale
@classmethod
def _yaml_representer(cls, dumper: yaml.Dumper, data: typing.Any) -> typing.Any:
return dumper.represent_dict(data)
utils.yaml_add_representer(User, User._yaml_representer)
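# For example, User('user-1', chosen=True, rationale='picked manually') is a plain dict
# {'id': 'user-1', 'chosen': True, 'rationale': 'picked manually'} and serializes as such to YAML.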
class PipelineRunStep:
def __init__(
self, step_type: metadata_base.PipelineStepType, start: str, environment: typing.Dict[str, typing.Any] = None
) -> None:
self.type = step_type
self.status: typing.Dict[str, typing.Any] = {}
self.start: str = start
self.end: typing.Optional[str] = None
self.environment = environment
def to_json_structure(self) -> typing.Dict:
if self.start is None:
raise exceptions.InvalidStateError("Start timestamp not set.")
if self.end is None:
raise exceptions.InvalidStateError("End timestamp not set.")
if 'state' not in self.status:
raise exceptions.InvalidStateError("Status not set.")
json_structure = {
'type': self.type.name,
'status': self.status,
'start': self.start,
'end': self.end
}
if self.environment is not None:
json_structure['environment'] = self.environment
return json_structure
def set_successful(self, message: str = None) -> None:
self.status['state'] = metadata_base.PipelineRunStatusState.SUCCESS.name
if message is not None and message:
self.status['message'] = message
def set_failed(self, message: str = None) -> None:
self.status['state'] = metadata_base.PipelineRunStatusState.FAILURE.name
if message is not None and message:
self.status['message'] = message
def set_end_timestamp(self) -> None:
self.end = utils.datetime_for_json(utils.now())
class PipelineRunPrimitiveStep(PipelineRunStep):
def __init__(
self, step: pipeline_module.PrimitiveStep, start: str, environment: typing.Dict[str, typing.Any] = None,
) -> None:
super().__init__(
step_type=metadata_base.PipelineStepType.PRIMITIVE,
start=start,
environment=environment
)
self.hyperparams: typing.Optional[hyperparams_module.Hyperparams] = None
self.pipeline_hyperparams: typing.Optional[typing.Set[str]] = None
self.random_seed: typing.Optional[int] = None
self.method_calls: typing.List[typing.Dict[str, typing.Any]] = []
self.arguments = step.arguments
def to_json_structure(self) -> typing.Dict:
json_structure = super().to_json_structure()
# Validate that the method calls are finished and have a status set.
for method_call in self.method_calls:
if 'end' not in method_call:
raise exceptions.InvalidStateError("End timestamp not set.")
if 'status' not in method_call:
raise exceptions.InvalidStateError("Status not set.")
if self.method_calls:
json_structure['method_calls'] = self.method_calls
if self.random_seed is not None:
json_structure['random_seed'] = self.random_seed
hyperparams_json_structure = self._hyperparams_to_json_structure()
if hyperparams_json_structure is not None:
json_structure['hyperparams'] = hyperparams_json_structure
return json_structure
def _hyperparams_to_json_structure(self) -> typing.Optional[typing.Dict]:
if self.hyperparams is None or self.pipeline_hyperparams is None:
return None
hyperparams_json = {}
for hyperparameter_name, value in self.hyperparams.items():
if hyperparameter_name in self.pipeline_hyperparams:
continue
hyperparams_json[hyperparameter_name] = {
'type': metadata_base.ArgumentType.VALUE.name,
'data': self.hyperparams.configuration[hyperparameter_name].value_to_json_structure(value),
}
if hyperparams_json:
return hyperparams_json
else:
return None
def add_method_call(
self, method_name: str, *, runtime_arguments: typing.Dict = None,
environment: typing.Dict[str, typing.Any] = None
) -> int:
"""
Returns
-------
The id of the method call.
"""
if runtime_arguments is None:
runtime_arguments = {}
else:
# We convert everything directly to json structure.
def recurse(item: typing.Any) -> typing.Any:
if isinstance(item, enum.Enum):
return item.name
elif not isinstance(item, typing.Dict):
return item
else:
_json_structure = {}
for key, value in item.items():
_json_structure[key] = recurse(value)
return _json_structure
runtime_arguments = recurse(runtime_arguments)
if method_name == '__init__' and runtime_arguments:
raise exceptions.InvalidArgumentValueError(
f'MethodCall with method `__init__` cannot have arguments. '
f'Hyper-parameters are the arguments to `__init__`.'
)
method_call: typing.Dict[str, typing.Any] = {
'name': method_name,
}
if runtime_arguments:
method_call['arguments'] = runtime_arguments
# We store everything as a JSON structure.
if environment is not None:
method_call['environment'] = environment
self.method_calls.append(method_call)
return len(self.method_calls) - 1
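# Typical lifecycle of a method call record (a sketch of how the helpers below are meant to be chained):
#
#   call_id = step.add_method_call('produce', runtime_arguments=arguments)
#   step.set_method_call_start_timestamp(call_id)
#   ...  # invoke the primitive's produce method
#   step.set_method_call_end_timestamp(call_id)
#   step.set_method_call_successful(call_id)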
def set_method_call_start_timestamp(self, method_call_id: int) -> None:
self.method_calls[method_call_id]['start'] = utils.datetime_for_json(utils.now())
def set_method_call_end_timestamp(self, method_call_id: int) -> None:
if 'start' not in self.method_calls[method_call_id]:
raise exceptions.InvalidStateError("Start timestamp not set.")
self.method_calls[method_call_id]['end'] = utils.datetime_for_json(utils.now())
def set_method_call_result_metadata(self, method_call_id: int, result: typing.Union[base.CallResult, base.MultiCallResult]) -> None:
metadata = None
if isinstance(result, base.CallResult):
if result.value is not None and isinstance(result.value, types.Container):
metadata = {
# TODO: Should we use "to_internal_json_structure" here?
'value': result.value.metadata.to_json_structure()
}
elif isinstance(result, base.MultiCallResult):
metadata = {
# TODO: Should we use "to_internal_json_structure" here?
produce_method_name: value.metadata.to_json_structure()
for produce_method_name, value in result.values.items()
if value is not None and isinstance(value, types.Container)
}
# Record the metadata only if at least one produced value actually has metadata.
if metadata is not None:
for key, value in metadata.items():
if value is not None:
self.method_calls[method_call_id]['metadata'] = metadata
break
def set_method_call_successful(self, method_call_id: int, message: str = None) -> None:
self.method_calls[method_call_id]['status'] = {
'state': metadata_base.PipelineRunStatusState.SUCCESS.name,
}
if message is not None and message:
self.method_calls[method_call_id]['status']['message'] = message
def set_method_call_failed(self, method_call_id: int, message: str = None) -> None:
self.method_calls[method_call_id]['status'] = {
'state': metadata_base.PipelineRunStatusState.FAILURE.name,
}
if message is not None and message:
self.method_calls[method_call_id]['status']['message'] = message
def get_method_call_logging_callback(self, method_call_id: int) -> typing.Callable:
if 'logging' not in self.method_calls[method_call_id]:
self.method_calls[method_call_id]['logging'] = []
return self.method_calls[method_call_id]['logging'].append
class PipelineRunSubpipelineStep(PipelineRunStep):
def __init__(self, start: str, random_seed: int, environment: typing.Dict[str, typing.Any] = None) -> None:
super().__init__(
step_type=metadata_base.PipelineStepType.SUBPIPELINE,
start=start,
environment=environment,
)
self.random_seed = random_seed
self.steps: typing.List[typing.Dict] = []
def to_json_structure(self) -> typing.Dict:
json_structure = super().to_json_structure()
json_structure['random_seed'] = self.random_seed
if self.steps:
json_structure['steps'] = self.steps
return json_structure
def add_step(self, step: typing.Dict) -> None:
self.steps.append(step)
class PipelineRun:
STEPS = 'steps'
METHOD_CALLS = 'method_calls'
def __init__(
self, pipeline: pipeline_module.Pipeline, problem_description: problem.Problem = None, *,
phase: metadata_base.PipelineRunPhase, context: metadata_base.Context,
environment: typing.Dict[str, typing.Any], random_seed: int, previous_pipeline_run: 'PipelineRun' = None,
is_standard_pipeline: bool = False, users: typing.Sequence[User] = None,
) -> None:
self.schema = PIPELINE_RUN_SCHEMA_VERSION
self.pipeline = {
'id': pipeline.id,
'digest': pipeline.get_digest(),
}
self.datasets: typing.List[typing.Dict[str, typing.Any]] = []
self.problem: typing.Optional[typing.Dict[str, typing.Any]] = None
if problem_description is not None:
self._set_problem(problem_description)
self.steps: typing.List[PipelineRunStep] = []
self.status: typing.Dict[str, typing.Any] = {}
self.start: typing.Optional[str] = None
self.end: typing.Optional[str] = None
self.run: typing.Dict[str, typing.Any] = {
'phase': phase.name,
'is_standard_pipeline': is_standard_pipeline,
}
self.context = context
self.previous_pipeline_run_id = previous_pipeline_run.get_id() if previous_pipeline_run is not None else None
if users is None:
self.users: typing.List[User] = []
else:
self.users = list(users)
self.environment = environment
self.random_seed = random_seed
self.is_standard_pipeline = is_standard_pipeline
self._components: typing.Dict[str, typing.Any] = {}
self._step_start_timestamps: typing.Dict[int, str] = {}
def _to_json_structure(self) -> typing.Dict:
if self.start is None:
raise exceptions.InvalidStateError("Start timestamp not set.")
if self.end is None:
raise exceptions.InvalidStateError("End timestamp not set.")
if 'state' not in self.status:
raise exceptions.InvalidStateError("Status not set.")
# Scoring datasets are set only when scoring is used without data preparation.
if 'scoring' in self.run:
if 'data_preparation' in self.run:
if 'datasets' in self.run['scoring']:
raise exceptions.InvalidStateError(
"Scoring datasets must not be provided when scoring is used with data preparation pipeline.",
)
elif 'datasets' not in self.run['scoring']:
raise exceptions.InvalidStateError(
"Scoring datasets must be provided when scoring is used without data preparation pipeline.",
)
json_structure = {
'schema': self.schema,
'pipeline': self.pipeline,
'datasets': self.datasets,
'status': self.status,
'start': self.start,
'end': self.end,
'run': self.run,
'environment': self.environment,
'random_seed': self.random_seed,
}
if self.steps:
json_structure['steps'] = [step.to_json_structure() for step in self.steps]
if self.previous_pipeline_run_id is not None:
json_structure['previous_pipeline_run'] = {
'id': self.previous_pipeline_run_id
}
if self.context is not None:
json_structure['context'] = self.context.name
if self.problem is not None:
json_structure['problem'] = self.problem
if self.users:
json_structure['users'] = self.users
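        # The run id is content-addressed: a hash computed over the JSON structure
        # assembled above, so identical runs produce identical ids.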
json_structure['id'] = utils.compute_hash_id(json_structure)
return json_structure
def to_json_structure(self) -> typing.Dict:
        # We raise the exception here instead of waiting for schema validation to fail, to provide a more helpful error message.
# See: https://gitlab.com/datadrivendiscovery/d3m/issues/355
if not self.is_standard_pipeline and not self.datasets:
raise exceptions.InvalidStateError("Pipeline run for a non-standard pipeline cannot be converted to a JSON structure.")
# TODO: Remove "utils.to_json_structure" once sure that "_to_json_structure" really returns a JSON structure.
json_structure = utils.to_json_structure(self._to_json_structure())
PIPELINE_RUN_SCHEMA_VALIDATOR.validate(json_structure)
return json_structure
def to_yaml(self, file: typing.IO[typing.Any], *, appending: bool = False, **kwargs: typing.Any) -> typing.Optional[str]:
obj = self.to_json_structure()
if appending and 'explicit_start' not in kwargs:
kwargs['explicit_start'] = True
return utils.yaml_dump(obj, stream=file, **kwargs)
def add_input_dataset(self, dataset: container.Dataset) -> None:
metadata = dataset.metadata.query(())
self.datasets.append({
'id': metadata['id'],
'digest': metadata['digest'],
})
def add_primitive_step(self, step: pipeline_module.PrimitiveStep) -> int:
if not isinstance(step, pipeline_module.PrimitiveStep):
raise exceptions.InvalidArgumentTypeError('step must be of type PrimitiveStep, not {}'.format(type(step)))
self.steps.append(
PipelineRunPrimitiveStep(step, self._step_start_timestamps[len(self.steps)])
)
return len(self.steps) - 1
def _get_primitive_step(self, primitive_step_id: int) -> PipelineRunPrimitiveStep:
if primitive_step_id >= len(self.steps):
raise exceptions.InvalidArgumentValueError('There does not exist a step with id {}'.format(primitive_step_id))
primitive_step = self.steps[primitive_step_id]
if not isinstance(primitive_step, PipelineRunPrimitiveStep):
raise exceptions.InvalidArgumentValueError('Step id {} does not refer to a PipelineRunPrimitiveStep'.format(primitive_step_id))
return primitive_step
def set_primitive_step_hyperparams(
self, primitive_step_id: int,
hyperparams: hyperparams_module.Hyperparams,
pipeline_hyperparams: typing.Dict[str, typing.Dict],
    ) -> None:
        # NOTE: the body below is a plausible reconstruction; the original source
        # is truncated at this point. Assumed behaviour: record the hyper-parameters
        # on the corresponding primitive step entry.
        primitive_step = self._get_primitive_step(primitive_step_id)
        primitive_step.hyperparams = hyperparams
        primitive_step.pipeline_hyperparams = pipeline_hyperparams
NO_ERROR = 0
MISTAKE = 1
FIXED_MISTAKE = 2
CHEAT = 3
ACROSS = 0
DOWN = 1
def make_hash(data):
try:
from hashlib import md5
m = md5()
    except ImportError:  # very old Pythons predate hashlib
import md5
m = md5.new()
m.update(data)
return m.hexdigest()
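# Example: make_hash('abc') == '900150983cd24fb0d6963f7d28e17f72' (the MD5 hex digest).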
class BinaryFile:
def __init__(self, filename=None):
        if isinstance(filename, str): f = file(filename, 'rb')
else: f = filename
self.data = list(f.read())
f.close()
self.index = 0
def save(self, filename):
f = file(filename, 'wb+')
f.write(''.join(self.data))
f.close()
def seek(self, pos):
self.index = pos
def length(self):
return len(self.data)
def position(self):
return self.index
def write_char(self, c):
self.data[self.index] = c
self.index += 1
def read_char(self):
c = self.data[self.index]
self.index += 1
return c
def read_byte(self):
return ord(self.read_char())
def read_chars(self, count):
r = ''
for i in range(count):
r += self.read_char()
return r
def read_bytes(self, count):
r = []
for i in range(count):
r.append(self.read_byte())
return r
def read_string(self):
if self.index == len(self.data): return ''
s = ''
c = self.read_char()
        while ord(c) != 0 and self.index < len(self.data):
s += c
c = self.read_char()
return unicode(s, 'cp1252') # This is the Windows character set
def hashcode(self):
return make_hash(''.join(self.data))
class PersistentPuzzle:
def __init__(self):
self.responses = {}
self.errors = {}
self.clock = 0
self.clock_running = False
def get_size(self, m):
width = 0
height = 0
for (x, y) in m.keys():
if x > width: width = x
if y > height: height = y
width += 1
height += 1
return (width, height)
def to_binary(self):
(width, height) = self.get_size(self.responses)
bin1 = [' ']*width*height
bin2 = [' ']*width*height
for ((x, y), r) in self.responses.items():
index = y * width + x
bin1[index] = self.responses[x, y]
if bin1[index] == '': bin1[index] = chr(0)
for ((x, y), r) in self.errors.items():
index = y * width + x
bin2[index] = chr(self.errors[x, y])
bin = ''.join(bin1 + bin2)
data = (width, height, int(self.clock), bin, int(self.clock_running))
return '%d %d %d %s %d' % data
def get_int(self, s, pos):
pos0 = pos
while pos < len(s) and s[pos].isdigit(): pos += 1
return (int(s[pos0:pos]), pos)
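    # Example: get_int("15 21 0 ...", 0) returns (15, 2); callers then skip the
    # separating space before parsing the next field.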
def from_binary(self, bin):
pos = 0
(width, pos) = self.get_int(bin, pos)
pos += 1
(height, pos) = self.get_int(bin, pos)
pos += 1
(self.clock, pos) = self.get_int(bin, pos)
pos += 1
count = width*height
bin1 = bin[pos:pos+count]
pos += count
bin2 = bin[pos:pos+count]
try:
pos += count + 1 # skip the space
(self.clock_running, pos) = self.get_int(bin, pos)
except ValueError:
self.clock_running = False
self.responses = {}
self.errors = {}
i = 0
for y in range(height):
for x in range(width):
if bin1[i] == chr(0): self.responses[x, y] = ''
else: self.responses[x, y] = bin1[i]
self.errors[x, y] = ord(bin2[i])
i += 1
class Puzzle:
def __init__(self, filename):
self.load_file(filename)
def load_file(self, filename):
f = BinaryFile(filename)
self.f = f
f.seek(0x2c)
self.width = f.read_byte()
self.height = f.read_byte()
f.seek(0x32)
self.locked = (f.read_byte() == 0x04)
f.seek(0x34)
self.answers = {}
self.errors = {}
for y in range(self.height):
for x in range(self.width):
self.answers[x, y] = f.read_char()
self.errors[x, y] = NO_ERROR
self.responses = {}
for y in range(self.height):
for x in range(self.width):
c = f.read_char()
if c == '-': c = ''
self.responses[x, y] = c
def massage(s):
return s
self.title = massage(f.read_string())
self.author = massage(f.read_string())
self.copyright = massage(f.read_string())
self.clues = []
def read_clue():
clue = massage(f.read_string())
self.clues.append(clue)
return clue
self.setup(read_clue)
self.notebook = massage(f.read_string())
while f.position() < f.length():
code = f.read_chars(4)
count = f.read_byte() + 256*f.read_byte()
junk = f.read_bytes(2)
data = f.read_bytes(count)
zero = f.read_byte()
self.process_section(code, data)
def setup(self, read_clue):
self.across_clues = {}
self.down_clues = {}
self.across_map = {}
self.down_map = {}
self.number_map = {}
self.number_rev_map = {}
self.mode_maps = [self.across_map, self.down_map]
self.mode_clues = [self.across_clues, self.down_clues]
self.is_across = {}
self.is_down = {}
self.circles = {}
number = 1
for y in range(self.height):
for x in range(self.width):
# NYTimes: April 30, 2006
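                # A cell starts an Across entry when its left neighbour is black
                # (or the grid edge) and its right neighbour is not; Down entries
                # work the same way with the cells above and below.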
is_fresh_x = (self.is_black(x-1, y)
and not self.is_black(x+1, y))
is_fresh_y = (self.is_black(x, y-1)
and not self.is_black(x, y+1))
if not self.is_black(x, y):
if is_fresh_x:
self.across_map[x, y] = number
self.across_clues[number] = read_clue()
else:
if self.across_map.has_key((x-1, y)):
self.across_map[x, y] = self.across_map[x-1, y]
if is_fresh_y:
self.down_map[x, y] = number
self.down_clues[number] = read_clue()
else:
if self.down_map.has_key((x, y-1)):
self.down_map[x, y] = self.down_map[x, y-1]
if is_fresh_x or is_fresh_y:
self.is_across[number] = is_fresh_x
self.is_down[number] = is_fresh_y
self.number_map[number] = (x, y)
self.number_rev_map[x, y] = number
number += 1
#else:
# self.across_map[x, y] = 0
# self.down_map[x, y] = 0
self.max_number = number-1
def process_section(self, code, data):
if code == 'GEXT':
index = 0
for y in range(self.height):
for x in range(self.width):
if data[index] == 0x80:
self.circles[x, y] = True
index += 1
def hashcode(self):
(width, height) = (self.width, self.height)
data = [' ']*width*height
for ((x, y), r) in self.responses.items():
index = y * width + x
if r == '.': data[index] = '1'
else: data[index] = '0'
s1 = ''.join(data)
s2 = ';'.join(self.clues)
return make_hash(s1 + s2)
def save(self, fname):
f = self.f
f.seek(0x34 + self.width * self.height)
for y in range(self.height):
for x in range(self.width):
c = self.responses[x, y]
if c == '': c = '-'
f.write_char(c)
f.save(fname)
def is_locked(self):
return self.locked
def is_black(self, x, y):
return self.responses.get((x, y), '.') == '.'
def is_circled(self, x, y):
return self.circles.has_key((x, y))
def is_empty(self):
for ((x, y), r) in self.responses.items():
if r != '.' and r != '': return False
return True
def is_word_filled(self, x, y, dir):
if not self.is_mode_valid(x, y, dir): return False
n = self.number(x, y, dir)
(x, y) = self.number_map[n]
hit = False
while not hit:
if self.responses[x, y] == '': return False
((x, y), hit) = self.next_cell(x, y, dir, 1, False)
return True
def clue(self, x, y, mode):
assert self.is_mode_valid(x, y, mode)
if mode is ACROSS: return self.across_clues[self.across_map[x, y]]
if mode is DOWN: return self.down_clues[self.down_map[x, y]]
def number(self, x, y, mode):
assert self.is_mode_valid(x, y, mode)
return self.mode_maps[mode][x, y]
def cell_has_number(self, x, y):
return self.number_rev_map.has_key((x, y))
def number_of_cell(self, x, y):
return self.number_rev_map[x, y]
def cell_of_number(self, number):
return self.number_map[number]
def is_mode_valid(self, x, y, mode):
return self.mode_maps[mode].has_key((x, y))
def next_cell(self, x, y, mode, incr, skip_black):
(x0, y0) = (x, y)
while x >= 0 and x < self.width and y >= 0 and y < self.height:
if mode is ACROSS: x += incr
else: y += incr
if not skip_black or not self.is_black(x, y): break
if self.is_black(x, y): return ((x0, y0), True)
else: return ((x, y), False)
def find_blank_cell_recursive(self, x, y, mode, incr):
if self.responses[x, y] == '' or self.errors[x, y] == MISTAKE:
return (x, y)
else:
((x, y), hit) = self.next_cell(x, y, mode, incr, False)
if hit: return None
else: return self.find_blank_cell_recursive(x, y, mode, incr)
def find_blank_cell(self, x, y, mode, incr):
r = self.find_blank_cell_recursive(x, y, mode, incr)
        if r is None:
(x1, y1) = self.number_map[self.mode_maps[mode][x, y]]
r = self.find_blank_cell_recursive(x1, y1, mode, incr)
            if r is None: return (x, y)
else: return r
else: return r
def is_cell_correct(self, x, y):
return self.responses[x, y] == self.answers[x, y]
def is_puzzle_correct(self):
for x in range(self.width):
for y in range(self.height):
if not self.is_black(x, y) and not self.is_cell_correct(x, y):
return False
return True
def is_puzzle_filled(self):
for x in range(self.width):
for y in range(self.height):
if not self.is_black(x, y) and self.responses[x, y] == '':
return False
return True
def incr_number(self, x, y, mode, incr):
assert self.is_mode_valid(x, y, mode)
n = self.mode_maps[mode][x, y]
while True:
n += incr
if not self.number_map.has_key(n): return 0
if mode == ACROSS and self.is_across[n]: break
if mode == DOWN and self.is_down[n]: break
return n
def initial_number(self, mode):
n = 1
while True:
if mode == ACROSS and self.is_across[n]: break
if mode == DOWN and self.is_down[n]: break
n += 1
return n
def final_number(self, mode):
n = self.max_number
while True:
if mode == ACROSS and self.is_across[n]: break
if mode == DOWN and self.is_down[n]: break
n -= 1
return n
def set_letter(self, x, y, c):
self.responses[x, y] = c
def get_letter(self, x, y):
return self.responses[x, y]
def get_answer(self, x, y):
return self.answers[x, y]
def get_error(self, x, y):
return self.errors[x, y]
def set_error(self, x, y, err):
self.errors[x, y] = err
def is_blank(self, x, y):
return self.responses[x, y] == ''
"""
Contains useful graphic generators. Currently, effect measure plots and functional form assessment plots
are implemented. Uses matplotlib to generate graphics. Future inclusions include forest plots
Contents:
Functional form assessment- func_form_plot()
Forest plot/ effect measure plot- EffectMeasurePlot()
P-value distribution plot- pvalue_plot()
Spaghetti plot- spaghetti_plot()
Receiver-Operator Curve- roc()
Dynamic risk plot- dynamic_risk_plot()
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.genmod.families import links
from statsmodels.nonparametric.smoothers_lowess import lowess
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mticker
class EffectMeasurePlot:
    """Used to generate effect measure plots. EffectMeasurePlot accepts four list-type objects.
    It is initialized with the associated names for each line, the point estimate,
    the lower confidence limit, and the upper confidence limit.
Plots will resemble the following form:
_____________________________________________ Measure % CI
| |
1 | --------o------- | x n, 2n
| |
2 | ----o---- | w m, 2m
| |
|___________________________________________|
# # # #
    The following functions (and their purposes) live within EffectMeasurePlot:
labels(**kwargs)
Used to change the labels in the plot, as well as the center and scale. Inputs are
keyword arguments
KEYWORDS:
-effectmeasure + changes the effect measure label
-conf_int + changes the confidence interval label
-scale + changes the scale to either log or linear
-center + changes the reference line for the center
colors(**kwargs)
Used to change the color of points and lines. Also can change the shape of points.
Valid colors and shapes for matplotlib are required. Inputs are keyword arguments
KEYWORDS:
-errorbarcolor + changes the error bar colors
-linecolor + changes the color of the reference line
-pointcolor + changes the color of the points
-pointshape + changes the shape of points
plot(t_adjuster=0.01,decimal=3,size=3)
Generates the effect measure plot of the input lists according to the pre-specified
colors, shapes, and labels of the class object
Arguments:
-t_adjuster + used to refine alignment of the table with the line graphs.
                      When generating plots, trial and error for this value is usually
necessary
-decimal + number of decimal places to display in the table
-size + size of the plot to generate
    Example)
    >>>lab = ['One','Two'] #generating lists of data to plot
    >>>emm = [1.01,1.31]
    >>>lcl = ['0.90',1.01]
    >>>ucl = [1.11,1.53]
    >>>
    >>>x = EffectMeasurePlot(lab,emm,lcl,ucl) #initializing EffectMeasurePlot with the above lists
    >>>x.labels(effectmeasure='RR') #changing the table label to 'RR'
    >>>x.colors(pointcolor='r') #changing the point colors to red
    >>>x.plot(t_adjuster=0.13) #generating the effect measure plot
"""
def __init__(self, label, effect_measure, lcl, ucl):
"""Initializes effectmeasure_plot with desired data to plot. All lists should be the same
length. If a blank space is desired in the plot, add an empty character object (' ') to
each list at the desired point.
Inputs:
label
-list of labels to use for y-axis
effect_measure
-list of numbers for point estimates to plot. If point estimate has trailing zeroes,
input as a character object rather than a float
lcl
            -list of numbers for lower confidence limits to plot. If point estimate has trailing
zeroes, input as a character object rather than a float
ucl
-list of numbers for upper confidence limits to plot. If point estimate has
trailing zeroes, input as a character object rather than a float
"""
self.df = pd.DataFrame()
self.df['study'] = label
self.df['OR'] = effect_measure
self.df['LCL'] = lcl
self.df['UCL'] = ucl
self.df['OR2'] = self.df['OR'].astype(str).astype(float)
if (all(isinstance(item, float) for item in lcl)) & (all(isinstance(item, float) for item in effect_measure)):
self.df['LCL_dif'] = self.df['OR'] - self.df['LCL']
else:
self.df['LCL_dif'] = (pd.to_numeric(self.df['OR'])) - (pd.to_numeric(self.df['LCL']))
if (all(isinstance(item, float) for item in ucl)) & (all(isinstance(item, float) for item in effect_measure)):
self.df['UCL_dif'] = self.df['UCL'] - self.df['OR']
else:
self.df['UCL_dif'] = (pd.to_numeric(self.df['UCL'])) - (pd.to_numeric(self.df['OR']))
self.em = 'OR'
self.ci = '95% CI'
self.scale = 'linear'
self.center = 1
self.errc = 'dimgrey'
self.shape = 'd'
self.pc = 'k'
self.linec = 'gray'
def labels(self, **kwargs):
"""Function to change the labels of the outputted table. Additionally, the scale and reference
value can be changed.
Accepts the following keyword arguments:
effectmeasure
-changes the effect measure label
conf_int
-changes the confidence interval label
scale
-changes the scale to either log or linear
center
-changes the reference line for the center
"""
if 'effectmeasure' in kwargs:
self.em = kwargs['effectmeasure']
        if 'conf_int' in kwargs:
            self.ci = kwargs['conf_int']
if 'scale' in kwargs:
self.scale = kwargs['scale']
if 'center' in kwargs:
self.center = kwargs['center']
def colors(self, **kwargs):
"""Function to change colors and shapes.
Accepts the following keyword arguments:
errorbarcolor
-changes the error bar colors
linecolor
-changes the color of the reference line
pointcolor
-changes the color of the points
pointshape
-changes the shape of points
"""
if 'errorbarcolor' in kwargs:
self.errc = kwargs['errorbarcolor']
if 'pointshape' in kwargs:
self.shape = kwargs['pointshape']
if 'linecolor' in kwargs:
self.linec = kwargs['linecolor']
if 'pointcolor' in kwargs:
self.pc = kwargs['pointcolor']
def plot(self, figsize=(3, 3), t_adjuster=0.01, decimal=3, size=3, max_value=None, min_value=None):
"""Generates the matplotlib effect measure plot with the default or specified attributes.
The following variables can be used to further fine-tune the effect measure plot
t_adjuster
            -used to refine alignment of the table with the line graphs. When generating plots, trial
             and error for this value is usually necessary. I haven't come up with an algorithm to
determine this yet...
decimal
-number of decimal places to display in the table
size
-size of the plot to generate
max_value
-maximum value of x-axis scale. Default is None, which automatically determines max value
min_value
-minimum value of x-axis scale. Default is None, which automatically determines min value
"""
tval = []
ytick = []
for i in range(len(self.df)):
            if not np.isnan(self.df['OR2'][i]):
if ((isinstance(self.df['OR'][i], float)) & (isinstance(self.df['LCL'][i], float)) & (
isinstance(self.df['UCL'][i], float))):
tval.append([round(self.df['OR2'][i], decimal), (
'(' + str(round(self.df['LCL'][i], decimal)) + ', ' + str(
round(self.df['UCL'][i], decimal)) + ')')])
else:
tval.append(
[self.df['OR'][i], ('(' + str(self.df['LCL'][i]) + ', ' + str(self.df['UCL'][i]) + ')')])
ytick.append(i)
else:
tval.append([' ', ' '])
ytick.append(i)
if max_value is None:
if pd.to_numeric(self.df['UCL']).max() < 1:
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 0.05),
2) # setting x-axis maximum for UCL less than 1
if (pd.to_numeric(self.df['UCL']).max() < 9) and (pd.to_numeric(self.df['UCL']).max() >= 1):
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 1),
0) # setting x-axis maximum for UCL less than 10
            if pd.to_numeric(self.df['UCL']).max() >= 9:  # also covers a maximum of exactly 9
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 10),
0) # setting x-axis maximum for UCL less than 100
else:
maxi = max_value
if min_value is None:
if pd.to_numeric(self.df['LCL']).min() > 0:
mini = round(((pd.to_numeric(self.df['LCL'])).min() - 0.1), 1) # setting x-axis minimum
            if pd.to_numeric(self.df['LCL']).min() <= 0:  # also covers a minimum of exactly 0
mini = round(((pd.to_numeric(self.df['LCL'])).min() - 0.05), 2) # setting x-axis minimum
else:
mini = min_value
plt.figure(figsize=figsize) # blank figure
gspec = gridspec.GridSpec(1, 6) # sets up grid
plot = plt.subplot(gspec[0, 0:4]) # plot of data
tabl = plt.subplot(gspec[0, 4:]) # table of OR & CI
plot.set_ylim(-1, (len(self.df))) # spacing out y-axis properly
if self.scale == 'log':
try:
plot.set_xscale('log')
except:
raise ValueError('For the log scale, all values must be positive')
plot.axvline(self.center, color=self.linec, zorder=1)
plot.errorbar(self.df.OR2, self.df.index, xerr=[self.df.LCL_dif, self.df.UCL_dif], marker='None', zorder=2,
ecolor=self.errc, elinewidth=(size / size), linewidth=0)
plot.scatter(self.df.OR2, self.df.index, c=self.pc, s=(size * 25), marker=self.shape, zorder=3,
edgecolors='None')
plot.xaxis.set_ticks_position('bottom')
plot.yaxis.set_ticks_position('left')
plot.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
plot.get_xaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
plot.set_yticks(ytick)
plot.set_xlim([mini, maxi])
plot.set_xticks([mini, self.center, maxi])
plot.set_xticklabels([mini, self.center, maxi])
plot.set_yticklabels(self.df.study)
plot.yaxis.set_ticks_position('none')
plot.invert_yaxis() # invert y-axis to align values properly with table
tb = tabl.table(cellText=tval, cellLoc='center', loc='right', colLabels=[self.em, self.ci],
bbox=[0, t_adjuster, 1, 1])
tabl.axis('off')
tb.auto_set_font_size(False)
tb.set_fontsize(12)
for key, cell in tb.get_celld().items():
cell.set_linewidth(0)
return plot
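# Usage sketch for EffectMeasurePlot (mirrors the class docstring example; kept
# as comments so importing this module stays side-effect free):
#   p = EffectMeasurePlot(['One', 'Two'], [1.01, 1.31], [0.90, 1.01], [1.11, 1.53])
#   p.labels(effectmeasure='RR')
#   p.colors(pointcolor='r')
#   ax = p.plot(t_adjuster=0.13)
#   plt.show()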
def functional_form_plot(df, outcome, var, f_form=None, outcome_type='binary', link_dist=None, ylims=None,
loess_value=0.4, legend=True, model_results=True, loess=True, points=False, discrete=False):
"""Creates a LOESS plot to aid in functional form assessment for continuous variables.
Plots can be created for binary and continuous outcomes. Default options are set to create
a functional form plot for a binary outcome. To convert to a continuous outcome,
outcome_type needs to be changed, in addition to the link_dist
Returns a matplotlib graph with a LOESS line (dashed red-line), regression line (sold blue-line),
and confidence interval (shaded blue)
df:
-dataframe that contains the variables of interest
outcome:
-Column name of the outcome variable of interest
var:
-Column name of the variable of interest for the functional form assessment
f_form:
        -Regression equation
    """
    # NOTE: the original source is truncated at this point; the rest of the
    # docstring and the function body are missing from this excerpt.
headers=self.headers)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(self.source_collection, headers=self.headers)
assert resp.json["data"]["status"] == "signed"
def test_rollbacks_if_review_already_requested(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(self.source_collection, headers=self.headers)
assert resp.json["data"]["status"] == "signed"
def test_tracking_fields_are_updated(self):
resp = self.app.get(self.source_collection, headers=self.headers)
before_date = resp.json["data"]["last_edit_date"]
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(self.source_collection, headers=self.headers)
after_date = resp.json["data"]["last_edit_date"]
assert before_date != after_date
def test_comments_are_reset(self):
self.app.patch_json(
self.source_collection,
{
"data": {
"last_editor_comment": "please check that",
"last_reviewer_comment": "looks good",
}
},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(self.source_collection, headers=self.headers)
assert resp.json["data"]["last_editor_comment"] == ""
assert resp.json["data"]["last_reviewer_comment"] == ""
def test_recreates_deleted_record(self):
resp = self.app.delete(
self.source_collection + "/records?_limit=1&_sort=last_modified",
headers=self.headers,
)
deleted_id = resp.json["data"][0]["id"]
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(
self.source_collection + f"/records/{deleted_id}",
headers=self.headers,
status=200,
)
assert resp.json["data"]["title"] == "hello"
def test_reverts_updated_records(self):
resp = self.app.get(
self.source_collection + "/records?_limit=1&_sort=last_modified",
headers=self.headers,
)
update_id = resp.json["data"][0]["id"]
self.app.put_json(
self.source_collection + f"/records/{update_id}",
{"data": {"title": "Ave"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(
self.source_collection + f"/records/{update_id}",
headers=self.headers,
status=200,
)
assert resp.json["data"]["title"] == "hello"
def test_removes_created_records(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
self.app.get(
self.source_collection + "/records/r1", headers=self.headers, status=404
)
self.app.get(
self.source_collection + "/records/r2", headers=self.headers, status=404
)
def test_also_resets_changes_on_preview(self):
resp = self.app.get(self.preview_collection + "/records", headers=self.headers)
size_setup = len(resp.json["data"])
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
resp = self.app.get(self.preview_collection + "/records", headers=self.headers)
size_before = len(resp.json["data"])
assert size_setup != size_before
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(self.preview_collection + "/records", headers=self.headers)
size_after = len(resp.json["data"])
assert size_before != size_after
assert size_setup == size_after
def test_preview_signature_is_refreshed(self):
resp = self.app.get(self.preview_collection, headers=self.headers)
sign_before = resp.json["data"]["signature"]["signature"]
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
resp = self.app.get(self.preview_collection, headers=self.headers)
sign_after = resp.json["data"]["signature"]["signature"]
assert sign_before != sign_after
def test_does_not_recreate_tombstones(self):
# Approve creation of r1 and r2.
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.other_headers,
)
# Delete r1.
self.app.delete(
self.source_collection + "/records/r1",
headers=self.headers,
)
# Approve deletion of r1.
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.other_headers,
)
# Recreate r1.
self.app.put_json(
self.source_collection + "/records/r1",
{"data": {"title": "Servus"}},
headers=self.headers,
)
# Rollback.
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-rollback"}},
headers=self.headers,
)
# r1 should be deleted again.
self.app.get(
self.source_collection + "/records/r1", headers=self.headers, status=404
)
self.app.get(
self.preview_collection + "/records/r1", headers=self.headers, status=404
)
self.app.get(
self.destination_collection + "/records/r1",
headers=self.headers,
status=404,
)
class UserGroupsTest(SignoffWebTest, FormattedErrorMixin, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
return settings
def setUp(self):
super(UserGroupsTest, self).setUp()
self.editor_headers = get_user_headers("edith:her")
resp = self.app.get("/", headers=self.editor_headers)
self.editor = resp.json["user"]["id"]
self.editor_headers = get_user_headers("emo:billier")
resp = self.app.get("/", headers=self.editor_headers)
self.editor = resp.json["user"]["id"]
self.reviewer_headers = get_user_headers("ray:weaver")
resp = self.app.get("/", headers=self.reviewer_headers)
self.reviewer = resp.json["user"]["id"]
self.app.put_json(
"/buckets/alice/groups/editors",
{"data": {"members": [self.editor]}},
headers=self.headers,
)
self.app.put_json(
"/buckets/alice/groups/reviewers",
{"data": {"members": [self.reviewer]}},
headers=self.headers,
)
def test_only_editors_can_ask_to_review(self):
resp = self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.reviewer_headers,
status=403,
)
self.assertFormattedError(
response=resp,
code=403,
errno=ERRORS.FORBIDDEN,
error="Forbidden",
message="Not in editors group",
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.editor_headers,
)
def test_only_reviewers_can_ask_to_sign(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.editor_headers,
)
resp = self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.editor_headers,
status=403,
)
self.assertFormattedError(
response=resp,
code=403,
errno=ERRORS.FORBIDDEN,
error="Forbidden",
message="Not in reviewers group",
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.reviewer_headers,
)
class SpecificUserGroupsTest(SignoffWebTest, FormattedErrorMixin, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
cls.source_collection1 = "/buckets/alice/collections/cid1"
cls.source_collection2 = "/buckets/alice/collections/cid2"
settings["kinto.signer.resources"] = "%s -> %s\n%s -> %s" % (
cls.source_collection1,
cls.source_collection1.replace("alice", "destination"),
cls.source_collection2,
cls.source_collection2.replace("alice", "destination"),
)
settings["signer.alice.cid1.editors_group"] = "editeurs"
settings["signer.alice.cid1.reviewers_group"] = "revoyeurs"
return settings
def setUp(self):
super(SpecificUserGroupsTest, self).setUp()
self.app.put_json(self.source_collection1, headers=self.headers)
self.app.put_json(self.source_collection2, headers=self.headers)
self.someone_headers = get_user_headers("sam:wan")
self.editor_headers = get_user_headers("emo:billier")
resp = self.app.get("/", headers=self.editor_headers)
self.editor = resp.json["user"]["id"]
self.app.put_json(
"/buckets/alice/groups/editeurs",
{"data": {"members": [self.editor]}},
headers=self.headers,
)
def test_editors_cannot_ask_to_review_if_not_specifically_configured(self):
resp = self.app.patch_json(
self.source_collection2,
{"data": {"status": "to-review"}},
headers=self.someone_headers,
status=403,
)
self.assertFormattedError(
response=resp,
code=403,
errno=ERRORS.FORBIDDEN,
error="Forbidden",
message="Not in editors group",
)
def test_only_specific_editors_can_ask_to_review(self):
resp = self.app.patch_json(
self.source_collection1,
{"data": {"status": "to-review"}},
headers=self.someone_headers,
status=403,
)
self.assertFormattedError(
response=resp,
code=403,
errno=ERRORS.FORBIDDEN,
error="Forbidden",
message="Not in editeurs group",
)
def test_only_reviewers_can_ask_to_sign(self):
self.app.patch_json(
self.source_collection1,
{"data": {"status": "to-review"}},
headers=self.editor_headers,
)
resp = self.app.patch_json(
self.source_collection1,
{"data": {"status": "to-sign"}},
headers=self.editor_headers,
status=403,
)
self.assertFormattedError(
response=resp,
code=403,
errno=ERRORS.FORBIDDEN,
error="Forbidden",
message="Not in revoyeurs group",
)
class PreviewCollectionTest(SignoffWebTest, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
cls.preview_bucket = "/buckets/preview"
cls.preview_collection = cls.preview_bucket + "/collections/pcid"
settings["signer.to_review_enabled"] = "true"
settings["kinto.signer.resources"] = "%s -> %s -> %s" % (
cls.source_collection,
cls.preview_collection,
cls.destination_collection,
)
return settings
def test_the_preview_collection_is_updated_and_signed(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
self.app.get(self.preview_bucket, headers=self.headers)
resp = self.app.get(self.preview_collection, headers=self.headers)
assert "signature" in resp.json["data"]
resp = self.app.get(self.preview_collection + "/records", headers=self.headers)
assert len(resp.json["data"]) == 2
def test_the_preview_collection_receives_kinto_admin_ui_attributes(self):
self.app.patch_json(
self.source_collection,
{
"data": {
"status": "to-review",
"displayFields": ["age"],
"schema": {"type": "object"},
}
},
headers=self.headers,
)
resp = self.app.get(self.preview_collection, headers=self.headers)
assert resp.json["data"]["displayFields"] == ["age"]
assert "schema" in resp.json["data"]
def test_the_preview_collection_is_also_resigned(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
resp = self.app.get(self.preview_collection, headers=self.headers)
signature_preview_before = resp.json["data"]["signature"]
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.other_headers,
)
resp = self.app.get(self.destination_collection, headers=self.headers)
signature_destination_before = resp.json["data"]["signature"]
# status is signed.
resp = self.app.get(self.source_collection, headers=self.headers)
assert resp.json["data"]["status"] == "signed"
# Resign.
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-resign"}},
headers=self.headers,
)
resp = self.app.get(self.destination_collection, headers=self.headers)
signature_destination_after = resp.json["data"]["signature"]
assert signature_destination_before != signature_destination_after
resp = self.app.get(self.preview_collection, headers=self.headers)
signature_preview_after = resp.json["data"]["signature"]
assert signature_preview_before != signature_preview_after
def test_the_preview_collection_is_emptied_when_source_records_are_deleted(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.other_headers,
)
resp = self.app.get(self.source_collection + "/records", headers=self.headers)
records = resp.json["data"]
for r in records:
self.app.delete(
self.source_collection + "/records/" + r["id"], headers=self.headers
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
resp = self.app.get(self.preview_collection + "/records", headers=self.headers)
records = resp.json["data"]
assert len(records) == 0
def test_the_preview_collection_is_emptied_when_source_is_deleted(self):
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-sign"}},
headers=self.other_headers,
)
self.app.delete(self.source_collection + "/records", headers=self.headers).json[
"data"
]
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
resp = self.app.get(self.preview_collection + "/records", headers=self.headers)
records = resp.json["data"]
assert len(records) == 0
def test_last_editor_comment_are_reset_on_review(self):
self.app.patch_json(
self.source_collection,
{
"data": {
"last_editor_comment": "please check that",
"last_reviewer_comment": "looks good",
}
},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
{"data": {"status": "to-review"}},
headers=self.headers,
)
resp = self.app.get(self.source_collection, headers=self.headers)
assert resp.json["data"]["last_editor_comment"] == ""
class CollectionDelete(SignoffWebTest, unittest.TestCase):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
cls.source_bucket = "/buckets/source"
cls.source_collection = cls.source_bucket + "/collections/cid"
cls.preview_bucket = "/buckets/preview"
cls.preview_collection = cls.preview_bucket + "/collections/cid"
cls.destination_bucket = "/buckets/destination"
cls.destination_collection = cls.destination_bucket + "/collections/cid"
settings["signer.to_review_enabled"] = "true"
settings["kinto.signer.resources"] = (
"%s -> %s -> %s"
% (cls.source_bucket, cls.preview_bucket, cls.destination_bucket)
+ "\n %s -> %s"
% (
cls.source_bucket + "/collections/no-preview",
cls.destination_bucket + "/collections/no-preview",
)
+ "\n /buckets/some-bucket -> /buckets/some-other"
)
return settings
def setUp(self):
super(CollectionDelete, self).setUp()
self.app.put(
self.source_bucket + "/collections/no-preview", headers=self.headers
)
self.app.put(
self.destination_bucket + "/collections/extra", headers=self.headers
)
self.app.put(
self.preview_bucket + "/collections/no-preview", headers=self.headers
)
def test_cannot_delete_preview_collection_if_used(self):
self.app.delete(self.preview_collection, headers=self.headers, status=403)
def test_cannot_delete_destination_collection_if_used(self):
self.app.delete(self.destination_collection, headers=self.headers, status=403)
self.app.delete(
self.destination_bucket + "/collections/no-preview",
headers=self.headers,
status=403,
)
def test_can_delete_preview_if_source_is_deleted(self):
self.app.delete(self.source_collection, headers=self.headers)
self.app.delete(self.preview_collection, headers=self.headers)
def test_can_delete_preview_if_unused(self):
self.app.delete(
self.preview_bucket + "/collections/no-preview", headers=self.headers
)
def test_can_delete_destination_if_unused(self):
self.app.delete(
self.destination_bucket + "/collections/extra", headers=self.headers
)
class NoReviewTest(SignoffWebTest, unittest.TestCase):
"""
If preview collection is set in config, we create it
and copy the records there, even if review is disabled.
"""
source_bucket = "/buckets/dev"
source_collection = "/buckets/dev/collections/normandy"
preview_bucket = "/buckets/stage"
preview_collection = "/buckets/stage/collections/normandy"
destination_bucket = "/buckets/prod"
destination_collection = "/buckets/prod/collections/normandy"
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
# preview collection exists.
settings["kinto.signer.resources"] = " -> ".join(
(cls.source_bucket, cls.preview_bucket, cls.destination_bucket)
)
# dev/onecrl has review enabled.
settings["signer.to_review_enabled"] = "true"
# dev/normandy has review disabled.
settings["signer.dev.normandy.to_review_enabled"] = "false"
return settings
def setUp(self):
super().setUp()
# Make the preview bucket readable (to obtain explicit 404 when collections
# don't exist instead of ambiguous 403)
self.app.put_json(
self.preview_bucket,
{"permissions": {"read": ["system.Everyone"]}},
headers=self.headers,
)
self.app.put(self.source_bucket + "/collections/onecrl", headers=self.headers)
self.app.put(self.source_collection, headers=self.headers)
def test_the_preview_collection_is_created_when_review_enabled(self):
self.app.get(self.preview_bucket + "/collections/onecrl", headers=self.headers)
def test_the_preview_collection_is_created_when_review_disabled(self):
self.app.get(self.preview_collection, headers=self.headers)
def test_the_preview_collection_is_updated_when_review_enabled(self):
before = len(
self.app.get(
self.preview_bucket + "/collections/onecrl/records",
headers=self.headers,
).json["data"]
)
self.app.post_json(
self.source_bucket + "/collections/onecrl/records",
{"data": {"title": "Hallo"}},
headers=self.headers,
)
self.app.patch_json(
self.source_bucket + "/collections/onecrl",
{"data": {"status": "to-review"}},
headers=self.headers,
)
after = len(
self.app.get(
self.preview_bucket + "/collections/onecrl/records",
headers=self.headers,
).json["data"]
)
assert after > before, "Preview was not updated when review enabled"
def test_the_preview_collection_is_updated_when_review_disabled(self):
before = len(
self.app.get(
self.preview_collection + "/records", headers=self.headers
).json["data"]
)
self.app.post_json(
self.source_collection + "/records",
{"data": {"title": "Hallo"}},
headers=self.headers,
)
self.app.patch_json(
self.source_collection,
            {"data": {"status": "to-sign"}},
            headers=self.headers,
        )
        # NOTE: the original source is truncated here; the remainder below is a
        # plausible reconstruction mirroring the review-enabled test above.
        after = len(
            self.app.get(
                self.preview_collection + "/records", headers=self.headers
            ).json["data"]
        )
        assert after > before, "Preview was not updated when review disabled"
import networkx as nx
import io, sys
from collections import defaultdict, Counter
import numpy as np
from Bio.Seq import translate, reverse_complement, Seq
from Bio import SeqIO
from panaroo.cdhit import align_dna_cdhit
from panaroo.isvalid import del_dups
from joblib import Parallel, delayed
import os
import gffutils as gff
from io import StringIO
import edlib
from .merge_nodes import delete_node, remove_member_from_node
from tqdm import tqdm
import re
# @profile
def find_missing(G,
gff_file_handles,
dna_seq_file,
prot_seq_file,
gene_data_file,
merge_id_thresh,
search_radius,
prop_match,
pairwise_id_thresh,
n_cpu,
remove_by_consensus=False,
verbose=True):
# Iterate over each genome file checking to see if any missing accessory genes
# can be found.
# generate mapping between internal nodes and gff ids
id_to_gff = {}
with open(gene_data_file, 'r') as infile:
next(infile)
for line in infile:
line = line.split(",")
if line[2] in id_to_gff:
raise NameError("Duplicate internal ids!")
id_to_gff[line[2]] = line[3]
# identify nodes that have been merged at the protein level
merged_ids = {}
for node in G.nodes():
if (len(G.nodes[node]['centroid']) >
1) or (G.nodes[node]['mergedDNA']):
for sid in sorted(G.nodes[node]['seqIDs']):
merged_ids[sid] = node
merged_nodes = defaultdict(dict)
with open(gene_data_file, 'r') as infile:
next(infile)
for line in infile:
line = line.split(",")
if line[2] in merged_ids:
                mem = int(line[2].split("_")[0])  # fixed: 'sid' was a stale loop variable here
if merged_ids[line[2]] in merged_nodes[mem]:
merged_nodes[mem][merged_ids[line[2]]] = G.nodes[
merged_ids[line[2]]]["dna"][G.nodes[merged_ids[
line[2]]]['maxLenId']]
else:
merged_nodes[mem][merged_ids[line[2]]] = line[5]
# iterate through nodes to identify accessory genes for searching
# these are nodes missing a member with at least one neighbour that has that member
n_searches = 0
search_list = defaultdict(lambda: defaultdict(set))
conflicts = defaultdict(set)
for node in G.nodes():
for neigh in G.neighbors(node):
# seen_mems = set()
for sid in sorted(G.nodes[neigh]['seqIDs']):
member = int(sid.split("_")[0])
conflicts[member].add((neigh, id_to_gff[sid]))
if member not in G.nodes[node]['members']:
if len(G.nodes[node]["dna"][G.nodes[node]
['maxLenId']]) <= 0:
print(G.nodes[node]["dna"])
raise NameError("Problem!")
search_list[member][node].add(
(G.nodes[node]["dna"][G.nodes[node]['maxLenId']],
id_to_gff[sid]))
n_searches += 1
if verbose:
print("Number of searches to perform: ", n_searches)
print("Searching...")
all_hits, all_node_locs, max_seq_lengths = zip(*Parallel(n_jobs=n_cpu)(
delayed(search_gff)(search_list[member],
conflicts[member],
gff_handle,
merged_nodes=merged_nodes[member],
search_radius=search_radius,
prop_match=prop_match,
pairwise_id_thresh=pairwise_id_thresh,
merge_id_thresh=merge_id_thresh)
for member, gff_handle in tqdm(enumerate(gff_file_handles),
disable=(not verbose))))
if verbose:
print("translating hits...")
hits_trans_dict = {}
for member, hits in enumerate(all_hits):
hits_trans_dict[member] = Parallel(n_jobs=n_cpu)(
delayed(translate_to_match)(hit[1], G.nodes[hit[0]]["protein"][0])
for hit in hits)
# remove nodes that conflict (overlap)
nodes_by_size = sorted([(G.nodes[node]['size'], node)
for node in G.nodes()],
reverse=True)
nodes_by_size = [n[1] for n in nodes_by_size]
member = 0
bad_node_mem_pairs = set()
bad_nodes = set()
for node_locs, max_seq_length in zip(all_node_locs, max_seq_lengths):
seq_coverage = defaultdict(
lambda: np.zeros(max_seq_length + 2, dtype=bool))
for node in nodes_by_size:
if node in bad_nodes: continue
if node not in node_locs: continue
contig_id = node_locs[node][0]
loc = node_locs[node][1]
if np.sum(seq_coverage[contig_id][loc[0]:loc[1]]) >= (
0.5 * (max(G.nodes[node]['lengths']))):
if member in G.nodes[node]['members']:
remove_member_from_node(G, node, member)
# G.nodes[node]['members'].remove(str(member))
# G.nodes[node]['size'] -= 1
bad_node_mem_pairs.add((node, member))
else:
seq_coverage[contig_id][loc[0]:loc[1]] = True
member += 1
for node in G.nodes():
if len(G.nodes[node]['members']) <= 0:
bad_nodes.add(node)
for node in bad_nodes:
if node in G.nodes():
delete_node(G, node)
# remove by consensus
if remove_by_consensus:
if verbose:
print("removing by consensus...")
node_hit_counter = Counter()
for member, hits in enumerate(all_hits):
for node, dna_hit in hits:
if dna_hit == "": continue
if node in bad_nodes: continue
if (node, member) in bad_node_mem_pairs: continue
node_hit_counter[node] += 1
for node in G:
if node_hit_counter[node] > G.nodes[node]['size']:
bad_nodes.add(node)
for node in bad_nodes:
if node in G.nodes():
delete_node(G, node)
if verbose:
print("Updating output...")
n_found = 0
with open(dna_seq_file, 'a') as dna_out:
with open(prot_seq_file, 'a') as prot_out:
with open(gene_data_file, 'a') as data_out:
for member, hits in enumerate(all_hits):
i = -1
for node, dna_hit in hits:
i += 1
if dna_hit == "": continue
if node in bad_nodes: continue
if (node, member) in bad_node_mem_pairs: continue
hit_protein = hits_trans_dict[member][i]
G.nodes[node]['members'].add(member)
G.nodes[node]['size'] += 1
G.nodes[node]['dna'] = del_dups(G.nodes[node]['dna'] +
[dna_hit])
dna_out.write(">" + str(member) + "_refound_" +
str(n_found) + "\n" + dna_hit + "\n")
G.nodes[node]['protein'] = del_dups(
G.nodes[node]['protein'] + [hit_protein])
prot_out.write(">" + str(member) + "_refound_" +
str(n_found) + "\n" + hit_protein +
"\n")
data_out.write(",".join([
os.path.splitext(
os.path.basename(
gff_file_handles[member]))[0], "",
str(member) + "_refound_" + str(n_found),
str(member) + "_refound_" +
str(n_found), hit_protein, dna_hit, "", ""
]) + "\n")
G.nodes[node]['seqIDs'] |= set(
[str(member) + "_refound_" + str(n_found)])
n_found += 1
if verbose:
print("Number of refound genes: ", n_found)
return (G)
def search_gff(node_search_dict,
conflicts,
gff_handle_name,
merged_nodes,
search_radius=10000,
prop_match=0.2,
pairwise_id_thresh=0.95,
merge_id_thresh=0.7,
n_cpu=1):
gff_handle = open(gff_handle_name, 'r')
# sort sets to fix order
conflicts = sorted(conflicts)
for node in node_search_dict:
node_search_dict[node] = sorted(node_search_dict[node])
split = gff_handle.read().replace(',', '').split("##FASTA\n")
node_locs = {}
if len(split) != 2:
raise NameError("File does not appear to be in GFF3 format!")
# load fasta
contigs = {}
max_seq_len = 0
with StringIO(split[1]) as temp_fasta:
for record in SeqIO.parse(temp_fasta, 'fasta'):
contigs[record.id] = np.array(list(str(record.seq)))
max_seq_len = max(max_seq_len, len(contigs[record.id]))
# load gff annotation
parsed_gff = gff.create_db("\n".join(
[l for l in split[0].splitlines() if '##sequence-region' not in l]),
dbfn=":memory:",
force=True,
keep_order=True,
from_string=True)
# mask regions that already have genes and convert back to string
seen = set()
for node, geneid in conflicts:
gene = parsed_gff[geneid]
start = min(gene.start, gene.end)
end = max(gene.start, gene.end)
if node in merged_nodes:
db_seq = contigs[gene[0]][max(0, (start -
search_radius)):(end +
search_radius)]
db_seq = "".join(list(db_seq))
hit, loc = search_dna(db_seq,
merged_nodes[node],
prop_match=(end - start) /
float(len(merged_nodes[node])),
pairwise_id_thresh=merge_id_thresh,
refind=False)
# update location
loc[0] = loc[0] + max(0, (start - search_radius))
loc[1] = loc[1] + max(0, (start - search_radius))
node_locs[node] = [gene[0], loc]
else:
node_locs[node] = [gene[0], [start - 1, end]]
for node, geneid in conflicts:
gene = parsed_gff[geneid]
start = min(gene.start, gene.end)
end = max(gene.start, gene.end)
# contigs[gene[0]][(start - 1):end] = "X"
if (gene[0], start - 1, end) in seen:
raise NameError("Duplicate entry!!!")
seen.add((gene[0], start - 1, end))
for sid in contigs:
contigs[sid] = "".join(list(contigs[sid]))
# search for matches
hits = []
for node in node_search_dict:
best_hit = ""
best_loc = None
for search in node_search_dict[node]:
gene = parsed_gff[search[1]]
start = min(gene.start, gene.end)
end = max(gene.start, gene.end)
db_seq = contigs[gene[0]][max(0, (start -
search_radius)):(end +
search_radius)]
hit, loc = search_dna(db_seq,
search[0],
prop_match,
pairwise_id_thresh,
refind=True)
# update location
loc[0] = loc[0] + max(0, (start - search_radius))
loc[1] = loc[1] + max(0, (start - search_radius))
if len(hit) > len(best_hit):
best_hit = hit
best_loc = [gene[0], loc]
hits.append((node, best_hit))
if (best_loc is not None) and (best_hit != ""):
node_locs[node] = best_loc
gff_handle.close()
return [hits, node_locs, max_seq_len]
def repl(m):
return ('X' * len(m.group()))
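# `repl` is used with re.sub in search_dna to blank runs of Ns (and any flanking
# bases) with 'X' characters of equal length, so alignment coordinates are preserved.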
def search_dna(db_seq, search_sequence, prop_match, pairwise_id_thresh,
refind):
found_dna = ""
start = None
end = None
max_hit = 0
loc = [0, 0]
# found=False
# if search_sequence=="":
# if refind:
# print(">>>>>>>>>>>>>")
# print(db_seq)
# found=True
added_E_len = int(len(search_sequence) / 2)
for i, db in enumerate([db_seq, str(Seq(db_seq).reverse_complement())]):
# add some Ns at the start and end to deal with fragments at the end of contigs
db = "E" * added_E_len + db + "E" * added_E_len
aln = edlib.align(search_sequence,
db,
mode="HW",
task='path',
k=10 * len(search_sequence),
additionalEqualities=[
('A', 'N'),
('C', 'N'),
('G', 'N'),
('T', 'N'),
('A', 'E'),
('C', 'E'),
('G', 'E'),
('T', 'E'),
])
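        # additionalEqualities lets ambiguous bases (N) and the 'E' padding match
        # any nucleotide, so hits that overhang a contig end can still align.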
# remove trailing inserts
cig = re.split(r'(\d+)', aln['cigar'])[1:]
if cig[-1] == "I":
aln['editDistance'] -= int(cig[-2])
if cig[1] == "I":
aln['editDistance'] -= int(cig[0])
if aln['editDistance'] == -1:
start = -1
else:
# take hit that is closest to the centre of the neighbouring gene
centre = len(db) / 2.0
tloc = min(aln['locations'],
key=lambda x: min(centre - x[0], centre - x[1]))
start = tloc[0]
end = tloc[1] + 1
# if found:
# print(aln)
# print(start, end)
# skip if nothing was found
if start == -1: continue
possible_dbs = [db]
if db.find("NNNNNNNNNNNNNNNNNNNN") != -1:
possible_dbs += [
re.sub("^[ACGTEX]{0,}NNNNNNNNNNNNNNNNNNNN", repl, db, 1),
re.sub("NNNNNNNNNNNNNNNNNNNN[ACGTEX]{0,}$", repl, db, 1)
]
for posdb in possible_dbs:
# skip if alignment is too short
n_X = posdb[start:end].count("X")
n_E = posdb[start:end].count("E")
aln_length = float(end - start - n_X - n_E)
if (aln_length / len(search_sequence)) <= prop_match: continue
if (posdb[start:end].count("A") + posdb[start:end].count("C") +
posdb[start:end].count("G") + posdb[start:end].count("T")
) / len(search_sequence) <= prop_match:
continue
# determine an approximate percentage identity
pid = 1.0 - (aln['editDistance'] - n_X) / (1.0 * aln_length)
# skip if identity below threshold
if pid <= pairwise_id_thresh: continue
# if found:
# print("aln_length:", aln_length)
# print("pid:", pid)
if max_hit < (pid * aln_length):
found_dna = posdb[start:end]
max_hit = (pid * aln_length)
if i == 0:
loc = [start, end]
else:
                        loc = [len(posdb) - tloc[1] - 1, len(posdb) - tloc[0]]

    # NOTE: the original source is truncated at this point; the return below is a
    # plausible reconstruction (callers unpack `hit, loc = search_dna(...)`).
    return found_dna, loc
# Repository: annihilatorrrr/pytgbot
# -*- coding: utf-8 -*-
"""
A cli for the bot api.
Supported commands are all `Bot.*` commands.
They need to be called with their parameters given as json.
Custom commands to make stuff easier:
`msg <peer> <text>`
"""
from itertools import chain
import requests
from DictObject import DictObject
from luckydonaldUtils.exceptions import assert_type_or_raise
from pytgbot.api_types.receivable.inline import InlineQuery
from pytgbot.api_types.receivable.updates import Message, Update
from pytgbot.api_types.receivable.peer import Peer, Chat, User
from pytgbot.exceptions import TgApiException
from pytgbot import Bot
from luckydonaldUtils.logger import logging
from luckydonaldUtils.interactions import input, answer
from luckydonaldUtils.encoding import to_binary as b, to_native as n
from inspect import getmembers, ismethod, getargspec, formatargspec
from threading import Thread
# cool input
import readline
# cool output
# see iterm2_image module source,
# at https://github.com/zakx/iterm2_image/blob/f1134a720c37a515c5b15c438ae7bca92d4d4c55/iterm2_image.py
from io import BytesIO
from base64 import b64encode
import sys
def read_file_to_buffer(filename):
"""
Reads a file to string buffer
:param filename:
:return:
"""
f = open(filename, "r")
buf = BytesIO(f.read())
f.close()
return buf
# end def
def iterm_show_file(filename, data=None, inline=True, width="auto", height="auto", preserve_aspect_ratio=True):
"""
https://iterm2.com/documentation-images.html
:param filename:
:param data:
:param inline:
:param width:
:param height:
:param preserve_aspect_ratio:
Size:
- N (Number only): N character cells.
- Npx (Number + px): N pixels.
- N% (Number + %): N percent of the session's width or height.
- auto: The image's inherent size will be used to determine an appropriate dimension.
:return:
"""
width = str(width) if width is not None else "auto"
height = str(height) if height is not None else "auto"
if data is None:
data = read_file_to_buffer(filename)
# end if
data_bytes = data.getvalue()
output = "\033]1337;File=" \
"name={filename};size={size};inline={inline};" \
"preserveAspectRatio={preserve};width={width};height={height}:{data}\a\n".format(
filename=n(b64encode(b(filename))), size=len(data_bytes), inline=1 if inline else 0,
width=width, height=height, preserve=1 if preserve_aspect_ratio else 0,
data=n(b64encode(data_bytes)),
)
#sys.stdout.write(output)
return output
# end def
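# Usage sketch (assumes an iTerm2-compatible terminal; "cat.png" is a
# hypothetical file):
#   sys.stdout.write(iterm_show_file("cat.png", width="40", height="auto"))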
try:
from somewhere import API_KEY # so I don't upload them to github :D
except ImportError:
API_KEY = None
# end if
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)
logging.add_colored_handler(level=logging.INFO)
cached_chats = {}
class Color(object):
"""
utility to return ansi colored text.
just to store the colors next to the function.
"""
# Color codes: http://misc.flogisoft.com/bash/tip_colors_and_formatting
def __init__(self):
self.formatter = self.create_formatter()
# end def
RESET = 0
DEFAULT = 39
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
GREY = 90
LIGHT_BLUE = 94
BG_RED = 41
BG_GREY = 100
BG_DEFAULT = 49
color_prefix = '\033['
def prepare_color(self, color_number):
return '%s%dm' % (self.color_prefix, color_number)
# end def
def create_formatter(self):
return DictObject.objectify(dict(
color_black=self.prepare_color(self.BLACK),
color_red=self.prepare_color(self.RED),
color_green=self.prepare_color(self.GREEN),
color_yellow=self.prepare_color(self.YELLOW),
color_blue=self.prepare_color(self.BLUE),
color_lightblue=self.prepare_color(self.LIGHT_BLUE),
color_magenta=self.prepare_color(self.MAGENTA),
color_cyan=self.prepare_color(self.CYAN),
color_white=self.prepare_color(self.WHITE),
color_grey=self.prepare_color(self.GREY),
color_off=self.prepare_color(self.DEFAULT), # Default foreground color
background_red=self.prepare_color(self.BG_RED),
background_grey=self.prepare_color(self.BG_GREY),
background_default=self.prepare_color(self.BG_DEFAULT), # turn of background
background_off=self.prepare_color(self.BG_DEFAULT), # Default background color
inverse_on=self.prepare_color(7), # Reverse (invert the foreground and background colors)
inverse_off=self.prepare_color(27), # Reset reverse
all_off=self.prepare_color(self.RESET),
))
# end def
def overwrite_color(self, string, color, prefix=False, reset=False):
"""
:param string: the input string
:param color: the new color, given as an int color code or an already prepared escape sequence
:param prefix: whether the color should also be started at the beginning of the string.
:param reset: whether the color should be reset at the end of the string.
:type reset: bool | int | str
:return: the string with any embedded color resets overwritten by the new color
"""
if isinstance(color, int):
color = self.prepare_color(color)
# end if
prefix = color if prefix else ""
if isinstance(reset, int):
reset = self.prepare_color(reset)
elif isinstance(reset, bool):
reset = self.formatter.color_off if reset else ""
# end if
return (
prefix +
string.replace(self.formatter.color_off, self.formatter.color_off+color).replace(self.formatter.all_off, self.formatter.all_off + color) +
reset
)
# end def
# end class
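# Small usage sketch for Color (not part of the original flow): wrap an
# already-colored snippet in yellow so that the embedded "color off" escape
# switches back to yellow instead of the terminal's default foreground,
# which is exactly what overwrite_color is for.
def _demo_color():
    color = Color()
    inner = "{color_red}error{color_off} details".format(**color.formatter)
    print(color.overwrite_color(inner, color.YELLOW, prefix=True, reset=True))
# end def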
class CLI(object):
METHOD_INCLUDES = {} # added to in the __init__ method.
METHOD_EXCLUDES = ["do",]
def __init__(self, API_KEY, debug=False):
if API_KEY is None:
API_KEY = self.ask_for_apikey()
self._api_key = API_KEY
self.bot = Bot(API_KEY, return_python_objects=True)
self.me = self.bot.get_me()
logger.info("Information about myself: {info}".format(info=self.me))
self.METHOD_INCLUDES.update({
"debug": self.cmd_debug,
"help": self.cmd_help,
})
self.color = Color()
self.update_thread = self.create_update_thread()
self.functions = {k: v for k, v in self.get_functions()}
self.current_candidates = []
self.is_debug = debug
self.query = "pytgbot> " # in front of the input.
self.register_tab_completion()
# end def
def print(self, text):
current_input = readline.get_line_buffer()
delete_chars = len(self.query) + len(current_input)
# remove the input:
sys.stdout.flush()
for i in range(delete_chars):
sys.stdout.write("\b \b")
# end for
sys.stdout.write("\033[K")
sys.stdout.flush()
# actual output
sys.stdout.write(str(text))
sys.stdout.write("\n")
sys.stdout.flush()
# load back the input
sys.stdout.write(self.query)
sys.stdout.flush()
sys.stdout.write(current_input)
sys.stdout.flush()
readline.redisplay()
# end def
def ask_for_apikey(self):
return answer("Input your bot API key.")
# end def
def register_tab_completion(self):
# Use the tab key for completion
readline.parse_and_bind('tab: complete')
# Register our completer function
readline.set_completer(self.complete)
# end def
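# Note: readline calls the completer as complete(text, state) with
# state = 0, 1, 2, ... and expects one candidate string per call, then None
# once the candidates are exhausted. The actual `complete` method lives
# further down in the original file (beyond this excerpt); a minimal sketch
# of that protocol, reusing the `functions` dict and `current_candidates`
# attributes set up in __init__, could look like:
#
#     def complete(self, text, state):
#         if state == 0:  # first call for this prefix: (re)build candidates
#             self.current_candidates = sorted(
#                 name for name in self.functions if name.startswith(text)
#             )
#         # end if
#         try:
#             return self.current_candidates[state]
#         except IndexError:
#             return None
#         # end try
#     # end def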
def create_update_thread(self):
tg_update_thread = Thread(target=self.get_updates, name="telegram update thread")
tg_update_thread.daemon = True
tg_update_thread.start()
return tg_update_thread
# end def
def run(self):
print("You can enter commands now.")
while True:
sys.stdout.write("\r")
sys.stdout.flush()
cmd = input(self.query)
try:
result_str = self.parse_input(cmd)
if result_str:
self.print(result_str)
except Exception as e:
logger.exception("Error.")
self.print("{color_red}{error}{all_off}".format(
error=e, **self.color.formatter
))
# end try
# end while
# end def
def update_callback(self, string):
try:
self.print_update(string)
except AttributeError:
self.print(string)
raise
# end try
# end def
def get_updates(self):
last_update_id = 0
while True: # loop forever.
for update in self.bot.get_updates(
limit=100, offset=last_update_id + 1, poll_timeout=60, error_as_empty=True
): # for every new update
last_update_id = update.update_id
if self.is_debug:
logger.info(repr(last_update_id))
# end if
try:
self.update_callback(update)
except Exception:
logger.exception("update_callback failed\nIncomming update: {u}".format(u=update))
# end def
# end for
# end while
# end def
def print_update(self, update):
self.print(self.process_update(update))
# end def
def process_update(self, update):
assert_type_or_raise(update, Update, parameter_name="update") # is message
if update.message:
return self.process_message(update.message)
elif update.channel_post:
return self.process_message(update.channel_post)
elif update.inline_query:
qry = update.inline_query
assert_type_or_raise(qry, InlineQuery, parameter_name="update.inline_query")
qry_from_print = self.color.overwrite_color(
self.print_peer(qry.from_peer, id_prefix=True),
self.color.formatter.color_yellow,
prefix=True, reset=self.color.formatter.color_white
)
return "[query {id}] {from_print}:{all_off} {text}".format(from_print=qry_from_print, id=qry.id, text=qry.query, **self.color.formatter)
else:
return str(update)
# end if
# end def
def process_message(self, msg):
# prepare prefix with chat infos:
assert_type_or_raise(msg, Message, parameter_name="msg")
user_print=None
if "from_peer" in msg and msg.from_peer: # 'channel' might have no `from_peer`.
user_print = self.color.overwrite_color(
self.print_peer(msg.from_peer, id_prefix="user"),
self.color.formatter.color_yellow,
prefix=True, reset=self.color.formatter.color_white
)
if msg.chat.type == 'private':
prefix = "{background_grey}{color_white}[msg {message_id}]{color_off} {color_yellow}{user}{color_white}: ".format(
message_id=msg.message_id, user=user_print, **self.color.formatter
)
elif msg.chat.type in ('group', 'supergroup'):
group_print = self.color.overwrite_color(
self.print_peer(msg.chat, id_prefix=True),
self.color.formatter.color_yellow,
prefix = True, reset = self.color.formatter.color_white
)
prefix = "{background_grey}{color_white}[msg {message_id}]{color_off} {color_yellow}{user}{color_white} in {chat}: ".format(
message_id=msg.message_id, user=user_print, chat=group_print, **self.color.formatter
)
elif msg.chat.type == 'channel':
group_print = self.color.overwrite_color(
self.print_peer(msg.chat, id_prefix=True),
self.color.formatter.color_yellow,
prefix=True, reset=self.color.formatter.color_white
)
prefix = "{background_grey}{color_white}[msg {message_id}]{color_off} {color_white}In {chat}: ".format(
message_id=msg.message_id, user=user_print, chat=group_print, **self.color.formatter
)
else:
prefix = "{background_grey}{color_white}[msg {message_id}]{color_off} {color_red}UNKNOWN ORIGIN{color_white}: ".format(
message_id=msg.message_id, **self.color.formatter
)
# end if
# now the message types
if "text" in msg:
return prefix + self.color.formatter.color_red + msg.text + self.color.formatter.all_off
if "photo" in msg:
photo = msg.photo[0]
for p in msg.photo[1:]:
if p.file_size > photo.file_size:
photo = p
# end if
# end for
return prefix + "\n" + self.process_file(photo, msg.caption, file_type="photo", height="10") + self.color.formatter.all_off
if "sticker" in msg:
return prefix + "\n" + self.process_file(msg.sticker, msg.caption, file_type="sticker", as_png=True, height="10") + self.color.formatter.all_off
# end if
# end def
def process_file(self, file, caption, file_type="file", as_png=False, inline=True, height=None):
file_object = self.bot.get_file(file.file_id)
file_url = self.bot.get_download_url(file_object)
file_content = get_file(file_url, as_png=as_png)
file_name = file_url.split("/")[-1]
if as_png:
file_name = file_name + ".png"
# end if
save_file_name = str(file.file_id) + "__" + file_name
return "[{type} {file_id}]\n{image}{color_red}{caption}{color_off}".format(
file_id=file.file_id, caption=(" " + caption if caption else ""),
image=iterm_show_file(save_file_name, data=file_content, inline=inline, height=height),
type=file_type, file_name=save_file_name, **self.color.formatter
)
# end def
def parse_input(self, cmd):
if " " not in cmd: # functions like get_me doesn't need params.
command, args = cmd, None
else:
command, args = cmd.split(" ", maxsplit=1)
if command == "msg":
user, message = args.split(" ", maxsplit=1)
try:
user = cached_chats[user]
except KeyError:
return "{color_red}{inverse_on}[FAIL]{inverse_off} I don't have that peer cached.{all_off}".format(
**self.color.formatter
)
# TODO: accept anyway? So you can use a username or something?
try:
result = self.bot.send_msg(user, message)
return "{color_green}{inverse_on}[ OK ]{inverse_off} {0}{all_off}".format(result, **self.color.formatter)
except TgApiException as e:
return "{color_red}{inverse_on}[FAIL]{inverse_off} {0}{all_off}".format(e, **self.color.formatter)
# end try
# end if
if command not in self.functions:
return '{color_red}{inverse_on}ERROR:{inverse_off} The function {0!r} was not found.{all_off}'.format(
command, **self.color.formatter
)
# end if
cmd_func = self.functions[command]
try:
if args:
parsed_args = parse_args(args)
if not isinstance(parsed_args, (list, dict)):
parsed_args = [str(parsed_args)]
# end if not isinstance
if isinstance(parsed_args, list):
call_result = cmd_func(*parsed_args)
else:
assert isinstance(parsed_args, dict)
call_result = cmd_func(**parsed_args)
# end if isinstance
else:
call_result = cmd_func()
# end if
== 0:
for widget in widgets:
widget.hide()
else:
combox_date_cols.set_active(0)
for widget in widgets:
widget.show()
self._refresh_view()
data_source.connect('rows-changed', self.on_data_source_rows_changed)
def add_options_filter(self, attr, options, add_empty_option=True):
"""Add optional options filter for attr.
:param str attr: the attr that will be filtered
:param iterable options: the options that will be displayed
on the filter, as (label, option) tuples.
The label will be displayed on the combo and the
option will be used to generate the WHERE clause
:param bool add_empty_option: if we should add an empty
option as the first option in the combo. Its label will
be the label of the column in question and selecting
it will be the same as not filtering by that attr.
:returns: the newly created combobox
:rtype: :class:`Gtk.ComboBox`
"""
for col_dict in self.model.columns:
if col_dict['name'] == attr:
label = col_dict['display']
break
else:
raise ValueError("no column named %r" % (attr, ))
model = Gtk.ListStore(str, object)
if add_empty_option:
model.append((label, NO_FILTER_OPTION))
for option in options:
model.append(option)
combo = Gtk.ComboBox()
combo.set_model(model)
renderer = Gtk.CellRendererText()
combo.pack_start(renderer, True)
combo.add_attribute(renderer, 'text', 0)
combo.set_active(0)
combo.set_id_column(0)
combo.connect('changed', self.on_filter_changed, attr)
self.extra_filter_widgets[attr] = combo
self.container.extra_filters.pack_start(
combo, expand=False, fill=False, padding=0)
self.container.extra_filters.show_all()
# Make sure this separator is visible. It may have been hidden
# if we don't have any datetime columns.
self.container.filters_separator.show()
return combo
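# Illustrative call (the attr name and options are hypothetical): this adds
# a combo labeled with the "status" column's display label whose first entry
# disables the filter, as described by add_empty_option above:
#
#     combo = self.add_options_filter(
#         'status',
#         [('Active', 1), ('Inactive', 0)],  # (label, option) tuples
#         add_empty_option=True,
#     )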
def set_selected_row_by_id(self, row_id,
scroll_to_row=True, callback=None):
"""Set the selected row by id.
:param int row_id: The id of the row to set as the selected one
:param bool scroll_to_row: If we should scroll to that row,
putting it in the center of the window
:param callable callback: A callback to call after the
row has been loaded
"""
row = self.model.get_row_by_id(row_id, load_rows=True)
if row is None:
logger.warning("No row found with id %s", row_id)
if callback is not None:
assert callable(callback)
callback()
return
path = Gtk.TreePath(row.path)
self.view.set_cursor(path)
def _callback():
"""Timeout callback."""
if scroll_to_row:
with self.view.block_draw():
if self.view is self.tree_view:
self.view.scroll_to_cell(path, None, True, 0.5, 0.0)
elif self.view is self.icon_view:
self.view.scroll_to_path(path, True, 0.5, 0.0)
if callback is not None:
assert callable(callback)
GObject.idle_add(callback)
# According to the documentation, PRIORITY_HIGH_IDLE + 20 is
# used by redrawing operations, so PRIORITY_HIGH_IDLE + 25
# should be enough to make sure we call callback just after
# the widget finishes redrawing itself.
GObject.timeout_add(100, _callback,
priority=GLib.PRIORITY_HIGH_IDLE + 25)
###
# Callbacks
###
def on_scrolled(self, vadj):
"""Load new records upon scroll to end of visible rows.
:param vadj: Adjustment widget associated with vertical scrollbar
:type vadj: :class:`Gtk.Adjustment`
"""
scrolled_to_bottom = (
vadj.get_value() == (vadj.get_upper() - vadj.get_page_size()) or
vadj.get_page_size() == vadj.get_upper())
if scrolled_to_bottom:
self.model.add_rows()
self._set_visible_range()
return False
def on_popup_column_visibility_changed(self, popup, name, value):
"""Set the list of columns to display based on column checkboxes.
:param popup: the columns popup
:type popup: :class:`OptionsPopup`
:param str name: the name of the columns
:param bool value: the new column visibility
"""
if value:
self.model.display_columns.add(name)
else:
self.model.display_columns.discard(name)
self.model.data_source.set_visible_columns(
self.model.display_columns)
self.view.refresh()
def on_popup_view_changed(self, popup, new_view):
"""Set the actual view based on the options popup option.
:param popup: the columns popup
:type popup: :class:`OptionsPopup`
:param int new_view: either :attr:`OptionsPopup.VIEW_TREE` or
:attr:`OptionsPopup.VIEW_ICON`
"""
if new_view == OptionsPopup.VIEW_ICON:
self.view = self.icon_view
self.tree_view.set_model(None)
self.model.image_max_size = 100.0
self.model.image_draw_border = True
elif new_view in [OptionsPopup.VIEW_TREE, OptionsPopup.VIEW_FLAT]:
# Changing view from/to flat will make expanded_ids have no meaning
self.tree_view.expanded_ids.clear()
self.view = self.tree_view
self.icon_view.set_model(None)
self.model.image_max_size = 24.0
self.model.image_draw_border = False
else:
raise AssertionError("Unrecognized option %r" % (new_view, ))
# We want flat for flat and iconview, and only if we really have
# a flat column.
self.model.active_params['flat'] = (
self.model.flat_column_idx is not None and
new_view in [OptionsPopup.VIEW_FLAT, OptionsPopup.VIEW_ICON])
child = self.container.grid_scrolledwindow.get_child()
self.container.grid_scrolledwindow.remove(child)
self.container.grid_scrolledwindow.add(self.view)
self.view.show_all()
self._refresh_view()
# FIXME: Is there a way to keep the selection after the view was
# refreshed? The actual selected paths are not guaranteed to be the
# same, so how can we get them again?
if self.selected_record_callback:
self.selected_record_callback(None)
def on_treeview_cursor_changed(self, view):
"""Get the data for a selected record and run optional callback.
:param view: The treeview containing the row
:type view: Gtk.TreeView
"""
selection = view.get_selection()
model, row_iterator = selection.get_selected()
if row_iterator and self.selected_record_callback:
record = self.model.data_source.get_single_record(
model[row_iterator][self.model.id_column_idx])
self.selected_record_callback(record)
elif self.selected_record_callback:
self.selected_record_callback(None)
def on_iconview_selection_changed(self, view):
"""Get the data for a selected record and run optional callback.
:param view: The icon view containing the selected record
:type view: :class:`Gtk.IconView`
"""
selections = view.get_selected_items()
row_iterator = selections and self.model.get_iter(selections[0])
if row_iterator and self.selected_record_callback:
model = view.get_model()
record = self.model.data_source.get_single_record(
model[row_iterator][self.model.id_column_idx])
self.selected_record_callback(record)
elif self.selected_record_callback:
self.selected_record_callback(None)
def on_iconview_item_activated(self, view, path):
"""Get the data the activated record and run optional callback.
:param view: The icon view containing the selected record
:type view: :class:`Gtk.IconView`
:param path: the activated path
"""
if not path or not self.activated_icon_callback:
return
row_iterator = view.model.get_iter(path)
record = self.model.data_source.get_single_record(
self.model[row_iterator][self.model.id_column_idx])
self.activated_icon_callback(record, view.pixbuf_column)
def on_treeview_row_activated(self, view, path, column):
"""Handle row-activated signal on the treeview.
Run the optional :obj:`.activated_row_callback` when
a row gets activated.
:param view: The treeview containing the row
:type view: :class:`Gtk.TreeView`
:param path: the activated path
:type path: :class:`Gtk.TreePath`
:param column: the column that was activated on the row
:type column: class:`Gtk.TreeViewColumn`
"""
if self.activated_row_callback is None:
return
row = self.model[self.model.get_iter(path)]
selected_id = row[self.model.id_column_idx]
record = self.model.data_source.get_single_record(selected_id)
self.activated_row_callback(record)
def on_tree_view_row_expanded(self, treeview, iter_, path):
"""Handle row-expanded events.
Make sure visible range will be updated on the model.
:param treeview: the treeview that had one of its rows expanded
:type treeview: :class:`Gtk.TreeView`
:param iter_: the iter pointing to the expanded row
:type iter_: :class:`Gtk.TreeIter`
:param path: the path pointing to the expanded row
:type path: :class:`Gtk.TreePath`
"""
GObject.idle_add(self._set_visible_range)
def on_tree_view_row_collapsed(self, treeview, iter_, path):
"""Handle row-collapsed events.
Make sure visible range will be updated on the model.
:param treeview: the treeview that had one of its rows collapsed
:type treeview: :class:`Gtk.TreeView`
:param iter_: the iter pointing to the collapsed row
:type iter_: :class:`Gtk.TreeIter`
:param path: the path pointing to the collapsed row
:type path: :class:`Gtk.TreePath`
"""
GObject.idle_add(self._set_visible_range)
def on_image_cache_manager_image_loaded(self, cm):
"""Handle image-loaded event for image cache manager.
When an image finishes loading, queue a redraw to make sure
the image will be loaded.
:param cm: the cache manager that emitted the event
:type cm: :class:`datagrid_gtk3.utils.imageutils.ImageCacheManager`
"""
self.view.queue_draw()
def on_filter_changed(self, combo, attr):
"""Handle selection changed on filter comboboxes.
:param combo: the combo that received the signal
:type combo: :class:`Gtk.ComboBox`
:param str attr: the name of the attr to filter
"""
model = combo.get_model()
value = model[combo.get_active()][1]
if value is NO_FILTER_OPTION:
remove_keys = [attr]
update_dict = None
else:
remove_keys = None
update_dict = {
attr: {
'operator': 'is' if value is None else '=',
'param': value,
}
}
self._refresh_view(update_dict=update_dict, remove_keys=remove_keys)
def on_data_source_rows_changed(self, data_source, params, ids):
"""Handle data_source rows-changed signal.
When rows get updated on the data source, make sure to reflect
the changes on the view without requiring an extra query.
:param data_source: The data source which emitted the signal
:type data_source: `datagrid_gtk3.db.DataSource`
:param params: A dict of params that got updated, mapped as
``column_name: new_value``
:type params: dict
:param ids: The row ids that got updated
:type ids: [int]
"""
params_idx = [
(self.model.data_source.columns_idx[k], v)
for k, v in params.iteritems()]
rows = (row
for id_, row in self.model.row_id_mapper.iteritems()
if ids is None or id_ in ids)
for row in rows:
for idx, value in params_idx:
row.data[idx] = value
path = Gtk.TreePath(row.path)
self.model.row_changed(path, self.model.get_iter(row.path))
# Even if we call `view.queue_draw` here, it would only be updated
# when it got focused. By setting refresh_draw to True, it will
# force it to refresh the values when the view gets visible on the
# screen, even if it is not focused atm.
self.view.refresh_draw = True
def on_data_loaded(self, model, total_recs):
"""Update the total records label.
:param model: Current datagrid model
:type model: :class:`DataGridModel`
:param int total_recs: Total records for current query
"""
self.container.label_num_recs.set_markup(
'<small>%d records</small>' % total_recs
)
def on_search_clicked(self, widget):
"""Execute the full-text search for given keyword.
:param widget: The widget that called the event
:type widget: :class:`Gtk.Widget`
"""
search = self.container.entry_search.get_text()
update_dict = {
'search': {
'operator': '=',
'param': search
if not plotData.plotdict["y_lims"] is None and len(plotData.plotdict["y_lims"]) > 1: # reconstructed condition, mirroring the z-lims handling below
self.y_min = plotData.plotdict["y_lims"][0] - plotData.plotdict["y_lims"][1]
self.y_max = plotData.plotdict["y_lims"][0] + plotData.plotdict["y_lims"][1]
else:
tmp_y_min = self.y_min * ((plotData.plotdict["y_rel_lims"][0] if self.y_min > 0.0 else 2.0-plotData.plotdict["y_rel_lims"][0]) if self.max_dim < 3 else 1.0)
tmp_y_max = self.y_max * ((plotData.plotdict["y_rel_lims"][1] if self.y_max > 0.0 else 2.0-plotData.plotdict["y_rel_lims"][1]) if self.max_dim < 3 else 1.0)
if not plotData.plotdict["y_lims"] is None:
center = plotData.plotdict["y_lims"][0]
width = max([abs(y - center) for y in [tmp_y_min, tmp_y_max]])
self.y_min = center - width
self.y_max = center + width
else:
self.y_min = tmp_y_min
self.y_max = tmp_y_max
else:
if plotData.plotdict["sym_y_lims"]:
log.warning("Symmetric limits are not yet implemented for logarithmic axes!")
if not plotData.plotdict["y_lims"] is None:
self.y_min = plotData.plotdict["y_lims"][0]
elif self.max_dim < 3:
self.y_min *= (plotData.plotdict["y_rel_lims"][0] if self.y_min > 0.0 else 2.0-plotData.plotdict["y_rel_lims"][0])
if not plotData.plotdict["y_lims"] is None and len(plotData.plotdict["y_lims"]) > 1:
self.y_max = plotData.plotdict["y_lims"][1]
elif self.max_dim < 3:
self.y_max *= (plotData.plotdict["y_rel_lims"][1] if self.y_max > 0.0 else 2.0-plotData.plotdict["y_rel_lims"][1])
if plotData.plotdict["cms"]:
self.y_max *= 1.2
if self.y_min == self.y_max:
self.y_max += 1.0
# z lims
if plotData.plotdict["sym_z_lims"] and (not plotData.plotdict["z_log"]):
if not plotData.plotdict["z_lims"] is None and len(plotData.plotdict["z_lims"]) > 1:
self.z_min = plotData.plotdict["z_lims"][0] - plotData.plotdict["z_lims"][1]
self.z_max = plotData.plotdict["z_lims"][0] + plotData.plotdict["z_lims"][1]
else:
tmp_z_min = self.z_min * (0.99 if self.z_min > 0.0 else 1.01)
tmp_z_max = self.z_max * (1.01 if self.z_max > 0.0 else 0.99)
if not plotData.plotdict["z_lims"] is None:
center = plotData.plotdict["z_lims"][0]
width = max([abs(z - center) for z in [tmp_z_min, tmp_z_max]])
self.z_min = center - width
self.z_max = center + width
else:
self.z_min = tmp_z_min
self.z_max = tmp_z_max
else:
if plotData.plotdict["sym_z_lims"]:
log.warning("Symmetric limits are not yet implemented for logarithmic axes!")
if not plotData.plotdict["z_lims"] is None:
self.z_min = plotData.plotdict["z_lims"][0]
elif not self.z_min is None:
if plotData.plotdict["z_log"]:
self.z_min *= (0.9 if self.z_min > 0.0 else 1.1)
else:
self.z_min *= (0.99 if self.z_min > 0.0 else 1.01)
if not plotData.plotdict["z_lims"] is None and len(plotData.plotdict["z_lims"]) > 1:
self.z_max = plotData.plotdict["z_lims"][1]
elif not self.z_max is None:
if plotData.plotdict["z_log"]:
self.z_max *= (1.1 if self.z_max > 0.0 else 0.9)
else:
self.z_max *= (1.01 if self.z_max > 0.0 else 0.99)
if (not self.z_min is None) and (not self.z_max is None) and (self.z_min == self.z_max):
self.z_max += 1.0
# y subplot lims
if plotData.plotdict["sym_y_subplot_lims"]:
if not plotData.plotdict["y_subplot_lims"] is None and len(plotData.plotdict["y_subplot_lims"]) > 1:
self.y_sub_min = plotData.plotdict["y_subplot_lims"][0] - plotData.plotdict["y_subplot_lims"][1]
self.y_sub_max = plotData.plotdict["y_subplot_lims"][0] + plotData.plotdict["y_subplot_lims"][1]
else:
tmp_y_sub_min = self.y_sub_min * (0.9 if self.y_sub_min > 0.0 else 1.1)
tmp_y_sub_max = self.y_sub_max * (1.1 if self.y_sub_max > 0.0 else 0.9)
if not plotData.plotdict["y_subplot_lims"] is None:
center = plotData.plotdict["y_subplot_lims"][0]
width = max([abs(y - center) for y in [tmp_y_sub_min, tmp_y_sub_max]])
self.y_sub_min = center - width
self.y_sub_max = center + width
else:
self.y_sub_min = tmp_y_sub_min
self.y_sub_max = tmp_y_sub_max
else:
if not plotData.plotdict["y_subplot_lims"] is None:
self.y_sub_min = plotData.plotdict["y_subplot_lims"][0]
elif not self.y_sub_min is None:
self.y_sub_min *= (0.9 if self.y_sub_min > 0.0 else 1.1)
if not plotData.plotdict["y_subplot_lims"] is None and len(plotData.plotdict["y_subplot_lims"]) > 1:
self.y_sub_max = plotData.plotdict["y_subplot_lims"][1]
elif not self.y_sub_max is None:
self.y_sub_max *= (1.1 if self.y_sub_max > 0.0 else 0.9)
if (not self.y_sub_min is None) and (not self.y_sub_max is None) and (self.y_sub_min == self.y_sub_max):
self.y_sub_max += 1.0
# z subplot lims
if not self.z_sub_min is None:
self.z_sub_min *= (0.9 if self.z_sub_min > 0.0 else 1.1)
if not self.z_sub_max is None:
self.z_sub_max *= (1.1 if self.z_sub_max > 0.0 else 0.9)
if (not self.z_sub_min is None) and (not self.z_sub_max is None) and (self.z_sub_min == self.z_sub_max):
self.z_sub_max += 1.0
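# Worked example for the symmetric-limits logic above: with a requested
# center of 0 (plotdict["y_lims"] == [0]) and padded data limits of
# tmp_y_min = -2.0 and tmp_y_max = 5.0, the half-width becomes
# width = max(abs(-2.0 - 0), abs(5.0 - 0)) = 5.0, so the axis is set to the
# symmetric interval [-5.0, 5.0] around the requested center.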
def make_plots(self, plotData):
super(PlotRoot, self).make_plots(plotData)
# draw empty histograms for the axes
n_binsX = len(plotData.plotdict["x_tick_labels"]) if plotData.plotdict["x_tick_labels"] else 1
n_binsY = len(plotData.plotdict["y_tick_labels"]) if plotData.plotdict["y_tick_labels"] else 1
n_sub_binsX = len(plotData.plotdict["x_tick_labels"]) if plotData.plotdict["x_tick_labels"] else 1
n_sub_binsY = len(plotData.plotdict["y_tick_labels"]) if plotData.plotdict["y_tick_labels"] else 1
if plotData.plot.plot_pad:
plotData.plot.plot_pad.cd()
if self.max_dim == 2:
self.axes_histogram = ROOT.TH2F("axes_histogram", "", n_binsX, self.x_min, self.x_max, n_binsY, self.y_min, self.y_max)
self.axes_histogram.SetMinimum(self.y_min)
self.axes_histogram.SetMaximum(self.y_max)
else:
self.axes_histogram = ROOT.TH2F("axes_histogram", "", n_binsX, self.x_min, self.x_max, n_binsY, self.y_min, self.y_max)
self.axes_histogram.SetMinimum(self.z_min)
self.axes_histogram.SetMaximum(self.z_max)
# axis labels
if (not plotData.plotdict["x_label"] is None) and (plotData.plotdict["x_label"] != ""):
self.axes_histogram.GetXaxis().SetTitle(plotData.plotdict["x_label"])
if (not plotData.plotdict["y_label"] is None) and (plotData.plotdict["y_label"] != ""):
self.axes_histogram.GetYaxis().SetTitle(plotData.plotdict["y_label"])
if (self.max_dim > 2) and (not plotData.plotdict["z_label"] is None) and (plotData.plotdict["z_label"] != ""):
self.axes_histogram.GetZaxis().SetTitle(plotData.plotdict["z_label"])
# tick labels
if plotData.plotdict["x_tick_labels"] and len(plotData.plotdict["x_tick_labels"]) > 0:
for x_bin in range(n_binsX):
self.axes_histogram.GetXaxis().SetBinLabel(x_bin+1, plotData.plotdict["x_tick_labels"][x_bin])
if plotData.plotdict["y_tick_labels"] and len(plotData.plotdict["y_tick_labels"]) > 0:
for y_bin in range(n_binsY):
self.axes_histogram.GetYaxis().SetBinLabel(y_bin+1, plotData.plotdict["y_tick_labels"][y_bin])
# avoid scientific notation for x-axis
self.axes_histogram.GetXaxis().SetNoExponent(True)
# shift the exponent for y-axis to avoid overlapping with title
ROOT.TGaxis.SetExponentOffset(-0.069,0.015,"y")
self.axes_histogram.Draw("AXIS")
for line_graph in self.plot_vertical_lines:
line_graph.Draw("L SAME")
if plotData.plot.subplot_pad:
plotData.plot.subplot_pad.cd()
if self.max_sub_dim == 2:
self.subplot_axes_histogram = ROOT.TH2F("subplot_axes_histogram", "", n_sub_binsX, self.x_min, self.x_max, n_sub_binsY, self.y_sub_min, self.y_sub_max)
self.subplot_axes_histogram.SetMinimum(self.y_sub_min)
self.subplot_axes_histogram.SetMaximum(self.y_sub_max)
else:
self.subplot_axes_histogram = ROOT.TH2F("subplot_axes_histogram", "", n_sub_binsX, self.x_min, self.x_max, n_sub_binsY, self.y_sub_min, self.y_sub_max)
self.subplot_axes_histogram.SetMinimum(self.z_sub_min)
self.subplot_axes_histogram.SetMaximum(self.z_sub_max)
# axis labels
if (not plotData.plotdict["x_label"] is None) and (plotData.plotdict["x_label"] != ""):
self.subplot_axes_histogram.GetXaxis().SetTitle(plotData.plotdict["x_label"])
if (not plotData.plotdict["y_subplot_label"] is None) and (plotData.plotdict["y_subplot_label"] != ""):
self.subplot_axes_histogram.GetYaxis().SetTitle(plotData.plotdict["y_subplot_label"])
#if (self.max_sub_dim > 2) and (not plotData.plotdict["z_subplot_label"] is None) and (plotData.plotdict["z_subplot_label"] != ""):
# self.subplot_axes_histogram.GetZaxis().SetTitle(plotData.plotdict["z_subplot_label"])
# tick labels
if plotData.plotdict["x_tick_labels"] and len(plotData.plotdict["x_tick_labels"]) > 0:
for x_bin in range(n_sub_binsX):
self.subplot_axes_histogram.GetXaxis().SetBinLabel(x_bin+1, plotData.plotdict["x_tick_labels"][x_bin])
# avoid scientific notation for x-axis
self.subplot_axes_histogram.GetXaxis().SetNoExponent(True)
self.subplot_axes_histogram.Draw("AXIS")
for line_graph in self.subplot_line_graphs:
line_graph.Draw("L SAME")
for nick, subplot, marker, colors, colormap in zip(
plotData.plotdict["nicks"],
plotData.plotdict["subplots"],
plotData.plotdict["markers"],
plotData.plotdict["colors"],
plotData.plotdict["colormap"]
):
# select pad to plot on
pad = plotData.plot.subplot_pad if subplot else plotData.plot.plot_pad
pad.cd()
# set color map
if colormap:
if len(set(colors)) == 1:
ROOT.gStyle.SetPalette(colors[0])
else:
reds = [ROOT.gROOT.GetColor(color).GetRed() for color in colors]
greens = [ROOT.gROOT.GetColor(color).GetGreen() for color in colors]
blues = [ROOT.gROOT.GetColor(color).GetBlue() for color in colors]
ROOT.TColor.CreateGradientColorTable(
len(colors),
array.array("d", [float(index) / (len(colors)-1) for index in xrange(len(colors))]),
array.array("d", reds),
array.array("d", greens),
array.array("d", blues),
ROOT.gStyle.GetNdivisions("Z")
)
# draw
root_object = plotData.plotdict["root_objects"][nick]
if "RTOL" in marker.upper() and isinstance(root_object, ROOT.TH1):
root_object.__class__ = ROOT.CustomHistogram
root_object.GetPainter()
root_object.Draw(marker + " SAME")
pad.Update()
def modify_axes(self, plotData):
super(PlotRoot, self).modify_axes(plotData)
# setting for Z axis
for root_object in plotData.plotdict["root_objects"].values():
if isinstance(root_object, ROOT.TH1):
palette = root_object.GetListOfFunctions().FindObject("palette")
else:
palette = root_object.GetHistogram().GetListOfFunctions().FindObject("palette")
if palette != None:
root_object.GetZaxis().SetTitleOffset(1.5)
palette.SetTitleOffset(1.5)
palette.SetTitleSize(root_object.GetYaxis().GetTitleSize())
n_contour_levels = 50 # number of divisions
if isinstance(root_object, ROOT.TH1):
root_object.SetContour(n_contour_levels)
elif isinstance(root_object, ROOT.TGraph2D):
root_object.GetHistogram().SetContour(n_contour_levels)
# logarithmic axes
if plotData.plotdict["x_log"]:
plotData.plot.plot_pad.SetLogx()
self.axes_histogram.GetXaxis().SetMoreLogLabels((math.log10(self.x_max) - math.log10(self.x_min)) < 3.0)
if plotData.plotdict["y_log"]:
plotData.plot.plot_pad.SetLogy()
self.axes_histogram.GetYaxis().SetMoreLogLabels((math.log10(self.y_max) - math.log10(self.y_min)) < 3.0)
if plotData.plotdict["z_log"]:
plotData.plot.plot_pad.SetLogz()
self.axes_histogram.GetZaxis().SetMoreLogLabels((math.log10(self.z_max) - math.log10(self.z_min)) < 3.0)
if not plotData.plot.subplot_pad is None:
if plotData.plotdict["x_log"]:
plotData.plot.subplot_pad.SetLogx()
self.subplot_axes_histogram.GetXaxis().SetMoreLogLabels((math.log10(self.x_max) - math.log10(self.x_min)) < 3.0)
if plotData.plotdict["y_subplot_log"]:
plotData.plot.subplot_pad.SetLogy()
self.subplot_axes_histogram.GetYaxis().SetMoreLogLabels((math.log10(self.y_sub_max) - math.log10(self.y_sub_min)) < 3.0)
if plotData.plotdict["z_subplot_log"]:
plotData.plot.subplot_pad.SetLogz()
self.subplot_axes_histogram.GetZaxis().SetMoreLogLabels((math.log10(self.z_sub_max) - math.log10(self.z_sub_min)) < 3.0)
if not self.axes_histogram is None:
self.reversed_axes = PlotRoot._set_axis_limits(plotData.plot.plot_pad, self.axes_histogram, self.max_dim, [self.x_min, self.x_max], [self.y_min, self.y_max], [self.z_min, self.z_max], reverse_x_axis=plotData.plotdict["reverse_x_axis"], reverse_y_axis=plotData.plotdict["reverse_y_axis"], reverse_z_axis=plotData.plotdict["reverse_z_axis"])
if not self.subplot_axes_histogram is None:
self.reversed_subplot_axes = PlotRoot._set_axis_limits(plotData.plot.subplot_pad, self.subplot_axes_histogram, self.max_dim, [self.x_min, self.x_max], [self.y_sub_min, self.y_sub_max], [self.z_sub_min, self.z_sub_max], reverse_x_axis=plotData.plotdict["reverse_x_axis"], reverse_y_axis=False, reverse_z_axis=False)
for nick, subplot, marker in zip(
plotData.plotdict["nicks"],
plotData.plotdict["subplots"],
plotData.plotdict["markers"]
):
root_object = plotData.plotdict["root_objects"][nick]
if subplot:
PlotRoot._set_axis_limits(plotData.plot.subplot_pad, root_object, self.max_dim, [self.x_min, self.x_max], [self.y_sub_min, self.y_sub_max], [self.z_sub_min, self.z_sub_max], reverse_x_axis=plotData.plotdict["reverse_x_axis"], reverse_y_axis=False, reverse_z_axis=False)
else:
PlotRoot._set_axis_limits(plotData.plot.plot_pad, root_object, self.max_dim, [self.x_min, self.x_max], [self.y_min, self.y_max], [self.z_min, self.z_max], reverse_x_axis=plotData.plotdict["reverse_x_axis"], reverse_y_axis=plotData.plotdict["reverse_y_axis"], reverse_z_axis=plotData.plotdict["reverse_z_axis"])
if not self.subplot_axes_histogram is None:
self.axes_histogram.GetXaxis().SetLabelSize(0)
self.axes_histogram.GetXaxis().SetTitleSize(0)
self.axes_histogram.GetYaxis().SetLabelSize(self.axes_histogram.GetYaxis().GetLabelSize() / (1.0 - self.plot_subplot_slider_y))
self.axes_histogram.GetYaxis().SetTitleSize((self.axes_histogram.GetYaxis().GetTitleSize() / (1.0 - self.plot_subplot_slider_y))-0.01)
self.axes_histogram.GetYaxis().SetTitleOffset(self.axes_histogram.GetYaxis().GetTitleOffset() * (1.0 - self.plot_subplot_slider_y))
self.subplot_axes_histogram.GetXaxis().SetLabelSize(self.subplot_axes_histogram.GetXaxis().GetLabelSize() / self.plot_subplot_slider_y)
self.subplot_axes_histogram.GetXaxis().SetTitleSize((self.subplot_axes_histogram.GetXaxis().GetTitleSize() / self.plot_subplot_slider_y) - 0.01)
self.subplot_axes_histogram.GetYaxis().SetLabelSize(self.subplot_axes_histogram.GetYaxis().GetLabelSize() / self.plot_subplot_slider_y)
self.subplot_axes_histogram.GetYaxis().SetTitleSize((self.subplot_axes_histogram.GetYaxis().GetTitleSize() / self.plot_subplot_slider_y) - 0.01)
self.subplot_axes_histogram.GetXaxis().SetTitleOffset(2.0 * self.subplot_axes_histogram.GetXaxis().GetTitleOffset() * self.plot_subplot_slider_y+0.2)
self.subplot_axes_histogram.GetYaxis().SetTitleOffset(self.subplot_axes_histogram.GetYaxis().GetTitleOffset() * self.plot_subplot_slider_y)
self.subplot_axes_histogram.GetYaxis().SetNdivisions(5, 0, 0)
if not plotData.plotdict["x_title_offset"] is None:
if not self.subplot_axes_histogram is None:
self.subplot_axes_histogram.GetXaxis().SetTitleOffset(plotData.plotdict["x_title_offset"])
else:
self.axes_histogram.GetXaxis().SetTitleOffset(plotData.plotdict["x_title_offset"])
if not plotData.plotdict["y_title_offset"] is None:
self.axes_histogram.GetYaxis().SetTitleOffset(plotData.plotdict["y_title_offset"])
if not self.subplot_axes_histogram is None and not plotData.plotdict["y_subplot_title_offset"] is None:
self.subplot_axes_histogram.GetYaxis().SetTitleOffset(plotData.plotdict["y_subplot_title_offset"])
if plotData.plotdict["x_labels_vertical"]:
if not self.subplot_axes_histogram is None:
self.subplot_axes_histogram.LabelsOption("v", "X")
else:
self.axes_histogram.LabelsOption("v", "X")
palettes = [(root_object if isinstance(root_object, ROOT.TH1) else root_object.GetHistogram()).GetListOfFunctions().FindObject("palette") for root_object in plotData.plotdict["root_objects"].values()]
if all([palette == None for palette in palettes]) and (plotData.plotdict["right_pad_margin"] is None):
plotData.plot.plot_pad.SetRightMargin(0.05)
if not plotData.plot.subplot_pad is None:
plotData.plot.subplot_pad.SetRightMargin(0.05)
if (self.max_dim < 3) and (plotData.plotdict["right_pad_margin"] is None):
plotData.plot.plot_pad.SetRightMargin(0.05)
if not plotData.plot.subplot_pad is None:
plotData.plot.subplot_pad.SetRightMargin(0.05)
# redraw axes only and update the canvas
plotData.plot.plot_pad.cd()
self.axes_histogram.Draw("AXIS SAME")
plotData.plot.plot_pad.Update()
if not plotData.plot.subplot_pad is None:
plotData.plot.subplot_pad.cd()
if not self.subplot_axes_histogram is None:
self.subplot_axes_histogram.Draw("AXIS SAME")
plotData.plot.subplot_pad.Update()
plotData.plot.canvas.Update()
#tdrstyle.fixOverlay(self.plot_pad)
def add_grid(self, plotData):
super(PlotRoot, self).add_grid(plotData)
plotData.plot.plot_pad.cd()
if (plotData.plotdict["grid"] or plotData.plotdict["x_grid"]):
plotData.plot.plot_pad.SetGridx()
if (plotData.plotdict["grid"] or plotData.plotdict["y_grid"]):
plotData.plot.plot_pad.SetGridy()
if not plotData.plot.subplot_pad is None:
plotData.plot.subplot_pad.cd()
if (plotData.plotdict["subplot_grid"] == True):
plotData.plot.subplot_pad.SetGrid()
def add_labels(self, plotData):
super(PlotRoot, self).add_labels(plotData)
# TODO: transform legend coordinates so that same values for plots with subplots can be specified
"""
pad_pos_x_pixel = [plotData.plot.plot_pad.UtoPixel(x) for x in [0.0, 1.0]]
pad_pos_y_pixel = [plotData.plot.plot_pad.VtoPixel(y) for y in [0.0, 1.0]]
canvas_pos_x_pixel = [plotData.plot.canvas.UtoPixel(x) for x in [0.0, 1.0]]
canvas_pos_y_pixel = [plotData.plot.canvas.VtoPixel(y) for y in [0.0, 1.0]]
legend_pos_x_pixel = [int(pad_pos_x_pixel[0] + (x * (pad_pos_x_pixel[1] - pad_pos_x_pixel[0]))) for x in plotData.plotdict["legend"][::2]]
legend_pos_y_pixel = [int(pad_pos_y_pixel[0] + (y * (pad_pos_y_pixel[1] - pad_pos_y_pixel[0]))) for y in plotData.plotdict["legend"][1::2]]
legend_pos_x_user = [float(x - canvas_pos_x_pixel[0]) / float(canvas_pos_x_pixel[1] - canvas_pos_x_pixel[0]) for x in legend_pos_x_pixel]
legend_pos_y_user = [float(y - canvas_pos_y_pixel[0]) / float(canvas_pos_y_pixel[1] - canvas_pos_y_pixel[0]) for y in legend_pos_y_pixel]
transformed_legend_pos = tools.flattenList(zip(legend_pos_x_user, legend_pos_y_user))
"""
transformed_legend_pos = plotData.plotdict["legend"]
plotData.plot.plot_pad.cd()
self.legend = None
if plotData.plotdict["legend"] != None:
ROOT.gStyle.SetLegendBorderSize(0)
self.legend = ROOT.TLegend(*transformed_legend_pos)
self.legend.SetNColumns(plotData.plotdict["legend_cols"])
self.legend.SetColumnSeparation(0.1)
for subplot, nick, label, legend_marker in zip(
plotData.plotdict["subplots"],
plotData.plotdict["nicks"],
plotData.plotdict["labels"],
plotData.plotdict["legend_markers"],
):
if subplot == True:
pass # legend entries are currently added to the upper plot legend
root_object = plotData.plotdict["root_objects"][nick]
if legend_marker is None:
# TODO: defaults should be defined in prepare_args function
legend_marker = "FLP"
if isinstance(root_object, ROOT.TH1):
legend_marker = "F"
elif isinstance(root_object, ROOT.TGraph):
legend_marker = "LP"
elif isinstance(root_object, ROOT.TF1):
legend_marker = "L"
if (not label is None) and (label != ""):
self.legend.AddEntry(root_object, label, legend_marker)
defaultrootstyle.set_legend_style(self.legend)
self.legend.Draw()
def add_texts(self, plotData):
super(PlotRoot, self).add_texts(plotData)
self.text_box = ROOT.TPaveText(0.0, 0.0, 1.0, 1.0, "NDC")
self.text_box.SetFillStyle(0)
self.text_box.SetBorderSize(0)
self.text_box.SetShadowColor(0)
self.text_box.SetTextAlign(22)
text_size = self.axes_histogram.GetXaxis().GetLabelSize()
if not self.subplot_axes_histogram is None:
text_size = self.subplot_axes_histogram.GetXaxis().GetLabelSize() * (1-self.plot_subplot_slider_y-0.12)
self.text_box.SetTextSize(text_size)
for x, y, text, size in zip(plotData.plotdict["texts_x"], plotData.plotdict["texts_y"], plotData.plotdict["texts"], plotData.plotdict["texts_size"]):
text_object = self.text_box.AddText(x, y, text)
if not size is None:
text_object.SetTextSize(size)
# lumi and energy: outside plot, top right, with best possible offset
if self.dataset_title != "":
if "Simulation" in plotData.plotdict["extra_text"]:
CMS_lumi.lumi_sqrtS = ""
else:
self.dataset_title = re.sub(r"\\mathrm{(fb|pb)}", re.search(r"\\mathrm{(fb|pb)}", self.dataset_title).group(1), self.dataset_title)
year = "("
if plotData.plotdict["year"] != "":
year += plotData.plotdict["year"] + ", "
CMS_lumi.lumi_sqrtS = self.dataset_title.replace("$", "").replace("\,", "").split("(")[0] + year + self.dataset_title.replace("$", "").replace("\,", "").split("(")[1]
CMS_lumi.lumiTextSize = 0.5
if not self.subplot_axes_histogram is None:
CMS_lumi.lumiTextOffset = 0.4
# normal plot title (e.g., 'own work', name of the channel...): outside plot, top left
y_title = 0.95 if self.subplot_axes_histogram is None else 0.923
if (not plotData.plotdict["title"] is None) and (plotData.plotdict["title"] != ""):
x_title = 0.2
title = self.text_box.AddText(x_title, y_title, plotData.plotdict["title"])
title.SetTextAlign(11)
# CMS text (only if specified): inside plot, top left
CMS_lumi.cmsTextSize = 0.5
if not (plotData.plotdict["cms"] or plotData.plotdict["cms_outframe"]):
CMS_lumi.cmsText = ""
CMS_lumi.extraText = plotData.plotdict["extra_text"]
if plotData.plotdict["cms_outframe"]:
CMS_lumi.relPosX = 0.12
CMS_lumi.CMS_lumi(plotData.plot.canvas, 0, 0)
else:
CMS_lumi.CMS_lumi(plotData.plot.canvas, 0, 11)
# Draw the text
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2009 <NAME> <<EMAIL>>
# 2010 <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
import sys
from gi import _gobject
from gi.repository import GObject
from ..overrides import override
from ..importer import modules
if sys.version_info >= (3, 0):
_basestring = str
_callable = lambda c: hasattr(c, '__call__')
else:
_basestring = basestring
_callable = callable
Gtk = modules['Gtk']._introspection_module
__all__ = []
if Gtk._version == '2.0':
import warnings
warn_msg = "You have imported the Gtk 2.0 module. Because Gtk 2.0 \
was not designed for use with introspection some of the \
interfaces and API will fail. As such this is not supported \
by the pygobject development team and we encourage you to \
port your app to Gtk 3 or greater. PyGTK is the recomended \
python module to use with Gtk 2.0"
warnings.warn(warn_msg, RuntimeWarning)
class Widget(Gtk.Widget):
def translate_coordinates(self, dest_widget, src_x, src_y):
success, dest_x, dest_y = super(Widget, self).translate_coordinates(
dest_widget, src_x, src_y)
if success:
return (dest_x, dest_y,)
def render_icon(self, stock_id, size, detail=None):
return super(Widget, self).render_icon(stock_id, size, detail)
Widget = override(Widget)
__all__.append('Widget')
class Container(Gtk.Container, Widget):
def __len__(self):
return len(self.get_children())
def __contains__(self, child):
return child in self.get_children()
def __iter__(self):
return iter(self.get_children())
def __bool__(self):
return True
# alias for Python 2.x object protocol
__nonzero__ = __bool__
def get_focus_chain(self):
success, widgets = super(Container, self).get_focus_chain()
if success:
return widgets
Container = override(Container)
__all__.append('Container')
class Editable(Gtk.Editable):
def insert_text(self, text, position):
pos = super(Editable, self).insert_text(text, -1, position)
return pos
def get_selection_bounds(self):
success, start_pos, end_pos = super(Editable, self).get_selection_bounds()
if success:
return (start_pos, end_pos,)
else:
return tuple()
Editable = override(Editable)
__all__.append("Editable")
class Action(Gtk.Action):
def __init__(self, name, label, tooltip, stock_id, **kwds):
Gtk.Action.__init__(self, name=name, label=label, tooltip=tooltip, stock_id=stock_id, **kwds)
Action = override(Action)
__all__.append("Action")
class RadioAction(Gtk.RadioAction):
def __init__(self, name, label, tooltip, stock_id, value, **kwds):
Gtk.RadioAction.__init__(self, name=name, label=label, tooltip=tooltip, stock_id=stock_id, value=value, **kwds)
RadioAction = override(RadioAction)
__all__.append("RadioAction")
class ActionGroup(Gtk.ActionGroup):
def __init__(self, name, **kwds):
super(ActionGroup, self).__init__(name = name, **kwds)
def add_actions(self, entries, user_data=None):
"""
The add_actions() method is a convenience method that creates a number
of gtk.Action objects based on the information in the list of action
entry tuples contained in entries and adds them to the action group.
The entry tuples can vary in size from one to six items with the
following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The callback function invoked when the action is activated.
Optional with a default value of None.
The "activate" signals of the actions are connected to the callbacks and
their accel paths are set to <Actions>/group-name/action-name.
"""
try:
iter(entries)
except (TypeError):
raise TypeError('entries must be iterable')
def _process_action(name, stock_id=None, label=None, accelerator=None, tooltip=None, callback=None):
action = Action(name, label, tooltip, stock_id)
if callback is not None:
if user_data is None:
action.connect('activate', callback)
else:
action.connect('activate', callback, user_data)
self.add_action_with_accel(action, accelerator)
for e in entries:
# using inner function above since entries can leave out optional arguments
_process_action(*e)
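# Example entries for add_actions(); the action names, labels and the
# on_quit callback are illustrative only:
#
#     group = ActionGroup('MainActions')
#     group.add_actions([
#         ('quit', Gtk.STOCK_QUIT, '_Quit', '<Control>q', 'Quit the app', on_quit),
#         ('about', None, '_About'),  # trailing optional fields may be omitted
#     ])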
def add_toggle_actions(self, entries, user_data=None):
"""
The add_toggle_actions() method is a convenience method that creates a
number of gtk.ToggleAction objects based on the information in the list
of action entry tuples contained in entries and adds them to the action
group. The toggle action entry tuples can vary in size from one to seven
items with the following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The callback function invoked when the action is activated.
Optional with a default value of None.
* A flag indicating whether the toggle action is active. Optional
with a default value of False.
The "activate" signals of the actions are connected to the callbacks and
their accel paths are set to <Actions>/group-name/action-name.
"""
try:
iter(entries)
except (TypeError):
raise TypeError('entries must be iterable')
def _process_action(name, stock_id=None, label=None, accelerator=None, tooltip=None, callback=None, is_active=False):
action = Gtk.ToggleAction(name, label, tooltip, stock_id)
action.set_active(is_active)
if callback is not None:
if user_data is None:
action.connect('activate', callback)
else:
action.connect('activate', callback, user_data)
self.add_action_with_accel(action, accelerator)
for e in entries:
# using inner function above since entries can leave out optional arguments
_process_action(*e)
def add_radio_actions(self, entries, value=None, on_change=None, user_data=None):
"""
The add_radio_actions() method is a convenience method that creates a
number of gtk.RadioAction objects based on the information in the list
of action entry tuples contained in entries and adds them to the action
group. The entry tuples can vary in size from one to six items with the
following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The value to set on the radio action. Optional with a default
value of 0. Should be specified in applications.
The value parameter specifies the radio action that should be set
active. The "changed" signal of the first radio action is connected to
the on_change callback (if specified and not None) and the accel paths
of the actions are set to <Actions>/group-name/action-name.
"""
try:
iter(entries)
except (TypeError):
raise TypeError('entries must be iterable')
first_action = None
def _process_action(group_source, name, stock_id=None, label=None, accelerator=None, tooltip=None, entry_value=0):
action = RadioAction(name, label, tooltip, stock_id, entry_value)
# FIXME: join_group is a patch to Gtk+ 3.0
# otherwise we can't effectively add radio actions to a
# group. Should we depend on 3.0 and error out here
# or should we offer the functionality via a compat
# C module?
if hasattr(action, 'join_group'):
action.join_group(group_source)
if value == entry_value:
action.set_active(True)
self.add_action_with_accel(action, accelerator)
return action
for e in entries:
# using inner function above since entries can leave out optional arguments
action = _process_action(first_action, *e)
if first_action is None:
first_action = action
if first_action is not None and on_change is not None:
if user_data is None:
first_action.connect('changed', on_change)
else:
first_action.connect('changed', on_change, user_data)
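# Example for add_radio_actions(); the `value` argument selects which entry
# starts out active and on_change receives the group's "changed" signal
# (the callback name is illustrative):
#
#     group.add_radio_actions([
#         ('view-tree', None, '_Tree view', None, None, 0),
#         ('view-icon', None, '_Icon view', None, None, 1),
#     ], value=0, on_change=on_view_changed)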
ActionGroup = override(ActionGroup)
__all__.append('ActionGroup')
class UIManager(Gtk.UIManager):
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Custom Keras and PyTorch classes.
"""
from keras import backend as K
from keras.callbacks import Callback, EarlyStopping
from keras.layers.convolutional import ZeroPadding2D, ZeroPadding3D
from keras.layers.local import LocallyConnected2D
from keras.utils import conv_utils
from keras.engine.base_layer import InputSpec
import numpy as np
try:
from s2cnn import S2Convolution, SO3Convolution
except ImportError:
pass
# ==================================================================================================================== #
# Keras classes
# ==================================================================================================================== #
class AdamLearningRateTracker(Callback):
def on_epoch_end(self, epoch, logs=None, beta_1=0.9, beta_2=0.999,):
optimizer = self.model.optimizer
it = K.cast(optimizer.iterations, K.floatx())
lr = K.cast(optimizer.lr, K.floatx())
decay = K.cast(optimizer.decay, K.floatx())
t = K.eval(it + 1.)
new_lr = K.eval(lr * (1. / (1. + decay * it)))
lr_t = K.eval(new_lr * (K.sqrt(1. - K.pow(beta_2, t)) / (1. - K.pow(beta_1, t))))
print(' - LR: {:.6f}'.format(lr_t))
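# The value printed above is Adam's bias-corrected step size at iteration t:
#     lr_t = lr * (1 / (1 + decay * it)) * sqrt(1 - beta_2**t) / (1 - beta_1**t)
# with t = it + 1, i.e. the decayed base learning rate scaled by the ratio of
# the second- and first-moment bias corrections (Kingma & Ba, 2014). Note
# that beta_1 and beta_2 are taken from this method's defaults rather than
# read back from the optimizer.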
class SGDLearningRateTracker(Callback):
def on_epoch_end(self, epoch, logs=None):
optimizer = self.model.optimizer
it = K.cast(optimizer.iterations, K.floatx())
lr = K.cast(optimizer.lr, K.floatx())
decay = K.cast(optimizer.decay, K.floatx())
new_lr = K.eval(lr * (1. / (1. + decay * it)))
print(' - LR: {:.6f}'.format(new_lr))
class BatchHistory(Callback):
def on_train_begin(self, logs=None):
self.history = []
self.epoch = 0
def on_epoch_begin(self, epoch, logs=None):
self.history.append({})
def on_epoch_end(self, epoch, logs=None):
self.epoch += 1
def on_batch_end(self, batch, logs=None):
logs = logs or {}
for k, v in logs.items():
self.history[self.epoch].setdefault(k, []).append(v)
class RunHistory(Callback):
"""Callback that records events into a `History` object.
Adapted from keras.callbacks.History to include logging to Azure experiment runs.
"""
def __init__(self, run):
self.epoch = []
self.history = {}
self.run = run
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
self.run.log(k, v)
class RNNResetStates(Callback):
def on_epoch_begin(self, epoch, logs=None):
self.model.reset_states()
class EarlyStoppingMin(EarlyStopping):
"""
Extends the keras.callbacks.EarlyStopping class to provide the option to force training for a minimum number of
epochs.
"""
def __init__(self, min_epochs=0, **kwargs):
"""
:param min_epochs: int: train the network for at least this number of epochs before early stopping
:param kwargs: passed to EarlyStopping.__init__()
"""
super(EarlyStoppingMin, self).__init__(**kwargs)
if not isinstance(min_epochs, int) or min_epochs < 0:
raise ValueError('min_epochs must be an integer >= 0')
self.min_epochs = min_epochs
def on_epoch_end(self, epoch, logs=None):
if epoch < self.min_epochs:
return
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of '
'the best epoch')
self.model.set_weights(self.best_weights)
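# Usage sketch (the model, data and monitored quantity are placeholders):
# train for at least 20 epochs before the usual patience logic may stop.
#
#     early_stop = EarlyStoppingMin(min_epochs=20, monitor='val_loss',
#                                   patience=5, restore_best_weights=True)
#     model.fit(x, y, validation_data=(x_val, y_val), epochs=200,
#               callbacks=[early_stop])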
class PeriodicPadding2D(ZeroPadding2D):
"""Periodic-padding layer for 2D input (e.g. image).
This layer can add periodic rows and columns
at the top, bottom, left and right side of an image tensor.
Adapted from keras.layers.ZeroPadding2D by @jweyn
# Arguments
padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
# Output shape
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, padded_rows, padded_cols)`
"""
def __init__(self,
padding=(1, 1),
data_format=None,
**kwargs):
super(PeriodicPadding2D, self).__init__(padding=padding,
data_format=data_format,
**kwargs)
def call(self, inputs):
if K.backend() == 'plaidml.keras.backend':
shape = inputs.shape.dims
else:
shape = inputs.shape
if self.data_format == 'channels_first':
top_slice = slice(shape[2] - self.padding[0][0], shape[2])
bottom_slice = slice(0, self.padding[0][1])
left_slice = slice(shape[3] - self.padding[1][0], shape[3])
right_slice = slice(0, self.padding[1][1])
# Pad the horizontal
outputs = K.concatenate([inputs[:, :, :, left_slice], inputs, inputs[:, :, :, right_slice]], axis=3)
# Pad the vertical
outputs = K.concatenate([outputs[:, :, top_slice], outputs, outputs[:, :, bottom_slice]], axis=2)
else:
top_slice = slice(shape[1] - self.padding[0][0], shape[1])
bottom_slice = slice(0, self.padding[0][1])
left_slice = slice(shape[2] - self.padding[1][0], shape[2])
right_slice = slice(0, self.padding[1][1])
# Pad the horizontal
outputs = K.concatenate([inputs[:, :, left_slice], inputs, inputs[:, :, right_slice]], axis=2)
# Pad the vertical
outputs = K.concatenate([outputs[:, top_slice], outputs, outputs[:, bottom_slice]], axis=1)
return outputs
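# Illustration of the wrap-around slicing above on a toy array (a plain-numpy
# sketch, independent of the Keras backend):
#
#     import numpy as np
#     a = np.arange(9).reshape(3, 3)
#     pad = 1
#     wrapped = np.concatenate([a[:, -pad:], a, a[:, :pad]], axis=1)
#     # wrapped[:, 0] equals a[:, -1] and wrapped[:, -1] equals a[:, 0]:
#     # the domain is treated as periodic, unlike zero padding.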
class PeriodicPadding3D(ZeroPadding3D):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
# Arguments
padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric padding
is applied to all three spatial dimensions.
- If tuple of 3 ints:
interpreted as three different
symmetric padding values for the three spatial dimensions:
`(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad),
(left_dim2_pad, right_dim2_pad),
(left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth,
first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)`
# Output shape
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_padded_axis, second_padded_axis, third_padded_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth,
first_padded_axis, second_padded_axis, third_padded_axis)`
"""
def __init__(self,
padding=(1, 1, 1),
data_format=None,
**kwargs):
super(PeriodicPadding3D, self).__init__(padding=padding,
data_format=data_format,
**kwargs)
def call(self, inputs):
if K.backend() == 'plaidml.keras.backend':
shape = inputs.shape.dims
else:
shape = inputs.shape
if self.data_format == 'channels_first':
low_slice = slice(shape[2] - self.padding[0][0], shape[2])
high_slice = slice(0, self.padding[0][1])
top_slice = slice(shape[3] - self.padding[1][0], shape[3])
bottom_slice = slice(0, self.padding[1][1])
left_slice = slice(shape[4] - self.padding[2][0], shape[4])
right_slice = slice(0, self.padding[2][1])
# Pad the horizontal
outputs = K.concatenate([inputs[:, :, :, :, left_slice], inputs, inputs[:, :, :, :, right_slice]], axis=4)
# Pad the vertical
outputs = K.concatenate([outputs[:, :, :, top_slice], outputs, outputs[:, :, :, bottom_slice]], axis=3)
# Pad the depth
outputs = K.concatenate([outputs[:, :, low_slice], outputs, outputs[:, :, high_slice]], axis=2)
else:
low_slice = slice(shape[1] - self.padding[0][0], shape[1])
high_slice = slice(0, self.padding[0][1])
top_slice = slice(shape[2] - self.padding[1][0], shape[2])
bottom_slice = slice(0, self.padding[1][1])
left_slice = slice(shape[3] - self.padding[2][0], shape[3])
right_slice = slice(0, self.padding[2][1])
# Pad the horizontal
outputs = K.concatenate([inputs[:, :, :, left_slice], inputs, inputs[:, :, :, right_slice]], axis=3)
# Pad the vertical
outputs = K.concatenate([outputs[:, :, top_slice], outputs, outputs[:, :, bottom_slice]], axis=2)
# Pad the depth
outputs = K.concatenate([outputs[:, low_slice], outputs, outputs[:, high_slice]], axis=1)
return outputs
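# Minimal usage sketch inside a Keras model (shapes are illustrative;
# channels_last assumed):
#
#     from keras.models import Sequential
#     model = Sequential()
#     model.add(PeriodicPadding3D(padding=(1, 1, 1), input_shape=(8, 8, 8, 2)))
#     # model.output_shape == (None, 10, 10, 10, 2), with wrapped borders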
class RowConnected2D(LocallyConnected2D):
"""Row-connected layer for 2D inputs.
The `RowConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are shared only along rows,
that is, a different set of filters is applied at each
different row of the input.
Adapted from keras.layers.local.LocallyConnected2D by @jweyn
# Examples
```python
# apply a 3x3 unshared weights convolution with 64 output filters
# on a 32x32 image with `data_format="channels_last"`:
model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# now model.output_shape == (None, 30, 30, 64)
# notice that this layer will consume (30*30)*(3*3*3*64)
# + (30*30)*64 parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 100
assert_allclose([[3.320665], [25.265821]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data1_3(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
_lambda = 750
assert_allclose([[9.480465], [104.783153]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data1_4(self, data1, err):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = -8.4 * ones((n + 1, 1), dtype=float64)
_lambda = 0.762
def J(theta):
return reg_cost_func(X, y, theta, _lambda)
assert_allclose(reg_grad(X, y, theta, _lambda),
numerical_grad(J, theta, err),
rtol=0, atol=0.001)
def test_reg_grad_data1_5(self, data1, err):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = 3.2 * ones((n + 1, 1), dtype=float64)
_lambda = 154
def J(theta):
return reg_cost_func(X, y, theta, _lambda)
assert_allclose(reg_grad(X, y, theta, _lambda),
numerical_grad(J, theta, err),
rtol=0, atol=0.001)
def test_reg_grad_data1_6(self, data1, err):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-12.4], [23.56]])
_lambda = 943
def J(theta):
return reg_cost_func(X, y, theta, _lambda)
assert_allclose(reg_grad(X, y, theta, _lambda),
numerical_grad(J, theta, err),
rtol=0, atol=0.001)
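# The checks above compare the analytic gradient against a finite difference.
# A central-difference sketch of what numerical_grad(J, theta, err) is assumed
# to compute (the real helper is imported by this test module):
#
#     def numerical_grad(J, theta, err):
#         grad = zeros(theta.shape)
#         for i in range(theta.size):
#             e = zeros(theta.shape)
#             e.flat[i] = err
#             grad.flat[i] = (float(J(theta + e)) - float(J(theta - e))) / (2 * err)
#         return grad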
def test_reg_grad_data2_1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[-338407.808], [-759579615.064], [-1113679.894]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data2_2(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 1000000
assert_allclose([[-338407.808], [-759558338.468], [-1092403.298]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data2_3(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-25.3], [32], [7.8]])
_lambda = 1000000
assert_allclose([[-276391.444681],
[-615660007.370213],
[-740838.968085]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
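# Expected closed form behind reg_grad, inferred from the values above: with
# _lambda = 1000000 only the non-intercept components change, so theta[0] is
# visibly not regularized. A sketch (assumed, not the imported implementation):
#
#     def reg_grad(X, y, theta, _lambda):
#         m = len(y)
#         grad = X.T.dot(X.dot(theta) - y) / m
#         reg = (_lambda / m) * theta
#         reg[0] = 0  # do not penalize the intercept term
#         return grad + reg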
# PREDICT
def test_predict_1(self):
X = array([[3.5]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-3.6303], [1.1664]])
assert_allclose([[0.4521]],
predict(X, theta),
rtol=0, atol=0.001)
def test_predict_2(self):
X = array([[3.5]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[0]],
predict(X, theta),
rtol=0, atol=0.001)
def test_predict_3(self):
X = array([[-3.5, 2.7]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[0.2]],
predict(X, theta),
rtol=0, atol=0.001)
def test_predict_4(self):
X = array([[-3.5, 2.7]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = -1 * ones((n + 1, 1), dtype=float64)
assert_allclose([[-0.2]],
predict(X, theta),
rtol=0, atol=0.001)
# HYPOTHESIS
def test_h_1(self):
X = array([[3.5]])
m, _ = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-3.6303], [1.1664]])
assert_allclose([[0.4521]],
h(X, theta),
rtol=0, atol=0.001)
def test_h_2(self):
X = array([[3.5]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[0]],
h(X, theta),
rtol=0, atol=0.001)
def test_h_3(self):
X = array([[-3.5, 2.7]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[0.2]],
h(X, theta),
rtol=0, atol=0.001)
def test_h_4(self):
X = array([[-3.5, 2.7]])
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = -1 * ones((n + 1, 1), dtype=float64)
assert_allclose([[-0.2]],
h(X, theta),
rtol=0, atol=0.001)
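# The hypothesis exercised above is plain linear regression: for example
# -3.6303 + 1.1664 * 3.5 == 0.4521, and test_h_4 expects a negative value,
# which rules out a sigmoid. A one-line sketch of the assumed implementation,
# which predict is expected to share:
#
#     def h(X, theta):
#         return X.dot(theta)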
# LINEAR REGRESSION CLASS
def test_LinearRegression_constructor1(self, data1):
theta = array([[1.], [0.6], [1.]])
lr = LinearRegression(theta)
assert_allclose(array([[1.], [0.6], [1.]]),
lr.theta,
rtol=0, atol=0.001)
def test_LinearRegression_constructor2(self):
lr = LinearRegression()
assert lr.theta is None
def test_LinearRegression_cost_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
_, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = LinearRegression(theta)
assert_allclose([[10.266]],
lr.cost(X, y),
rtol=0, atol=0.001)
def test_LinearRegression_normal_fit(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
lr = LinearRegression()
lr.fit(X, y, strategy="normal_equation")
assert_allclose([[-3.896], [1.193]],
lr.theta,
rtol=0, atol=0.001)
def test_LinearRegression_fit_BGD(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
lr = LinearRegression()
lr.fit(X, y, strategy="BGD", alpha=1, num_iters=1)
assert_allclose([[340412.659], [764209128.191], [1120367.702]],
lr.theta,
rtol=0, atol=0.001)
def test_LinearRegression_fit_SGD(self, err):
X = array([[0, 1, 2], [-1, 5, 3], [2, 0, 1]])
y = array([[0.3], [1.2], [0.5]])
lr = LinearRegression()
lr.fit(X, y, strategy="SGD", alpha=1, num_iters=1)
assert_allclose(array([[2.3], [11.2], [-11.7], [-2.2]]),
lr.theta,
rtol=0, atol=0.001, equal_nan=False)
def test_LinearRegression_fit_MBGD1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m = len(X)
lr = LinearRegression()
lr.fit(X, y, strategy="MBGD", alpha=1, num_iters=1, b=m)
assert_allclose([[340412.659], [764209128.191], [1120367.702]],
lr.theta,
rtol=0, atol=0.001)
def test_LinearRegression_fit_MBGD2(self, err):
X = array([[0, 1, 2], [-1, 5, 3], [2, 0, 1]])
y = array([[0.3], [1.2], [0.5]])
lr = LinearRegression()
lr.fit(X, y, strategy="MBGD", alpha=1, num_iters=1, b=1)
assert_allclose(array([[2.3], [11.2], [-11.7], [-2.2]]),
lr.theta,
rtol=0, atol=0.001, equal_nan=False)
def test_LinearRegression_predict(self):
X = array([[3.5]])
theta = array([[-3.6303], [1.1664]])
lr = LinearRegression(theta)
assert_allclose([[0.4521]],
lr.predict(X),
rtol=0, atol=0.001)
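# fit(strategy="normal_equation") is consistent with the closed-form
# least-squares solution; pinv(X.T X) X.T y on data1 gives [-3.896, 1.193],
# matching the assertion above. A sketch (the real method also prepends the
# intercept column to X):
#
#     from numpy.linalg import pinv
#     def normal_equation(X, y):
#         return pinv(X.T.dot(X)).dot(X.T).dot(y)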
# RIDGE LINEAR REGRESSION CLASS
def test_RidgeLinearRegression_constructor1(self, data1):
theta = ones((3, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=13.50)
assert lr._lambda == 13.50
assert_allclose(array([[1.], [1.], [1.]]),
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_constructor2(self):
lr = RidgeLinearRegression()
assert lr.theta is None
assert lr._lambda == 0
def test_RidgeLinearRegression_cost_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
_, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=0)
assert_allclose([[10.266]],
lr.cost(X, y),
rtol=0, atol=0.001)
def test_RidgeLinearRegression_cost_data1_2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
_, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=100)
assert_allclose([[10.781984]],
lr.cost(X, y),
rtol=0, atol=0.001)
def test_RidgeLinearRegression_cost_data2_1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
_, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=0)
assert_allclose([[64828197300.798]],
lr.cost(X, y),
rtol=0, atol=0.001)
def test_RidgeLinearRegression_cost_data2_2(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
_, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=1000000)
assert_allclose([[64828218577.393623]],
lr.cost(X, y),
rtol=0, atol=0.001)
def test_RidgeLinearRegression_normal_fit_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
lr = RidgeLinearRegression(_lambda=0)
lr.fit(X, y, strategy="normal_equation")
assert_allclose([[-3.896], [1.193]],
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_normal_fit_data1_2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
lr = RidgeLinearRegression(_lambda=1)
lr.fit(X, y, strategy="normal_equation")
assert_allclose([[-3.889], [1.192]],
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_fit_BGD1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
lr = RidgeLinearRegression(_lambda=0)
lr.fit(X, y, strategy="BGD", alpha=1, num_iters=1)
assert_allclose([[340412.659], [764209128.191], [1120367.702]],
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_fit_BGD2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
_, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=100)
lr.fit(X, y, strategy="BGD", alpha=1, num_iters=1)
assert_allclose([[-2.321], [-24.266]],
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_fit_SGD1(self, err):
X = array([[0, 1, 2], [-1, 5, 3], [2, 0, 1]])
y = array([[0.3], [1.2], [0.5]])
lr = RidgeLinearRegression(_lambda=0)
lr.fit(X, y, strategy="SGD", alpha=1, num_iters=1)
assert_allclose(array([[2.3], [11.2], [-11.7], [-2.2]]),
lr.theta,
rtol=0, atol=0.001, equal_nan=False)
def test_RidgeLinearRegression_fit_SGD2(self, err):
X = array([[0, 1, 2], [-1, 5, 3], [2, 0, 1]])
y = array([[0.3], [1.2], [0.5]])
lr = RidgeLinearRegression(_lambda=10)
lr.fit(X, y, strategy="SGD", alpha=1, num_iters=1)
assert_allclose(array([[8.3], [-0.8], [132.3], [123.8]]),
lr.theta,
rtol=0, atol=0.001, equal_nan=False)
def test_RidgeLinearRegression_fit_MBGD1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m = len(X)
lr = RidgeLinearRegression(_lambda=0)
lr.fit(X, y, strategy="MBGD", alpha=1, num_iters=1, b=m)
assert_allclose([[340412.659], [764209128.191], [1120367.702]],
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_fit_MBGD2(self, err):
X = array([[0, 1, 2], [-1, 5, 3], [2, 0, 1]])
y = array([[0.3], [1.2], [0.5]])
lr = RidgeLinearRegression(_lambda=0)
lr.fit(X, y, strategy="MBGD", alpha=1, num_iters=1, b=1)
assert_allclose(array([[2.3], [11.2], [-11.7], [-2.2]]),
lr.theta,
rtol=0, atol=0.001, equal_nan=False)
def test_RidgeLinearRegression_fit_MBGD3(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
theta = ones((n + 1, 1), dtype=float64)
lr = RidgeLinearRegression(theta, _lambda=100)
lr.fit(X, y, strategy="MBGD", alpha=1, num_iters=1, b=m)
assert_allclose([[-2.321], [-24.266]],
lr.theta,
rtol=0, atol=0.001)
def test_RidgeLinearRegression_fit_MBGD4(self, data1):
X = array([[0, 1, 2],
<filename>RPG.py
#This game is part of The Python-Game-Book
# TODO: equipped weapon for monster does not affect combat stats?
# TODO: separate I/O from other code
# TODO: detect empty list_items for use, drop, equip etc
# TODO: give monster armor and weapons
# TODO: encumbrance or cancel fight after 100 rounds
# TODO: cancel for every menu
# TODO: create random armors and weapons
# TODO: monsters corpses spawn loot
# TODO: history of slain enemies (room 0?)
# TODO: game won condition
# TODO: monsters can pick up/drop weapons
# TODO: armor and weapons can shatter
import random
import sys
#import logging
if sys.version_info[0] < 3:
print("this script need python3. You are using python 2 or lower.")
sys.exit()
#logging.basicConfig(filename='horst.log',level=logging.DEBUG)
class Game(object):
"""
holds all information for a game. Later it may be
possible to load / save different games
"""
number = 0
def __init__(self):
self.number = Game.number
Game.number += 1
self.rooms = {} # dictionary of rooms, key is room number
self.items = {} # dictionary of items, key is item number
self.monsters = {} # dictionary of monsters, key is monster number
self.players = [] # list of player monster numbers
self.effects = {} # dictionary of effects (for items), key is effect name
class Monster(object):
number = 1 # number 1 should be reserved for player
def __init__(
self,
game,
where=0,
adjective="",
description="",
boss=False,
carrier=False
):
self.number = Monster.number
Monster.number += 1
game.monsters[self.number] = self # add monster into game dict
self.adjective = adjective
self.location = where # room number
self.description = description
self.hitpoints = random.randint(5, 15)
self.player = False
self.carrier = carrier # can carry items ?
self.attack = 10 + random.randint(-2, 2)
self.defense = 10 + random.randint(-2, 2)
self.speed = 10 + random.randint(-2, 2)
self.damage = 3 + random.randint(-2, 2)
self.armor = 3 + random.randint(-2, 2)
self.agressive = False
self.slots = {
"head": 1,
"body": 1,
"hand": 2,
"finger": 2,
"neck": 1,
"feet": 2
} # only one magic ring per ring-finger
if description == "":
if boss:
self.adjective = random.choice(
(
"deadly", "fantastic", "creepy", "ugly", "epic"
)
)
self.description = random.choice(
(
"dragon", "cave drake", "sea serpent", "gorgon",
"giant beetle", "arch druid"
)
)
self.hitpoints *= 5
self.attack += random.randint(2, 7)
self.defense += random.randint(2, 7)
self.speed += random.randint(2, 7)
else:
self.adjective = random.choice(
(
"weak", "boring", "tired", "cheap", "old"
)
)
self.description = random.choice(
(
"goblin", "ork", "troll", "mice", "rat", "dwarf",
"spider"
)
)
def info(self):
txt = "Monster number {}: {} {} with {} hitpoints\n".format(
self.number, self.adjective, self.description, self.hitpoints
)
if self.carrier:
txt += "This monster can carry items\n"
return txt
def inspect(self, game):
return "{}\n{:2}\n{:2}\n{:2}\n{:2}\n{:2}".format(
self.description, self.hitpoints, self.attack, self.defense,
self.speed, self.damage, self.armor
)
def list_items(
self,
game,
active_only=False,
wearable_only=False,
passive_only=False,
magic_only=False
):
return [] #TODO: give monsters armor and weapons
def leftcol(self):
#return "name \nhitpoints \nattack \ndefense \nspeed \ndamage \narmor \n"
return "\n\n\n\n\n\n\n"
def calculate_values(self, game):
"""calculate all bonus and malus from equipped weapons and armors toward combat stats.
returns dict with combat values"""
values = {
"attack": self.attack,
"defense": self.defense,
"speed": self.speed,
"damage": self.damage,
"armor": self.armor
}
items = self.list_items(game, True, True, False, False)
for i in items:
values["attack"] += game.items[i].attackbonus
values["defense"] += game.items[i].defensebonus
values["speed"] += game.items[i].speedbonus
values["damage"] += game.items[i].damagebonus
values["armor"] += game.items[i].armorbonus
return values
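# Minimal usage sketch (illustrative values only):
#
#     g = Game()
#     orc = Monster(g, where=1)
#     print(orc.info())
#     print(orc.calculate_values(g))  # base stats only, since list_items() is empty for plain monsters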
class Player(Monster):
#playernumber = 1
def __init__(self, game, where=0, name="hero"):
"""need game instance.
"""
Monster.__init__(self, game, where, carrier=True)
game.players.append(self.number) # add my monsternumber to game.players
self.playerindex = len(game.players) - 1
self.name = name
self.player = True
self.description = "player" # overwrite monster
self.adjective = "human" # overwrite monster
#self.inventory = [] # list of itemnumbers (player carry items)
self.maxcarry = 35 # kg
self.carry = 0 # current mass of all carried items in kg
self.damage += random.randint(1, 5)
self.armor += random.randint(1, 5)
self.speed += random.randint(1, 5)
self.weapon = None
#self.armor = None
#self.loaction = where # start room number
def show_inventory(self, game, itemnumberlist):
txt = ""
txt += "\n==== Your inventory ====\n"
for itemnumber in itemnumberlist:
i = game.items[itemnumber]
if not i.active:
e = "rucksack"
else:
e = "(equipped)"
txt += "{}...{}...{} kg {}\n".format(
itemnumber, i.description,
i.mass, e
)
txt += "You're currently carrying {:.2f} kg, that is {:.2f}% of your capacity".format(
self.carry, (self.carry / self.maxcarry) * 100
)
return txt
def list_items(
self, game, active_only, wearable_only, passive_only, magic_only
):
"""returns list of itemnumber
of the items in the inventory of the player"""
txt = ""
items = []
for itemnumber in game.items:
i = game.items[itemnumber]
if i.location == -self.number:
if active_only and not i.active:
continue
if passive_only and i.active:
continue
if magic_only and not i.is_magic:
continue
if wearable_only and not i.is_weapon and not i.is_armor:
continue
items.append(itemnumber)
#txt += game.items[itemnumber].description
return items
def pickup_item(self, game):
txt, items = game.rooms[self.location].list_items(game)
if len(items) > 0:
output("please select item number to pick up\n")
output(txt)
i = select_number(items)
m = game.items[i].mass
if m > self.maxcarry:
return "You fail to pick up this item. Reason: You can only carry {} kg. \n and try to pick up {} kg. Become stronger and try again!".format(
self.maxcarry, m
)
elif m + self.carry > self.maxcarry:
return "You fail to pick up this item. Reason: You already carry {} kg. Picking up {} would exceed your max. carry capacity of {} kg. Drop some items first or become stronger!".format(
self.carry, m, self.maxcarry
)
else:
game.items[i].location = -self.number # negative monster number
self.carry += m
return "You now carry items for a total weight of {} kg".format(
self.carry
)
else:
return "this room has no items so there is nothing to pick up\n"
def drop_item(self, game):
items = self.list_items(game, False, False, False, False) # do not drop equipped items
if len(items) > 0:
output(self.show_inventory(game, self.list_items(game, False, False, False, False)))
output("select itemnumber to drop\n")
i = select_number(items)
if game.items[i].never_drop:
return "you can not drop this item, sorry! Try another item"
game.items[i].location = self.location # drop item in my room
self.carry -= game.items[i].mass # update player
return "you drop the {} to the floor\n".format(game.items[i].description)
else:
return "you carry no items so you can drop nothing\n"
def use_item(self, game):
"""launch effect of magic item (must be in inventory)"""
items = self.list_items(game, False, False, False, True)
if len(items) > 0:
output(self.show_inventory(game, self.list_items(game, False, False, False, True)))
output("select itemnumber to use/equip\n")
i = select_number(items)
if game.items[i].effect is None:
return "this item has no effect/is not equippable"
txt = ""
game.items[i].charges -= 1
if game.items[i].charges == 0:
# destroy item (move to room 0)
game.items[i].location = 0
txt += "while using the effect, the item has destroyed itself\n"
txt += game.effects[game.items[i].effect].action(game, victim=self.number)
return txt
else:
return "you carry no magic items so you can use nothing"
def inspect(self, game):
"""all you ever wanted to know about yourself, but was too afraid to ask"""
txt = ""
weapontext = ""
armortext = ""
items = self.list_items(game, True, False, False, False)
for i in items:
if game.items[i].is_weapon:
weapontext += game.items[i].description + " and "
elif game.items[i].is_armor:
armortext += game.items[i].description + ", "
if weapontext == "":
weapontext = "no weapon"
else:
weapontext = weapontext[:-5] # remove last 5 chars
if armortext == "":
armortext = "no armor"
else:
armortext = armortext[:-2] # remove last 2 chars
txt += "you are a {} with {} hitpoints wielding ".format(self.description, self.hitpoints)
txt += "{} and wearing: {}\n".format(weapontext, armortext)
attr = self.calculate_values(game).keys()
#left = self.leftcol().splitlines()
#right = self.inspect().splitlines()
v = self.calculate_values(game)
total = v.values()
base = []
for k in attr:
base.append(self.__getattribute__(k))
both = zip(attr, base, total)
txt += " attribute base eqip tmp.effect total\n"
for pair in both:
txt += "{:>10}: {:>2} {:>2} {:>2}\n".format(pair[0], pair[1], pair[2] - pair[1], pair[2])
return txt
def equip(self, game):
"""ask user of itemnumber to wear/wield/remove"""
items = self.list_items(game, False, True, False, False)
txt = "Please select number of item to wield/wear/equip.\n If item is already equipped, it will be put back in the inventory\n"
for itemnumber in items:
i = game.items[itemnumber]
if | |
(bool) [create,query,edit]
When true, sets the Shaded Display attribute of the fluid to AsRender: all fluid properties displayed as hardware
rendered. When false, displays only the currently selected paintable attribute of the fluid.
- displayVelocity : dv (bool) [create,query,edit]
Turns on/off velocity display, independently of the above dar/displayAsRender setting. Use this flag to enable velocity
display while only displaying density, for example.
- doAutoSave : das (bool) [edit]
Execute the -autoSave command if there are unsaved painted fluid properties.
- dragSlider : dsl (unicode) [create,edit]
Sets the current brush drag state for resizing or offsetting the brush (like the 'b' and 'm' default hotkeys). The
string argument is one of: radius, lowradius, opacity, value, depth, displacement, uvvector, or none. C: Default is none.
- duringStrokeCmd : dsk (unicode) []
- dynclonemode : dcm (bool) []
- exists : ex (bool) [create]
Returns true or false depending upon whether the specified object exists. Other flags are ignored.
- expandfilename : eef (bool) [create,edit]
If true, it will expand the name of the export file and concatenate it with the surface name. Otherwise it will take the
name as it is. C: Default is true.
- exportaspectratio : ear (float) []
- exportfilemode : efm (unicode) [create,query,edit]
Specifies the export channel. The valid entries here are: alpha, luminance, rgb, rgba. C: Default is luminance/rgb. Q:
When queried, it returns a string.
- exportfilesave : esf (unicode) [edit]
Exports the attribute map and saves to a specified file.
- exportfilesizex : fsx (int) [create,query,edit]
Specifies the width of the attribute map to export. C: Default width is 256. Q: When queried, it returns an integer.
- exportfilesizey : fsy (int) [create,query,edit]
Specifies the height of the attribute map to export. C: Default height is 256. Q: When queried, it returns an integer.
- exportfiletype : eft (unicode) [create,query,edit]
Specifies the image file format. It can be one of the following: iff, tiff, jpeg, alias, rgb, fit, postScriptEPS,
softimage, wavefrontRLA, wavefrontEXP. C: default is tiff. Q: When queried, it returns a string.
- filterNodes : fon (bool) []
- history : ch (bool) [create]
If this is a tool command, turn the construction history on for the tool in question.
- image1 : i1 (unicode) [create,query,edit]
First of three possible icons representing the tool associated with the context.
- image2 : i2 (unicode) [create,query,edit]
Second of three possible icons representing the tool associated with the context.
- image3 : i3 (unicode) [create,query,edit]
Third of three possible icons representing the tool associated with the context.
- importfileload : ifl (unicode) [edit]
Load the attribute map a specified file.
- importfilemode : ifm (unicode) [create,query,edit]
Specifies the channel to import. The valid entries here are: alpha, luminance, red, green, blue, and rgb. C: Default is
alpha. Q: When queried, it returns a string.
- importreassign : irm (bool) [create,query,edit]
Specifies if multiple attribute maps are to be reassigned while importing. Only maps previously exported from within
Artisan can be reassigned. C: Default is FALSE. Q: When queried, it returns a boolean.
- interactiveUpdate : iu (bool) []
- lastRecorderCmd : lrc (unicode) []
- lastStampName : lsn (unicode) []
- lowerradius : lr (float) [create,query,edit]
Sets the lower size of the brush (only applies when using a tablet).
- makeStroke : mst (int) []
- mappressure : mp (unicode) [create,query,edit]
Sets the tablet pressure mapping when the tablet is used. There are four options: none- the pressure has no effect,
opacity- the pressure is mapped to the opacity, radius- the pressure is mapped to modify the radius of the brush, both- the
pressure modifies both the opacity and the radius. C: Default is none. Q: When queried, it returns a string.
- maxvalue : mxv (float) []
- minvalue : miv (float) []
- name : n (unicode) [create]
If this is a tool command, name the tool appropriately.
- objattrArray : oaa (unicode) []
- opacity : op (float) [create,query,edit]
Sets the brush opacity. C: Default is 1.0. Q: When queried, it returns a float.
- outline : o (bool) [create,query,edit]
Specifies if the brush should be drawn. C: Default is TRUE. Q: When queried, it returns a boolean.
- outwhilepaint : owp (bool) [create,query,edit]
Specifies if the brush outline should be drawn while painting. C: Default is FALSE. Q: When queried, it returns a
boolean.
- paintNodeArray : pna (unicode) []
- paintattrselected : pas (unicode) []
- paintmode : pm (unicode) [create,query,edit]
Specifies the paint mode. There are two possibilities: screen and tangent. C: Default is screen. Q: When queried, it
returns a string.
- paintoperationtype : pot (unicode) []
- pickColor : pcm (bool) []
- pickValue : pv (bool) []
- playbackCursor : plc (float, float) []
- playbackPressure : plp (float) []
- preserveclonesource : pcs (bool) []
- profileShapeFile : psf (unicode) [query,edit]
Passes a name of the image file for the stamp shape profile.
- projective : prm (bool) [create,query,edit]
Specifies the projective paint mode. C: Default is 'false'. Q: When queried, it returns a boolean.
- property : p (unicode) [create,query,edit]
Specifies a property to paint on the fluid. Valid values are color, density,
densityAndColor, densityAndFuel, temperature, fuel, velocity.
- radius : r (float) [create,query,edit]
Sets the size of the brush. C: Default is 1.0 cm. Q: When queried, it returns a float.
- rampMaxColor : rxc (float, float, float) []
- rampMinColor : rmc (float, float, float) []
- record : rec (bool) []
- reflection : rn (bool) [create,query,edit]
Specifies the reflection mode. C: Default is 'false'. Q: When queried, it returns a boolean.
- reflectionaboutorigin : rno (bool) []
- reflectionaxis : ra (unicode) [create,query,edit]
Specifies the reflection axis. There are three possibilities: x, y and z. C: Default is x. Q: When queried, it returns a
string.
- rgbValue : rgb (float, float, float) [create,query,edit]
Specifies the values of the red, green, and blue components of the color to use when painting the property color.
- screenRadius : scR (float) []
- selectclonesource : scs (bool) []
- selectedattroper : sao (unicode) []
- showactive : sa (bool) [create,query,edit]
Sets on/off the display of the surface isoparms. C: Default is TRUE. Q: When queried, it returns a boolean.
- stampDepth : stD (float) []
- stampProfile : stP (unicode) [create,query,edit]
Sets the brush profile of the current stamp. Currently, the following profiles are supported: gaussian, soft, solid and
square. C: Default is gaussian. Q: When queried, it returns a string.
- stampSpacing : stS (float) []
- strokesmooth : ssm (unicode) []
- surfaceConformedBrushVertices : scv (bool) [create,query,edit]
Enables/disables the display of the effective brush area as affected vertices.
- tablet : tab (bool) [query]
Returns true if the tablet device is present, false if it is absent
- tangentOutline : to (bool) [create,query,edit]
Enables/disables the display of the brush circle tangent to the surface.
- toolOffProc : tfp (unicode) []
- toolOnProc : top (unicode) []
- useColorRamp : ucr (bool) []
- useMaxMinColor : umc (bool) []
- useStrokeDirection : usd (bool) [create,query,edit]
Applicable only during velocity painting. Specifies whether the value of the painted velocity should come from the
direction of the brush stroke, overriding the value specified by the -v/-velocity flag.
- usepressure : up (bool) [create,query,edit]
Sets the tablet pressure on/off. C: Default is false. Q: When queried, it returns a
"""
one_mode is an example file which seeks to verify the new Hamiltonian algorithm and update sequence
as derived by Dan and Stephen. It will ignore much of the class structure applied to OLIVE in an
attempt to provide a simple example of the algorithm. Once verified, it will be implemented within
OLIVE using the appropriate class structure, memory and data management, I/O, etc. This version uses
a time-dependent Hamiltonian map-based algorithm, as the previous algorithm was unfit to consider vector potentials
with constant geometric dependence along the z-axis.
<NAME>
Last Updated: 11/01/2016
Initial: 09/08/2016
Sequencing:
-----------
Map approach: A single step is the concatenation of the following operators:
M_R(1/2) M_x(1/2) M_y(1/2) M_z(1/2) M_y(1/2) M_x(1/2) M_R(1/2)
where M_R expresses the action of the field Hamiltonian, which is in effect a harmonic oscillator. This
can be represented by a simple rotation, as before.
The different M_x type maps represent a sequence of kicks (momenta updates) and drifts (coordinate updates) wherein
only the coordinate specified by the subscript (x,y,z) is updated. An example is as follows:
M_x = Kick_ps(all of them 4-updates), Drift_x, Kick_ps(all of them again).
Note that the 1st kickP and the second kickP differ in FORM by a minus sign due to the similarity transform done to
produce the kicks from the Hamiltonian. Also note that even though the form of the computations are the same, they
must be re-evaluated using the new x-coordinate that was updated by the drift operation.
Initialization requirements:
- We are given mechanical momentum, and all relevant field quantities.
- Field quantities include eigenmodes of interest (for pillbox this means specifying mode #s l = (m,n,p) and initial
strengths Q_l.
- We must compute frequencies, etc. as desired, and produce an array of mode information corresponding to a
mode index (l) as we see fit. (e.g. l = 0 -> TM mode with m,n,p = 1,1,0 and has corresponding initial Q_l and P_l).
Beginning the algorithm requires transforming the mechanical momentum p to canonical momentum P = p + (e/c)A.
Usage:
------
python one_mode.py
"""
import numpy as np
import matplotlib as mpl
# Set the default matplotlib backend (TkAgg) for interactive use, e.g. on MacOSX
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from scipy.constants import m_e as me_mks
from scipy.constants import e as e_mks
from scipy.constants import c as c_mks
from eigenmodes import compute_wavenumbers
from eigenmodes import calc_A_x, calc_A_y, calc_A_z
from eigenmodes import dx_int_A_z, dy_int_A_z, dz_int_A_z
from eigenmodes import calc_int_A_z
from eigenmodes import omega_l
#from eigenmodes import deriv_int_Ax, deriv_int_Ay
#from eigenmodes import OMEGA
# Set the default mass and charge for an electron
m = me_mks*1.e3 #cgs
q = 4.80320451e-10 #esu 1.*e
c = c_mks*1.e2 #cgs
q_over_c = q/c
class BeamLoader(object):
"""Simple class that simulates particles coupling to cavity fields"""
NUM_STEPS = 101 # Fix these for now
# NUM_PARTICLES = 2 #Fix these for now
q_over_c = q / c # fix this as well
def __init__(self, q0, p0, Q0, P0, ws, omegas, modes, maxTau, G, tau0=0):
'''
q0 (ndArray): initial particle positions - dimension (num_particles,3)
p0 (ndArray): initial particle momenta - dimension (num_particles,3)
Q0 (ndArray): initial field amplitudes - dimension (num_modes)
P0 (ndArray): initial amplitude of mode envelope oscillations - dimension (num_modes)
ws (ndArray): particle weightings as a multiple of q/c (num_particles)
omegas (ndArray): mode frequencies - dimension (num_modes)
modes (ndArray): wavenumbers for the modes - dimension (num_modes,3)
maxTau (float): final tau value for simulation (corresponding to a distance c*t_end)
G (float): Geometric factor for computing field energies ((a*b*d)/4 for rectangular cavity)
DEPRECATED-M (float): normalization factor for cavity eigenmodes (1./(16.*np.pi*c)*(a*b*d) for rectangular cavity)
'''
# Particles step through tau=ct from tau = 0 to tau = T in N total steps of length h = T/N
# The first and last (partial) steps have coordinate updates of length 0.5h to set up the initial leap
# Particles are assumed to have fixed beta, so z velocity is fixed
# Position at step k is beta*(k-1/2)*h (because position after step 1 is 1/2, after step 2 is 1.5, etc.)
# self.k = 0 #initial step number
self.num_steps = BeamLoader.NUM_STEPS
self.num_particles = len(q0)
self.num_modes = np.size(np.asarray(Q0))
# define step size
self.h = maxTau / self.num_steps
self.tau = tau0
# h = self.__h
# Position quantities
self.x = q0[:, 0]
self.y = q0[:, 1]
self.z = q0[:, 2]
# Charge and mass quantities - now weighted
self.mass = ws * m
self.qs = ws * q
# print ws
# print self.mass
# print self.qs
# Momentum quantities - need to be weighted
self.px = ws * p0[:, 0]
self.py = ws * p0[:, 1]
self.pz = ws * p0[:, 2]
# Field quantities
self.modes = modes
self.omegas = omegas # these are preloaded as omega/c !
self.Q = np.asarray(Q0)
self.P = np.asarray(P0)
self.G = G
self.M = G / (4 * np.pi * c)
# equations for Ks and Ms for computing field energy
self.Ml = self.M * np.ones(self.num_modes)
self.Kl = self.M * (self.modes[:, 0] ** 2 + self.modes[:, 1] ** 2)
# self.OMEGA = np.asarray(W0)
# equations for Ks and Ms
# M_leqn = lambda m,n,p: a*b*c/4.
# K_leqn = lambda m,n,p: (a*b*c/4.)*((m*np.pi/a)**2 + (n*np.pi/b)**2)
# print np.sqrt(self.pz**2*c**2 + (self.mass*c**2)**2)/(self.mass*c*c)
# convert mechanical momentum (P0) to canonical momentum
self.gmc_history = []
self.convert_mechanical_to_canonical()
self.calc_gamma_m_c()
# Construct history arrays to store updates
self.x_history = [self.x]
self.y_history = [self.y]
self.z_history = [self.z]
self.px_history = [self.px]
self.py_history = [self.py]
self.pz_history = [self.pz]
self.Q_history = [self.Q]
self.P_history = [self.P]
self.tau_history = [tau0]
# print self.gmc_history
# print self.pz
# print "Particles loaded with initial gammas {}".format( self.gmc /(m*c))
# Compute gamma and beta-gamma - note that beta = betagamma/gamma will be held constant
# p_array = np.einsum('ij,ij->i', p0, p0)
# self.gamma = np.sqrt((p_array)**2 + (m*c**2)**2) #an extra factor of c already in p
# self.beta_gamma = p_array/(m*c*c)
def convert_mechanical_to_canonical(self):
'''Convert mechanical momenta to canonical momenta for the current particle state'''
A_x = calc_A_x(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
A_y = calc_A_y(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
A_z = calc_A_z(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
self.px = self.px + (self.qs / c) * np.dot(self.Q, A_x)
self.py = self.py + (self.qs / c) * np.dot(self.Q, A_y)
self.pz = self.pz + (self.qs / c) * np.dot(self.Q, A_z)
def convert_canonical_to_mechanical(self):
'''Convert canonical momenta to mechanical momenta for the current particle state'''
A_x = calc_A_x(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
A_y = calc_A_y(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
A_z = calc_A_z(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
self.px = self.px - (self.qs / c) * np.dot(self.Q, A_x)
self.py = self.py - (self.qs / c) * np.dot(self.Q, A_y)
self.pz = self.pz - (self.qs / c) * np.dot(self.Q, A_z)
def calc_gamma_m_c(self):
'''Compute the quantity gamma*m*c for every particle and update the corresponding member variable'''
A_x = calc_A_x(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
A_y = calc_A_y(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
A_z = calc_A_z(self.modes[:, 0], self.modes[:, 1], self.modes[:, 2], self.x, self.y, self.z)
self.gmc = np.sqrt((self.px - (self.qs / c) * np.dot(self.Q, A_x)) ** 2 + (
self.py - (self.qs / c) * np.dot(self.Q, A_y)) ** 2 + (self.pz - (self.qs / c) * np.dot(self.Q, A_z)) ** 2 + (
self.mass * c) ** 2)
self.gmc_history.append(self.gmc / (self.mass * c))
def update_q(self, k=0, step=1.):
'''Update for all particles a single component qk -> qk+1(or 1/2) given the momentum pk
Arguments:
k (int): Index of coordinate being advanced (k=0 for x, k=1 for y, k=2 for z)
step (Float): Fraction of a step to advance the coordinates (usually 0.5 or 1)
'''
if k == 0:
self.x = self.x + step * self.h * self.px / self.gmc
elif k == 1:
self.y = self.y + step * self.h * self.py / self.gmc
elif k == 2:
self.z = self.z + step * self.h * self.pz / self.gmc
else:
raise ValueError("Coordinate index outside of range [0,1,2]")
def kick_p(self, k=0, sign=1, step=1.):
'''Kick p is the kick portion of the coupling Hamiltonian map, which updates each component of p as well
as the field
<filename>toughradius/manage/models.py<gh_stars>1-10
#!/usr/bin/env python
#coding:utf-8
import sqlalchemy
import warnings
warnings.simplefilter('ignore', sqlalchemy.exc.SAWarning)
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
from sqlalchemy.orm import scoped_session, sessionmaker
from hashlib import md5
from toughlib import utils
import functools
DeclarativeBase = declarative_base()
def get_metadata(db_engine):
global DeclarativeBase
metadata = DeclarativeBase.metadata
metadata.bind = db_engine
return metadata
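# Typical bootstrap sketch (the sqlite URL is only an example; any SQLAlchemy
# engine URL works here):
#
#     engine = create_engine('sqlite:///toughradius.db')
#     metadata = get_metadata(engine)
#     metadata.create_all(engine)  # create every table defined below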
class SystemSession(DeclarativeBase):
"""session表"""
__tablename__ = 'system_session'
__table_args__ = {
'mysql_engine' : 'MEMORY'
}
key = Column(u'_key', Unicode(length=512), primary_key=True, nullable=False,doc=u"session key")
value = Column(u'_value', Unicode(length=2048), nullable=False,doc=u"session value")
time = Column(u'_time', INTEGER(), nullable=False,doc=u"session timeout")
class SystemCache(DeclarativeBase):
"""cache表"""
__tablename__ = 'system_cache'
__table_args__ = {
'mysql_engine' : 'MEMORY'
}
key = Column(u'_key', Unicode(length=512), primary_key=True, nullable=False,doc=u"cache key")
value = Column(u'_value', Unicode(length=8192), nullable=False,doc=u"cache value")
time = Column(u'_time', INTEGER(), nullable=False,doc=u"cache timeout")
class TrNode(DeclarativeBase):
"""区域表"""
__tablename__ = 'tr_node'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"区域编号")
node_name = Column(u'node_name', Unicode(length=32), nullable=False,doc=u"区域名")
node_desc = Column(u'node_desc', Unicode(length=64), nullable=False,doc=u"区域描述")
class TrOperator(DeclarativeBase):
"""操作员表 操作员类型 0 系统管理员 1 普通操作员"""
__tablename__ = 'tr_operator'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"操作员id")
operator_type = Column('operator_type', INTEGER(), nullable=False,doc=u"操作员类型")
operator_name = Column(u'operator_name', Unicode(32), nullable=False,doc=u"操作员名称")
operator_pass = Column(u'operator_pass', Unicode(length=128), nullable=False,doc=u"操作员密码")
operator_status = Column(u'operator_status', INTEGER(), nullable=False,doc=u"操作员状态,0/1")
operator_desc = Column(u'operator_desc', Unicode(255), nullable=False,doc=u"操作员描述")
class TrOperatorNodes(DeclarativeBase):
"""操作员表关联区域"""
__tablename__ = 'tr_operator_nodes'
__table_args__ = {}
operator_name = Column(u'operator_name', Unicode(32),primary_key=True,nullable=False,doc=u"操作员名称")
node_name = Column(u'node_name', Unicode(32), primary_key=True,nullable=False,doc=u"区域名称")
class TrOperatorProducts(DeclarativeBase):
"""操作员表关联产品"""
__tablename__ = 'tr_operator_products'
__table_args__ = {}
# column definitions
operator_name = Column(u'operator_name', Unicode(32), primary_key=True, nullable=False, doc=u"操作员名称")
product_id = Column(u'product_id', Unicode(32), primary_key=True, nullable=False, doc=u"资费ID")
class TrOperatorRule(DeclarativeBase):
"""操作员权限表"""
__tablename__ = 'tr_operator_rule'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"权限id")
operator_name = Column(u'operator_name', Unicode(32), nullable=False,doc=u"操作员名称")
rule_path = Column(u'rule_path', Unicode(128), nullable=False,doc=u"权限URL")
rule_name = Column(u'rule_name', Unicode(128), nullable=False,doc=u"权限名称")
rule_category = Column(u'rule_category', Unicode(128), nullable=False,doc=u"权限分类")
class TrParam(DeclarativeBase):
"""系统参数表 """
__tablename__ = 'tr_param'
__table_args__ = {}
param_name = Column(u'param_name', Unicode(length=64), primary_key=True, nullable=False,doc=u"参数名")
param_value = Column(u'param_value', Unicode(length=1024), nullable=False,doc=u"参数值")
param_desc = Column(u'param_desc', Unicode(length=255),doc=u"参数描述")
class TrBas(DeclarativeBase):
"""BAS设备表"""
__tablename__ = 'tr_bas'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"设备id")
dns_name = Column(u'dns_name', Unicode(length=128), nullable=True, doc=u"DNS名称")
vendor_id = Column(u'vendor_id', Unicode(length=32), nullable=False,doc=u"厂商标识")
ip_addr = Column(u'ip_addr', Unicode(length=15), nullable=True,doc=u"IP地址")
bas_name = Column(u'bas_name', Unicode(length=64), nullable=False,doc=u"bas名称")
bas_secret = Column(u'bas_secret', Unicode(length=64), nullable=False,doc=u"共享密钥")
coa_port = Column(u'coa_port', INTEGER(), nullable=False,doc=u"CoA端口")
time_type = Column(u'time_type', SMALLINT(), nullable=False,doc=u"时区类型")
class TrBasNode(DeclarativeBase):
"""BAS设备关联区域"""
__tablename__ = 'tr_bas_node'
__table_args__ = {}
bas_id = Column(u'bas_id', INTEGER(), primary_key=True, nullable=False,doc=u"设备id")
node_id = Column(u'node_id', INTEGER(), primary_key=True, nullable=False,doc=u"区域id")
class TrRoster(DeclarativeBase):
"""黑白名单 0 白名单 1 黑名单"""
__tablename__ = 'tr_roster'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"黑白名单id")
mac_addr = Column('mac_addr', Unicode(length=17), nullable=False,doc=u"mac地址")
begin_time = Column('begin_time', Unicode(length=19), nullable=False,doc=u"生效开始时间")
end_time = Column('end_time', Unicode(length=19), nullable=False,doc=u"生效结束时间")
roster_type = Column('roster_type', SMALLINT(), nullable=False,doc=u"黑白名单类型")
class TrCustomer(DeclarativeBase):
"""用户信息表"""
__tablename__ = 'tr_customer'
__table_args__ = {}
customer_id = Column('customer_id', INTEGER(),
Sequence('customer_id_seq', start=100001, increment=1),
primary_key=True,nullable=False,doc=u"用户id")
node_id = Column('node_id', INTEGER(), nullable=False,doc=u"区域id")
customer_name = Column('customer_name', Unicode(length=64), nullable=False,doc=u"用户登录名")
password = Column('password', Unicode(length=128), nullable=False,doc=u"用户登录密码")
realname = Column('realname', Unicode(length=64), nullable=False,doc=u"")
idcard = Column('idcard', Unicode(length=32),doc=u"用户证件号码")
sex = Column('sex', SMALLINT(), nullable=True,doc=u"用户性别0/1")
age = Column('age', INTEGER(), nullable=True,doc=u"用户年龄")
email = Column('email', Unicode(length=255), nullable=True,doc=u"用户邮箱")
email_active = Column('email_active', SMALLINT(), default=0,doc=u"用户邮箱激活状态")
active_code = Column('active_code', Unicode(length=32), nullable=False,doc=u"邮箱激活码")
mobile = Column('mobile', Unicode(length=16), nullable=True,doc=u"用户手机")
mobile_active = Column('mobile_active', SMALLINT(), default=0,doc=u"用户手机绑定状态")
address = Column('address', Unicode(length=255), nullable=True,doc=u"用户地址")
customer_desc = Column(u'customer_desc', Unicode(255),doc=u"用户描述")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"创建时间")
update_time = Column('update_time', Unicode(length=19), nullable=False,doc=u"更新时间")
class TrCustomerOrder(DeclarativeBase):
"""
订购信息表(交易记录)
pay_status交易支付状态:0-未支付,1-已支付,2-已取消
"""
__tablename__ = 'tr_customer_order'
__table_args__ = {}
order_id = Column('order_id', Unicode(length=32),primary_key=True,nullable=False,doc=u"订单id")
customer_id = Column('customer_id', INTEGER(),nullable=False,doc=u"用户id")
product_id = Column('product_id', INTEGER(),nullable=False,doc=u"资费id")
account_number = Column('account_number', Unicode(length=32),nullable=False,doc=u"上网账号")
order_fee = Column('order_fee', INTEGER(), nullable=False,doc=u"订单费用")
actual_fee = Column('actual_fee', INTEGER(), nullable=False,doc=u"实缴费用")
pay_status = Column('pay_status', INTEGER(), nullable=False,doc=u"支付状态")
accept_id = Column('accept_id', INTEGER(),nullable=False,doc=u"受理id")
order_source = Column('order_source', Unicode(length=64), nullable=False,doc=u"订单来源")
order_desc = Column('order_desc', Unicode(length=255),doc=u"订单描述")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"交易时间")
class TrAccount(DeclarativeBase):
"""
上网账号表,每个会员可以同时拥有多个上网账号
account_number 为每个套餐对应的上网账号,每个上网账号全局唯一
用户状态 0:"预定",1:"正常", 2:"停机" , 3:"销户", 4:"到期"
"""
__tablename__ = 'tr_account'
__table_args__ = {}
account_number = Column('account_number', Unicode(length=32),primary_key=True,nullable=False,doc=u"上网账号")
customer_id = Column('customer_id', INTEGER(),nullable=False,doc=u"用户id")
client_id = Column('client_id', INTEGER(),nullable=False,doc=u"客户id")
product_id = Column('product_id', INTEGER(),nullable=False,doc=u"资费id")
group_id = Column('group_id', INTEGER(),doc=u"用户组id")
password = Column('password', Unicode(length=128), nullable=False,doc=u"上网密码")
status = Column('status', INTEGER(), nullable=False,doc=u"用户状态")
install_address = Column('install_address', Unicode(length=128), nullable=False,doc=u"装机地址")
balance = Column('balance', INTEGER(), nullable=False, default=0, doc=u"用户余额-分")
time_length = Column('time_length', INTEGER(), nullable=False,default=0,doc=u"用户时长-秒")
flow_length = Column('flow_length', INTEGER(), nullable=False,default=0,doc=u"用户流量-kb")
expire_date = Column('expire_date', Unicode(length=10), nullable=False,doc=u"过期时间- ####-##-##")
user_concur_number = Column('user_concur_number', INTEGER(), nullable=False,doc=u"用户并发数")
bind_mac = Column('bind_mac', SMALLINT(), nullable=False,doc=u"是否绑定mac")
bind_vlan = Column('bind_vlan', SMALLINT(), nullable=False,doc=u"是否绑定vlan")
mac_addr = Column('mac_addr', Unicode(length=17),doc=u"mac地址")
vlan_id1 = Column('vlan_id1', INTEGER(),doc=u"内层vlan")
vlan_id2 = Column('vlan_id2', INTEGER(),doc=u"外层vlan")
ip_address = Column('ip_address', Unicode(length=15),doc=u"静态IP地址")
last_pause = Column('last_pause', Unicode(length=19),doc=u"最后停机时间")
account_desc = Column(u'account_desc', Unicode(255),doc=u"用户描述")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"创建时间")
update_time = Column('update_time', Unicode(length=19), nullable=False,doc=u"更新时间")
class TrAccountAttr(DeclarativeBase):
"""上网账号扩展策略属性表"""
__tablename__ = 'tr_account_attr'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"属性id")
account_number = Column('account_number', Unicode(length=32),nullable=False,doc=u"上网账号")
attr_type = Column('attr_type', INTEGER(), default=1, doc=u"属性类型,0,一般;1,radius属性")
attr_name = Column(u'attr_name', Unicode(length=255), nullable=False,doc=u"属性名")
attr_value = Column(u'attr_value', Unicode(length=255), nullable=False,doc=u"属性值")
attr_desc = Column(u'attr_desc', Unicode(length=255),doc=u"属性描述")
UniqueConstraint('account_number','attr_name','attr_type',name='tr_account_attr_idx')
class TrProduct(DeclarativeBase):
'''
Product (tariff) table
product_policy, the tariff type: 0 prepaid monthly, 1 prepaid time, 2 buyout monthly, 3 buyout time, 4 prepaid traffic, 5 buyout traffic, 6 free-form tariff
product_status, the sales status: 0 normal, 1 disabled; a disabled product can no longer be ordered
'''
__tablename__ = 'tr_product'
__table_args__ = {}
id = Column('id', INTEGER(),primary_key=True,autoincrement=1,nullable=False,doc=u"product id")
product_name = Column('product_name', Unicode(length=64), nullable=False,doc=u"product name")
product_policy = Column('product_policy', INTEGER(), nullable=False,doc=u"product policy")
product_status = Column('product_status', SMALLINT(), nullable=False,doc=u"product status")
bind_mac = Column('bind_mac', SMALLINT(), nullable=False,doc=u"whether MAC is bound")
bind_vlan = Column('bind_vlan', SMALLINT(), nullable=False,doc=u"whether VLAN is bound")
concur_number = Column('concur_number', INTEGER(), nullable=False,doc=u"concurrent session limit")
fee_period = Column('fee_period', Unicode(length=11),doc=u"authentication time window")
fee_months = Column('fee_months', INTEGER(),doc=u"buyout months")
fee_times = Column('fee_times', INTEGER(),doc=u"buyout time (seconds)")
fee_flows = Column('fee_flows', INTEGER(),doc=u"buyout traffic (kb)")
fee_price = Column('fee_price', INTEGER(),nullable=False,doc=u"product price")
fee_period = Column('fee_period', Unicode(length=11),doc=u"billing authentication time window") # note: duplicate attribute name; this definition overrides the one above
input_max_limit = Column('input_max_limit', INTEGER(), nullable=False,doc=u"upstream rate limit")
output_max_limit = Column('output_max_limit', INTEGER(), nullable=False,doc=u"downstream rate limit")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"creation time")
update_time = Column('update_time', Unicode(length=19), nullable=False,doc=u"update time")
class TrProductAttr(DeclarativeBase):
'''Product extended attribute table'''
__tablename__ = 'tr_product_attr'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"attribute id")
product_id = Column('product_id', INTEGER(),nullable=False,doc=u"product id")
attr_type = Column('attr_type', INTEGER(), default=1, doc=u"attribute type: 0 generic; 1 radius attribute")
attr_name = Column(u'attr_name', Unicode(length=255), nullable=False,doc=u"attribute name")
attr_value = Column(u'attr_value', Unicode(length=255), nullable=False,doc=u"attribute value")
attr_desc = Column(u'attr_desc', Unicode(length=255),doc=u"attribute description")
UniqueConstraint('product_id','attr_type',name='tr_product_attr_idx')
class TrBilling(DeclarativeBase):
"""计费信息表 is_deduct 0 未扣费 1 已扣费"""
__tablename__ = 'tr_billing'
__table_args__ = { }
id = Column(u'id', INTEGER(), primary_key=True, nullable=False,doc=u"计费id")
account_number = Column(u'account_number', Unicode(length=253), nullable=False,doc=u"上网账号")
nas_addr = Column(u'nas_addr', Unicode(length=15), nullable=False,doc=u"bas地址")
acct_session_id = Column(u'acct_session_id', Unicode(length=253), nullable=False,doc=u"会话id")
acct_start_time = Column(u'acct_start_time', Unicode(length=19), nullable=False,doc=u"计费开始时间")
acct_session_time = Column(u'acct_session_time', INTEGER(), nullable=False,doc=u"会话时长")
input_total = Column(u'input_total', INTEGER(),doc=u"会话的上行流量(kb)")
output_total = Column(u'output_total', INTEGER(),doc=u"会话的下行流量(kb)")
acct_times = Column(u'acct_times', INTEGER(), nullable=False,doc=u"扣费时长(秒)")
acct_flows = Column(u'acct_flows', INTEGER(), nullable=False,doc=u"扣费流量(kb)")
acct_fee = Column(u'acct_fee', INTEGER(), nullable=False,doc=u"应扣费用")
actual_fee = Column('actual_fee', INTEGER(), nullable=False,doc=u"实扣费用")
balance = Column('balance', INTEGER(), nullable=False,doc=u"当前余额")
time_length = Column('time_length', INTEGER(), nullable=False,default=0,doc=u"当前用户时长-秒")
flow_length = Column('flow_length', INTEGER(), nullable=False,default=0,doc=u"当前用户流量-kb")
is_deduct = Column(u'is_deduct', INTEGER(), nullable=False,doc=u"是否扣费")
create_time = Column('create_time', Unicode(length=19), nullable=False,doc=u"计费时间")
class TrTicket(DeclarativeBase):
"""Internet access log (accounting ticket) table"""
__tablename__ = 'tr_ticket'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"log id")
account_number = Column(u'account_number', Unicode(length=253), nullable=False, doc=u"access account")
acct_input_gigawords = Column(u'acct_input_gigawords', INTEGER(), doc=u"upstream gigawords (wrap count of the 4-byte octet counter)")
acct_output_gigawords = Column(u'acct_output_gigawords', INTEGER(), doc=u"downstream gigawords (wrap count of the 4-byte octet counter)")
acct_input_octets = Column(u'acct_input_octets', INTEGER(), doc=u"session upstream traffic (bytes)")
acct_output_octets = Column(u'acct_output_octets', INTEGER(), doc=u"session downstream traffic (bytes)")
acct_input_packets = Column(u'acct_input_packets', INTEGER(), doc=u"session upstream packet count")
acct_output_packets = Column(u'acct_output_packets', INTEGER(), doc=u"session downstream packet count")
acct_session_id = Column(u'acct_session_id', Unicode(length=253), nullable=False, doc=u"session id")
acct_session_time = Column(u'acct_session_time', INTEGER(), nullable=False, doc=u"session duration")
acct_start_time = Column(u'acct_start_time', Unicode(length=19), nullable=False, doc=u"session start time")
acct_stop_time = Column(u'acct_stop_time', Unicode(length=19), nullable=False, doc=u"session stop time")
acct_terminate_cause = Column(u'acct_terminate_cause', INTEGER(), doc=u"session terminate cause")
mac_addr = Column(u'mac_addr', Unicode(length=128), doc=u"MAC address")
calling_station_id = Column(u'calling_station_id', Unicode(length=128), doc=u"user access physical info")
framed_netmask = Column(u'framed_netmask', Unicode(length=15), doc=u"network mask")
framed_ipaddr = Column(u'framed_ipaddr', Unicode(length=15), doc=u"IP address")
nas_class = Column(u'nas_class', Unicode(length=253), doc=u"BAS class")
nas_addr = Column(u'nas_addr', Unicode(length=15), nullable=False, doc=u"BAS address")
nas_port = Column(u'nas_port', Unicode(length=32), doc=u"access port")
nas_port_id = Column(u'nas_port_id', Unicode(length=255), doc=u"access port physical info")
nas_port_type = Column(u'nas_port_type', INTEGER(), doc=u"access port type")
service_type = Column(u'service_type', INTEGER(), doc=u"access service type")
session_timeout = Column(u'session_timeout', INTEGER(), doc=u"session timeout")
start_source = Column(u'start_source', INTEGER(), nullable=False, doc=u"session start source")
stop_source = Column(u'stop_source', INTEGER(), nullable=False, doc=u"session stop source")
class TrOnline(DeclarativeBase):
"""Online user table"""
__tablename__ = 'tr_online'
__table_args__ = (
UniqueConstraint('nas_addr', 'acct_session_id', name='unique_nas_session'),
{'mysql_engine' : 'MEMORY'}
)
id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"online record id")
account_number = Column(u'account_number', Unicode(length=32), nullable=False, index=True, doc=u"access account")
nas_addr = Column(u'nas_addr', Unicode(length=32), nullable=False, index=True, doc=u"BAS address")
acct_session_id = Column(u'acct_session_id', Unicode(length=64), index=True, nullable=False, doc=u"session id")
acct_start_time = Column(u'acct_start_time', Unicode(length=19), nullable=False, doc=u"session start time")
framed_ipaddr = Column(u'framed_ipaddr', Unicode(length=32), nullable=False, doc=u"IP address")
mac_addr = Column(u'mac_addr', Unicode(length=32), nullable=False, doc=u"MAC address")
nas_port_id = Column(u'nas_port_id', Unicode(length=255), nullable=False, doc=u"access port physical info")
billing_times = Column(u'billing_times', INTEGER(), nullable=False, doc=u"accounted time so far")
input_total = Column(u'input_total', INTEGER(), doc=u"upstream traffic (kb)")
output_total = Column(u'output_total', INTEGER(), doc=u"downstream traffic (kb)")
start_source = Column(u'start_source', SMALLINT(), nullable=False, doc=u"accounting start source")
class TrAcceptLog(DeclarativeBase):
'''
Service handling log table.
open: account opening; pause: suspension; resume: resumption; cancel: cancellation; next: renewal; charge: recharge
'''
__tablename__ = 'tr_accept_log'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"log id")
accept_type = Column(u'accept_type', Unicode(length=16), nullable=False, doc=u"handling type")
accept_desc = Column(u'accept_desc', Unicode(length=512), doc=u"handling description")
account_number = Column(u'account_number', Unicode(length=32), nullable=False, doc=u"access account")
operator_name = Column(u'operator_name', Unicode(32), doc=u"operator name")
accept_source = Column(u'accept_source', Unicode(length=128), doc=u"handling channel source")
accept_time = Column(u'accept_time', Unicode(length=19), nullable=False, doc=u"handling time")
class TrOperateLog(DeclarativeBase):
"""Operation log table"""
__tablename__ = 'tr_operate_log'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"log id")
operator_name = Column(u'operator_name', Unicode(32), nullable=False, doc=u"operator name")
operate_ip = Column(u'operate_ip', Unicode(length=128), doc=u"operator IP")
operate_time = Column(u'operate_time', Unicode(length=19), nullable=False, doc=u"operation time")
operate_desc = Column(u'operate_desc', Unicode(length=1024), doc=u"operation description")
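# A minimal usage sketch for these declarative models (illustrative only: the
# engine URL and the exact DeclarativeBase/metadata wiring are assumptions,
# not part of this module):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#     engine = create_engine('sqlite:///toughradius.db')
#     DeclarativeBase.metadata.create_all(engine)
#     session = sessionmaker(bind=engine)()
#     unpaid = session.query(TrBilling).filter(TrBilling.is_deduct == 0).all()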
###############################################################################
# Statistics data models ####
###############################################################################
class TrOnlineStat(DeclarativeBase):
"""Online users statistics table"""
__tablename__ = 'tr_online_stat'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"id")
node_id = Column('node_id', INTEGER(), nullable=False, doc=u"node (region) id")
stat_time = Column(u'stat_time', INTEGER(), nullable=False, doc=u"statistics time")
total = Column(u'total', INTEGER(), doc=u"online count")
class TrFlowStat(DeclarativeBase):
"""User traffic statistics table"""
__tablename__ = 'tr_flow_stat'
__table_args__ = {}
id = Column(u'id', INTEGER(), primary_key=True, nullable=False, doc=u"id")
node_id = Column('node_id', INTEGER(), nullable=False, doc=u"node (region) id")
stat_time = Column(u'stat_time', INTEGER(), nullable=False, doc=u"statistics time")
input_total = Column(u'input_total', INTEGER(), doc=u"upstream traffic (kb)")
data structure.
Args:
tree (Tree): tree data structure containing the model.
body (Body): body data structure instance.
joint_tag (ET.Element): joint XML tag.
parent_body (Body, None): the parent body instance.
Returns:
Joint: joint data structure instance.
"""
# create joint with the corresponding attributes
attrib = joint_tag.attrib
# get joint name
name = attrib.get('name')
if name is None:
name = '__prl_joint_' + str(self._joint_cnt)
self._joint_cnt += 1
# create joint data structure
joint = Joint(joint_id=self._joint_cnt, name=name, dtype=attrib.get('type'), position=attrib.get('pos'),
axis=attrib.get('axis'), friction=attrib.get('frictionloss'), damping=attrib.get('damping'),
parent=parent_body, child=body)
limited = attrib.get('limited')
if limited is not None:
limited = limited.lower().strip()
if limited == 'true':
joint.limits = attrib.get('range')
# add joint in tree and parent body
tree.joints[joint.name] = joint
if parent_body is not None:
parent_body.joints[joint.name] = joint
return joint
def _parse_inertial(self, body, inertial_tag): # DONE
"""
Parse the inertial tag if present, and set the inertial data structure to the given body.
From the main documentation [2]: "This element specifies the mass and inertial properties of the body. If this
element is not included in a given body, the inertial properties are inferred from the geoms attached to the
body. When a compiled MJCF model is saved, the XML writer saves the inertial properties explicitly using this
element, even if they were inferred from geoms. The inertial frame is such that its center coincides with the
center of mass of the body, and its axes coincide with the principal axes of inertia of the body. Thus the
inertia matrix is diagonal in this frame.
Attributes:
- pos (real[3], required): position of the inertial frame.
- quat, axisangle, xyaxes, zaxis, euler: orientation of the inertial frame.
- mass (real, required): mass of the body.
- diaginertia (real[3], optional): diagonal inertia matrix, expressing the body inertia relative to the
inertial frame.
- fullinertia (real[6], optional): full inertia matrix M (Ixx, Iyy, Izz, Ixy, Ixz, Iyz)."
Args:
body (Body): body data structure instance.
inertial_tag (ET.Element, None): XML element.
"""
if inertial_tag is not None:
# instantiate inertial data structure
inertial = Inertial()
# position and orientation
position = inertial_tag.attrib.get('pos')
if position is not None:
inertial.position = position
orientation = self._get_orientation(inertial_tag.attrib)
if orientation is not None:
inertial.orientation = orientation
# mass and inertia
inertial.mass = inertial_tag.attrib.get('mass')
inertia = inertial_tag.attrib.get('diaginertia')
if inertia is not None:
inertial.inertia = inertia
else:
inertial.inertia = inertial_tag.attrib.get('fullinertia')
# add inertial element to body
body.add_inertial(inertial)
def _parse_geom(self, body, geom_tag):
"""
Parse the geom tag if present, and set the visuals, and collisions to the given body. It can also set the
inertial elements if it was not defined previously.
From the main documentation [2]: "This element creates a geom, and attaches it rigidly to the body within
which the geom is defined. Multiple geoms can be attached to the same body. At runtime they determine the
appearance and collision properties of the body. At compile time they can also determine the inertial
properties of the body, depending on the presence of the inertial element and the setting of the
inertiafromgeom attribute of compiler. This is done by summing the masses and inertias of all geoms attached
to the body with geom group in the range specified by the inertiagrouprange attribute of compiler. The geom
masses and inertias are computed using the geom shape, a specified density or a geom mass which implies a
density, and the assumption of uniform density.
Attributes:
- name (string, optional): Name of the geom.
- class (string, optional): Defaults class for setting unspecified attributes.
- type (string, [plane, hfield, sphere, capsule, ellipsoid, cylinder, box, mesh], "sphere"): Type of
geometric shape
- contype (int, "1"): This attribute and the next specify 32-bit integer bitmasks used for contact
filtering of dynamically generated contact pairs. Two geoms can collide if the contype of one geom is
compatible with the conaffinity of the other geom or vice versa. Compatible means that the two bitmasks
have a common bit set to 1.
Args:
body (Body): body data structure instance.
geom_tag (ET.Element): geom XML field.
"""
# visual #
attrib = geom_tag.attrib
dtype = attrib.get('type')
visual = Visual(name=attrib.get('name'), dtype=dtype, color=attrib.get('rgba'))
# set position and orientation
visual.position = attrib.get('pos')
visual.orientation = self._get_orientation(attrib)
# compute size (rescale them)
if dtype == 'plane':
size = attrib.get('size')
if dtype in {'capsule', 'cylinder', 'ellipsoid', 'box'}:
fromto = attrib.get('fromto')
if fromto is not None:
fromto = np.array([float(n) for n in fromto.split()])
from_pos, to_pos = fromto[:3], fromto[3:]
v = to_pos - from_pos
pos = from_pos + v / 2.
length = np.linalg.norm(v)
z = v / length
z_ = np.array([0., 0., 1.])  # old z axis
x = np.cross(z, z_)
norm_x = np.linalg.norm(x)
x = x / norm_x if norm_x > 1e-9 else np.array([1., 0., 0.])  # guard: fromto parallel to the old z axis
y = np.cross(z, x)
rot = np.array([x, y, z]).T
# set new position and orientation
visual.position = pos
visual.orientation = rot
# set size
if dtype == 'capsule':
size = None # TODO
# check texture
material_name = attrib.get('material') # TODO
if 'material' in self.assets: # and material in :
# material =
# visual.material =
pass
# check mesh
mesh = attrib.get('mesh')
if mesh is not None:
# get the mesh from the assets
mesh_dict = self.assets.get('mesh')
if mesh_dict is not None:
mesh = mesh_dict.get(mesh)
# check mesh format
# get the texture for the mesh
# set visual to body
body.add_visual(visual)
# collision #
if not (attrib.get('contype') == "0" and attrib.get('conaffinity') == "0"):
# copy collision shape information from visual shape
collision = Collision()
collision.name = visual.name
collision.frame = visual.frame
collision.geometry = visual.geometry
body.add_collision(collision)
# inertial # just the mass
# if the <inertial> tag was not given, compute based on information in geom
if body.inertial is None:
inertial = Inertial()
# if mesh, load it in memory
if dtype == 'mesh':
mesh = trimesh.load(mesh)
if not mesh.is_watertight: # mesh.is_convex
raise ValueError("Could not compute the volume because the mesh is not watertight...")
# get mass
mass = inertial.mass
if mass is None:
# if the mass is defined
if attrib.get('mass') is not None:
inertial.mass = attrib.get('mass')
mass = inertial.mass # this makes the conversion to float
# if the mass is not defined, compute it from the density
else:
density = float(attrib.get('density', 1000.))
dimensions = float(attrib.get('fitscale', 1)) if dtype == 'mesh' else visual.size
mass = Inertial.compute_mass_from_density(shape=dtype, dimensions=dimensions, density=density,
mesh=mesh)
inertial.mass = mass
# compute inertia if not given
if inertial.inertia is None:
dimensions = float(attrib.get('fitscale', 1)) if dtype == 'mesh' else visual.size
inertia = Inertial.compute_inertia(shape=dtype, dimensions=dimensions, mass=mass, mesh=mesh)
inertial.inertia = inertia
# add inertial in body
body.add_inertial(inertial)
def _parse_site(self, body, site_tag, site_idx):
"""
Parse the site XML field.
From the main documentation [1]: "Sites are light geoms. They have the same appearance properties but cannot
participate in collisions and cannot be used to infer body masses. On the other hand sites can do things that
geoms cannot do: they can specify the volumes of touch sensors, the attachment of IMU sensors, the routing of
spatial tendons, the end-points of slider-crank actuators. These are all spatial quantities, and yet they do
not correspond to entities that should have mass or collide with other entities - which is why the site element
was created. Sites can also be used to specify points (or rather frames) of interest to the user."
Args:
body (Body): body data structure instance.
site_tag (ET.Element): site XML field.
site_idx (int): site index.
"""
# visual #
pass
def _parse_contact(self, parent_tag):
"""
Parse contact XML field.
Args:
parent_tag (ET.Element): parent XML element to check if it has a 'compiler' tag.
"""
pass
def _parse_equality_constraint(self, parent_tag):
"""
Parse the equality XML field.
Args:
parent_tag (ET.Element): parent XML element to check if it has a 'compiler' tag.
"""
pass
def _parse_actuator(self, parent_tag):
"""
Parse the actuator XML field.
Args:
parent_tag (ET.Element): parent XML element to check if it has a 'compiler' tag.
"""
pass
def _parse_sensor(self, parent_tag):
"""
Parse the sensor XML field.
Args:
parent_tag (ET.Element): parent XML element to check if it has a 'compiler' tag.
"""
pass
<gh_stars>10-100
import torch
import torch.nn as nn
import numpy as np
import cell_level_search
from genotypes import PRIMITIVES
import torch.nn.functional as F
from operations import *
from decoding_formulas import Decoder
class AutoDeeplab (nn.Module) :
def __init__(self, num_classes, num_layers, criterion = None, \
filter_multiplier = 8, block_multiplier_d = 4, block_multiplier_c = 5, \
step_d = 4, step_c = 5, distributed_layer=5, cell=cell_level_search.Cell):
super(AutoDeeplab, self).__init__()
self.cells = nn.ModuleList()
self._num_layers = num_layers
self._num_classes = num_classes
self._step_d = step_d
self._step_c = step_c
self._block_multiplier_d = block_multiplier_d
self._block_multiplier_c = block_multiplier_c
self._filter_multiplier = filter_multiplier
self._criterion = criterion
self.distributed_layer = distributed_layer
self._initialize_alphas_betas ()
f_initial = int(self._filter_multiplier)
half_f_initial = int(f_initial / 2)
self.stem0 = nn.Sequential(
nn.Conv2d(3, half_f_initial * self._block_multiplier_d, 3, stride=2, padding=1),
nn.BatchNorm2d(half_f_initial* self._block_multiplier_d),
nn.ReLU ()
)
self.stem1 = nn.Sequential(
nn.Conv2d(half_f_initial* self._block_multiplier_d, half_f_initial* self._block_multiplier_d, 3, stride=1, padding=1),
nn.BatchNorm2d(half_f_initial* self._block_multiplier_d),
nn.ReLU ()
)
self.stem2 = nn.Sequential(
nn.Conv2d(half_f_initial* self._block_multiplier_d, f_initial* self._block_multiplier_d, 3, stride=2, padding=1),
nn.BatchNorm2d(f_initial* self._block_multiplier_d),
nn.ReLU ()
)
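# Shape sketch (assuming an H x W input): stem0 halves the resolution
# (stride 2), stem1 keeps it, and stem2 halves it again, so the first
# self.level_4 feature map is H/4 x W/4 with f_initial * block_multiplier_d channels.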
# intitial_fm = C_initial
for i in range (self._num_layers) :
if i == 0 :
cell1 = cell (self._step_d, self._block_multiplier_d, -1,
None, f_initial, None,
self._filter_multiplier)
cell2 = cell (self._step_d, self._block_multiplier_d, -1,
f_initial, None, None,
self._filter_multiplier * 2)
self.cells += [cell1]
self.cells += [cell2]
elif i == 1 :
cell1 = cell (self._step_d, self._block_multiplier_d, f_initial,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier)
cell2 = cell (self._step_d, self._block_multiplier_d, -1,
self._filter_multiplier, self._filter_multiplier * 2, None,
self._filter_multiplier * 2)
cell3 = cell (self._step_d, self._block_multiplier_d, -1,
self._filter_multiplier * 2, None, None,
self._filter_multiplier * 4)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
elif i == 2 :
cell1 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier)
cell2 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier * 2,
self._filter_multiplier, self._filter_multiplier * 2, self._filter_multiplier * 4,
self._filter_multiplier * 2)
cell3 = cell (self._step_d, self._block_multiplier_d, -1,
self._filter_multiplier * 2, self._filter_multiplier * 4, None,
self._filter_multiplier * 4)
cell4 = cell (self._step_d, self._block_multiplier_d, -1,
self._filter_multiplier * 4, None, None,
self._filter_multiplier * 8)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
elif i == 3 :
cell1 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier)
cell2 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier * 2,
self._filter_multiplier, self._filter_multiplier * 2, self._filter_multiplier * 4,
self._filter_multiplier * 2)
cell3 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier * 4,
self._filter_multiplier * 2, self._filter_multiplier * 4, self._filter_multiplier * 8,
self._filter_multiplier * 4)
cell4 = cell (self._step_d, self._block_multiplier_d, -1,
self._filter_multiplier * 4, self._filter_multiplier * 8, None,
self._filter_multiplier * 8)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
elif i < distributed_layer :
cell1 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier)
cell2 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier * 2,
self._filter_multiplier, self._filter_multiplier * 2, self._filter_multiplier * 4,
self._filter_multiplier * 2)
cell3 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier * 4,
self._filter_multiplier * 2, self._filter_multiplier * 4, self._filter_multiplier * 8,
self._filter_multiplier * 4)
cell4 = cell (self._step_d, self._block_multiplier_d, self._filter_multiplier * 8,
self._filter_multiplier * 4, self._filter_multiplier * 8, None,
self._filter_multiplier * 8)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
elif i == distributed_layer:
cell1 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier, block_multiplier_d=self._block_multiplier_d
, dist_prev_prev=True)
cell2 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 2,
self._filter_multiplier, self._filter_multiplier * 2, self._filter_multiplier * 4,
self._filter_multiplier * 2, block_multiplier_d=self._block_multiplier_d
, dist_prev_prev=True)
cell3 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 4,
self._filter_multiplier * 2, self._filter_multiplier * 4, self._filter_multiplier * 8,
self._filter_multiplier * 4, block_multiplier_d=self._block_multiplier_d
, dist_prev_prev=True)
cell4 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 8,
self._filter_multiplier * 4, self._filter_multiplier * 8, None,
self._filter_multiplier * 8, block_multiplier_d=self._block_multiplier_d
, dist_prev_prev=True)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
elif i == distributed_layer+1:
cell1 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier, dist_prev_prev=True)
cell2 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 2,
self._filter_multiplier, self._filter_multiplier * 2, self._filter_multiplier * 4,
self._filter_multiplier * 2, dist_prev_prev=True)
cell3 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 4,
self._filter_multiplier * 2, self._filter_multiplier * 4, self._filter_multiplier * 8,
self._filter_multiplier * 4, dist_prev_prev=True)
cell4 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 8,
self._filter_multiplier * 4, self._filter_multiplier * 8, None,
self._filter_multiplier * 8, dist_prev_prev=True)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
else :
cell1 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier,
None, self._filter_multiplier, self._filter_multiplier * 2,
self._filter_multiplier)
cell2 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 2,
self._filter_multiplier, self._filter_multiplier * 2, self._filter_multiplier * 4,
self._filter_multiplier * 2)
cell3 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 4,
self._filter_multiplier * 2, self._filter_multiplier * 4, self._filter_multiplier * 8,
self._filter_multiplier * 4)
cell4 = cell (self._step_c, self._block_multiplier_c, self._filter_multiplier * 8,
self._filter_multiplier * 4, self._filter_multiplier * 8, None,
self._filter_multiplier * 8)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
self.aspp_device_4 = nn.Sequential (
ASPP (self._filter_multiplier * self._block_multiplier_d, self._num_classes, 24, 24) #96 / 4 as in the paper
)
self.aspp_device_8 = nn.Sequential (
ASPP (self._filter_multiplier * 2 * self._block_multiplier_d, self._num_classes, 12, 12) #96 / 8
)
self.aspp_device_16 = nn.Sequential (
ASPP (self._filter_multiplier * 4 * self._block_multiplier_d, self._num_classes, 6, 6) #96 / 16
)
self.aspp_device_32 = nn.Sequential (
ASPP (self._filter_multiplier * 8 * self._block_multiplier_d, self._num_classes, 3, 3) #96 / 32
)
self.aspp_4 = nn.Sequential (
ASPP (self._filter_multiplier * self._block_multiplier_c, self._num_classes, 24, 24) #96 / 4 as in the paper
)
self.aspp_8 = nn.Sequential (
ASPP (self._filter_multiplier * 2 * self._block_multiplier_c, self._num_classes, 12, 12) #96 / 8
)
self.aspp_16 = nn.Sequential (
ASPP (self._filter_multiplier * 4 * self._block_multiplier_c, self._num_classes, 6, 6) #96 / 16
)
self.aspp_32 = nn.Sequential (
ASPP (self._filter_multiplier * 8 * self._block_multiplier_c, self._num_classes, 3, 3) #96 / 32
)
def forward (self, x) :
#TODO: GET RID OF THESE LISTS, we dont need to keep everything.
#TODO: Is this the reason for the memory issue ?
self.level_4 = []
self.level_8 = []
self.level_16 = []
self.level_32 = []
temp = self.stem0 (x)
temp = self.stem1 (temp)
self.level_4.append (self.stem2 (temp))
count = 0
normalized_betas = torch.randn(self._num_layers, 4, 3).cuda().half()  # placeholder tensor; its entries are overwritten with softmaxed betas below
# Softmax on alphas and betas
if torch.cuda.device_count() > 1:
img_device = torch.device('cuda', x.get_device())
normalized_alphas_d = F.softmax(self.alphas_d.to(device=img_device), dim=-1)
normalized_alphas_c = F.softmax(self.alphas_c.to(device=img_device), dim=-1)
# normalized_betas[layer][ith node][0 : ➚, 1: ➙, 2 : ➘]
for layer in range (len(self.betas)):
if layer == 0:
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:].to(device=img_device), dim=-1)
elif layer == 1:
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:].to(device=img_device), dim=-1)
normalized_betas[layer][1] = F.softmax (self.betas[layer][1].to(device=img_device), dim=-1)
elif layer == 2:
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:].to(device=img_device), dim=-1)
normalized_betas[layer][1] = F.softmax (self.betas[layer][1].to(device=img_device), dim=-1)
normalized_betas[layer][2] = F.softmax (self.betas[layer][2].to(device=img_device), dim=-1)
else :
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:].to(device=img_device), dim=-1)
normalized_betas[layer][1] = F.softmax (self.betas[layer][1].to(device=img_device), dim=-1)
normalized_betas[layer][2] = F.softmax (self.betas[layer][2].to(device=img_device), dim=-1)
normalized_betas[layer][3][:2] = F.softmax (self.betas[layer][3][:2].to(device=img_device), dim=-1)
else:
normalized_alphas_d = F.softmax(self.alphas_d, dim=-1)
normalized_alphas_c = F.softmax(self.alphas_c, dim=-1)
for layer in range (len(self.betas)):
if layer == 0:
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1)
elif layer == 1:
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1)
normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
elif layer == 2:
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1)
normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
normalized_betas[layer][2] = F.softmax (self.betas[layer][2], dim=-1)
else :
normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1)
normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
normalized_betas[layer][2] = F.softmax (self.betas[layer][2], dim=-1)
normalized_betas[layer][3][:2] = F.softmax (self.betas[layer][3][:2], dim=-1)
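# Indexing example: normalized_betas[2][1] holds the softmaxed (up, straight, down)
# transition weights out of the stride-8 node at layer 2; border nodes only get
# their feasible subset normalized (e.g. [0][1:], since the stride-4 node has no 'up' transition).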
for layer in range (self._num_layers) :
if layer == 0 :
level4_new, = self.cells[count] (None, None, self.level_4[-1], None, normalized_alphas_d)
count += 1
level8_new, = self.cells[count] (None, self.level_4[-1], None, None, normalized_alphas_d)
count += 1
level4_new = normalized_betas[layer][0][1] * level4_new
level8_new = normalized_betas[layer][0][2] * level8_new
self.level_4.append (level4_new)
self.level_8.append (level8_new)
elif layer == 1 :
level4_new_1, level4_new_2 = self.cells[count] (self.level_4[-2],
None,
self.level_4[-1],
self.level_8[-1],
normalized_alphas_d)
count += 1
level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
level8_new_1, level8_new_2 = self.cells[count] (None,
self.level_4[-1],
self.level_8[-1],
None,
normalized_alphas_d)
count += 1
level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2
level16_new, = self.cells[count] (None,
self.level_8[-1],
None,
None,
normalized_alphas_d)
level16_new = normalized_betas[layer][1][2] * level16_new
count += 1
self.level_4.append (level4_new)
self.level_8.append (level8_new)
self.level_16.append (level16_new)
elif layer == 2 :
level4_new_1, level4_new_2 = self.cells[count] (self.level_4[-2],
None,
self.level_4[-1],
self.level_8[-1],
normalized_alphas_d)
count += 1
level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (self.level_8[-2],
self.level_4[-1],
self.level_8[-1],
self.level_16[-1],
normalized_alphas_d)
count += 1
level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
# Copyright 2020-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Count remaining (non-zero) weights in the encoder (i.e. the transformer layers).
Sparsity and remaining weights levels are equivalent: sparsity % = 100 - remaining weights %.
"""
import click, click_log
import torch
import shutil
import json
import sh
import os
import contextlib
from pathlib import Path
from transformers import BertForQuestionAnswering, TFBertForQuestionAnswering, BertConfig
from block_movement_pruning.emmental.modules import MaskedLinear
from block_movement_pruning.emmental.utils import read_info, check_is_ampere_sparse, nnz_blocks_count
import jinja2
import logging
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
class ModelPostProcessor:
def __init__(self, path):
self.path = Path(path).resolve()
def add_parameter(self, name, original_parameter, parameter, is_linear_layer_weight):
raise NotImplementedError("Please implement add_parameter in your ModelPostProcessor subclass")
def finish(self):
raise NotImplementedError("Please implement finish in your ModelPostProcessor subclass")
def run(self):
st = torch.load(self.path / "pytorch_model.bin", map_location="cuda")
logging.info("name".ljust(60, " "), "Remaining Weights %", "Remaining Weight")
for name, param in st.items():
if "encoder" not in name:
self.add_parameter(name, param, None, is_linear_layer_weight=False)
continue
if name.endswith(".weight") and "LayerNorm" not in name:
weights = MaskedLinear.masked_weights_from_state_dict(st, name, **self.mask_args)
self.add_parameter(name, param, weights, is_linear_layer_weight=True)
elif MaskedLinear.check_name(name):
pass
else:
self.add_parameter(name, param, None, is_linear_layer_weight=False)
return self.finish()
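# Minimal sketch of the template-method contract that run() relies on: a
# subclass supplies add_parameter() and finish(), and must expose mask_args
# (as ParameterCounter below does). This subclass is illustrative, not part
# of the original tool.
class ParameterLister(ModelPostProcessor):
"""Collects the names of all encoder linear-layer weights."""
def __init__(self, path, mask_args):
super().__init__(path)
self.mask_args = mask_args  # forwarded to MaskedLinear.masked_weights_from_state_dict by run()
self.names = []
def add_parameter(self, name, original_parameter, parameter, is_linear_layer_weight):
if is_linear_layer_weight:
self.names.append(name)
def finish(self):
return self.names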
class ParameterCounter(ModelPostProcessor):
def __init__(self, path):
super().__init__(path)
self.info = read_info(self.path)
self.config = self.info["config"]
self.args = self.info["args"]
self.mask_args = self.masked_weights_args(self.args)
self.ampere_enabled = self.mask_args["ampere_pruning_method"] != "disabled"
self.mask_block_rows, self.mask_block_cols = self.args["mask_block_rows"], self.args["mask_block_cols"]
self.block_sparse_enabled = self.mask_block_rows > 1 or self.mask_block_cols > 1
self.is_ampere_valid = True
self.nnz_total = 0
self.blocks_total = 0
self.remaining_count = 0 # Number of remaining (not pruned) params in the encoder
self.encoder_count = 0 # Number of params in the encoder
def masked_weights_args(self, args):
ret = dict(pruning_method=args["pruning_method"],
pruning_submethod=args.get("pruning_submethod", "default"),
threshold=args["final_threshold"],
ampere_pruning_method=args["ampere_pruning_method"],
mask_block_rows=args["mask_block_rows"],
mask_block_cols=args["mask_block_cols"],
rows_shuffle_r=None, # TODO : some methods requires
cols_shuffle_r=None, # TODO
kind=None # TODO
)
return ret
def add_parameter(self, name, original_parameter, parameter, is_linear_layer_weight):
if is_linear_layer_weight:
if self.ampere_enabled:
if not check_is_ampere_sparse(parameter.t()):
logger.debug(f"{name} is not ampere sparse")
self.is_ampere_valid = False
if self.block_sparse_enabled:
nnz, blocks = nnz_blocks_count(parameter, self.mask_block_rows, self.mask_block_cols)
self.nnz_total += nnz
self.blocks_total += blocks
logger.debug(f"{name} block density {nnz}/{blocks}={nnz / blocks}")
mask_ones = (parameter != 0).sum().item()
logger.debug(name.ljust(60, " "), str(round(100 * mask_ones / original_parameter.numel(), 3)).ljust(20, " "),
str(mask_ones), str(original_parameter.numel()))
self.remaining_count += mask_ones
self.encoder_count += original_parameter.numel()
else:
logger.debug(f"Adding {name}: {original_parameter.numel()}")
self.remaining_count += original_parameter.numel()
self.encoder_count += original_parameter.numel()
def finish(self):
ret = {}
ret["parameters"] = self.encoder_count
ret["nnz_parameters"] = self.remaining_count
ret["global_density"] = self.remaining_count / self.encoder_count
ret["ampere"] = self.ampere_enabled
if self.ampere_enabled:
ret["is_ampere_valid"] = self.is_ampere_valid
ret["block_sparse"] = self.block_sparse_enabled
if self.block_sparse_enabled:
nnz_total = self.nnz_total
blocks_total = self.blocks_total
total_block_density = nnz_total / blocks_total
ret["block_sparse_nnz"] = nnz_total
ret["block_sparse_total"] = blocks_total
ret["block_sparse_density"] = total_block_density
threshold = self.mask_args['threshold']
if abs(total_block_density - threshold) > 0.001:
if self.mask_args["pruning_method"] == "sigmoied_threshold":
ret["is_block_sparse_valid"] = True
else:
ret["is_block_sparse_valid"] = False
logger.debug(
f"Total block density {nnz_total}/{blocks_total}={total_block_density * 100:0.2f}% !!!!=== {threshold * 100}%=threshold")
logger.debug("TOTAL BLOCK SPARSITY VERY DIFFERENT FROM THRESHOLD")
else:
ret["is_block_sparse_valid"] = True
logger.debug(
f"Total block density {nnz_total}/{blocks_total}={total_block_density * 100:0.2f}% ~ {threshold * 100}%=threshold")
self.info["check_report"] = ret
self.info["original_path"] = str(self.path)
return self.info
class Normalizer(ParameterCounter):
def __init__(self, path, dest_path):
super().__init__(path)
self.dest_path = Path(dest_path).resolve() if dest_path is not None else self.path.parent / f"normalized_{self.path.name}"
self.pruned_model = {}
def add_parameter(self, name, original_parameter, parameter, is_linear_layer_weight):
super().add_parameter(name=name,
original_parameter = original_parameter,
parameter=parameter,
is_linear_layer_weight=is_linear_layer_weight)
if is_linear_layer_weight:
self.pruned_model[name] = parameter
else:
self.pruned_model[name] = original_parameter
def finish(self):
report = super().finish()
if not self.dest_path.exists():
shutil.copytree(self.path, self.dest_path)
logger.debug(f"\nCreated folder {self.dest_path}")
logger.debug(f"Saving to {self.dest_path}")
torch.save(self.pruned_model, self.dest_path / "pytorch_model.bin")
s = json.dumps(report)
with (self.dest_path / "report.json").open("w") as report_file:
report_file.write(s)
return report
class BestCheckpointExtractor():
def __init__(self, input_dir, output_dir, minf1 = 70):
self.input_dir = Path(input_dir).resolve()
self.output_dir = Path(output_dir).resolve()
self.minf1 = minf1
def run(self):
maxf1 = 0
best_checkpoint_dir = None
for p in os.listdir(self.input_dir):
if p.startswith("checkpoint-"):
checkpoint_dir = self.input_dir / p
report = read_info(checkpoint_dir)
if report["result"] is None:
continue
f1 = report["result"]["f1"]
if f1 > self.minf1:
if maxf1 < f1:
maxf1 = f1
best_checkpoint_dir = checkpoint_dir
if best_checkpoint_dir is not None:
dest = self.output_dir / self.input_dir.name
if not dest.exists():
shutil.copytree(best_checkpoint_dir, dest, dirs_exist_ok=False)
with (dest / "original_directory.txt").open("w") as f:
f.write(str(self.input_dir) + "\n")
class Evaluator():
def __init__(self, path, patch_model):
self.path = Path(path).resolve()
self.patch_model = patch_model
def run(self):
path = str(self.path)
command = ['masked_run_squad.py',
'--model_type',
'bert',
'--model_name_or_path',
path,
'--tokenizer_name',
'bert-base-uncased',
'--output_dir',
path,
'--data_dir',
'squad_data',
'--predict_file',
'dev-v1.1.json',
'--do_eval',
'--do_lower_case',
'--per_gpu_eval_batch_size',
'16',
'--max_seq_length',
'384',
'--doc_stride',
'128']
if self.patch_model:
command.append("--patch_model_for_eval")
sh.python(command, _fg=True)
class PackagerException(Exception):
pass
class BadF1ModelException(PackagerException):
pass
@contextlib.contextmanager
def cd(path):
old_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_path)
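# Usage example: run a command from inside another directory and restore the
# previous working directory afterwards (the path is illustrative):
#
#     with cd("/tmp/some-repo"):
#         sh.git("status")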
class Packager:
def __init__(self, src_path, git_base_path, minimal_f1 = 80):
self.src_path = Path(src_path).resolve()
self.git_base_path = Path(git_base_path).resolve()
self.version = 1
self.minimal_f1 = minimal_f1
def load_info(self):
self.info_from_files = read_info(self.src_path)
with (self.src_path / "report.json").open("r") as f:
self.report = self.rewrite_report(json.loads(f.read()))
self.model_owner_name = "madlag" # sh.transformers_cli("whoami").split("\n")[0]
self.model_name = self.build_model_name()
print(self.model_name)
def rewrite_report(self, report):
del report["args"]["output_dir"]
del report["args"]["device"]
del report["original_path"]
f1_ref = self.info_from_files["result"]["f1"]
exact_ref = self.info_from_files["result"]["exact"]
f1 = report["result"]["f1"]
exact = report["result"]["exact"]
for renaming in ("check_report", "sparsity"), ("result", "precision"):
report[renaming[1]] = report[renaming[0]]
del report[renaming[0]]
assert (abs(f1 - f1_ref) < 1e-5)
assert (abs(exact - exact_ref) < 1e-5)
performance = {}
for suffix, type in ("", "dense"), ("_patched", "pytorch_block_sparse"):
e = json.loads((self.src_path / f"evaluation_timings_{suffix}.json").open().read())
performance[type] = e
for k in performance:
performance[k]["eval_elapsed_time"] = performance[k]["elapsed_time"]
del performance[k]["elapsed_time"]
performance["speedup"] = performance["dense"]["eval_elapsed_time"] / performance["pytorch_block_sparse"][
"eval_elapsed_time"]
report["performance"] = performance
report["sparsity"]["block_size"] = (
report["config"]["mask_block_rows"], report["config"]["mask_block_cols"])
return report
def build_model_name(self):
is_ampere = self.report["config"]["ampere_pruning_method"] == "annealing"
density = self.report["sparsity"]["block_sparse_density"]
if is_ampere:
density /= 2
name = f"bert-base-uncased-squad1.1-block-sparse-{density:.2f}"
if is_ampere:
name += "-ampere"
name += f"-v{self.version}"
return name
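# Illustrative outputs (made-up densities): 0.25 without ampere yields
# "bert-base-uncased-squad1.1-block-sparse-0.25-v1"; with ampere the density
# is halved first, so 0.25 becomes "...block-sparse-0.12-ampere-v1".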
def check(self):
f1 = self.report["precision"]["f1"]
if f1 < self.minimal_f1:
raise BadF1ModelException(f"Model {self.model_name} f1={f1} does not reach given minimal f1={self.minimal_f1}")
else:
return True
def create_git(self):
git_path = self.git_base_path / self.model_owner_name / self.model_name
if not git_path.parent.exists():
git_path.parent.mkdir(parents=True)
if not git_path.exists():
sh.transformers_cli("repo", "create", "-y", f"{self.model_name}")
with cd(git_path.parent):
sh.git("clone", f"https://huggingface.co/{self.model_owner_name}/{self.model_name}")
return git_path
def create_readme(self):
model_card_path = Path("model_card")
(self.git_path / model_card_path).mkdir(exist_ok=True)
images = {}
images["pruning_image"] = "pruning.svg"
model_card_base_url = f"https://huggingface.co/{self.model_owner_name}/{self.model_name}/raw/main/model_card"
urlbase = f"{model_card_base_url}/layer_images"
js_path = f"/{self.model_owner_name}/{self.model_name}/raw/main/model_card/density.js"
if True:
from block_movement_pruning.model_card_graphics import PruningInfoPlotter, DensityPlotter
p = PruningInfoPlotter(self.report["sparsity"]["pruned_heads"], self.report["config"]["num_attention_heads"])
p.plot()
# TEMPORARY
model = BertForQuestionAnswering.from_pretrained(self.src_path)
dp = DensityPlotter(model, self.git_path / model_card_path / "layer_images", url_base=urlbase, js_path=js_path)
dp.plot()
js, density_html = dp.get_html()
(self.git_path / model_card_path / "density.js").open("w").write(js)
p.save_image(str(self.git_path / model_card_path / images["pruning_image"]))
pruned_heads_graphic = p.get_html()
else:
density_html = ""
template_file = Path(__file__).parent / "README_MODEL.md.jinja"
template = jinja2.Template(template_file.open().read())
ret = template.render(**self.report, images=images, burl=model_card_base_url, density_html=density_html)
with (self.git_path / "README.md").open("w") as readme_file:
readme_file.write(ret)
def copy_model_files(self):
modified = False
from pytorch_block_sparse.util import BertHeadsPruner
if not (self.git_path / "tf_model.h5").exists():
tf_model = TFBertForQuestionAnswering.from_pretrained(self.src_path, from_pt=True)
tf_model.save_pretrained(self.git_path)
modified = True
devel = True
if not (self.git_path / "pytorch_model.bin").exists() or devel:
model = BertForQuestionAnswering.from_pretrained(self.src_path)
to_prune, head_count = BertHeadsPruner(model).get_pruned_heads()
model.prune_heads(to_prune)
config = model.config
config.pruned_heads = to_prune
self.report["sparsity"]["pruned_heads"] = to_prune
config.block_size = [config.mask_block_rows, config.mask_block_cols]
KEYS_TO_DELETE = ["pruning_submethod", "shuffling_method", "in_shuffling_group", "out_shuffling_group"]
KEYS_TO_DELETE += ["ampere_mask_init", "ampere_pruning_method", "ampere_mask_scale", "mask_init", "mask_scale"]
KEYS_TO_DELETE += ["pruning_method", "mask_block_rows", "mask_block_cols", "gradient_checkpointing"]
KEYS_TO_DELETE += ["initializer_range", "intermediate_size", "hidden_dropout_prob", "layer_norm_eps"]
for key in KEYS_TO_DELETE:
delattr(config, key)
config.architectures = ["BertForQuestionAnswering"]
config.name_or_path = f"{self.model_owner_name}/{self.model_name}"
model.save_pretrained(self.git_path)
modified = True
self.report["sparsity"]["total_pruned_attention_heads"] = sum([len(t) for t in to_prune.values()])
self.report["sparsity"]["total_attention_heads"] = self.report["config"]["num_attention_heads"] * self.report["config"]["num_hidden_layers"]
self.report["packaging"] = {}
self.report["packaging"]["pytorch_final_file_size"] = os.stat(self.git_path / "pytorch_model.bin").st_size
self.report["packaging"]["model_owner"] = self.model_owner_name
self.report["packaging"]["model_name"] = f"{self.model_owner_name}/{self.model_name}"
#PRODUCED_PATHES = ["dev-v1.1.json", "nbest_predictions_.json", "predictions_.json"]
FILES = "special_tokens_map.json", "tokenizer_config.json", "vocab.txt" #, "report.json"
for file in FILES:
if not (self.git_path / file).exists():
shutil.copyfile(self.src_path / file, self.git_path / file)
modified = True
if not (self.git_path / "model_meta.json").exists() or devel:
with (self.git_path / "model_meta.json").open("w") as file:
report_string = pretty_json(self.report)
file.write(report_string)
else:
self.report = json.loads((self.git_path / "model_meta.json").open().read())
return modified
def add_files(self):
# "pytorch_model.bin", "tf_model.h5"
files = ["config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.txt"]
files += ["README.md", "model_meta.json", "model_card/pruning.svg", "model_card/layer_images", "model_card/density.js"]
with cd(self.git_path):
sh.git("add", *files, _fg=True)
def commit(self):
with cd(self.git_path):
#sh.git("status", _fg=True)
sh.git("commit", "-m", "Adding modes, graphs and metadata.", _fg=True)
def push(self):
with cd(self.git_path):
sh.git("status", _fg=True)
sh.git("push", _fg=True)
def test(self):
# Download the model and do some basic stuff
pass
def run(self):
"https://huggingface.co/madlag/bert-base-uncased-squad-v1-sparse0.25/raw/main/config.json"
self.load_info()
self.check()
self.git_path = self.create_git()
self.copy_model_files()
self.create_readme()
#if modified:
self.add_files()
self.commit()
self.push()
@click.group()
@click_log.simple_verbosity_option(logger, default="INFO")
@click.pass_context
def cli(ctx):
ctx.obj = {}
import sys
indent = 4
if sys.version_info.major ==
+ range(0xD66, 0xD6F+1)
+ range(0xE50, 0xE59+1) + range(0xED0, 0xED9+1)
+ range(0xF20, 0xF29+1))
is_digit_char = _make_set_predicate(DIGIT_CHARS)
EXTENDING_CHARS = tuple(
[0xB7, 0x2D0, 0x2D1, 0x387, 0x640, 0xE46, 0xEC6, 0x3005]
+ range(0x3031, 0x3035+1) + range(0x309D, 0x309E+1)
+ range(0x30FC, 0x30FE+1))
is_extending_char = _make_set_predicate(EXTENDING_CHARS)
IDEOGRAPHIC_CHARS = tuple(
range(0x4E00, 0x9FA5+1) + range(0x3021, 0x3029+1))
is_ideographic_char = _make_set_predicate(IDEOGRAPHIC_CHARS)
LETTER_CHARS = BASE_CHARS + IDEOGRAPHIC_CHARS
is_letter_char = _make_set_predicate(LETTER_CHARS)
NAME_CHARS = LETTER_CHARS + DIGIT_CHARS + (46, 45, 95, 58) \
+ COMBINING_CHARS + EXTENDING_CHARS
is_name_char = _make_set_predicate(NAME_CHARS)
del _make_set_predicate
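# Illustrative predicate results: is_letter_char(ord("A")) and
# is_digit_char(0x30) are true, while is_name_char(ord(" ")) is false.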
def parse_Name(self, where):
s, u = self.get_char_and_unicode()
if not self.is_name_char(u):
raise ParseError("illegal character in name: %s (%d)" % (`s`, u))
i = 1
while 1:
c, u = self.get_char_and_unicode(i)
if u not in self.NAME_CHARS:
break
i = i + 1
s = s + c
self.discard_chars(i)
return s
def parse_ExternalID(self):
str = self.get_ascii(6)
if str == "PUBLIC":
# public system id w/ optional system id
self.discard_chars(len(str))
self.require_whitespace("ExternalID")
id = self.get_quoted_string()
if not id:
raise ParseError("could not parse doctype declaration:"
" bad public id")
self.values.public_id = id
self.require_whitespace("ExternalID")
self.values.system_id = self.get_quoted_string()
elif str == "SYSTEM":
# system id
self.discard_chars(len(str))
self.require_whitespace("ExternalID")
id = self.get_quoted_string()
if not id:
raise ParseError("could not parse doctype declaration:"
" bad system id")
self.values.system_id = id
else:
raise ParseError("illegal external ID")
def get_quoted_string(self):
c, u = self.get_char_and_unicode()
if u not in (34, 39):
raise ParseError("illegal quoted string")
self.discard_chars(1)
quote_mark = u
s = ''
while 1:
c, u = self.get_char_and_unicode()
if not c:
raise ParseError("could not find end of quoted string")
self.discard_chars(1)
if u == quote_mark:
break
s = s + c
return s
def skip_comment(self):
self.require_ascii("<!--", "comment")
self.skip_past_ascii("-->", "comment")
def skip_pi(self):
self.require_ascii("<?", "processing instruction")
self.skip_past_ascii("?>", "processing instruction")
def skip_to_doctype(self):
# This should probably be implemented by any extractor for which we
# care about performance.
while 1:
self.skip_whitespace()
try:
c = self.get_ascii(1)
except ConversionError:
self.discard_chars(1)
else:
if not c:
break
if c == "<":
# might be something interesting
try:
prefix = self.get_ascii(4)
except ConversionError:
# If this fails, assume there's something non-white in
# there; allow the exception to be raised since there's
# probably illegal data before the document element.
prefix = self.get_ascii(2)
if prefix == "<!--":
self.skip_comment()
elif prefix[:2] == "<?":
self.skip_pi()
else:
break
else:
# way bad!
raise ParseError("could not locate doctype declaration"
" or start of document element")
def skip_whitespace(self):
"""Trim leading whitespace, returning the number of characters
stripped.
The default implementation is slow; subclasses should override it.
"""
count = 0
try:
while 1:
c, u = self.get_char_and_unicode(count)
if not c:
break
if u not in (0x9, 0xA, 0xD, 0x20):
break
count = count + 1
except ConversionError:
pass
if count:
self.discard_chars(count)
return count
def require_whitespace(self, where):
"""Trim leading whitespace, returning the number of characters
stripped or raising ParseError is no whitespace was present."""
numchars = self.skip_whitespace()
if not numchars:
raise ParseError("required whitespace in " + where)
def get_ascii(self, count):
raise NotImplementedError
def get_char_and_unicode(self, index=0):
raise NotImplementedError
def require_ascii(self, str, where):
width = len(str)
data = self.get_ascii(width)
if data != str:
raise ParseError("required text '%s' missing in %s" % (str, where))
self.discard_chars(width)
def skip_past_ascii(self, str, what):
width = len(str)
initchar = str[0]
subs = range(1, width)
while 1:
try:
data = self.get_ascii(width)
except ConversionError:
self.discard_chars(1)
else:
if len(data) < width:
raise ParseError("could not locate end of " + what)
if data == str:
self.discard_chars(width)
return
for i in subs:
if data[i] == initchar:
self.discard_chars(i)
else:
self.discard_chars(width)
def discard_chars(self, count):
raise NotImplementedError
class ISO8859Extractor(Extractor):
__declattr_rx = re.compile(
"([a-z]*)=\"((?:[^?\"]|\?[^?>\"]|\?(?=\?))*)\"", re.MULTILINE)
__gi_rx = re.compile("[a-zA-Z_:][-a-zA-Z_:0-9.]*")
__id_rx = re.compile(r"""(?:'[^']*'|\"[^\"]*\")""",
re.MULTILINE | re.VERBOSE)
def yank_id(self):
self.require_whitespace("doctype declaration: ExternalID")
m = self.__id_rx.match(self.buffer)
if not m:
return None
self.buffer = self.buffer[m.end():]
return string.lstrip(m.group())[1:-1]
def parse_doctype(self):
self.require_ascii("<!DOCTYPE", "doctype declaration")
self.require_whitespace("doctype declaration")
m = self.__gi_rx.match(self.buffer)
if not m:
raise ParseError("could not parse doctype declaration: no name")
self.values.doc_elem = m.group()
self.discard_chars(len(self.values.doc_elem))
whitechars = self.skip_whitespace()
if not self.buffer:
raise ParseError("could not parse doctype declaration:"
" insufficient data")
if self.get_ascii(1) in ">[":
# reached internal subset or end of declaration; we're done
return
if not whitechars:
raise ParseError("whitespace required between document type and"
" document type declaration")
self.parse_ExternalID()
def skip_to_doctype(self):
while self.buffer:
self.buffer = string.lstrip(self.buffer)
if self.buffer[:4] == "<!--":
self.skip_comment()
elif self.buffer[:2] == "<?":
self.skip_pi()
else:
break
def skip_pi(self):
pos = string.find(self.buffer, "?>", 2)
if pos < 0:
raise ParseError("could not scan over processing instruction")
self.buffer = self.buffer[pos + 2:]
def skip_comment(self):
pos = string.find(self.buffer, "-->", 4)
if pos < 0:
raise ParseError("could not scan over comment")
self.buffer = self.buffer[pos + 4:]
def skip_whitespace(self):
old_buffer = self.buffer
self.buffer = string.lstrip(old_buffer)
return len(old_buffer) - len(self.buffer)
def get_ascii(self, count):
# not quite right, but good enough for now
return self.buffer[:count]
def get_char_and_unicode(self, index=0):
# really only good for iso-8859-1
c = self.buffer[index:index + 1]
if c:
return c, ord(c)
else:
return c, None
def discard_chars(self, count):
self.buffer = self.buffer[count:]
def lower(self, str):
return string.lower(str)
class ISO8859_1_Extractor(ISO8859Extractor):
Encodings = ("iso-8859-1", "iso-latin-1", "latin-1")
def get_ascii(self, count):
return self.buffer[:count]
def get_char_and_unicode(self, index=0):
c = self.buffer[index:index + 1]
if c:
return c, ord(c)
else:
return c, None
add_extractor_class(ISO8859_1_Extractor)
for c in "23456789":
class _Extractor(ISO8859Extractor):
Encodings = ("iso-8859-" + c,)
try:
_Extractor.__name__ = "ISO8859_%s_Extractor" % c
except TypeError:
# older Python versions wouldn't allow __name__ to be set on a class
pass
exec "ISO8859_%s_Extractor = _Extractor" % c
add_extractor_class(_Extractor)
del _Extractor
class UTF8Extractor(ISO8859Extractor):
Encodings = ("utf-8",)
def get_char_and_unicode(self, index=0):
raise NotImplementedError
add_extractor_class(UTF8Extractor)
class EBCDICExtractor(Extractor):
Encodings = ("ebcdic",)
# This table was taken from the source code of GNU recode 3.4.
__ASCII_TO_EBCDIC = [
0, 1, 2, 3, 55, 45, 46, 47, # 0 - 7
22, 5, 37, 11, 12, 13, 14, 15, # 8 - 15
16, 17, 18, 19, 60, 61, 50, 38, # 16 - 23
24, 25, 63, 39, 28, 29, 30, 31, # 24 - 31
64, 79, 127, 123, 91, 108, 80, 125, # 32 - 39
77, 93, 92, 78, 107, 96, 75, 97, # 40 - 47
240, 241, 242, 243, 244, 245, 246, 247, # 48 - 55
248, 249, 122, 94, 76, 126, 110, 111, # 56 - 63
124, 193, 194, 195, 196, 197, 198, 199, # 64 - 71
200, 201, 209, 210, 211, 212, 213, 214, # 72 - 79
215, 216, 217, 226, 227, 228, 229, 230, # 80 - 87
231, 232, 233, 74, 224, 90, 95, 109, # 88 - 95
121, 129, 130, 131, 132, 133, 134, 135, # 96 - 103
136, 137, 145, 146, 147, 148, 149, 150, # 104 - 111
151, 152, 153, 162, 163, 164, 165, 166, # 112 - 119
167, 168, 169, 192, 106, 208, 161, 7, # 120 - 127
32, 33, 34, 35, 36, 21, 6, 23, # 128 - 135
40, 41, 42, 43, 44, 9, 10, 27, # 136 - 143
48, 49, 26, 51, 52, 53, 54, 8, # 144 - 151
56, 57, 58, 59, 4, 20, 62, 225, # 152 - 159
65, 66, 67, 68, 69, 70, 71, 72, # 160 - 167
73, 81, 82, 83, 84, 85, 86, 87, # 168 - 175
88, 89, 98, 99, 100, 101, 102, 103, # 176 - 183
104, 105, 112, 113, 114, 115, 116, 117, # 184 - 191
118, 119, 120, 128, 138, 139, 140, 141, # 192 - 199
142, 143, 144, 154, 155, 156, 157, 158, # 200 - 207
159, 160, 170, 171, 172, 173, 174, 175, # 208 - 215
176, 177, 178, 179, 180, 181, 182, 183, # 216 - 223
184, 185, 186, 187, 188, 189, 190, 191, # 224 - 231
202, 203, 204, 205, 206, 207, 218, 219, # 232 - 239
220, 221, 222, 223, 234, 235, 236, 237, # 240 - 247
238, 239, 250, 251, 252, 253, 254, 255, # 248 - 255
]
_m = [None] * 256
for _i in range(len(__ASCII_TO_EBCDIC)):
_e = __ASCII_TO_EBCDIC[_i]
__ASCII_TO_EBCDIC[_i]
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import json
import random
import numpy as np
from pathlib import Path
from typing import Tuple, List
import math
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, LSTM, concatenate, GlobalMaxPooling2D, RepeatVector, GlobalAveragePooling2D, Dropout, TimeDistributed
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.applications import vgg16
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications import mobilenet_v2
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.utils import plot_model, Sequence, to_categorical, multi_gpu_model
from livelossplot.tf_keras import PlotLossesCallback
# from tensorboardcolab import *
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
tf.keras.backend.set_session(session)
import platform
print('Python', platform.python_version())
# In[25]:
import tensorflow
tensorflow.__version__
# In[26]:
COMMANDS = ['left', 'get_left_lane', 'keep_lane', 'straight', 'get_right_lane', 'right']
def command_to_onehot(command: str) -> np.array:
"""keep_lane -> [0. 0. 1. 0. 0. 0.]"""
all_zeros = np.zeros(len(COMMANDS))
all_zeros[COMMANDS.index(command)] = 1
return all_zeros
def onehot_to_command(onehot: np.array):
"""[0. 0. 1. 0. 0. 0.] -> keep_lane"""
return COMMANDS[np.argmax(onehot)]
def load_image(path: Path):
height, width, channels = 224, 224, 3
image = load_img(str(path), target_size=(height, width, channels))
return image
def load_preprocessed_image(path: Path, preprocessor='mobilenet_v2') -> np.ndarray:
"""preprocessor: mobilenet_v2 or vgg16"""
image = load_image(path)
image_arr = img_to_array(image)
height, width, channels = image_arr.shape
image_reshaped = image_arr.reshape(height, width, channels)
if preprocessor == 'mobilenet_v2':
image_preprocessed = mobilenet_v2.preprocess_input(image_reshaped)
else:
image_preprocessed = vgg16.preprocess_input(image_reshaped)
return image_preprocessed
def load_json(path: Path) -> Tuple[np.ndarray, str]:
with path.open() as f:
data = json.load(f)
return data
def get_spline(data: dict) -> np.array:
"""Spline is reshaped from (7,2) -> (14, 1).
1D np.array (first 7 distance values, then 7 angle values)
"""
waypoints = data['spline']
relative_distance = lambda waypoint: waypoint[0]
relative_angle = lambda waypoint: waypoint[1]
distances = [relative_distance(wp) for wp in waypoints]
angles = [relative_angle(wp) for wp in waypoints]
# normalized_dist = lambda dist: (dist - STATS['min_dist']) / (STATS['max_dist'] - STATS['min_dist'])
# normalized_angle = lambda angle: (angle - STATS['min_angle']) / (STATS['max_angle'] - STATS['min_angle'])
# distances = [normalized_dist(dist) for dist, _ in waypoints]
# angles = [normalized_angle(angle) for _, angle in waypoints]
return np.array(distances + angles)
def get_command_input(data: dict):
return command_to_onehot(data['command'])
# def (path: Path) -> Tuple[np.ndarray, str]:
# steering_angle = data.get('angle')
# is_intersection = data.get('intersection_ahead')
# return , command_onehot, steering_angle, is_intersection
#------------------------------------------------------------------------
# Visualization
def rel_point(point, angle, length):
'''
point - Tuple (x, y)
angle - Angle (OY perspective, not OX!)
length - Length of the line you want to plot.
'''
# unpack the first point
x, y = point
# find the end point
endx = length * math.sin(math.radians(angle)) + x
endy = length * math.cos(math.radians(angle)) + y
return endx, endy
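# Worked example: rel_point((0, 0), angle=90, length=2) returns (2.0, ~0.0),
# i.e. a step of length 2 to the right, because the angle is measured from
# the OY axis rather than OX.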
def plottable_spline(spline: np.array, shift_by: Tuple[float, float] = (200, 112), scale: float = 5) -> Tuple[np.array, np.array]:
"""Transform 1D array into points that can be easily visualized with plt.plot(xs, ys)."""
vertical_shift, horizontal_shift = shift_by
xs, ys = [], []
last_point = (0, 0)
distances_arr = true_dist(spline[:7])
angles_arr = true_angle(spline[7:14])
for rel_distance, rel_angle in zip(distances_arr, angles_arr):
x, y = rel_point(last_point, rel_angle, rel_distance)
xs.append(x)
ys.append(y)
last_point = (x, y)
xs = np.array(xs)
ys = np.array(ys)
xs = xs * scale + horizontal_shift
ys = vertical_shift - ys * scale
return xs, ys
# In[27]:
TRAIN_DATASET_DIRS = [
Path('/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset1+2'),
Path('/home/bwroblew/Datasets/waypoint_predition_combined/train_dataset3')
]
TEST_DATASET_DIRS = [
Path('/home/bwroblew/Datasets/waypoint_predition_combined/test_dataset1')
]
# In[28]:
def paths(dirs, pattern):
together = []
for directory in dirs:
together += list(directory.glob(pattern))
return together
img_train = paths(TRAIN_DATASET_DIRS, '*.png')
json_train = paths(TRAIN_DATASET_DIRS, '*.json')
assert len(img_train) == len(json_train)
print(f'{len(img_train)} images found in {TRAIN_DATASET_DIRS}')
img_test = paths(TEST_DATASET_DIRS, '*.png')
json_test = paths(TEST_DATASET_DIRS, '*.json')
assert len(img_test) == len(json_test)
print(f'{len(img_test)} images found in {TEST_DATASET_DIRS}')
print(f'{len(img_train) / (len(img_train)+len(img_test)) * 100}% is training data')
# In[29]:
def calculate_spline_stats(json_paths):
all_distances = []
all_angles = []
for idx, json_path in enumerate(json_paths):
data = load_json(json_path)
waypoints = data['spline']
distances = [dist for dist, _ in waypoints]
angles = [angle for _, angle in waypoints]
all_distances += distances
all_angles += angles
# print(f'{idx}/{len(json_paths)}')
assert len(all_distances) == len(all_angles)
plt.hist(all_distances, bins=100)
plt.show()
plt.hist(all_angles, bins=200)
plt.show()
return dict(
mean_dist = sum(all_distances) / len(all_distances),
mean_angle = sum(all_angles) / len(all_angles),
min_dist = min(all_distances),
min_angle = min(all_angles),
max_dist = max(all_distances),
max_angle = max(all_angles),
)
STATS = calculate_spline_stats(json_train + json_test)
print('NORMALIZATION HACK: overriding the computed stats with fixed bounds')
STATS['min_angle'] = -100
STATS['max_angle'] = 100
STATS['max_dist'] = 10
print(json.dumps(STATS, indent=4))
# In[30]:
def true_dist(normalized_distances: np.ndarray):
denominator = STATS['max_dist'] - STATS['min_dist']
return STATS['min_dist'] + (normalized_distances * denominator)
def true_angle(normalized_angles: np.ndarray):
denominator = STATS['max_angle'] - STATS['min_angle']
return STATS['min_angle'] + (normalized_angles * denominator)
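# Quick illustrative check of the denormalization (relies on the hard-coded
# angle bounds set above): normalized 0/0.5/1 map back to min/midpoint/max.
# With min_angle=-100 and max_angle=100:
#   true_angle(np.array([0.0, 0.5, 1.0])) -> array([-100., 0., 100.])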
# In[31]:
# Fetch all images and json files in sequence
def visualize_dataset_sample(img_path: Path, predicted_spline=None, last_row_only: bool=False):
json_path = img_path.parent / f'{img_path.stem}.json'
# Load original image (without preprocessing)
original_image = load_image(img_path)
# Load spline and command
data = load_json(json_path)
flatten_spline = get_spline(data)
command = data.get('command')
steering_angle = data.get('angle')
intersection_ahead = data.get('intersection_ahead')
# Display image with command
plt.title(f'Original [{command}]')
plt.imshow(original_image)
# Overlay with spline
xs, ys = plottable_spline(flatten_spline)
plt.plot(xs, ys, '.-r')
if predicted_spline is not None:
xs, ys = plottable_spline(predicted_spline)
plt.plot(xs, ys, '.-b')
# Overlay is intersection
plt.text(224/2, 220, "Intersection: YES" if intersection_ahead else "Intersection: NO", color='w')
# Overlay with steering angle
plt.barh(200, steering_angle*100)
# if predicted_spline is not None:
# last_frame_ax = axes[-1]
# xs, ys = plottable_spline(predicted_spline)
# last_frame_ax.plot(xs, ys, '.-b')
plt.show()
# save_dir = '/content/gdrive/My Drive/plots/'
# Path(save_dir).mkdir(exist_ok=True, parents=True)
# plot_path = f'{save_dir}/}.png'
# plt.savefig(plot_path)
#print('test data')
#start_idx = random.randint(0, len(img_test))
#sample_img_paths = img_test[start_idx:start_idx+50]
#for path in sample_img_paths:
# visualize_dataset_sample(path, predicted_spline=None, last_row_only=True)
# In[32]:
# # Train data
# sequences_dir = [path for path in Path(train_dataset_dir).iterdir() if path.is_dir()]
# seq_dir = random.choice(sequences_dir)
# visualize_sequence(gt_dir=seq_dir, predicted_spline=None, last_row_only=True)
def examples_per_command(path: Path):
count = {'left': 0, 'get_left_lane': 0, 'keep_lane': 0, 'straight': 0, 'get_right_lane': 0, 'right': 0}
json_paths = list(path.glob('*.json'))
for json_path in json_paths:
with json_path.open() as f:
data = json.load(f)
count[data['command']] += 1
return count
# In[33]:
# train_balance = examples_per_command(TRAIN_DATASET_DIR)
# test_balance = examples_per_command(TEST_DATASET_DIR)
# print('Train examples: ', json.dumps(train_balance, indent=2))
# print('Test examples: ', json.dumps(test_balance, indent=2))
# In[46]:
def visualize_batch(batch):
"""Visualizes first sample in batch"""
show_last = 10
fig, axes = plt.subplots(2, 5, figsize=(45, 20), sharex='all', sharey='all')
fig.tight_layout()
axes = axes.ravel()
X, Y = batch
imgs = X[0] # Shape: (bs, 224, 224, 3)
cmds = X[1] # Shape: (bs, 6)
splines = Y # Shape: (bs, 14)
# print(imgs.shape)
# print(cmds.shape)
# print(splines.shape)
for idx_in_batch in range(show_last):
ax = axes[idx_in_batch]
# Load preprocessed image
preprocessed_image = imgs[idx_in_batch]
# Load spline and command
command = onehot_to_command(cmds[idx_in_batch])
# Display image with command
ax.title.set_text(f'Original [{idx_in_batch}][{command}]')
# Line below solves this warning: https://stackoverflow.com/questions/49643907/clipping-input-data-to-the-valid-range-for-imshow-with-rgb-data-0-1-for-floa
preprocessed_image = (preprocessed_image * 100).astype(np.uint8)
ax.imshow(preprocessed_image)
# Overlay with spline
xs, ys = plottable_spline(splines[idx_in_batch])
ax.plot(xs, ys, '.-w')
plt.show()
class ImageDatasetGenerator(Sequence):
def __init__(self, dataset_dirs: List[Path], batch_size: int):
"""Load paths"""
self.batch_size = batch_size
self.img_paths = []
for dataset_dir in dataset_dirs:
            self.img_paths += list(dataset_dir.glob('**/*.png'))
def __len__(self):
"""Returns number of batches"""
return len(self.img_paths) // self.batch_size
def __getitem__(self, idx):
"""Prepares and returns shuffled mini-batches"""
batch_range = range(idx * self.batch_size, (idx + 1) * self.batch_size)
batch_of_imgs = []
batch_of_commands = []
batch_of_splines = []
for img_id in batch_range:
img_path = self.img_paths[img_id]
json_path = img_path.parent / f'{img_path.stem}.json'
data = load_json(json_path)
# X1 => Image
batch_of_imgs.append(load_preprocessed_image(img_path))
# X2 => Command
command_one_hot = get_command_input(data)
batch_of_commands.append(command_one_hot)
# Y => Expected sequence of splines
spline = get_spline(data)
batch_of_splines.append(spline)
# Prepare randomized indexes for shuffling mini-batches
indices = np.arange(self.batch_size)
np.random.shuffle(indices)
# Convert to numpy array and shuffle each batch in the same way
batch_of_imgs = np.array(batch_of_imgs)[indices]
batch_of_commands = np.array(batch_of_commands)[indices]
batch_of_splines = np.array(batch_of_splines)[indices]
        # Shapes: [(bs, 224, 224, 3), (bs, 6)], (bs, 14)
return [batch_of_imgs, batch_of_commands], batch_of_splines
#bs = 32
#print('Calculated with bs =', 32)
#ds = ImageDatasetGenerator(TRAIN_DATASET_DIRS, batch_size=bs)
#print('Train batches:', len(ds))
#ds = ImageDatasetGenerator(TEST_DATASET_DIRS, batch_size=bs)
#print('Test batches:', len(ds))
# In[47]:
#random_batch_idx = random.randint(0, len(ds) - 1)
#visualize_batch(ds[random_batch_idx])
# In[48]:
import os
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.python.eager import context
class TrainValTensorBoard(TensorBoard):
def __init__(self, log_dir, **kwargs):
self.val_log_dir = os.path.join(log_dir, 'validation')
training_log_dir = os.path.join(log_dir, 'training')
super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
def set_model(self, model):
if context.executing_eagerly():
self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
else:
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TrainValTensorBoard, self).set_model(model)
def _write_custom_summaries(self, step, logs=None):
logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
if context.executing_eagerly():
with self.val_writer.as_default(), tf.contrib.summary.always_record_summaries():
for name, value in val_logs.items():
tf.contrib.summary.scalar(name, value.item(), step=step)
else:
for name, value in val_logs.items():
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, step)
self.val_writer.flush()
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
super(TrainValTensorBoard, self)._write_custom_summaries(step, logs)
def on_train_end(self, logs=None):
super(TrainValTensorBoard, self).on_train_end(logs)
self.val_writer.close()
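# Illustrative usage sketch (the model and generator names are hypothetical,
# not defined in this cell): the callback writes train and validation scalars
# to separate subdirectories so TensorBoard overlays them as two runs.
# tb_callback = TrainValTensorBoard(log_dir='logs/waypoints')
# model.fit(train_generator, validation_data=test_generator,
#           epochs=10, callbacks=[tb_callback])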
# In[49]:
LOSS_FUNCTION_NAME
from collections import defaultdict
import random
from math import ceil
import json
import pickle
def general_results():
with open('data/dependencies.p', 'rb') as f:
trees = pickle.load(f)
with open('data/negation_concepts.json', 'r') as f:
annotated_concepts = json.load(f)
conjunct_concepts = NegationDetection.extract_conjunct_concepts(trees, annotated_concepts)
Detector = NegationDetection(linked_concepts=False)
data = Detector.traintest_split(annotated_concepts)
test = data['test']
results = {}
results['forward'], _ = Detector.lookforward_baseline(trees, test, conjunct_concepts)
results['punctuation'], _ = Detector.punctuation_baseline(trees, test, conjunct_concepts)
Detector.linked_concepts = True
results['forward_conj'], _ = Detector.lookforward_baseline(trees, test, conjunct_concepts)
results['punctuation_conj'], _ = Detector.punctuation_baseline(trees, test, conjunct_concepts)
with open('negation_results.json', 'w') as f:
json.dump(results, f)
def cue_level_results():
with open('data/dependencies.p', 'rb') as f:
trees = pickle.load(f)
with open('data/negation_concepts.json', 'r') as f:
annotated_concepts = json.load(f)
conjunct_concepts = NegationDetection.extract_conjunct_concepts(trees, annotated_concepts)
Detector = NegationDetection(linked_concepts=False, cue_level_evaluation=True)
data = Detector.traintest_split(annotated_concepts)
test = data['test']
bundled_results = []
cue_level_results, _ = Detector.punctuation_baseline(trees, test, conjunct_concepts)
for cue, results in cue_level_results.items():
precision, recall = results['positive_precision'], results['positive_recall']
        try:
            F1 = 2 * ((precision * recall) / (precision + recall))
        except (ZeroDivisionError, TypeError):
            # precision/recall may be None or sum to zero
            F1 = 0
bundled_results.append((cue, (precision, recall, F1)))
with open('negation_cue_results.json', 'w') as f:
json.dump(bundled_results, f)
# pretty printing
for cue, res in bundled_results:
print(cue, '&', ' & '.join([str(round(x * 100, 2)) for x in res]), r'\\')
class NegationDetection:
def __init__(self, modality='negation', linked_concepts=True, cue_level_evaluation=False):
self.modality_cues = {'negation': ['no', 'without', 'not']}
assert modality in ['negation', 'speculation']
self.modality = modality
self.linked_concepts = linked_concepts
self.seed = 1993
self.cue_level_evaluation = cue_level_evaluation
def traintest_split(self, annotated_sentences):
# splits annotated sentences in train and test sentences
train_amount = ceil(len(annotated_sentences) / 2)
sentence_ids = sorted(annotated_sentences.keys())
random.seed(self.seed)
    train_sentence_ids = set(random.sample(sentence_ids, train_amount))
train_sentences, test_sentences = {}, {}
for sentence_id, sentence_data in annotated_sentences.items():
if sentence_id in train_sentence_ids:
train_sentences[sentence_id] = sentence_data
else:
test_sentences[sentence_id] = sentence_data
# also output statistics!
numtrain, numtrainmodal = 0, 0
numtest, numtestmodal = 0, 0
for sentence_id, sentence_data in train_sentences.items():
for concept_id, concept_data in sentence_data.items():
numtrain += 1
if concept_data[self.modality]:
numtrainmodal += 1
for sentence_id, sentence_data in test_sentences.items():
for concept_id, concept_data in sentence_data.items():
numtest += 1
if concept_data[self.modality]:
numtestmodal += 1
statistics = {'# of train sentences': len(train_sentences),
'# of test sentences': len(test_sentences),
'# of train concepts': numtrain,
'# of test concepts': numtest,
'# of {} train concepts'.format(self.modality): numtrainmodal,
'# of {} test concepts'.format(self.modality): numtestmodal}
data = {'train': train_sentences,
'test': test_sentences,
'statistics': statistics}
return data
@staticmethod
def evaluate_confusion_matrix(confusion_matrix):
true_pos = len(confusion_matrix['true_pos'])
true_neg = len(confusion_matrix['true_neg'])
false_pos = len(confusion_matrix['false_pos'])
false_neg = len(confusion_matrix['false_neg'])
try:
positive_precision = true_pos / (true_pos + false_pos)
except ZeroDivisionError:
positive_precision = None
try:
positive_recall = true_pos / (true_pos + false_neg)
except ZeroDivisionError:
positive_recall = None
try:
negative_precision = true_neg / (true_neg + false_neg)
except ZeroDivisionError:
negative_precision = None
try:
negative_recall = true_neg / (true_neg + false_pos)
except ZeroDivisionError:
negative_recall = None
try:
accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
except ZeroDivisionError:
accuracy = None
try:
majority_baseline = max(true_pos + false_neg, true_neg + false_pos) / (true_pos + false_neg + true_neg + false_pos)
except ZeroDivisionError:
majority_baseline = None
results = {'accuracy': accuracy,
'majority_baseline': majority_baseline,
'positive_precision': positive_precision,
'positive_recall': positive_recall,
'negative_precision': negative_precision,
'negative_recall': negative_recall}
return results
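    # Worked example (illustrative numbers): with 8 true positives, 5 true
    # negatives, 2 false positives and 1 false negative,
    #   positive_precision = 8 / (8 + 2)  = 0.8
    #   positive_recall    = 8 / (8 + 1)  ~ 0.889
    #   accuracy           = (8 + 5) / 16 = 0.8125
    #   majority_baseline  = max(9, 7) / 16 = 0.5625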
def evaluate_confusion_matrix_cue_level(self, confusion_matrix):
cue_level_confusion_matrix = defaultdict(lambda: defaultdict(list))
# true pos
for conf in confusion_matrix['true_pos']:
evaluation_data = {'sentence_id': conf['sentence_id'],
'concept_id': conf['concept_id']}
predicted_cues = {predicted_cue.lower() for predicted_cue in conf['predicted_cues']}
for true_cue in conf['true_cues']:
if true_cue in predicted_cues:
cue_level_confusion_matrix[true_cue]['true_pos'].append(evaluation_data)
else:
if true_cue in self.modality_cues[self.modality]:
cue_level_confusion_matrix[true_cue]['false_neg'].append(evaluation_data)
# false pos
for conf in confusion_matrix['false_pos']:
evaluation_data = {'sentence_id': conf['sentence_id'],
'concept_id': conf['concept_id']}
predicted_cues = {predicted_cue.lower() for predicted_cue in conf['predicted_cues']}
for predicted_cue in predicted_cues:
if predicted_cue in self.modality_cues[self.modality]:
cue_level_confusion_matrix[predicted_cue]['false_pos'].append(evaluation_data)
# false neg
for conf in confusion_matrix['false_neg']:
evaluation_data = {'sentence_id': conf['sentence_id'],
'concept_id': conf['concept_id']}
for true_cue in conf['true_cues']:
if true_cue in self.modality_cues[self.modality]:
cue_level_confusion_matrix[true_cue]['false_neg'].append(evaluation_data)
# combine into cue-level results
cue_level_results = {}
for cue in self.modality_cues[self.modality]:
true_pos = len(cue_level_confusion_matrix[cue]['true_pos'])
false_pos = len(cue_level_confusion_matrix[cue]['false_pos'])
false_neg = len(cue_level_confusion_matrix[cue]['false_neg'])
try:
positive_precision = true_pos / (true_pos + false_pos)
except ZeroDivisionError:
positive_precision = None
try:
positive_recall = true_pos / (true_pos + false_neg)
except ZeroDivisionError:
positive_recall = None
results = {'positive_precision': positive_precision,
'positive_recall': positive_recall}
cue_level_results[cue] = results
return cue_level_results, cue_level_confusion_matrix
def fill_confusion_matrix(self, confusion_matrix, sentence_id, sentence_concepts, concept_predictions):
for concept_id, concept_data in sentence_concepts.items():
cue_data = concept_data['cue_data']
true_cues = {cue_id_data['cue'].lower() for cue_id_data in cue_data.values()}
predicted_cues = concept_predictions[concept_id]
# check for matches
evaluation_data = {'sentence_id': sentence_id,
'concept_id': concept_id,
'predicted_cues': predicted_cues,
'true_cues': true_cues}
if concept_data[self.modality]: # if ground truth is modality
if predicted_cues:
confusion_matrix['true_pos'].append(evaluation_data)
else:
confusion_matrix['false_neg'].append(evaluation_data)
else:
if predicted_cues:
confusion_matrix['false_pos'].append(evaluation_data)
else:
confusion_matrix['true_neg'].append(evaluation_data)
def lookforward_baseline(self, trees, annotated_concepts, conjunct_concepts):
confusion_matrix = defaultdict(list)
for sentence_id, sentence_concepts in sorted(annotated_concepts.items()):
tree = trees[sentence_id]
concept_predictions = defaultdict(list)
for token in tree:
if token.form.lower() in self.modality_cues[self.modality]:
cue = token.form.lower()
# assign all following concept_ids negation status
start_index = token.index - 1
for concept_id, concept_data in sentence_concepts.items():
concept_idxs = concept_data['token_idxs']
if max(concept_idxs) > start_index:
concept_predictions[concept_id].append(cue)
# link predictions for linked concepts
if self.linked_concepts:
conjunct_cs = conjunct_concepts[sentence_id]
if conjunct_cs:
conjunct_cs = defaultdict(list, conjunct_cs)
for concept_id, cues in list(concept_predictions.items()):
linked_concepts = conjunct_cs[concept_id]
for linked_concept in linked_concepts:
concept_predictions[linked_concept] += cues
# compare concept predictions to ground truth and fill confusion matrix accordingly
self.fill_confusion_matrix(confusion_matrix, sentence_id, sentence_concepts, concept_predictions)
if not self.cue_level_evaluation:
results = self.evaluate_confusion_matrix(confusion_matrix)
else:
results, confusion_matrix = self.evaluate_confusion_matrix_cue_level(confusion_matrix)
print(results)
return results, confusion_matrix
def punctuation_baseline(self, trees, annotated_concepts, conjunct_concepts):
# FIXED!
confusion_matrix = defaultdict(list)
for sentence_id, sentence_concepts in sorted(annotated_concepts.items()):
tree = trees[sentence_id]
concept_predictions = defaultdict(list)
for token in tree:
if token.form.lower() in self.modality_cues[self.modality]:
cue = token.form.lower()
# assign all following concept_ids negation status if they appear before first following punctuation
start_index = token.index - 1
next_punctuation_index = None
for t in tree[start_index:]:
if t.form in '!?.;,:':
next_punctuation_index = t.index - 1
break
for concept_id, concept_data in sentence_concepts.items():
concept_idxs = concept_data['token_idxs']
# skip concept if it appears past the first following punctuation
                        if next_punctuation_index is not None:
if min(concept_idxs) > next_punctuation_index:
continue
if max(concept_idxs) > start_index:
concept_predictions[concept_id].append(cue)
# link predictions for linked concepts
if self.linked_concepts:
conjunct_cs = conjunct_concepts[sentence_id]
if conjunct_cs:
conjunct_cs = defaultdict(list, conjunct_cs)
for concept_id, cues in list(concept_predictions.items()):
linked_concepts = conjunct_cs[concept_id]
for linked_concept in linked_concepts:
concept_predictions[linked_concept] += cues
# compare concept predictions to ground truth and fill confusion matrix accordingly
self.fill_confusion_matrix(confusion_matrix, sentence_id, sentence_concepts, concept_predictions)
if not self.cue_level_evaluation:
results = self.evaluate_confusion_matrix(confusion_matrix)
else:
results, confusion_matrix = self.evaluate_confusion_matrix_cue_level(confusion_matrix)
print(results)
return results, confusion_matrix
def cue_specific_dependency_rules(self, token, tree):
affected_token_idxs = []
"""
# example of a possible simple rule:
cue = token.form.lower()
if cue == 'no':
if token.deprel == 'neg':
head_index = token.head
affected_token_idxs.append(head_index - 1)
"""
return affected_token_idxs
def dependency_model(self, trees, annotated_concepts, conjunct_concepts):
# insert rules into this dependency model using the function self.cue_specific_dependency_rules
confusion_matrix = defaultdict(list)
for sentence_id, sentence_concepts in sorted(annotated_concepts.items()):
tree = trees[sentence_id]
concept_predictions = defaultdict(list)
for token in tree:
if token.form.lower() in self.modality_cues[self.modality]:
cue = token.form.lower()
affected_token_idxs = self.cue_specific_dependency_rules(token, tree)
# assign all affected concept_ids negation status
for concept_id, concept_data in sentence_concepts.items():
concept_idxs = concept_data['token_idxs']
for concept_idx in concept_idxs:
if concept_idx in affected_token_idxs:
concept_predictions[concept_id].append(cue)
# link predictions for linked concepts
if self.linked_concepts:
conjunct_cs = conjunct_concepts[sentence_id]
if conjunct_cs:
conjunct_cs = defaultdict(list, conjunct_cs)
for concept_id, cues in list(concept_predictions.items()):
linked_concepts = conjunct_cs[concept_id]
for linked_concept in linked_concepts:
concept_predictions[linked_concept] += cues
# compare concept predictions to ground truth and fill confusion matrix accordingly
self.fill_confusion_matrix(confusion_matrix, sentence_id, sentence_concepts, concept_predictions)
if not self.cue_level_evaluation:
results = self.evaluate_confusion_matrix(confusion_matrix)
else:
results, confusion_matrix = self.evaluate_confusion_matrix_cue_level(confusion_matrix)
print(results)
return results, confusion_matrix
@staticmethod
def extract_conjunct_concepts(trees, annotated_concepts):
# FINISHED! extracts conjunct concepts for each sentence
conjunct_concepts = {}
for sentence_id, sentence_concepts in sorted(annotated_concepts.items()):
# check if concepts are conjunct by seeing whether items have a 'conj' deprelation to other tokens
tree = trees[sentence_id]
all_concept_ids = set()
for concept_data in sentence_concepts.values():
all_concept_ids.update(concept_data['token_idxs'])
# linked concepts
linked_concepts = defaultdict(set)
for token in tree:
if token.index - 1 not in all_concept_ids:
continue
if token.deprel == 'conj':
head_token_idx = token.head - 1
head_concepts = set()
if head_token_idx in all_concept_ids:
# extract all concepts containing the head index
for concept, concept_data in sentence_concepts.items():
if head_token_idx in concept_data['token_idxs']:
head_concepts.add(concept)
# extract all concepts containing the dependent index
for concept, concept_data in sentence_concepts.items():
if token.index - 1 in concept_data['token_idxs']:
for head_concept in head_concepts:
linked_concepts[head_concept].add(concept)
# fuse linked_concepts (iterate 2 times for this)
for c, linked_cs in list(linked_concepts.items()):
for linked_c in linked_cs:
linked_concepts[linked_c].add(c)
linked_concepts[linked_c].update(linked_cs)
            # Plausible reconstruction of the truncated ending: drop self-links
            # from each set, then record the per-sentence conjunct mapping.
            linked_concepts = {k: {x for x in v if x != k} for k, v in linked_concepts.items()}
            conjunct_concepts[sentence_id] = linked_concepts
        return conjunct_concepts
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class FilterTrackPropertyType(str, Enum):
unknown = "Unknown" #: The unknown track property type.
type = "Type" #: The type.
name = "Name" #: The name.
language = "Language" #: The language.
four_cc = "FourCC" #: The fourCC.
bitrate = "Bitrate" #: The bitrate.
class FilterTrackPropertyCompareOperation(str, Enum):
equal = "Equal" #: The equal operation.
not_equal = "NotEqual" #: The not equal operation.
class CreatedByType(str, Enum):
user = "User"
application = "Application"
managed_identity = "ManagedIdentity"
key = "Key"
class MetricUnit(str, Enum):
bytes = "Bytes" #: The number of bytes.
count = "Count" #: The count.
milliseconds = "Milliseconds" #: The number of milliseconds.
class MetricAggregationType(str, Enum):
average = "Average" #: The average.
count = "Count" #: The count of a number of items, usually requests.
total = "Total" #: The sum.
class StorageAccountType(str, Enum):
primary = "Primary" #: The primary storage account for the Media Services account.
secondary = "Secondary" #: A secondary storage account for the Media Services account.
class StorageAuthentication(str, Enum):
system = "System" #: System authentication.
managed_identity = "ManagedIdentity" #: Managed Identity authentication.
class AccountEncryptionKeyType(str, Enum):
system_key = "SystemKey" #: The Account Key is encrypted with a System Key.
customer_key = "CustomerKey" #: The Account Key is encrypted with a Customer Key.
class ManagedIdentityType(str, Enum):
system_assigned = "SystemAssigned" #: A system-assigned managed identity.
none = "None" #: No managed identity.
class PrivateEndpointConnectionProvisioningState(str, Enum):
succeeded = "Succeeded"
creating = "Creating"
deleting = "Deleting"
failed = "Failed"
class PrivateEndpointServiceConnectionStatus(str, Enum):
pending = "Pending"
approved = "Approved"
rejected = "Rejected"
class AssetStorageEncryptionFormat(str, Enum):
none = "None" #: The Asset does not use client-side storage encryption (this is the only allowed value for new Assets).
media_storage_client_encryption = "MediaStorageClientEncryption" #: The Asset is encrypted with Media Services client-side encryption.
class AssetContainerPermission(str, Enum):
read = "Read" #: The SAS URL will allow read access to the container.
read_write = "ReadWrite" #: The SAS URL will allow read and write access to the container.
read_write_delete = "ReadWriteDelete" #: The SAS URL will allow read, write and delete access to the container.
class ContentKeyPolicyPlayReadyUnknownOutputPassingOption(str, Enum):
unknown = "Unknown" #: Represents a ContentKeyPolicyPlayReadyUnknownOutputPassingOption that is unavailable in current API version.
not_allowed = "NotAllowed" #: Passing the video portion of protected content to an Unknown Output is not allowed.
allowed = "Allowed" #: Passing the video portion of protected content to an Unknown Output is allowed.
allowed_with_video_constriction = "AllowedWithVideoConstriction" #: Passing the video portion of protected content to an Unknown Output is allowed but with constrained resolution.
class ContentKeyPolicyPlayReadyLicenseType(str, Enum):
unknown = "Unknown" #: Represents a ContentKeyPolicyPlayReadyLicenseType that is unavailable in current API version.
non_persistent = "NonPersistent" #: Non persistent license.
persistent = "Persistent" #: Persistent license. Allows offline playback.
class ContentKeyPolicyPlayReadyContentType(str, Enum):
unknown = "Unknown" #: Represents a ContentKeyPolicyPlayReadyContentType that is unavailable in current API version.
unspecified = "Unspecified" #: Unspecified content type.
ultra_violet_download = "UltraVioletDownload" #: Ultraviolet download content type.
ultra_violet_streaming = "UltraVioletStreaming" #: Ultraviolet streaming content type.
class ContentKeyPolicyRestrictionTokenType(str, Enum):
unknown = "Unknown" #: Represents a ContentKeyPolicyRestrictionTokenType that is unavailable in current API version.
swt = "Swt" #: Simple Web Token.
jwt = "Jwt" #: JSON Web Token.
class ContentKeyPolicyFairPlayRentalAndLeaseKeyType(str, Enum):
unknown = "Unknown" #: Represents a ContentKeyPolicyFairPlayRentalAndLeaseKeyType that is unavailable in current API version.
undefined = "Undefined" #: Key duration is not specified.
dual_expiry = "DualExpiry" #: Dual expiry for offline rental.
persistent_unlimited = "PersistentUnlimited" #: Content key can be persisted with an unlimited duration
persistent_limited = "PersistentLimited" #: Content key can be persisted and the valid duration is limited by the Rental Duration value
class AacAudioProfile(str, Enum):
aac_lc = "AacLc" #: Specifies that the output audio is to be encoded into AAC Low Complexity profile (AAC-LC).
he_aac_v1 = "HeAacV1" #: Specifies that the output audio is to be encoded into HE-AAC v1 profile.
he_aac_v2 = "HeAacV2" #: Specifies that the output audio is to be encoded into HE-AAC v2 profile.
class H265VideoProfile(str, Enum):
auto = "Auto" #: Tells the encoder to automatically determine the appropriate H.265 profile.
main = "Main" #: Main profile (https://x265.readthedocs.io/en/default/cli.html?highlight=profile#profile-level-tier)
class StretchMode(str, Enum):
none = "None" #: Strictly respect the output resolution without considering the pixel aspect ratio or display aspect ratio of the input video.
auto_size = "AutoSize" #: Override the output resolution, and change it to match the display aspect ratio of the input, without padding. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the value in the preset is overridden, and the output will be at 1280x720, which maintains the input aspect ratio of 16:9.
auto_fit = "AutoFit" #: Pad the output (with either letterbox or pillar box) to honor the output resolution, while ensuring that the active video region in the output has the same aspect ratio as the input. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the output will be at 1280x1280, which contains an inner rectangle of 1280x720 at aspect ratio of 16:9, and pillar box regions 280 pixels wide at the left and right.
class VideoSyncMode(str, Enum):
auto = "Auto" #: This is the default method. Chooses between Cfr and Vfr depending on muxer capabilities. For output format MP4, the default mode is Cfr.
passthrough = "Passthrough" #: The presentation timestamps on frames are passed through from the input file to the output file writer. Recommended when the input source has variable frame rate, and are attempting to produce multiple layers for adaptive streaming in the output which have aligned GOP boundaries. Note: if two or more frames in the input have duplicate timestamps, then the output will also have the same behavior
cfr = "Cfr" #: Input frames will be repeated and/or dropped as needed to achieve exactly the requested constant frame rate. Recommended when the output frame rate is explicitly set at a specified value
vfr = "Vfr" #: Similar to the Passthrough mode, but if the input has frames that have duplicate timestamps, then only one frame is passed through to the output, and others are dropped. Recommended when the number of output frames is expected to be equal to the number of input frames. For example, the output is used to calculate a quality metric like PSNR against the input
class H265Complexity(str, Enum):
speed = "Speed" #: Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed to decrease encoding time.
balanced = "Balanced" #: Tells the encoder to use settings that achieve a balance between speed and quality.
quality = "Quality" #: Tells the encoder to use settings that are optimized to produce higher quality output at the expense of slower overall encode time.
class ChannelMapping(str, Enum):
front_left = "FrontLeft" #: The Front Left Channel.
front_right = "FrontRight" #: The Front Right Channel.
center = "Center" #: The Center Channel.
low_frequency_effects = "LowFrequencyEffects" #: Low Frequency Effects Channel. Sometimes referred to as the Subwoofer.
back_left = "BackLeft" #: The Back Left Channel. Sometimes referred to as the Left Surround Channel.
back_right = "BackRight" #: The Back Right Channel. Sometimes referred to as the Right Surround Channel.
stereo_left = "StereoLeft" #: The Left Stereo channel. Sometimes referred to as Down Mix Left.
stereo_right = "StereoRight" #: The Right Stereo channel. Sometimes referred to as Down Mix Right.
class TrackAttribute(str, Enum):
bitrate = "Bitrate" #: The bitrate of the track.
language = "Language" #: The language of the track.
class AttributeFilter(str, Enum):
all = "All" #: All tracks will be included.
top = "Top" #: The first track will be included when the attribute is sorted in descending order. Generally used to select the largest bitrate.
    bottom = "Bottom" #: The last track will be included when the attribute is sorted in descending order. Generally used to select the smallest bitrate.
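# Illustrative note (not part of the generated file): because these enums
# subclass str, members compare equal to their wire values and can be looked up
# by value, which is how service payloads are mapped onto them.
# Example:
#     FilterTrackPropertyType.bitrate == "Bitrate"            # True
#     TrackAttribute("Language") is TrackAttribute.language   # True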
# -*- coding: utf-8 -*-
"""
@author: github.com/byochim
"""
import sys
from classlist import investment_models
from classlist import lifetime_earnings_models
def main():
'''
Introductory message briefly explaining the program.
'''
print("Welcome to the Investment Calculator."
"\nThis calculator contains basic models for ROI, NPV, PP, and DCF."
"\nAdditionally, it contains a model to calculate lifetime earnings, or earnings over n years."
"\nGiven a starting salary x, will provide you with an estimate of your total earnings over n years."
"\nYear-to-year salary increases are estimated at 3%."
"\nEnter the required data when prompted and you will receive a calculation for the chosen model."
"\nType 'exit' at any main prompt to exit the program."
)
print("")
choose()
def choose():
'''
Prompts user to choose between the two main calculators.
'''
choice = input("\nPress 1 for the investment models calculator or press 2 for the lifetime earnings calculator: ")
if choice == "1":
choice_investment_models()
if choice == "2":
choice_lifetime_earnings_model()
choice = choice.upper()
# Converts to uppercase to filter exit command
if choice == 'EXIT':
# Exits program
print("Goodbye.")
sys.exit()
else:
# Re-enters choose function if the user doesn't input a valid command
print("Invalid input.")
choose()
def choice_investment_models():
'''
Prompts user to choose an investment model for calculation.
'''
choice = input("\nWhat would you like to calculate first? (ROI, NPV, PP, or DCF): ")
choice = choice.upper()
# Converts to uppercase to filter input
if choice == "ROI":
ROI()
if choice == "NPV":
NPV()
if choice == "PP":
PP()
if choice == "DCF":
DCF()
if choice == 'EXIT':
print("Goodbye.")
sys.exit()
else:
# Re-enters choice_investment_models function if the user doesn't input a valid command
print("Invalid input.")
choice_investment_models()
def ROI():
'''
Input: user data required for the ROI model
Output: ROI calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the initial investment cost: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
gain = float(input("\nEnter the gain or loss from the investment: "))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_ROI function from the classlist file in order to calculate the ROI
print("\nYour ROI is " + str(investment_models.calc_ROI(cost, gain)) + "%.")
end()
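# Hedged sketch (the real implementation lives in classlist.py, which is not
# shown here): calc_ROI presumably computes the classic return-on-investment
# percentage. A minimal stand-in for running this module on its own might be:
#
# class investment_models:
#     @staticmethod
#     def calc_ROI(cost, gain):
#         # ROI (%) = gain relative to the initial cost
#         return round((gain / cost) * 100, 2)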
def NPV():
'''
Input: user data required for the NPV model
Output: NPV calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
time = abs(round(float(input("Enter the length of the project or investment in years (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
discount = abs(float(input("Enter the discount rate (please enter in decimal form): ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the initial investment cost: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_NPV function from the classlist file in order to calculate the NPV
# Prints in dollar format
print("\nYour NPV is " + "${:,.2f}".format(investment_models.calc_NPV(time, discount, cost)))
end()
def PP():
'''
Input: user data required for the PP model
Output: PP calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the initial investment cost: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
annual_gain = abs(float(input("Enter the annual net cash flow gained from the investment: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_PP function from the classlist file in order to calculate the PP
print("\nYour PP is " + str(investment_models.calc_PP(cost, annual_gain)) + " years.")
end()
def DCF():
'''
Input: user data required for the DCF model
Output: DCF calculation
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
time = abs(round(float(input("Enter the length of the project or investment in years (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter an integer.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
discount = abs(float(input("Enter the discount rate (please enter in decimal form): ")))
except ValueError:
print("Invalid input. Please enter an integer.")
continue
break
# Calls the calc_DCF function from the classlist file in order to calculate the DCF
# Prints in dollar format
print("\nYour DCF is " + "${:,.2f}".format(investment_models.calc_DCF(time, discount)))
end()
def choice_lifetime_earnings_model():
'''
Gives user option to include the cost of a degree or certification with the calculation.
'''
choice = (input("Would you like to calculate the earnings of a degree or certification? (Y/N): "))
choice = choice.upper()
# Converts to uppercase to filter exit command
if choice == "Y" or choice == "YES":
earnings_degree()
if choice == "N" or choice == "NO":
earnings()
if choice == "EXIT":
# Exits program
print("\nGoodbye.")
sys.exit()
else:
# Re-enters choice_lifetime_earningsModel function if the user doesn't input a valid command
print("\nInvalid input.")
choice_lifetime_earnings_model()
def earnings():
'''
Input: user data required for earnings calculation (no degree/cert)
Output: earnings calculation without degree/cert
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
starting_salary = abs(float(input("Enter starting salary: ")))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
while True:
try:
# Converts from str to float for decimals and the calculation
# Rounds to the nearest whole number for calculation
# Abs is to control for negative inputs
years = abs(round(float(input("Enter the number of years you'd like to calculate (decimals will be rounded to the nearest whole number): "))))
except ValueError:
print("Invalid input. Please enter a valid number.")
continue
break
# Calls the calc_earnings function from the class-list file in order to calculate the earnings
# Prints in dollar format
print("\nYour earnings over a " + str(years) + " period, given a starting salary of " + "${:,.2f}".format(starting_salary) + ", total " + "${:,.2f}".format(lifetime_earnings_models.calc_earnings(starting_salary, years)))
end()
def earnings_degree():
'''
Input: user data required for earnings calculation
Output: earnings calculation with degree/cert
'''
# Controls in place in case user does not enter a number
while True:
try:
# Converts from str to float for decimals and the calculation
# Abs is to control for negative inputs
cost = abs(float(input("Enter the total cost of the degree or certification: ")))
except ValueError:
print("Invalid input. Please enter a number.")
continue
break
while True:
try:
            # Converts from str to float for decimals and the calculation
        # Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if info_ is not None and not isinstance(info_, (dict, ScaleApplicationInfo)):
raise Exception("Expected info_ to be a ScaleApplicationInfo, received: {}".format(type(info_)))
self.error = error_
self.info = info_
self.unknown_fields = unknown_fields
class ScaleApplicationResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~ScaleApplicationResult]
'''
results_ = [ScaleApplicationResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class ScaleApplicationsParams(Type):
_toSchema = {'applications': 'applications'}
_toPy = {'applications': 'applications'}
def __init__(self, applications=None, **unknown_fields):
'''
applications : typing.Sequence[~ScaleApplicationParams]
'''
applications_ = [ScaleApplicationParams.from_json(o) for o in applications or []]
# Validate arguments against known Juju API types.
if applications_ is not None and not isinstance(applications_, (bytes, str, list)):
raise Exception("Expected applications_ to be a Sequence, received: {}".format(type(applications_)))
self.applications = applications_
self.unknown_fields = unknown_fields
class SerializedModel(Type):
_toSchema = {'bytes_': 'bytes', 'charms': 'charms', 'resources': 'resources', 'tools': 'tools'}
_toPy = {'bytes': 'bytes_', 'charms': 'charms', 'resources': 'resources', 'tools': 'tools'}
def __init__(self, bytes_=None, charms=None, resources=None, tools=None, **unknown_fields):
'''
bytes_ : typing.Sequence[int]
charms : typing.Sequence[str]
resources : typing.Sequence[~SerializedModelResource]
tools : typing.Sequence[~SerializedModelTools]
'''
bytes__ = bytes_
charms_ = charms
resources_ = [SerializedModelResource.from_json(o) for o in resources or []]
tools_ = [SerializedModelTools.from_json(o) for o in tools or []]
# Validate arguments against known Juju API types.
if bytes__ is not None and not isinstance(bytes__, (bytes, str, list)):
raise Exception("Expected bytes__ to be a Sequence, received: {}".format(type(bytes__)))
if charms_ is not None and not isinstance(charms_, (bytes, str, list)):
raise Exception("Expected charms_ to be a Sequence, received: {}".format(type(charms_)))
if resources_ is not None and not isinstance(resources_, (bytes, str, list)):
raise Exception("Expected resources_ to be a Sequence, received: {}".format(type(resources_)))
if tools_ is not None and not isinstance(tools_, (bytes, str, list)):
raise Exception("Expected tools_ to be a Sequence, received: {}".format(type(tools_)))
self.bytes_ = bytes__
self.charms = charms_
self.resources = resources_
self.tools = tools_
self.unknown_fields = unknown_fields
class SerializedModelResource(Type):
_toSchema = {'application': 'application', 'application_revision': 'application-revision', 'charmstore_revision': 'charmstore-revision', 'name': 'name', 'unit_revisions': 'unit-revisions'}
_toPy = {'application': 'application', 'application-revision': 'application_revision', 'charmstore-revision': 'charmstore_revision', 'name': 'name', 'unit-revisions': 'unit_revisions'}
def __init__(self, application=None, application_revision=None, charmstore_revision=None, name=None, unit_revisions=None, **unknown_fields):
'''
application : str
application_revision : SerializedModelResourceRevision
charmstore_revision : SerializedModelResourceRevision
name : str
unit_revisions : typing.Mapping[str, ~SerializedModelResourceRevision]
'''
application_ = application
application_revision_ = SerializedModelResourceRevision.from_json(application_revision) if application_revision else None
charmstore_revision_ = SerializedModelResourceRevision.from_json(charmstore_revision) if charmstore_revision else None
name_ = name
unit_revisions_ = unit_revisions
# Validate arguments against known Juju API types.
if application_ is not None and not isinstance(application_, (bytes, str)):
raise Exception("Expected application_ to be a str, received: {}".format(type(application_)))
if application_revision_ is not None and not isinstance(application_revision_, (dict, SerializedModelResourceRevision)):
raise Exception("Expected application_revision_ to be a SerializedModelResourceRevision, received: {}".format(type(application_revision_)))
if charmstore_revision_ is not None and not isinstance(charmstore_revision_, (dict, SerializedModelResourceRevision)):
raise Exception("Expected charmstore_revision_ to be a SerializedModelResourceRevision, received: {}".format(type(charmstore_revision_)))
if name_ is not None and not isinstance(name_, (bytes, str)):
raise Exception("Expected name_ to be a str, received: {}".format(type(name_)))
if unit_revisions_ is not None and not isinstance(unit_revisions_, dict):
raise Exception("Expected unit_revisions_ to be a Mapping, received: {}".format(type(unit_revisions_)))
self.application = application_
self.application_revision = application_revision_
self.charmstore_revision = charmstore_revision_
self.name = name_
self.unit_revisions = unit_revisions_
self.unknown_fields = unknown_fields
class SerializedModelResourceRevision(Type):
_toSchema = {'description': 'description', 'fingerprint': 'fingerprint', 'origin': 'origin', 'path': 'path', 'revision': 'revision', 'size': 'size', 'timestamp': 'timestamp', 'type_': 'type', 'username': 'username'}
_toPy = {'description': 'description', 'fingerprint': 'fingerprint', 'origin': 'origin', 'path': 'path', 'revision': 'revision', 'size': 'size', 'timestamp': 'timestamp', 'type': 'type_', 'username': 'username'}
def __init__(self, description=None, fingerprint=None, origin=None, path=None, revision=None, size=None, timestamp=None, type_=None, username=None, **unknown_fields):
'''
description : str
fingerprint : str
origin : str
path : str
revision : int
size : int
timestamp : str
type_ : str
username : str
'''
description_ = description
fingerprint_ = fingerprint
origin_ = origin
path_ = path
revision_ = revision
size_ = size
timestamp_ = timestamp
type__ = type_
username_ = username
# Validate arguments against known Juju API types.
if description_ is not None and not isinstance(description_, (bytes, str)):
raise Exception("Expected description_ to be a str, received: {}".format(type(description_)))
if fingerprint_ is not None and not isinstance(fingerprint_, (bytes, str)):
raise Exception("Expected fingerprint_ to be a str, received: {}".format(type(fingerprint_)))
if origin_ is not None and not isinstance(origin_, (bytes, str)):
raise Exception("Expected origin_ to be a str, received: {}".format(type(origin_)))
if path_ is not None and not isinstance(path_, (bytes, str)):
raise Exception("Expected path_ to be a str, received: {}".format(type(path_)))
if revision_ is not None and not isinstance(revision_, int):
raise Exception("Expected revision_ to be a int, received: {}".format(type(revision_)))
if size_ is not None and not isinstance(size_, int):
raise Exception("Expected size_ to be a int, received: {}".format(type(size_)))
if timestamp_ is not None and not isinstance(timestamp_, (bytes, str)):
raise Exception("Expected timestamp_ to be a str, received: {}".format(type(timestamp_)))
if type__ is not None and not isinstance(type__, (bytes, str)):
raise Exception("Expected type__ to be a str, received: {}".format(type(type__)))
if username_ is not None and not isinstance(username_, (bytes, str)):
raise Exception("Expected username_ to be a str, received: {}".format(type(username_)))
self.description = description_
self.fingerprint = fingerprint_
self.origin = origin_
self.path = path_
self.revision = revision_
self.size = size_
self.timestamp = timestamp_
self.type_ = type__
self.username = username_
self.unknown_fields = unknown_fields
class SerializedModelTools(Type):
_toSchema = {'uri': 'uri', 'version': 'version'}
_toPy = {'uri': 'uri', 'version': 'version'}
def __init__(self, uri=None, version=None, **unknown_fields):
'''
uri : str
version : str
'''
uri_ = uri
version_ = version
# Validate arguments against known Juju API types.
if uri_ is not None and not isinstance(uri_, (bytes, str)):
raise Exception("Expected uri_ to be a str, received: {}".format(type(uri_)))
if version_ is not None and not isinstance(version_, (bytes, str)):
raise Exception("Expected version_ to be a str, received: {}".format(type(version_)))
self.uri = uri_
self.version = version_
self.unknown_fields = unknown_fields
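# Illustrative usage sketch (values are hypothetical): like every Type subclass
# in this module, SerializedModelTools is normally reconstructed from a JSON
# payload via the inherited from_json classmethod.
# tools = SerializedModelTools.from_json({'uri': 'https://example.invalid/tools', 'version': '2.9.0'})
# assert tools.uri == 'https://example.invalid/tools'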
class SetConstraints(Type):
_toSchema = {'application': 'application', 'constraints': 'constraints'}
_toPy = {'application': 'application', 'constraints': 'constraints'}
def __init__(self, application=None, constraints=None, **unknown_fields):
'''
application : str
constraints : Value
'''
application_ = application
constraints_ = Value.from_json(constraints) if constraints else None
# Validate arguments against known Juju API types.
if application_ is not None and not isinstance(application_, (bytes, str)):
raise Exception("Expected application_ to be a str, received: {}".format(type(application_)))
if constraints_ is not None and not isinstance(constraints_, (dict, Value)):
raise Exception("Expected constraints_ to be a Value, received: {}".format(type(constraints_)))
self.application = application_
self.constraints = constraints_
self.unknown_fields = unknown_fields
class SetExternalControllerInfoParams(Type):
_toSchema = {'info': 'info'}
_toPy = {'info': 'info'}
def __init__(self, info=None, **unknown_fields):
'''
info : ExternalControllerInfo
'''
info_ = ExternalControllerInfo.from_json(info) if info else None
# Validate arguments against known Juju API types.
if info_ is not None and not isinstance(info_, (dict, ExternalControllerInfo)):
raise Exception("Expected info_ to be a ExternalControllerInfo, received: {}".format(type(info_)))
self.info = info_
self.unknown_fields = unknown_fields
class SetExternalControllersInfoParams(Type):
_toSchema = {'controllers': 'controllers'}
_toPy = {'controllers': 'controllers'}
def __init__(self, controllers=None, **unknown_fields):
'''
controllers : typing.Sequence[~SetExternalControllerInfoParams]
'''
controllers_ = [SetExternalControllerInfoParams.from_json(o) for o in controllers or []]
# Validate arguments against known Juju API types.
if controllers_ is not None and not isinstance(controllers_, (bytes, str, list)):
raise Exception("Expected controllers_ to be a Sequence, received: {}".format(type(controllers_)))
self.controllers = controllers_
self.unknown_fields = unknown_fields
class SetMachineBlockDevices(Type):
_toSchema = {'machine_block_devices': 'machine-block-devices'}
_toPy = {'machine-block-devices': 'machine_block_devices'}
def __init__(self, machine_block_devices=None, **unknown_fields):
'''
machine_block_devices : typing.Sequence[~MachineBlockDevices]
'''
machine_block_devices_ = [MachineBlockDevices.from_json(o) for o in machine_block_devices or []]
# Validate arguments against known Juju API types.
if machine_block_devices_ is not None and not isinstance(machine_block_devices_, (bytes, str, list)):
raise Exception("Expected machine_block_devices_ to be a Sequence, received: {}".format(type(machine_block_devices_)))
self.machine_block_devices = machine_block_devices_
self.unknown_fields = unknown_fields
class SetMachineNetworkConfig(Type):
_toSchema = {'config': 'config', 'tag': 'tag'}
_toPy = {'config': 'config', 'tag': 'tag'}
def __init__(self, config=None, tag=None, **unknown_fields):
'''
config : typing.Sequence[~NetworkConfig]
tag : str
'''
config_ = [NetworkConfig.from_json(o) for o in config or []]
tag_ = tag
# Validate arguments against known Juju API types.
if config_ is not None and not isinstance(config_, (bytes, str, list)):
raise Exception("Expected config_ to be a Sequence, received: {}".format(type(config_)))
        if tag_ is not None and not isinstance(tag_, (bytes, str)):
            raise Exception("Expected tag_ to be a str, received: {}".format(type(tag_)))
        self.config = config_
        self.tag = tag_
        self.unknown_fields = unknown_fields
import h5py
import logging
import operator
import numpy as np
import westpa
from westpa.cli.core import w_init
from westpa.cli.core import w_run
from westpa.core.extloader import get_object
from westpa.core.segment import Segment
from westpa import analysis
import json
import os
import shutil
import pickle
import importlib.util
import tqdm
import mdtraj as md
from rich.logging import RichHandler
from matplotlib import pyplot as plt
# Ensure this is installed via pip. msm_we's setup.py is all set up for that.
# Navigate to the folder where msm_we is, and run python3 -m pip install .
# If you're doing development on msm_we, add the -e flag to pip, i.e. "python3 -m pip install -e ."
# -e will install it in editable mode, so changes to msm_we will take effect next time it's imported.
# Otherwise, if you modify the msm_we code, you'll need to re-install it through pip.
from msm_we import msm_we
import ray
import tempfile
EPS = np.finfo(np.float64).eps
log = logging.getLogger(__name__)
log.setLevel("INFO")
log.propagate = False
log.addHandler(RichHandler())
msm_we_logger = logging.getLogger("msm_we.msm_we")
msm_we_logger.setLevel("INFO")
# Map structure types to extensions.
# This tells the plugin what extension to put on generated start-state files.
STRUCT_EXTENSIONS = {
md.formats.PDBTrajectoryFile: "pdb",
md.formats.AmberRestartFile: "rst7",
}
EXTENSION_LOCKFILE = 'doing_extension'
def check_target_reached(h5_filename):
"""
Check if the target state was reached, given the data in a WEST H5 file.
Parameters
----------
h5_filename: string
Path to a WESTPA HDF5 data file
"""
with h5py.File(h5_filename, 'r') as h5_file:
# Get the key to the final iteration. Need to do -2 instead of -1 because there's an empty-ish final iteration
# written.
for iteration_key in list(h5_file['iterations'].keys())[-2:0:-1]:
endpoint_types = h5_file[f'iterations/{iteration_key}/seg_index']['endpoint_type']
if Segment.SEG_ENDPOINT_RECYCLED in endpoint_types:
log.debug(f"recycled segment found in file {h5_filename} at iteration {iteration_key}")
return True
return False
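# Illustrative usage sketch (the file path is hypothetical):
# if check_target_reached('west.h5'):
#     log.info('At least one walker was recycled into the target state.')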
def fix_deprecated_initialization(initialization_state):
"""
I changed my initialization JSON schema to use underscores instead of hyphens so I can directly expand it into
    keyword arguments to w_init. This just handles any old-style JSON files I still had, so they don't choke and die.
"""
log.debug(f"Starting processing, dict is now {initialization_state}")
# Some of my initial files had this old-style formatting. Handle it for now, but remove eventually
for old_key, new_key in [
('tstate-file', 'tstate_file'),
('bstate-file', 'bstate_file'),
('sstate-file', 'sstate_file'),
('segs-per-state', 'segs_per_state'),
]:
if old_key in initialization_state.keys():
log.warning(
f"This initialization JSON file uses the deprecated " f"hyphenated form for {old_key}. Replace with underscores."
)
value = initialization_state.pop(old_key)
initialization_state[new_key] = value
log.debug(f"Finished processing, dict is now {initialization_state}")
return initialization_state
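# Illustrative example of the migration above (the input dict is hypothetical):
# fix_deprecated_initialization({'tstate-file': 'tstate.yml', 'segs_per_state': 5})
# -> {'segs_per_state': 5, 'tstate_file': 'tstate.yml'}, with a deprecation warning logged.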
# TODO: Break this out into a separate module, let it be specified (if it's necessary) as a plugin option
# This may not always be required -- i.e. you may be able to directly output to the h5 file in your propagator
def prepare_coordinates(plugin_config, h5file, we_h5filename):
"""
Copy relevant coordinates from trajectory files into <iteration>/auxdata/coord of the h5 file.
Directly modifies the input h5 file.
Adds ALL coordinates to auxdata/coord.
Adapted from original msmWE collectCoordinates.py script.
Parameters
----------
plugin_config: YAMLConfig object
Stores the configuration options provided to the plugin in the WESTPA configuration file
h5file: h5py.File
WESTPA h5 data file
we_h5filename: string
Name of the WESTPA h5 file
"""
refPDBfile = plugin_config.get('ref_pdb_file')
modelName = plugin_config.get('model_name')
# TODO: Don't need this explicit option, use WEST_SIM_ROOT or something
WEfolder = plugin_config.get('we_folder')
parentTraj = plugin_config.get('parent_traj_filename')
childTraj = plugin_config.get('child_traj_filename')
pcoord_ndim = plugin_config.get('pcoord_ndim', 1)
model = msm_we.modelWE()
log.info('Preparing coordinates...')
# Only need the model to get the number of iterations and atoms
# TODO: Replace this with something more lightweight, get directly from WE
log.debug(f'Doing collectCoordinates on WE file {we_h5filename}')
model.initialize(
[we_h5filename],
refPDBfile,
modelName,
# Pass some dummy arguments -- these aren't important, this model is just created for convenience
# in the coordinate collection. Dummy arguments prevent warnings from being raised.
basis_pcoord_bounds=None,
target_pcoord_bounds=None,
tau=1,
pcoord_ndim=pcoord_ndim,
_suppress_boundary_warning=True,
)
model.get_iterations()
log.debug(f"Found {model.maxIter} iterations")
n_iter = None
for n_iter in tqdm.tqdm(range(1, model.maxIter + 1)):
nS = model.numSegments[n_iter - 1].astype(int)
coords = np.zeros((nS, 2, model.nAtoms, 3))
dsetName = "/iterations/iter_%08d/auxdata/coord" % int(n_iter)
coords_exist = False
try:
dset = h5file.create_dataset(dsetName, np.shape(coords))
except (RuntimeError, ValueError):
log.debug('coords exist for iteration ' + str(n_iter) + ' NOT overwritten')
coords_exist = True
continue
for iS in range(nS):
trajpath = WEfolder + "/traj_segs/%06d/%06d" % (n_iter, iS)
try:
coord0 = np.squeeze(md.load(f'{trajpath}/{parentTraj}', top=model.reference_structure.topology)._xyz)
except OSError:
log.warning("Parent traj file doesn't exist, loading reference structure coords")
coord0 = np.squeeze(model.reference_structure._xyz)
coord1 = np.squeeze(md.load(f'{trajpath}/{childTraj}', top=model.reference_structure.topology)._xyz)
coords[iS, 0, :, :] = coord0
coords[iS, 1, :, :] = coord1
if not coords_exist:
dset[:] = coords
log.debug(f"Wrote coords for {n_iter} iterations.")
def msmwe_compute_ss(plugin_config, west_files):
"""
Prepare and initialize an msm_we model, and use it to predict a steady-state distribution.
1. Load coordinate data
2. Perform dimensionality reduction
3. Compute flux and transition matrices
4. Compute steady-state distribution (via eigenvectors of transition matrix)
5. Compute target-state flux
TODO
----
This function does far too many things. Break it up a bit.
Parameters
----------
plugin_config: YAMLConfig object
Stores the configuration options provided to the plugin in the WESTPA configuration file
west_files: list of str
Paths of the WESTPA h5 files used to build the model.
Returns
-------
ss_alg: np.ndarray
The steady-state distribution
ss_flux: float
Flux into target state
model: modelWE object
The modelWE object produced for analysis.
"""
n_lag = 0
log.debug("Initializing msm_we")
# TODO: Refactor this to use westpa.core.extloader.get_object
# I'm reinventing the wheel a bit here, I can replace almost all this code w/ that
# ##### Monkey-patch modelWE with the user-override functions
override_file = plugin_config.get('user_functions')
# First, import the file with the user-override functions
# This is a decently janky implementation, but it seems to work, and I don't know of a better way of doing it.
# This is nice because it avoids mucking around with the path, which I think is a Good Thing.
# We're given a path to the user-specified file containing overrides
# This comes from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# I don't think the name provided here actually matters
user_override_spec = importlib.util.spec_from_file_location("override_module", override_file)
user_overrides = importlib.util.module_from_spec(user_override_spec)
# Make the functions that were overriden in override_file available in the namespace under user_overrides
user_override_spec.loader.exec_module(user_overrides)
# So now we can do the actual monkey-patching of modelWE.
# We monkey-patch at the module level rather than just override the function in the instanced object
# so that the functions retain access to self.
msm_we.modelWE.processCoordinates = user_overrides.processCoordinates
# ##### Done with monkey-patching.
model = msm_we.modelWE()
streaming = plugin_config.get('streaming', False)
refPDBfile = plugin_config.get('ref_pdb_file')
modelName = plugin_config.get('model_name')
n_clusters = plugin_config.get('n_clusters')
tau = plugin_config.get('tau', None)
pcoord_ndim = plugin_config.get('pcoord_ndim', 1)
basis_pcoord_bounds = np.array(plugin_config.get('basis_pcoord_bounds', np.nan), dtype=float)
target_pcoord_bounds = np.array(plugin_config.get('target_pcoord_bounds', np.nan), dtype=float)
if np.isnan(basis_pcoord_bounds).any() or np.isnan(target_pcoord_bounds).any():
log.critical(
"Target and/or basis pcoord bounds were not specified. "
"Set them using the 'basis_pcoord_bounds' or 'target_pcoord_bounds' parameters. "
"'basis/target_pcoord1_min/max' and 'basis/target_pcoord1' are no longer supported. "
"See https://jdrusso.github.io/msm_we/api.html#msm_we.msm_we.modelWE.initialize for details."
)
if tau is None:
log.warning('No tau provided to restarting plugin. Defaulting to 1.')
tau = 1
# Fire up the model object
model.initialize(
west_files,
refPDBfile,
modelName,
basis_pcoord_bounds=basis_pcoord_bounds,
target_pcoord_bounds=target_pcoord_bounds,
tau=tau,
pcoord_ndim=pcoord_ndim,
)
model.dimReduceMethod = plugin_config.get('dim_reduce_method')
model.n_lag = n_lag
log.debug("Loading in iteration data.. (this could take a while)")
# First dimension is the total number of segments
model.get_iterations()
model.get_coordSet(model.maxIter)
model.dimReduce()
first_iter, last_iter = model.first_iter, model.maxIter
clusterFile = modelName + "_clusters_s" + str(first_iter) + "_e" + str(last_iter) + "_nC" + str(n_clusters) + ".h5"
# TODO: Uncomment this to actually load the clusterFile if it exists. For now, disable for development.
exists = os.path.isfile(clusterFile)
exists = False
log.warning("Skipping any potential cluster reloading!")
log.info(f"Launching Ray with {plugin_config.get('n_cpus', 1)} cpus")
ray_tempdir_root = plugin_config.get('ray_temp_dir', None)
if ray_tempdir_root is not None:
ray_tempdir = tempfile.TemporaryDirectory(dir=ray_tempdir_root)
log.info(f"Using {ray_tempdir.name} as temp_dir for Ray")
ray.init(
num_cpus=plugin_config.get('n_cpus', 1), _temp_dir=ray_tempdir.name, ignore_reinit_error=True, include_dashboard=False
)
else:
ray.init(num_cpus=plugin_config.get('n_cpus', 1), ignore_reinit_error=True, include_dashboard=False)
# If a cluster file with the name corresponding to these parameters exists, load clusters from it.
if exists:
log.debug("loading clusters...")
model.load_clusters(clusterFile)
# Otherwise, do the clustering (which will create and save to that file)
else:
# FIXME: This gives the wrong shape, but loading from the clusterfile gives the right shape
log.debug("clustering coordinates into " + str(n_clusters) + " clusters...")
model.cluster_coordinates(n_clusters, streaming=streaming)
first_iter = 1
model.get_fluxMatrix(n_lag, first_iter, last_iter) # extracts flux matrix, output model.fluxMatrixRaw
log.debug(f"Unprocessed flux matrix has shape {model.fluxMatrixRaw.shape}")
model.organize_fluxMatrix() # gets rid of bins with no connectivity, sorts along p1, output
(_configDef['zdesk_config']['zdesk_link']) + str(ticketid) + "\">" + str(ticketid) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(priority) + "</td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(status) + "</td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(created_at) + "</td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(updated_at) + "</td></tr><tr>" \
"<td style='width:6%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(organization) + "</a></td></tr><tr>" \
"<td style='width:6%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(assigned) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + tags + "</td>" \
"</tr></thead><tbody></tbody></table>"
#allTicket += "- <a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(ticketid) + "\">" + str(ticketid) + "</a> : " + str(subject) + " (assignee: " + str(assigned) + " updated: " + str(updated_at) + " status: <b>" + str(status) + "</b>) <br/>"
allTicket += "- <a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(ticketid) + "\">" + str(ticketid) + "</a><b> " + str(sevv) + " </b> : " + str(subject) + " (requester: " + str(requesterName) + " assignee: " + str(assigned) + " updated: <b>" + str(updated_at) + "</b> status <b>" + str(status) + "</b>) <br/>"
UniqueToken = len(set(table_header.split()))
#print(UniqueToken)
# dataLenght = len(str(table_body))
dataLenght = len(str(table_header))
#print(str(dataLenght))
limitReached = False
#if dataLenght >= 70000:
if dataLenght >= int(_configDef['limit']['character']) or UniqueToken >= int(_configDef['limit']['token']):
limitReached = True
if counter:
messageDetail.ReplyToChatV2_noBotLog("This result exceed the character limit and therefore will show into separate message")
if limitReached:
# table_bodyFull += ("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>" + str(allTicket) + "</header><body>" + str(table_header) + "</body></card>")
table_bodyFull += ("<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header>" + str(allTicket) + "</header><body>" + str(table_header) + "</body></card>")
reply = str(table_bodyFull)
messageDetail.ReplyToChatV2_noBotLog(str(reply))
dataLenght = ""
table_header = ""
UniqueToken = ""
table_bodyFull = ""
allTicket = ""
counter = False
if table_header == "":
return messageDetail.ReplyToChatV2_noBotLog("There is no result for this search")
else:
# table_bodyFull += ("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>" + str(allTicket) + "</header><body>" + str(table_header) + "</body></card>")
table_bodyFull += ("<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header>" + str(allTicket) + "</header><body>" + str(table_header) + "</body></card>")
reply = str(table_bodyFull)
#return messageDetail.ReplyToChatV2_noBotLog(str(reply))
messageDetail.ReplyToChatV2_noBotLog(str(reply))
return messageDetail.ReplyToChatV2_noBotLog("End of Result")
else:
botlog.LogSymphonyInfo("The calling user is an end user, cannot call the function /recent")
except:
return messageDetail.ReplyToChat("I am sorry, I was working on a different task, can you please retry")
####################################
######### CREATE TICKET ##########
####################################
def TicketCreate(messageDetail):
botlog.LogSymphonyInfo("#####################################")
botlog.LogSymphonyInfo("Bot Call: Create Agent Zendesk Ticket")
botlog.LogSymphonyInfo("#####################################")
try:
emailZendesk = ""
isAllowed = False
commandCallerUID = messageDetail.FromUserId
connComp.request("GET", "/pod/v3/users?uid=" + commandCallerUID, headers=headersCompany)
resComp = connComp.getresponse()
dataComp = resComp.read()
data_raw = str(dataComp.decode('utf-8'))
# data_dict = ast.literal_eval(data_raw)
data_dict = json.loads(str(data_raw))
dataRender = json.dumps(data_dict, indent=2)
d_org = json.loads(dataRender)
for index_org in range(len(d_org["users"])):
firstName = d_org["users"][index_org]["firstName"]
lastName = d_org["users"][index_org]["lastName"]
displayName = d_org["users"][index_org]["displayName"]
#companyName = d_org["users"][index_org]["company"]
companyNameTemp = d_org["users"][index_org]["company"]
companyTemp = str(companyNameTemp).replace("&", "&amp;").replace("<", "&lt;").replace('"', "&quot;").replace("'", "&apos;").replace(">", "&gt;")
companyName = str(companyTemp)
userID = str(d_org["users"][index_org]["id"])
#emailAddress = str(d_org["users"][index_org]["emailAddress"])
#################################################
try:
emailAddress = str(d_org["users"][index_org]["emailAddress"])
#print("User is connected: " + emailAddress)
emailZendesk = emailAddress
connectionRequired = False
except:
connectionRequired = True
# if connectionRequired:
data_lenght = len(dataComp)
if data_lenght > 450:
try:
#print("inside > 450")
query = "type:user " + emailAddress
except:
query = "type:user " + firstName + " " + lastName
botlog.LogSymphonyInfo(query)
elif data_lenght < 450:
try:
#print("inside < 450")
#query = "type:user " + emailAddress + " organization:" + companyName
query = "type:user " + emailAddress
except:
#query = "type:user " + firstName + " " + lastName + " organization:" + companyName
query = "type:user " + firstName + " " + lastName
botlog.LogSymphonyInfo(query)
else:
return messageDetail.ReplyToChat("No user information available")
botlog.LogSymphonyInfo(query)
testconfig = {
'zdesk_email': _configDef['zdesk_config']['zdesk_email'],
'zdesk_password': _configDef['zdesk_config']['zdesk_password'],
'zdesk_url': _configDef['zdesk_config']['zdesk_url'],
'zdesk_token': True
}
zendesk = Zendesk(**testconfig)
results = zendesk.search(query=query)
#print(results)
if str(results).startswith(
"{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat(
"This user does not exist on Zendesk, the name is misspelled or does not belong to this organisation.")
elif str(results).startswith(
"{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat(
"This organisation/company does not exist in Zendesk or name is misspelled.")
else:
data = json.dumps(results, indent=2)
d = json.loads(data)
for index in range(len(d["results"])):
# name = d["results"][index]["name"]
# email = str(d["results"][index]["email"])
role = str(d["results"][index]["role"])
#print(role)
#############################
organization_id = str(d["results"][index]["organization_id"])
zendeskUser_id = str(d["results"][index]["id"])
try:
# Convert the Zendesk ID to company name
conn.request("GET", "/api/v2/users/" + zendeskUser_id + "/organizations.json", headers=headers)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d_org = json.loads(data)
try:
org_Name = str(d_org["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("<", "&lt;").replace("\"", "&quot;").replace("&", "&amp;").replace("'", "&apos;").replace(">", "&gt;")
organization = str(org_name_temp)
except:
organization = "Company not yet created"
except:
organization = "None"
#############################
botlog.LogSymphonyInfo("The calling user is a Zendesk " + role)
if role == "Administrator" or role == "admin" or role == "Agent" or role == "agent":
isAllowed = True
#print(role)
botlog.LogSymphonyInfo("Role of the calling user: " + role)
else:
isAllowed = False
#################################################
botlog.LogSymphonyInfo(firstName + " " + lastName + " (" + displayName + ") from Company/Pod name: " + str(companyName) + " with UID: " + str(userID))
callerCheck = (firstName + " " + lastName + " - " + displayName + " - " + companyName + " - " + str(userID))
if callerCheck in AccessFile and isAllowed:
streamType = (messageDetail.ChatRoom.Type)
#print(streamType)
callername = messageDetail.Sender.Name
botlog.LogSymphonyInfo("**********")
botlog.LogSymphonyInfo("Zendesk - createTicket function invoked by " + callername + " with the below details:")
#splitting the message from Symphony on "|"
# detail[0] carries the command plus the ticket subject, detail[1] the description used in ticket creation
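# Illustrative example only: a flattened message such as
#   "/createTicket Login broken | User cannot log in since this morning"
# splits on "|" into ["/createTicket Login broken ", " User cannot log in since this morning"]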
try:
detail = messageDetail.Command.MessageFlattened.split("|")
#print(detail)
except:
return messageDetail.ReplyToChat("No data to split")
#flat is used for getting the uid from flattened
try:
flat = messageDetail.Command.MessageFlattened.split("_u_")
#print(flat)
except:
return messageDetail.ReplyToChatV2("Please use the following format: <b>/createTicket subject, description</b>")
try:
#Removing the string of the function in the output
ticketSubject = str(detail[0][10:]).replace("&quot;", "\"").replace("&amp;", "&").replace("&lt;", "<").replace("&apos;", "'").replace("&gt;", ">")
#botlog.LogSymphonyInfo("Ticket Subject: " + ticketSubject)
except:
return messageDetail.ReplyToChatV2("You did not enter a Subject. Please use the following format: <b>/createTicket subject, description</b>")
try:
ticketDescription = str(detail[1]).replace("\u200b", "").replace("\n", "\n")
#botlog.LogSymphonyInfo("Ticket Description: " + ticketDescription)
#botlog.LogSymphonyInfo("**********")
except:
return messageDetail.ReplyToChatV2("You did not enter a Subject. Please use the following format: <b>/createTicket subject, description</b>")
new_ticket = \
{
'ticket': {
'requester': {
'name': firstName + " " + lastName,
'email': emailAddress,
},
#{
'subject': str(organization) + ": " + ticketSubject,
'comment': ticketDescription,
'priority': "normal",
'type': "incident",
'ticket_field_entries':
[
{ #This will use the zendesk custom field to get the severity
#Using the parameter to be able to change this in the config file directly
'ticket_field_id': _configDef['zdesk_config']['zdesk_sev_field'],
'value': 'severity_3'
},
]
}
}
####################################
# base64Encoded = base64.b64encode(
# bytes((emailZendesk + "/token:" + _configDef['zdesk_config']['zdesk_password']), 'utf-8'))
# base64Enc = (base64Encoded.decode("utf-8"))
# print(str(base64Enc))
# base = ("Basic " + base64Enc)
# print(str(base))
#
# headers = {
# 'email_address': emailZendesk + "/token",
# 'password': (_configDef['zdesk_config']['zdesk_password']),
# 'authorization': base,
# 'cache-control': "no-cache",
# 'content-type': "application/json"
# }
####################################
testconfig = {
'zdesk_email': _configDef['zdesk_config']['zdesk_email'],
'zdesk_password': _configDef['zdesk_config']['zdesk_password'],
'zdesk_url': _configDef['zdesk_config']['zdesk_url'],
'zdesk_token': True
}
zendesk = Zendesk(**testconfig)
####################################
#Create a ticket and get its URL.
result = zendesk.ticket_create(data=new_ticket)
#print("Ticket is the following " +result)
ticketidSplit = result.split("/")
ticketURLid = ticketidSplit[6][:-5]
#print("Ticket ID " +ticketURLid)
tempLink = _configDef['zdesk_config']['zdesk_link'] + ticketURLid
result = tempLink
linkCreated = "<a href =\"" + _configDef['zdesk_config']['zdesk_link'] + ticketURLid + "\">" + result +"</a>"
#print("Link for ticket " +linkCreated)
return messageDetail.ReplyToChatV2("New Zendesk Ticket created: " +linkCreated)
else:
return messageDetail.ReplyToChat("You aren't authorised to use this command. Please consult Symphony Support team")
except:
try:
botlog.LogSymphonyInfo("Inside second try for ticketCreate")
emailZendesk = ""
isAllowed = False
commandCallerUID = messageDetail.FromUserId
connComp.request("GET", "/pod/v3/users?uid=" + commandCallerUID, headers=headersCompany)
# -*- coding:utf-8 -*-
"""
mincheng:<EMAIL>
"""
from __future__ import division
import sys
import printlog
import datetime
import os
import time
import sklearn
from sklearn.metrics import confusion_matrix
from baselines import sclearn
import evaluation
from collections import defaultdict
import tensorflow as tf
import mslstm
import config
import loaddata
import numpy as np
import visualize
from sklearn.metrics import accuracy_score
from baselines import nnkeras
import matplotlib.pyplot as plt
flags = tf.app.flags
FLAGS = flags.FLAGS
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert inputs.shape[0] == targets.shape[0]
if shuffle:
indices = np.arange(inputs.shape[0])
np.random.shuffle(indices)
for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
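# Usage sketch (arrays are hypothetical): with 100 samples and batchsize 20 this
# yields five (inputs, targets) pairs; any trailing partial batch is dropped.
#   for inp, out in iterate_minibatches(x_train, y_train, 20, shuffle=True):
#       ...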
def pprint(msg,method=''):
#if not 'Warning' in msg:
if 1<0:
sys.stdout = printlog.PyLogger('',method+'_'+str(FLAGS.num_neurons1))
print(msg)
try:
sys.stderr.write(msg+'\n')
except:
pass
#sys.stdout.flush()
else:
print(msg)
#def sess_run(commander,data,label):
#global sess, data_x, data_y
#return sess.run(commander, {data_x: data, data_y: label})
def train_lstm(method,filename_train_list,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list):
global tempstdout
FLAGS.option = method
dropout = 0.8
x_train, y_train, x_val, y_val, x_test, y_test = loaddata.get_data(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,
filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class,
multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,
waveType=FLAGS.wave_type)
"""
if filename_test == 'HB_AS_Leak.txt':
filename_train = 'HB_C_N_S.txt'
elif filename_test == 'HB_Code_Red_I.txt':
filename_train = 'HB_A_N_S.txt'
elif filename_test == 'HB_Nimda.txt':
filename_train = 'HB_A_C_S.txt'
elif filename_test == 'HB_Slammer.txt':
filename_train = 'HB_A_C_N.txt'
print(filename_test)
#x_train, y_train, x_val, y_val = loaddata.get_trainData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,
# filename_train, FLAGS.sequence_window, trigger_flag,is_binary_class,
# multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,
# waveType=FLAGS.wave_type)
#x_test, y_test = loaddata.get_testData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,
# filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class,
# multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,
# waveType=FLAGS.wave_type)
"""
#loaddata.Multi_Scale_Plotting_2(x_train)
if FLAGS.is_multi_scale:
FLAGS.scale_levels = x_train.shape[1]
FLAGS.input_dim = x_train.shape[-1]
FLAGS.number_class = y_train.shape[1]
if "Nimda" in filename_test:
FLAGS.batch_size = int(int(x_train.shape[0])/5)
else:
FLAGS.batch_size = int(x_train.shape[0])
else:
FLAGS.input_dim = x_train.shape[-1]
FLAGS.number_class = y_train.shape[1]
if "Nimda" in filename_test:
FLAGS.batch_size = int(int(x_train.shape[0])/5)
else:
FLAGS.batch_size = int(x_train.shape[0])
#g = tf.Graph()
with tf.Graph().as_default():
#config = tf.ConfigProto()
config = tf.ConfigProto(device_count={'GPU': 0}) #turn GPU on and off (the device_count key must be the device type 'GPU')
#config = tf.ConfigProto(log_device_placement=True)
#config.gpu_options.per_process_gpu_memory_fraction = 0.2
#with tf.variable_scope("middle")as scope:
tf.set_random_seed(1337)
#global_step = tf.Variable(0,name="global_step",trainable=False)
data_x,data_y = mslstm.inputs(FLAGS.option)
#output_u_w,prediction, label = mslstm.inference(data_x,data_y,FLAGS.option)
is_training = tf.placeholder(tf.bool)
prediction, label,output_last = mslstm.inference(data_x,data_y,FLAGS.option,is_training)
loss = mslstm.loss_(prediction, label)
tran_op,optimizer = mslstm.train(loss)
minimize = optimizer.minimize(loss)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#summary_op = tf.merge_all_summaries()
weights = tf.Variable(tf.constant(0.1, shape=[len(y_test)*FLAGS.sequence_window, 1, FLAGS.scale_levels]),
name="weights123")
init_op = tf.global_variables_initializer()
#init_op = tf.initialize_all_variables()
sess = tf.Session(config=config)
sess.run(init_op)
#summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)
#saver = tf.train.Saver()
saver = tf.train.Saver({"my_weights": weights})
epoch_training_loss_list = []
epoch_training_acc_list = []
epoch_val_loss_list = []
epoch_val_acc_list = []
early_stopping = 10
no_of_batches = int(len(x_train) / FLAGS.batch_size)
#visualize.curve_plotting_withWindow(x_train, y_train, 0, "Train_"+'_'+FLAGS.option)
#visualize.curve_plotting_withWindow(x_test, y_test, 2, "Test_"+'_'+FLAGS.option)
total_iteration = 0
for i in range(FLAGS.max_epochs):
if early_stopping > 0:
pass
else:
break
j_iteration = 0
for j_batch in iterate_minibatches(x_train,y_train,FLAGS.batch_size,shuffle=False):
j_iteration += 1
total_iteration += 1
inp, out = j_batch
sess.run(minimize, {data_x: inp, data_y: out, is_training:True})
training_acc, training_loss = sess.run((accuracy, loss), {data_x: inp, data_y: out,is_training:True})
#sys.stdout = tempstdout
val_acc, val_loss = sess.run((accuracy, loss), {data_x:x_val, data_y:y_val,is_training:True})
pprint(
FLAGS.option + "_Epoch%s" % (str(i + 1)) + ">" * 3 +'_Titer-'+str(total_iteration) +'_iter-'+str(j_iteration)+ str(FLAGS.wave_type) + '-' + str(FLAGS.scale_levels) + '-' + str(FLAGS.learning_rate)+'-'+str(FLAGS.num_neurons1)+'-'+str(FLAGS.num_neurons2)+ ">>>=" + "train_accuracy: %s, train_loss: %s" % (
str(training_acc), str(training_loss)) \
+ ",\tval_accuracy: %s, val_loss: %s" % (str(val_acc), str(val_loss)), method)
epoch_training_loss_list.append(training_loss)
epoch_training_acc_list.append(training_acc)
epoch_val_loss_list.append(val_loss)
epoch_val_acc_list.append(val_acc)
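# Patience-style early stopping: each check compares the newest validation
# accuracy against the immediately preceding one; after ten consecutive
# non-improving checks the epoch loop breaks at its next iteration.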
try:
max_val_acc = epoch_val_acc_list[-2]
except:
max_val_acc = 0
if epoch_val_acc_list[-1] < max_val_acc:
early_stopping -= 1
elif epoch_val_acc_list[-1] >= max_val_acc:
early_stopping = 10
if val_loss > 10 or np.isnan(val_loss):
break
if 1<0:
#pprint("PPP")
weights_results = sess.run(output_last, {data_x:x_test, data_y: y_test})
#print(weights_results)
#sys.stdout = tempstdout
visualize.curve_plotting(weights_results,y_test,filename_test,FLAGS.option)
#pprint("QQQ")
with open(filename_test+"_EA.txt",'w')as fout:
fout.write(weights_results)
#sess.run(weights.assign(weights_results))
else:
pass
#weights = output_u_w.eval(session=sess)
#weights = saver.restore(sess, "./tf_tmp/model.ckpt")
#pprint(weights)
#weight_list = return_max_index(weights)
result = sess.run(prediction, {data_x:x_test, data_y: y_test})
#print(result)
#pprint(result)
#print("LLL")
saver.save(sess, "./tf_tmp/model.ckpt")
sess.close()
#results = evaluation.evaluation(y_test, result)#Computing ACCURACY, F1-Score, .., etc
if is_binary_class == True:
#sys.stdout = tempstdout
results = evaluation.evaluation(y_test, result, trigger_flag, evalua_flag) # Computing ACCURACY,F1-score,..,etc
y_test = loaddata.reverse_one_hot(y_test)
result = loaddata.reverse_one_hot(result)
else:
symbol_list = [0, 1, 2, 3, 4]
sys.stdout = tempstdout
print(y_test)
print(result)
y_test = loaddata.reverse_one_hot(y_test)
result = loaddata.reverse_one_hot(result)
confmat = confusion_matrix(y_test, result, labels=symbol_list)
visualize.plotConfusionMatrix(confmat)
#accuracy = sklearn.metrics.accuracy_score(y_test, result)
symbol_list2 = [0]
y_ = []
for symbol in symbol_list2:
for tab in range(len(y_test)):
if y_test[tab] == symbol and y_test[tab] == result[tab]:
y_.append(symbol)
# print(y_test[0:10])
# rint(result[0:10])
# print("Accuracy is :"+str(accuracy))
accuracy = float(len(y_)) / (list(result).count(symbol))
print("Accuracy of " + str(symbol) + " is :" + str(accuracy))
print("True is ")
# print(y_test)
print("The 0 of True is " + str(list(y_test).count(0)))
print("The 1 of True is " + str(list(y_test).count(1)))
print("The 2 of True is " + str(list(y_test).count(2)))
print("The 3 of True is " + str(list(y_test).count(3)))
print("The 4 of True is " + str(list(y_test).count(4)))
# print(len(y_test))
print("Predict is ")
# print(result)
print("The 0 of Predict is " + str(list(result).count(0)))
print("The 1 of Predict is " + str(list(result).count(1)))
print("The 2 of Predict is " + str(list(result).count(2)))
print("The 3 of Predict is " + str(list(result).count(3)))
print("The 4 of Predict is " + str(list(result).count(4)))
print("Accuracy is :" + str(accuracy))
f1_score = sklearn.metrics.f1_score(y_test, result,average="macro")
print("F-score is :" + str(f1_score))
results = {'ACCURACY': accuracy, 'F1_SCORE': f1_score, 'AUC': 9999, 'G_MEAN': 9999}
sys.stdout = tempstdout
#print(weights_results.shape)
#print("215")
y_test2 = np.array(y_test)
result2 = np.array(result)
#results = accuracy_score(y_test2, result2)
#print(y_test2)
#print(result2)
#print(results)
with open(os.path.join(os.path.join(os.getcwd(),'stat'),"StatFalseAlarm_" + filename_test + "_True.txt"), "w") as fout:
for tab in range(len(y_test2)):
fout.write(str(int(y_test2[tab])) + '\n')
with open(os.path.join(os.path.join(os.getcwd(),'stat'),"StatFalseAlarm_" + filename_test + "_" + method + "_" + "_Predict.txt"), "w") as fout:
for tab in range(len(result2)):
fout.write(str(int(result2[tab])) + '\n')
#eval_list = ["AUC", "G_MEAN","ACCURACY","F1_SCORE"]
for each_eval in evaluation_list:
result_list_dict[each_eval].append(results[each_eval])
if evalua_flag:
with open(os.path.join(FLAGS.output, "TensorFlow_Log" + filename_test + ".txt"), "a")as fout:
if not FLAGS.is_multi_scale:
outfileline = FLAGS.option + "_epoch:" + str(FLAGS.max_epochs) + ",_lr:" + str(FLAGS.learning_rate) + ",_multi_scale:" + str(FLAGS.is_multi_scale) + ",hidden_nodes: "+str(FLAGS.num_neurons1)+"/"+str(FLAGS.num_neurons2) + "\n"
else:
outfileline = FLAGS.option + "_epoch:" + str(FLAGS.max_epochs) + ",_wavelet:"+str(FLAGS.wave_type) + ",_lr:" + str(FLAGS.learning_rate) + ",_multi_scale:" + str(FLAGS.is_multi_scale) + ",_train_set_using_level:" + str(FLAGS.scale_levels) + ",hidden_nodes: "+str(FLAGS.num_neurons1)+"/"+str(FLAGS.num_neurons2) + "\n"
fout.write(outfileline)
for each_eval in evaluation_list:
#for eachk, eachv in result_list_dict.items():
fout.write(each_eval + ": " + str(round(np.mean(result_list_dict[each_eval]), 3)) + ",\t")
fout.write('\n')
return epoch_training_acc_list,epoch_val_acc_list,epoch_training_loss_list,epoch_val_loss_list
else:
return results
def train_classic(method,filename_train,filename_test, trigger_flag,evalua_flag,is_binary_class,evaluation_list):
return sclearn.Basemodel(method,filename_train,filename_test,trigger_flag,evalua_flag,evaluation_list)
def train(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type='db1'):
global data_x, data_y
result_list_dict = defaultdict(list)
#evaluation_list = ["ACCURACY", "F1_SCORE", "AUC", "G_MEAN"]
for each in evaluation_list:
result_list_dict[each] = []
if 'L' in method or 'RNN' in method:
sys.stdout = tempstdout
if method == '1L' or method == '2L' or method == '3L' \
or method == '4L' or method == '5L' or method == 'RNN':
#FLAGS.learning_rate = 0.01
FLAGS.is_multi_scale = False
elif 'AL' == method:
#FLAGS.learning_rate = 0.01
FLAGS.is_multi_scale = False
else:
#FLAGS.learning_rate = 0.05
FLAGS.is_multi_scale = True
FLAGS.wave_type = wave_type
return train_lstm(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list)
else:
sys.stdout = tempstdout
return train_classic(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,evaluation_list)
def main(unused_argv):
global tempstdout
#main function
#wave_type_list =['db1','db2','haar','coif1','db1','db2','haar','coif1','db1','db2']
wave_type_list = ['haar']
multi_scale_value_list = [2,3,4,5,6,10]
case_label = {'SVM':'SVM','NB':'NB','DT':'DT','Ada.Boost':'Ada.Boost','RF':'RF','1NN':'1NN','1NN-DTW':'DTW',
'SVMF':'SVMF','SVMW':'SVMW','MLP':'MLP','RNN':'RNN','1L':'LSTM','2L':'2-LSTM','3L':'3-LSTM',\
'AL':'ALSTM','HL':'MSLSTM','HAL':'MSLSTM'}
trigger_flag = 1
evalua_flag = True
is_binary_class = True
single_layer = True
if is_binary_class:
filename_list = ["HB_AS_Leak.txt","HB_Code_Red_I.txt","HB_Nimda.txt","HB_Slammer.txt"]
#filename_list = ["HB_Slammer.txt"] # HB_Code_Red_I.txt
# HB_Nimda.txt
# HB_Slammer.txt
else:
filename_list = ["HB_ALL.txt"]
if trigger_flag == 1 :
if single_layer:
#case = ['AL']
#case = ['1L','3L','AL']
case = ['MLP','RNN','1L','2L','3L','AL']
else:
case = ['HL','HAL']
#case = ['HL','HAL']
else:
case = ["1NN"]
#case = ["RF","SVM","SVMF","SVMW","NB","DT","Ada.Boost","1NN"]
#case = ["NB","1NN","Ada.Boost","RF"]
if evalua_flag:
evaluation_list = ["AUC", "G_MEAN", "ACCURACY", "F1_SCORE"]
else:
evaluation_list = ["FPR", "TPR","AUC","G_MEAN"]
wave_type = wave_type_list[0]
hidden_unit1_list = [8,16,32,64,128,256]
#hidden_unit1_list = [16]
hidden_unit2_list = [8,16,32,64,128]
#hidden_unit2_list = [8]
#combination_list = [(16,8),(16,32),(16,64),(32,64),(128,16)]
#combination_list = [(8,8),(8,32),(16,8),(16,64),(128,16),(128,64)]
#learning_rate_list = [0.001, 0.01, 0.05, 0.1]
learning_rate_list = [0.1,0.05,0.01,0.001]
for tab in range(len(filename_list)):
case_list = []
train_acc_list = []
val_acc_list = []
train_loss_list = []
val_loss_list = []
if single_layer:
combination_list = hidden_unit1_list
else:
combination_list = []
for each1 in hidden_unit1_list:
for each2 in hidden_unit2_list:
combination_list.append((each1, each2))
"""
if filename_list[tab] == "HB_AS_Leak.txt":
combination_list = [(32, 64), (32, 128), (64, 64)]
elif filename_list[tab] == "HB_Code_Red_I.txt":
combination_list = [(32, 32), (16, 8), (16, 64), (32, 64)]
elif filename_list[tab] == "HB_Nimda.txt":
combination_list = [(8, 32), (32, 64)]
elif filename_list[tab] == "HB_Slammer.txt":
combination_list = [(16, 8), (16, 32), (16, 64)]
"""
results = {}
for each_case in case:
if 1>0:
case_list.append(case_label[each_case])
if trigger_flag: #
sys.stdout = tempstdout
if each_case == 'MLP':
if evalua_flag:
nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)
else:
results[case_label[each_case]] = nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)
else:
if evalua_flag:
for learning_rate in learning_rate_list:
FLAGS.learning_rate = learning_rate
for each_comb in combination_list:
if not 'H' in each_case:
FLAGS.num_neurons1 =
import datetime
from mimetypes import guess_type
from django import forms
from django.contrib import admin, messages
from django.contrib.admin.util import unquote
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import ValidationError, PermissionDenied
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, redirect, render
from django.template.context import RequestContext
from django.utils.decorators import method_decorator
from django.utils.encoding import force_unicode
from django.utils.functional import update_wrapper
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
# from django.contrib.admin.views.decorators import staff_member_required
from metashare import settings
from metashare.accounts.models import EditorGroup, EditorGroupManagers
from metashare.repository.models import LrQuality, TranslationQuality
from metashare.repository.editor.editorutils import FilteredChangeList
from metashare.repository.editor.forms import StorageObjectUploadForm
from metashare.repository.editor.inlines import ReverseInlineFormSet, \
ReverseInlineModelAdmin
# from metashare.repository.editor.lookups import MembershipDummyLookup
from metashare.repository.editor.schemamodel_mixin import encode_as_inline
from metashare.repository.editor.superadmin import SchemaModelAdmin
# from metashare.repository.editor.widgets import OneToManyWidget
from metashare.repository.models import resourceComponentTypeType_model, \
corpusInfoType_model, languageDescriptionInfoType_model, \
lexicalConceptualResourceInfoType_model, \
corpusMediaTypeType_model, languageDescriptionMediaTypeType_model, \
lexicalConceptualResourceMediaTypeType_model, resourceInfoType_model, \
licenceInfoType_model, User
from metashare.repository.supermodel import SchemaModel
# from metashare.repository.views import MAXIMUM_READ_BLOCK_SIZE
from metashare.stats.model_utils import saveLRStats, UPDATE_STAT, INGEST_STAT, DELETE_STAT
from metashare.storage.models import PUBLISHED, INGESTED, INTERNAL, \
ALLOWED_ARCHIVE_EXTENSIONS
from metashare.utils import verify_subclass, create_breadcrumb_template_params
from os.path import split, getsize
csrf_protect_m = method_decorator(csrf_protect)
class ResourceComponentInlineFormSet(ReverseInlineFormSet):
'''
A formset with custom save logic for resources.
'''
def clean(self):
actual_instance = self.get_actual_resourceComponentType()
error_list = ''
if isinstance(actual_instance, corpusInfoType_model):
error_list = error_list + self.clean_corpus(actual_instance)
elif isinstance(actual_instance, languageDescriptionInfoType_model):
error_list = error_list + self.clean_langdesc(actual_instance)
elif isinstance(actual_instance, lexicalConceptualResourceInfoType_model):
error_list = error_list + self.clean_lexicon(actual_instance)
# elif isinstance(actual_instance, toolServiceInfoType_model):
# error_list = error_list + self.clean_toolservice(actual_instance)
else:
raise Exception, "unexpected resource component class type: {}".format(actual_instance.__class__.__name__)
try:
actual_instance.full_clean()
except ValidationError:
#raise ValidationError('The content of the {} general info is not valid.'.format(self.get_actual_resourceComponentType()._meta.verbose_name))
#raise AssertionError("Meaningful error message for general info")
error_list = error_list + 'The content of the {} general info is not valid.'.format(self.get_actual_resourceComponentType()._meta.verbose_name)
if error_list != '':
raise ValidationError(error_list)
super(ResourceComponentInlineFormSet, self).clean()
def clean_media(self, parent, fieldnames):
'''
Clean the list of media data in the XXMediaType parent object.
'''
error = ''
for modelfieldname in fieldnames:
if modelfieldname not in self.data:
continue
value = self.data[modelfieldname]
if not value:
error = error + format(modelfieldname) + ' error. '
return error
def clean_corpus_one2many(self, corpusmediatype):
error = ''
media = 'corpusTextInfo'
flag = 'showCorpusTextInfo'
if flag in self.data and self.data[flag]:
num_infos = corpusmediatype.corpustextinfotype_model_set.all().count()
if num_infos == 0:
error += media + ' error. '
media = 'corpusVideoInfo'
flag = 'showCorpusVideoInfo'
if flag in self.data and self.data[flag]:
num_infos = corpusmediatype.corpusvideoinfotype_model_set.all().count()
if num_infos == 0:
error += media + ' error. '
return error
def clean_corpus(self, corpus):
return self.clean_corpus_one2many(corpus.corpusMediaType) \
+ self.clean_media(corpus.corpusMediaType, \
('corpusAudioInfo', 'corpusImageInfo', 'corpusTextNumericalInfo', 'corpusTextNgramInfo'))
def clean_langdesc(self, langdesc):
return self.clean_media(langdesc.languageDescriptionMediaType, \
('languageDescriptionTextInfo', 'languageDescriptionVideoInfo', 'languageDescriptionImageInfo'))
def clean_lexicon(self, lexicon):
return self.clean_media(lexicon.lexicalConceptualResourceMediaType, \
('lexicalConceptualResourceTextInfo', 'lexicalConceptualResourceAudioInfo', \
'lexicalConceptualResourceVideoInfo', 'lexicalConceptualResourceImageInfo'))
def clean_toolservice(self, tool):
return ''
def save_media(self, parent, fieldnames):
'''
Save the list of media data in the XXMediaType parent object.
'''
for modelfieldname in fieldnames:
if modelfieldname not in self.data:
continue
value = self.data[modelfieldname]
if not value:
continue
modelfield = parent._meta.get_field(modelfieldname)
child_id = int(value)
child = modelfield.rel.to.objects.get(pk=child_id)
setattr(parent, modelfieldname, child)
parent.save()
def save_corpus(self, corpus, commit):
self.save_media(corpus.corpusMediaType, \
('corpusAudioInfo', 'corpusImageInfo', 'corpusTextNumericalInfo', 'corpusTextNgramInfo'))
def save_langdesc(self, langdesc, commit):
self.save_media(langdesc.languageDescriptionMediaType, \
('languageDescriptionTextInfo', 'languageDescriptionVideoInfo', 'languageDescriptionImageInfo'))
def save_lexicon(self, lexicon, commit):
self.save_media(lexicon.lexicalConceptualResourceMediaType, \
('lexicalConceptualResourceTextInfo', 'lexicalConceptualResourceAudioInfo', \
'lexicalConceptualResourceVideoInfo', 'lexicalConceptualResourceImageInfo'))
def save_toolservice(self, tool, commit):
pass
def get_actual_resourceComponentType(self):
if not (self.forms and self.forms[0].instance):
raise Exception, "Cannot save for unexisting instance"
if self.forms[0].instance.pk is not None:
actual_instance = self.forms[0].instance
else:
actual_instance = resourceComponentTypeType_model.objects.get(pk=self.data['resourceComponentId'])
self.forms[0].instance = actual_instance # we need to use the resourceComponentType we created earlier
actual_instance = actual_instance.as_subclass()
return actual_instance
def save(self, commit=True):
actual_instance = self.get_actual_resourceComponentType()
if isinstance(actual_instance, corpusInfoType_model):
self.save_corpus(actual_instance, commit)
elif isinstance(actual_instance, languageDescriptionInfoType_model):
self.save_langdesc(actual_instance, commit)
elif isinstance(actual_instance, lexicalConceptualResourceInfoType_model):
self.save_lexicon(actual_instance, commit)
# elif isinstance(actual_instance, toolServiceInfoType_model):
# self.save_toolservice(actual_instance, commit)
else:
raise Exception, "unexpected resource component class type: {}".format(actual_instance.__class__.__name__)
super(ResourceComponentInlineFormSet, self).save(commit)
return (actual_instance,)
# pylint: disable-msg=R0901
class ResourceComponentInline(ReverseInlineModelAdmin):
formset = ResourceComponentInlineFormSet
def __init__(self,
parent_model,
parent_fk_name,
model, admin_site,
inline_type):
super(ResourceComponentInline, self). \
__init__(parent_model, parent_fk_name, model, admin_site, inline_type)
self.template = 'repository/editor/resourceComponentInline.html'
# pylint: disable-msg=R0901
class IdentificationInline(ReverseInlineModelAdmin):
readonly_fields = ('metaShareId',)
def change_resource_status(resource, status, precondition_status=None):
'''
Change the status of the given resource to the new status given.
If precondition_status is not None, then apply the change ONLY IF the
current status of the resource is precondition_status; otherwise do nothing.
The status of non-master copy resources is never changed.
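Example (mirroring how publish_action below calls it):
change_resource_status(resource, status=PUBLISHED, precondition_status=INGESTED)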
'''
if not hasattr(resource, 'storage_object'):
raise NotImplementedError, "{0} has no storage object".format(resource)
if resource.storage_object.master_copy and \
(precondition_status is None \
or precondition_status == resource.storage_object.publication_status):
resource.storage_object.publication_status = status
resource.storage_object.save()
# explicitly write metadata XML and storage object to the storage folder
resource.storage_object.update_storage()
return True
return False
def has_edit_permission(request, res_obj):
"""
Returns `True` if the given request has permission to edit the metadata
for the current resource, `False` otherwise.
"""
return request.user.is_active and (request.user.is_superuser \
or request.user in res_obj.owners.all() \
or res_obj.editor_groups.filter(name__in=
request.user.groups.values_list('name', flat=True)).count() != 0)
def has_publish_permission(request, queryset):
"""
Returns `True` if the given request has permission to change the publication
status of all given language resources, `False` otherwise.
"""
# if not request.user.is_superuser:
# for obj in queryset:
# res_groups = obj.editor_groups.all()
# # we only allow a user to ingest/publish/unpublish a resource if she
# # is a manager of one of the resource's `EditorGroup`s
# if not any(res_group.name == mgr_group.managed_group.name
# for res_group in res_groups
# for mgr_group in EditorGroupManagers.objects.filter(name__in=
# request.user.groups.values_list('name', flat=True))):
# return False
if not request.user.is_staff or request.user.groups.filter(name='naps').exists():
return False
return True
# Check if resource is valid for contributed resources that are imported
# in the repository as internal and can be published by mistake
# without the remaining required data filled in
def resourceIsValid(res):
dist_ok = False
linguality_ok = False
lang_ok = False
size_ok = False
lcrt_ok = False
ldt_ok = False
from django.core.exceptions import ObjectDoesNotExist
try:
if res.distributionInfo:
dist_ok = True
except ObjectDoesNotExist:
pass
## based on resource type:
corpus_media = res.resourceComponentType.as_subclass()
# 2. check if lingualityType exists
if isinstance(corpus_media, corpusInfoType_model):
media_type = corpus_media.corpusMediaType
for corpus_info in media_type.corpustextinfotype_model_set.all():
if corpus_info.lingualityInfo.lingualityType:
linguality_ok = True
break
for corpus_info in media_type.corpustextinfotype_model_set.all():
if corpus_info.languageinfotype_model_set.all().count() > 0:
lang_ok = True
break
for corpus_info in media_type.corpustextinfotype_model_set.all():
if corpus_info.sizeinfotype_model_set.all().count() > 0:
size_ok = True
break
return dist_ok and linguality_ok and lang_ok and size_ok
elif isinstance(corpus_media, lexicalConceptualResourceInfoType_model):
lcr_media_type = corpus_media.lexicalConceptualResourceMediaType
if corpus_media.lexicalConceptualResourceType:
lcrt_ok = True
if lcr_media_type.lexicalConceptualResourceTextInfo:
if lcr_media_type.lexicalConceptualResourceTextInfo.lingualityInfo.lingualityType:
linguality_ok = True
if lcr_media_type.lexicalConceptualResourceTextInfo.languageinfotype_model_set.all().count() > 0:
lang_ok = True
if lcr_media_type.lexicalConceptualResourceTextInfo.sizeinfotype_model_set.all().count() > 0:
size_ok = True
return dist_ok and lcrt_ok and linguality_ok and lang_ok and size_ok
elif isinstance(corpus_media, languageDescriptionInfoType_model):
ld_media_type = corpus_media.languageDescriptionMediaType
if corpus_media.languageDescriptionType:
ldt_ok = True
if ld_media_type.languageDescriptionTextInfo:
if ld_media_type.languageDescriptionTextInfo.lingualityInfo.lingualityType:
linguality_ok = True
if ld_media_type.languageDescriptionTextInfo.languageinfotype_model_set.all().count() > 0:
lang_ok = True
if ld_media_type.languageDescriptionTextInfo.sizeinfotype_model_set.all().count() > 0:
size_ok = True
return dist_ok and ldt_ok and linguality_ok and lang_ok and size_ok
return dist_ok and ldt_ok
class MetadataForm(forms.ModelForm):
def save(self, commit=True):
today = datetime.date.today()
if not self.instance.metadataCreationDate:
self.instance.metadataCreationDate = today
self.instance.metadataLastDateUpdated = today
return super(MetadataForm, self).save(commit)
class MetadataInline(ReverseInlineModelAdmin):
form = MetadataForm
readonly_fields = ('metadataCreationDate', 'metadataLastDateUpdated',)
class ResourceModelAdmin(SchemaModelAdmin):
inline_type = 'stacked'
custom_one2one_inlines = {'identificationInfo':IdentificationInline,
'resourceComponentType':ResourceComponentInline,
'metadataInfo':MetadataInline, }
content_fields = ('resourceComponentType',)
# list_display = ('__unicode__', 'resource_type', 'publication_status', 'resource_Owners', 'editor_Groups',)
list_display = ('__unicode__', 'resource_type', 'publication_status', 'resource_Owners',)
list_filter = ('storage_object__publication_status',)
actions = ('publish_action', 'unpublish_action', 'ingest_action',
'export_xml_action', 'delete', 'add_group', 'remove_group',
'add_owner', 'remove_owner')
hidden_fields = ('storage_object', 'owners', 'editor_groups',)
def publish_action(self, request, queryset):
if has_publish_permission(request, queryset):
successful = 0
for obj in queryset:
if resourceIsValid(obj):
if change_resource_status(obj, status=PUBLISHED,
precondition_status=INGESTED):
successful += 1
saveLRStats(obj, UPDATE_STAT, request)
else:
messages.error(request,
_('Only valid resources can be published; '
'please, edit the resource and re-try'))
return
if successful > 0:
messages.info(request, ungettext(
'Successfully published %(ingested)s ingested resource.',
'Successfully published %(ingested)s ingested resources.',
successful) % {'ingested': successful})
else:
messages.error(request,
_('Only ingested resources can be published.'))
else:
messages.error(request, _('You do not have the permission to ' \
'perform this action for all selected resources.'))
publish_action.short_description = _("Publish selected ingested resources")
def unpublish_action(self, request, queryset):
if has_publish_permission(request, queryset):
successful = 0
for obj in queryset:
if change_resource_status(obj, status=INGESTED,
precondition_status=PUBLISHED):
successful += 1
saveLRStats(obj, INGEST_STAT, request)
if successful > 0:
messages.info(request, ungettext(
'Successfully unpublished %s published resource.',
'Successfully unpublished %s published resources.',
successful) % (successful,))
else:
messages.error(request,
_('Only published resources can be unpublished.'))
else:
messages.error(request, _('You do not have the permission to ' \
'perform this action for all selected resources.'))
to the size of each partition.
::
t = Tree("(f,((d, ((a,b),c)),e));")
print t
#
# /-f
# |
# | /-d
# ----| |
# | /---| /-a
# | | | /---|
# | | \---| \-b
# \---| |
# | \-c
# |
# \-e
t.ladderize()
print t
# /-f
# ----|
# | /-e
# \---|
# | /-d
# \---|
# | /-c
# \---|
# | /-a
# \---|
# \-b
"""
if not self.is_leaf():
n2s = {}
for n in self.get_children():
s = n.ladderize(direction=direction)
n2s[n] = s
self.children.sort(lambda x,y: cmp(n2s[x], n2s[y]))
if direction == 1:
self.children.reverse()
size = sum(n2s.values())
else:
size = 1
return size
def sort_descendants(self, attr="name"):
"""
.. versionadded: 2.1
This function sorts the branches of a given tree by
considering node names. After the tree is sorted, nodes are
labeled using ascending numbers. This can be used to ensure
that nodes in a tree with the same node names are always
labeled in the same way. Note that if duplicated names are
present, extra criteria should be added to sort nodes.
Unique id is stored as a node._nid attribute
"""
node2content = self.get_cached_content(store_attr=attr, container_type=list)
def sort_by_content(x, y):
return cmp(str(sorted(node2content[x])),
str(sorted(node2content[y])))
for n in self.traverse():
if not n.is_leaf():
n.children.sort(sort_by_content)
def get_cached_content(self, store_attr=None, container_type=set, _store=None):
"""
.. versionadded: 2.2
Returns a dictionary pointing to the preloaded content of each
internal node under this tree. Such a dictionary is intended
to work as a cache for operations that require many traversal
operations.
:param None store_attr: Specifies the node attribute that
should be cached (i.e. name, distance, etc.). When none, the
whole node instance is cached.
:param _store: (internal use)
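A minimal usage sketch (the newick is illustrative)::
t = Tree("((a,b),c);")
node2leaves = t.get_cached_content()
# node2leaves maps each node to the set of leaf instances below it,
# e.g. node2leaves[t] holds the three leaves 'a', 'b' and 'c'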
"""
if _store is None:
_store = {}
for ch in self.children:
ch.get_cached_content(store_attr=store_attr,
container_type=container_type,
_store=_store)
if self.children:
val = container_type()
for ch in self.children:
if type(val) == list:
val.extend(_store[ch])
if type(val) == set:
val.update(_store[ch])
_store[self] = val
else:
if store_attr is None:
val = self
else:
val = getattr(self, store_attr)
_store[self] = container_type([val])
return _store
def robinson_foulds(self, t2, attr_t1="name", attr_t2="name",
unrooted_trees=False, expand_polytomies=False,
polytomy_size_limit=5, skip_large_polytomies=False):
"""
.. versionadded: 2.2
Returns the Robinson-Foulds symmetric distance between current
tree and a different tree instance.
:param t2: target tree
:param name attr_t1: Compare trees using a custom node
attribute as a node name.
:param name attr_t2: Compare trees using a custom node
attribute as a node name in target tree.
:param False unrooted_trees: If True, consider trees as unrooted.
:param False expand_polytomies: If True, all polytomies in the reference
and target tree will be expanded into all possible binary
trees. Robinson-foulds distance will be calculated between all
tree combinations and the minimum value will be returned.
See also :func:`NodeTree.expand_polytomies`.
:returns: (symmetric distance, total partitions, common node
names, partitions in current tree, partitions in target tree)
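Example (a sketch with illustrative leaf names)::
t1 = Tree("((a,b),(c,d));")
t2 = Tree("((a,c),(b,d));")
rf, max_parts, common, edges1, edges2 = t1.robinson_foulds(t2)
# rf is the symmetric distance; rf / max_parts gives a normalized value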
"""
ref_t = self
target_t = t2
if not unrooted_trees and (len(ref_t.children) !=
2 or len(target_t.children) != 2):
raise ValueError("Unrooted tree found! You may want to activate the unrooted_trees flag.")
if expand_polytomies and unrooted_trees:
raise ValueError("expand_polytomies and unrooted_trees arguments cannot be enabled at the same time")
if expand_polytomies:
ref_trees = [Tree(nw) for nw in
ref_t.expand_polytomies(map_attr=attr_t1,
polytomy_size_limit=polytomy_size_limit,
skip_large_polytomies=skip_large_polytomies)]
target_trees = [Tree(nw) for nw in
target_t.expand_polytomies(map_attr=attr_t2,
polytomy_size_limit=polytomy_size_limit,
skip_large_polytomies=skip_large_polytomies)]
attr_t1, attr_t2 = "name", "name"
else:
ref_trees = [ref_t]
target_trees = [target_t]
min_comparison = None
for t1 in ref_trees:
t1_content = t1.get_cached_content()
t1_leaves = t1_content[t1]
if unrooted_trees:
edges1 = set([
tuple(sorted([tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1)])),
tuple(sorted([getattr(n, attr_t1) for n in t1_leaves-content if hasattr(n, attr_t1)]))]))
for content in t1_content.itervalues()])
else:
edges1 = set([
tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1)]))
for content in t1_content.itervalues()])
for t2 in target_trees:
t2_content = t2.get_cached_content()
t2_leaves = t2_content[t2]
if unrooted_trees:
edges2 = set([
tuple(sorted([
tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2)])),
tuple(sorted([getattr(n, attr_t2) for n in t2_leaves-content if hasattr(n, attr_t2)]))]))
for content in t2_content.itervalues()])
else:
edges2 = set([
tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2)]))
for content in t2_content.itervalues()])
rf = len(edges1 ^ edges2)
max_parts = len(edges1 | edges2)
target_names = set([getattr(_n, attr_t1) for _n in t1_leaves])
ref_names = set([getattr(_n, attr_t2) for _n in t2_leaves])
common_names = target_names & ref_names
if not min_comparison or min_comparison[0] > rf:
min_comparison = [rf, max_parts, common_names, edges1, edges2]
return min_comparison
def get_partitions(self):
"""
.. versionadded: 2.1
It returns the set of all possible partitions under a
node. Note that current implementation is quite inefficient
when used in very large trees.
t = Tree("((a, b), e);")
partitions = t.get_partitions()
# Will return:
# a,b,e
# a,e
# b,e
# a,b
# e
# b
# a
"""
all_leaves = frozenset(self.get_leaf_names())
all_partitions = set([all_leaves])
for n in self.iter_descendants():
p1 = frozenset(n.get_leaf_names())
p2 = frozenset(all_leaves - p1)
all_partitions.add(p1)
all_partitions.add(p2)
return all_partitions
def convert_to_ultrametric(self, tree_length, strategy="balanced"):
"""
.. versionadded: 2.1
Converts a tree to ultrametric topology (all leaves must have
the same distance to root). Note that, for visual inspection
of ultrametric trees, node.img_style["size"] should be set to
0.
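Example (sketch using the default "balanced" strategy)::
t = Tree("((a,b),c);")
t.convert_to_ultrametric(tree_length=10)
# every leaf now lies at distance 10 from the root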
"""
# pre-calculate how many splits remain under each node
node2max_depth = {}
for node in self.traverse("postorder"):
if not node.is_leaf():
max_depth = max([node2max_depth[c] for c in node.children]) + 1
node2max_depth[node] = max_depth
else:
node2max_depth[node] = 1
node2dist = {self: 0.0}
tree_length = float(tree_length)
step = tree_length / node2max_depth[self]
for node in self.iter_descendants("levelorder"):
if strategy == "balanced":
node.dist = (tree_length - node2dist[node.up]) / node2max_depth[node]
node2dist[node] = node.dist + node2dist[node.up]
elif strategy == "fixed":
if not node.is_leaf():
node.dist = step
else:
node.dist = tree_length - ((node2dist[node.up]) * step)
node2dist[node] = node2dist[node.up] + 1
node.dist = node.dist
def check_monophyly(self, values, target_attr, ignore_missing=False):
"""
Returns True if a given target attribute is monophyletic under
this node for the provided set of values.
If not all values are represented in the current tree
structure, a ValueError exception will be raised to warn that
strict monophyly could never be reached (this behaviour can be
avoided by enabling the `ignore_missing` flag).
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees, or any custom feature present in the tree).
:param False ignore_missing: Avoid raising an Exception when
missing attributes are found.
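Example (illustrative newick)::
t = Tree("(((a,a),c),e);")
print t.check_monophyly(values=["a"], target_attr="name")
# -> (True, "monophyletic")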
"""
if type(values) != set:
values = set(values)
# This is the only time I traverse the tree, then I use cached
# leaf content
n2leaves = self.get_cached_content()
# Locate leaves matching requested attribute values
targets = [leaf for leaf in n2leaves[self]
if getattr(leaf, target_attr) in values]
# Raise an error if requested attribute values are not even present
if not ignore_missing:
missing_values = values - set([getattr(n, target_attr) for n
in targets])
if missing_values:
raise ValueError("Expected '%s' value(s) not found: %s" %(
target_attr, ','.join(missing_values)))
# Check monophyly with get_common_ancestor. Note that this
# step does not require traversing the tree again because
# targets are node instances instead of node names, and
# get_common_ancestor function is smart enough to detect it
# and avoid unnecessary traversing.
common = self.get_common_ancestor(targets)
observed = n2leaves[common]
foreign_leaves = [leaf for leaf in observed
if getattr(leaf, target_attr) not in values]
if not foreign_leaves:
return True, "monophyletic"
else:
# if the requested attribute is not monophyletic in this
# node, let's differentiate between poly and paraphyly.
poly_common = self.get_common_ancestor(foreign_leaves)
# if the common ancestor of all foreign leaves is self
# contained, we have a paraphyly. Otherwise, polyphyly.
polyphyletic = [leaf for leaf in poly_common if
getattr(leaf, target_attr) in values]
if polyphyletic:
return False, "polyphyletic"
else:
return False, "paraphyletic"
def get_monophyletic(self, values, target_attr):
"""
.. versionadded:: 2.2
Returns a list of nodes matching the provided monophyly
criteria. For
import logging
import signal
from base64 import b64encode
from concurrent import futures
from datetime import datetime
from time import perf_counter_ns
import grpc
import numpy as np
import pytz
from google.protobuf.timestamp_pb2 import Timestamp
from pfizer import compute_probs as compute_pfizer_probs
from proto import corical_pb2, corical_pb2_grpc
from risks import generate_relatable_risks
from tts import compute_probs, scenario_to_vec
from tts_util import get_age_bracket, get_age_bracket_pz, get_link
utc = pytz.UTC
logging.basicConfig(format="%(asctime)s: %(name)s: %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
server = grpc.server(futures.ThreadPoolExecutor(32))
server.add_insecure_port("[::]:21000")
def now():
return datetime.now(utc)
def Timestamp_from_datetime(dt: datetime):
pb_ts = Timestamp()
pb_ts.FromDatetime(dt)
return pb_ts
def generate_bar_graph_risks(input_risks):
return sorted(input_risks, key=lambda br: br.risk) + [
corical_pb2.BarGraphRisk(
label=r["event"],
risk=r["risk"],
is_relatable=True,
)
for r in generate_relatable_risks([risk.risk for risk in input_risks])
]
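# Note: generate_bar_graph_risks returns the input rows sorted by ascending
# risk, then appends the "relatable" everyday-comparison risks produced by
# generate_relatable_risks, flagged with is_relatable=True (presumably so the
# frontend can render them differently).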
class Corical(corical_pb2_grpc.CoricalServicer):
def ComputeTTS(self, request, context):
start = perf_counter_ns()
time = Timestamp_from_datetime(now())
logger.info(request)
messages = []
# sex
if request.sex == "female":
sex_label = "female"
sex_vec = np.array([0.0, 1.0])
elif request.sex == "male":
sex_label = "male"
sex_vec = np.array([1.0, 0.0])
elif request.sex == "other":
sex_label = "person of unspecified sex"
sex_vec = np.array([0.5, 0.5])
messages.append(
corical_pb2.Message(
heading="Sex disclaimer",
text="We do not have data on the chosen sex, so the results reflect a population with 50% females and 50% males",
severity="info",
)
)
else:
context.abort(grpc.StatusCode.FAILED_PRECONDITION, "Invalid sex")
# az shots
vaccine_labels = {
"None": ("not had any vaccines", "no"),
"OneAZ_under_3_weeks": ("had one shot of AstraZeneca vaccine (1-3 weeks ago)", "first shot of the AstraZeneca vaccine"),
"TwoAZ_under_2_months": ("had two shots of AstraZeneca vaccine (2 months ago)", "second shot of the AstraZeneca vaccine"),
"TwoAZ_2to4_months": ("had two shots of AstraZeneca vaccine (2-4 months after the vaccine)", "second shot of the AstraZeneca vaccine"),
"TwoAZ_4to6_months": ("had two shots of AstraZeneca vaccine (4-6 months after the vaccine)", "second shot of the AstraZeneca vaccine"),
"TwoAZ_OnePfz_under_2_months": ("had two shots of AstraZeneca vaccine followed by a Pfizer vaccine (2 months ago)", "Pfizer booster vaccine"),
}
if request.vaccine == "None":
comparison_doses = ["OneAZ_under_3_weeks", "TwoAZ_under_2_months"]
elif request.vaccine == "OneAZ_under_3_weeks":
comparison_doses = ["None", "TwoAZ_under_2_months"]
elif request.vaccine == "TwoAZ_under_2_months":
comparison_doses = ["OneAZ_under_3_weeks", "TwoAZ_OnePfz_under_2_months"]
elif request.vaccine == "TwoAZ_2to4_months":
comparison_doses = ["OneAZ_under_3_weeks", "TwoAZ_OnePfz_under_2_months"]
elif request.vaccine == "TwoAZ_4to6_months":
comparison_doses = ["OneAZ_under_3_weeks", "TwoAZ_OnePfz_under_2_months"]
elif request.vaccine == "TwoAZ_OnePfz_under_2_months":
comparison_doses = ["OneAZ_under_3_weeks", "None"]
# age
age_label, age_value, age_ix = get_age_bracket(request.age)
link = get_link(request.sex, age_ix)
# if link:
# printable = corical_pb2.PrintableButton(
# url=link, text=f"Get printable graphs for a {age_label} {sex_label}"
# )
# else:
# printable = None
printable = None
# community transmission
ct_vec = scenario_to_vec(request.transmission)
if request.transmission == "None":
transmission_label = "no"
elif request.transmission == "Ten_percent":
transmission_label = "a huge number of cases"
elif request.transmission == "Five_percent":
transmission_label = "a large number of cases"
elif request.transmission == "Two_percent":
transmission_label = "a lot of cases"
elif request.transmission == "ATAGI_Med":
transmission_label = "few cases"
elif request.transmission == "ATAGI_Low":
transmission_label = "not many cases"
else:
transmission_label = request.transmission
if request.transmission == "None":
messages.append(
corical_pb2.Message(
heading="Note",
text="You have selected a scenario with no community transmission. This is only a temporary situation and will change when state or national borders open.",
severity="warning",
)
)
# variant
variant_vec = None
# for tables
if request.vaccine == "None":
explanation = f"Results shown for a {age_label} {sex_label} who has not been vaccinated, when there are {transmission_label} community transmission, the risks of the folnot many casesing events are shown."
else:
explanation = f"Results shown for a {age_label} {sex_label} who has {vaccine_labels[request.vaccine][0]}, and under {transmission_label} community transmission, the risks of the folnot many casesing events are shown."
blood_clot_brief = (
"You may have heard that the AstraZeneca vaccine can give you a type of rare blood clotting. This is also called thrombosis with thrombocytopenia syndrome (TTS). "
)
# for graphs
subtitle = f"These results are for a {age_label} {sex_label}."
# Pfizer booster subtitle
pz_booster_subtitle = "You may have heard that the Pfizer vaccine can cause inflammation of your heart muscle. This is also called myocarditis."
# for output groups
# logger.info(f"{doses=}")
logger.info(f"{age_value=}")
logger.info(f"{sex_vec=}")
logger.info(f"{variant_vec=}")
logger.info(f"{ct_vec=}")
cmp = []
current_case = None
for i, cdose in enumerate([request.vaccine] + comparison_doses):
label, shot_ordinal = vaccine_labels[cdose]
cur = {
"label": label,
"is_other_shot": i != 0,
"shot_ordinal": shot_ordinal,
}
(
cur["symptomatic_infection"],
cur["get_tts"],
cur["die_from_covid_given_infected"],
cur["die_from_tts"],
cur["die_from_clots"],
cur["die_from_covid"],
cur["get_clots_covid_given_infected"],
cur["die_from_clots_covid_given_infected"],
cur["get_myocarditis_vax"],
cur["die_myocarditis_vax"],
cur["get_myocarditis_given_covid"],
cur["die_myocarditis_given_covid"],
cur["get_myocarditis_bg"],
cur["die_myocarditis_bg"],
) = compute_probs(cdose, age_value, sex_vec, ct_vec, variant_vec)
cmp.append(cur)
if cdose == request.vaccine:
logging.info("Saving current case")
current_case = cur
bar_graphs_list = [
corical_pb2.BarGraph(
title=f"What is my chance of getting COVID-19?",
subtitle=f"This is your chance of getting COVID-19 over a 2-month period. These results are for a {age_label} {sex_label} when there are {transmission_label} in your community.",
risks=generate_bar_graph_risks(
[
corical_pb2.BarGraphRisk(
label=f"Chance of getting COVID-19 if you have {d['label']}",
risk=d["symptomatic_infection"],
is_other_shot=d["is_other_shot"],
)
for d in cmp
]
),
),
corical_pb2.BarGraph(
title="If I get COVID-19, what are my chances of dying?",
subtitle=subtitle,
risks=generate_bar_graph_risks(
[
corical_pb2.BarGraphRisk(
label=f"Chance of dying from COVID-19 if you have {d['label']}",
risk=d["die_from_covid_given_infected"],
is_other_shot=d["is_other_shot"],
)
for d in cmp
]
),
),
corical_pb2.BarGraph(
title="What is my chance of getting rare blood clots (TTS) from the AstraZeneca shots?",
subtitle=blood_clot_brief + " " + subtitle,
risks=generate_bar_graph_risks(
[
corical_pb2.BarGraphRisk(
label=f"Chance of getting rare blood clots if I get COVID-19 (infection)",
risk=cmp[0]["get_clots_covid_given_infected"],
is_other_shot=True,
),
]
+ [
corical_pb2.BarGraphRisk(
label=f"Your chance of rare blood clots after the {d['shot_ordinal']} will increase by: ",
risk=d["get_tts"],
is_other_shot=d["is_other_shot"],
)
for d in cmp
if (d["get_tts"] > 0.0 or d['label'] == cmp[0]['label']) and d['shot_ordinal'] != "no" and d['shot_ordinal'] != "Pfizer booster vaccine"
]
+ [
corical_pb2.BarGraphRisk(
label=f"Your chance of rare blood clots after the {d['shot_ordinal']} will increase by: ",
risk=0,
is_other_shot=d["is_other_shot"],
bar_text="0 No evidence of increased chance of TTS after Pfizer vaccine",
)
for d in cmp
if (d['label'] == cmp[0]['label']) and (d['shot_ordinal'] == "Pfizer booster vaccine")
]
),
),
corical_pb2.BarGraph(
title="What is my chance of dying from rare blood clots (TTS) from the AstraZeneca shots?",
subtitle=blood_clot_brief + " " + subtitle,
risks=generate_bar_graph_risks(
[
corical_pb2.BarGraphRisk(
label=f"Chance of dying from rare blood clots if I get COVID-19 (infection)",
risk=cmp[0]["die_from_clots_covid_given_infected"],
is_other_shot=True,
),
]
+ [
corical_pb2.BarGraphRisk(
label=f"Your chance of dying from rare blood clots after the {d['shot_ordinal']} will increase by:",
risk=d["die_from_tts"],
is_other_shot=d["is_other_shot"],
)
for d in cmp
if (d["die_from_tts"] > 0.0 or d['label'] == cmp[0]['label']) and d['shot_ordinal'] != "no" and d['shot_ordinal'] != "Pfizer booster vaccine"
]
+ [
corical_pb2.BarGraphRisk(
label=f"Your chance of dying from rare blood clots after the {d['shot_ordinal']} will increase by:",
risk=0,
is_other_shot=d["is_other_shot"],
bar_text = "0 No evidence of increased chance of TTS after Pfizer vaccine",
)
for d in cmp
if (d['label'] == cmp[0]['label']) and (d['shot_ordinal'] == "Pfizer booster vaccine")
]
),
),
]
if request.vaccine == "TwoAZ_OnePfz_under_2_months":
bar_graphs_list.append(
corical_pb2.BarGraph(
title="What is my chance of having inflammation of my heart muscle (myocarditis) after receiving the Pfizer vaccine for my third dose?",
subtitle=pz_booster_subtitle + " " + subtitle,
risks=generate_bar_graph_risks(
[
# corical_pb2.BarGraphRisk(
# label=f"Chance of getting myocarditis if I am diagnosed with COVID-19",
# risk=cmp[0]["get_myocarditis_given_covid"],
# ),
corical_pb2.BarGraphRisk(
label=f"Chance of having myocarditis in 2 months even if you haven’t had any vaccine and haven’t had COVID-19 (infection)",
risk=cmp[0]["get_myocarditis_bg"],
is_other_shot=True,
),
]
+ [
corical_pb2.BarGraphRisk(
label=f"Chance of having myocarditis if I get COVID-19 (infection)",
risk=cmp[0]["get_myocarditis_given_covid"],
is_other_shot=True,
),
]
+ [
corical_pb2.BarGraphRisk(
label=f"Your chance of myocarditis after the {d['shot_ordinal']} will increase by:",
risk=d["get_myocarditis_vax"],
is_other_shot=d["is_other_shot"],
)
for d in cmp
if d["get_myocarditis_vax"] > 0.0 or d['label'] == cmp[0]['label']
]
),
),
)
bar_graphs_list.append(
corical_pb2.BarGraph(
title="What is my chance of dying from inflammation of my heart muscle (myocarditis) after receiving the Pfizer vaccine for my third dose?",
subtitle=pz_booster_subtitle + " " + subtitle,
risks=generate_bar_graph_risks(
[
corical_pb2.BarGraphRisk(
label=f"Chance of dying from myocarditis in 2 months even if you haven’t had any vaccine and haven’t had COVID-19 (infection)",
risk=cmp[0]["die_myocarditis_bg"],
is_other_shot=True,
),
]
+ [
corical_pb2.BarGraphRisk(
label=f"Chance of dying from myocarditis if I get COVID-19 (infection)",
risk=cmp[0]["die_myocarditis_given_covid"],
is_other_shot=True,
),
]
+ [
corical_pb2.BarGraphRisk(
label=f"Your chance of dying from myocarditis after the {d['shot_ordinal']} will increase by:",
risk=d["die_myocarditis_vax"],
is_other_shot=d["is_other_shot"],
)
for d in cmp
if d["die_myocarditis_vax"] > 0.0 or d['label'] == cmp[0]['label']
]
),
),
)
scenario_description = f"Here are your results. These are for a {age_label} {sex_label} when there are {transmission_label} in your community. They are based on the number and
#The reason for creating a fake test double was to ensure the proper functionality of the Bank Functions themselves. By creating fake
#data and replicating a fake POST request, we are able to simulate the expected functionality of each of the functions and evaluate the
#expected output.
#This happy path test case demonstrates what an ideal test case and result would be, with the expected input and output that one would
#expect in a normal and properly functioning use case.
#fake
#happy-path
def test_fake_cc_min_payment_calc_implementation():
s = soundex.getInstance()
dict2 = {
"CC Balance": 5000,
"CC Interest Rate": 6,
"Minimum Payment Percent": 9
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
resDict = dict()
resDict['Monthly Payment'] = 450.00
resDict['Months'] = 49
resDict['Total Payment'] = 5289.53
with app.test_client() as client:
# send data as POST form to endpoint
response = client.post(url+'/cc_min_payment_calc', data=sendJson, content_type='application/json', headers=headers)
# check result from server with expected data
str2 = str(response.data.decode('UTF-8').replace("\\",""))
assert str2[1:-1] == json.dumps(resDict)
print("Fake test to see if proper output is displayed from CC Min Payment Calc API")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_mock_cc_min_payment_calc_connection():
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
s = soundex.getInstance()
dict2 = {
"CC Balance": 5000,
"CC Interest Rate": 6,
"Minimum Payment Percent": 9
}
sendJson = json.dumps(dict2)
with app.test_client() as client:
response = client.post(url+'/cc_min_payment_calc', data=sendJson, content_type='application/json', headers=headers)
assert response.status_code == 200
print("Mock test to test the connection and response code for the CC Min Payment Calc endpoint")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_cc_min_payment_calc_invalid_request():
s = soundex.getInstance()
dict2 = {
"CC Balance": 5000,
"CC Interest Rate": 6,
"Minimum Payment Percent": 9
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
with app.test_client() as client:
response = client.put(url+'/cc_min_payment_calc', data=sendJson, content_type='application/json', headers=headers)
assert response.status_code == 405
print("Mock test invalid request for CC Min Payment Calc")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_cc_min_payment_calc_no_key():
s = soundex.getInstance()
dict2 = {
"CC Balance": 5000,
"CC Interest Rate": 6,
"Minimum Payment Percent": 9
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json'}
with app.test_client() as client:
try:
response = client.post(url+'/cc_min_payment_calc', data=sendJson, content_type='application/json', headers=headers)
assert False
except KeyError:
assert True
print("Mock test no API key for CC Min Payment Calc")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_cc_min_payment_calc_invalid_key():
s = soundex.getInstance()
dict2 = {
"CC Balance": 5000,
"CC Interest Rate": 6,
"Minimum Payment Percent": 9
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': 'invalid'}
with app.test_client() as client:
response = client.post(url+'/cc_min_payment_calc', data=sendJson, content_type='application/json', headers=headers)
assert response.status_code == 401
print("Mock test invalid API key for CC Min Payment Calc")
#The reason for creating a fake test double was to ensure the proper functionality of the Bank Functions themselves. By creating fake
#data and replicating a fake POST request, we are able to simulate the expected functionality of each of the functions and evaluate the
#expected output.
#This happy path test case demonstrates what an ideal test case and result would be, with the expected input and output that one would
#expect in a normal and properly functioning use case.
#fake
#happy-path
def test_fake_mortgage_calc_implementation():
s = soundex.getInstance()
dict2 = {
"Home Price": 480000,
"Down Payment": 50,
"Loan Length": 30,
"Interest Rate": 10
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
resDict = dict()
resDict['Monthly Payment'] = 2106.17
resDict['Amount Paid in Interest'] = 518221.84
resDict['Amount Paid in Principle'] = 240000.00
resDict['Total Amount Paid'] = 758221.84
with app.test_client() as client:
# send data as POST form to endpoint
response = client.post(url+'/mortgage_calc', data=sendJson, content_type='application/json', headers=headers)
# check result from server with expected data
str2 = str(response.data.decode('UTF-8').replace("\\",""))
assert str2[1:-1] == json.dumps(resDict)
print("Fake test to see if proper output is displayed from Mortgage Calc API")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_mock_mortgage_calc_connection():
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
s = soundex.getInstance()
dict2 = {
"Home Price": 480000,
"Down Payment": 50,
"Loan Length": 30,
"Interest Rate": 10
}
sendJson = json.dumps(dict2)
with app.test_client() as client:
response = client.post(url+'/mortgage_calc', data=sendJson, content_type='application/json', headers=headers)
assert response.status_code == 200
print("Mock test to test the connection and response code for the Mortgage Calc endpoint")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_mortgage_calc_invalid_request():
s = soundex.getInstance()
dict2 = {
"Home Price": 480000,
"Down Payment": 50,
"Loan Length": 30,
"Interest Rate": 10
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
with app.test_client() as client:
response = client.put(url+'/mortgage_calc', data=sendJson, content_type='application/json', headers=headers)
assert response.status_code == 405
print("Mock test invalid request for Mortgage Calc")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_mortgage_calc_no_key():
s = soundex.getInstance()
dict2 = {
"Home Price": 480000,
"Down Payment": 50,
"Loan Length": 30,
"Interest Rate": 10
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json'}
with app.test_client() as client:
try:
response = client.post(url+'/mortgage_calc', data=sendJson, content_type='application/json', headers=headers)
assert False
except KeyError:
assert True
print("Mock test no API key for Mortgage Calc")
#The reason for creating a mock was to ensure the correct code implementation for HTTP requests was done properly and to ensure
#that the test did not rely on an existing connection, and would be able to be run on a mock connection.
#mock
def test_mortgage_calc_invalid_key():
s = soundex.getInstance()
dict2 = {
"Home Price": 480000,
"Down Payment": 50,
"Loan Length": 30,
"Interest Rate": 10
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': 'invalid'}
with app.test_client() as client:
response = client.post(url+'/mortgage_calc', data=sendJson, content_type='application/json', headers=headers)
assert response.status_code == 401
print("Mock test invalid API key for Mortgage Calc")
#The reason for creating a fake test double was to ensure the proper functionality of the Bank Functions themselves. By creating fake
#data and replicating a fake POST request, we are able to simulate the expected functionality of each of the functions and evaluate the
#expected output.
#This happy path test case demonstrates what an ideal test case and result would be, with the expected input and output that one would
#expect in a normal and properly functioning use case.
#fake
#happy-path
def test_fake_cdCalc_implementation():
s = soundex.getInstance()
dict2 = {
"Init Deposit": 2500,
"Year Period": 3.5,
"Interest Rate": 3.5
}
sendJson = json.dumps(dict2)
headers = {'Content-type': 'application/json', 'apiKey': apiKey}
resDict = dict()
resDict['Total Balance'] = 2819.88
resDict['Total Interest'] = 319.88
with app.test_client() as client:
# send data as POST form to endpoint
response = client.post(url+'/cdCalc', data=sendJson, content_type='application/json', headers=headers)
# check result from server with expected data
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""ncurses Python application for "Getting Things Done".
[Methodology - Getting Things Done]
[Method - Todo.txt]
Todo.txt is a text data format designed to store personal task data. It follows
philosophies of simplicity, portability and functionality.
More details here:
http://todotxt.com/
Todo.txt is intended to follow the GTD methodology. It does do this but it
also, by necessity, imposes a specific approach to the GTD methodology. This is
why I see it as a "method" more than just a format.
The creators do a fairly good job of explaining this method here:
https://github.com/ginatrapani/todo.txt-cli/wiki/The-Todo.txt-Format
Salient points about this method are:
- Meta-data about tasks only includes dates, completion, priority, projects and
contexts.
- There is no nesting of priority, projects or contexts. Your tasks are "flat".
- Dates do not include time.
- Priorities are letters only, not numbers.
- All meta data can be written right into your task description.
[TaskWarrior - What It Got Wrong]
TaskWarrior is a command-line tool for implementing personal task management and
is probably the most popular one. TaskWarrior has good documentation and can be
used to facilitate a GTD methodology. It has a proven track-record of being very
useful and facilitating productivity among those of us who still use the terminal.
Besides inventing its own serialization format (they could have just used JSON,
no?), I found a few things frustrating about it to the point where I stopped
using it altogether. I thought and thought about my experience with TaskWarrior
and the many other task management apps I tried out. I really wanted something
that I could use on a powerful UNIX shell but something about TaskWarrior just
didn't make it work right for me. Then I realized what it was: context.
No, not "context" in the todo.txt sense (mentioned above). Context, as in, what
is happening in my head as I work through my task list. It's the thing that GUI
applications have that terminal applications can't have. With a GUI app, I can
instantly and visually see all of my tasks. I can then pluck out ones I need to
change (mark "done"!) and move on. All the GUI apps focussed on the right
thing: presenting the tasks and allowing me to take my sweet time to decide what
to do. TaskWarrior could not do this. It presented the data, then would exit.
Once I figured out what to do, I told TaskWarrior through lots of typing and
then it happened. But I sacrificed context. I had to reprint the list again to
decide on the next thing to do.
By the time I had done my morning routine, my fingers were tired and I had a
feeling of not quite remembering what changes I had done. The problem was that
I could perform an action or get context (print tasks) but not both. Every time
I got context, I had to lose it to do something and then go back and get it
again.
Another problem was all the typing. If you look at your command history, you'll
see a lot of the same patterns over and over again with very few things changed.
This is an indicator that the user is being asked to do a lot of overhead to do
something simple.
"But that's what you do on a terminal!" Yes, but not if you have to do the same
thing over and over again. The terminal is a user interface and every terminal
application needs to strive to be as user-friendly as possible. If you're
making a website or a GUI app, you focus on how users go through the app and
use the essential functions of it. You care and you modify the design to appeal
to more users and make things easy without sacrificing functionality. Why should
a terminal application not do the same?
So TaskWarrior sucks at presenting a persistent context from which I can make
multiple decisions. And TaskWarrior makes me type a lot of stuff for it.
Those two things made it a very user-unfriendly application for me. As a result,
I had to stop using it, no matter how many features it had.
[Application - Task Menu]
Like any good programmer (is that what I am?) I decided to write something that
took a different approach, in hopes that it would be useful to me and possibly
others.
Task Menu, is a curses-based application. This gives it the contextual power of
the GUI apps but the portability and leanness of a terminal app. It's the best
of both worlds!
Task Menu also applies a limited set of "views" you can have on your tasks. It
removed the ability for you, the user, to add new views or customize in that
way. This is another deviation from TaskWarrior which boasts customization.
I believe that by making the app ncurses-based and by having pre-defined views,
you, the user, can use your brain for what it's supposed to be used for: task
management.
There are generally 2 "modes" that your brain is in right before you fire up
your personal task management system: 1) you don't remember what needed to be
done and you need to see it OR 2) you have something very specific in mind that
you need to do and just want to do it.
For scenario #1, you need a way to see lots of tasks at once and scroll through
them if needed. curses works for this.
For scenario #2, you need to type as little as possible to tell the application
what to do. curses again works great because your environment (your context) is
already there, so all you need to do is hit a single key and something can
happen ("done"!).
[So why the limited number of views?]
Given the Todo.txt method, with its "3 axes" of tasks, it turns out that we can
pivot off of those axes in 6 possible ways (3 factorial, for you math nerds).
They are:
- By Priority then Project
- By Priority then Context
- By Project then Priority
- By Project then Context
- By Context then Priority
- By Context then Project
You pivot off of one of them first, which leaves only 2 more "axes" to pivot
from. So you pivot off of the second one, and that leaves you with the last
remaining "axis" automatically. From those two pivot points, the application
can know everything it needs to present to you a filtered view of the tasks
that match that criteria.
And THAT is how Task Menu works. Since each task contains all its meta-data
already, all you need to see is the task itself. Thus, the only possible
variations you could have come from the "axis" themselves.
I don't know if Todo.txt intended this as a consequence. But the data itself
makes this possible.
"""
import collections
import datetime
import inspect
import itertools
import os
import string
import sys
import time
import urwid
#TODO_TEXT_FILE = os.path.join(os.path.expanduser('~'), '.todo.txt')
TODO_TEXT_FILE = os.path.join(os.path.expanduser('~'), 'todo.test.txt')
DIMENSIONS = ('projects', 'contexts', 'priority')
# LABEL - CATEGORY - GROUPING
VIEWS = ((u'[Pri/Ctx]', 'priority', 'contexts'),
(u'[Prj/Ctx]', 'projects', 'contexts'),
(u'[Prj/Pri]', 'projects', 'priority'),
(u'[Ctx/Prj]', 'contexts', 'projects'),
(u'[Ctx/Pri]', 'contexts', 'priority'),
(u'[Pri/Prj]', 'priority', 'projects'))
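# Sanity check (sketch): the six views above are exactly the ordered pairs of
# distinct dimensions, so they can be derived rather than hand-listed:
#   assert set(itertools.permutations(DIMENSIONS, 2)) == {(c, g) for _, c, g in VIEWS}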
class Border(urwid.LineBox):
"""Draws a border around the widget with optional title.
Same as urwid.LineBox but the title is a little fancier and it's aligned left.
"""
def __init__(self, *args, **kwargs):
super(Border, self).__init__(*args, **kwargs)
# Remove the first line in the title to force the title to align left
if len(self.tline_widget.contents) == 3:
self.tline_widget.contents.pop(0)
def format_title(self, text):
if not text:
return ''
return u'┤ %s ├' % text
class Task(urwid.WidgetPlaceholder):
def __init__(self, S, todotxtfile):
self._todotxtfile = todotxtfile
self.UpdateFromString(S)
super(Task, self).__init__(self.text_widget_attrmap)
def __str__(self):
return self.text
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.text)
def selectable(self):
return True
def keypress(self, size, key):
return key
def _BuildTextWidget(self):
if self.completed:
icon = 'x'
elif self.creation_date and (datetime.date.today() - self.creation_date).days > 21:
icon = '!'
else:
icon = ' '
self.text_widget = urwid.Text([('prefix', ' '),
'[%s]' % icon,
' ',
self.text])
self.text_widget_attrmap = urwid.AttrMap(self.text_widget,
{'prefix': 'prefix:normal', None: 'normal'},
{'prefix': 'prefix:selected', None: 'selected'})
return self.text_widget_attrmap
def _Parse(self, line):
"""Parse a single-line string S as a task in the todo.txt format.
See: https://github.com/ginatrapani/todo.txt-cli/wiki/The-Todo.txt-Format
"""
line_stripped = line.strip()
# Completed
if line_stripped.startswith('x '):
completed = True
line_stripped = line_stripped[2:]
else:
completed = False
+ (n - 1,)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, channels, <attention dims>)
v_perm = batch_dims + (n - 1,) + axis
value = value.transpose(v_perm)
query = query / jnp.sqrt(depth).astype(dtype)
batch_dims_t = tuple(range(len(batch_dims)))
# softmax over the key's attention dims
key_dims = tuple(range(key.ndim - len(axis), key.ndim))
key_soft = softmax(key, axis=key_dims)
key_soft = key_soft.astype(dtype)
# carry out the dot product between softMax(key)T and value
part_results = lax.dot_general(
key_soft,
value, (((n - 1,), (n - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision)
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
part_results = part_results + bias
# apply dropout
if not deterministic and dropout_rate > 0.:
if dropout_rng is None:
dropout_rng = make_rng()
keep_prob = jax.lax.tie_in(part_results, 1.0 - dropout_rate)
if broadcast_dropout:
# dropout is broadcast across the batch+head+non-attention dimension
dropout_dims = part_results.shape[-(2 * len(axis)):]
dropout_shape = (tuple([1] * len(batch_dims_t)) + dropout_dims)
keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = random.bernoulli(dropout_rng, keep_prob, part_results.shape)
multiplier = (keep.astype(part_results.dtype) /
jnp.asarray(keep_prob, dtype=dtype))
part_results = part_results * multiplier
# carry out the dot product between query and part_results
results = lax.dot_general(
query,
part_results, (((n - 1,), (n - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision)
# normalize the results over the attention dims. The original divided by
# cmath.sqrt(norm_dims), which fails on a tuple; assuming the intent was to
# scale by the square root of the total attention size, as with the query:
norm_dims = tuple(range(results.ndim - len(axis), results.ndim))
attn_size = np.prod([results.shape[d] for d in norm_dims])
results = results / jnp.sqrt(attn_size).astype(dtype)
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
results = results.transpose(perm_inv)
return results
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
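# Example: _invert_perm((1, 2, 0)) == (2, 0, 1); transposing by a permutation
# and then by its inverse restores the original axis order.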
@struct.dataclass
class _CacheEntry:
key: np.ndarray
value: np.ndarray
i: np.ndarray
def scan_in_dim(*args, **kwargs):
warnings.warn('scan_in_dim moved to flax.jax_utils',
DeprecationWarning)
return jax_utils.scan_in_dim(*args, **kwargs)
class Cache(Collection):
"""The `flax.nn` module is Deprecated, use `flax.linen` instead.
Learn more and find an upgrade guide at
https://github.com/google/flax/blob/master/flax/linen/README.md
Collect intermediate activations for efficient autoregressive decoding."""
def initialize_cache(self, shape, dtype=None):
"""Initialize the cache for the given input shape.
Args:
shape: the shape of the batch and attention dimensions.
dtype: the dtype of the autoregressive cache.
Returns:
the initialized cache
"""
if dtype is None:
dtype = jnp.float32
def _init(shape_data):
ndim = int(shape_data[0])
tail_shape = tuple(shape_data[1:])
full_shape = shape + tail_shape
if len(full_shape) != ndim:
raise ValueError('Shape should be a tuple with the shape of the batch '
                 'and attention dims.')
return _CacheEntry(key=jnp.zeros(full_shape, dtype=dtype),
value=jnp.zeros(full_shape, dtype=dtype),
i=jnp.zeros((), jnp.uint32))
return Cache(jax.tree_map(_init, self.state))
jax.tree_util.register_pytree_node(
Cache, iterate_collection, collection_from_iterable)
class MultiHeadDotProductAttention_Modified(Module):
"""The `flax.nn` module is Deprecated, use `flax.linen` instead.
Learn more and find an upgrade guide at
https://github.com/google/flax/blob/master/flax/linen/README.md
Multi-head dot-product attention."""
def __init__(self):
self.qk_dim = 8
self.nb_random_features = 10000
self.unstructured_random_matrix_creator = functools.partial(
    GaussianUnstructuredRandomMatrix, self.nb_random_features,
    self.qk_dim)
self.ortho_random_matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix, self.nb_random_features,
self.qk_dim)
self.fast_unstruct_rfm_dot_product_attention = FastAttentionviaLowRankDecomposition(
self.unstructured_random_matrix_creator,
kernel_feature_creator)
self.fast_ortho_rfm_dot_product_attention = FastAttentionviaLowRankDecomposition(
self.ortho_random_matrix_creator, kernel_feature_creator)
def apply(self,
inputs_q,
inputs_kv,
num_heads,
dtype=jnp.float32,
qkv_features=None,
out_features=None,
attention_axis=None,
causal_mask=False,
padding_mask=None,
key_padding_mask=None,
segmentation=None,
key_segmentation=None,
cache=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None,
kernel_init=default_kernel_init,
bias_init=zeros,
bias=True
):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and projects the results to an output vector.
This can be used for encoder-decoder attention by specifying both `inputs_q`
and `inputs_kv` or for self-attention by only specifying `inputs_q` and
setting `inputs_kv` to None.
Args:
inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`.
inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]`
or None for self-attention, in which case key/values will be derived
from inputs_q.
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
attention_axis: axes over which the attention is applied ( 'None' means
attention over all axes, but batch, heads, and features).
causal_mask: boolean specifying whether to apply a causal mask on the
attention weights. If True, the output at timestep `t` will not depend
on inputs at timesteps strictly greater than `t`.
padding_mask: boolean specifying query tokens that are pad token w/ False.
key_padding_mask: boolean specifying key-value tokens that are pad token
w/ False.
segmentation: segment indices for packed inputs_q data.
key_segmentation: segment indices for packed inputs_kv data.
cache: an instance of `flax.nn.attention.Cache` used for efficient
autoregressive decoding.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
bias: bool: whether pointwise QKVO dense transforms use bias.
attention_fn: dot_product_attention or compatible function. Accepts
query, key, value, and returns output of shape
`[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`
Returns:
output of shape `[bs, dim1, dim2, ..., dimN, features]`.
"""
assert causal_mask or not cache, (
'Caching is only supported for causal attention.')
if inputs_kv is None:
inputs_kv = inputs_q
is_self_attention = inputs_kv is inputs_q
if attention_axis is None:
attention_axis = tuple(range(1, inputs_q.ndim - 1))
features = out_features or inputs_q.shape[-1]
qkv_features = qkv_features or inputs_q.shape[-1]
assert qkv_features % num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // num_heads
dense = DenseGeneral.partial(
axis=-1,
features=(num_heads, head_dim),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
precision=precision)
# project inputs_q to multi-headed q/k/v
# dimensions are then [bs, dims..., n_heads, n_features_per_head]
query, key, value = (dense(inputs_q, dtype=dtype, name='query'),
dense(inputs_kv, dtype=dtype, name='key'),
dense(inputs_kv, dtype=dtype, name='value'))
if cache:
assert isinstance(cache, Cache), 'cache must be an instance of Cache'
if self.is_initializing():
cache.store(np.array((key.ndim,) + key.shape[-2:], dtype=np.int32))
else:
cache_entry = cache.retrieve(None)
expected_shape = list(cache_entry.key.shape[:-2])
for attn_dim in attention_axis:
expected_shape[attn_dim] = 1
expected_shape = tuple(expected_shape) + inputs_q.shape[-1:]
if expected_shape != inputs_q.shape:
raise ValueError('Invalid shape provided, '
'expected shape %s instead got %s.' %
(expected_shape, inputs_q.shape))
if not isinstance(cache_entry, _CacheEntry):
raise ValueError('Cache is not initialized.')
cshape = cache_entry.key.shape
indices = [0] * len(cshape)
i = cache_entry.i
attn_size = np.prod(np.take(cshape, attention_axis))
for attn_dim in attention_axis:
attn_size //= cshape[attn_dim]
indices[attn_dim] = i // attn_size
i = i % attn_size
key = lax.dynamic_update_slice(cache_entry.key, key, indices)
value = lax.dynamic_update_slice(cache_entry.value, value, indices)
one = jnp.array(1, jnp.uint32)
cache_entry = cache_entry.replace(i=cache_entry.i + one,
key=key,
value=value)
cache.store(cache_entry)
# create attention masks
mask_components = []
if causal_mask:
if cache and not self.is_initializing():
bias_pre_shape = (1,) * (key.ndim - 1)
attn_shape = tuple(np.take(key.shape, attention_axis))
attn_size = np.prod(attn_shape)
ii = jnp.arange(attn_size, dtype=jnp.uint32)
mask = ii < cache_entry.i
mask_components.append(mask.reshape(bias_pre_shape + attn_shape))
else:
mask_components.append(_make_causal_mask(key, attention_axis))
if (padding_mask is not None or key_padding_mask is not None) and not cache:
if key_padding_mask is None:
if is_self_attention:
key_padding_mask = padding_mask
else:
key_padding_shape = [inputs_kv.shape[dim] for dim in attention_axis]
key_padding_mask = jnp.full(key_padding_shape, True)
if padding_mask is None:
if is_self_attention:
padding_mask = key_padding_mask
else:
padding_shape = [inputs_q.shape[dim] for dim in attention_axis]
padding_mask = jnp.full(padding_shape, True)
padding_mask = make_padding_mask(
padding_mask_query=padding_mask,
padding_mask_key=key_padding_mask,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=attention_axis)
mask_components.append(padding_mask)
if segmentation is not None:
if key_segmentation is None:
assert is_self_attention
key_segmentation = segmentation
segmentation_mask = make_padding_mask(
padding_mask_query=segmentation,
padding_mask_key=key_segmentation,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=attention_axis,
segmentation_mask=True)
mask_components.append(segmentation_mask)
if mask_components:
attention_mask = mask_components[0]
for component in mask_components[1:]:
attention_mask = jnp.logical_and(attention_mask, component)
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0, jnp.full(attention_mask.shape, 0.).astype(dtype),
jnp.full(attention_mask.shape, -1e10).astype(dtype))
else:
attention_bias = None
# apply attention
x = self.fast_unstruct_rfm_dot_product_attention.dot_product_attention(
query,
key,
value,
dtype=dtype,
axis=attention_axis,
bias=attention_bias,
precision=precision,
dropout_rng=dropout_rng,
dropout_rate=dropout_rate,
broadcast_dropout=broadcast_dropout,
deterministic=deterministic)
# back to the original inputs dimensions
out = DenseGeneral(
x,
features=features,
axis=(-2, -1),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
dtype=dtype,
precision=precision,
name='out')
return out
# TODO(flax-dev): Consider refactoring MultiHeadDotProductAttention and moving
# causal_mask and cache support into this class instead.
SelfAttention = MultiHeadDotProductAttention_Modified.partial(inputs_kv=None)
def make_padding_mask(padding_mask_query,
padding_mask_key,
query_shape,
key_shape,
attention_axis=None,
segmentation_mask=False):
"""The `flax.nn` module is Deprecated, use `flax.linen` instead.
Learn more and find an upgrade guide at
https://github.com/google/flax/blob/master/flax/linen/README.md
Makes padding mask for attention weights.
In case of 1d inputs (i.e., `[bs, len, features]`, the attention weights will
be `[bs, len, len]` and this function makes a square matrix [len, len].
Args:
padding_mask_query: padding mask of query <bs, qdim1,.., qdimn>
padding_mask_key: padding mask of key <bs, kdim1,.., kdimn>
#!/usr/bin/env python
import os
import pwd
import sys
import grp
import time
import copy
import pysam
import bisect
import tarfile
import logging
import argparse
from io import StringIO, BytesIO
from tempfile import NamedTemporaryFile
from collections import Counter, defaultdict
from pbsuite.utils.setupLogging import setupLogging
USAGE="""\
Parse and cluster mapped tails from a bam to create breakpoint candidates."""
class Bread():
"""
Holds a read that has a break in it
and all relevant information for clustering
"""
def __init__(self, read, readRef, log='h'):
"""
extract information from pysam.AlignedRead
if log=='h' get higher quality end
if log=='p' get prolog only
if log=='e' get epilog only
"""
self.read = read
self.readRef = readRef
if read.is_reverse:
begin = read.aend
end = read.pos
strand = 1
else:
begin = read.pos
end = read.aend
strand = 0
foundBreak = False #for the p only and looking for e, or vice versa
self.proref = getTag(read, "PR")
self.prostr = getTag(read, "PI")
self.propos = getTag(read, "PP")
#default promaq to -1 so the hq comparison below works when epi is higher quality
self.promaq = getTag(read, "PQ") or -1
self.prorem = getTag(read, "PS")
# if we have a pro and (we want hq or we want pro)
if self.propos is not None and (log == 'h' or log == 'p'):
foundBreak = True
if self.propos <= begin:
s, e, a, b, uq, dq, uR, dR = (self.propos, begin, \
"p", "i", self.promaq, \
self.read.mapq, self.proref, \
readRef)
ud = '3' if self.prostr == 0 else '5'
dd = '5' if self.read.is_reverse else '3'
else:
s, e, a, b, uq, dq, uR, dR = (begin, self.propos, \
"i", "p", self.read.mapq, \
self.promaq, readRef, \
self.proref)
dd = '3' if self.prostr == 0 else '5'
ud = '5' if self.read.is_reverse else '3'
rmSeq = self.prorem if self.prorem is not None else 0
inv = False if self.prostr == strand else True
self.epiref = getTag(read, "ER")
self.epistr = getTag(read, "EI")
self.epipos = getTag(read, "EP")
self.epimaq = getTag(read, "EQ") or -1
self.epirem = getTag(read, "ES")
#Choose higher quality or force epilog
# if we have an epi and ((we want hq and it's of higher quality) or we want epi)
if (self.epipos is not None) and ((log == 'h' and self.epimaq > self.promaq) or log == 'e'):
foundBreak = True
if self.epipos <= end:
s, e, a, b, uq, dq, uR, dR = (self.epipos, end, \
"e", "i", self.epimaq, \
self.read.mapq, self.epiref, \
readRef)
ud = '3' if self.epistr == 0 else '5'
dd = '5' if self.read.is_reverse else '3'
else:
s, e, a, b, uq, dq, uR, dR = (end, self.epipos, \
"i", "e", self.read.mapq, \
self.epimaq, readRef, \
self.epiref)
dd = '3' if self.epistr == 0 else '5'
ud = '5' if self.read.is_reverse else '3'
rmSeq = self.epirem if self.epirem is not None else 0
inv = False if self.epistr == strand else True
self.has_tail = foundBreak
if not self.has_tail:
return
self.uRef = uR
self.dRef = dR
#reference key; for sorting
j = [uR, dR]; j.sort()
self.refKey = "_".join(j)
#Points
self.uBreak = s
self.dBreak = e
#P,I,E
self.uTail = a
self.dTail = b
#Strands
self.uDir = ud
self.dDir = dd
#Mapqs
self.uMapq = uq if uq is not None else 255
self.dMapq = dq if dq is not None else 255
#Inv
self.isInverted = inv
#Remain
self.remainSeq = rmSeq
def near(self, other):
"""
Is this Bread and its mate near the other Bread?
"""
#Same target
if self.refKey != other.refKey:
return False
# are our components within buffer bp of each other
if abs(self.uBreak - other.uBreak) > BUFFER:
return False
if abs(self.dBreak - other.dBreak) > BUFFER:
return False
return self.annotate() == other.annotate()
"""
del
->p| |i-> ud=3,dd=3
->i| |e-> ud=3,dd=3
<-i| |p<- ud=5,dd=5
<-e| |i<- ud=5,dd=5
ins gain same as del but close can estimate size from remaining
ins sequence
|i<- <-e| ud=5,dd=5
|p<- <-i| ud=5,dd=5
|e-> ->i| ud=3,dd=3
|i-> ->p| ud=3,dd=3
inv sequence
->p| <-i| ud=3,dd=5
->i| <-e| ud=3,dd=5
|i-> |p<- ud=3,dd=5
|e-> |i<- ud=3,dd=5
|p<- |i-> ud=5,dd=3
|i<- |e-> ud=5,dd=3
<-i| ->p| ud=5,dd=3
<-e| ->i| ud=5,dd=3
missed adapter evidence is any one of these. (a || b)
both would suggest some kind of duplication inversion
but that requires non-local information.
These are the same as the inversion/but the sequence is
on top of itself
->i| a ud=3
<-e| a dd=5
->p| a ud=3
<-i| a dd=5
|e-> b ud=3
|i<- b dd=5
|i-> b ud=3
|p<- b dd=5
"""
#brute force
#ins = ["i<-=<-e", "p<-=<-i", "e->=->i", "i->=->p"]
#dele = ["->p=i->", "->i=e->", "<-i=<-p", "<-e=i<-"]
#inv = ["->p%<-i", "->i%<-e", "i->%p<-", "e->%i<-", \
#"p<-%i->", "i<-%e->", "<-i%->p", "<-e%->i"]
#tloc = ["->i=e->", "->p=i->", "<-i=p<-", "<-e=i<-",\
#"i->=->p", "i->=->p", "p<-=i<-", "i<-=e<-"]
#if self.bpStr() in ins and other.bpStr() in ins:
#return True
#if self.bpStr() in dele and other.bpStr() in dele:
#return True
#if self.bpStr() in inv and other.bpStr() in inv:
#return True
#if self.uRef != self.dRef
#return False
# are we moving in the same direction
# this creates 2 clusters - one per strand
if self.annotate() != other.annotate():
return False
if self.uDir == other.uDir and self.dDir == other.dDir:
return True
elif self.read.is_reverse != other.read.is_reverse:
#If we're on opposite strands,
#but our pieces are pointing together
if self.uDir != other.uDir and self.dDir != other.dDir:
return True
else:
if self.uTail == 'i' and other.uTail in ['p', 'e'] or \
self.dTail == 'i' and other.dTail in ['p', 'e']:
#This can't be true
if self.uDir == other.dDir and self.dDir == other.uDir \
and self.read.is_reverse == other.read.is_reverse:
return True
return False
def getInvStr(self):
return "%" if self.isInverted else "="
def getRevStr(self):
return "<-" if self.read.is_reverse else "->"
def annotate(self):
"""
based on the properties of orientation, create annotation
of what possible variant is here
"""
ins = ["i<-=<-e", "p<-=<-i", "e->=->i", "i->=->p"]
dele = ["->p=i->", "->i=e->", "<-i=p<-", "<-e=i<-"]
inv = ["->p%<-i", "->i%<-e", "i->%p<-", "e->%i<-", \
"p<-%i->", "i<-%e->", "<-i%->p", "<-e%->i"]
if self.uRef != self.dRef:
self.estsize = -1
return "TLOC"
bps = self.bpStr()
if bps in ins:
if abs(self.uBreak - self.dBreak) < 100:
#rmSeq = self.epirem if self.epirem is not None else 0
self.estsize = int(self.remainSeq)
else:
self.estsize = int(abs(self.uBreak - self.dBreak))
return "INS"
if bps in dele:
#Could be insertion with repeat
if abs(self.uBreak - self.dBreak) < 100 and \
self.remainSeq >= 100:#Shouldn't hardcode:
self.estsize = int(self.remainSeq)
return "INS"
self.estsize = int(abs(self.uBreak - self.dBreak))
return "DEL"
if bps in inv:
self.estsize = int(abs(self.uBreak - self.dBreak))
return "INV"
#never gets here... unless XinvxX
return "UNK"
if self.uRef == self.dRef:
if self.uDir != self.dDir:
self.estsize = int(abs(self.uBreak - self.dBreak))
return "INV"
if self.dBreak - self.uBreak < 100 and self.remainSeq >= 100:
self.estsize = int(self.remainSeq)
return "INS"
elif self.uDir == self.dDir:
ut, dt = self.__tailtoint__()
self.estsize = int(abs(self.uBreak - self.dBreak))
if (self.uDir == '3' and ut > dt) or (self.dDir == '5' and ut < dt):
return "INS"
else:
return "DEL"
else:
self.estsize = -1
return "TLOC"
def __tailtoint__(self):
"""
returns the uTail and dTail as ints
"""
trans = {"p":1,
"i":2,
"e":3}
x = trans[self.uTail]
y = trans[self.dTail]
return x, y
def bpStr(self):
def swap(a, b):
trans = str.maketrans("<>","><")
if a in ['p','i','e']:
b = b.translate(trans)[::-1]
elif b in ['p','i','e']:
a = a.translate(trans)[::-1]
return (b, a)
x, y = self.__tailtoint__()
if (x < y):
uC = ("->", self.uTail)
dC = (self.dTail, "->")
elif (x > y):
uC = (self.uTail, "->")
dC = ("->", self.dTail)
if self.uDir == '5':
uC = swap(*uC)
if self.dDir == '5':
dC = swap(*dC)
return "".join(uC) + self.getInvStr() + "".join(dC)
def anyNone(self):
"""
This is an old debugging method
"""
if self.uTail is None:
logging.debug("uTail none %s" % str(self.read.qname))
elif self.uMapq is None:
logging.debug("uMapq none %s" % str(self.read.qname))
elif self.uDir is None:
logging.debug("uDir none %s" % str(self.read.qname))
elif self.uBreak is None:
logging.debug("uBrea none %s" % str(self.read.qname))
elif self.getInvStr() is None:
logging.debug("getIn none %s" % str(self.read.qname))
elif self.getRevStr() is None:
logging.debug("getRe none %s" % str(self.read.qname))
elif self.dBreak is None:
logging.debug("dBrea none %s" % str(self.read.qname))
elif self.dDir is None:
logging.debug("dDir none %s" % str(self.read.qname))
elif self.dMapq is None:
logging.debug("dMapq none %s" | |
paneAssetTemplates.move(64, 34)
# Publish::Templates::AssetPane: Publish default button
def setAssetDefault():
txt_asset.setText(self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('flame_render', {}).get('default', ''))
btn_assetDefault = QtWidgets.QPushButton('Default', paneAssetTemplates)
btn_assetDefault.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetDefault.setFixedSize(88, 28)
btn_assetDefault.move(0, 0)
btn_assetDefault.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_assetDefault.clicked.connect(setAssetDefault)
# Publish::Templates::AssetPane: Publish template text field
txt_asset_value = self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('flame_render', {}).get('value', '')
txt_asset = QtWidgets.QLineEdit(txt_asset_value, paneAssetTemplates)
txt_asset.setFocusPolicy(QtCore.Qt.ClickFocus)
txt_asset.setFixedSize(588, 28)
txt_asset.move (94, 0)
txt_asset.setStyleSheet('QLineEdit {color: #9a9a9a; background-color: #373e47; border-top: 1px inset black; border-bottom: 1px inset #545454}')
# Publish::Templates::AssetPane: Publish template fields button
asset_template_fields = self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('fields', [])
def addAssetField(field):
txt_asset.insert(field)
btn_assetFields = QtWidgets.QPushButton('Add Field', paneAssetTemplates)
btn_assetFields.setFixedSize(88, 28)
btn_assetFields.move(688, 0)
btn_assetFields.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetFields.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_assetFields_menu = QtWidgets.QMenu()
for field in asset_template_fields:
action = btn_assetFields_menu.addAction(field)
x = lambda chk=False, field=field: addAssetField(field)
action.triggered[()].connect(x)
btn_assetFields.setMenu(btn_assetFields_menu)
# Publish::Templates::AssetPane: Batch template default button
def setAssetBatchDefault():
txt_assetBatch.setText(self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('flame_batch', {}).get('default', ''))
btn_assetBatchDefault = QtWidgets.QPushButton('Default', paneAssetTemplates)
btn_assetBatchDefault.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetBatchDefault.setFixedSize(88, 28)
btn_assetBatchDefault.move(0, 34)
btn_assetBatchDefault.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_assetBatchDefault.clicked.connect(setAssetBatchDefault)
# Publish::Templates::AssetPane: Batch template text field
txt_assetBatch_value = self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('flame_batch', {}).get('value', '')
txt_assetBatch = QtWidgets.QLineEdit(txt_assetBatch_value, paneAssetTemplates)
txt_assetBatch.setFocusPolicy(QtCore.Qt.ClickFocus)
txt_assetBatch.setMinimumSize(588, 28)
txt_assetBatch.move(94, 34)
txt_assetBatch.setStyleSheet('QLineEdit {color: #9a9a9a; background-color: #373e47; border-top: 1px inset black; border-bottom: 1px inset #545454}')
# Publish::Templates::AssetPane: Batch template fields button
def addAssetBatchField(field):
txt_assetBatch.insert(field)
btn_assetBatchFields = QtWidgets.QPushButton('Add Field', paneAssetTemplates)
btn_assetBatchFields.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetBatchFields.setMinimumSize(88, 28)
btn_assetBatchFields.move(688, 34)
btn_assetBatchFields.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_assetBatchFields_menu = QtWidgets.QMenu()
for field in asset_template_fields:
action = btn_assetBatchFields_menu.addAction(field)
x = lambda chk=False, field=field: addAssetBatchField(field)
action.triggered[()].connect(x)
btn_assetBatchFields.setMenu(btn_assetBatchFields_menu)
# Publish::Templates::AssetPane: Version template default button
def setAssetVersionDefault():
txt_assetVersion.setText(self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('version_name', {}).get('default', ''))
btn_assetVersionDefault = QtWidgets.QPushButton('Default', paneAssetTemplates)
btn_assetVersionDefault.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetVersionDefault.setMinimumSize(88, 28)
btn_assetVersionDefault.move(0, 68)
btn_assetVersionDefault.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_assetVersionDefault.clicked.connect(setAssetVersionDefault)
# Publish::Templates::AssetPane: Version template text field
txt_assetVersion_value = self.framework.prefs.get('flameMenuPublisher', {}).get('templates', {}).get('Asset', {}).get('version_name', {}).get('value', '')
txt_assetVersion = QtWidgets.QLineEdit(txt_assetVersion_value, paneAssetTemplates)
txt_assetVersion.setFocusPolicy(QtCore.Qt.ClickFocus)
txt_assetVersion.setMinimumSize(256, 28)
txt_assetVersion.move(94, 68)
txt_assetVersion.setStyleSheet('QLineEdit {color: #9a9a9a; background-color: #373e47; border-top: 1px inset black; border-bottom: 1px inset #545454}')
# Publish::Templates::AssetPane: Version template fields button
def addAssetVersionField(field):
txt_assetVersion.insert(field)
btn_assetVersionFields = QtWidgets.QPushButton('Add Field', paneAssetTemplates)
btn_assetVersionFields.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetVersionFields.setMinimumSize(88, 28)
btn_assetVersionFields.move(356, 68)
btn_assetVersionFields.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
btn_assetVersionFields_menu = QtWidgets.QMenu()
for field in asset_template_fields:
action = btn_assetVersionFields_menu.addAction(field)
x = lambda chk=False, field=field: addAssetVersionField(field)
action.triggered[()].connect(x)
btn_assetVersionFields.setMenu(btn_assetVersionFields_menu)
# Publish::Templates::AssetPane: Version zero button
'''
def update_assetVersionZero():
publish_prefs = self.framework.prefs.get('flameMenuPublisher', {})
version_zero = publish_prefs.get('version_zero', False)
if version_zero:
btn_assetVersionZero.setStyleSheet('QPushButton {font:italic; background-color: #4f4f4f; color: #d9d9d9; border-top: 1px inset #555555; border-bottom: 1px inset black}')
else:
btn_assetVersionZero.setStyleSheet('QPushButton {color: #989898; background-color: #373737; border-top: 1px inset #555555; border-bottom: 1px inset black}')
def clicked_assetVersionZero():
publish_prefs = self.framework.prefs.get('flameMenuPublisher', {})
version_zero = publish_prefs.get('version_zero', False)
self.framework.prefs['flameMenuPublisher']['version_zero'] = not version_zero
update_shotVersionZero()
update_assetVersionZero()
btn_assetVersionZero = QtWidgets.QPushButton('Start From Zero', paneAssetTemplates)
btn_assetVersionZero.setFocusPolicy(QtCore.Qt.NoFocus)
btn_assetVersionZero.setMinimumSize(108, 28)
btn_assetVersionZero.move(450, 102)
btn_assetVersionZero.clicked.connect(clicked_assetVersionZero)
update_assetVersionZero()
'''
# Publish::Templates::AssetPane: END OF SECTION
vbox_publish.addWidget(paneTemplates)
panePublish.setLayout(vbox_publish)
panePublish.setFixedSize(860, 280)
panePublish.move(160, 10)
panePublish.setVisible(False)
# Superclips
paneSuperclips.setFixedSize(840, 264)
paneSuperclips.move(172, 20)
paneSuperclips.setVisible(False)
lbl_paneSuperclips = QtWidgets.QLabel('Superclips', paneSuperclips)
lbl_paneSuperclips.setStyleSheet('QFrame {color: #989898}')
lbl_paneSuperclips.setFixedSize(840, 264)
lbl_paneSuperclips.setAlignment(QtCore.Qt.AlignCenter)
lbl_paneSuperclips.setFrameStyle(QtWidgets.QFrame.Box | QtWidgets.QFrame.Plain)
# Close button
def close_prefs_dialog():
self.framework.prefs['flameMenuPublisher']['templates']['Shot']['flame_render']['value'] = txt_shot.text()
self.framework.prefs['flameMenuPublisher']['templates']['Shot']['flame_batch']['value'] = txt_shotBatch.text()
self.framework.prefs['flameMenuPublisher']['templates']['Shot']['version_name']['value'] = txt_shotVersion.text()
self.framework.save_prefs()
window.accept()
close_btn = QtWidgets.QPushButton('Close', window)
close_btn.setFocusPolicy(QtCore.Qt.NoFocus)
close_btn.setFixedSize(88, 28)
close_btn.move(924, 292)
close_btn.setStyleSheet('QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black}'
'QPushButton:pressed {font:italic; color: #d9d9d9}')
close_btn.clicked.connect(close_prefs_dialog)
# Set default tab and start window
action_showShot()
pressPublish()
window.exec_()
def rescan(self, *args, **kwargs):
if not self.flame:
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
self.connector.cache_retrive_result(self.active_projects_uid, True)
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log_debug('Rescan Python Hooks')
class flameBatchBlessing(flameMenuApp):
def __init__(self, framework):
flameMenuApp.__init__(self, framework)
# app defaults
if not self.prefs.master.get(self.name):
self.prefs['flame_batch_root'] = '/var/tmp/flameMenuSG/flame_batch_setups'
self.prefs['enabled'] = True
self.prefs['use_project'] = True
self.root_folder = self.batch_setup_root_folder()
def batch_setup_root_folder(self):
try:
import flame
except ImportError:
return False
flame_batch_name = flame.batch.name.get_value()
current_project_name = flame.project.current_project.name
if self.prefs.get('use_project'):
flame_batch_path = os.path.join(
self.prefs.get('flame_batch_root'),
current_project_name,
flame_batch_name)
else:
flame_batch_path = os.path.join(
self.prefs.get('flame_batch_root'),
flame_batch_name)
if not os.path.isdir(flame_batch_path):
try:
os.makedirs(flame_batch_path)
self.log_debug('creating %s' % flame_batch_path)
except OSError:
print ('PYTHON\t: %s can not create %s' % (self.framework.bundle_name, flame_batch_path))
return False
return flame_batch_path
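# With the defaults above this resolves to, for example,
# /var/tmp/flameMenuSG/flame_batch_setups/<project name>/<batch name>
# (the <project name> component is omitted when 'use_project' is off).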
def collect_clip_uids(self, render_dest):
# collects clip uids from locations specified in render_dest dictionary
# returns: dictionary of lists of clip uid's at the locations specified
# in render_dest dictionary.
# clip_uids = {
# 'Batch Reels': {
# 'BatchReel Name': [uid1, uid2]
# }
# 'Batch Shelf Reels': {
# 'Shelf Reel Name 1': [uid3, uid4]
# 'Shelf Reel Name 2': [uid5, uid6, uid7]
# }
# 'Libraries': {
# 'Library Name 3': [uid8, uid9]
# }
# 'Reel Groups': {
# 'Reel Group Name 1': {
# 'Reel 1': []
# 'Reel 2': []
# }
# 'Reel Group Name 2': {
# 'Reel 1': []
# 'Reel 2': []
# }
#
# }
# }
import flame
collected_uids = dict()
for dest in render_dest.keys():
if dest == 'Batch Reels':
render_dest_names = list(render_dest.get(dest))
if not render_dest_names:
continue
batch_reels = dict()
for reel in flame.batch.reels:
current_uids = list()
if reel.name in render_dest_names:
for clip in reel.clips:
current_uids.append(clip.uid)
batch_reels[reel.name] = current_uids
collected_uids['Batch Reels'] = batch_reels
batch_shelf_reels = dict()
for reel in flame.batch.shelf_reels:
current_uids = list()
if reel.name in render_dest_names:
for clip in reel.clips:
current_uids.append(clip.uid)
batch_shelf_reels[reel.name] = current_uids
collected_uids['Batch Shelf Reels'] = batch_shelf_reels
elif dest == 'Libraries':
render_dest_names = list(render_dest.get(dest))
if not render_dest_names:
continue
libraries = dict()
current_workspace_libraries = flame.project.current_project.current_workspace.libraries
for library in current_workspace_libraries:
current_uids = list()
if library.name in render_dest_names:
for clip in library.clips:
current_uids.append(clip.uid)
libraries[library.name] = current_uids
collected_uids['Libraries'] = libraries
elif dest == 'Reel Groups':
render_dest_names = list(render_dest.get(dest))
if not render_dest_names:
continue
reel_groups = dict()
current_desktop_reel_groups = flame.project.current_project.current_workspace.desktop.reel_groups
for reel_group in current_desktop_reel_groups:
reels = dict()
if reel_group.name in render_dest_names:
for reel in reel_group.reels:
current_uids = list()
for clip in reel.clips:
current_uids.append(clip.uid)
reels[reel.name] = current_uids
reel_groups[reel_group.name] = reels
collected_uids['Reel Groups'] = reel_groups
return collected_uids
def bless_clip(self, clip, **kwargs):
batch_setup_name = kwargs.get('batch_setup_name')
batch_setup_file = kwargs.get('batch_setup_file')
blessing_string = str({'batch_file': batch_setup_file})
for version in clip.versions:
for track in version.tracks:
for segment in track.segments:
new_comment = segment.comment + blessing_string
segment.comment = new_comment
self.log_debug('blessing %s with %s' % (clip.name, blessing_string))
return True
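# The appended suffix is the repr of a small dict, e.g.
# "{'batch_file': '/path/to/setup.batch'}" (path hypothetical), so the
# originating batch setup can later be recovered from the segment comment.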
def bless_batch_renders(self, userData):
import flame
# finds clips that were not in the render destinations before
# and blesses them by adding batch_setup_name to the comments
batch_setup_name = userData.get('batch_setup_name')
batch_setup_file = userData.get('batch_setup_file')
render_dest_uids = userData.get('render_dest_uids')
for dest in render_dest_uids.keys():
previous_uids = None
if dest == 'Batch Reels':
batch_reels_dest = render_dest_uids.get(dest)
for batch_reel_name in batch_reels_dest.keys():
previous_uids = batch_reels_dest.get(batch_reel_name)
for reel in flame.batch.reels:
if reel.name == batch_reel_name:
for clip in reel.clips:
if clip.uid not in previous_uids:
self.bless_clip(clip,
batch_setup_name = batch_setup_name,
batch_setup_file = batch_setup_file)
elif dest == 'Batch Shelf Reels':
batch_shelf_reels_dest = render_dest_uids.get(dest)
for batch_shelf_reel_name in batch_shelf_reels_dest.keys():
previous_uids = batch_shelf_reels_dest.get(batch_shelf_reel_name)
for reel in flame.batch.shelf_reels:
if reel.name == batch_shelf_reel_name:
for clip in reel.clips:
if clip.uid not in previous_uids:
self.bless_clip(clip,
batch_setup_name = batch_setup_name,
batch_setup_file = batch_setup_file)
elif dest == 'Libraries':
libraries_dest = render_dest_uids.get(dest)
current_workspace_libraries = flame.project.current_project.current_workspace.libraries
for library_name in libraries_dest.keys():
previous_uids = libraries_dest.get(library_name)
for library in current_workspace_libraries:
if library.name == library_name:
for clip in library.clips:
if clip.uid not in previous_uids:
try:
self.bless_clip(clip,
batch_setup_name = batch_setup_name,
batch_setup_file = batch_setup_file)
except Exception:
print ('PYTHON\t: %s unable to bless %s' % (self.framework.bundle_name, clip.name))
print ('PYTHON\t: %s libraries are protected from editing' % self.framework.bundle_name)
continue
elif dest == 'Reel Groups':
reel_groups_dest = render_dest_uids.get(dest)
current_desktop_reel_groups = flame.project.current_project.current_workspace.desktop.reel_groups
for reel_group_name in reel_groups_dest.keys():
for desktop_reel_group in current_desktop_reel_groups:
if desktop_reel_group.name == reel_group_name:
reels = reel_groups_dest[reel_group_name]
for reel_name in reels.keys():
previous_uids = reels.get(reel_name)
for reel in desktop_reel_group.reels:
if reel.name == reel_name:
for clip in reel.clips:
if clip.uid not in previous_uids:
self.bless_clip(clip,
batch_setup_name = batch_setup_name,
batch_setup_file = batch_setup_file)
def create_batch_uid(self):
# generates UUID for the batch setup
import uuid
from datetime import datetime
uid = str(uuid.uuid1()).replace('-', '').upper()
timestamp = datetime.now().strftime('%Y%b%d_%H%M').upper()
return timestamp + '_' + uid[:3]
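# Example (hypothetical values): a call made on 5 Mar 2021 at 14:30 could
# return '2021MAR05_1430_A1F' -- a sortable timestamp plus the first three
# characters of an uppercased UUID1 hex string to avoid collisions.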
class flameMenuNewBatch(flameMenuApp):
def __init__(self, framework, connector):
# app configuration settings
def rotate_bitmaps_to_roots(bitmaps, roots):
"""Convert bitmaps of active notes from relative to absolute pitch classes.
Parameters
----------
bitmaps : np.ndarray, shape=(N, 12)
Bitmap of active notes, relative to the given root.
root : np.ndarray, shape=(N,)
Absolute pitch class number.
Returns
-------
bitmap : np.ndarray, shape=(N, 12)
Absolute bitmaps of active pitch classes.
"""
abs_bitmaps = []
for bitmap, chord_root in zip(bitmaps, roots):
abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root))
return np.asarray(abs_bitmaps)
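# A minimal sketch of the rotate_bitmap_to_root helper used above, assuming
# it circularly shifts a 12-bin relative bitmap so that bin 0 lands on the
# absolute root; the library's actual implementation may differ in detail.
def _rotate_bitmap_to_root_sketch(bitmap, chord_root):
# np.roll maps relative bin i to absolute bin (i + chord_root) % 12.
return np.roll(np.asarray(bitmap), chord_root)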
# --- Comparison Routines ---
def validate(reference_labels, estimated_labels):
"""Checks that the input annotations to a comparison function look like
valid chord labels.
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
"""
N = len(reference_labels)
M = len(estimated_labels)
if N != M:
raise ValueError(
"Chord comparison received different length lists: "
"len(reference)=%d\tlen(estimates)=%d" % (N, M))
for labels in [reference_labels, estimated_labels]:
for chord_label in labels:
validate_chord_label(chord_label)
# When either label list is empty, warn the user
if len(reference_labels) == 0:
warnings.warn('Reference labels are empty')
if len(estimated_labels) == 0:
warnings.warn('Estimated labels are empty')
def weighted_accuracy(comparisons, weights):
"""Compute the weighted accuracy of a list of chord comparisons.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> # Here, we're using the "thirds" function to compare labels
>>> # but any of the comparison functions would work.
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
comparisons : np.ndarray
List of chord comparison scores, in [0, 1] or -1
weights : np.ndarray
Weights (not necessarily normalized) for each comparison.
This can be a list of interval durations
Returns
-------
score : float
Weighted accuracy
"""
N = len(comparisons)
# There should be as many weights as comparisons
if weights.shape[0] != N:
raise ValueError('weights and comparisons should be of the same'
' length. len(weights) = {} but len(comparisons)'
' = {}'.format(weights.shape[0], N))
if (weights < 0).any():
raise ValueError('Weights should all be non-negative.')
if np.sum(weights) == 0:
warnings.warn('No nonzero weights, returning 0')
return 0
# Find all comparison scores which are valid
valid_idx = (comparisons >= 0)
# If no comparable chords were provided, warn and return 0
if valid_idx.sum() == 0:
warnings.warn("No reference chords were comparable "
"to estimated chords, returning 0.")
return 0
# Remove any uncomparable labels
comparisons = comparisons[valid_idx]
weights = weights[valid_idx]
# Normalize the weights
total_weight = float(np.sum(weights))
normalized_weights = np.asarray(weights, dtype=float)/total_weight
# Score is the sum of all weighted comparisons
return np.sum(comparisons*normalized_weights)
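# Worked example (hypothetical numbers): comparisons=[1.0, 0.0, -1.0] with
# weights=[2.0, 1.0, 5.0] drops the third (uncomparable) pair, normalizes
# the remaining weights to [2/3, 1/3], and returns 1.0*2/3 + 0.0*1/3 = 0.667.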
def thirds(reference_labels, estimated_labels):
"""Compare chords along root & third relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
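# Bin 3 of the semitone bitmap marks a minor third; this rule counts two
# chords as matching when both the root and the state of this bin agree.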
eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
comparison_scores = (eq_roots * eq_thirds).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores
def thirds_inv(reference_labels, estimated_labels):
"""Score chords along root, third, & bass relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_root = ref_roots == est_roots
eq_bass = ref_bass == est_bass
eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
comparison_scores = (eq_root * eq_third * eq_bass).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores
def triads(reference_labels, estimated_labels):
"""Compare chords along triad (root & quality to #5) relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.triads(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
eq_semitones = np.all(
np.equal(ref_semitones[:, :8], est_semitones[:, :8]), axis=1)
comparison_scores = (eq_roots * eq_semitones).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores
def triads_inv(reference_labels, estimated_labels):
"""Score chords along triad (root, quality to #5, & bass) relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.triads_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_roots = ref_roots == est_roots
eq_basses = ref_bass == est_bass
eq_semitones = np.all(
np.equal(ref_semitones[:, :8], est_semitones[:, :8]), axis=1)
comparison_scores = (eq_roots * eq_semitones * eq_basses).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores
def tetrads(reference_labels, estimated_labels):
"""Compare chords along tetrad (root & full quality) relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.tetrads(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
comparison_scores = (eq_roots * eq_semitones).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores
def tetrads_inv(reference_labels, estimated_labels):
"""Compare chords along tetrad (root, full quality, & bass) relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.tetrads_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_roots = ref_roots == est_roots
eq_basses = ref_bass == est_bass
eq_semitones = np.all(np.equal(ref_semitones, est_semitones), axis=1)
comparison_scores = (eq_roots * eq_semitones * eq_basses).astype(float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores
def compute_sensitivities(self, parameters, observations):
"""
Returns the log-likelihood of the population model parameters and its
sensitivities w.r.t. the observations and the parameters.
The log-likelihood and sensitivities are returned as a tuple
(float, np.ndarray), where the sensitivity array is
of shape (n + p,)
"""
# Compute population parameters and sensitivities dtheta/dvartheta
params, dvartheta = \
self._covariate_model.compute_population_sensitivities(
parameters)
# Compute log-likelihood and sensitivities dscore/deta, dscore/dtheta
score, sensitivities = self._population_model.compute_sensitivities(
params, observations)
# Propagate sensitivity of score to vartheta
# i.e. dscore/dvartheta = sum_i dscore/dtheta_i * dtheta_i/dvartheta
# Note dvartheta has shape (p, p') and dtheta has shape (p')
n = len(observations)
deta = sensitivities[:n]
dtheta = sensitivities[n:]
dvartheta = dvartheta @ dtheta
# Stack results
sensitivities = np.hstack((deta, dvartheta))
return (score, sensitivities)
def get_covariate_model(self):
"""
Returns the covariate model.
"""
return self._covariate_model
def get_covariate_names(self):
"""
Returns the names of the covariates. If name is
not set, defaults are returned.
"""
return self._covariate_model.get_covariate_names()
def get_parameter_names(self):
"""
Returns the names of the model parameters. If name is
not set, defaults are returned.
"""
return self._covariate_model.get_parameter_names()
def n_hierarchical_parameters(self, n_ids):
"""
Returns a tuple of the number of individual parameters and the number
of population parameters that this model expects in context of a
:class:`HierarchicalLogLikelihood`, when ``n_ids`` individuals are
modelled.
Parameters
----------
n_ids
Number of individuals.
"""
# Get number of individual parameters
n_ids, _ = self._population_model.n_hierarchical_parameters(n_ids)
return (n_ids, self._covariate_model.n_parameters())
def n_covariates(self):
"""
Returns the number of covariates.
"""
return self._covariate_model.n_covariates()
def n_parameters(self):
"""
Returns the number of parameters of the population model.
"""
return self._covariate_model.n_parameters()
def sample(
self, parameters, n_samples=None, seed=None, covariates=None,
return_psi=False):
r"""
Returns random samples from the population distribution.
By default samples from
.. math::
\psi \sim \mathbb{P}(\cdot | \vartheta, \chi)
are returned. If ``return_psi=False`` samples from
.. math::
\eta \sim \mathbb{P}(\cdot | \theta)
are returned.
:param parameters: Values of the model parameters.
:type parameters: List, np.ndarray of shape (p,)
:param n_samples: Number of samples. If ``None``, one sample is
returned.
:type n_samples: int, optional
:param seed: A seed for the pseudo-random number generator.
:type seed: int, np.random.Generator, optional
:param covariates: Values for the covariates. If ``None``, default
is assumed defined by the :class:`CovariateModel`.
:type covariates: List, np.ndarray of shape (c,)
:param return_psi: Boolean flag that indicates whether the parameters
of the individual likelihoods are returned or the transformed
inter-individual fluctuations.
:type return_psi: bool, optional
:returns: Samples from population model conditional on covariates.
:rtype: np.ndarray of shape (n_samples,)
"""
# Check that covariates has the correct dimensions
if covariates is not None:
covariates = np.array(covariates)
n_covariates = self._covariate_model.n_covariates()
if len(covariates) != n_covariates:
raise ValueError(
'Covariates must be of length n_covariates.')
# Add dimension to fit shape (n, c) for later convenience
covariates = np.reshape(covariates, (1, n_covariates))
# Compute population parameters
eta_dist_params = self._covariate_model.compute_population_parameters(
parameters)
# Sample eta from population model
eta = self._population_model.sample(eta_dist_params, n_samples, seed)
if not return_psi:
return eta
# Compute psi
psi = self._covariate_model.compute_individual_parameters(
parameters, eta, covariates)
return psi
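# A minimal usage sketch (parameter and covariate values are hypothetical):
# samples = model.sample(parameters=[1.0, 0.1, 0.5], n_samples=100,
# covariates=[70.0], return_psi=True)
# draws eta from the population model and maps it through the covariate
# model to individual-level psi values.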
def set_covariate_names(self, names=None, update_param_names=False):
"""
Sets the names of the covariates.
:param names: A list of parameter names. If ``None``, covariate names
are reset to defaults.
:type names: List
:param update_param_names: Boolean flag indicating whether parameter
names should be updated according to new covariate names. By
default parameter names are not updated.
:type update_param_names: bool, optional
"""
self._covariate_model.set_covariate_names(names, update_param_names)
def set_parameter_names(self, names=None):
"""
Sets the names of the population model parameters.
Parameters
----------
names
An array-like object with string-convertable entries of length
:meth:`n_parameters`. If ``None``, parameter names are reset to
defaults.
"""
self._covariate_model.set_parameter_names(names)
class GaussianModel(PopulationModel):
r"""
A population model which assumes that model parameters across individuals
are distributed according to a Gaussian distribution.
A Gaussian population model assumes that a model parameter
:math:`\psi` varies across individuals such that :math:`\psi` is
Gaussian distributed in the population
.. math::
p(\psi |\mu, \sigma) =
\frac{1}{\sqrt{2\pi} \sigma}
\exp\left(-\frac{(\psi - \mu )^2}
{2 \sigma ^2}\right).
Here, :math:`\mu` and :math:`\sigma ^2` are the
mean and variance of the Gaussian distribution.
Any observed individual with parameter :math:`\psi _i` is
assumed to be a realisation of the random variable :math:`\psi`.
Extends :class:`PopulationModel`.
"""
def __init__(self):
super(GaussianModel, self).__init__()
# Set number of parameters
self._n_parameters = 2
# Set default parameter names
self._parameter_names = ['Mean', 'Std.']
@staticmethod
def _compute_log_likelihood(mean, var, observations): # pragma: no cover
r"""
Calculates the log-likelihood using numba speed up.
"""
# Compute log-likelihood score
n_ids = len(observations)
log_likelihood = \
- n_ids * np.log(2 * np.pi * var) / 2 \
- np.sum((observations - mean) ** 2) / (2 * var)
# If score evaluates to NaN, return -infinity
if np.isnan(log_likelihood):
return -np.inf
return log_likelihood
@staticmethod
def _compute_pointwise_ll(mean, var, observations): # pragma: no cover
r"""
Calculates the pointwise log-likelihoods using numba speed up.
"""
# Compute log-likelihood score
log_likelihood = \
- np.log(2 * np.pi * var) / 2 \
- (observations - mean) ** 2 / (2 * var)
# If score evaluates to NaN, return -infinity
mask = np.isnan(log_likelihood)
if np.any(mask):
log_likelihood[mask] = -np.inf
return log_likelihood
def _compute_sensitivities(self, mean, var, psi): # pragma: no cover
r"""
Calculates the log-likelihood and its sensitivities using numba
speed up.
Expects:
mean = float
var = float
Shape observations = (n_obs,)
Returns:
log_likelihood: float
sensitivities: np.ndarray of shape (n_obs + 2,)
"""
# Compute log-likelihood score
n_ids = len(psi)
log_likelihood = self._compute_log_likelihood(mean, var, psi)
# If score evaluates to NaN, return -infinity
if np.isnan(log_likelihood):
return -np.inf, np.full(shape=n_ids + 2, fill_value=np.inf)
# Compute sensitivities w.r.t. observations (psi)
dpsi = (mean - psi) / var
# Compute sensitivities w.r.t. parameters
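# dL/dmean = sum(psi - mean) / var
# dL/dstd = -n/std + sum((psi - mean)^2) / std^3
#         = (-n + sum((psi - mean)^2) / var) / sqrt(var)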
dmean = np.sum(psi - mean) / var
dstd = (-n_ids + np.sum((psi - mean)**2) / var) / np.sqrt(var)
sensitivities = np.concatenate((dpsi, np.array([dmean, dstd])))
return log_likelihood, sensitivities
def compute_log_likelihood(self, parameters, observations):
r"""
Returns the log-likelihood of the population model parameters.
The log-likelihood of a Gaussian distribution is the log-pdf
evaluated at the observations
.. math::
L(\mu , \sigma | \Psi) =
\sum _{i=1}^N
\log p(\psi _i |
\mu , \sigma ) ,
where
:math:`\Psi := (\psi _1, \ldots , \psi _N)`
are the "observed" :math:`\psi` from :math:`N` individuals.
.. note::
Note that in the context of PKPD modelling the individual
parameters are never "observed" directly, but rather inferred
from biomarker measurements.
Parameters
----------
parameters
An array-like object with the model parameter values, i.e.
[:math:`\mu`, :math:`\sigma`].
observations
An array like object with the parameter values for the individuals,
i.e. [:math:`\psi _1, \ldots , \psi _N`].
"""
observations = np.asarray(observations)
mean, std = parameters
var = std**2
eps = 1E-12
if (std <= 0) or (var <= eps):
# The std. of the Gaussian distribution is strictly positive
return -np.inf
return self._compute_log_likelihood(mean, var, observations)
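# Example (hypothetical numbers): parameters=[0.0, 1.0] with
# observations=[0.0] give -log(2*pi)/2, roughly -0.919.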
def compute_pointwise_ll(self, parameters, observations):
r"""
Returns the pointwise log-likelihood of the model parameters for
each observation.
The pointwise log-likelihood of a Gaussian distribution is
the log-pdf evaluated at the observations
.. math::
L(\mu , \sigma | \psi _i) =
\log p(\psi _i |
\mu , \sigma ) ,
where
:math:`\psi _i` are the "observed" parameters :math:`\psi` from
individual :math:`i`.
Parameters
----------
parameters
An array-like object with the model parameter values, i.e.
[:math:`\mu`, :math:`\sigma`].
observations
An array like object with the parameter values for the individuals,
i.e. [:math:`\psi _1, \ldots , \psi _N`].
"""
observations = np.asarray(observations)
mean, std = parameters
var = std**2
eps = 1E-6
if (std <= 0) or (var <= eps):
# The std. of the Gaussian distribution is strictly positive
return np.full(shape=len(observations), fill_value=-np.inf)
return self._compute_pointwise_ll(mean, var, observations)
def compute_sensitivities(self, parameters, observations):
r"""
Returns the log-likelihood of the population parameters and its
sensitivity w.r.t. the observations and the parameters.
Parameters
----------
parameters
An array-like object with the parameters of the population model.
observations
An array-like object with the observations of the individuals. Each
entry is assumed to belong to one individual.
"""
observations = np.asarray(observations)
mean, std = parameters
var = std**2
eps = 1E-6
if (std <= 0) or (var <= eps):
# The std. of the Gaussian distribution is strictly positive
n_obs = len(observations)
return -np.inf, np.full(shape=(n_obs + 2,), fill_value=np.inf)
return self._compute_sensitivities(mean, var, observations)
def get_parameter_names(self):
"""
Returns the names of the model parameters. If name is
not set, defaults are returned.
"""
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Virtual Me2Me implementation. This script runs and manages the processes
# required for a Virtual Me2Me desktop, which are: X server, X desktop
# session, and Host process.
# This script is intended to run continuously as a background daemon
# process, running under an ordinary (non-root) user account.
import atexit
import base64
import errno
import getpass
import hashlib
import hmac
import json
import logging
import optparse
import os
import random
import signal
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
import uuid
# Local modules
import gaia_auth
import keygen
REMOTING_COMMAND = "remoting_me2me_host"
# Command-line switch for passing the config path to remoting_me2me_host.
HOST_CONFIG_SWITCH_NAME = "host-config"
# Needs to be an absolute path, since the current working directory is changed
# when this process self-daemonizes.
SCRIPT_PATH = os.path.dirname(sys.argv[0])
if SCRIPT_PATH:
SCRIPT_PATH = os.path.abspath(SCRIPT_PATH)
else:
SCRIPT_PATH = os.getcwd()
# These are relative to SCRIPT_PATH.
EXE_PATHS_TO_TRY = [
".",
"../../out/Debug",
"../../out/Release"
]
CONFIG_DIR = os.path.expanduser("~/.config/chrome-remote-desktop")
HOME_DIR = os.environ["HOME"]
X_LOCK_FILE_TEMPLATE = "/tmp/.X%d-lock"
FIRST_X_DISPLAY_NUMBER = 20
X_AUTH_FILE = os.path.expanduser("~/.Xauthority")
os.environ["XAUTHORITY"] = X_AUTH_FILE
# Globals needed by the atexit cleanup() handler.
g_desktops = []
g_pidfile = None
class Authentication:
"""Manage authentication tokens for Chromoting/xmpp"""
def __init__(self, config_file):
self.config_file = config_file
def generate_tokens(self):
"""Prompt for username/password and use them to generate new authentication
tokens.
Raises:
Exception: Failed to get new authentication tokens.
"""
print "Email:",
self.login = raw_input()
password = getpass.getpass("Password: ")
chromoting_auth = gaia_auth.GaiaAuthenticator('chromoting')
self.chromoting_auth_token = chromoting_auth.authenticate(self.login,
password)
xmpp_authenticator = gaia_auth.GaiaAuthenticator('chromiumsync')
self.xmpp_auth_token = xmpp_authenticator.authenticate(self.login,
password)
def load_config(self):
try:
settings_file = open(self.config_file, 'r')
data = json.load(settings_file)
settings_file.close()
self.login = data["xmpp_login"]
self.chromoting_auth_token = data["chromoting_auth_token"]
self.xmpp_auth_token = data["xmpp_auth_token"]
except:
return False
return True
def save_config(self):
data = {
"xmpp_login": self.login,
"chromoting_auth_token": self.chromoting_auth_token,
"xmpp_auth_token": self.xmpp_auth_token,
}
# File will contain private keys, so deny read/write access to others.
old_umask = os.umask(0066)
settings_file = open(self.config_file, 'w')
settings_file.write(json.dumps(data, indent=2))
settings_file.close()
os.umask(old_umask)
class Host:
"""This manages the configuration for a host.
Callers should instantiate a Host object (passing in a filename where the
config will be kept), then should call either of the methods:
* register(auth): Create a new Host configuration and register it
with the Directory Service (the "auth" parameter is used to
authenticate with the Service).
* load_config(): Load a config from disk, with details of an existing Host
registration.
After calling register() (or making any config changes) the method
save_config() should be called to save the details to disk.
"""
server = 'www.googleapis.com'
url = 'https://' + server + '/chromoting/v1/@me/hosts'
def __init__(self, config_file):
self.config_file = config_file
self.host_id = str(uuid.uuid1())
self.host_name = socket.gethostname()
self.host_secret_hash = None
self.private_key = None
def register(self, auth):
"""Generates a private key for the stored |host_id|, and registers it with
the Directory service.
Args:
auth: Authentication object with credentials for authenticating with the
Directory service.
Raises:
urllib2.HTTPError: An error occurred talking to the Directory server
(for example, if the |auth| credentials were rejected).
"""
logging.info("HostId: " + self.host_id)
logging.info("HostName: " + self.host_name)
logging.info("Generating RSA key pair...")
(self.private_key, public_key) = keygen.generateRSAKeyPair()
logging.info("Done")
json_data = {
"data": {
"hostId": self.host_id,
"hostName": self.host_name,
"publicKey": public_key,
}
}
params = json.dumps(json_data)
headers = {
"Authorization": "GoogleLogin auth=" + auth.chromoting_auth_token,
"Content-Type": "application/json",
}
request = urllib2.Request(self.url, params, headers)
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
logging.info("Registering host with directory service...")
res = urllib2.urlopen(request)
data = res.read()
logging.info("Done")
def ask_pin(self):
print \
"""Chromoting host supports PIN-based authentication, but it doesn't
work with Chrome 16 and Chrome 17 clients. Leave the PIN empty if you
need to use Chrome 16 or Chrome 17 clients. If you only use Chrome 18
or above, please set a non-empty PIN. You can change the PIN later using
the -p flag."""
while 1:
pin = getpass.getpass("Host PIN: ")
if len(pin) == 0:
print "Using empty PIN"
break
if len(pin) < 4:
print "PIN must be at least 4 characters long."
continue
pin2 = getpass.getpass("Confirm host PIN: ")
if pin2 != pin:
print "PINs didn't match. Please try again."
continue
break
self.set_pin(pin)
def set_pin(self, pin):
if pin == "":
self.host_secret_hash = "plain:"
else:
self.host_secret_hash = "hmac:" + base64.b64encode(
hmac.new(str(self.host_id), pin, hashlib.sha256).digest())
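# The stored value is therefore either "plain:" (empty PIN) or
# "hmac:<base64 of an HMAC-SHA256 keyed by host_id>"; the prefix tells the
# host how to verify the shared secret.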
def is_pin_set(self):
return self.host_secret_hash
def load_config(self):
try:
settings_file = open(self.config_file, 'r')
data = json.load(settings_file)
settings_file.close()
except:
logging.info("Failed to load: " + self.config_file)
return False
self.host_id = data["host_id"]
self.host_name = data["host_name"]
self.host_secret_hash = data.get("host_secret_hash")
self.private_key = data["private_key"]
return True
def save_config(self):
data = {
"host_id": self.host_id,
"host_name": self.host_name,
"host_secret_hash": self.host_secret_hash,
"private_key": self.private_key,
}
old_umask = os.umask(0066)
settings_file = open(self.config_file, 'w')
settings_file.write(json.dumps(data, indent=2))
settings_file.close()
os.umask(old_umask)
class Desktop:
"""Manage a single virtual desktop"""
def __init__(self, width, height):
self.x_proc = None
self.session_proc = None
self.host_proc = None
self.width = width
self.height = height
g_desktops.append(self)
@staticmethod
def get_unused_display_number():
"""Return a candidate display number for which there is currently no
X Server lock file"""
display = FIRST_X_DISPLAY_NUMBER
while os.path.exists(X_LOCK_FILE_TEMPLATE % display):
display += 1
return display
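# e.g. if /tmp/.X20-lock and /tmp/.X21-lock both exist, this returns 22.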
def launch_x_server(self, extra_x_args):
display = self.get_unused_display_number()
ret_code = subprocess.call("xauth add :%d . `mcookie`" % display,
shell=True)
if ret_code != 0:
raise Exception("xauth failed with code %d" % ret_code)
logging.info("Starting Xvfb on display :%d" % display);
screen_option = "%dx%dx24" % (self.width, self.height)
self.x_proc = subprocess.Popen(["Xvfb", ":%d" % display,
"-auth", X_AUTH_FILE,
"-nolisten", "tcp",
"-screen", "0", screen_option
] + extra_x_args)
if not self.x_proc.pid:
raise Exception("Could not start Xvfb.")
# Create clean environment for new session, so it is cleanly separated from
# the user's console X session.
self.child_env = {
"DISPLAY": ":%d" % display,
"REMOTING_ME2ME_SESSION": "1" }
for key in [
"HOME",
"LANG",
"LOGNAME",
"PATH",
"SHELL",
"USER",
"USERNAME"]:
if os.environ.has_key(key):
self.child_env[key] = os.environ[key]
# Wait for X to be active.
for test in range(5):
proc = subprocess.Popen("xdpyinfo > /dev/null", env=self.child_env,
shell=True)
pid, retcode = os.waitpid(proc.pid, 0)
if retcode == 0:
break
time.sleep(0.5)
if retcode != 0:
raise Exception("Could not connect to Xvfb.")
else:
logging.info("Xvfb is active.")
def launch_x_session(self):
# Start desktop session
# The /dev/null input redirection is necessary to prevent Xsession from
# reading from stdin. If this code runs as a shell background job in a
# terminal, any reading from stdin causes the job to be suspended.
# Daemonization would solve this problem by separating the process from the
# controlling terminal.
#
# This assumes that GDM is installed and configured on the system.
self.session_proc = subprocess.Popen("/etc/gdm/Xsession",
stdin=open(os.devnull, "r"),
cwd=HOME_DIR,
env=self.child_env)
if not self.session_proc.pid:
raise Exception("Could not start X session")
def launch_host(self, host):
# Start remoting host
args = [locate_executable(REMOTING_COMMAND),
"--%s=%s" % (HOST_CONFIG_SWITCH_NAME, host.config_file)]
self.host_proc = subprocess.Popen(args, env=self.child_env)
if not self.host_proc.pid:
raise Exception("Could not start remoting host")
class PidFile:
"""Class to allow creating and deleting a file which holds the PID of the
running process. This is used to detect if a process is already running, and
inform the user of the PID. On process termination, the PID file is
deleted.
Note that PID files are not truly atomic or reliable, see
http://mywiki.wooledge.org/ProcessManagement for more discussion on this.
So this class is just to prevent the user from accidentally running two
instances of this script, and to report which PID may be the other running
instance.
"""
def __init__(self, filename):
"""Create an object to manage a PID file. This does not create the PID
file itself."""
self.filename = filename
self.created = False
def check(self):
"""Checks current status of the process.
Returns:
Tuple (running, pid):
|running| is True if the daemon is running.
|pid| holds the process ID of the running instance if |running| is True.
If the PID file exists but the PID couldn't be read from the file
(perhaps if the data hasn't been written yet), 0 is returned.
Raises:
IOError: Filesystem error occurred.
"""
if os.path.exists(self.filename):
pid_file = open(self.filename, 'r')
file_contents = pid_file.read()
pid_file.close()
try:
pid = int(file_contents)
except ValueError:
return True, 0
# Test to see if there's a process currently running with that PID.
# If there is no process running, the existing PID file is definitely
# stale and it is safe to overwrite it. Otherwise, report the PID as
# possibly a running instance of this script.
if os.path.exists("/proc/%d" % pid):
return True, pid
return False, 0
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .admissionregistration_v1beta1_service_reference import AdmissionregistrationV1beta1ServiceReference
from .admissionregistration_v1beta1_webhook_client_config import AdmissionregistrationV1beta1WebhookClientConfig
from .apiextensions_v1beta1_service_reference import ApiextensionsV1beta1ServiceReference
from .apiextensions_v1beta1_webhook_client_config import ApiextensionsV1beta1WebhookClientConfig
from .apiregistration_v1beta1_service_reference import ApiregistrationV1beta1ServiceReference
from .apps_v1beta1_deployment import AppsV1beta1Deployment
from .apps_v1beta1_deployment_condition import AppsV1beta1DeploymentCondition
from .apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
from .apps_v1beta1_deployment_rollback import AppsV1beta1DeploymentRollback
from .apps_v1beta1_deployment_spec import AppsV1beta1DeploymentSpec
from .apps_v1beta1_deployment_status import AppsV1beta1DeploymentStatus
from .apps_v1beta1_deployment_strategy import AppsV1beta1DeploymentStrategy
from .apps_v1beta1_rollback_config import AppsV1beta1RollbackConfig
from .apps_v1beta1_rolling_update_deployment import AppsV1beta1RollingUpdateDeployment
from .apps_v1beta1_scale import AppsV1beta1Scale
from .apps_v1beta1_scale_spec import AppsV1beta1ScaleSpec
from .apps_v1beta1_scale_status import AppsV1beta1ScaleStatus
from .extensions_v1beta1_allowed_csi_driver import ExtensionsV1beta1AllowedCSIDriver
from .extensions_v1beta1_allowed_flex_volume import ExtensionsV1beta1AllowedFlexVolume
from .extensions_v1beta1_allowed_host_path import ExtensionsV1beta1AllowedHostPath
from .extensions_v1beta1_deployment import ExtensionsV1beta1Deployment
from .extensions_v1beta1_deployment_condition import ExtensionsV1beta1DeploymentCondition
from .extensions_v1beta1_deployment_list import ExtensionsV1beta1DeploymentList
from .extensions_v1beta1_deployment_rollback import ExtensionsV1beta1DeploymentRollback
from .extensions_v1beta1_deployment_spec import ExtensionsV1beta1DeploymentSpec
from .extensions_v1beta1_deployment_status import ExtensionsV1beta1DeploymentStatus
from .extensions_v1beta1_deployment_strategy import ExtensionsV1beta1DeploymentStrategy
from .extensions_v1beta1_fs_group_strategy_options import ExtensionsV1beta1FSGroupStrategyOptions
from .extensions_v1beta1_http_ingress_path import ExtensionsV1beta1HTTPIngressPath
from .extensions_v1beta1_http_ingress_rule_value import ExtensionsV1beta1HTTPIngressRuleValue
from .extensions_v1beta1_host_port_range import ExtensionsV1beta1HostPortRange
from .extensions_v1beta1_id_range import ExtensionsV1beta1IDRange
from .extensions_v1beta1_ingress import ExtensionsV1beta1Ingress
from .extensions_v1beta1_ingress_backend import ExtensionsV1beta1IngressBackend
from .extensions_v1beta1_ingress_list import ExtensionsV1beta1IngressList
from .extensions_v1beta1_ingress_rule import ExtensionsV1beta1IngressRule
from .extensions_v1beta1_ingress_spec import ExtensionsV1beta1IngressSpec
from .extensions_v1beta1_ingress_status import ExtensionsV1beta1IngressStatus
from .extensions_v1beta1_ingress_tls import ExtensionsV1beta1IngressTLS
from .extensions_v1beta1_pod_security_policy import ExtensionsV1beta1PodSecurityPolicy
from .extensions_v1beta1_pod_security_policy_list import ExtensionsV1beta1PodSecurityPolicyList
from .extensions_v1beta1_pod_security_policy_spec import ExtensionsV1beta1PodSecurityPolicySpec
from .extensions_v1beta1_rollback_config import ExtensionsV1beta1RollbackConfig
from .extensions_v1beta1_rolling_update_deployment import ExtensionsV1beta1RollingUpdateDeployment
from .extensions_v1beta1_run_as_group_strategy_options import ExtensionsV1beta1RunAsGroupStrategyOptions
from .extensions_v1beta1_run_as_user_strategy_options import ExtensionsV1beta1RunAsUserStrategyOptions
from .extensions_v1beta1_se_linux_strategy_options import ExtensionsV1beta1SELinuxStrategyOptions
from .extensions_v1beta1_scale import ExtensionsV1beta1Scale
from .extensions_v1beta1_scale_spec import ExtensionsV1beta1ScaleSpec
from .extensions_v1beta1_scale_status import ExtensionsV1beta1ScaleStatus
from .extensions_v1beta1_supplemental_groups_strategy_options import ExtensionsV1beta1SupplementalGroupsStrategyOptions
from .networking_v1beta1_http_ingress_path import NetworkingV1beta1HTTPIngressPath
from .networking_v1beta1_http_ingress_rule_value import NetworkingV1beta1HTTPIngressRuleValue
from .networking_v1beta1_ingress import NetworkingV1beta1Ingress
from .networking_v1beta1_ingress_backend import NetworkingV1beta1IngressBackend
from .networking_v1beta1_ingress_list import NetworkingV1beta1IngressList
from .networking_v1beta1_ingress_rule import NetworkingV1beta1IngressRule
from .networking_v1beta1_ingress_spec import NetworkingV1beta1IngressSpec
from .networking_v1beta1_ingress_status import NetworkingV1beta1IngressStatus
from .networking_v1beta1_ingress_tls import NetworkingV1beta1IngressTLS
from .policy_v1beta1_allowed_csi_driver import PolicyV1beta1AllowedCSIDriver
from .policy_v1beta1_allowed_flex_volume import PolicyV1beta1AllowedFlexVolume
from .policy_v1beta1_allowed_host_path import PolicyV1beta1AllowedHostPath
from .policy_v1beta1_fs_group_strategy_options import PolicyV1beta1FSGroupStrategyOptions
from .policy_v1beta1_host_port_range import PolicyV1beta1HostPortRange
from .policy_v1beta1_id_range import PolicyV1beta1IDRange
from .policy_v1beta1_pod_security_policy import PolicyV1beta1PodSecurityPolicy
from .policy_v1beta1_pod_security_policy_list import PolicyV1beta1PodSecurityPolicyList
from .policy_v1beta1_pod_security_policy_spec import PolicyV1beta1PodSecurityPolicySpec
from .policy_v1beta1_run_as_group_strategy_options import PolicyV1beta1RunAsGroupStrategyOptions
from .policy_v1beta1_run_as_user_strategy_options import PolicyV1beta1RunAsUserStrategyOptions
from .policy_v1beta1_se_linux_strategy_options import PolicyV1beta1SELinuxStrategyOptions
from .policy_v1beta1_supplemental_groups_strategy_options import PolicyV1beta1SupplementalGroupsStrategyOptions
from .runtime_raw_extension import RuntimeRawExtension
from .v1_api_group import V1APIGroup
from .v1_api_group_list import V1APIGroupList
from .v1_api_resource import V1APIResource
from .v1_api_resource_list import V1APIResourceList
from .v1_api_service import V1APIService
from .v1_api_service_condition import V1APIServiceCondition
from .v1_api_service_list import V1APIServiceList
from .v1_api_service_spec import V1APIServiceSpec
from .v1_api_service_status import V1APIServiceStatus
from .v1_api_versions import V1APIVersions
from .v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource
from .v1_affinity import V1Affinity
from .v1_aggregation_rule import V1AggregationRule
from .v1_attached_volume import V1AttachedVolume
from .v1_azure_disk_volume_source import V1AzureDiskVolumeSource
from .v1_azure_file_persistent_volume_source import V1AzureFilePersistentVolumeSource
from .v1_azure_file_volume_source import V1AzureFileVolumeSource
from .v1_binding import V1Binding
from .v1_csi_persistent_volume_source import V1CSIPersistentVolumeSource
from .v1_csi_volume_source import V1CSIVolumeSource
from .v1_capabilities import V1Capabilities
from .v1_ceph_fs_persistent_volume_source import V1CephFSPersistentVolumeSource
from .v1_ceph_fs_volume_source import V1CephFSVolumeSource
from .v1_cinder_persistent_volume_source import V1CinderPersistentVolumeSource
from .v1_cinder_volume_source import V1CinderVolumeSource
from .v1_client_ip_config import V1ClientIPConfig
from .v1_cluster_role import V1ClusterRole
from .v1_cluster_role_binding import V1ClusterRoleBinding
from .v1_cluster_role_binding_list import V1ClusterRoleBindingList
from .v1_cluster_role_list import V1ClusterRoleList
from .v1_component_condition import V1ComponentCondition
from .v1_component_status import V1ComponentStatus
from .v1_component_status_list import V1ComponentStatusList
from .v1_config_map import V1ConfigMap
from .v1_config_map_env_source import V1ConfigMapEnvSource
from .v1_config_map_key_selector import V1ConfigMapKeySelector
from .v1_config_map_list import V1ConfigMapList
from .v1_config_map_node_config_source import V1ConfigMapNodeConfigSource
from .v1_config_map_projection import V1ConfigMapProjection
from .v1_config_map_volume_source import V1ConfigMapVolumeSource
from .v1_container import V1Container
from .v1_container_image import V1ContainerImage
from .v1_container_port import V1ContainerPort
from .v1_container_state import V1ContainerState
from .v1_container_state_running import V1ContainerStateRunning
from .v1_container_state_terminated import V1ContainerStateTerminated
from .v1_container_state_waiting import V1ContainerStateWaiting
from .v1_container_status import V1ContainerStatus
from .v1_controller_revision import V1ControllerRevision
from .v1_controller_revision_list import V1ControllerRevisionList
from .v1_cross_version_object_reference import V1CrossVersionObjectReference
from .v1_daemon_endpoint import V1DaemonEndpoint
from .v1_daemon_set import V1DaemonSet
from .v1_daemon_set_condition import V1DaemonSetCondition
from .v1_daemon_set_list import V1DaemonSetList
from .v1_daemon_set_spec import V1DaemonSetSpec
from .v1_daemon_set_status import V1DaemonSetStatus
from .v1_daemon_set_update_strategy import V1DaemonSetUpdateStrategy
from .v1_delete_options import V1DeleteOptions
from .v1_deployment import V1Deployment
from .v1_deployment_condition import V1DeploymentCondition
from .v1_deployment_list import V1DeploymentList
from .v1_deployment_spec import V1DeploymentSpec
from .v1_deployment_status import V1DeploymentStatus
from .v1_deployment_strategy import V1DeploymentStrategy
from .v1_downward_api_projection import V1DownwardAPIProjection
from .v1_downward_api_volume_file import V1DownwardAPIVolumeFile
from .v1_downward_api_volume_source import V1DownwardAPIVolumeSource
from .v1_empty_dir_volume_source import V1EmptyDirVolumeSource
from .v1_endpoint_address import V1EndpointAddress
from .v1_endpoint_port import V1EndpointPort
from .v1_endpoint_subset import V1EndpointSubset
from .v1_endpoints import V1Endpoints
from .v1_endpoints_list import V1EndpointsList
from .v1_env_from_source import V1EnvFromSource
from .v1_env_var import V1EnvVar
from .v1_env_var_source import V1EnvVarSource
from .v1_event import V1Event
from .v1_event_list import V1EventList
from .v1_event_series import V1EventSeries
from .v1_event_source import V1EventSource
from .v1_exec_action import V1ExecAction
from .v1_fc_volume_source import V1FCVolumeSource
from .v1_flex_persistent_volume_source import V1FlexPersistentVolumeSource
from .v1_flex_volume_source import V1FlexVolumeSource
from .v1_flocker_volume_source import V1FlockerVolumeSource
from .v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource
from .v1_git_repo_volume_source import V1GitRepoVolumeSource
from .v1_glusterfs_persistent_volume_source import V1GlusterfsPersistentVolumeSource
from .v1_glusterfs_volume_source import V1GlusterfsVolumeSource
from .v1_group_version_for_discovery import V1GroupVersionForDiscovery
from .v1_http_get_action import V1HTTPGetAction
from .v1_http_header import V1HTTPHeader
from .v1_handler import V1Handler
from .v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler
from .v1_horizontal_pod_autoscaler_list import V1HorizontalPodAutoscalerList
from .v1_horizontal_pod_autoscaler_spec import V1HorizontalPodAutoscalerSpec
from .v1_horizontal_pod_autoscaler_status import V1HorizontalPodAutoscalerStatus
from .v1_host_alias import V1HostAlias
from .v1_host_path_volume_source import V1HostPathVolumeSource
from .v1_ip_block import V1IPBlock
from .v1_iscsi_persistent_volume_source import V1ISCSIPersistentVolumeSource
from .v1_iscsi_volume_source import V1ISCSIVolumeSource
from .v1_initializer import V1Initializer
from .v1_initializers import V1Initializers
from .v1_job import V1Job
from .v1_job_condition import V1JobCondition
from .v1_job_list import V1JobList
from .v1_job_spec import V1JobSpec
from .v1_job_status import V1JobStatus
from .v1_key_to_path import V1KeyToPath
from .v1_label_selector import V1LabelSelector
from .v1_label_selector_requirement import V1LabelSelectorRequirement
from .v1_lease import V1Lease
from .v1_lease_list import V1LeaseList
from .v1_lease_spec import V1LeaseSpec
from .v1_lifecycle import V1Lifecycle
from .v1_limit_range import V1LimitRange
from .v1_limit_range_item import V1LimitRangeItem
from .v1_limit_range_list import V1LimitRangeList
from .v1_limit_range_spec import V1LimitRangeSpec
from .v1_list_meta import V1ListMeta
from .v1_load_balancer_ingress import V1LoadBalancerIngress
from .v1_load_balancer_status import V1LoadBalancerStatus
from .v1_local_object_reference import V1LocalObjectReference
from .v1_local_subject_access_review import V1LocalSubjectAccessReview
from .v1_local_volume_source import V1LocalVolumeSource
from .v1_managed_fields_entry import V1ManagedFieldsEntry
from .v1_nfs_volume_source import V1NFSVolumeSource
from .v1_namespace import V1Namespace
from .v1_namespace_list import V1NamespaceList
from .v1_namespace_spec import V1NamespaceSpec
from .v1_namespace_status import V1NamespaceStatus
from .v1_network_policy import V1NetworkPolicy
from .v1_network_policy_egress_rule import V1NetworkPolicyEgressRule
from .v1_network_policy_ingress_rule import V1NetworkPolicyIngressRule
from .v1_network_policy_list import V1NetworkPolicyList
from .v1_network_policy_peer import V1NetworkPolicyPeer
from .v1_network_policy_port import V1NetworkPolicyPort
from .v1_network_policy_spec import V1NetworkPolicySpec
from .v1_node import V1Node
from .v1_node_address import V1NodeAddress
from .v1_node_affinity import V1NodeAffinity
from .v1_node_condition import V1NodeCondition
from .v1_node_config_source import V1NodeConfigSource
from .v1_node_config_status import V1NodeConfigStatus
from .v1_node_daemon_endpoints import V1NodeDaemonEndpoints
from .v1_node_list import V1NodeList
from .v1_node_selector import V1NodeSelector
from .v1_node_selector_requirement import V1NodeSelectorRequirement
from .v1_node_selector_term import V1NodeSelectorTerm
from .v1_node_spec import V1NodeSpec
from .v1_node_status import V1NodeStatus
from .v1_node_system_info import V1NodeSystemInfo
from .v1_non_resource_attributes import V1NonResourceAttributes
from .v1_non_resource_rule import V1NonResourceRule
from .v1_object_field_selector import V1ObjectFieldSelector
from .v1_object_meta import V1ObjectMeta
from .v1_object_reference import V1ObjectReference
from .v1_owner_reference import V1OwnerReference
from .v1_persistent_volume import V1PersistentVolume
from .v1_persistent_volume_claim import V1PersistentVolumeClaim
from .v1_persistent_volume_claim_condition import V1PersistentVolumeClaimCondition
from .v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
from .v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
from .v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
from .v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
from .v1_persistent_volume_list import V1PersistentVolumeList
from .v1_persistent_volume_spec import V1PersistentVolumeSpec
from .v1_persistent_volume_status import V1PersistentVolumeStatus
from .v1_photon_persistent_disk_volume_source import V1PhotonPersistentDiskVolumeSource
from .v1_pod import V1Pod
from .v1_pod_affinity import V1PodAffinity
from .v1_pod_affinity_term import V1PodAffinityTerm
from .v1_pod_anti_affinity import V1PodAntiAffinity
from .v1_pod_condition import V1PodCondition
from .v1_pod_dns_config import V1PodDNSConfig
from .v1_pod_dns_config_option import V1PodDNSConfigOption
from .v1_pod_list import V1PodList
from .v1_pod_readiness_gate import V1PodReadinessGate
from .v1_pod_security_context import V1PodSecurityContext
from .v1_pod_spec import V1PodSpec
from .v1_pod_status import V1PodStatus
from .v1_pod_template import V1PodTemplate
from .v1_pod_template_list import V1PodTemplateList
from .v1_pod_template_spec import V1PodTemplateSpec
from .v1_policy_rule import V1PolicyRule
from .v1_portworx_volume_source import V1PortworxVolumeSource
from .v1_preconditions import V1Preconditions
from .v1_preferred_scheduling_term import V1PreferredSchedulingTerm
from .v1_priority_class import V1PriorityClass
from .v1_priority_class_list import V1PriorityClassList
from .v1_probe import V1Probe
from .v1_projected_volume_source import V1ProjectedVolumeSource
from .v1_quobyte_volume_source import V1QuobyteVolumeSource
from .v1_rbd_persistent_volume_source import V1RBDPersistentVolumeSource
from .v1_rbd_volume_source import V1RBDVolumeSource
from .v1_replica_set import V1ReplicaSet
from .v1_replica_set_condition import V1ReplicaSetCondition
from .v1_replica_set_list import V1ReplicaSetList
from .v1_replica_set_spec import V1ReplicaSetSpec
from .v1_replica_set_status import V1ReplicaSetStatus
from .v1_replication_controller import V1ReplicationController
from .v1_replication_controller_condition import V1ReplicationControllerCondition
from .v1_replication_controller_list import V1ReplicationControllerList
from .v1_replication_controller_spec import V1ReplicationControllerSpec
from .v1_replication_controller_status import V1ReplicationControllerStatus
from .v1_resource_attributes import V1ResourceAttributes
from .v1_resource_field_selector import V1ResourceFieldSelector
from .v1_resource_quota import V1ResourceQuota
from .v1_resource_quota_list import V1ResourceQuotaList
from .v1_resource_quota_spec import V1ResourceQuotaSpec
from .v1_resource_quota_status import V1ResourceQuotaStatus
from .v1_resource_requirements import V1ResourceRequirements
from .v1_resource_rule import V1ResourceRule
from .v1_role import V1Role
from .v1_role_binding import V1RoleBinding
from .v1_role_binding_list import V1RoleBindingList
from .v1_role_list import V1RoleList
from .v1_role_ref import V1RoleRef
from .v1_rolling_update_daemon_set import V1RollingUpdateDaemonSet
from .v1_rolling_update_deployment import V1RollingUpdateDeployment
from .v1_rolling_update_stateful_set_strategy import V1RollingUpdateStatefulSetStrategy
from .v1_se_linux_options import V1SELinuxOptions
from .v1_scale import V1Scale
from .v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
from .v1_scale_io_volume_source import V1ScaleIOVolumeSource
from .v1_scale_spec import V1ScaleSpec
from .v1_scale_status import V1ScaleStatus
from .v1_scope_selector import V1ScopeSelector
from .v1_scoped_resource_selector_requirement import V1ScopedResourceSelectorRequirement
from .v1_secret import V1Secret
from .v1_secret_env_source import V1SecretEnvSource
from .v1_secret_key_selector import V1SecretKeySelector
from .v1_secret_list import V1SecretList
from .v1_secret_projection import V1SecretProjection
from .v1_secret_reference import V1SecretReference
from .v1_secret_volume_source import V1SecretVolumeSource
from .v1_security_context import V1SecurityContext
from .v1_self_subject_access_review import V1SelfSubjectAccessReview
from .v1_self_subject_access_review_spec import V1SelfSubjectAccessReviewSpec
from .v1_self_subject_rules_review import V1SelfSubjectRulesReview
from .v1_self_subject_rules_review_spec import V1SelfSubjectRulesReviewSpec
from .v1_server_address_by_client_cidr import V1ServerAddressByClientCIDR
from .v1_service import V1Service
from .v1_service_account import V1ServiceAccount
from .v1_service_account_list import V1ServiceAccountList
from .v1_service_account_token_projection import V1ServiceAccountTokenProjection
from .v1_service_list import V1ServiceList
from .v1_service_port import V1ServicePort
from .v1_service_reference import V1ServiceReference
from .v1_service_spec import V1ServiceSpec
from .v1_service_status import V1ServiceStatus
from .v1_session_affinity_config import V1SessionAffinityConfig
from .v1_stateful_set import V1StatefulSet
from .v1_stateful_set_condition import V1StatefulSetCondition
from .v1_stateful_set_list import V1StatefulSetList
from .v1_stateful_set_spec import V1StatefulSetSpec
from .v1_stateful_set_status import V1StatefulSetStatus
from .v1_stateful_set_update_strategy import V1StatefulSetUpdateStrategy
from .v1_status import V1Status
from .v1_status_cause import V1StatusCause
from .v1_status_details import V1StatusDetails
from .v1_storage_class import V1StorageClass
from .v1_storage_class_list import V1StorageClassList
from .v1_storage_os_persistent_volume_source import V1StorageOSPersistentVolumeSource
from .v1_storage_os_volume_source import V1StorageOSVolumeSource
from .v1_subject import V1Subject
from .v1_subject_access_review import V1SubjectAccessReview
from .v1_subject_access_review_spec import V1SubjectAccessReviewSpec
from .v1_subject_access_review_status import V1SubjectAccessReviewStatus
from .v1_subject_rules_review_status import V1SubjectRulesReviewStatus
from .v1_sysctl import V1Sysctl
from .v1_tcp_socket_action import V1TCPSocketAction
from .v1_taint import V1Taint
from .v1_token_review import V1TokenReview
from .v1_token_review_spec import V1TokenReviewSpec
from .v1_token_review_status import V1TokenReviewStatus
from .v1_toleration import V1Toleration
from .v1_topology_selector_label_requirement import V1TopologySelectorLabelRequirement
from .v1_topology_selector_term import V1TopologySelectorTerm
from .v1_typed_local_object_reference import V1TypedLocalObjectReference
from .v1_user_info import V1UserInfo
from .v1_volume import V1Volume
from .v1_volume_attachment import V1VolumeAttachment
from .v1_volume_attachment_list import V1VolumeAttachmentList
from .v1_volume_attachment_source import V1VolumeAttachmentSource
from .v1_volume_attachment_spec import V1VolumeAttachmentSpec
from .v1_volume_attachment_status import V1VolumeAttachmentStatus
from .v1_volume_device import V1VolumeDevice
from .v1_volume_error import V1VolumeError
from .v1_volume_mount import V1VolumeMount
from .v1_volume_node_affinity import V1VolumeNodeAffinity
from .v1_volume_projection import V1VolumeProjection
from .v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource
from .v1_watch_event import V1WatchEvent
from .v1_weighted_pod_affinity_term import V1WeightedPodAffinityTerm
from .v1alpha1_aggregation_rule import V1alpha1AggregationRule
from .v1alpha1_audit_sink import V1alpha1AuditSink
from .v1alpha1_audit_sink_list import V1alpha1AuditSinkList
from .v1alpha1_audit_sink_spec import V1alpha1AuditSinkSpec
from .v1alpha1_cluster_role import V1alpha1ClusterRole
from .v1alpha1_cluster_role_binding import V1alpha1ClusterRoleBinding
from .v1alpha1_cluster_role_binding_list import V1alpha1ClusterRoleBindingList
from .v1alpha1_cluster_role_list import V1alpha1ClusterRoleList
from .v1alpha1_pod_preset import V1alpha1PodPreset
from .v1alpha1_pod_preset_list import V1alpha1PodPresetList
from .v1alpha1_pod_preset_spec import V1alpha1PodPresetSpec
from .v1alpha1_policy import V1alpha1Policy
from .v1alpha1_policy_rule import V1alpha1PolicyRule
from .v1alpha1_priority_class import V1alpha1PriorityClass
from .v1alpha1_priority_class_list import V1alpha1PriorityClassList
from .v1alpha1_role import V1alpha1Role
from .v1alpha1_role_binding import V1alpha1RoleBinding
from .v1alpha1_role_binding_list import V1alpha1RoleBindingList
from .v1alpha1_role_list import V1alpha1RoleList
from .v1alpha1_role_ref import V1alpha1RoleRef
from .v1alpha1_runtime_class import V1alpha1RuntimeClass
from .v1alpha1_runtime_class_list import V1alpha1RuntimeClassList
from .v1alpha1_runtime_class_spec import V1alpha1RuntimeClassSpec
from .v1alpha1_service_reference import V1alpha1ServiceReference
from .v1alpha1_subject import V1alpha1Subject
from .v1alpha1_volume_attachment import V1alpha1VolumeAttachment
from .v1alpha1_volume_attachment_list import V1alpha1VolumeAttachmentList
from .v1alpha1_volume_attachment_source import V1alpha1VolumeAttachmentSource
from .v1alpha1_volume_attachment_spec import V1alpha1VolumeAttachmentSpec
from .v1alpha1_volume_attachment_status import V1alpha1VolumeAttachmentStatus
from .v1alpha1_volume_error import V1alpha1VolumeError
from .v1alpha1_webhook import V1alpha1Webhook
from .v1alpha1_webhook_client_config import V1alpha1WebhookClientConfig
from .v1alpha1_webhook_throttle_config import V1alpha1WebhookThrottleConfig
from .v1beta1_api_service import V1beta1APIService
from .v1beta1_api_service_condition import V1beta1APIServiceCondition
from .v1beta1_api_service_list import V1beta1APIServiceList
from .v1beta1_api_service_spec import V1beta1APIServiceSpec
from .v1beta1_api_service_status import V1beta1APIServiceStatus
from .v1beta1_aggregation_rule import V1beta1AggregationRule
from .v1beta1_csi_driver import V1beta1CSIDriver
from .v1beta1_csi_driver_list import V1beta1CSIDriverList
from .v1beta1_csi_driver_spec import V1beta1CSIDriverSpec
from .v1beta1_csi_node import V1beta1CSINode
from .v1beta1_csi_node_driver import V1beta1CSINodeDriver
from .v1beta1_csi_node_list import V1beta1CSINodeList
from .v1beta1_csi_node_spec import V1beta1CSINodeSpec
import os
import cv2
import torch
import numpy as np
from time import time
from collections import defaultdict
# Thirdparty ##################
import g2o
import lambdatwist
################################
from .utils import utils
from .labeling import kp_config
from .models.pkpnet import PkpNet
from .utils.eval_meter import AverageMeter
from thirdparty.bop_toolkit.bop_toolkit_lib.renderer_py import RendererPython
# Return a PnP pose only if there are enough points, otherwise None.
# Returns an estimated transformation that maps points expressed in the
# same frame as points_3d into the camera frame in which the image-plane
# points_2d are defined, along with a mask of inliers if RANSAC is used.
# Without RANSAC, the inlier mask is all True.
def pnp(points_3d, points_2d, camera_matrix):
assert points_3d.shape[0] == points_2d.shape[0], \
'points 3D and points 2D must have same number of rows'
assert camera_matrix.shape == (3,3), "Camera matrix must be of shape (3,3)"
num_pts = points_3d.shape[0]
    if num_pts < 4:  # LambdaTwist PnP RANSAC needs >= 4 pts.
return None
    # Normalize the image coordinates: apply K^-1 to the homogeneous pixel
    # coords [u, v, 1]; done row-wise below via the transposed inverse.
KinvT = np.linalg.inv(camera_matrix).T
points_2d_norm = points_2d @ KinvT[:2,:2] + KinvT[2:3,:2]
res = lambdatwist.pnp(points_3d, points_2d_norm)
if np.allclose(res, np.eye(4)):
return None
else:
        return res[:3,:], np.ones(num_pts, dtype=bool)
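# Hedged usage sketch for pnp(): the camera intrinsics and point values below
# are illustrative assumptions, not from the original source. A real caller
# would pass model keypoints (object frame) and their detected pixel locations.
def _pnp_usage_example():
    K = np.array([[600.0, 0.0, 320.0],
                  [0.0, 600.0, 240.0],
                  [0.0, 0.0, 1.0]])
    # Four 3D points on a 10 cm square, and pixel detections consistent with
    # a camera viewing the square fronto-parallel from 1.5 m.
    points_3d = np.array([[-0.05, -0.05, 0.0],
                          [0.05, -0.05, 0.0],
                          [0.05, 0.05, 0.0],
                          [-0.05, 0.05, 0.0]])
    points_2d = np.array([[300.0, 220.0],
                          [340.0, 220.0],
                          [340.0, 260.0],
                          [300.0, 260.0]])
    result = pnp(points_3d, points_2d, K)
    if result is not None:
        T_3x4, inliers = result  # maps object-frame points into the camera frame
        print("Estimated pose:\n", T_3x4, "\ninliers:", inliers)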
# This class will handle the logic of running the keypoint network
# with or without the prior detection, calculating the prior detection,
# and optimizing the results with a BA. All user does is instantiate,
# feed image and bbox. Note that each image can have a different K matrix.
# We offer a single_view_mode for just performing covariance-driven PnP like PVNet
# and SfM mode for dealing with unordered sets of images in non-realtime, although
# this functionality is not as good as the baseline SLAM with all views since
# we don't do anything special for ordered images vs. non-ordered.
class ObjectSLAM:
def __init__(self, chkpt_path, mesh_db, no_network_cov=False,
no_prior_det=False, pred_res=(256,256), debug_gt_kp=False,
sfm_mode=False, single_view_mode=False, viz_cov=False,
do_viz_extra=False, global_opt_every=10, kp_var_thresh=0.2,
bbox_thresh=0.9, bbox_inflate=0.0, manual_kp_std=0.005,
opt_init_with_outliers=False, give_all_prior=False):
"""
\param chkpt_path: string of path to load keypoint model from
"""
self.mesh_db = mesh_db
# Ignore network covariance if debugging with GT keypoints
self.no_network_cov = no_network_cov or debug_gt_kp
# Still make prior detection if debugging with GT keypoints so we
# can debug that too.
self.no_prior_det = no_prior_det
self.pred_res = list(pred_res)
self.debug_gt_kp = debug_gt_kp
self.sfm_mode = sfm_mode
self.single_view_mode = single_view_mode
self.slam_mode = not (sfm_mode or single_view_mode)
self.viz_cov = viz_cov
self.do_viz_extra = do_viz_extra
self.global_opt_every = global_opt_every
self.kp_var_thresh = kp_var_thresh
self.bbox_thresh = bbox_thresh
self.bbox_inflate = bbox_inflate
self.manual_kp_std = manual_kp_std
self.opt_init_with_outliers = opt_init_with_outliers
self.give_all_prior = give_all_prior
self.reset()
self.model = None
self.model_epoch = -1
if not debug_gt_kp:
print(f"Loading model from {chkpt_path}")
assert os.path.isfile(chkpt_path), \
"=> no checkpoint found at '{}'".format(chkpt_path)
print("=> loading checkpoint '{}'".format(chkpt_path))
checkpoint = torch.load(chkpt_path)
self.model = PkpNet(calc_cov = not self.no_network_cov)
self.model.load_state_dict(checkpoint['model'])
assert self.model.calc_cov != self.no_network_cov
model_args = checkpoint['args']
self.model_epoch = checkpoint['epoch']
print("======= Model's Training Args ================")
for attr in dir(model_args):
if not attr.startswith('_'):
print(f"{attr}: {getattr(model_args, attr)}")
print("=============================")
if torch.cuda.is_available():
print(f"Found CUDA")
self.model = self.model.cuda()
else:
print("WARNING: No CUDA found.")
self.model.eval()
# Set the timers outside of the reset function in case we
# run multiple sequences.
# Check the average std of the predicted keypoints
self.avg_std_meter = AverageMeter()
        # We count the tracking time as the time for the network prediction,
        # PnP, and camera pose estimation. The graph optimization and outlier
        # rejection can be done in a separate thread.
self.track_time_meter = AverageMeter()
self.opt_time_meter = AverageMeter()
self.all_time_num_views = 0
self.renderer = None
def reset(self):
'''
Reset to initial state and delete old fed-in data if there is any.
'''
# Map view_id to dict of detection info
# (object-specific masks, bboxes, and keypoints mapped by object instance ID)
self.detections = {}
# Map view_id to current estimated camera pose for that view.
# Pose is T_GtoC, which transforms points in world frame into camera frame.
self.cam_poses = {}
self.view_ids = [] # Order received of views
# K matrix for each camera pose. May be different.
self.cam_K = {}
self.images = {}
# Map of object instance ID to object pose expressed in the first camera frame.
# Pose is T_OtoG, which transforms points in object frame into world frame
# (first camera frame).
self.obj_poses = {}
# Info needed for deciding whether or not to re-initialize object.
self.obj_num_dets = defaultdict(int)
self.obj_num_det_kps = defaultdict(int)
self.remove_penalty = defaultdict(int)
self.needs_opt = False
    def get_global_opt_strtime(self, t0=np.nan, t1=np.nan):
        tavg = self.opt_time_meter.average()
        # Guard against divide-by-zero before any optimization has run,
        # mirroring get_tracking_strtime below.
        return f"TIMING: Global opt time: {1000*(t1-t0):.3f} ms " \
            + f"({1000*tavg:.3f} avg) " \
            + f"({'inf' if tavg < 1e-12 else 1/tavg} Hz)"
def get_tracking_strtime(self, tt0=np.nan, tt1=np.nan):
ttavg = self.track_time_meter.average()
return f"TIMING: Tracking time: {1000*(tt1-tt0):.3f} ms " \
+ f"({1000*ttavg:.3f} avg) " \
+ f"({'inf' if ttavg<1e-12 else 1/ttavg} Hz)"
def print_global_opt_time(self, t0=np.nan, t1=np.nan):
print(self.get_global_opt_strtime(t0, t1))
def print_tracking_time(self, tt0=np.nan, tt1=np.nan):
print(self.get_tracking_strtime(tt0, tt1))
    def collect_results(self, last_only=False, no_viz=False, final=False):
        '''
        Call this to get object poses in each camera frame for evaluation.
        '''
if self.slam_mode and self.needs_opt and final:
print("Performing FINAL global optimization")
t0 = time()
self.optimize()
t1 = time()
self.opt_time_meter.update(t1-t0)
self.print_global_opt_time(t0, t1)
results = {}
assert len(self.view_ids) == len(self.cam_poses)
view_ids = [self.view_ids[-1]] if last_only else self.view_ids
for view_id in view_ids:
T_GtoC = self.cam_poses[view_id]
results[view_id] = {
"poses": {},
}
poses = {}
detection = self.detections[view_id]
if T_GtoC.shape[0] == 3:
T_GtoC = np.concatenate((T_GtoC,np.eye(4)[3:4,:]), axis=0)
# Combine estimated objects with objects detected in this frame.
obj_ids = set(list(self.obj_poses.keys()) + list(detection.keys()))
kp_cov = None
if not no_viz:
# Populate with keypoints detected in this view
kp_pred = np.zeros((len(obj_ids), kp_config.num_kp(), 2), dtype=np.float32)
if not self.no_network_cov:
kp_cov = np.zeros((len(obj_ids), kp_config.num_kp(), 2, 2),
dtype=np.float32)
            kp_mask = np.zeros((len(obj_ids), kp_config.num_kp()), dtype=bool)
            bboxes = np.zeros((len(obj_ids), 5), dtype=int)
# Remake the priors for the whole image now
img = self.images[view_id]
priors = np.zeros((kp_config.num_kp(),*img.shape[:2]), dtype=np.float32)
for i, obj_id in enumerate(obj_ids):
T_OtoC = None
if obj_id in self.obj_poses.keys():
T_OtoG = self.obj_poses[obj_id]
if T_OtoG.shape[0] == 3:
T_OtoG = np.concatenate((T_OtoG,np.eye(4)[3:4,:]), axis=0)
T_OtoC = T_GtoC @ T_OtoG
poses[obj_id] = T_OtoC
result = {
"T_OtoC": T_OtoC,
#"score": 0,
"score": 1 + self.obj_num_inliers(obj_id), # TOTAL number of final inliers
}
if obj_id in detection.keys():
det_obj = detection[obj_id]
#result["score"] = np.count_nonzero(det_obj["inliers"])
if not no_viz:
kp_mask_i = det_obj["kp_mask"] # Which kp were detected
#inliers_i = det_obj["inliers"] # Which kp are final inliers
# Project kps into full image plane with this homography
H = (self.cam_K[view_id] @ np.linalg.inv(det_obj["K"])).T
kp_pred[i][kp_mask_i] = det_obj["uv_pred"] @ H[:2,:2] + H[2:3,:2]
kp_mask[i] = kp_mask_i
bboxes[i][0] = obj_id
                    bboxes[i][1:] = (det_obj["bbox"] + 0.5).astype(int)
if not self.no_network_cov:
assert "cov_pred" in det_obj.keys()
                        # Propagate the covariance from the bbox image (NDC)
                        # to the full image in raw UV coordinates.
kp_cov[i][kp_mask_i] = H[:2,:2].T[None,None,...] \
@ det_obj["cov_pred"] @ H[None,None,:2,:2]
#print(kp_cov[i][kp_mask_i])
if det_obj["prior_uv"] is not None:
prior_uv_full = det_obj["prior_uv"] @ H[:2,:2] + H[2:3,:2]
                        # Don't visualize the prior outside of the bbox
x1,y1,x2,y2 = bboxes[i][1:]
priors[:,y1:y2,x1:x2] += utils.make_prior_kp_input(prior_uv_full,
det_obj["model_kp_mask"], img.shape[:2],
ndc=False)[:,y1:y2,x1:x2]
results[view_id]["poses"][obj_id] = result
if not no_viz:
# Get viz data
priors = np.clip(priors,0,1)
t0 = utils.device_time()
# LEFT: bbox and prior, MIDDLE keypoints, RIGHT model overlay
viz_img_left = utils.make_kp_viz(img, np.array([]), np.array([]),
bbox_pred=bboxes, prior=priors)
viz_img_center = utils.make_kp_viz(img, kp_pred, kp_mask,
cov=kp_cov if self.viz_cov else None, ndc=False)
viz_img_right = utils.make_kp_viz(img, np.array([]), np.array([]),
poses=poses, K=self.cam_K[view_id], mesh_db=self.mesh_db)
viz_img = np.concatenate((viz_img_left, viz_img_center, viz_img_right), axis=1)
results[view_id]["viz"] = viz_img
t1 = utils.device_time()
print(f"TIMING: Viz time = {1000 * (t1-t0)} ms")
# Add some more visualizations for each object (for paper figs).
if self.do_viz_extra:
results[view_id]["viz_extra"] = {}
# Image and bbox
results[view_id]["viz_extra"]["bbox_input"] = utils.make_kp_viz(
img, np.array([]), np.array([]), bbox_pred=bboxes)
for i, obj_id in enumerate(obj_ids):
x1,y1,x2,y2 = bboxes[i][1:]
img_i = img[y1:y2,x1:x2,:]
prior_i = priors[:,y1:y2,x1:x2]
kp_pred_i = kp_pred[i:i+1] - np.array([x1,y1])[None,None,:]
T = np.eye(3, dtype=np.float32)
T[:2,2] = -np.array([x1,y1], dtype=np.float32)
K_bbox = T @ self.cam_K[view_id]
kp_cov_i = None
if kp_cov is not None:
kp_cov_i = kp_cov[i:i+1]
# Just image (and prior if nonzero)
results[view_id]["viz_extra"][f"viz_obj_{obj_id}_input"] = \
utils.make_kp_viz(img_i, np.array([]), np.array([]), prior=prior_i)
# Image with keypoints
results[view_id]["viz_extra"][f"viz_obj_{obj_id}_output"] = \
utils.make_kp_viz(img_i, kp_pred_i, kp_mask[i:i+1],
cov=kp_cov_i if self.viz_cov else None, ndc=False)
                    # Image overlaid with the CAD model at the estimated pose
if obj_id in poses.keys():
results[view_id]["viz_extra"][f"viz_obj_{obj_id}_overlay"] = \
utils.make_kp_viz(img_i, np.array([]), np.array([]),
poses={obj_id: poses[obj_id]}, K=K_bbox,
mesh_db=self.mesh_db)
        return results
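# Hedged usage sketch for ObjectSLAM (illustrative only). The per-frame
# feeding method is not shown in this excerpt, so `process_view` below is a
# hypothetical stand-in for whatever entry point consumes an image, its
# bboxes, and its K matrix in the full source:
#
#   slam = ObjectSLAM("checkpoints/pkpnet.pth", mesh_db, single_view_mode=True)
#   for view_id, (img, bboxes, K) in enumerate(frames):
#       slam.process_view(view_id, img, bboxes, K)  # hypothetical method name
#   results = slam.collect_results(final=True)      # {view_id: {"poses": ...}}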
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
vivopump -- module of helper functions for the pump
"""
import sys
import csv
import string
import random
import logging
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016 <NAME>"
__license__ = "New BSD license"
__version__ = "0.8.7"
logger = logging.getLogger(__name__)
class DefNotFoundException(Exception):
"""
    Raise this exception when the update definition file is not found
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class InvalidDefException(Exception):
"""
Raise this exception when update definition contains values that can not be processed
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class InvalidSourceException(Exception):
"""
Raise this exception when update data contains values that can not be processed
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class PathLengthException(Exception):
"""
    Raise this exception when the update def has a path length greater than supported
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
class UnicodeCsvReader(object):
"""
From http://stackoverflow.com/questions/1846135/python-csv-
library-with-unicode-utf-8-support-that-just-works. Added errors='ignore'
to handle cases when the input file misrepresents itself as utf-8.
"""
def __init__(self, f, encoding="utf-8", **kwargs):
self.csv_reader = csv.reader(f, **kwargs)
self.encoding = encoding
def __iter__(self):
return self
def next(self):
"""
Read and split the csv row into fields
"""
row = self.csv_reader.next()
# now decode
return [unicode(cell, self.encoding, errors='ignore') for cell in row]
@property
def line_num(self):
"""
Return line number
"""
return self.csv_reader.line_num
class UnicodeDictReader(csv.DictReader):
"""
A Unicode CSV Reader
"""
def __init__(self, f, encoding="utf-8", fieldnames=None, **kwds):
csv.DictReader.__init__(self, f, fieldnames=fieldnames, **kwds)
self.reader = UnicodeCsvReader(f, encoding=encoding, **kwds)
def read_csv(filename, skip=True, delimiter='|'):
"""
Read a CSV file, return dictionary object
:param filename: name of file to read
:param skip: should lines with invalid number of columns be skipped? False=Throw Exception
:param delimiter: The delimiter for CSV files
:return: Dictionary object
"""
fp = open(filename, 'rU')
data = read_csv_fp(fp, skip, delimiter)
fp.close()
return data
def read_csv_fp(fp, skip=True, delimiter="|"):
"""
    Given a file pointer, read CSV data from it. We use "|" as a
separator in CSV files to allow commas to appear in values.
CSV files read by this function follow these conventions:
-- use delimiter as a separator. Defaults to vertical bar.
-- have a first row that contains column headings.
-- all elements must have values. To specify a missing value, use
the string "None" or "NULL" between separators, that is |None| or |NULL|
-- leading and trailing whitespace in values is ignored. | The | will be
read as "The"
-- if skip=True, rows with too many or too few data elements are skipped.
if skip=False, a RowError is thrown
CSV files processed by read_csv will be returned as a dictionary of
dictionaries, one dictionary per row keyed by an integer row number. This supports
maintaining the order of the data input, which is important for some applications
"""
class RowError(Exception):
"""
Thrown when the number of data elements on a row in a CSV is not equal to the number of header elements
"""
pass
heading = []
row_number = 0
data = {}
for row in UnicodeCsvReader(fp, delimiter=delimiter):
        # remove white space fore and aft
        row = [r.strip(string.whitespace) for r in row]
if len(heading) == 0:
heading = row # the first row is the heading
continue
row_number += 1
if len(row) == len(heading):
            data[row_number] = dict(zip(heading, row))
elif not skip:
raise RowError("On row " + str(row_number) + ", expecting " +
str(len(heading)) + " data values. Found " +
str(len(row)) + " data values. Row contents = " +
str(row))
else:
pass # row has wrong number of columns and skip is True
logger.debug("loader returns {} rows".format(len(data)))
return data
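# Minimal usage sketch for read_csv_fp (Python 2, since UnicodeCsvReader
# relies on the `unicode` builtin); the sample rows are illustrative:
#
#   from StringIO import StringIO
#   fp = StringIO("name|ufid\nAlice|1234\nBob|5678\n")
#   data = read_csv_fp(fp)
#   # data == {1: {u'name': u'Alice', u'ufid': u'1234'},
#   #          2: {u'name': u'Bob', u'ufid': u'5678'}}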
def write_csv_fp(fp, data, delimiter='|'):
"""
Write a CSV to a file pointer. Used to support stdout.
:param fp: File pointer. Could be stdout.
:param data: data to be written
:param delimiter: field delimiter for output
:return:
"""
assert(len(data.keys()) > 0)
# create a list of var_names from the first row
var_names = data[data.keys()[0]].keys()
fp.write(delimiter.join(var_names) + '\n')
for key in sorted(data.keys()):
fp.write(delimiter.join([data[key][x] for x in var_names]) + '\n')
def write_csv(filename, data, delimiter='|'):
"""
Given a filename, a data structure as produced by read_csv and an optional
delimiter, write a file that can be read by read_csv
The data structure is a dictionary keyed by an integer of "row numbers"
preserving the natural order of the data. Each element is in turn a
dictionary of name value pairs. All values are strings.
:param filename: name of file to write
:param data: data structure to be written to the file
:param delimiter: field delimiter. Popular choices are '|', '\t' and ','
:return:
"""
    # Delegate to write_csv_fp so every row's values are emitted in the same
    # order as the header, regardless of each row dict's key order.
    with open(filename, 'w') as f:
        write_csv_fp(f, data, delimiter)
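# Companion sketch for write_csv / write_csv_fp: `data` has the same shape
# read_csv produces -- a dict keyed by integer row number whose values are
# per-row dicts of column name -> string value (values here are illustrative):
#
#   import sys
#   data = {1: {'name': 'Alice', 'ufid': '1234'},
#           2: {'name': 'Bob', 'ufid': '5678'}}
#   write_csv_fp(sys.stdout, data)  # prints a "|"-delimited table
#   write_csv('people.csv', data)   # writes the same content to a file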
def replace_initials(s):
"""
For a string s, find all occurrences of A. B. etc and replace them with A B etc
:param s:
:return: string with replacements made
"""
import re
def repl_function(m):
"""
Helper function for re.sub
"""
return m.group(0)[0]
    t = re.sub(r'[A-Z]\.', repl_function, s)
return t
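# Quick illustration of replace_initials (inputs are illustrative):
#
#   replace_initials("A. B. Smith")     # -> "A B Smith"
#   replace_initials("J.R.R. Tolkien")  # -> "JRR Tolkien"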
def key_string(s):
"""
Given a string s, return a string with a bunch of punctuation and special
characters removed and then everything lower cased. Useful for matching
strings in which case, punctuation and special characters should not be
considered in the match
"""
k = s.encode("utf-8", "ignore").translate(None,
""" \t\n\r\f!@#$%^&*()_+:"<>?-=[]\\;'`~,./""")
k = k.lower()
return k
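# Quick illustration of key_string (inputs are illustrative). Whitespace and
# punctuation are deleted and the result lower-cased, so variant spellings of
# the same name compare equal:
#
#   key_string(u"Nucleic Acids Research")  # -> "nucleicacidsresearch"
#   key_string(u"WILEY-BLACKWELL, Inc.")   # -> "wileyblackwellinc"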
def get_vivo_types(selector, parms, separator=';'):
"""
Query VIVO using the selector and return a dictionary with keys of all uri satisfying the selector and
data of all the types for each uri, separated by the separator
:param: selector: query fragment for selecting the entities whose types will be returned
:param: parms: vivo_query parms
:return: dictionary of types keyed by uri
"""
query = """
select ?uri (GROUP_CONCAT(?type; separator="{{separator}}") AS ?types)
where {
{{selector}}
?uri rdf:type ?type .}
GROUP BY ?uri
"""
q = query.replace("{{separator}}", separator)
q = q.replace("{{selector}}", selector)
a = vivo_query(q, parms)
types = [x['types']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(uri, types))
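# Hedged usage sketch for get_vivo_types. `parms` is the vivo_query parameter
# dict used throughout this module; its exact keys are not shown in this
# excerpt, so the values below are assumptions:
#
#   parms = {'queryuri': 'http://localhost:8080/vivo/api/sparqlQuery',
#            'username': 'vivo_root@school.edu', 'password': 'secret'}
#   person_types = get_vivo_types("?uri a foaf:Person .", parms)
#   # person_types maps each matching uri to its rdf:type values joined by ';'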
def get_vivo_ufid(parms):
"""
Query VIVO and return a list of all the ufid found in VIVO
:param: parms: vivo_query parameters
:return: dictionary of uri keyed by ufid
"""
query = "select ?uri ?ufid where {?uri uf:ufid ?ufid .}"
a = vivo_query(query, parms)
ufid = [x['ufid']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(ufid, uri))
def get_vivo_publishers(parms):
"""
Query VIVO and return a list of all the publishers found in VIVO
:param: parms: vivo_query parameters
:return: dictionary of uri keyed by simplified publisher name
"""
query = "select ?uri ?label where {?uri a vivo:Publisher . ?uri rdfs:label ?label .}"
a = vivo_query(query, parms)
label = [key_string(x['label']['value']) for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(label, uri))
def get_vivo_journals(parms):
"""
Query VIVO and return a list of all the journals.
@see uf_examples/publications/filters/journal_match_filter.py
:param: parms: vivo_query params
:return: dictionary of uri keyed by ISSN
"""
query = "select ?uri ?issn where {?uri bibo:issn ?issn .}"
a = vivo_query(query, parms)
issn = [x['issn']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(issn, uri))
def get_vivo_ccn(parms):
"""
Query VIVO and return a list of all the ccn found in VIVO.
@see uf_examples/courses/merge_filter.py
:param: parms: vivo_query parms
:return: dictionary of uri keyed by ccn
"""
query = "select ?uri ?ccn where {?uri uf:ccn ?ccn .}"
a = vivo_query(query, parms)
ccn = [x['ccn']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(ccn, uri))
def get_vivo_sponsorid(parms):
"""
Query VIVO and return a list of all the sponsorid found in VIVO
:param: parms: vivo_query parms
:return: dictionary of uri keyed by sponsorid
"""
query = "select ?uri ?sponsorid where {?uri a vivo:FundingOrganization . ?uri ufVivo:sponsorID ?sponsorid .}"
a = vivo_query(query, parms)
sponsorid = [x['sponsorid']['value'] for x in a['results']['bindings']]
uri = [x['uri']['value'] for x in a['results']['bindings']]
return dict(zip(sponsorid, uri))
def get_vivo_authors(parms):
"""
    Query VIVO and return a list of all the authors found in VIVO
    :param: parms: vivo_query parms
    :return: dictionary of uri keyed by simplified author name
    """
    # NOTE: the body below is reconstructed to mirror the sibling get_vivo_*
    # helpers above; the exact SPARQL used for authors in the original source
    # is not shown in this excerpt and is an assumption.
    query = "select ?uri ?label where {?uri a foaf:Person . ?uri rdfs:label ?label .}"
    a = vivo_query(query, parms)
    label = [key_string(x['label']['value']) for x in a['results']['bindings']]
    uri = [x['uri']['value'] for x in a['results']['bindings']]
    return dict(zip(label, uri))