hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de80fc19bc777df59769e113072125cab077fed6
| 142
|
py
|
Python
|
badwing/character/controller/__init__.py
|
kfields/badwing
|
5f53c98cbb6fca8390e1632fa559f5201861365b
|
[
"MIT"
] | 3
|
2020-03-23T06:43:25.000Z
|
2022-02-18T16:35:56.000Z
|
badwing/character/controller/__init__.py
|
kfields/badwing
|
5f53c98cbb6fca8390e1632fa559f5201861365b
|
[
"MIT"
] | 2
|
2020-03-26T02:05:36.000Z
|
2021-08-02T19:13:06.000Z
|
badwing/character/controller/__init__.py
|
kfields/badwing
|
5f53c98cbb6fca8390e1632fa559f5201861365b
|
[
"MIT"
] | null | null | null |
from badwing.character.controller.controller import CharacterController
from badwing.character.controller.kinematic import KinematicController
| 71
| 71
| 0.908451
| 14
| 142
| 9.214286
| 0.571429
| 0.170543
| 0.310078
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049296
| 142
| 2
| 72
| 71
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
deab8a3dde9924928a5178fa78d3f47d6fc4d06f
| 60,245
|
py
|
Python
|
imageslider.py
|
albertoriva/actor
|
9125eec015c074a77987c621c5764e4f0eb4b72a
|
[
"MIT"
] | 1
|
2019-01-15T15:21:41.000Z
|
2019-01-15T15:21:41.000Z
|
imageslider.py
|
albertoriva/actor
|
9125eec015c074a77987c621c5764e4f0eb4b72a
|
[
"MIT"
] | 1
|
2018-09-29T14:48:25.000Z
|
2018-09-29T16:57:52.000Z
|
imageslider.py
|
albertoriva/actor
|
9125eec015c074a77987c621c5764e4f0eb4b72a
|
[
"MIT"
] | 1
|
2018-09-29T14:36:08.000Z
|
2018-09-29T14:36:08.000Z
|
###################################################
#
# (c) 2016, Alberto Riva, ariva@ufl.edu
# DiBiG, ICBR Bioinformatics, University of Florida
#
# See the LICENSE file for license information.
###################################################
# A class to generate a slider for multiple images.
# Two subclasses are provided: VerticalSlider and HorizontalSlider
#
# Code taken from: http://www.jssor.com/
class ImageSlider():
images = []
initialized = False
width = 500
height = 500
labelwidth = 200
def __init__(self, images, width=500, height=500, labelwidth=200):
self.images = images
self.width = width
self.height = height
self.labelwidth = labelwidth
self.initialized = False
def generate(self, stream):
"""Write the slider to `stream'."""
if not self.initialized:
self.initialize(stream)
self.preamble(stream)
for img in self.images:
self.slide(stream, img)
self.closing(stream)
def slide(self, stream, img):
name = img[0]
url = img[1]
stream.write(""" <div style="display: none;">
<div style="position: absolute; top: 0px; left: 0px; width: 598px; height: 299px;">
<div style="margin: 10px; overflow: hidden; color: #000;"><img src='{}'></div>
</div>
<div data-u="thumb">{}</div>
</div>
""".format(url, name))
class HorizontalSlider(ImageSlider):
def initialize(self, stream):
stream.write("""<script>
(function(h,f,c,j,d,l,k){/*! Jssor */
new(function(){});var e={Ed:function(a){return-c.cos(a*c.PI)/2+.5},Fd:function(a){return a},Hd:function(a){return-a*(a-2)}};var b=new function(){var g=this,Bb=/\S+/g,G=1,db=2,hb=3,gb=4,lb=5,H,r=0,i=0,s=0,W=0,z=0,J=navigator,pb=J.appName,o=J.userAgent,p=parseFloat;function zb(){if(!H){H={Pe:"ontouchstart"in h||"createTouch"in f};var a;if(J.pointerEnabled||(a=J.msPointerEnabled))H.hd=a?"msTouchAction":"touchAction"}return H}function v(j){if(!r){r=-1;if(pb=="Microsoft Internet Explorer"&&!!h.attachEvent&&!!h.ActiveXObject){var e=o.indexOf("MSIE");r=G;s=p(o.substring(e+5,o.indexOf(";",e)));/*@cc_on W=@_jscript_version@*/;i=f.documentMode||s}else if(pb=="Netscape"&&!!h.addEventListener){var d=o.indexOf("Firefox"),b=o.indexOf("Safari"),g=o.indexOf("Chrome"),c=o.indexOf("AppleWebKit");if(d>=0){r=db;i=p(o.substring(d+8))}else if(b>=0){var k=o.substring(0,b).lastIndexOf("/");r=g>=0?gb:hb;i=p(o.substring(k+1,b))}else{var a=/Trident\/.*rv:([0-9]{1,}[\.0-9]{0,})/i.exec(o);if(a){r=G;i=s=p(a[1])}}if(c>=0)z=p(o.substring(c+12))}else{var a=/(opera)(?:.*version|)[ \/]([\w.]+)/i.exec(o);if(a){r=lb;i=p(a[2])}}}return j==r}function q(){return v(G)}function R(){return q()&&(i<6||f.compatMode=="BackCompat")}function fb(){return v(hb)}function kb(){return v(lb)}function wb(){return fb()&&z>534&&z<535}function K(){v();return z>537||i>42||r==G&&i>=11}function P(){return q()&&i<9}function xb(a){var b,c;return function(f){if(!b){b=d;var e=a.substr(0,1).toUpperCase()+a.substr(1);n([a].concat(["WebKit","ms","Moz","O","webkit"]),function(g,d){var b=a;if(d)b=g+e;if(f.style[b]!=k)return c=b})}return c}}function vb(b){var a;return function(c){a=a||xb(b)(c)||b;return a}}var L=vb("transform");function ob(a){return{}.toString.call(a)}var F;function Hb(){if(!F){F={};n(["Boolean","Number","String","Function","Array","Date","RegExp","Object"],function(a){F["[object "+a+"]"]=a.toLowerCase()})}return F}function n(b,d){var a,c;if(ob(b)=="[object Array]"){for(a=0;a<b.length;a++)if(c=d(b[a],a,b))return 
c}else for(a in b)if(c=d(b[a],a,b))return c}function C(a){return a==j?String(a):Hb()[ob(a)]||"object"}function mb(a){for(var b in a)return d}function A(a){try{return C(a)=="object"&&!a.nodeType&&a!=a.window&&(!a.constructor||{}.hasOwnProperty.call(a.constructor.prototype,"isPrototypeOf"))}catch(b){}}function u(a,b){return{x:a,y:b}}function sb(b,a){setTimeout(b,a||0)}function I(b,d,c){var a=!b||b=="inherit"?"":b;n(d,function(c){var b=c.exec(a);if(b){var d=a.substr(0,b.index),e=a.substr(b.index+b[0].length+1,a.length-1);a=d+e}});a=c+(!a.indexOf(" ")?"":" ")+a;return a}function ub(b,a){if(i<9)b.style.filter=a}g.Le=zb;g.gd=q;g.Ge=fb;g.ee=K;g.xc=P;xb("transform");g.hc=function(){return i};g.jc=sb;function Z(a){a.constructor===Z.caller&&a.lc&&a.lc.apply(a,Z.caller.arguments)}g.lc=Z;g.ab=function(a){if(g.Oe(a))a=f.getElementById(a);return a};function t(a){return a||h.event}g.nc=t;g.Rb=function(b){b=t(b);var a=b.target||b.srcElement||f;if(a.nodeType==3)a=g.pc(a);return a};g.rc=function(a){a=t(a);return{x:a.pageX||a.clientX||0,y:a.pageY||a.clientY||0}};function D(c,d,a){if(a!==k)c.style[d]=a==k?"":a;else{var b=c.currentStyle||c.style;a=b[d];if(a==""&&h.getComputedStyle){b=c.ownerDocument.defaultView.getComputedStyle(c,j);b&&(a=b.getPropertyValue(d)||b[d])}return a}}function bb(b,c,a,d){if(a!==k){if(a==j)a="";else d&&(a+="px");D(b,c,a)}else return p(D(b,c))}function m(c,a){var d=a?bb:D,b;if(a&4)b=vb(c);return function(e,f){return d(e,b?b(e):c,f,a&2)}}function Eb(b){if(q()&&s<9){var a=/opacity=([^)]*)/.exec(b.style.filter||"");return a?p(a[1])/100:1}else return p(b.style.opacity||"1")}function Gb(b,a,f){if(q()&&s<9){var h=b.style.filter||"",i=new RegExp(/[\s]*alpha\([^\)]*\)/g),e=c.round(100*a),d="";if(e<100||f)d="alpha(opacity="+e+") ";var g=I(h,[i],d);ub(b,g)}else b.style.opacity=a==1?"":c.round(a*100)/100}var M={U:["rotate"],F:["rotateX"],G:["rotateY"],xb:["skewX"],wb:["skewY"]};if(!K())M=B(M,{n:["scaleX",2],m:["scaleY",2],K:["translateZ",1]});function N(d,a){var 
c="";if(a){if(q()&&i&&i<10){delete a.F;delete a.G;delete a.K}b.e(a,function(d,b){var a=M[b];if(a){var e=a[1]||0;if(O[b]!=d)c+=" "+a[0]+"("+d+(["deg","px",""])[e]+")"}});if(K()){if(a.L||a.W||a.K)c+=" translate3d("+(a.L||0)+"px,"+(a.W||0)+"px,"+(a.K||0)+"px)";if(a.n==k)a.n=1;if(a.m==k)a.m=1;if(a.n!=1||a.m!=1)c+=" scale3d("+a.n+", "+a.m+", 1)"}}d.style[L(d)]=c}g.Ic=m("transformOrigin",4);g.Be=m("backfaceVisibility",4);g.Fe=m("transformStyle",4);g.je=m("perspective",6);g.Ce=m("perspectiveOrigin",4);g.Ae=function(a,b){if(q()&&s<9||s<10&&R())a.style.zoom=b==1?"":b;else{var c=L(a),f="scale("+b+")",e=a.style[c],g=new RegExp(/[\s]*scale\(.*?\)/g),d=I(e,[g],f);a.style[c]=d}};g.Kb=function(b,a){return function(c){c=t(c);var e=c.type,d=c.relatedTarget||(e=="mouseout"?c.toElement:c.fromElement);(!d||d!==a&&!g.we(a,d))&&b(c)}};g.a=function(a,d,b,c){a=g.ab(a);if(a.addEventListener){d=="mousewheel"&&a.addEventListener("DOMMouseScroll",b,c);a.addEventListener(d,b,c)}else if(a.attachEvent){a.attachEvent("on"+d,b);c&&a.setCapture&&a.setCapture()}};g.z=function(a,c,d,b){a=g.ab(a);if(a.removeEventListener){c=="mousewheel"&&a.removeEventListener("DOMMouseScroll",d,b);a.removeEventListener(c,d,b)}else if(a.detachEvent){a.detachEvent("on"+c,d);b&&a.releaseCapture&&a.releaseCapture()}};g.Ab=function(a){a=t(a);a.preventDefault&&a.preventDefault();a.cancel=d;a.returnValue=l};g.pe=function(a){a=t(a);a.stopPropagation&&a.stopPropagation();a.cancelBubble=d};g.O=function(d,c){var a=[].slice.call(arguments,2),b=function(){var b=a.concat([].slice.call(arguments,0));return c.apply(d,b)};return b};g.Bb=function(d,c){for(var b=[],a=d.firstChild;a;a=a.nextSibling)(c||a.nodeType==1)&&b.push(a);return b};function nb(a,c,e,b){b=b||"u";for(a=a?a.firstChild:j;a;a=a.nextSibling)if(a.nodeType==1){if(V(a,b)==c)return a;if(!e){var d=nb(a,c,e,b);if(d)return d}}}g.o=nb;function T(a,d,f,b){b=b||"u";var c=[];for(a=a?a.firstChild:j;a;a=a.nextSibling)if(a.nodeType==1){V(a,b)==d&&c.push(a);if(!f){var 
e=T(a,d,f,b);if(e.length)c=c.concat(e)}}return c}function ib(a,c,d){for(a=a?a.firstChild:j;a;a=a.nextSibling)if(a.nodeType==1){if(a.tagName==c)return a;if(!d){var b=ib(a,c,d);if(b)return b}}}g.Te=ib;g.Me=function(b,a){return b.getElementsByTagName(a)};function B(){var e=arguments,d,c,b,a,g=1&e[0],f=1+g;d=e[f-1]||{};for(;f<e.length;f++)if(c=e[f])for(b in c){a=c[b];if(a!==k){a=c[b];var h=d[b];d[b]=g&&(A(h)||A(a))?B(g,{},h,a):a}}return d}g.T=B;function ab(f,g){var d={},c,a,b;for(c in f){a=f[c];b=g[c];if(a!==b){var e;if(A(a)&&A(b)){a=ab(a,b);e=!mb(a)}!e&&(d[c]=a)}}return d}g.Bc=function(a){return C(a)=="function"};g.Oe=function(a){return C(a)=="string"};g.Ne=function(a){return!isNaN(p(a))&&isFinite(a)};g.e=n;function S(a){return f.createElement(a)}g.P=function(){return S("DIV")};g.gc=function(){};function X(b,c,a){if(a==k)return b.getAttribute(c);b.setAttribute(c,a)}function V(a,b){return X(a,b)||X(a,"data-"+b)}g.bb=X;g.g=V;function x(b,a){if(a==k)return b.className;b.className=a}g.Fc=x;function rb(b){var a={};n(b,function(b){a[b]=b});return a}function tb(b,a){return b.match(a||Bb)}function Q(b,a){return rb(tb(b||"",a))}g.Xd=tb;function cb(b,c){var a="";n(c,function(c){a&&(a+=b);a+=c});return a}function E(a,c,b){x(a,cb(" ",B(ab(Q(x(a)),Q(c)),Q(b))))}g.pc=function(a){return a.parentNode};g.J=function(a){g.I(a,"none")};g.hb=function(a,b){g.I(a,b?"none":"")};g.wd=function(b,a){b.removeAttribute(a)};g.td=function(){return q()&&i<10};g.sd=function(d,a){if(a)d.style.clip="rect("+c.round(a.j)+"px "+c.round(a.p)+"px "+c.round(a.q)+"px "+c.round(a.f)+"px)";else{var g=d.style.cssText,f=[new RegExp(/[\s]*clip: rect\(.*?\)[;]?/i),new RegExp(/[\s]*cliptop: .*?[;]?/i),new RegExp(/[\s]*clipright: .*?[;]?/i),new RegExp(/[\s]*clipbottom: .*?[;]?/i),new RegExp(/[\s]*clipleft: .*?[;]?/i)],e=I(g,f,"");b.rb(d,e)}};g.E=function(){return+new 
Date};g.A=function(b,a){b.appendChild(a)};g.Gb=function(b,a,c){(c||a.parentNode).insertBefore(b,a)};g.Hb=function(b,a){a=a||b.parentNode;a&&a.removeChild(b)};g.kd=function(a,b){n(a,function(a){g.Hb(a,b)})};g.oc=function(a){g.kd(g.Bb(a,d),a)};g.nd=function(a,b){var c=g.pc(a);b&1&&g.r(a,(g.k(c)-g.k(a))/2);b&2&&g.u(a,(g.l(c)-g.l(a))/2)};g.ld=function(b,a){return parseInt(b,a||10)};g.jd=p;g.we=function(b,a){var c=f.body;while(a&&b!==a&&c!==a)try{a=a.parentNode}catch(d){return l}return b===a};function Y(d,c,b){var a=d.cloneNode(!c);!b&&g.wd(a,"id");return a}g.cb=Y;g.db=function(e,f){var a=new Image;function b(e,d){g.z(a,"load",b);g.z(a,"abort",c);g.z(a,"error",c);f&&f(a,d)}function c(a){b(a,d)}if(kb()&&i<11.6||!e)b(!e);else{g.a(a,"load",b);g.a(a,"abort",c);g.a(a,"error",c);a.src=e}};g.md=function(d,a,e){var c=d.length+1;function b(b){c--;if(a&&b&&b.src==a.src)a=b;!c&&e&&e(a)}n(d,function(a){g.db(a.src,b)});b()};g.yd=function(a,g,i,h){if(h)a=Y(a);var c=T(a,g);if(!c.length)c=b.Me(a,g);for(var f=c.length-1;f>-1;f--){var d=c[f],e=Y(i);x(e,x(d));b.rb(e,d.style.cssText);b.Gb(e,d);b.Hb(d)}return a};function Ib(a){var l=this,p="",r=["av","pv","ds","dn"],e=[],q,j=0,h=0,d=0;function i(){E(a,q,e[d||j||h&2||h]);b.C(a,"pointer-events",d?"none":"")}function c(){j=0;i();g.z(f,"mouseup",c);g.z(f,"touchend",c);g.z(f,"touchcancel",c)}function o(a){if(d)g.Ab(a);else{j=4;i();g.a(f,"mouseup",c);g.a(f,"touchend",c);g.a(f,"touchcancel",c)}}l.zd=function(a){if(a===k)return h;h=a&2||a&1;i()};l.Hc=function(a){if(a===k)return!d;d=a?0:3;i()};l.N=a=g.ab(a);var m=b.Xd(x(a));if(m)p=m.shift();n(r,function(a){e.push(p+a)});q=cb(" ",e);e.unshift("");g.a(a,"mousedown",o);g.a(a,"touchstart",o)}g.Pd=function(a){return new Ib(a)};g.C=D;g.fb=m("overflow");g.u=m("top",2);g.r=m("left",2);g.k=m("width",2);g.l=m("height",2);g.ce=m("marginLeft",2);g.ge=m("marginTop",2);g.s=m("position");g.I=m("display");g.v=m("zIndex",1);g.ec=function(b,a,c){if(a!=k)Gb(b,a,c);else return 
Eb(b)};g.rb=function(a,b){if(b!=k)a.style.cssText=b;else return a.style.cssText};var U={nb:g.ec,j:g.u,f:g.r,Eb:g.k,Fb:g.l,kb:g.s,ef:g.I,lb:g.v};function w(f,l){var e=P(),b=K(),d=wb(),h=L(f);function i(b,d,a){var e=b.M(u(-d/2,-a/2)),f=b.M(u(d/2,-a/2)),g=b.M(u(d/2,a/2)),h=b.M(u(-d/2,a/2));b.M(u(300,300));return u(c.min(e.x,f.x,g.x,h.x)+d/2,c.min(e.y,f.y,g.y,h.y)+a/2)}function a(d,a){a=a||{};var f=a.K||0,l=(a.F||0)%360,m=(a.G||0)%360,o=(a.U||0)%360,p=a.df;if(e){f=0;l=0;m=0;p=0}var c=new Db(a.L,a.W,f);c.F(l);c.G(m);c.Wd(o);c.Vd(a.xb,a.wb);c.yc(a.n,a.m,p);if(b){c.ob(a.mb,a.pb);d.style[h]=c.Ud()}else if(!W||W<9){var j="";if(o||a.n!=k&&a.n!=1||a.m!=k&&a.m!=1){var n=i(c,a.Z,a.V);g.ge(d,n.y);g.ce(d,n.x);j=c.Td()}var r=d.style.filter,s=new RegExp(/[\s]*progid:DXImageTransform\.Microsoft\.Matrix\([^\)]*\)/g),q=I(r,[s],j);ub(d,q)}}w=function(e,c){c=c||{};var h=c.mb,i=c.pb,f;n(U,function(a,b){f=c[b];f!==k&&a(e,f)});g.sd(e,c.c);if(!b){h!=k&&g.r(e,c.Gc+h);i!=k&&g.u(e,c.Ac+i)}if(c.de)if(d)sb(g.O(j,N,e,c));else a(e,c)};g.sb=N;if(d)g.sb=w;if(e)g.sb=a;else if(!b)a=N;g.H=w;w(f,l)}g.sb=w;g.H=w;function Db(i,l,p){var d=this,b=[1,0,0,0,0,1,0,0,0,0,1,0,i||0,l||0,p||0,1],h=c.sin,g=c.cos,m=c.tan;function f(a){return a*c.PI/180}function o(a,b){return{x:a,y:b}}function n(c,e,l,m,o,r,t,u,w,z,A,C,E,b,f,k,a,g,i,n,p,q,s,v,x,y,B,D,F,d,h,j){return[c*a+e*p+l*x+m*F,c*g+e*q+l*y+m*d,c*i+e*s+l*B+m*h,c*n+e*v+l*D+m*j,o*a+r*p+t*x+u*F,o*g+r*q+t*y+u*d,o*i+r*s+t*B+u*h,o*n+r*v+t*D+u*j,w*a+z*p+A*x+C*F,w*g+z*q+A*y+C*d,w*i+z*s+A*B+C*h,w*n+z*v+A*D+C*j,E*a+b*p+f*x+k*F,E*g+b*q+f*y+k*d,E*i+b*s+f*B+k*h,E*n+b*v+f*D+k*j]}function e(c,a){return n.apply(j,(a||b).concat(c))}d.yc=function(a,c,d){if(a==k)a=1;if(c==k)c=1;if(d==k)d=1;if(a!=1||c!=1||d!=1)b=e([a,0,0,0,0,c,0,0,0,0,d,0,0,0,0,1])};d.ob=function(a,c,d){b[12]+=a||0;b[13]+=c||0;b[14]+=d||0};d.F=function(c){if(c){a=f(c);var d=g(a),i=h(a);b=e([1,0,0,0,0,d,i,0,0,-i,d,0,0,0,0,1])}};d.G=function(c){if(c){a=f(c);var 
d=g(a),i=h(a);b=e([d,0,-i,0,0,1,0,0,i,0,d,0,0,0,0,1])}};d.Wd=function(c){if(c){a=f(c);var d=g(a),i=h(a);b=e([d,i,0,0,-i,d,0,0,0,0,1,0,0,0,0,1])}};d.Vd=function(a,c){if(a||c){i=f(a);l=f(c);b=e([1,m(l),0,0,m(i),1,0,0,0,0,1,0,0,0,0,1])}};d.M=function(c){var a=e(b,[1,0,0,0,0,1,0,0,0,0,1,0,c.x,c.y,0,1]);return o(a[12],a[13])};d.Ud=function(){return"matrix3d("+b.join(",")+")"};d.Td=function(){return"progid:DXImageTransform.Microsoft.Matrix(M11="+b[0]+", M12="+b[4]+", M21="+b[1]+", M22="+b[5]+", SizingMethod='auto expand')"}}new function(){var a=this;function b(d,g){for(var j=d[0].length,i=d.length,h=g[0].length,f=[],c=0;c<i;c++)for(var k=f[c]=[],b=0;b<h;b++){for(var e=0,a=0;a<j;a++)e+=d[c][a]*g[a][b];k[b]=e}return f}a.n=function(b,c){return a.Dc(b,c,0)};a.m=function(b,c){return a.Dc(b,0,c)};a.Dc=function(a,c,d){return b(a,[[c,0],[0,d]])};a.M=function(d,c){var a=b(d,[[c.x],[c.y]]);return u(a[0][0],a[1][0])}};var O={Gc:0,Ac:0,mb:0,pb:0,S:1,n:1,m:1,U:0,F:0,G:0,L:0,W:0,K:0,xb:0,wb:0};g.Od=function(a){var c=a||{};if(a)if(b.Bc(a))c={cc:c};else if(b.Bc(a.c))c.c={cc:a.c};return c};g.Nd=function(l,m,w,n,y,z,o){var a=m;if(l){a={};for(var g in m){var A=z[g]||1,v=y[g]||[0,1],f=(w-v[0])/v[1];f=c.min(c.max(f,0),1);f=f*A;var u=c.floor(f);if(f!=u)f-=u;var h=n.cc||e.Ed,i,B=l[g],q=m[g];if(b.Ne(q)){h=n[g]||h;var x=h(f);i=B+q*x}else{i=b.T({tb:{}},l[g]);b.e(q.tb||q,function(d,a){if(n.c)h=n.c[a]||n.c.cc||h;var c=h(f),b=d*c;i.tb[a]=b;i[a]+=b})}a[g]=i}var t=b.e(m,function(b,a){return O[a]!=k});t&&b.e(O,function(c,b){if(a[b]==k&&l[b]!==k)a[b]=l[b]});if(t){if(a.S)a.n=a.m=a.S;a.Z=o.Z;a.V=o.V;a.de=d}}if(m.c&&o.ob){var p=a.c.tb,s=(p.j||0)+(p.q||0),r=(p.f||0)+(p.p||0);a.f=(a.f||0)+r;a.j=(a.j||0)+s;a.c.f-=r;a.c.p-=r;a.c.j-=s;a.c.q-=s}if(a.c&&b.td()&&!a.c.j&&!a.c.f&&a.c.p==o.Z&&a.c.q==o.V)a.c=j;return a}};function n(){var a=this,d=[];function i(a,b){d.push({dc:a,fc:b})}function 
g(a,c){b.e(d,function(b,e){b.dc==a&&b.fc===c&&d.splice(e,1)})}a.qb=a.addEventListener=i;a.removeEventListener=g;a.i=function(a){var c=[].slice.call(arguments,1);b.e(d,function(b){b.dc==a&&b.fc.apply(h,c)})}}var m=function(z,C,i,J,M,L){z=z||0;var a=this,q,n,o,u,A=0,G,H,F,B,y=0,g=0,m=0,D,k,f,e,p,w=[],x;function O(a){f+=a;e+=a;k+=a;g+=a;m+=a;y+=a}function t(o){var h=o;if(p&&(h>=e||h<=f))h=((h-f)%p+p)%p+f;if(!D||u||g!=h){var j=c.min(h,e);j=c.max(j,f);if(!D||u||j!=m){if(L){var l=(j-k)/(C||1);if(i.Id)l=1-l;var n=b.Nd(M,L,l,G,F,H,i);if(x)b.e(n,function(b,a){x[a]&&x[a](J,b)});else b.H(J,n)}a.Vb(m-k,j-k);m=j;b.e(w,function(b,c){var a=o<g?w[w.length-c-1]:b;a.B(m-y)});var r=g,q=m;g=h;D=d;a.ub(r,q)}}}function E(a,b,d){b&&a.Ob(e);if(!d){f=c.min(f,a.Cc()+y);e=c.max(e,a.Qb()+y)}w.push(a)}var r=h.requestAnimationFrame||h.webkitRequestAnimationFrame||h.mozRequestAnimationFrame||h.msRequestAnimationFrame;if(b.Ge()&&b.hc()<7)r=j;r=r||function(a){b.jc(a,i.zc)};function I(){if(q){var d=b.E(),e=c.min(d-A,i.vc),a=g+e*o;A=d;if(a*o>=n*o)a=n;t(a);if(!u&&a*o>=n*o)K(B);else r(I)}}function s(h,i,j){if(!q){q=d;u=j;B=i;h=c.max(h,f);h=c.min(h,e);n=h;o=n<g?-1:1;a.uc();A=b.E();r(I)}}function K(b){if(q){u=q=B=l;a.tc();b&&b()}}a.mc=function(a,b,c){s(a?g+a:e,b,c)};a.ic=s;a.Q=K;a.Cd=function(a){s(a)};a.D=function(){return g};a.sc=function(){return n};a.ib=function(){return m};a.B=t;a.ob=function(a){t(g+a)};a.Ec=function(){return q};a.Jd=function(a){p=a};a.Ob=O;a.kc=function(a,b){E(a,0,b)};a.Wb=function(a){E(a,1)};a.Cc=function(){return f};a.Qb=function(){return e};a.ub=a.uc=a.tc=a.Vb=b.gc;a.bc=b.E();i=b.T({zc:16,vc:50},i);p=i.wc;x=i.Yd;f=k=z;e=z+C;H=i.Zd||{};F=i.be||{};G=b.Od(i.gb)};new(function(){});var i=function(p,dc){var g=this;function Ac(){var a=this;m.call(a,-1e8,2e8);a.Ke=function(){var b=a.ib(),d=c.floor(b),f=t(d),e=b-c.floor(b);return{R:f,He:d,kb:e}};a.ub=function(b,a){var e=c.floor(a);if(e!=a&&a>b)e++;Sb(e,d);g.i(i.Ee,t(a),t(b),a,b)}}function zc(){var 
a=this;m.call(a,0,0,{wc:q});b.e(C,function(b){D&1&&b.Jd(q);a.Wb(b);b.Ob(ib/Zb)})}function yc(){var a=this,b=Tb.N;m.call(a,-1,2,{gb:e.Fd,Yd:{kb:Yb},wc:q},b,{kb:1},{kb:-2});a.zb=b}function mc(o,n){var b=this,e,f,h,k,c;m.call(b,-1e8,2e8,{vc:100});b.uc=function(){M=d;S=j;g.i(i.ie,t(w.D()),w.D())};b.tc=function(){M=l;k=l;var a=w.Ke();g.i(i.ke,t(w.D()),w.D());!a.kb&&Cc(a.He,s)};b.ub=function(i,g){var b;if(k)b=c;else{b=f;if(h){var d=g/h;b=a.le(d)*(f-e)+e}}w.B(b)};b.yb=function(a,d,c,g){e=a;f=d;h=c;w.B(a);b.B(0);b.ic(c,g)};b.ne=function(a){k=d;c=a;b.mc(a,j,d)};b.se=function(a){c=a};w=new Ac;w.kc(o);w.kc(n)}function oc(){var c=this,a=Wb();b.v(a,0);b.C(a,"pointerEvents","none");c.N=a;c.Cb=function(){b.J(a);b.oc(a)}}function wc(o,f){var e=this,r,L,v,k,y=[],x,B,W,G,Q,F,h,w,p;m.call(e,-u,u+1,{});function E(a){r&&r.Qc();T(o,a,0);F=d;r=new I.X(o,I,b.jd(b.g(o,"idle"))||kc);r.B(0)}function Z(){r.bc<I.bc&&E()}function M(p,r,o){if(!G){G=d;if(k&&o){var h=o.width,c=o.height,n=h,m=c;if(h&&c&&a.eb){if(a.eb&3&&(!(a.eb&4)||h>K||c>J)){var j=l,q=K/J*c/h;if(a.eb&1)j=q>1;else if(a.eb&2)j=q<1;n=j?h*J/c:K;m=j?J:c*K/h}b.k(k,n);b.l(k,m);b.u(k,(J-m)/2);b.r(k,(K-n)/2)}b.s(k,"absolute");g.i(i.De,f)}}b.J(r);p&&p(e)}function Y(b,c,d,g){if(g==S&&s==f&&N)if(!Bc){var a=t(b);A.Ie(a,f,c,e,d);c.te();U.Ob(a-U.Cc()-1);U.B(a);z.yb(b,b,0)}}function cb(b){if(b==S&&s==f){if(!h){var a=j;if(A)if(A.R==f)a=A.Qe();else A.Cb();Z();h=new uc(o,f,a,r);h.Nc(p)}!h.Ec()&&h.Tb()}}function R(d,g,l){if(d==f){if(d!=g)C[g]&&C[g].ve();else!l&&h&&h.ye();p&&p.Hc();var m=S=b.E();e.db(b.O(j,cb,m))}else{var k=c.min(f,d),i=c.max(f,d),o=c.min(i-k,k+q-i),n=u+a.ze-1;(!Q||o<=n)&&e.db()}}function db(){if(s==f&&h){h.Q();p&&p.xe();p&&p.ue();h.id()}}function eb(){s==f&&h&&h.Q()}function ab(a){!P&&g.i(i.qe,f,a)}function O(){p=w.pInstance;h&&h.Nc(p)}e.db=function(c,a){a=a||v;if(y.length&&!G){b.hb(a);if(!W){W=d;g.i(i.oe,f);b.e(y,function(a){if(!b.bb(a,"src")){a.src=b.g(a,"src2");b.I(a,a["display-origin"])}})}b.md(y,k,b.O(j,M,c,a))}else 
M(c,a)};e.me=function(){var h=f;if(a.Zc<0)h-=q;var d=h+a.Zc*sc;if(D&2)d=t(d);if(!(D&1))d=c.max(0,c.min(d,q-u));if(d!=f){if(A){var e=A.he(q);if(e){var i=S=b.E(),g=C[t(d)];return g.db(b.O(j,Y,d,g,e,i),v)}}bb(d)}};e.Ub=function(){R(f,f,d)};e.ve=function(){p&&p.xe();p&&p.ue();e.Wc();h&&h.re();h=j;E()};e.te=function(){b.J(o)};e.Wc=function(){b.hb(o)};e.Je=function(){p&&p.Hc()};function T(a,c,e){if(b.bb(a,"jssor-slider"))return;if(!F){if(a.tagName=="IMG"){y.push(a);if(!b.bb(a,"src")){Q=d;a["display-origin"]=b.I(a);b.J(a)}}b.xc()&&b.v(a,(b.v(a)||0)+1)}var f=b.Bb(a);b.e(f,function(f){var h=f.tagName,i=b.g(f,"u");if(i=="player"&&!w){w=f;if(w.pInstance)O();else b.a(w,"dataavailable",O)}if(i=="caption"){if(c){b.Ic(f,b.g(f,"to"));b.Be(f,b.g(f,"bf"));b.g(f,"3d")&&b.Fe(f,"preserve-3d")}else if(!b.gd()){var g=b.cb(f,l,d);b.Gb(g,f,a);b.Hb(f,a);f=g;c=d}}else if(!F&&!e&&!k){if(h=="A"){if(b.g(f,"u")=="image")k=b.Te(f,"IMG");else k=b.o(f,"image",d);if(k){x=f;b.I(x,"block");b.H(x,V);B=b.cb(x,d);b.s(x,"relative");b.ec(B,0);b.C(B,"backgroundColor","#000")}}else if(h=="IMG"&&b.g(f,"u")=="image")k=f;if(k){k.border=0;b.H(k,V)}}T(f,c,e+1)})}e.Vb=function(c,b){var a=u-b;Yb(L,a)};e.R=f;n.call(e);b.je(o,b.g(o,"p"));b.Ce(o,b.g(o,"po"));var H=b.o(o,"thumb",d);if(H){e.Re=b.cb(H);b.J(H)}b.hb(o);v=b.cb(fb);b.v(v,1e3);b.a(o,"click",ab);E(d);e.Rc=k;e.ad=B;e.zb=L=o;b.A(L,v);g.qb(203,R);g.qb(28,eb);g.qb(24,db)}function uc(y,f,p,q){var a=this,n=0,u=0,h,j,e,c,k,t,r,o=C[f];m.call(a,0,0);function v(){b.oc(L);ac&&k&&o.ad&&b.A(L,o.ad);b.hb(L,!k&&o.Rc)}function w(){a.Tb()}function x(b){r=b;a.Q();a.Tb()}a.Tb=function(){var b=a.ib();if(!B&&!M&&!r&&s==f){if(!b){if(h&&!k){k=d;a.id(d);g.i(i.Se,f,n,u,h,c)}v()}var l,p=i.Jc;if(b!=c)if(b==e)l=c;else if(b==j)l=e;else if(!b)l=j;else l=a.sc();g.i(p,f,b,n,j,e,c);var m=N&&(!E||F);if(b==c)(e!=c&&!(E&12)||m)&&o.me();else(m||b!=e)&&a.ic(l,w)}};a.ye=function(){e==c&&e==a.ib()&&a.B(j)};a.re=function(){A&&A.R==f&&A.Cb();var 
b=a.ib();b<c&&g.i(i.Jc,f,-b-1,n,j,e,c)};a.id=function(a){p&&b.fb(jb,a&&p.qc.bf?"":"hidden")};a.Vb=function(b,a){if(k&&a>=h){k=l;v();o.Wc();A.Cb();g.i(i.xd,f,n,u,h,c)}g.i(i.ud,f,a,n,j,e,c)};a.Nc=function(a){if(a&&!t){t=a;a.qb($JssorPlayer$.Bd,x)}};p&&a.Wb(p);h=a.Qb();a.Wb(q);j=h+q.Xc;e=h+q.Vc;c=a.Qb()}function Jb(a,c,d){b.r(a,c);b.u(a,d)}function Yb(c,b){var a=x>0?x:eb,d=zb*b*(a&1),e=Ab*b*(a>>1&1);Jb(c,d,e)}function Ob(){pb=M;Ib=z.sc();G=w.D()}function fc(){Ob();if(B||!F&&E&12){z.Q();g.i(i.rd)}}function cc(f){if(!B&&(F||!(E&12))&&!z.Ec()){var d=w.D(),b=c.ceil(G);if(f&&c.abs(H)>=a.Uc){b=c.ceil(d);b+=hb}if(!(D&1))b=c.min(q-u,c.max(b,0));var e=c.abs(b-d);e=1-c.pow(1-e,5);if(!P&&pb)z.Cd(Ib);else if(d==b){sb.Je();sb.Ub()}else z.yb(d,b,e*Ub)}}function Hb(a){!b.g(b.Rb(a),"nodrag")&&b.Ab(a)}function qc(a){Xb(a,1)}function Xb(a,c){a=b.nc(a);var k=b.Rb(a);if(!O&&!b.g(k,"nodrag")&&rc()&&(!c||a.touches.length==1)){B=d;yb=l;S=j;b.a(f,c?"touchmove":"mousemove",Bb);b.E();P=0;fc();if(!pb)x=0;if(c){var h=a.touches[0];ub=h.clientX;vb=h.clientY}else{var e=b.rc(a);ub=e.x;vb=e.y}H=0;gb=0;hb=0;g.i(i.qd,t(G),G,a)}}function Bb(e){if(B){e=b.nc(e);var f;if(e.type!="mousemove"){var l=e.touches[0];f={x:l.clientX,y:l.clientY}}else f=b.rc(e);if(f){var j=f.x-ub,k=f.y-vb;if(c.floor(G)!=G)x=x||eb&O;if((j||k)&&!x){if(O==3)if(c.abs(k)>c.abs(j))x=2;else x=1;else x=O;if(mb&&x==1&&c.abs(k)-c.abs(j)>3)yb=d}if(x){var a=k,i=Ab;if(x==1){a=j;i=zb}if(!(D&1)){if(a>0){var g=i*s,h=a-g;if(h>0)a=g+c.sqrt(h)*5}if(a<0){var g=i*(q-u-s),h=-a-g;if(h>0)a=-g-c.sqrt(h)*5}}if(H-gb<-2)hb=0;else if(H-gb>2)hb=-1;gb=H;H=a;rb=G-H/i/(Y||1);if(H&&x&&!yb){b.Ab(e);if(!M)z.ne(rb);else z.se(rb)}}}}}function ab(){pc();if(B){B=l;b.E();b.z(f,"mousemove",Bb);b.z(f,"touchmove",Bb);P=H;z.Q();var a=w.D();g.i(i.pd,t(a),a,t(G),G);E&12&&Ob();cc(d)}}function hc(c){if(P){b.pe(c);var a=b.Rb(c);while(a&&v!==a){a.tagName=="A"&&b.Ab(c);try{a=a.parentNode}catch(d){break}}}}function jc(a){C[s];s=t(a);sb=C[s];Sb(a);return s}function 
Cc(a,b){x=0;jc(a);g.i(i.fe,t(a),b)}function Sb(a,c){wb=a;b.e(T,function(b){b.Ib(t(a),a,c)})}function rc(){var b=i.Oc||0,a=X;if(mb)a&1&&(a&=1);i.Oc|=a;return O=a&~b}function pc(){if(O){i.Oc&=~X;O=0}}function Wb(){var a=b.P();b.H(a,V);b.s(a,"absolute");return a}function t(a){return(a%q+q)%q}function ic(b,d){if(d)if(!D){b=c.min(c.max(b+wb,0),q-u);d=l}else if(D&2){b=t(b+wb);d=l}bb(b,a.Lb,d)}function xb(){b.e(T,function(a){a.Xb(a.Db.af<=F)})}function Ec(){if(!F){F=1;xb();if(!B){E&12&&cc();E&3&&C[s].Ub()}}}function Dc(){if(F){F=0;xb();B||!(E&12)||fc()}}function gc(){V={Eb:K,Fb:J,j:0,f:0};b.e(Q,function(a){b.H(a,V);b.s(a,"absolute");b.fb(a,"hidden");b.J(a)});b.H(fb,V)}function ob(b,a){bb(b,a,d)}function bb(g,f,j){if(Qb&&(!B&&(F||!(E&12))||a.Mc)){M=d;B=l;z.Q();if(f==k)f=Ub;var e=Cb.ib(),b=g;if(j){b=e+g;if(g>0)b=c.ceil(b);else b=c.floor(b)}if(D&2)b=t(b);if(!(D&1))b=c.max(0,c.min(b,q-u));var i=(b-e)%q;b=e+i;var h=e==b?0:f*c.abs(i);h=c.min(h,f*u*1.5);z.yb(e,b,h||1)}}g.od=bb;g.mc=function(){if(!N){N=d;C[s]&&C[s].Ub()}};g.ae=function(){return P};function W(){return b.k(y||p)}function lb(){return b.l(y||p)}g.Z=W;g.V=lb;function Eb(c,d){if(c==k)return b.k(p);if(!y){var a=b.P(f);b.Fc(a,b.Fc(p));b.rb(a,b.rb(p));b.I(a,"block");b.s(a,"relative");b.u(a,0);b.r(a,0);b.fb(a,"visible");y=b.P(f);b.s(y,"absolute");b.u(y,0);b.r(y,0);b.k(y,b.k(p));b.l(y,b.l(p));b.Ic(y,"0 0");b.A(y,a);var h=b.Bb(p);b.A(p,y);b.C(p,"backgroundImage","");b.e(h,function(c){b.A(b.g(c,"noscale")?p:a,c);b.g(c,"autocenter")&&Kb.push(c)})}Y=c/(d?b.l:b.k)(y);b.Ae(y,Y);var g=d?Y*W():c,e=d?c:Y*lb();b.k(p,g);b.l(p,e);b.e(Kb,function(a){var c=b.ld(b.g(a,"autocenter"));b.nd(a,c)})}g.Ad=Eb;g.Tc=function(a){var d=c.ceil(t(ib/Zb)),b=t(a-s+d);if(b>u){if(a-s>q/2)a-=q;else if(a-s<=-q/2)a+=q}else a=s+b-d;return a};n.call(g);g.N=p=b.ab(p);var a=b.T({eb:0,ze:1,Mb:1,Jb:0,ac:l,Sc:1,jb:d,Mc:d,Zc:1,Yc:3e3,Kc:1,Lb:500,le:e.Hd,Uc:20,Lc:0,Y:1,Yb:0,Sd:1,Zb:1,Pc:1},dc);a.jb=a.jb&&b.ee();if(a.Rd!=k)a.Yc=a.Rd;if(a.Qd!=k)a.Yb=a.Qd;var 
eb=a.Zb&3,sc=(a.Zb&4)/-4||1,kb=a.cf,I=b.T({X:r,jb:a.jb},a.Ve);I.Sb=I.Sb||I.We;var Fb=a.Xe,Z=a.Ye,db=a.Md,R=!a.Sd,y,v=b.o(p,"slides",R),fb=b.o(p,"loading",R)||b.P(f),Mb=b.o(p,"navigator",R),ec=b.o(p,"arrowleft",R),bc=b.o(p,"arrowright",R),Lb=b.o(p,"thumbnavigator",R),nc=b.k(v),lc=b.l(v),V,Q=[],tc=b.Bb(v);b.e(tc,function(a){if(a.tagName=="DIV"&&!b.g(a,"u"))Q.push(a);else b.xc()&&b.v(a,(b.v(a)||0)+1)});var s=-1,wb,sb,q=Q.length,K=a.Ld||nc,J=a.Kd||lc,Vb=a.Lc,zb=K+Vb,Ab=J+Vb,Zb=eb&1?zb:Ab,u=c.min(a.Y,q),jb,x,O,yb,T=[],Pb,Rb,Nb,ac,Bc,N,E=a.Kc,kc=a.Yc,Ub=a.Lb,qb,tb,ib,Qb=u<q,D=Qb?a.Sc:0,X,P,F=1,M,B,S,ub=0,vb=0,H,gb,hb,Cb,w,U,z,Tb=new oc,Y,Kb=[];if(a.jb)Jb=function(a,c,d){b.sb(a,{L:c,W:d})};N=a.ac;g.Db=dc;gc();b.bb(p,"jssor-slider",d);b.v(v,b.v(v)||0);b.s(v,"absolute");jb=b.cb(v,d);b.Gb(jb,v);if(kb){ac=kb.Ze;qb=kb.X;tb=u==1&&q>1&&qb&&(!b.gd()||b.hc()>=8)}ib=tb||u>=q||!(D&1)?0:a.Yb;X=(u>1||ib?eb:-1)&a.Pc;var Gb=v,C=[],A,L,Db=b.Le(),mb=Db.Pe,G,pb,Ib,rb;Db.hd&&b.C(Gb,Db.hd,([j,"pan-y","pan-x","none"])[X]||"");U=new yc;if(tb)A=new qb(Tb,K,J,kb,mb);b.A(jb,U.zb);b.fb(v,"hidden");L=Wb();b.C(L,"backgroundColor","#000");b.ec(L,0);b.Gb(L,Gb.firstChild,Gb);for(var cb=0;cb<Q.length;cb++){var vc=Q[cb],xc=new wc(vc,cb);C.push(xc)}b.J(fb);Cb=new zc;z=new mc(Cb,U);if(X){b.a(v,"mousedown",Xb);b.a(v,"touchstart",qc);b.a(v,"dragstart",Hb);b.a(v,"selectstart",Hb);b.a(f,"mouseup",ab);b.a(f,"touchend",ab);b.a(f,"touchcancel",ab);b.a(h,"blur",ab)}E&=mb?10:5;if(Mb&&Fb){Pb=new Fb.X(Mb,Fb,W(),lb());T.push(Pb)}if(Z&&ec&&bc){Z.Sc=D;Z.Y=u;Rb=new Z.X(ec,bc,Z,W(),lb());T.push(Rb)}if(Lb&&db){db.Jb=a.Jb;Nb=new db.X(Lb,db);T.push(Nb)}b.e(T,function(a){a.Pb(q,C,fb);a.qb(o.fd,ic)});b.C(p,"visibility","visible");Eb(W());b.a(v,"click",hc);b.a(p,"mouseout",b.Kb(Ec,p));b.a(p,"mouseover",b.Kb(Dc,p));xb();a.Mb&&b.a(f,"keydown",function(b){if(b.keyCode==37)ob(-a.Mb);else b.keyCode==39&&ob(a.Mb)});var 
nb=a.Jb;if(!(D&1))nb=c.max(0,c.min(nb,q-u));z.yb(nb,nb,0)};i.qe=21;i.qd=22;i.pd=23;i.ie=24;i.ke=25;i.oe=26;i.De=27;i.rd=28;i.Ee=202;i.fe=203;i.Se=206;i.xd=207;i.ud=208;i.Jc=209;var o={fd:1};var q=function(g,B){var h=this,z,p,a,v=[],x,w,e,q,r,u,t,m,s,f,k;n.call(h);g=b.ab(g);function A(n,f){var g=this,c,m,l;function q(){m.zd(p==f)}function i(d){if(d||!s.ae()){var a=e-f%e,b=s.Tc((f+a)/e-1),c=b*e+e-a;h.i(o.fd,c)}}g.R=f;g.bd=q;l=n.Re||n.Rc||b.P();g.zb=c=b.yd(k,"thumbnailtemplate",l,d);m=b.Pd(c);a.Nb&1&&b.a(c,"click",b.O(j,i,0));a.Nb&2&&b.a(c,"mouseover",b.Kb(b.O(j,i,1),c))}h.Ib=function(b,d,f){var a=p;p=b;a!=-1&&v[a].bd();v[b].bd();!f&&s.od(s.Tc(c.floor(d/e)))};h.Xb=function(a){b.hb(g,a)};var y;h.Pb=function(D,C){if(!y){z=D;c.ceil(z/e);p=-1;m=c.min(m,C.length);var h=a.vb&1,n=u+(u+q)*(e-1)*(1-h),k=t+(t+r)*(e-1)*h,B=n+(n+q)*(m-1)*h,o=k+(k+r)*(m-1)*(1-h);b.s(f,"absolute");b.fb(f,"hidden");a.cd&1&&b.r(f,(x-B)/2);a.cd&2&&b.u(f,(w-o)/2);b.k(f,B);b.l(f,o);var j=[];b.e(C,function(l,g){var i=new A(l,g),d=i.zb,a=c.floor(g/e),k=g%e;b.r(d,(u+q)*k*(1-h));b.u(d,(t+r)*k*h);if(!j[a]){j[a]=b.P();b.A(f,j[a])}b.A(j[a],d);v.push(i)});var E=b.T({ac:l,Mc:l,Ld:n,Kd:k,Lc:q*h+r*(1-h),Uc:12,Lb:200,Kc:1,Zb:a.vb,Pc:a.Dd||a.Ue?0:a.vb},a);s=new i(g,E);y=d}};h.Db=a=b.T({ed:0,dd:0,Y:1,vb:1,cd:3,Nb:1},B);x=b.k(g);w=b.l(g);f=b.o(g,"slides",d);k=b.o(f,"prototype");u=b.k(k);t=b.l(k);b.Hb(k,f);e=a.vd||1;q=a.ed;r=a.dd;m=a.Y;a.yc==l&&b.bb(g,"noscale",d)};function r(e,d,c){var a=this;m.call(a,0,c);a.Qc=b.gc;a.Xc=0;a.Vc=c}jssor_1_slider_init=function(){var f={ac:l,Md:{X:q,Y:5,Yb:200,Dd:d}},e=new i("jssor_1",f);function a(){var b=e.N.parentNode.clientWidth;if(b){b=c.min(b,600);e.Ad(b)}else h.setTimeout(a,30)}a();b.a(h,"load",a);b.a(h,"resize",a);b.a(h,"orientationchange",a)}})(window,document,Math,null,true,false)
</script>
""")
stream.write("""<style>
.jssort12 .w{cursor:pointer;position:absolute;WIDTH:""" + str(self.labelwidth) + """px;HEIGHT:28px;border:1px solid gray;top:0;left:-1px}.jssort12 .p{position:absolute;width:100px;height:30px;top:0;left:0;padding:0}.jssort12 .pav .w,.jssort12 .pdn .w{border-bottom:1px solid #fff}.jssort12 .c{width:100%;height:100%;position:absolute;top:0;left:0;line-height:28px;text-align:center;color:#000;font-size:13px}.jssort12 .p .c,.jssort12 .pav:hover .c{background-color:#eee}.jssort12 .pav .c,.jssort12 .p:hover .c{background-color:#fff}
</style>
""")
self.initialized = True
def preamble(self, stream):
    """Write the opening slider markup to *stream*: the fixed 600x530 outer
    container, the loading overlay, and the slides wrapper (left open — the
    caller appends the slide divs, then ``closing`` terminates it)."""
    markup = """<div id="jssor_1" style="position: relative; margin: 0 auto; top: 0px; left: 0px; width: 600px; height: 530px; overflow: hidden; visibility: hidden; background-color: #ffffff;">
<!-- Loading Screen -->
<div data-u="loading" style="position: absolute; top: 0px; left: 0px;">
<div style="filter: alpha(opacity=70); opacity: 0.7; position: absolute; display: block; top: 0px; left: 0px; width: 100%; height: 100%;"></div>
<div style="position:absolute;display:block;background:url('img/loading.gif') no-repeat center center;top:0px;left:0px;width:100%;height:100%;"></div>
</div>
<div data-u="slides" style="cursor: default; position: relative; top: 29px; left: 0px; width: 598px; height: 499px; overflow: hidden; border: 1px solid #adadad">
"""
    stream.write(markup)
def closing(self, stream):
    """Write the trailing slider markup to *stream*: close the slides wrapper
    opened by ``preamble``, emit the horizontal "jssort12" thumbnail
    navigator skin, the hidden jssor attribution link, and the <script>
    call that boots the slider."""
    markup = """</div>
<!-- Thumbnail Navigator -->
<div data-u="thumbnavigator" class="jssort12" style="position:absolute;left:0px;top:0px;width:500px;height:30px;">
<!-- Thumbnail Item Skin Begin -->
<div data-u="slides" style="cursor: default; top: 0px; left: 0px; border-left: 1px solid gray;">
<div data-u="prototype" class="p">
<div class="w">
<div data-u="thumbnailtemplate" class="c"></div>
</div>
</div>
</div>
<!-- Thumbnail Item Skin End -->
</div>
<a href="http://www.jssor.com" style="display:none">Bootstrap Carousel</a>
</div>
<script>
jssor_1_slider_init();
</script>
"""
    stream.write(markup)
# NOTE(review): leading indentation in this file appears to have been stripped
# (method bodies sit at column 0). Comments below are added at column 0 to
# match; no code token is altered.
class VerticalSlider(ImageSlider):
# Slider variant with a vertical thumbnail/label navigator (jssor "jssort13"
# skin) beside the slides; geometry derives from self.width, self.height and
# self.labelwidth.
def initialize(self, stream):
# One-time page assets: the vendored, minified Jssor runtime plus the
# jssor_1_slider_init() bootstrap, which sizes the slider to its parent
# element (capped at 700px) and re-runs on load/resize/orientationchange.
stream.write("""<script>
(function(h,f,c,j,d,l,k){/*! Jssor */
new(function(){});var e={Fd:function(a){return-c.cos(a*c.PI)/2+.5},Hd:function(a){return a},Id:function(a){return-a*(a-2)}};var b=new function(){var g=this,Bb=/\S+/g,G=1,db=2,hb=3,gb=4,lb=5,H,r=0,i=0,s=0,W=0,z=0,J=navigator,pb=J.appName,o=J.userAgent,p=parseFloat;function zb(){if(!H){H={Fe:"ontouchstart"in h||"createTouch"in f};var a;if(J.pointerEnabled||(a=J.msPointerEnabled))H.id=a?"msTouchAction":"touchAction"}return H}function v(j){if(!r){r=-1;if(pb=="Microsoft Internet Explorer"&&!!h.attachEvent&&!!h.ActiveXObject){var e=o.indexOf("MSIE");r=G;s=p(o.substring(e+5,o.indexOf(";",e)));/*@cc_on W=@_jscript_version@*/;i=f.documentMode||s}else if(pb=="Netscape"&&!!h.addEventListener){var d=o.indexOf("Firefox"),b=o.indexOf("Safari"),g=o.indexOf("Chrome"),c=o.indexOf("AppleWebKit");if(d>=0){r=db;i=p(o.substring(d+8))}else if(b>=0){var k=o.substring(0,b).lastIndexOf("/");r=g>=0?gb:hb;i=p(o.substring(k+1,b))}else{var a=/Trident\/.*rv:([0-9]{1,}[\.0-9]{0,})/i.exec(o);if(a){r=G;i=s=p(a[1])}}if(c>=0)z=p(o.substring(c+12))}else{var a=/(opera)(?:.*version|)[ \/]([\w.]+)/i.exec(o);if(a){r=lb;i=p(a[2])}}}return j==r}function q(){return v(G)}function R(){return q()&&(i<6||f.compatMode=="BackCompat")}function fb(){return v(hb)}function kb(){return v(lb)}function wb(){return fb()&&z>534&&z<535}function K(){v();return z>537||i>42||r==G&&i>=11}function P(){return q()&&i<9}function xb(a){var b,c;return function(f){if(!b){b=d;var e=a.substr(0,1).toUpperCase()+a.substr(1);n([a].concat(["WebKit","ms","Moz","O","webkit"]),function(g,d){var b=a;if(d)b=g+e;if(f.style[b]!=k)return c=b})}return c}}function vb(b){var a;return function(c){a=a||xb(b)(c)||b;return a}}var L=vb("transform");function ob(a){return{}.toString.call(a)}var F;function Hb(){if(!F){F={};n(["Boolean","Number","String","Function","Array","Date","RegExp","Object"],function(a){F["[object "+a+"]"]=a.toLowerCase()})}return F}function n(b,d){var a,c;if(ob(b)=="[object Array]"){for(a=0;a<b.length;a++)if(c=d(b[a],a,b))return 
c}else for(a in b)if(c=d(b[a],a,b))return c}function C(a){return a==j?String(a):Hb()[ob(a)]||"object"}function mb(a){for(var b in a)return d}function A(a){try{return C(a)=="object"&&!a.nodeType&&a!=a.window&&(!a.constructor||{}.hasOwnProperty.call(a.constructor.prototype,"isPrototypeOf"))}catch(b){}}function u(a,b){return{x:a,y:b}}function sb(b,a){setTimeout(b,a||0)}function I(b,d,c){var a=!b||b=="inherit"?"":b;n(d,function(c){var b=c.exec(a);if(b){var d=a.substr(0,b.index),e=a.substr(b.index+b[0].length+1,a.length-1);a=d+e}});a=c+(!a.indexOf(" ")?"":" ")+a;return a}function ub(b,a){if(i<9)b.style.filter=a}g.Oe=zb;g.Zc=q;g.Le=fb;g.zd=K;g.Ic=P;xb("transform");g.qc=function(){return i};g.nc=sb;function Z(a){a.constructor===Z.caller&&a.jc&&a.jc.apply(a,Z.caller.arguments)}g.jc=Z;g.cb=function(a){if(g.Re(a))a=f.getElementById(a);return a};function t(a){return a||h.event}g.hc=t;g.Qb=function(b){b=t(b);var a=b.target||b.srcElement||f;if(a.nodeType==3)a=g.kc(a);return a};g.rc=function(a){a=t(a);return{x:a.pageX||a.clientX||0,y:a.pageY||a.clientY||0}};function D(c,d,a){if(a!==k)c.style[d]=a==k?"":a;else{var b=c.currentStyle||c.style;a=b[d];if(a==""&&h.getComputedStyle){b=c.ownerDocument.defaultView.getComputedStyle(c,j);b&&(a=b.getPropertyValue(d)||b[d])}return a}}function bb(b,c,a,d){if(a!==k){if(a==j)a="";else d&&(a+="px");D(b,c,a)}else return p(D(b,c))}function m(c,a){var d=a?bb:D,b;if(a&4)b=vb(c);return function(e,f){return d(e,b?b(e):c,f,a&2)}}function Eb(b){if(q()&&s<9){var a=/opacity=([^)]*)/.exec(b.style.filter||"");return a?p(a[1])/100:1}else return p(b.style.opacity||"1")}function Gb(b,a,f){if(q()&&s<9){var h=b.style.filter||"",i=new RegExp(/[\s]*alpha\([^\)]*\)/g),e=c.round(100*a),d="";if(e<100||f)d="alpha(opacity="+e+") ";var g=I(h,[i],d);ub(b,g)}else b.style.opacity=a==1?"":c.round(a*100)/100}var M={Q:["rotate"],I:["rotateX"],K:["rotateY"],Bb:["skewX"],Ab:["skewY"]};if(!K())M=B(M,{m:["scaleX",2],n:["scaleY",2],J:["translateZ",1]});function N(d,a){var 
c="";if(a){if(q()&&i&&i<10){delete a.I;delete a.K;delete a.J}b.e(a,function(d,b){var a=M[b];if(a){var e=a[1]||0;if(O[b]!=d)c+=" "+a[0]+"("+d+(["deg","px",""])[e]+")"}});if(K()){if(a.T||a.V||a.J)c+=" translate3d("+(a.T||0)+"px,"+(a.V||0)+"px,"+(a.J||0)+"px)";if(a.m==k)a.m=1;if(a.n==k)a.n=1;if(a.m!=1||a.n!=1)c+=" scale3d("+a.m+", "+a.n+", 1)"}}d.style[L(d)]=c}g.Jc=m("transformOrigin",4);g.Ae=m("backfaceVisibility",4);g.Be=m("transformStyle",4);g.Qe=m("perspective",6);g.je=m("perspectiveOrigin",4);g.ze=function(a,b){if(q()&&s<9||s<10&&R())a.style.zoom=b==1?"":b;else{var c=L(a),f="scale("+b+")",e=a.style[c],g=new RegExp(/[\s]*scale\(.*?\)/g),d=I(e,[g],f);a.style[c]=d}};g.Jb=function(b,a){return function(c){c=t(c);var e=c.type,d=c.relatedTarget||(e=="mouseout"?c.toElement:c.fromElement);(!d||d!==a&&!g.ve(a,d))&&b(c)}};g.a=function(a,d,b,c){a=g.cb(a);if(a.addEventListener){d=="mousewheel"&&a.addEventListener("DOMMouseScroll",b,c);a.addEventListener(d,b,c)}else if(a.attachEvent){a.attachEvent("on"+d,b);c&&a.setCapture&&a.setCapture()}};g.A=function(a,c,d,b){a=g.cb(a);if(a.removeEventListener){c=="mousewheel"&&a.removeEventListener("DOMMouseScroll",d,b);a.removeEventListener(c,d,b)}else if(a.detachEvent){a.detachEvent("on"+c,d);b&&a.releaseCapture&&a.releaseCapture()}};g.vb=function(a){a=t(a);a.preventDefault&&a.preventDefault();a.cancel=d;a.returnValue=l};g.pe=function(a){a=t(a);a.stopPropagation&&a.stopPropagation();a.cancelBubble=d};g.W=function(d,c){var a=[].slice.call(arguments,2),b=function(){var b=a.concat([].slice.call(arguments,0));return c.apply(d,b)};return b};g.Gb=function(d,c){for(var b=[],a=d.firstChild;a;a=a.nextSibling)(c||a.nodeType==1)&&b.push(a);return b};function nb(a,c,e,b){b=b||"u";for(a=a?a.firstChild:j;a;a=a.nextSibling)if(a.nodeType==1){if(V(a,b)==c)return a;if(!e){var d=nb(a,c,e,b);if(d)return d}}}g.o=nb;function T(a,d,f,b){b=b||"u";var c=[];for(a=a?a.firstChild:j;a;a=a.nextSibling)if(a.nodeType==1){V(a,b)==d&&c.push(a);if(!f){var 
e=T(a,d,f,b);if(e.length)c=c.concat(e)}}return c}function ib(a,c,d){for(a=a?a.firstChild:j;a;a=a.nextSibling)if(a.nodeType==1){if(a.tagName==c)return a;if(!d){var b=ib(a,c,d);if(b)return b}}}g.Te=ib;g.Se=function(b,a){return b.getElementsByTagName(a)};function B(){var e=arguments,d,c,b,a,g=1&e[0],f=1+g;d=e[f-1]||{};for(;f<e.length;f++)if(c=e[f])for(b in c){a=c[b];if(a!==k){a=c[b];var h=d[b];d[b]=g&&(A(h)||A(a))?B(g,{},h,a):a}}return d}g.L=B;function ab(f,g){var d={},c,a,b;for(c in f){a=f[c];b=g[c];if(a!==b){var e;if(A(a)&&A(b)){a=ab(a,b);e=!mb(a)}!e&&(d[c]=a)}}return d}g.uc=function(a){return C(a)=="function"};g.Re=function(a){return C(a)=="string"};g.Me=function(a){return!isNaN(p(a))&&isFinite(a)};g.e=n;function S(a){return f.createElement(a)}g.Y=function(){return S("DIV")};g.Gc=function(){};function X(b,c,a){if(a==k)return b.getAttribute(c);b.setAttribute(c,a)}function V(a,b){return X(a,b)||X(a,"data-"+b)}g.bb=X;g.j=V;function x(b,a){if(a==k)return b.className;b.className=a}g.Dc=x;function rb(b){var a={};n(b,function(b){a[b]=b});return a}function tb(b,a){return b.match(a||Bb)}function Q(b,a){return rb(tb(b||"",a))}g.ee=tb;function cb(b,c){var a="";n(c,function(c){a&&(a+=b);a+=c});return a}function E(a,c,b){x(a,cb(" ",B(ab(Q(x(a)),Q(c)),Q(b))))}g.kc=function(a){return a.parentNode};g.F=function(a){g.D(a,"none")};g.fb=function(a,b){g.D(a,b?"none":"")};g.yd=function(b,a){b.removeAttribute(a)};g.ud=function(){return q()&&i<10};g.td=function(d,a){if(a)d.style.clip="rect("+c.round(a.i)+"px "+c.round(a.u)+"px "+c.round(a.s)+"px "+c.round(a.f)+"px)";else{var g=d.style.cssText,f=[new RegExp(/[\s]*clip: rect\(.*?\)[;]?/i),new RegExp(/[\s]*cliptop: .*?[;]?/i),new RegExp(/[\s]*clipright: .*?[;]?/i),new RegExp(/[\s]*clipbottom: .*?[;]?/i),new RegExp(/[\s]*clipleft: .*?[;]?/i)],e=I(g,f,"");b.ub(d,e)}};g.E=function(){return+new 
Date};g.z=function(b,a){b.appendChild(a)};g.wb=function(b,a,c){(c||a.parentNode).insertBefore(b,a)};g.Ib=function(b,a){a=a||b.parentNode;a&&a.removeChild(b)};g.jd=function(a,b){n(a,function(a){g.Ib(a,b)})};g.Ec=function(a){g.jd(g.Gb(a,d),a)};g.kd=function(a,b){var c=g.kc(a);b&1&&g.v(a,(g.k(c)-g.k(a))/2);b&2&&g.r(a,(g.l(c)-g.l(a))/2)};g.md=function(b,a){return parseInt(b,a||10)};g.ld=p;g.ve=function(b,a){var c=f.body;while(a&&b!==a&&c!==a)try{a=a.parentNode}catch(d){return l}return b===a};function Y(d,c,b){var a=d.cloneNode(!c);!b&&g.yd(a,"id");return a}g.db=Y;g.eb=function(e,f){var a=new Image;function b(e,d){g.A(a,"load",b);g.A(a,"abort",c);g.A(a,"error",c);f&&f(a,d)}function c(a){b(a,d)}if(kb()&&i<11.6||!e)b(!e);else{g.a(a,"load",b);g.a(a,"abort",c);g.a(a,"error",c);a.src=e}};g.nd=function(d,a,e){var c=d.length+1;function b(b){c--;if(a&&b&&b.src==a.src)a=b;!c&&e&&e(a)}n(d,function(a){g.eb(a.src,b)});b()};g.qd=function(a,g,i,h){if(h)a=Y(a);var c=T(a,g);if(!c.length)c=b.Se(a,g);for(var f=c.length-1;f>-1;f--){var d=c[f],e=Y(i);x(e,x(d));b.ub(e,d.style.cssText);b.wb(e,d);b.Ib(d)}return a};function Ib(a){var l=this,p="",r=["av","pv","ds","dn"],e=[],q,j=0,h=0,d=0;function i(){E(a,q,e[d||j||h&2||h]);b.C(a,"pointer-events",d?"none":"")}function c(){j=0;i();g.A(f,"mouseup",c);g.A(f,"touchend",c);g.A(f,"touchcancel",c)}function o(a){if(d)g.vb(a);else{j=4;i();g.a(f,"mouseup",c);g.a(f,"touchend",c);g.a(f,"touchcancel",c)}}l.Ad=function(a){if(a===k)return h;h=a&2||a&1;i()};l.oc=function(a){if(a===k)return!d;d=a?0:3;i()};l.N=a=g.cb(a);var m=b.ee(x(a));if(m)p=m.shift();n(r,function(a){e.push(p+a)});q=cb(" ",e);e.unshift("");g.a(a,"mousedown",o);g.a(a,"touchstart",o)}g.de=function(a){return new Ib(a)};g.C=D;g.gb=m("overflow");g.r=m("top",2);g.v=m("left",2);g.k=m("width",2);g.l=m("height",2);g.ce=m("marginLeft",2);g.ge=m("marginTop",2);g.q=m("position");g.D=m("display");g.p=m("zIndex",1);g.Lb=function(b,a,c){if(a!=k)Gb(b,a,c);else return 
Eb(b)};g.ub=function(a,b){if(b!=k)a.style.cssText=b;else return a.style.cssText};var U={ib:g.Lb,i:g.r,f:g.v,xb:g.k,Hb:g.l,mb:g.q,ef:g.D,nb:g.p};function w(f,l){var e=P(),b=K(),d=wb(),h=L(f);function i(b,d,a){var e=b.M(u(-d/2,-a/2)),f=b.M(u(d/2,-a/2)),g=b.M(u(d/2,a/2)),h=b.M(u(-d/2,a/2));b.M(u(300,300));return u(c.min(e.x,f.x,g.x,h.x)+d/2,c.min(e.y,f.y,g.y,h.y)+a/2)}function a(d,a){a=a||{};var f=a.J||0,l=(a.I||0)%360,m=(a.K||0)%360,o=(a.Q||0)%360,p=a.df;if(e){f=0;l=0;m=0;p=0}var c=new Db(a.T,a.V,f);c.I(l);c.K(m);c.Xd(o);c.Wd(a.Bb,a.Ab);c.ic(a.m,a.n,p);if(b){c.ob(a.pb,a.kb);d.style[h]=c.Vd()}else if(!W||W<9){var j="";if(o||a.m!=k&&a.m!=1||a.n!=k&&a.n!=1){var n=i(c,a.S,a.R);g.ge(d,n.y);g.ce(d,n.x);j=c.Ud()}var r=d.style.filter,s=new RegExp(/[\s]*progid:DXImageTransform\.Microsoft\.Matrix\([^\)]*\)/g),q=I(r,[s],j);ub(d,q)}}w=function(e,c){c=c||{};var h=c.pb,i=c.kb,f;n(U,function(a,b){f=c[b];f!==k&&a(e,f)});g.td(e,c.c);if(!b){h!=k&&g.v(e,c.yc+h);i!=k&&g.r(e,c.zc+i)}if(c.Rd)if(d)sb(g.W(j,N,e,c));else a(e,c)};g.zb=N;if(d)g.zb=w;if(e)g.zb=a;else if(!b)a=N;g.B=w;w(f,l)}g.zb=w;g.B=w;function Db(i,l,p){var d=this,b=[1,0,0,0,0,1,0,0,0,0,1,0,i||0,l||0,p||0,1],h=c.sin,g=c.cos,m=c.tan;function f(a){return a*c.PI/180}function o(a,b){return{x:a,y:b}}function n(c,e,l,m,o,r,t,u,w,z,A,C,E,b,f,k,a,g,i,n,p,q,s,v,x,y,B,D,F,d,h,j){return[c*a+e*p+l*x+m*F,c*g+e*q+l*y+m*d,c*i+e*s+l*B+m*h,c*n+e*v+l*D+m*j,o*a+r*p+t*x+u*F,o*g+r*q+t*y+u*d,o*i+r*s+t*B+u*h,o*n+r*v+t*D+u*j,w*a+z*p+A*x+C*F,w*g+z*q+A*y+C*d,w*i+z*s+A*B+C*h,w*n+z*v+A*D+C*j,E*a+b*p+f*x+k*F,E*g+b*q+f*y+k*d,E*i+b*s+f*B+k*h,E*n+b*v+f*D+k*j]}function e(c,a){return n.apply(j,(a||b).concat(c))}d.ic=function(a,c,d){if(a==k)a=1;if(c==k)c=1;if(d==k)d=1;if(a!=1||c!=1||d!=1)b=e([a,0,0,0,0,c,0,0,0,0,d,0,0,0,0,1])};d.ob=function(a,c,d){b[12]+=a||0;b[13]+=c||0;b[14]+=d||0};d.I=function(c){if(c){a=f(c);var d=g(a),i=h(a);b=e([1,0,0,0,0,d,i,0,0,-i,d,0,0,0,0,1])}};d.K=function(c){if(c){a=f(c);var 
d=g(a),i=h(a);b=e([d,0,-i,0,0,1,0,0,i,0,d,0,0,0,0,1])}};d.Xd=function(c){if(c){a=f(c);var d=g(a),i=h(a);b=e([d,i,0,0,-i,d,0,0,0,0,1,0,0,0,0,1])}};d.Wd=function(a,c){if(a||c){i=f(a);l=f(c);b=e([1,m(l),0,0,m(i),1,0,0,0,0,1,0,0,0,0,1])}};d.M=function(c){var a=e(b,[1,0,0,0,0,1,0,0,0,0,1,0,c.x,c.y,0,1]);return o(a[12],a[13])};d.Vd=function(){return"matrix3d("+b.join(",")+")"};d.Ud=function(){return"progid:DXImageTransform.Microsoft.Matrix(M11="+b[0]+", M12="+b[4]+", M21="+b[1]+", M22="+b[5]+", SizingMethod='auto expand')"}}new function(){var a=this;function b(d,g){for(var j=d[0].length,i=d.length,h=g[0].length,f=[],c=0;c<i;c++)for(var k=f[c]=[],b=0;b<h;b++){for(var e=0,a=0;a<j;a++)e+=d[c][a]*g[a][b];k[b]=e}return f}a.m=function(b,c){return a.Cc(b,c,0)};a.n=function(b,c){return a.Cc(b,0,c)};a.Cc=function(a,c,d){return b(a,[[c,0],[0,d]])};a.M=function(d,c){var a=b(d,[[c.x],[c.y]]);return u(a[0][0],a[1][0])}};var O={yc:0,zc:0,pb:0,kb:0,X:1,m:1,n:1,Q:0,I:0,K:0,T:0,V:0,J:0,Bb:0,Ab:0};g.Pd=function(a){var c=a||{};if(a)if(b.uc(a))c={fc:c};else if(b.uc(a.c))c.c={fc:a.c};return c};g.Od=function(l,m,w,n,y,z,o){var a=m;if(l){a={};for(var g in m){var A=z[g]||1,v=y[g]||[0,1],f=(w-v[0])/v[1];f=c.min(c.max(f,0),1);f=f*A;var u=c.floor(f);if(f!=u)f-=u;var h=n.fc||e.Fd,i,B=l[g],q=m[g];if(b.Me(q)){h=n[g]||h;var x=h(f);i=B+q*x}else{i=b.L({Fb:{}},l[g]);b.e(q.Fb||q,function(d,a){if(n.c)h=n.c[a]||n.c.fc||h;var c=h(f),b=d*c;i.Fb[a]=b;i[a]+=b})}a[g]=i}var t=b.e(m,function(b,a){return O[a]!=k});t&&b.e(O,function(c,b){if(a[b]==k&&l[b]!==k)a[b]=l[b]});if(t){if(a.X)a.m=a.n=a.X;a.S=o.S;a.R=o.R;a.Rd=d}}if(m.c&&o.ob){var p=a.c.Fb,s=(p.i||0)+(p.s||0),r=(p.f||0)+(p.u||0);a.f=(a.f||0)+r;a.i=(a.i||0)+s;a.c.f-=r;a.c.u-=r;a.c.i-=s;a.c.s-=s}if(a.c&&b.ud()&&!a.c.i&&!a.c.f&&a.c.u==o.S&&a.c.s==o.R)a.c=j;return a}};function n(){var a=this,d=[];function i(a,b){d.push({gc:a,Yb:b})}function 
g(a,c){b.e(d,function(b,e){b.gc==a&&b.Yb===c&&d.splice(e,1)})}a.lb=a.addEventListener=i;a.removeEventListener=g;a.g=function(a){var c=[].slice.call(arguments,1);b.e(d,function(b){b.gc==a&&b.Yb.apply(h,c)})}}var m=function(z,C,i,J,M,L){z=z||0;var a=this,q,n,o,u,A=0,G,H,F,B,y=0,g=0,m=0,D,k,f,e,p,w=[],x;function O(a){f+=a;e+=a;k+=a;g+=a;m+=a;y+=a}function t(o){var h=o;if(p&&(h>=e||h<=f))h=((h-f)%p+p)%p+f;if(!D||u||g!=h){var j=c.min(h,e);j=c.max(j,f);if(!D||u||j!=m){if(L){var l=(j-k)/(C||1);if(i.Jd)l=1-l;var n=b.Od(M,L,l,G,F,H,i);if(x)b.e(n,function(b,a){x[a]&&x[a](J,b)});else b.B(J,n)}a.Sb(m-k,j-k);m=j;b.e(w,function(b,c){var a=o<g?w[w.length-c-1]:b;a.H(m-y)});var r=g,q=m;g=h;D=d;a.tb(r,q)}}}function E(a,b,d){b&&a.Ob(e);if(!d){f=c.min(f,a.xc()+y);e=c.max(e,a.Mb()+y)}w.push(a)}var r=h.requestAnimationFrame||h.webkitRequestAnimationFrame||h.mozRequestAnimationFrame||h.msRequestAnimationFrame;if(b.Le()&&b.qc()<7)r=j;r=r||function(a){b.nc(a,i.wc)};function I(){if(q){var d=b.E(),e=c.min(d-A,i.vc),a=g+e*o;A=d;if(a*o>=n*o)a=n;t(a);if(!u&&a*o>=n*o)K(B);else r(I)}}function s(h,i,j){if(!q){q=d;u=j;B=i;h=c.max(h,f);h=c.min(h,e);n=h;o=n<g?-1:1;a.Hc();A=b.E();r(I)}}function K(b){if(q){u=q=B=l;a.tc();b&&b()}}a.sc=function(a,b,c){s(a?g+a:e,b,c)};a.pc=s;a.O=K;a.Dd=function(a){s(a)};a.G=function(){return g};a.Ac=function(){return n};a.jb=function(){return m};a.H=t;a.ob=function(a){t(g+a)};a.Bc=function(){return q};a.Kd=function(a){p=a};a.Ob=O;a.Fc=function(a,b){E(a,0,b)};a.Tb=function(a){E(a,1)};a.xc=function(){return f};a.Mb=function(){return e};a.tb=a.Hc=a.tc=a.Sb=b.Gc;a.dc=b.E();i=b.L({wc:16,vc:50},i);p=i.mc;x=i.Yd;f=k=z;e=z+C;H=i.Zd||{};F=i.be||{};G=b.Pd(i.hb)};new(function(){});var i=function(p,dc){var g=this;function Ac(){var a=this;m.call(a,-1e8,2e8);a.Ne=function(){var b=a.jb(),d=c.floor(b),f=t(d),e=b-c.floor(b);return{P:f,De:d,mb:e}};a.tb=function(b,a){var e=c.floor(a);if(e!=a&&a>b)e++;Sb(e,d);g.g(i.ie,t(a),t(b),a,b)}}function zc(){var 
a=this;m.call(a,0,0,{mc:q});b.e(C,function(b){D&1&&b.Kd(q);a.Tb(b);b.Ob(ib/Zb)})}function yc(){var a=this,b=Tb.N;m.call(a,-1,2,{hb:e.Hd,Yd:{mb:Yb},mc:q},b,{mb:1},{mb:-2});a.Eb=b}function mc(o,n){var b=this,e,f,h,k,c;m.call(b,-1e8,2e8,{vc:100});b.Hc=function(){M=d;S=j;g.g(i.Ee,t(w.G()),w.G())};b.tc=function(){M=l;k=l;var a=w.Ne();g.g(i.ke,t(w.G()),w.G());!a.mb&&Cc(a.De,s)};b.tb=function(i,g){var b;if(k)b=c;else{b=f;if(h){var d=g/h;b=a.me(d)*(f-e)+e}}w.H(b)};b.Db=function(a,d,c,g){e=a;f=d;h=c;w.H(a);b.H(0);b.pc(c,g)};b.oe=function(a){k=d;c=a;b.sc(a,j,d)};b.qe=function(a){c=a};w=new Ac;w.Fc(o);w.Fc(n)}function oc(){var c=this,a=Wb();b.p(a,0);b.C(a,"pointerEvents","none");c.N=a;c.Cb=function(){b.F(a);b.Ec(a)}}function wc(o,f){var e=this,r,L,v,k,y=[],x,B,W,G,Q,F,h,w,p;m.call(e,-u,u+1,{});function E(a){r&&r.Qc();T(o,a,0);F=d;r=new I.Z(o,I,b.ld(b.j(o,"idle"))||kc);r.H(0)}function Z(){r.dc<I.dc&&E()}function M(p,r,o){if(!G){G=d;if(k&&o){var h=o.width,c=o.height,n=h,m=c;if(h&&c&&a.ab){if(a.ab&3&&(!(a.ab&4)||h>K||c>J)){var j=l,q=K/J*c/h;if(a.ab&1)j=q>1;else if(a.ab&2)j=q<1;n=j?h*J/c:K;m=j?J:c*K/h}b.k(k,n);b.l(k,m);b.r(k,(J-m)/2);b.v(k,(K-n)/2)}b.q(k,"absolute");g.g(i.se,f)}}b.F(r);p&&p(e)}function Y(b,c,d,g){if(g==S&&s==f&&N)if(!Bc){var a=t(b);A.Ke(a,f,c,e,d);c.ue();U.Ob(a-U.xc()-1);U.H(a);z.Db(b,b,0)}}function cb(b){if(b==S&&s==f){if(!h){var a=j;if(A)if(A.P==f)a=A.Ie();else A.Cb();Z();h=new uc(o,f,a,r);h.Nc(p)}!h.Bc()&&h.Nb()}}function R(d,g,l){if(d==f){if(d!=g)C[g]&&C[g].we();else!l&&h&&h.ye();p&&p.oc();var m=S=b.E();e.eb(b.W(j,cb,m))}else{var k=c.min(f,d),i=c.max(f,d),o=c.min(i-k,k+q-i),n=u+a.xe-1;(!Q||o<=n)&&e.eb()}}function db(){if(s==f&&h){h.O();p&&p.te();p&&p.re();h.hd()}}function eb(){s==f&&h&&h.O()}function ab(a){!P&&g.g(i.ne,f,a)}function O(){p=w.pInstance;h&&h.Nc(p)}e.eb=function(c,a){a=a||v;if(y.length&&!G){b.fb(a);if(!W){W=d;g.g(i.le,f);b.e(y,function(a){if(!b.bb(a,"src")){a.src=b.j(a,"src2");b.D(a,a["display-origin"])}})}b.nd(y,k,b.W(j,M,c,a))}else 
M(c,a)};e.he=function(){var h=f;if(a.gd<0)h-=q;var d=h+a.gd*sc;if(D&2)d=t(d);if(!(D&1))d=c.max(0,c.min(d,q-u));if(d!=f){if(A){var e=A.Ce(q);if(e){var i=S=b.E(),g=C[t(d)];return g.eb(b.W(j,Y,d,g,e,i),v)}}bb(d)}};e.Ub=function(){R(f,f,d)};e.we=function(){p&&p.te();p&&p.re();e.Mc();h&&h.Pe();h=j;E()};e.ue=function(){b.F(o)};e.Mc=function(){b.fb(o)};e.Je=function(){p&&p.oc()};function T(a,c,e){if(b.bb(a,"jssor-slider"))return;if(!F){if(a.tagName=="IMG"){y.push(a);if(!b.bb(a,"src")){Q=d;a["display-origin"]=b.D(a);b.F(a)}}b.Ic()&&b.p(a,(b.p(a)||0)+1)}var f=b.Gb(a);b.e(f,function(f){var h=f.tagName,i=b.j(f,"u");if(i=="player"&&!w){w=f;if(w.pInstance)O();else b.a(w,"dataavailable",O)}if(i=="caption"){if(c){b.Jc(f,b.j(f,"to"));b.Ae(f,b.j(f,"bf"));b.j(f,"3d")&&b.Be(f,"preserve-3d")}else if(!b.Zc()){var g=b.db(f,l,d);b.wb(g,f,a);b.Ib(f,a);f=g;c=d}}else if(!F&&!e&&!k){if(h=="A"){if(b.j(f,"u")=="image")k=b.Te(f,"IMG");else k=b.o(f,"image",d);if(k){x=f;b.D(x,"block");b.B(x,V);B=b.db(x,d);b.q(x,"relative");b.Lb(B,0);b.C(B,"backgroundColor","#000")}}else if(h=="IMG"&&b.j(f,"u")=="image")k=f;if(k){k.border=0;b.B(k,V)}}T(f,c,e+1)})}e.Sb=function(c,b){var a=u-b;Yb(L,a)};e.P=f;n.call(e);b.Qe(o,b.j(o,"p"));b.je(o,b.j(o,"po"));var H=b.o(o,"thumb",d);if(H){e.He=b.db(H);b.F(H)}b.fb(o);v=b.db(fb);b.p(v,1e3);b.a(o,"click",ab);E(d);e.Kc=k;e.Wc=B;e.Eb=L=o;b.z(L,v);g.lb(203,R);g.lb(28,eb);g.lb(24,db)}function uc(y,f,p,q){var a=this,n=0,u=0,h,j,e,c,k,t,r,o=C[f];m.call(a,0,0);function v(){b.Ec(L);ac&&k&&o.Wc&&b.z(L,o.Wc);b.fb(L,!k&&o.Kc)}function w(){a.Nb()}function x(b){r=b;a.O();a.Nb()}a.Nb=function(){var b=a.jb();if(!B&&!M&&!r&&s==f){if(!b){if(h&&!k){k=d;a.hd(d);g.g(i.Ge,f,n,u,h,c)}v()}var l,p=i.ad;if(b!=c)if(b==e)l=c;else if(b==j)l=e;else if(!b)l=j;else l=a.Ac();g.g(p,f,b,n,j,e,c);var m=N&&(!E||F);if(b==c)(e!=c&&!(E&12)||m)&&o.he();else(m||b!=e)&&a.pc(l,w)}};a.ye=function(){e==c&&e==a.jb()&&a.H(j)};a.Pe=function(){A&&A.P==f&&A.Cb();var 
b=a.jb();b<c&&g.g(i.ad,f,-b-1,n,j,e,c)};a.hd=function(a){p&&b.gb(jb,a&&p.lc.bf?"":"hidden")};a.Sb=function(b,a){if(k&&a>=h){k=l;v();o.Mc();A.Cb();g.g(i.wd,f,n,u,h,c)}g.g(i.vd,f,a,n,j,e,c)};a.Nc=function(a){if(a&&!t){t=a;a.lb($JssorPlayer$.Cd,x)}};p&&a.Tb(p);h=a.Mb();a.Tb(q);j=h+q.Vc;e=h+q.Uc;c=a.Mb()}function Jb(a,c,d){b.v(a,c);b.r(a,d)}function Yb(c,b){var a=x>0?x:eb,d=zb*b*(a&1),e=Ab*b*(a>>1&1);Jb(c,d,e)}function Ob(){pb=M;Ib=z.Ac();G=w.G()}function fc(){Ob();if(B||!F&&E&12){z.O();g.g(i.sd)}}function cc(f){if(!B&&(F||!(E&12))&&!z.Bc()){var d=w.G(),b=c.ceil(G);if(f&&c.abs(H)>=a.Tc){b=c.ceil(d);b+=hb}if(!(D&1))b=c.min(q-u,c.max(b,0));var e=c.abs(b-d);e=1-c.pow(1-e,5);if(!P&&pb)z.Dd(Ib);else if(d==b){sb.Je();sb.Ub()}else z.Db(d,b,e*Ub)}}function Hb(a){!b.j(b.Qb(a),"nodrag")&&b.vb(a)}function qc(a){Xb(a,1)}function Xb(a,c){a=b.hc(a);var k=b.Qb(a);if(!O&&!b.j(k,"nodrag")&&rc()&&(!c||a.touches.length==1)){B=d;yb=l;S=j;b.a(f,c?"touchmove":"mousemove",Bb);b.E();P=0;fc();if(!pb)x=0;if(c){var h=a.touches[0];ub=h.clientX;vb=h.clientY}else{var e=b.rc(a);ub=e.x;vb=e.y}H=0;gb=0;hb=0;g.g(i.rd,t(G),G,a)}}function Bb(e){if(B){e=b.hc(e);var f;if(e.type!="mousemove"){var l=e.touches[0];f={x:l.clientX,y:l.clientY}}else f=b.rc(e);if(f){var j=f.x-ub,k=f.y-vb;if(c.floor(G)!=G)x=x||eb&O;if((j||k)&&!x){if(O==3)if(c.abs(k)>c.abs(j))x=2;else x=1;else x=O;if(mb&&x==1&&c.abs(k)-c.abs(j)>3)yb=d}if(x){var a=k,i=Ab;if(x==1){a=j;i=zb}if(!(D&1)){if(a>0){var g=i*s,h=a-g;if(h>0)a=g+c.sqrt(h)*5}if(a<0){var g=i*(q-u-s),h=-a-g;if(h>0)a=-g-c.sqrt(h)*5}}if(H-gb<-2)hb=0;else if(H-gb>2)hb=-1;gb=H;H=a;rb=G-H/i/(Y||1);if(H&&x&&!yb){b.vb(e);if(!M)z.oe(rb);else z.qe(rb)}}}}}function ab(){pc();if(B){B=l;b.E();b.A(f,"mousemove",Bb);b.A(f,"touchmove",Bb);P=H;z.O();var a=w.G();g.g(i.pd,t(a),a,t(G),G);E&12&&Ob();cc(d)}}function hc(c){if(P){b.pe(c);var a=b.Qb(c);while(a&&v!==a){a.tagName=="A"&&b.vb(c);try{a=a.parentNode}catch(d){break}}}}function jc(a){C[s];s=t(a);sb=C[s];Sb(a);return s}function 
Cc(a,b){x=0;jc(a);g.g(i.fe,t(a),b)}function Sb(a,c){wb=a;b.e(T,function(b){b.ac(t(a),a,c)})}function rc(){var b=i.Pc||0,a=X;if(mb)a&1&&(a&=1);i.Pc|=a;return O=a&~b}function pc(){if(O){i.Pc&=~X;O=0}}function Wb(){var a=b.Y();b.B(a,V);b.q(a,"absolute");return a}function t(a){return(a%q+q)%q}function ic(b,d){if(d)if(!D){b=c.min(c.max(b+wb,0),q-u);d=l}else if(D&2){b=t(b+wb);d=l}bb(b,a.Zb,d)}function xb(){b.e(T,function(a){a.Vb(a.yb.af<=F)})}function Ec(){if(!F){F=1;xb();if(!B){E&12&&cc();E&3&&C[s].Ub()}}}function Dc(){if(F){F=0;xb();B||!(E&12)||fc()}}function gc(){V={xb:K,Hb:J,i:0,f:0};b.e(Q,function(a){b.B(a,V);b.q(a,"absolute");b.gb(a,"hidden");b.F(a)});b.B(fb,V)}function ob(b,a){bb(b,a,d)}function bb(g,f,j){if(Qb&&(!B&&(F||!(E&12))||a.Yc)){M=d;B=l;z.O();if(f==k)f=Ub;var e=Cb.jb(),b=g;if(j){b=e+g;if(g>0)b=c.ceil(b);else b=c.floor(b)}if(D&2)b=t(b);if(!(D&1))b=c.max(0,c.min(b,q-u));var i=(b-e)%q;b=e+i;var h=e==b?0:f*c.abs(i);h=c.min(h,f*u*1.5);z.Db(e,b,h||1)}}g.od=bb;g.sc=function(){if(!N){N=d;C[s]&&C[s].Ub()}};g.ae=function(){return P};function W(){return b.k(y||p)}function lb(){return b.l(y||p)}g.S=W;g.R=lb;function Eb(c,d){if(c==k)return b.k(p);if(!y){var a=b.Y(f);b.Dc(a,b.Dc(p));b.ub(a,b.ub(p));b.D(a,"block");b.q(a,"relative");b.r(a,0);b.v(a,0);b.gb(a,"visible");y=b.Y(f);b.q(y,"absolute");b.r(y,0);b.v(y,0);b.k(y,b.k(p));b.l(y,b.l(p));b.Jc(y,"0 0");b.z(y,a);var h=b.Gb(p);b.z(p,y);b.C(p,"backgroundImage","");b.e(h,function(c){b.z(b.j(c,"noscale")?p:a,c);b.j(c,"autocenter")&&Kb.push(c)})}Y=c/(d?b.l:b.k)(y);b.ze(y,Y);var g=d?Y*W():c,e=d?c:Y*lb();b.k(p,g);b.l(p,e);b.e(Kb,function(a){var c=b.md(b.j(a,"autocenter"));b.kd(a,c)})}g.Bd=Eb;g.Sc=function(a){var d=c.ceil(t(ib/Zb)),b=t(a-s+d);if(b>u){if(a-s>q/2)a-=q;else if(a-s<=-q/2)a+=q}else a=s+b-d;return a};n.call(g);g.N=p=b.cb(p);var a=b.L({ab:0,xe:1,cc:1,Wb:0,Xb:l,Rc:1,qb:d,Yc:d,gd:1,Xc:3e3,Lc:1,Zb:500,me:e.Id,Tc:20,Oc:0,U:1,bc:0,Td:1,sb:1,ec:1},dc);a.qb=a.qb&&b.zd();if(a.Sd!=k)a.Xc=a.Sd;if(a.Qd!=k)a.bc=a.Qd;var 
eb=a.sb&3,sc=(a.sb&4)/-4||1,kb=a.cf,I=b.L({Z:r,qb:a.qb},a.Ve);I.Rb=I.Rb||I.Ze;var Fb=a.We,Z=a.Ye,db=a.Nd,R=!a.Td,y,v=b.o(p,"slides",R),fb=b.o(p,"loading",R)||b.Y(f),Mb=b.o(p,"navigator",R),ec=b.o(p,"arrowleft",R),bc=b.o(p,"arrowright",R),Lb=b.o(p,"thumbnavigator",R),nc=b.k(v),lc=b.l(v),V,Q=[],tc=b.Gb(v);b.e(tc,function(a){if(a.tagName=="DIV"&&!b.j(a,"u"))Q.push(a);else b.Ic()&&b.p(a,(b.p(a)||0)+1)});var s=-1,wb,sb,q=Q.length,K=a.Md||nc,J=a.Ld||lc,Vb=a.Oc,zb=K+Vb,Ab=J+Vb,Zb=eb&1?zb:Ab,u=c.min(a.U,q),jb,x,O,yb,T=[],Pb,Rb,Nb,ac,Bc,N,E=a.Lc,kc=a.Xc,Ub=a.Zb,qb,tb,ib,Qb=u<q,D=Qb?a.Rc:0,X,P,F=1,M,B,S,ub=0,vb=0,H,gb,hb,Cb,w,U,z,Tb=new oc,Y,Kb=[];if(a.qb)Jb=function(a,c,d){b.zb(a,{T:c,V:d})};N=a.Xb;g.yb=dc;gc();b.bb(p,"jssor-slider",d);b.p(v,b.p(v)||0);b.q(v,"absolute");jb=b.db(v,d);b.wb(jb,v);if(kb){ac=kb.Xe;qb=kb.Z;tb=u==1&&q>1&&qb&&(!b.Zc()||b.qc()>=8)}ib=tb||u>=q||!(D&1)?0:a.bc;X=(u>1||ib?eb:-1)&a.ec;var Gb=v,C=[],A,L,Db=b.Oe(),mb=Db.Fe,G,pb,Ib,rb;Db.id&&b.C(Gb,Db.id,([j,"pan-y","pan-x","none"])[X]||"");U=new yc;if(tb)A=new qb(Tb,K,J,kb,mb);b.z(jb,U.Eb);b.gb(v,"hidden");L=Wb();b.C(L,"backgroundColor","#000");b.Lb(L,0);b.wb(L,Gb.firstChild,Gb);for(var cb=0;cb<Q.length;cb++){var vc=Q[cb],xc=new wc(vc,cb);C.push(xc)}b.F(fb);Cb=new zc;z=new mc(Cb,U);if(X){b.a(v,"mousedown",Xb);b.a(v,"touchstart",qc);b.a(v,"dragstart",Hb);b.a(v,"selectstart",Hb);b.a(f,"mouseup",ab);b.a(f,"touchend",ab);b.a(f,"touchcancel",ab);b.a(h,"blur",ab)}E&=mb?10:5;if(Mb&&Fb){Pb=new Fb.Z(Mb,Fb,W(),lb());T.push(Pb)}if(Z&&ec&&bc){Z.Rc=D;Z.U=u;Rb=new Z.Z(ec,bc,Z,W(),lb());T.push(Rb)}if(Lb&&db){db.Wb=a.Wb;Nb=new db.Z(Lb,db);T.push(Nb)}b.e(T,function(a){a.Kb(q,C,fb);a.lb(o.fd,ic)});b.C(p,"visibility","visible");Eb(W());b.a(v,"click",hc);b.a(p,"mouseout",b.Jb(Ec,p));b.a(p,"mouseover",b.Jb(Dc,p));xb();a.cc&&b.a(f,"keydown",function(b){if(b.keyCode==37)ob(-a.cc);else b.keyCode==39&&ob(a.cc)});var 
nb=a.Wb;if(!(D&1))nb=c.max(0,c.min(nb,q-u));z.Db(nb,nb,0)};i.ne=21;i.rd=22;i.pd=23;i.Ee=24;i.ke=25;i.le=26;i.se=27;i.sd=28;i.ie=202;i.fe=203;i.Ge=206;i.wd=207;i.vd=208;i.ad=209;var o={fd:1};var q=function(g,B){var h=this,z,p,a,v=[],x,w,e,q,r,u,t,m,s,f,k;n.call(h);g=b.cb(g);function A(n,f){var g=this,c,m,l;function q(){m.Ad(p==f)}function i(d){if(d||!s.ae()){var a=e-f%e,b=s.Sc((f+a)/e-1),c=b*e+e-a;h.g(o.fd,c)}}g.P=f;g.bd=q;l=n.He||n.Kc||b.Y();g.Eb=c=b.qd(k,"thumbnailtemplate",l,d);m=b.de(c);a.Pb&1&&b.a(c,"click",b.W(j,i,0));a.Pb&2&&b.a(c,"mouseover",b.Jb(b.W(j,i,1),c))}h.ac=function(b,d,f){var a=p;p=b;a!=-1&&v[a].bd();v[b].bd();!f&&s.od(s.Sc(c.floor(d/e)))};h.Vb=function(a){b.fb(g,a)};var y;h.Kb=function(D,C){if(!y){z=D;c.ceil(z/e);p=-1;m=c.min(m,C.length);var h=a.rb&1,n=u+(u+q)*(e-1)*(1-h),k=t+(t+r)*(e-1)*h,B=n+(n+q)*(m-1)*h,o=k+(k+r)*(m-1)*(1-h);b.q(f,"absolute");b.gb(f,"hidden");a.cd&1&&b.v(f,(x-B)/2);a.cd&2&&b.r(f,(w-o)/2);b.k(f,B);b.l(f,o);var j=[];b.e(C,function(l,g){var i=new A(l,g),d=i.Eb,a=c.floor(g/e),k=g%e;b.v(d,(u+q)*k*(1-h));b.r(d,(t+r)*k*h);if(!j[a]){j[a]=b.Y();b.z(f,j[a])}b.z(j[a],d);v.push(i)});var E=b.L({Xb:l,Yc:l,Md:n,Ld:k,Oc:q*h+r*(1-h),Tc:12,Zb:200,Lc:1,sb:a.rb,ec:a.Ed||a.Ue?0:a.rb},a);s=new i(g,E);y=d}};h.yb=a=b.L({ed:0,dd:0,U:1,rb:1,cd:3,Pb:1},B);x=b.k(g);w=b.l(g);f=b.o(g,"slides",d);k=b.o(f,"prototype");u=b.k(k);t=b.l(k);b.Ib(k,f);e=a.xd||1;q=a.ed;r=a.dd;m=a.U;a.ic==l&&b.bb(g,"noscale",d)};function r(e,d,c){var a=this;m.call(a,0,c);a.Qc=b.Gc;a.Vc=0;a.Uc=c}jssor_1_slider_init=function(){var f={Xb:l,ec:2,sb:2,Nd:{Z:q,U:9,rb:2,bc:0,Ed:d}},e=new i("jssor_1",f);function a(){var b=e.N.parentNode.clientWidth;if(b){b=c.min(b,700);e.Bd(b)}else h.setTimeout(a,30)}a();b.a(h,"load",a);b.a(h,"resize",a);b.a(h,"orientationchange",a)}})(window,document,Math,null,true,false)
</script>
""")
# Thumbnail skin CSS: .p is one label cell (self.labelwidth wide), .w its
# bordered box, .c the centred 29px-line-height caption text.
stream.write("""<style>
.jssort13 .p{POSITION:absolute;WIDTH:""" + str(self.labelwidth) + """px;HEIGHT:30px;TOP:0;LEFT:0;padding:0}.jssort13 .w{cursor:pointer;position:absolute;WIDTH:""" + str(self.labelwidth-2) + """px;HEIGHT:29px;border:1px solid gray;top:-1px;left:0}.jssort13 .pav .w,.jssort13 .pdn .w{border-right:1px solid #fff}.jssort13 .c{width:100%;height:100%;position:absolute;top:0;left:0;line-height:29px;text-align:center;color:#000;font-size:13px}.jssort13 .p .c,.jssort13 .pav:hover .c{background-color:#eee}.jssort13 .pav .c,.jssort13 .p:hover .c{background-color:#fff}
</style>
""")
# Flag that the shared <script>/<style> assets have been written -- presumably
# checked by the caller so they are emitted only once (confirm).
self.initialized = True
def preamble(self, stream):
# Opening markup: outer container sized (width + labelwidth) x height, a
# loading overlay, then the slides wrapper shifted right of the label column.
# NOTE(review): the fifth .format() argument fills the slides *height*
# placeholder with self.width-2; a value derived from self.height (cf.
# self.height-2) looks intended -- confirm before changing.
stream.write("""<div id="jssor_1" style="position: relative; margin: 0 auto; top: 0px; left: 0px; width: {}px; height: {}px; overflow: hidden; visibility: hidden; background-color: #ffffff;">
<!-- Loading Screen -->
<div data-u="loading" style="position: absolute; top: 0px; left: 0px;">
<div style="filter: alpha(opacity=70); opacity: 0.7; position: absolute; display: block; top: 0px; left: 0px; width: 100%; height: 100%;"></div>
<div style="position:absolute;display:block;background:url('img/loading.gif') no-repeat center center;top:0px;left:0px;width:100%;height:100%;"></div>
</div>
<div data-u="slides" style="cursor: default; position: relative; top: 0px; left: {}px; width: {}px; height: {}px; overflow: hidden; border: 1px solid #adadad">
""".format(self.width+self.labelwidth, self.height, self.labelwidth-1, self.width-1, self.width-2))
def closing(self, stream):
# Closing markup: the vertical thumbnail navigator (self.labelwidth x
# self.height), the hidden jssor attribution link, and the bootstrap call.
stream.write("""</div>
<!-- Thumbnail Navigator -->
<div data-u="thumbnavigator" class="jssort13" style="position:absolute;left:0px;top:0px;width:{}px;height:{}px;">
<!-- Thumbnail Item Skin Begin -->
<div data-u="slides" style="cursor: default; top: 0px; left: 0px; border-top: 1px solid gray;">
<div data-u="prototype" class="p">
<div class="w">
<div data-u="thumbnailtemplate" class="c"></div>
</div>
</div>
</div>
<!-- Thumbnail Item Skin End -->
</div>
<a href="http://www.jssor.com" style="display:none">Bootstrap Carousel</a>
</div>
<script>
jssor_1_slider_init();
</script>
""".format(self.labelwidth, self.height))
| 433.417266
| 27,137
| 0.585443
| 16,563
| 60,245
| 2.127875
| 0.034052
| 0.010328
| 0.004767
| 0.003178
| 0.820934
| 0.798916
| 0.764669
| 0.744694
| 0.722449
| 0.693026
| 0
| 0.026234
| 0.030675
| 60,245
| 138
| 27,138
| 436.557971
| 0.57729
| 0.005278
| 0
| 0.612613
| 0
| 0.207207
| 0.974386
| 0.849189
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0
| 0
| 0.153153
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
722a52340c35c1aede807124082eaad7fbe05eec
| 22,279
|
py
|
Python
|
src/backend/marsha/bbb/tests/test_api.py
|
insad/marsha
|
3c6627b9a1debbb594e43233df7b7edb88f57f45
|
[
"MIT"
] | null | null | null |
src/backend/marsha/bbb/tests/test_api.py
|
insad/marsha
|
3c6627b9a1debbb594e43233df7b7edb88f57f45
|
[
"MIT"
] | null | null | null |
src/backend/marsha/bbb/tests/test_api.py
|
insad/marsha
|
3c6627b9a1debbb594e43233df7b7edb88f57f45
|
[
"MIT"
] | null | null | null |
"""Tests for the meeting API."""
import json
import random
import re
from unittest import mock
from urllib.parse import quote_plus
import uuid
from django.test import TestCase, override_settings
from rest_framework_simplejwt.tokens import AccessToken
from marsha.bbb import api, serializers
from marsha.core import factories as core_factories
from ..factories import MeetingFactory
from ..utils.bbb_utils import ApiMeetingException
# We don't enforce arguments documentation in tests
# pylint: disable=unused-argument
@override_settings(BBB_API_ENDPOINT="https://10.7.7.1/bigbluebutton/api")
@override_settings(BBB_API_SECRET="SuperSecret")
@override_settings(BBB_ENABLED=True)
class MeetingAPITest(TestCase):
    """Test for the Meeting API."""

    maxDiff = None

    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_meeting_fetch_student(self, mock_get_meeting_infos):
        """A student should be allowed to fetch a meeting."""
        meeting = MeetingFactory()
        # The serializer calls out to the BBB server; mock it out.
        mock_get_meeting_infos.return_value = {
            "returncode": "SUCCESS",
            "running": "true",
        }
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = ["student"]
        response = self.client.get(
            f"/api/meetings/{meeting.id!s}/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertDictEqual(
            {
                "id": str(meeting.id),
                "infos": {"returncode": "SUCCESS", "running": "true"},
                "lti_id": str(meeting.lti_id),
                "title": meeting.title,
                "started": False,
                "meeting_id": str(meeting.meeting_id),
                "welcome_text": meeting.welcome_text,
                "playlist": {
                    "id": str(meeting.playlist.id),
                    "title": meeting.playlist.title,
                    "lti_id": meeting.playlist.lti_id,
                },
            },
            content,
        )

    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_meeting_fetch_instructor(self, mock_get_meeting_infos):
        """An instructor should be able to fetch a meeting."""
        meeting = MeetingFactory()
        mock_get_meeting_infos.return_value = {
            "returncode": "SUCCESS",
            "running": "true",
        }
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        response = self.client.get(
            f"/api/meetings/{meeting.id!s}/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertDictEqual(
            {
                "id": str(meeting.id),
                "infos": {"returncode": "SUCCESS", "running": "true"},
                "lti_id": str(meeting.lti_id),
                "title": meeting.title,
                "started": False,
                "meeting_id": str(meeting.meeting_id),
                "welcome_text": meeting.welcome_text,
                "playlist": {
                    "id": str(meeting.playlist.id),
                    "title": meeting.playlist.title,
                    "lti_id": meeting.playlist.lti_id,
                },
            },
            content,
        )

    def test_api_meeting_update_anonymous(self):
        """An anonymous should not be able to update a meeting."""
        meeting = MeetingFactory()
        response = self.client.put(f"/api/meetings/{meeting.id!s}/")
        self.assertEqual(response.status_code, 401)

    def test_api_meeting_update_user_logged_in(self):
        """An logged in user should not be able to update a meeting."""
        user = core_factories.UserFactory(
            first_name="Jane", last_name="Doe", email="jane.doe@example.com"
        )
        meeting = MeetingFactory()
        # Session login only: the API requires a resource JWT, so 401.
        self.client.force_login(user)
        response = self.client.put(f"/api/meetings/{meeting.id!s}/")
        self.assertEqual(response.status_code, 401)

    def test_api_meeting_update_student(self):
        """A student user should not be able to update a meeting."""
        meeting = MeetingFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = ["student"]
        data = {"title": "new title"}
        response = self.client.put(
            f"/api/meetings/{meeting.id!s}/",
            json.dumps(data),
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 403)

    def test_api_meeting_update_instructor_read_only(self):
        """An instructor should not be able to update a meeting in read_only."""
        meeting = MeetingFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        # can_update False is what makes this a read-only token -> 403.
        jwt_token.payload["permissions"] = {"can_update": False}
        data = {"title": "new title"}
        response = self.client.put(
            f"/api/meetings/{meeting.id!s}/",
            data,
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 403)

    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_meeting_update_instructor(self, mock_get_meeting_infos):
        """An instructor should be able to update a meeting."""
        meeting = MeetingFactory()
        mock_get_meeting_infos.return_value = {
            "returncode": "SUCCESS",
            "running": "true",
        }
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        data = {"title": "new title", "welcome_text": "Hello"}
        response = self.client.put(
            f"/api/meetings/{meeting.id!s}/",
            data,
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 200)
        meeting.refresh_from_db()
        self.assertEqual("new title", meeting.title)
        self.assertEqual("Hello", meeting.welcome_text)

    def test_api_select_instructor_no_bbb_server(self):
        """An instructor should be able to fetch a meeting lti select."""
        playlist = core_factories.PlaylistFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = "None"
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        jwt_token.payload["playlist_id"] = str(playlist.id)
        response = self.client.get(
            "/api/meetings/lti-select/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        # The "new_url" ends with a freshly generated v4 UUID.
        new_uuid = re.search(
            "http://testserver/lti/meetings/(.*)", response.json().get("new_url", "")
        ).group(1)
        self.assertEqual(uuid.UUID(new_uuid).version, 4)
        self.assertDictEqual(
            {
                "new_url": f"http://testserver/lti/meetings/{new_uuid}",
                "meetings": [],
            },
            response.json(),
        )

    def test_api_select_instructor_no_meetings(self):
        """An instructor should be able to fetch a meeting lti select."""
        playlist = core_factories.PlaylistFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = "None"
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        jwt_token.payload["playlist_id"] = str(playlist.id)
        response = self.client.get(
            "/api/meetings/lti-select/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        new_uuid = re.search(
            "http://testserver/lti/meetings/(.*)", response.json().get("new_url", "")
        ).group(1)
        self.assertEqual(uuid.UUID(new_uuid).version, 4)
        self.assertDictEqual(
            {
                "new_url": f"http://testserver/lti/meetings/{new_uuid}",
                "meetings": [],
            },
            response.json(),
        )

    def test_api_select_instructor(self):
        """An instructor should be able to fetch a meeting lti select."""
        # playlist = core_factories.PlaylistFactory()
        # MeetingFactory.build_batch(3, playlist=playlist)
        meeting = MeetingFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = "None"
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        jwt_token.payload["playlist_id"] = str(meeting.playlist_id)
        response = self.client.get(
            "/api/meetings/lti-select/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        new_uuid = re.search(
            "http://testserver/lti/meetings/(.*)", response.json().get("new_url", "")
        ).group(1)
        self.assertEqual(uuid.UUID(new_uuid).version, 4)
        self.assertDictEqual(
            {
                "new_url": f"http://testserver/lti/meetings/{new_uuid}",
                "meetings": [
                    {
                        "id": str(meeting.id),
                        "lti_id": str(meeting.lti_id),
                        "lti_url": f"http://testserver/lti/meetings/{str(meeting.id)}",
                        "title": meeting.title,
                        "meeting_id": str(meeting.meeting_id),
                        "playlist": {
                            "id": str(meeting.playlist_id),
                            "title": meeting.playlist.title,
                            "lti_id": meeting.playlist.lti_id,
                        },
                    }
                ],
            },
            response.json(),
        )

    @mock.patch.object(api, "create")
    def test_api_bbb_create_anonymous(self, mock_create_request):
        """An anonymous should not be able to create a meeting."""
        meeting = MeetingFactory()
        response = self.client.patch(f"/api/meetings/{meeting.id}/create/")
        self.assertEqual(response.status_code, 401)
        mock_create_request.assert_not_called()

    @mock.patch.object(api, "create")
    def test_api_bbb_create_user_logged_in(self, mock_create_request):
        """A logged in user should not be able to create a meeting."""
        user = core_factories.UserFactory(
            first_name="Jane", last_name="Doe", email="jane.doe@example.com"
        )
        meeting = MeetingFactory()
        self.client.force_login(user)
        response = self.client.patch(f"/api/meetings/{meeting.id}/create/")
        self.assertEqual(response.status_code, 401)
        mock_create_request.assert_not_called()

    @mock.patch.object(api, "create")
    def test_api_bbb_create_student(self, mock_create_request):
        """A student should not be able to create a meeting."""
        meeting = MeetingFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = ["student"]
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/create/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 403)
        mock_create_request.assert_not_called()

    @mock.patch.object(api, "create")
    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_bbb_create_new_meeting(
        self, mock_get_meeting_infos, mock_create_request
    ):
        """Starting a meeting with parameters should store them."""
        meeting = MeetingFactory()
        mock_get_meeting_infos.return_value = {"returncode": "SUCCESS"}
        mock_create_request.return_value = {"returncode": "SUCCESS"}
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        data = {"title": "new title", "welcome_text": "Hello"}
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/create/",
            data,
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(
            {
                "returncode": "SUCCESS",
            },
            response.data,
        )
        # The payload must have been persisted before calling BBB.
        meeting.refresh_from_db()
        self.assertEqual("new title", meeting.title)
        self.assertEqual("Hello", meeting.welcome_text)
        mock_create_request.assert_called_with(meeting=meeting)

    @mock.patch.object(api, "create")
    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_bbb_create_existing_meeting(
        self, mock_get_meeting_infos, mock_create_request
    ):
        """No new meeting should be started if a BBB meeting exists for the same id."""
        meeting = MeetingFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Meeting 1",
            attendee_password="9#R1kuUl3R",
            moderator_password="0$C7Aaz0o",
        )
        mock_get_meeting_infos.return_value = {"returncode": "SUCCESS"}
        # Simulate the BBB server refusing a duplicate meeting ID.
        mock_create_request.side_effect = ApiMeetingException(
            {"message": "A meeting already exists with that meeting ID."}
        )
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        data = {"title": meeting.title, "welcome_text": meeting.welcome_text}
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/create/",
            data,
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 400)
        self.assertDictEqual(
            response.data,
            {"message": "A meeting already exists with that meeting ID."},
        )
        mock_create_request.assert_called_with(meeting=meeting)

    @mock.patch.object(api, "join")
    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_bbb_join_meeting_anonymous(
        self, mock_get_meeting_infos, mock_join_request
    ):
        """An anonymous should not be able to join a meeting."""
        meeting = MeetingFactory()
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/join/",
        )
        self.assertEqual(response.status_code, 401)
        mock_get_meeting_infos.assert_not_called()
        mock_join_request.assert_not_called()

    @mock.patch.object(api, "join")
    def test_api_bbb_join_meeting_user_logged_in(self, mock_join_request):
        """A logged in user should not be able to join a meeting."""
        user = core_factories.UserFactory(
            first_name="Jane", last_name="Doe", email="jane.doe@example.com"
        )
        meeting = MeetingFactory()
        self.client.force_login(user)
        response = self.client.patch(f"/api/meetings/{meeting.id}/join/")
        self.assertEqual(response.status_code, 401)
        mock_join_request.assert_not_called()

    def test_api_bbb_join_student(self):
        """Joining a meeting as student should return an attendee meeting url."""
        meeting = MeetingFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Meeting 1",
            attendee_password="9#R1kuUl3R",
            moderator_password="0$C7Aaz0o",
        )
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = ["student"]
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        # Students join with the attendee password.
        self.assertIn(
            "https://10.7.7.1/bigbluebutton/api/join?"
            f"fullName=John+Doe&meetingID={meeting.meeting_id}&"
            f"password={quote_plus(meeting.attendee_password)}&redirect=true",
            response.data.get("url"),
        )

    def test_api_bbb_join_instructor(self):
        """Joining a meeting as instructor should return a moderator meeting url."""
        meeting = MeetingFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Meeting 1",
            attendee_password="9#R1kuUl3R",
            moderator_password="0$C7Aaz0o",
        )
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        # Instructors join with the moderator password.
        self.assertIn(
            "https://10.7.7.1/bigbluebutton/api/join?"
            f"fullName=John+Doe&meetingID={meeting.meeting_id}&"
            f"password={quote_plus(meeting.moderator_password)}&redirect=true",
            response.data.get("url"),
        )

    def test_api_bbb_join_instructor_no_fullname(self):
        """Joining a meeting without fullname parameter should return a 422."""
        meeting = MeetingFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Meeting 1",
            attendee_password="9#R1kuUl3R",
            moderator_password="0$C7Aaz0o",
        )
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/join/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        # NOTE(review): docstring says 422 but the API answers 400 — confirm.
        self.assertEqual(response.status_code, 400)
        self.assertDictEqual(
            {"message": "missing fullname parameter"},
            response.data,
        )

    @mock.patch.object(api, "end")
    def test_api_bbb_end_meeting_anonymous(self, mock_end_request):
        """An anonymous should not be able to end a meeting."""
        meeting = MeetingFactory()
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/end/",
        )
        self.assertEqual(response.status_code, 401)
        mock_end_request.assert_not_called()

    @mock.patch.object(api, "end")
    def test_api_bbb_end_meeting_user_logged_in(self, mock_end_request):
        """A logged in user should not be able to end a meeting."""
        user = core_factories.UserFactory(
            first_name="Jane", last_name="Doe", email="jane.doe@example.com"
        )
        meeting = MeetingFactory()
        self.client.force_login(user)
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/end/",
        )
        self.assertEqual(response.status_code, 401)
        mock_end_request.assert_not_called()

    @mock.patch.object(api, "end")
    def test_api_bbb_end_meeting_student(self, mock_end_request):
        """A student should not be able to end a meeting."""
        meeting = MeetingFactory()
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = ["student"]
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/end/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 403)
        mock_end_request.assert_not_called()

    @mock.patch.object(api, "end")
    def test_api_bbb_end_meeting_instructor(self, mock_end_request):
        """Ending a meeting as instructor should return a moderator meeting url."""
        meeting = MeetingFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Meeting 1",
            attendee_password="9#R1kuUl3R",
            moderator_password="0$C7Aaz0o",
        )
        mock_end_request.return_value = {
            "message": "A request to end the meeting was sent.",
            "messageKey": "sentEndMeetingRequest",
            "returncode": "SUCCESS",
        }
        jwt_token = AccessToken()
        jwt_token.payload["resource_id"] = str(meeting.id)
        jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
        jwt_token.payload["permissions"] = {"can_update": True}
        response = self.client.patch(
            f"/api/meetings/{meeting.id}/end/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(
            {
                "returncode": "SUCCESS",
                "message": "A request to end the meeting was sent.",
                "messageKey": "sentEndMeetingRequest",
            },
            response.data,
        )
| 38.813589
| 87
| 0.603663
| 2,438
| 22,279
| 5.30927
| 0.084085
| 0.048208
| 0.053307
| 0.05377
| 0.895396
| 0.881026
| 0.858699
| 0.855841
| 0.840158
| 0.823548
| 0
| 0.014875
| 0.269761
| 22,279
| 573
| 88
| 38.881326
| 0.780749
| 0.072176
| 0
| 0.737179
| 0
| 0
| 0.20037
| 0.057311
| 0
| 0
| 0
| 0
| 0.113248
| 1
| 0.051282
| false
| 0.025641
| 0.025641
| 0
| 0.081197
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a0c65cdd7e809f3d9154c85f0e2efbc95e809f86
| 24,534
|
py
|
Python
|
ion/services/sa/observatory/test/test_observatory_util.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 3
|
2016-09-20T09:50:06.000Z
|
2018-08-10T01:41:38.000Z
|
ion/services/sa/observatory/test/test_observatory_util.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | null | null | null |
ion/services/sa/observatory/test/test_observatory_util.py
|
ooici/coi-services
|
43246f46a82e597345507afd7dfc7373cb346afa
|
[
"BSD-2-Clause"
] | 2
|
2016-03-16T22:25:49.000Z
|
2016-11-26T14:54:21.000Z
|
#!/usr/bin/env python
__author__ = 'Michael Meisinger'
import unittest
from nose.plugins.attrib import attr
from pyon.public import RT, log
from pyon.util.unit_test import IonUnitTestCase
from ion.services.sa.observatory.mockutil import MockUtil
from ion.services.sa.observatory.observatory_util import ObservatoryUtil
from interface.objects import DeviceStatusType, DeviceCommsType, AggregateStatusType
DST = DeviceStatusType
def _devstat(dev_id, power, comms, data, loc):
    """Build a device-status dict for a device.

    Returns a dict with the ``device_id`` and an ``agg_status`` mapping
    of each AggregateStatusType to a ``{"status": value}`` entry for
    power, comms, data and location respectively.
    """
    agg_status = {
        AggregateStatusType.AGGREGATE_POWER: {"status": power},
        AggregateStatusType.AGGREGATE_COMMS: {"status": comms},
        AggregateStatusType.AGGREGATE_DATA: {"status": data},
        AggregateStatusType.AGGREGATE_LOCATION: {"status": loc},
    }
    return {"device_id": dev_id, "agg_status": agg_status}
@attr('UNIT', group='sa')
class TestObservatoryUtil(IonUnitTestCase):
    def setUp(self):
        """Build the shared mocks used by every test in this class."""
        self.mu = MockUtil()
        self.process_mock = self.mu.create_process_mock()
        self.container_mock = self.mu.create_container_mock()
        self.dsm_mock = self.mu.create_device_status_manager_mock()
res_list = [
dict(rt='Org', _id='Org_1', attr={}),
dict(rt='Observatory', _id='Obs_1', attr={}),
dict(rt='Observatory', _id='Obs_2', attr={}),
dict(rt='Subsite', _id='Sub_1', attr={}),
dict(rt='Subsite', _id='Sub_2', attr={}),
dict(rt='PlatformSite', _id='PS_1', attr={}),
dict(rt='InstrumentSite', _id='IS_1', attr={}),
dict(rt='PlatformDevice', _id='PD_1', attr={}),
dict(rt='InstrumentDevice', _id='ID_1', attr={}),
]
assoc_list = [
['Obs_1', 'hasSite', 'Sub_1'],
['Sub_1', 'hasSite', 'PS_1'],
['PS_1', 'hasSite', 'IS_1'],
]
assoc_list1 = [
['Org_1', 'hasResource', 'Obs_1'],
['Org_1', 'hasResource', 'Obs_2'],
['Obs_2', 'hasSite', 'Sub_2'],
]
assoc_list2 = [
['PS_1', 'hasDevice', 'PD_1'],
['IS_1', 'hasDevice', 'ID_1'],
['PD_1', 'hasDevice', 'ID_1'],
]
    def spy_get_child_sites(self, parent_site_id=None, org_id=None, exclude_types=None, include_parents=True, id_only=True):
        """Delegate to obs_util.get_child_sites, printing the results for debugging."""
        child_sites, site_ancestors = self.obs_util.get_child_sites(parent_site_id=parent_site_id,
            org_id=org_id,
            exclude_types=exclude_types,
            include_parents=include_parents,
            id_only=id_only)
        # Python 2 print statements: debug output only, no assertions here.
        print "child_sites of", parent_site_id, "are", child_sites
        print "site_ancestors of", parent_site_id, "are", site_ancestors
        return child_sites, site_ancestors
    def test_get_child_sites(self):
        """Child-site traversal from various roots, with and without parents."""
        self.mu.load_mock_resources(self.res_list)
        self.mu.load_mock_associations(self.assoc_list)
        self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock)

        # From Obs_1, excluding the parent, ids only: 3 descendants.
        site_resources, site_children = self.spy_get_child_sites(parent_site_id='Obs_1', include_parents=False, id_only=True)
        self.assertEquals(len(site_resources), 3)
        self.assertEquals(len(site_children), 3)
        self.assertIn('Sub_1', site_resources)
        self.assertIn('PS_1', site_resources)
        self.assertIn('IS_1', site_resources)
        self.assertNotIn('Obs_1', site_resources)
        # id_only=True means every resource value is None.
        self.assertEquals(len([v for v in site_resources.values() if v is None]), 3)

        # Same query with full objects: values are populated resources.
        site_resources, site_children = self.spy_get_child_sites(parent_site_id='Obs_1', include_parents=False, id_only=False)
        self.assertEquals(len(site_resources), 3)
        self.assertEquals(len(site_children), 3)
        self.assertEquals(len([v for v in site_resources.values() if v is None]), 0)
        self.assertEquals(site_resources['Sub_1']._get_type(), RT.Subsite)

        # Including parents adds Obs_1 itself.
        site_resources, site_children = self.spy_get_child_sites(parent_site_id='Obs_1', include_parents=True)
        self.assertEquals(len(site_resources), 4)
        self.assertEquals(len(site_children), 3)
        self.assertIn('Obs_1', site_resources)

        site_resources, site_children = self.spy_get_child_sites(parent_site_id='Sub_1', include_parents=False)
        self.assertEquals(len(site_resources), 2)
        self.assertEquals(len(site_children), 2)
        self.assertNotIn('Sub_1', site_resources)

        site_resources, site_children = self.spy_get_child_sites(parent_site_id='Sub_1', include_parents=True)
        self.assertEquals(len(site_resources), 4)
        self.assertEquals(len(site_children), 3)
        self.assertIn('Sub_1', site_resources)
        self.assertIn('Obs_1', site_resources)

        site_resources, site_children = self.spy_get_child_sites(parent_site_id='PS_1', include_parents=False)
        self.assertEquals(len(site_resources), 1)
        self.assertEquals(len(site_children), 1)

        site_resources, site_children = self.spy_get_child_sites(parent_site_id='PS_1', include_parents=True)
        self.assertEquals(len(site_resources), 4)
        self.assertEquals(len(site_children), 3)

        # A leaf site has no children...
        site_resources, site_children = self.spy_get_child_sites(parent_site_id='IS_1', include_parents=False)
        self.assertEquals(len(site_resources), 0)
        self.assertEquals(len(site_children), 0)

        # ...but including parents walks the full ancestry.
        site_resources, site_children = self.spy_get_child_sites(parent_site_id='IS_1', include_parents=True)
        self.assertEquals(len(site_resources), 4)
        self.assertEquals(len(site_children), 3)

        # Unknown root: only the root id itself is reported.
        site_resources, site_children = self.spy_get_child_sites(parent_site_id='XXX', include_parents=True)
        self.assertEquals(len(site_resources), 1)
        self.assertEquals(len(site_children), 0)
    def test_get_child_sites_org(self):
        """Child-site traversal rooted at an Org instead of a site."""
        self.mu.load_mock_resources(self.res_list)
        self.mu.load_mock_associations(self.assoc_list + self.assoc_list1)
        self.mu.assign_mockres_find_objects(filter_predicate="hasResource")
        self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock)

        child_sites, site_ancestors = self.obs_util.get_child_sites(org_id='Org_1', include_parents=False, id_only=True)
        self.assertEquals(len(child_sites), 6)
        self.assertEquals(len(site_ancestors), 5)
        self.assertIn('Sub_1', child_sites)
        self.assertIn('PS_1', child_sites)
        self.assertIn('IS_1', child_sites)
        self.assertIn('Obs_1', child_sites)
        self.assertIn('Obs_2', child_sites)

        # include_parents adds the Org itself.
        child_sites, site_ancestors = self.obs_util.get_child_sites(org_id='Org_1', include_parents=True, id_only=True)
        self.assertEquals(len(child_sites), 7)
        self.assertEquals(len(site_ancestors), 5)

        child_sites, site_ancestors = self.obs_util.get_child_sites(org_id='Org_1', include_parents=True, id_only=False)
        self.assertEquals(len(child_sites), 7)
        self.assertEquals(len(site_ancestors), 5)
        self.assertEquals(len([v for v in child_sites.values() if v is None]), 0)
        self.assertEquals(child_sites['Org_1']._get_type(), RT.Org)
    def test_get_site_devices(self):
        """Device lookup per site returns (site type, device id, device type) tuples."""
        self.mu.load_mock_resources(self.res_list)
        self.mu.load_mock_associations(self.assoc_list2)
        self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock)
        site_devices = self.obs_util.get_site_devices(['Sub_1', 'PS_1', 'IS_1'])
        self.assertEquals(len(site_devices), 3)
        # Sub_1 has no hasDevice association, so its entry is empty.
        self.assertEquals(site_devices['Sub_1'], [])
        self.assertEquals(site_devices['IS_1'], [('InstrumentSite', 'ID_1', 'InstrumentDevice')])
    def test_get_child_devices(self):
        """Child-device lookup always includes an entry for the queried id."""
        self.mu.load_mock_resources(self.res_list)
        self.mu.load_mock_associations(self.assoc_list2)
        self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock)

        child_devices = self.obs_util.get_child_devices('PD_1')
        self.assertEquals(len(child_devices), 2)
        self.assertEquals(child_devices['PD_1'][0][1], 'ID_1')

        child_devices = self.obs_util.get_child_devices('ID_1')
        self.assertEquals(len(child_devices), 1)
        self.assertEquals(child_devices['ID_1'], [])

        # Non-device and unknown ids still yield a single (empty) entry.
        child_devices = self.obs_util.get_child_devices('Sub_1')
        self.assertEquals(len(child_devices), 1)
        self.assertEquals(child_devices['Sub_1'], [])

        child_devices = self.obs_util.get_child_devices('XXX')
        self.assertEquals(len(child_devices), 1)
    def test_get_device_data_products(self):
        """Data-product lookup per site/device, with and without resources."""
        # NOTE(review): res_list1 and assoc_list3 are class attributes not
        # visible in this excerpt — presumably data-product fixtures.
        self.mu.load_mock_resources(self.res_list + self.res_list1)
        self.mu.load_mock_associations(self.assoc_list + self.assoc_list1 + self.assoc_list2 + self.assoc_list3)
        self.mu.assign_mockres_find_objects(filter_predicate="hasResource")
        self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock)

        res_dict = self.obs_util.get_site_data_products('Obs_1', RT.Observatory)
        self.assertGreaterEqual(len(res_dict), 6)
        # Without include_data_products, no resource objects are returned.
        self.assertIsNone(res_dict['data_product_resources'])
        self.assertIn('ID_1', res_dict['device_data_products'])
        self.assertEquals(len(res_dict['device_data_products']['ID_1']), 3)
        self.assertIn('DP_1', res_dict['device_data_products']['ID_1'])
        self.assertIn('PD_1', res_dict['device_data_products'])
        self.assertEquals(len(res_dict['device_data_products']['PD_1']), 3)

        res_dict = self.obs_util.get_site_data_products('PS_1', RT.PlatformSite)
        self.assertEquals(len(res_dict['device_data_products']['ID_1']), 3)
        self.assertIn('ID_1', res_dict['device_data_products'])
        self.assertIn('DP_1', res_dict['device_data_products']['ID_1'])
        self.assertIn('PD_1', res_dict['device_data_products'])
        self.assertEquals(len(res_dict['device_data_products']['PD_1']), 3)

        res_dict = self.obs_util.get_site_data_products('Org_1', RT.Org)
        self.assertIn('DP_1', res_dict['device_data_products']['ID_1'])

        # include_data_products=True also returns the resource objects.
        res_dict = self.obs_util.get_site_data_products('PS_1', RT.PlatformSite, include_data_products=True)
        self.assertIsNotNone(res_dict['data_product_resources'])
        self.assertIn('DP_1', res_dict['data_product_resources'])
        #import pprint
        #pprint.pprint(res_dict)
status_by_device_1 = {
"ID_1": _devstat("ID_1", DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK),
"PD_1": _devstat("PD_1", DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK),
}
status_by_device_2 = {
"ID_1": _devstat("ID_1", DST.STATUS_WARNING, DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK),
"PD_1": _devstat("PD_1", DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK),
}
status_by_device_3 = {
"ID_1": _devstat("ID_1", DST.STATUS_WARNING, DST.STATUS_WARNING, DST.STATUS_OK, DST.STATUS_OK),
"PD_1": _devstat("PD_1", DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK),
}
status_by_device_4 = {
"ID_1": _devstat("ID_1", DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK, DST.STATUS_OK),
"PD_1": _devstat("PD_1", DST.STATUS_WARNING, DST.STATUS_WARNING, DST.STATUS_OK, DST.STATUS_OK),
}
    def test_get_status_roll_ups(self):
        """Status roll-ups propagate device warnings up the site tree."""
        self.mu.load_mock_resources(self.res_list)
        self.mu.load_mock_associations(self.assoc_list + self.assoc_list1 + self.assoc_list2)
        self.mu.load_mock_device_statuses(self.status_by_device_1)
        self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock, device_status_mgr=self.dsm_mock)
        # No problems
        status_rollups = self.obs_util.get_status_roll_ups('ID_1', RT.InstrumentDevice)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'ID_1')
        status_rollups = self.obs_util.get_status_roll_ups('PD_1', RT.PlatformDevice)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'ID_1')
        status_rollups = self.obs_util.get_status_roll_ups('IS_1', RT.InstrumentSite)
        self.assertEquals(len(status_rollups), 6)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'IS_1')
        self._assert_status(status_rollups, 'ID_1')
        status_rollups = self.obs_util.get_status_roll_ups('PS_1', RT.PlatformSite)
        self.assertEquals(len(status_rollups), 6)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'PS_1')
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'IS_1')
        self._assert_status(status_rollups, 'ID_1')
        status_rollups = self.obs_util.get_status_roll_ups('Sub_1', RT.Subsite)
        self.assertIn('Sub_1', status_rollups)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'Sub_1')
        self._assert_status(status_rollups, 'PS_1')
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'IS_1')
        self._assert_status(status_rollups, 'ID_1')
        status_rollups = self.obs_util.get_status_roll_ups('Obs_1', RT.Observatory)
        self.assertIn('Obs_1', status_rollups)
        self.assertIn('Sub_1', status_rollups)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'Obs_1')
        self._assert_status(status_rollups, 'Sub_1')
        self._assert_status(status_rollups, 'PS_1')
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'IS_1')
        self._assert_status(status_rollups, 'ID_1')

        # ID_1 power warning
        self.mu.load_mock_device_statuses(self.status_by_device_2)
        status_rollups = self.obs_util.get_status_roll_ups('ID_1', RT.InstrumentDevice)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('PD_1', RT.PlatformDevice)
        self._assert_status(status_rollups, 'PD_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('IS_1', RT.InstrumentSite)
        self.assertIn('IS_1', status_rollups)
        self._assert_status(status_rollups, 'IS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('PS_1', RT.PlatformSite)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self._assert_status(status_rollups, 'PS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'IS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING)

        # ID_1 power+comms warning
        self.mu.load_mock_device_statuses(self.status_by_device_3)
        status_rollups = self.obs_util.get_status_roll_ups('ID_1', RT.InstrumentDevice)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('PD_1', RT.PlatformDevice)
        self._assert_status(status_rollups, 'PD_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('IS_1', RT.InstrumentSite)
        self.assertEquals(len(status_rollups), 6)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'IS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('PS_1', RT.PlatformSite)
        self.assertEquals(len(status_rollups), 6)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'PS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        # PD_1 itself stays OK: only its child instrument is degraded.
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'IS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('Sub_1', RT.Subsite)
        self.assertIn('Sub_1', status_rollups)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'Sub_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'PS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'IS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        status_rollups = self.obs_util.get_status_roll_ups('Obs_1', RT.Observatory)
        self.assertIn('Obs_1', status_rollups)
        self.assertIn('Sub_1', status_rollups)
        self.assertIn('PS_1', status_rollups)
        self.assertIn('PD_1', status_rollups)
        self.assertIn('IS_1', status_rollups)
        self.assertIn('ID_1', status_rollups)
        self._assert_status(status_rollups, 'Obs_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'Sub_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'PS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'PD_1')
        self._assert_status(status_rollups, 'IS_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
        self._assert_status(status_rollups, 'ID_1', agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)
def test_get_status_roll_ups_platform_warn(self):
    """A platform device warning must roll up the site tree but not down to instruments.

    Loads status_by_device_4 (PD_1 in power+comms warning) and verifies the
    warning appears on PD_1 and every ancestor (PS_1, Sub_1, Obs_1) while
    IS_1/ID_1 stay OK.
    """
    self.mu.load_mock_resources(self.res_list)
    self.mu.load_mock_associations(self.assoc_list + self.assoc_list1 + self.assoc_list2)
    self.mu.load_mock_device_statuses(self.status_by_device_4)
    self.obs_util = ObservatoryUtil(self.process_mock, self.container_mock,
                                    device_status_mgr=self.dsm_mock)
    # Shared kwargs for the expected warning state (agg/power/comms all WARNING).
    warn = dict(agg=DST.STATUS_WARNING, power=DST.STATUS_WARNING, comms=DST.STATUS_WARNING)

    # PD_1 power+comms warning
    rollups = self.obs_util.get_status_roll_ups('ID_1', RT.InstrumentDevice)
    self._assert_status(rollups, 'ID_1')

    rollups = self.obs_util.get_status_roll_ups('PD_1', RT.PlatformDevice)
    self._assert_status(rollups, 'PD_1', **warn)
    self._assert_status(rollups, 'ID_1')

    rollups = self.obs_util.get_status_roll_ups('IS_1', RT.InstrumentSite)
    self.assertEquals(len(rollups), 6)
    for res_id in ('IS_1', 'ID_1'):
        self.assertIn(res_id, rollups)
        self._assert_status(rollups, res_id)

    rollups = self.obs_util.get_status_roll_ups('PS_1', RT.PlatformSite)
    self.assertEquals(len(rollups), 6)
    for res_id in ('PS_1', 'PD_1', 'IS_1', 'ID_1'):
        self.assertIn(res_id, rollups)
    for res_id in ('PS_1', 'PD_1'):
        self._assert_status(rollups, res_id, **warn)
    self._assert_status(rollups, 'IS_1')
    self._assert_status(rollups, 'ID_1')

    rollups = self.obs_util.get_status_roll_ups('Sub_1', RT.Subsite)
    for res_id in ('Sub_1', 'PS_1', 'PD_1', 'IS_1', 'ID_1'):
        self.assertIn(res_id, rollups)
    for res_id in ('Sub_1', 'PS_1', 'PD_1'):
        self._assert_status(rollups, res_id, **warn)
    self._assert_status(rollups, 'IS_1')
    self._assert_status(rollups, 'ID_1')

    rollups = self.obs_util.get_status_roll_ups('Obs_1', RT.Observatory)
    for res_id in ('Obs_1', 'Sub_1', 'PS_1', 'PD_1', 'IS_1', 'ID_1'):
        self.assertIn(res_id, rollups)
    for res_id in ('Obs_1', 'Sub_1', 'PS_1', 'PD_1'):
        self._assert_status(rollups, res_id, **warn)
    self._assert_status(rollups, 'IS_1')
    self._assert_status(rollups, 'ID_1')
def _assert_status(self, status_rollups, res_id=None, agg=DST.STATUS_OK, loc=DST.STATUS_OK,
                   data=DST.STATUS_OK, comms=DST.STATUS_OK, power=DST.STATUS_OK):
    """Assert one rollup entry has exactly 5 statuses with the expected values.

    With a truthy *res_id* the entry is looked up in *status_rollups*;
    otherwise *status_rollups* itself is treated as the status dict.
    Pass ``None`` for any expected value to skip that individual check.
    """
    res_status = status_rollups[res_id] if res_id else status_rollups
    log.debug("_assert_status(%s) = %s", res_id, res_status)
    self.assertEquals(len(res_status), 5)
    # The aggregate is keyed under the literal string 'agg'; the individual
    # statuses are keyed by their AggregateStatusType constants.
    checks = (
        ('agg', agg),
        (AggregateStatusType.AGGREGATE_LOCATION, loc),
        (AggregateStatusType.AGGREGATE_DATA, data),
        (AggregateStatusType.AGGREGATE_COMMS, comms),
        (AggregateStatusType.AGGREGATE_POWER, power),
    )
    for status_key, expected in checks:
        if expected is not None:
            self.assertEquals(res_status[status_key], expected)
# Five mock DataProduct resources, DP_1 .. DP_5, with no extra attributes.
res_list1 = [dict(rt='DataProduct', _id='DP_%d' % num, attr={}) for num in range(1, 6)]

# hasSource associations wiring each DataProduct to its device/site source(s):
# DP_1 -> instrument device + instrument site, DP_2/DP_3 -> instrument device,
# DP_3/DP_4/DP_5 -> platform device.
assoc_list3 = [
    ['DP_%d' % num, 'hasSource', source]
    for num, source in [(1, 'ID_1'), (1, 'IS_1'),
                        (2, 'ID_1'),
                        (3, 'ID_1'), (3, 'PD_1'),
                        (4, 'PD_1'), (5, 'PD_1')]
]
| 51.978814
| 128
| 0.694139
| 3,388
| 24,534
| 4.647285
| 0.045455
| 0.127977
| 0.099587
| 0.076596
| 0.853985
| 0.828453
| 0.800635
| 0.786853
| 0.778342
| 0.751921
| 0
| 0.016972
| 0.18346
| 24,534
| 471
| 129
| 52.089172
| 0.768981
| 0.007133
| 0
| 0.550914
| 0
| 0
| 0.075849
| 0.00271
| 0
| 0
| 0
| 0
| 0.537859
| 0
| null | null | 0
| 0.018277
| null | null | 0.005222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a0eef355229e63a689ad95592e70877fbaff0933
| 5,567
|
py
|
Python
|
tests/components/zha/test_registries.py
|
mfaraco/home-assistant
|
a91b0058229974aed578fb20e833df4454d007f9
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
tests/components/zha/test_registries.py
|
mfaraco/home-assistant
|
a91b0058229974aed578fb20e833df4454d007f9
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:59:36.000Z
|
2022-03-12T00:52:11.000Z
|
tests/components/zha/test_registries.py
|
ajk12345-code/home-assistant
|
85624e80312e0487f51653ce24b398320972cbec
|
[
"Apache-2.0"
] | 10
|
2018-01-01T00:12:51.000Z
|
2021-12-21T23:08:05.000Z
|
"""Test ZHA registries."""
from unittest import mock
import pytest
import homeassistant.components.zha.core.registries as registries
# Canned identity values: assigned to the mocked device by the zha_device
# fixture and referenced by the manufacturer/model MatchRule cases below.
MANUFACTURER = "mock manufacturer"
MODEL = "mock model"
@pytest.fixture
def zha_device():
    """Return a mocked ZHA device reporting the canned manufacturer/model."""
    device = mock.MagicMock()
    device.manufacturer = MANUFACTURER
    device.model = MODEL
    return device
@pytest.fixture
def channels():
    """Return two mocked channels: level (0x0008) and on_off (0x0006)."""

    def _make_channel(name, generic_id):
        # 'name' is assigned after construction: a `name` kwarg to MagicMock()
        # is treated specially (it names the mock) instead of setting the attr.
        channel = mock.MagicMock()
        channel.name = name
        channel.generic_id = generic_id
        return channel

    return [
        _make_channel("level", "channel_0x0008"),
        _make_channel("on_off", "channel_0x0006"),
    ]
@pytest.mark.parametrize(
    "rule, matched",
    [
        # channel_names matching: every listed name must be present
        (registries.MatchRule(), False),
        (registries.MatchRule(channel_names={"level"}), True),
        (registries.MatchRule(channel_names={"level", "no match"}), False),
        (registries.MatchRule(channel_names={"on_off"}), True),
        (registries.MatchRule(channel_names={"on_off", "no match"}), False),
        (registries.MatchRule(channel_names={"on_off", "level"}), True),
        (registries.MatchRule(channel_names={"on_off", "level", "no match"}), False),
        # generic_id matching
        (registries.MatchRule(generic_ids={"channel_0x0006"}), True),
        (registries.MatchRule(generic_ids={"channel_0x0008"}), True),
        (registries.MatchRule(generic_ids={"channel_0x0006", "channel_0x0008"}), True),
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"}
            ),
            False,
        ),
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008"},
                channel_names={"on_off", "level"},
            ),
            True,
        ),
        # manufacturer / model matching
        (registries.MatchRule(manufacturer="no match"), False),
        (registries.MatchRule(manufacturer=MANUFACTURER), True),
        (registries.MatchRule(model=MODEL), True),
        (registries.MatchRule(model="no match"), False),
        # all criteria at once
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008"},
                channel_names={"on_off", "level"},
                manufacturer=MANUFACTURER,
                model=MODEL,
            ),
            True,
        ),
    ],
)
def test_registry_matching(rule, matched, zha_device, channels):
    """Strict matching: the rule matches only if every given criterion holds."""
    registry = registries.ZHAEntityRegistry()
    assert registry._strict_matched(zha_device, channels, rule) is matched
@pytest.mark.parametrize(
    "rule, matched",
    [
        (registries.MatchRule(), False),
        # channel_names matching
        (registries.MatchRule(channel_names={"level"}), True),
        (registries.MatchRule(channel_names={"level", "no match"}), False),
        (registries.MatchRule(channel_names={"on_off"}), True),
        (registries.MatchRule(channel_names={"on_off", "no match"}), False),
        (registries.MatchRule(channel_names={"on_off", "level"}), True),
        (registries.MatchRule(channel_names={"on_off", "level", "no match"}), False),
        # loose matching: matching channel names win despite model/manufacturer
        (
            registries.MatchRule(channel_names={"on_off", "level"}, model="no match"),
            True,
        ),
        (
            registries.MatchRule(
                channel_names={"on_off", "level"},
                model="no match",
                manufacturer="no match",
            ),
            True,
        ),
        (
            registries.MatchRule(
                channel_names={"on_off", "level"},
                model="no match",
                manufacturer=MANUFACTURER,
            ),
            True,
        ),
        # test generic_id matching
        (registries.MatchRule(generic_ids={"channel_0x0006"}), True),
        (registries.MatchRule(generic_ids={"channel_0x0008"}), True),
        (registries.MatchRule(generic_ids={"channel_0x0006", "channel_0x0008"}), True),
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"}
            ),
            False,
        ),
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"},
                # fixed typo: was "mo match"; a non-matching model, consistent
                # with the other negative cases (outcome unchanged: still False)
                model="no match",
            ),
            False,
        ),
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008", "channel_0x0009"},
                model=MODEL,
            ),
            True,
        ),
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008"},
                channel_names={"on_off", "level"},
            ),
            True,
        ),
        # manufacturer matching
        (registries.MatchRule(manufacturer="no match"), False),
        (registries.MatchRule(manufacturer=MANUFACTURER), True),
        (registries.MatchRule(model=MODEL), True),
        (registries.MatchRule(model="no match"), False),
        # match everything
        (
            registries.MatchRule(
                generic_ids={"channel_0x0006", "channel_0x0008"},
                channel_names={"on_off", "level"},
                manufacturer=MANUFACTURER,
                model=MODEL,
            ),
            True,
        ),
    ],
)
def test_registry_loose_matching(rule, matched, zha_device, channels):
    """Loose matching: a rule can match even when some of its criteria fail.

    The table shows e.g. matching channel_names (or a matching model)
    succeeding despite a non-matching model/manufacturer or generic_ids.
    """
    reg = registries.ZHAEntityRegistry()
    assert reg._loose_matched(zha_device, channels, rule) is matched
| 33.536145
| 87
| 0.566733
| 504
| 5,567
| 6.065476
| 0.113095
| 0.242395
| 0.142951
| 0.15211
| 0.842002
| 0.840366
| 0.840366
| 0.756624
| 0.756624
| 0.756624
| 0
| 0.038373
| 0.297826
| 5,567
| 165
| 88
| 33.739394
| 0.743668
| 0.046165
| 0
| 0.723404
| 0
| 0
| 0.145672
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 1
| 0.035461
| false
| 0
| 0.021277
| 0
| 0.078014
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9d02aadc93e827bb43b46c1ba12034a55ec73612
| 301,169
|
py
|
Python
|
tests/data/big_array.py
|
HildeTeamTNT/pyarmor
|
a699f43baa766d8826f801021f1f483d19fdcb87
|
[
"OLDAP-2.5",
"Python-2.0"
] | 1,463
|
2017-09-30T02:46:56.000Z
|
2022-03-30T15:11:05.000Z
|
tests/data/big_array.py
|
HildeTeamTNT/pyarmor
|
a699f43baa766d8826f801021f1f483d19fdcb87
|
[
"OLDAP-2.5",
"Python-2.0"
] | 702
|
2016-12-02T23:47:21.000Z
|
2022-03-31T08:14:00.000Z
|
tests/data/big_array.py
|
HildeTeamTNT/pyarmor
|
a699f43baa766d8826f801021f1f483d19fdcb87
|
[
"OLDAP-2.5",
"Python-2.0"
] | 208
|
2018-01-17T05:55:55.000Z
|
2022-03-29T18:27:47.000Z
|
def foo():
a0 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a1 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a3 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a4 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a5 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a6 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a7 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a8 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a9 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a10 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a11 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a12 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a13 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a14 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a15 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a16 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a17 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a18 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a19 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a20 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a21 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a22 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a23 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a24 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a25 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a26 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a27 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a28 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a29 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a30 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a31 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a32 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a33 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a34 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a35 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a36 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a37 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a38 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a39 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a40 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a41 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a42 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a43 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a44 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a45 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a46 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a47 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a48 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a49 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a50 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a51 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a52 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a53 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a54 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a55 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a56 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a57 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a58 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a59 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a60 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a61 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a62 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a63 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a64 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a65 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a66 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a67 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a68 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a69 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a70 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a71 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a72 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a73 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a74 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a75 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a76 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a77 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a78 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a79 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a80 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a81 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a82 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a83 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a84 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a85 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a86 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a87 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a88 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a89 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a90 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a91 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a92 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a93 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a94 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a95 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a96 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a97 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a98 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
a99 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# Report the generated a99 payload, then run the demo entry point when
# executed as a script.
a99_text = 'a99 is %s' % a99
print(a99_text)

if __name__ == '__main__':
    foo()
| 2,814.663551
| 3,010
| 0.333139
| 100,111
| 301,169
| 1.002118
| 0.001089
| 1.991567
| 2.984361
| 3.97516
| 0.99678
| 0.99678
| 0.99678
| 0.99678
| 0.99678
| 0.99678
| 0
| 0.499604
| 0.334105
| 301,169
| 106
| 3,011
| 2,841.216981
| 0.000643
| 0
| 0
| 0
| 0
| 0
| 0.000056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009615
| false
| 0
| 0
| 0
| 0.009615
| 0.009615
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
9d2fa1a283f769cccbed5fa57e4e9ffc64c673ee
| 147
|
py
|
Python
|
crawler/admin.py
|
JeetThakare/goodread
|
c4ee8d29a8d4836fa8e6d5dfbd3eceb4d7e7ea48
|
[
"MIT"
] | 1
|
2020-06-14T09:19:04.000Z
|
2020-06-14T09:19:04.000Z
|
crawler/admin.py
|
JeetThakare/goodread
|
c4ee8d29a8d4836fa8e6d5dfbd3eceb4d7e7ea48
|
[
"MIT"
] | 6
|
2021-03-18T21:12:37.000Z
|
2022-02-10T06:55:11.000Z
|
crawler/admin.py
|
JeetThakare/goodread
|
c4ee8d29a8d4836fa8e6d5dfbd3eceb4d7e7ea48
|
[
"MIT"
] | 3
|
2018-11-21T03:38:32.000Z
|
2019-02-26T22:02:59.000Z
|
"""Admin registrations for the crawler app."""
from django.contrib import admin
from .models import Link, Topic, Article, ArticleTopic

# Expose every crawler model in the Django admin with the default
# ModelAdmin options (registering one-by-one is equivalent to passing
# the list to a single register() call).
for _model in (Link, Topic, Article, ArticleTopic):
    admin.site.register(_model)
| 29.4
| 57
| 0.795918
| 19
| 147
| 6.157895
| 0.631579
| 0.153846
| 0.273504
| 0.478632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108844
| 147
| 4
| 58
| 36.75
| 0.89313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
19e19fb3dd0846ddfbf706ef88912edbef64fa56
| 6,337
|
py
|
Python
|
tests.py
|
Wida7/CheessMoves
|
378dbb7f5ee3137cf2d75a1030b7601b19530daa
|
[
"MIT"
] | null | null | null |
tests.py
|
Wida7/CheessMoves
|
378dbb7f5ee3137cf2d75a1030b7601b19530daa
|
[
"MIT"
] | null | null | null |
tests.py
|
Wida7/CheessMoves
|
378dbb7f5ee3137cf2d75a1030b7601b19530daa
|
[
"MIT"
] | null | null | null |
from unittest import TestCase

from movimientos import *


class Test_movimientos(TestCase):
    """Tests for the chess-move helpers in ``movimientos``.

    NOTE(review): the original file defined this class twice — the second
    definition silently shadowed the first — and duplicated the two trivial
    tests; a single copy is kept here.  The deprecated ``assertEquals``
    alias has been replaced by ``assertEqual`` throughout.
    """

    def test_tablero_a_cadena(self):
        # An empty board renders as an empty string.
        dado = []
        espero = ""
        obtengo = tablero_a_cadena(dado)
        self.assertEqual(espero, obtengo)

    def test_obtener_nombre_pieza(self):
        # Uppercase codes are the black pieces; each maps to its Spanish name.
        dado = 'R'
        espero = "Rey Negro"
        obtengo = obtener_nombre_pieza(dado)
        self.assertEqual(espero, obtengo)

        dado = 'Q'
        espero = "Reina Negra"
        obtengo = obtener_nombre_pieza(dado)
        # (The original asserted this case twice back to back; once suffices.)
        self.assertEqual(espero, obtengo)

        dado = 'P'
        espero = "Peon Negro"
        obtengo = obtener_nombre_pieza(dado)
        self.assertEqual(espero, obtengo)

        dado = 'T'
        espero = "Torre Negro"
        obtengo = obtener_nombre_pieza(dado)
        self.assertEqual(espero, obtengo)

        dado = 'A'
        espero = "Alfil Negro"
        obtengo = obtener_nombre_pieza(dado)
        self.assertEqual(espero, obtengo)

        dado = 'K'
        espero = "Caballo Negro"
        obtengo = obtener_nombre_pieza(dado)
        self.assertEqual(espero, obtengo)

    def test_mover_torre(self):
        # dado = [tablero, fila_origen, col_origen, fila_destino, col_destino]:
        # move the rook from (0, 0) to (5, 0).
        dado = [
            [['t', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
             [' ', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
             ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']],
            0, 0, 5, 0
        ]
        espero = [
            [' ', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
            [' ', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['t', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
            ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']
        ]
        # BUG FIX: the original passed dado[0] for every positional argument
        # and also indexed dado[5], which is out of range for this 5-element
        # list (an IndexError before the helper was even called).
        obtengo = mover_torre(dado[0], dado[1], dado[2], dado[3], dado[4])
        self.assertEqual(espero, obtengo)
        # TODO(review): a bare self.assertRaises(ValueError) call checks
        # nothing; wrap an illegal move in
        # `with self.assertRaises(ValueError):` instead.

        # A null move (origin == destination) leaves the board unchanged.
        dado = [
            [['t', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
             ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
             ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']],
            0, 0, 0, 0
        ]
        espero = [
            ['t', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
            ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
            ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']
        ]
        obtengo = mover_torre(dado[0], dado[1], dado[2], dado[3], dado[4])
        self.assertEqual(espero, obtengo)
        # BUG FIX: the original `self.assertEqual(ValueError)` was a
        # TypeError (assertEqual needs two values); removed.

    def test_mover_rey(self):
        # Move the king-side piece from (0, 4) to (0, 5).
        # NOTE(review): the input board contains 'p ' (trailing space) and the
        # expected board contains '' — these look like data-entry typos in the
        # original fixtures; left as-is pending confirmation against
        # movimientos' behaviour.
        dado = [
            [['t', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
             ['p ', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
             ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']],
            0, 4, 0, 5
        ]
        espero = [
            ['t', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
            ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
            ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']
        ]
        # BUG FIX: the original called test_mover_rey(...) — the test's own
        # name, a NameError at runtime — and indexed dado[5], which is out of
        # range.  Presumably the intent was the mover_rey helper from
        # movimientos; confirm against that module.
        obtengo = mover_rey(dado[0], dado[1], dado[2], dado[3], dado[4])
        self.assertEqual(espero, obtengo)
        # TODO(review): bare assertRaises(ValueError) checks nothing; see above.

    def test_mover_rey_segundo_caso(self):
        # BUG FIX: the original defined a second method also named
        # test_mover_rey, which shadowed the first so it never ran; renamed
        # so both scenarios execute.
        dado = [
            [['t', 'k', 'a', 'q', '', 'a', 'k', 't'],
             ['p ', 'p', 'p', 'p', 'r', 'p', 'p', 'p'],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
             ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
             ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']],
            0, 5, 1, 5
        ]
        espero = [
            ['t', 'k', 'a', 'q', 'r', 'a', 'k', 't'],
            ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['', ' ', ' ', ' ', ' ', ' ', ' ', ' '],
            ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
            ['T', 'K', 'A', 'R', 'Q', 'A', 'K', 'T']
        ]
        # BUG FIX: same self-call and dado[5] out-of-range problems as above.
        obtengo = mover_rey(dado[0], dado[1], dado[2], dado[3], dado[4])
        self.assertEqual(espero, obtengo)
        print("espero\n", tablero_a_cadena(espero))
| 39.855346
| 81
| 0.278681
| 545
| 6,337
| 3.155963
| 0.077064
| 0.133721
| 0.18314
| 0.22093
| 0.89593
| 0.857558
| 0.837791
| 0.807558
| 0.806977
| 0.738953
| 0
| 0.009675
| 0.412814
| 6,337
| 158
| 82
| 40.107595
| 0.452567
| 0
| 0
| 0.753425
| 0
| 0
| 0.093104
| 0
| 0
| 0
| 0
| 0
| 0.116438
| 1
| 0.047945
| false
| 0
| 0.027397
| 0
| 0.089041
| 0.006849
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
19e4feae4349f325e74f402a4675e24ea6c31457
| 7,300
|
py
|
Python
|
tests/jobs/test_jobs.py
|
oso1248/budAPI
|
87c5b1d41249273e30f6b590d9218afe195047e6
|
[
"MIT"
] | null | null | null |
tests/jobs/test_jobs.py
|
oso1248/budAPI
|
87c5b1d41249273e30f6b590d9218afe195047e6
|
[
"MIT"
] | null | null | null |
tests/jobs/test_jobs.py
|
oso1248/budAPI
|
87c5b1d41249273e30f6b590d9218afe195047e6
|
[
"MIT"
] | null | null | null |
# Integration tests for the /jobs endpoints.  The client and data fixtures
# (authorized_client, admin_authorized_client, authorized_client_permissions_0,
# test_job_data, test_user_job_data) come from the project's pytest conftest.
from api.validators import val_jobs


def test_get_all_jobs(authorized_client, test_job_data):
    res = authorized_client.get('/jobs')
    assert len(res.json()) == len(test_job_data)
    assert res.status_code == 200


def test_get_all_jobs_empty(authorized_client):
    # No job fixtures loaded -> the collection endpoint returns 404.
    res = authorized_client.get('/jobs')
    assert res.status_code == 404


def test_get_all_jobs_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.get('/jobs')
    assert res.status_code == 403


def test_get_one_jobs(authorized_client, test_job_data):
    res = authorized_client.get('/jobs/1')
    # Validate the payload against the response schema before asserting.
    job = val_jobs.JobOut(**res.json())
    assert job.id == 1
    assert res.status_code == 200


def test_get_one_jobs_empty(authorized_client, test_job_data):
    res = authorized_client.get('/jobs/5')
    assert res.status_code == 404


def test_get_one_jobs_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.get('/jobs/1')
    assert res.status_code == 403


def test_create_job(authorized_client):
    res = authorized_client.post('/jobs', json={
        "name": "Releasing",
        "area": "Finishing",
        "is_active": True,
        "is_work_restriction": False
    })
    new_user = val_jobs.JobOut(**res.json())
    assert new_user.name == 'Releasing'
    assert res.status_code == 201


def test_create_job_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.post('/jobs', json={
        "name": "Releasing",
        "area": "Finishing",
        "is_active": True,
        "is_work_restriction": False
    })
    assert res.status_code == 403


def test_update_job(authorized_client, test_job_data):
    res = authorized_client.put('/jobs/1', json={
        "name": "Support 1",
        "area": "Finishing",
        "is_active": False,
        "is_work_restriction": False
    })
    data = val_jobs.JobOut(**res.json())
    assert data.name == 'Support 1'
    assert data.is_active == False
    assert res.status_code == 200


def test_update_job_empty(authorized_client, test_job_data):
    res = authorized_client.put('/jobs/50', json={
        "name": "Support 1",
        "area": "Finishing",
        "is_active": False,
        "is_work_restriction": False
    })
    assert res.status_code == 404


def test_update_job_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.put('/jobs/1', json={
        "name": "Support 1",
        "area": "Finishing",
        "is_active": False,
        "is_work_restriction": False
    })
    assert res.status_code == 403


def test_delete_job(admin_authorized_client, test_job_data):
    res = admin_authorized_client.delete('/jobs/1')
    assert res.status_code == 205


def test_delete_job_unauthorized(authorized_client, test_job_data):
    res = authorized_client.delete('/jobs/1')
    assert res.status_code == 403


def test_update_skap(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.put('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 2,
        "skap": 1
    })
    data = val_jobs.UserJobOut(**res.json())
    assert data.brewer.id == 1
    assert data.job.id == 2
    assert data.skap == 1
    assert res.status_code == 200


def test_update_skap_empty_user(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.put('/jobs/userjobs', json={
        "id_users": 50,
        "id_jobs": 2,
        "skap": 1
    })
    assert res.status_code == 404


def test_update_skap_empty_job(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.put('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 50,
        "skap": 1
    })
    assert res.status_code == 404


def test_update_skap_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.put('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 50,
        "skap": 1
    })
    assert res.status_code == 403


def test_add_job_to_user(authorized_client, test_job_data):
    res = authorized_client.post('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 2,
        "skap": 6
    })
    data = val_jobs.UserJobOut(**res.json())
    assert data.brewer.id == 1
    assert data.job.id == 2
    assert data.skap == 6
    assert res.status_code == 201


def test_add_job_to_user_empty_user(authorized_client, test_job_data):
    res = authorized_client.post('/jobs/userjobs', json={
        "id_users": 100,
        "id_jobs": 2,
        "skap": 6
    })
    assert res.status_code == 404


def test_add_job_to_user_empty_job(authorized_client, test_job_data):
    res = authorized_client.post('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 200,
        "skap": 6
    })
    assert res.status_code == 404


def test_add_job_to_user_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.post('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 200,
        "skap": 6
    })
    assert res.status_code == 403


def test_delete_job_from_user(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.delete('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 2,
    })
    assert res.status_code == 205


def test_delete_job_from_user_empty_user(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.delete('/jobs/userjobs', json={
        "id_users": 50,
        "id_jobs": 2,
    })
    assert res.status_code == 404


def test_delete_job_from_user_empty_job(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.delete('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 50,
    })
    assert res.status_code == 404


def test_delete_job_from_user_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.delete('/jobs/userjobs', json={
        "id_users": 1,
        "id_jobs": 2,
    })
    assert res.status_code == 403


def test_get_job_with_users(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.get('/jobs/userjobs/jobs/1')
    data = res.json()
    # BUG FIX: this comparison was a bare expression in the original — its
    # result was silently discarded, so the check never ran.
    assert data[0]['job']['id'] == data[1]['job']['id']
    assert res.status_code == 200


def test_get_job_with_users_empty_job(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.get('/jobs/userjobs/jobs/50')
    assert res.status_code == 404


def test_get_job_with_users_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.get('/jobs/userjobs/jobs/50')
    assert res.status_code == 403


def test_get_user_with_jobs(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.get('/jobs/userjobs/users/1')
    data = res.json()
    # BUG FIX: same missing-assert defect as test_get_job_with_users.
    assert data[0]['brewer']['id'] == data[1]['brewer']['id']
    assert res.status_code == 200


def test_get_user_with_jobs_empty_user(authorized_client, test_job_data, test_user_job_data):
    res = authorized_client.get('/jobs/userjobs/users/50')
    assert res.status_code == 404


def test_get_user_with_jobs_unauthorized(authorized_client_permissions_0):
    res = authorized_client_permissions_0.get('/jobs/userjobs/users/1')
    assert res.status_code == 403
| 29.317269
| 95
| 0.684658
| 1,012
| 7,300
| 4.567194
| 0.064229
| 0.214626
| 0.100606
| 0.127434
| 0.953483
| 0.929684
| 0.869537
| 0.854392
| 0.779965
| 0.694505
| 0
| 0.031711
| 0.192192
| 7,300
| 248
| 96
| 29.435484
| 0.752077
| 0
| 0
| 0.644809
| 0
| 0
| 0.124384
| 0.018082
| 0
| 0
| 0
| 0
| 0.229508
| 1
| 0.169399
| false
| 0
| 0.005464
| 0
| 0.174863
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c203f1ce529aa794a9b16333fd38d7b050480251
| 152
|
py
|
Python
|
fiberorient/__init__.py
|
scott-trinkle/fiberorient
|
306cf2741008eb46a97cfccdcf81e9ec33189a8d
|
[
"MIT"
] | null | null | null |
fiberorient/__init__.py
|
scott-trinkle/fiberorient
|
306cf2741008eb46a97cfccdcf81e9ec33189a8d
|
[
"MIT"
] | null | null | null |
fiberorient/__init__.py
|
scott-trinkle/fiberorient
|
306cf2741008eb46a97cfccdcf81e9ec33189a8d
|
[
"MIT"
] | null | null | null |
from .structuretensor import StructureTensor
from . import structuretensor
from . import odf
from . import util
from . import vis
from . import metrics
| 21.714286
| 44
| 0.802632
| 19
| 152
| 6.421053
| 0.368421
| 0.409836
| 0.409836
| 0.508197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 152
| 6
| 45
| 25.333333
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
a06fcd4f63299064903f1abfe79d453ba216eea6
| 1,091
|
py
|
Python
|
jinjiang/model.py
|
lanmaoxinqing/python-qidian-recommend
|
5f03d5cbd18b144502513b8441e698e8fb1f8089
|
[
"MIT"
] | 2
|
2018-08-14T08:34:35.000Z
|
2022-03-18T09:22:18.000Z
|
jinjiang/model.py
|
lanmaoxinqing/python-qidian-recommend
|
5f03d5cbd18b144502513b8441e698e8fb1f8089
|
[
"MIT"
] | null | null | null |
jinjiang/model.py
|
lanmaoxinqing/python-qidian-recommend
|
5f03d5cbd18b144502513b8441e698e8fb1f8089
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Peewee ORM models for the jinjiang (晋江) ranking crawler."""
# Star import kept deliberately: downstream modules may rely on peewee names
# re-exported through this module's namespace.
from peewee import *
import asyncio

db = SqliteDatabase('jinjiang.db')
host = 'http://www.jjwxc.net'
# Cap the crawler at 5 concurrent requests.
sem = asyncio.Semaphore(5)


class Base(Model):
    """Base model binding every table to the jinjiang.db SQLite database."""
    class Meta:
        database = db


class RankBase(Base):
    """Columns shared by the free and VIP ranking tables.

    Peewee models inherit fields from base model classes, so FreeRank and
    VipRank each still get their own table with exactly these columns
    (the original file duplicated all eleven field definitions verbatim).
    """
    id = TextField(primary_key=True)
    book_name = TextField()
    book_url = TextField()
    author_name = TextField(null=True)
    author_url = TextField(null=True)
    category = TextField(null=True)
    tag = TextField(null=True)
    state = TextField(null=True)
    recommend_reason = TextField(null=True)
    word_count = BigIntegerField(null=True)
    collect_count = BigIntegerField(null=True)


class FreeRank(RankBase):
    """Ranking entries for free books."""
    pass


class VipRank(RankBase):
    """Ranking entries for VIP (paid) books."""
    pass
| 23.717391
| 46
| 0.690192
| 133
| 1,091
| 5.541353
| 0.353383
| 0.173677
| 0.276798
| 0.151967
| 0.762551
| 0.762551
| 0.762551
| 0.762551
| 0.762551
| 0.762551
| 0
| 0.003405
| 0.192484
| 1,091
| 45
| 47
| 24.244444
| 0.833144
| 0.035747
| 0
| 0.6875
| 0
| 0
| 0.029608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
26171f44a670e4841737bb7e6dd136cf5b387dd9
| 10,559
|
py
|
Python
|
test/test_integration/test_sync_local_to_local.py
|
dkleissa/hybrid-object-store
|
fa3ece6b272f4f060128a8c49b7fb38ab23e1608
|
[
"MIT"
] | null | null | null |
test/test_integration/test_sync_local_to_local.py
|
dkleissa/hybrid-object-store
|
fa3ece6b272f4f060128a8c49b7fb38ab23e1608
|
[
"MIT"
] | null | null | null |
test/test_integration/test_sync_local_to_local.py
|
dkleissa/hybrid-object-store
|
fa3ece6b272f4f060128a8c49b7fb38ab23e1608
|
[
"MIT"
] | 1
|
2022-02-24T15:30:39.000Z
|
2022-02-24T15:30:39.000Z
|
import hoss
import hoss.error
import time
import pytest
class TestSyncLocalToLocal:
    """Integration tests for dataset syncing between two local namespaces.

    Refactored: the dataset-availability polling loop appeared three times
    and the file-availability polling loop four times, nearly verbatim; both
    are extracted into private helpers with identical prints/sleeps.
    """

    @staticmethod
    def _wait_for_target_dataset(ns_tgt, ds_name, attempts=15):
        """Poll *ns_tgt* (5s apart) until the synced dataset appears.

        Returns the target dataset; raises Exception if it never shows up.
        """
        for cnt in range(attempts):
            try:
                ds_tgt = ns_tgt.get_dataset(ds_name)
                print(f"Target Dataset ready in {5 * cnt} seconds")
                return ds_tgt
            except (hoss.error.NotFoundException, hoss.error.HossException):
                time.sleep(5)
                continue
        raise Exception("Failed to load target dataset.")

    @staticmethod
    def _wait_for_files(files, attempts, settle=0):
        """Poll (1s apart) until every file in *files* exists.

        After success, optionally sleep *settle* seconds to give the sync a
        moment to finish writing.  Falls through silently on timeout — the
        callers immediately assert on file contents next, which then fails
        with a clear error.
        """
        for cnt in range(attempts):
            try:
                for f in files:
                    assert f.exists() is True
                print(f"Objects correct in {cnt} seconds")
                if settle:
                    time.sleep(settle)
                break
            except AssertionError:
                time.sleep(1)
                continue

    def test_sync_simplex(self, fixture_sync_config_local_local):
        _, ns_src, ns_tgt, ds_src_name = fixture_sync_config_local_local
        ds_src = ns_src.create_dataset(ds_src_name, "source dataset")
        assert ds_src.is_sync_enabled() is False
        assert ds_src.sync_type is None
        assert ds_src.sync_policy is None

        # Dataset should not exist in target yet
        with pytest.raises(hoss.error.NotFoundException):
            ns_tgt.get_dataset(ds_src_name)

        # Enable namespace syncing
        ns_src.enable_sync_target("http://localhost", "simplex", "ns-tgt")
        sync_config = ns_src.get_sync_configuration()
        assert sync_config['sync_enabled'] is True
        assert len(sync_config['sync_targets']) == 1
        assert sync_config['sync_targets'][0]['target_core_service'] == "http://localhost/core/v1"
        assert sync_config['sync_targets'][0]['target_namespace'] == "ns-tgt"
        assert sync_config['sync_targets'][0]['sync_type'] == "simplex"

        # Enable dataset sync
        ds_src.enable_sync("simplex")
        assert ds_src.is_sync_enabled() is True
        time.sleep(5)
        ds_src = ns_src.get_dataset(ds_src_name)
        assert ds_src.sync_type == "simplex"
        assert ds_src.sync_policy is not None
        assert ds_src.sync_policy.get("Version") == '1'
        assert len(ds_src.sync_policy.get("Statements")) == 0

        print('waiting for api event processing and demuxer reload timeout...')
        ds_tgt = self._wait_for_target_dataset(ns_tgt, ds_src_name)

        # Write a file in the source and watch it end up in the target
        f_src = ds_src / 'test.txt'
        f_tgt = ds_tgt / 'test.txt'
        assert f_src.exists() is False
        assert f_tgt.exists() is False
        f_src.write_text("a file written in the source dataset", metadata={"key1": "value1", "key2": "value2"})
        self._wait_for_files((f_src, f_tgt), attempts=30)
        assert f_src.read_text() == f_tgt.read_text()
        assert f_tgt.metadata["key1"] == "value1"
        assert f_tgt.metadata["key2"] == "value2"

        # Make sure URL parsing is working OK. include spaces and plus sign.
        # Write a file in the source and watch it end up in the target
        f_src = ds_src / 'test + 1 (3).txt'
        f_tgt = ds_tgt / 'test + 1 (3).txt'
        assert f_src.exists() is False
        assert f_tgt.exists() is False
        f_src.write_text("a file written in the source dataset again.")
        self._wait_for_files((f_src, f_tgt), attempts=30)
        assert f_src.read_text() == f_tgt.read_text() == "a file written in the source dataset again."

        # The opposite should not work with simplex syncing
        f_src = ds_src / 'test2.txt'
        f_tgt = ds_tgt / 'test2.txt'
        assert f_src.exists() is False
        assert f_tgt.exists() is False
        f_tgt.write_text("a file written in the target dataset")
        time.sleep(5)
        assert f_src.exists() is False
        assert f_tgt.exists() is True

    def test_sync_duplex(self, fixture_sync_config_local_local):
        _, ns_src, ns_tgt, ds_src_name = fixture_sync_config_local_local
        ds_src = ns_src.create_dataset(ds_src_name, "source dataset")
        assert ds_src.is_sync_enabled() is False

        # Dataset should not exist in target yet
        with pytest.raises(hoss.error.NotFoundException):
            ns_tgt.get_dataset(ds_src_name)

        # Enable namespace syncing
        ns_src.enable_sync_target("http://localhost", "duplex", "ns-tgt")
        time.sleep(5)
        sync_config = ns_src.get_sync_configuration()
        assert sync_config['sync_enabled'] is True
        assert len(sync_config['sync_targets']) == 1
        assert sync_config['sync_targets'][0]['target_core_service'] == "http://localhost/core/v1"
        assert sync_config['sync_targets'][0]['target_namespace'] == "ns-tgt"
        assert sync_config['sync_targets'][0]['sync_type'] == "duplex"

        # Enable dataset sync
        ds_src.enable_sync("duplex")
        assert ds_src.is_sync_enabled() is True
        ds_src = ns_src.get_dataset(ds_src.dataset_name)
        time.sleep(70)
        print('waiting for api event processing and demuxer reload timeout...')

        # Dataset should now exist in the target namespace
        ds_tgt = self._wait_for_target_dataset(ns_tgt, ds_src_name)
        time.sleep(10)

        # Write a file in the source and watch it end up in the target
        f_src = ds_src / 'test.txt'
        f_tgt = ds_tgt / 'test.txt'
        assert f_src.exists() is False
        assert f_tgt.exists() is False
        f_src.write_text("a file written in the source dataset", metadata={"key1": "value1", "write": "source"})
        self._wait_for_files((f_src, f_tgt), attempts=60, settle=2)
        assert f_src.read_text() == f_tgt.read_text()
        assert f_src.metadata["key1"] == "value1"
        assert f_src.metadata["write"] == "source"
        assert f_tgt.metadata["key1"] == "value1"
        assert f_tgt.metadata["write"] == "source"

        # The opposite should work
        f_src = ds_src / 'test2.txt'
        f_tgt = ds_tgt / 'test2.txt'
        assert f_src.exists() is False
        assert f_tgt.exists() is False
        f_tgt.write_text("a file written in the target dataset", metadata={"key2": "value2", "write": "target"})
        self._wait_for_files((f_src, f_tgt), attempts=30, settle=3)
        assert f_src.read_text() == f_tgt.read_text()
        assert f_src.metadata["key2"] == "value2"
        assert f_src.metadata["write"] == "target"
        assert f_tgt.metadata["key2"] == "value2"
        assert f_tgt.metadata["write"] == "target"

    def test_sync_simplex_existing(self, fixture_sync_config_local_local):
        _, ns_src, ns_tgt, ds_src_name = fixture_sync_config_local_local
        ds_src = ns_src.create_dataset(ds_src_name, "source dataset")
        assert ds_src.is_sync_enabled() is False
        assert ds_src.sync_type is None
        assert ds_src.sync_policy is None

        # Dataset should not exist in target yet
        with pytest.raises(hoss.error.NotFoundException):
            ns_tgt.get_dataset(ds_src_name)

        # Enable namespace syncing
        ns_src.enable_sync_target("http://localhost", "simplex", "ns-tgt")
        sync_config = ns_src.get_sync_configuration()
        assert sync_config['sync_enabled'] is True
        assert len(sync_config['sync_targets']) == 1
        assert sync_config['sync_targets'][0]['target_core_service'] == "http://localhost/core/v1"
        assert sync_config['sync_targets'][0]['target_namespace'] == "ns-tgt"
        assert sync_config['sync_targets'][0]['sync_type'] == "simplex"

        # Write some data before syncing is enabled on the dataset
        f1_src = ds_src / 'test1.txt'
        f2_src = ds_src / 'test2.txt'
        assert f1_src.exists() is False
        assert f2_src.exists() is False
        f1_src.write_text("a file written in the source dataset", metadata={"key1": "value1", "key2": "value2"})
        f2_src.write_text("a file written in the source dataset", metadata={"key1": "value1", "key2": "value2"})

        # Enable dataset sync
        ds_src.enable_sync("simplex")
        assert ds_src.is_sync_enabled() is True
        time.sleep(5)
        ds_src = ns_src.get_dataset(ds_src_name)
        assert ds_src.sync_type == "simplex"
        assert ds_src.sync_policy is not None
        assert ds_src.sync_policy.get("Version") == '1'
        assert len(ds_src.sync_policy.get("Statements")) == 0

        print('waiting for api event processing and demuxer reload timeout...')
        ds_tgt = self._wait_for_target_dataset(ns_tgt, ds_src_name)

        # Files should already exist in the target
        f1_tgt = ds_tgt / 'test1.txt'
        f2_tgt = ds_tgt / 'test2.txt'
        self._wait_for_files((f1_tgt, f2_tgt), attempts=30)
        assert f1_src.read_text() == f1_tgt.read_text()
        assert f2_src.read_text() == f2_tgt.read_text()
        assert f1_tgt.metadata["key1"] == "value1"
        assert f2_tgt.metadata["key2"] == "value2"
| 39.695489
| 112
| 0.60053
| 1,416
| 10,559
| 4.250706
| 0.096045
| 0.040704
| 0.029905
| 0.039874
| 0.886692
| 0.857618
| 0.847981
| 0.838678
| 0.82954
| 0.822894
| 0
| 0.015659
| 0.298418
| 10,559
| 265
| 113
| 39.845283
| 0.796841
| 0.064589
| 0
| 0.782609
| 0
| 0
| 0.186752
| 0
| 0
| 0
| 0
| 0
| 0.386473
| 1
| 0.014493
| false
| 0
| 0.019324
| 0
| 0.038647
| 0.05314
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
262a3095af6b6ae14fd7976ba46aa119d8ad9f43
| 8,071
|
py
|
Python
|
diffopt/distributions/quadratic.py
|
brandontrabucco/controls
|
16ccff9b4cb7598c7971669422ece0b17fc2a975
|
[
"MIT"
] | 1
|
2020-01-13T04:10:57.000Z
|
2020-01-13T04:10:57.000Z
|
diffopt/distributions/quadratic.py
|
brandontrabucco/controls
|
16ccff9b4cb7598c7971669422ece0b17fc2a975
|
[
"MIT"
] | 4
|
2020-11-13T18:33:37.000Z
|
2022-02-10T00:33:34.000Z
|
diffopt/distributions/quadratic.py
|
brandontrabucco/controls
|
16ccff9b4cb7598c7971669422ece0b17fc2a975
|
[
"MIT"
] | 1
|
2020-02-12T02:55:35.000Z
|
2020-02-12T02:55:35.000Z
|
"""Author: Brandon Trabucco, Copyright 2019, MIT License"""
from diffopt.distributions.continuous.gaussian import Gaussian
from diffopt.distributions.continuous.deterministic import Deterministic
import tensorflow as tf
class QuadraticGaussian(Gaussian):
    """A Gaussian whose mean is a second-order Taylor expansion of its inputs."""

    def __init__(self, mean, centers, covariance, jacobians, hessians):
        """Create a distribution for a quadratic gaussian.

        Args:
        - mean: a mean for the quadratic gaussian
            with shape [batch_dim, output_dim]
        - centers[i]: the center of the taylor approximation
            with shape [batch_dim, input_dim[i]]
        - covariance: a covariance matrix for the linear gaussian
            with shape [batch_dim, output_dim, output_dim]
        - jacobians[i]: a jacobian of the output wrt. input[i]
            with shape [batch_dim, output_dim, input_dim[i]]
        - hessians[i][j]: a hessian of the output wrt. input[i] and input[j]
            with shape [batch_dim, output_dim, input_dim[i], input_dim[j]]
        """
        def model(time, inputs):
            # Shift every input to its Taylor expansion point once, up front.
            shifted = [u - (c if c is not None else 0)
                       for u, c in zip(inputs, centers)]
            delta = 0.0
            # First-order terms: J_i @ (x_i - c_i); zero/None jacobians skipped.
            for jac, dx in zip(jacobians, shifted):
                if jac is not None and not tf.reduce_all(tf.equal(jac, 0)):
                    delta = delta + (jac @ dx[:, :, tf.newaxis])[:, :, 0]
            # Second-order terms: 0.5 * (x_i - c_i)^T H_ij (x_j - c_j).
            for row, dx in zip(hessians, shifted):
                for hess, dy in zip(row, shifted):
                    if hess is not None and not tf.reduce_all(tf.equal(hess, 0)):
                        quad = tf.matmul(dx[:, tf.newaxis, :, tf.newaxis],
                                         hess, transpose_a=True)
                        quad = tf.matmul(quad, dy[:, tf.newaxis, :, tf.newaxis])
                        delta = delta + 0.5 * quad[:, :, 0, 0]
            # Gaussian expects (mean, sqrt(cov), inv(cov), logdet(cov)).
            return (delta + (mean if mean is not None else 0),
                    tf.linalg.sqrtm(covariance),
                    tf.linalg.inv(covariance),
                    tf.linalg.logdet(covariance))
        Gaussian.__init__(self, model)
class TimeVaryingQuadraticGaussian(Gaussian):
    """A time-varying Gaussian whose mean is a second-order Taylor expansion."""

    def __init__(self, mean, centers, covariance, jacobians, hessians):
        """Create a distribution for a time varying quadratic gaussian.

        Args:
        - mean: a mean for the linear gaussian
            with shape [T, batch_dim, output_dim]
        - centers[i]: the center of the taylor approximation
            with shape [T, batch_dim, input_dim[i]]
        - covariance: a covariance matrix for the quadratic gaussian
            with shape [T, batch_dim, output_dim, output_dim]
        - jacobians[i]: a jacobian of the output wrt. input[i]
            with shape [T, batch_dim, output_dim, input_dim[i]]
        - hessians[i][j]: a hessian of the output wrt. input[i] and input[j]
            with shape [T, batch_dim, output_dim, input_dim[i], input_dim[j]]
        """
        def quadratic_gaussian_model(time, inputs):
            delta = 0.0
            # First-order terms: slice jacobian and center at the current step.
            for jacobian, x, x0 in zip(jacobians, inputs, centers):
                x = x - (x0[time] if x0 is not None else 0)
                if jacobian is not None and not tf.reduce_all(tf.equal(jacobian, 0)):
                    delta = delta + (
                        jacobian[time] @ x[:, :, tf.newaxis])[:, :, 0]
            # Second-order terms: 0.5 * (x_i - c_i)^T H_ij (x_j - c_j).
            for tmp, x, x0 in zip(hessians, inputs, centers):
                x = x - (x0[time] if x0 is not None else 0)
                for hessian, y, y0 in zip(tmp, inputs, centers):
                    y = y - (y0[time] if y0 is not None else 0)
                    if hessian is not None and not tf.reduce_all(tf.equal(hessian, 0)):
                        # BUG FIX: slice the hessian at the current time step
                        # (its documented shape is [T, batch, out, in_i, in_j]).
                        # Previously the full time-stacked tensor was used here,
                        # inconsistent with jacobian[time] above and with the
                        # hessian[time] usage in TimeVaryingQuadratic.
                        out = tf.matmul(x[:, tf.newaxis, :, tf.newaxis],
                                        hessian[time], transpose_a=True)
                        out = tf.matmul(out, y[:, tf.newaxis, :, tf.newaxis])
                        delta = delta + 0.5 * out[:, :, 0, 0]
            # Gaussian expects (mean, sqrt(cov), inv(cov), logdet(cov)).
            return (delta + (mean[time] if mean is not None else 0),
                    tf.linalg.sqrtm(covariance[time]),
                    tf.linalg.inv(covariance[time]),
                    tf.linalg.logdet(covariance[time]))
        Gaussian.__init__(self, quadratic_gaussian_model)
class Quadratic(Deterministic):
    """A deterministic variable given by a second-order Taylor expansion."""

    def __init__(self, mean, centers, jacobians, hessians):
        """Create a distribution for a quadratic variable.

        Args:
        - mean: the mean of the quadratic function
            with shape [batch_dim, output_dim]
        - centers[i]: the center of the taylor approximation
            with shape [batch_dim, input_dim[i]]
        - jacobians[i]: a jacobian of the output wrt. input[i]
            with shape [batch_dim, output_dim, input_dim[i]]
        - hessians[i][j]: a hessian of the output wrt. input[i] and input[j]
            with shape [batch_dim, output_dim, input_dim[i], input_dim[j]]
        """
        def model(time, inputs):
            # Shift every input to its Taylor expansion point once, up front.
            shifted = [u - (c if c is not None else 0)
                       for u, c in zip(inputs, centers)]
            delta = 0.0
            # First-order terms: J_i @ (x_i - c_i); zero/None jacobians skipped.
            for jac, dx in zip(jacobians, shifted):
                if jac is not None and not tf.reduce_all(tf.equal(jac, 0)):
                    delta = delta + (jac @ dx[:, :, tf.newaxis])[:, :, 0]
            # Second-order terms: 0.5 * (x_i - c_i)^T H_ij (x_j - c_j).
            for row, dx in zip(hessians, shifted):
                for hess, dy in zip(row, shifted):
                    if hess is not None and not tf.reduce_all(tf.equal(hess, 0)):
                        quad = tf.matmul(dx[:, tf.newaxis, :, tf.newaxis],
                                         hess, transpose_a=True)
                        quad = tf.matmul(quad, dy[:, tf.newaxis, :, tf.newaxis])
                        delta = delta + 0.5 * quad[:, :, 0, 0]
            # Deterministic models return a one-element tuple.
            return delta + (mean if mean is not None else 0),
        Deterministic.__init__(self, model)
class TimeVaryingQuadratic(Deterministic):
    """A deterministic time-varying variable given by a second-order Taylor expansion."""

    def __init__(self, mean, centers, jacobians, hessians):
        """Create a distribution for a time varying quadratic variable.

        Args:
        - mean: the mean of the quadratic function
            with shape [T, batch_dim, output_dim]
        - centers[i]: the center of the taylor approximation
            with shape [T, batch_dim, input_dim[i]]
        - jacobians[i]: a jacobian of the output wrt. input[i]
            with shape [T, batch_dim, output_dim, input_dim[i]]
        - hessians[i][j]: a hessian of the output wrt. input[i] and input[j]
            with shape [T, batch_dim, output_dim, input_dim[i], input_dim[j]]
        """
        def model(time, inputs):
            # Shift every input to its expansion point at the current time step.
            shifted = [u - (c[time] if c is not None else 0)
                       for u, c in zip(inputs, centers)]
            delta = 0.0
            # First-order terms: J_i[time] @ (x_i - c_i[time]).
            for jac, dx in zip(jacobians, shifted):
                if jac is not None and not tf.reduce_all(tf.equal(jac, 0)):
                    delta = delta + (jac[time] @ dx[:, :, tf.newaxis])[:, :, 0]
            # Second-order terms: 0.5 * dx^T H_ij[time] dy.
            for row, dx in zip(hessians, shifted):
                for hess, dy in zip(row, shifted):
                    if hess is not None and not tf.reduce_all(tf.equal(hess, 0)):
                        quad = tf.matmul(dx[:, tf.newaxis, :, tf.newaxis],
                                         hess[time], transpose_a=True)
                        quad = tf.matmul(quad, dy[:, tf.newaxis, :, tf.newaxis])
                        delta = delta + 0.5 * quad[:, :, 0, 0]
            # Deterministic models return a one-element tuple.
            return delta + (mean[time] if mean is not None else 0),
        Deterministic.__init__(self, model)
| 43.627027
| 87
| 0.545038
| 1,050
| 8,071
| 4.088571
| 0.084762
| 0.027952
| 0.050314
| 0.048451
| 0.912416
| 0.912416
| 0.905427
| 0.879571
| 0.85884
| 0.85884
| 0
| 0.017541
| 0.350142
| 8,071
| 184
| 88
| 43.86413
| 0.800953
| 0.275307
| 0
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089888
| false
| 0
| 0.033708
| 0
| 0.213483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cd1bc52aae6835489da51cb915c423919a4827f1
| 2,678
|
py
|
Python
|
astraviso/test/projectionutils.py
|
bradsease/test2
|
0925f1a170b2e1d7c09902cd8550dc844b7d7764
|
[
"MIT"
] | null | null | null |
astraviso/test/projectionutils.py
|
bradsease/test2
|
0925f1a170b2e1d7c09902cd8550dc844b7d7764
|
[
"MIT"
] | 44
|
2017-02-22T23:38:52.000Z
|
2017-03-02T03:27:11.000Z
|
astraviso/test/projectionutils.py
|
bradsease/test
|
0925f1a170b2e1d7c09902cd8550dc844b7d7764
|
[
"MIT"
] | null | null | null |
"""
projectionutils unit tests.
"""
from __future__ import division
import unittest
import numpy as np
from astraviso import projectionutils
class imageutilstests(unittest.TestCase):
    """
    Imageutils unit test class.

    Base class for the projection test suites; no shared fixtures are
    required, so setUp/tearDown are intentional no-ops.
    """

    def setUp(self):
        """No per-test setup required."""
        return None

    def tearDown(self):
        """No per-test teardown required."""
        return None
class test_pinhole_project(imageutilstests):
    """
    Test pinhole_project function.
    """

    def _check_coordinates(self, img_x, img_y, expected_len):
        """Shared structural checks on a pair of projected coordinate arrays."""
        self.assertIsInstance(img_x, np.ndarray, "X coordinate output should be ndarray.")
        # BUG FIX: this message previously said "X coordinate" for img_y.
        self.assertIsInstance(img_y, np.ndarray, "Y coordinate output should be ndarray.")
        self.assertEqual(len(img_x), expected_len, "Output dimension should equal input dimension")
        self.assertEqual(len(img_x), len(img_y), "Number of x and y coordinates should be equal.")

    def test_single_pinhole(self):
        """
        Test single point projection.
        """
        # Build input vector
        vector = np.array([0, 0, 1])
        # Project with 3 pixel resolution
        img_x, img_y = projectionutils.pinhole_project(vector, 93, 0.016, 3)
        # Check result
        self._check_coordinates(img_x, img_y, 1)
        self.assertEqual(img_x[0], img_y[0], "Coordinate values should be equal.")
        self.assertEqual(img_x[0], 1, "Coordinate values should be 1.")
        # Project with 4 pixel resolution
        img_x, img_y = projectionutils.pinhole_project(vector, 93, 0.016, 4)
        # Check result
        self._check_coordinates(img_x, img_y, 1)
        self.assertEqual(img_x[0], img_y[0], "Coordinate values should be equal.")
        self.assertEqual(img_x[0], 1.5, "Coordinate values should be 1.5.")

    def test_multiple_pinhole(self):
        """
        Test multiple point projection.
        """
        # Build input vector
        vector = np.array([[0, 0, 1], [0, 0, -1]])
        # Convert
        img_x, img_y = projectionutils.pinhole_project(vector, 93, 0.016, 256)
        # Check result
        self._check_coordinates(img_x, img_y, 2)
        self.assertTrue(all(img_x == img_y), "For this case, coordinates should be equal.")
| 37.71831
| 98
| 0.659447
| 357
| 2,678
| 4.826331
| 0.19888
| 0.039466
| 0.080093
| 0.069646
| 0.745212
| 0.716193
| 0.716193
| 0.716193
| 0.716193
| 0.716193
| 0
| 0.023856
| 0.23301
| 2,678
| 70
| 99
| 38.257143
| 0.814995
| 0.112397
| 0
| 0.441176
| 0
| 0
| 0.294838
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.117647
| false
| 0.058824
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
26a6d37e9769666642b452ed3b4208886208a590
| 3,859
|
py
|
Python
|
tests/test_integration.py
|
tlancaster6/InternetOfFish
|
413d53f83a98bfd537fd2789c8943f06d6d3ee63
|
[
"MIT"
] | 1
|
2022-03-14T21:44:50.000Z
|
2022-03-14T21:44:50.000Z
|
tests/test_integration.py
|
tlancaster6/InternetOfFish
|
413d53f83a98bfd537fd2789c8943f06d6d3ee63
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
tlancaster6/InternetOfFish
|
413d53f83a98bfd537fd2789c8943f06d6d3ee63
|
[
"MIT"
] | 1
|
2022-03-22T18:51:15.000Z
|
2022-03-22T18:51:15.000Z
|
import time
import pytest
from context import runner, metadata, mptools
@pytest.fixture
def mock_metadata():
    """Yield a simplified MetaDataDict populated with placeholder values."""
    fields = {
        'owner': 'foo',
        'species': 'bar',
        'fish_type': 'other',
        'model_id': 'efficientdet_new',
    }
    md = metadata.MetaDataDict()
    md.quick_update(fields)
    yield md.simplify()
@pytest.fixture
def testing_context(mock_metadata):
    """Yield a MainContext wrapping the mock metadata fixture."""
    ctx = mptools.MainContext(mock_metadata)
    yield ctx
@pytest.mark.parametrize('mode', ['active', 'passive'])
def test_runner_startup(mocker, testing_context, mode):
    """The runner process starts up and stays alive in both modes."""
    with testing_context as main_ctx:
        # Install the default signal handlers so shutdown events propagate.
        mptools.init_signals(main_ctx.shutdown_event, mptools.default_signal_handler, mptools.default_signal_handler)
        # Patch the runner's reported mode and replace its child workers with
        # mocks. The patch objects are never inspected afterwards, so they are
        # not bound to names (the previous `runner_patch =` binding was unused).
        mocker.patch('context.runner.RunnerWorker.return_value.expected_mode', return_value=mode)
        mocker.patch('context.runner.RunnerWorker.return_value.logger.debug', new_callable=print)
        mocker.patch('context.runner.collector.CollectorWorker.return_value')
        mocker.patch('context.runner.detector.DetectorWorker.return_value')
        runner_proc = main_ctx.Proc('RUN', runner.RunnerWorker, main_ctx, persistent=True)
        # Give the worker up to 10 seconds to signal a successful startup.
        runner_proc.startup_event.wait(10)
        assert runner_proc.startup_event.is_set()
        assert runner_proc.proc.is_alive()
@pytest.mark.parametrize('mode', ['active', 'passive'])
def test_runner_hard_shutdown(mocker, testing_context, mode):
    """A HARD_SHUTDOWN event terminates the runner process and clears all procs."""
    with testing_context as main_ctx:
        # Install the default signal handlers so shutdown events propagate.
        mptools.init_signals(main_ctx.shutdown_event, mptools.default_signal_handler, mptools.default_signal_handler)
        # Patch the runner's reported mode and replace its child workers with
        # mocks (presumably to avoid touching real capture/detection resources
        # -- verify against the runner module).
        mocker.patch('context.runner.RunnerWorker.return_value.expected_mode', return_value=mode)
        mocker.patch('context.runner.RunnerWorker.return_value.logger.debug', new_callable=print)
        mocker.patch('context.runner.collector.CollectorWorker.return_value')
        mocker.patch('context.runner.detector.DetectorWorker.return_value')
        runner_proc = main_ctx.Proc('RUN', runner.RunnerWorker, main_ctx, persistent=True)
        # Give the worker up to 10 seconds to come up before signalling it.
        runner_proc.startup_event.wait(10)
        main_ctx.event_queue.safe_put(mptools.EventMessage('test', 'HARD_SHUTDOWN', ''))
        # Wait up to 10 seconds for the shutdown to complete.
        runner_proc.shutdown_event.wait(10)
        # Hard shutdown must stop the runner entirely and deregister every proc.
        assert runner_proc.shutdown_event.is_set()
        assert not main_ctx.procs
        assert not runner_proc.proc.is_alive()
@pytest.mark.parametrize('mode', ['active', 'passive'])
def test_runner_soft_shutdown(mocker, testing_context, mode):
    """A SOFT_SHUTDOWN stops the mode-specific children but leaves the runner alive."""
    with testing_context as main_ctx:
        # Install the default signal handlers so shutdown events propagate.
        mptools.init_signals(main_ctx.shutdown_event, mptools.default_signal_handler, mptools.default_signal_handler)
        # Patch the runner's reported mode and replace its child workers with
        # mocks (presumably to avoid touching real capture/detection resources
        # -- verify against the runner module).
        mocker.patch('context.runner.RunnerWorker.return_value.expected_mode', return_value=mode)
        mocker.patch('context.runner.RunnerWorker.return_value.logger.debug', new_callable=print)
        mocker.patch('context.runner.collector.CollectorWorker.return_value')
        mocker.patch('context.runner.detector.DetectorWorker.return_value')
        runner_proc = main_ctx.Proc('RUN', runner.RunnerWorker, main_ctx, persistent=True)
        # Give the worker up to 10 seconds to come up before signalling it.
        runner_proc.startup_event.wait(10)
        main_ctx.event_queue.safe_put(mptools.EventMessage('test', 'SOFT_SHUTDOWN', ''))
        # Fixed sleep to let the soft shutdown propagate to the children.
        time.sleep(10)
        # The runner itself must survive a soft shutdown...
        assert not runner_proc.shutdown_event.is_set()
        assert runner_proc.proc.is_alive()
        # ...while its mode-specific children must have stopped.
        if mode == 'active':
            assert runner_proc.detector_proc and not runner_proc.detector_proc.proc.is_alive()
            assert runner_proc.collector_proc and not runner_proc.collector_proc.proc.is_alive()
            assert runner_proc.notifier_proc and not runner_proc.notifier_proc.proc.is_alive()
        elif mode == 'passive':
            assert runner_proc.uploader_proc and not runner_proc.uploader_proc.proc.is_alive()
            assert runner_proc.notifier_proc and not runner_proc.notifier_proc.proc.is_alive()
| 51.453333
| 117
| 0.731278
| 483
| 3,859
| 5.565217
| 0.178054
| 0.085565
| 0.080357
| 0.107143
| 0.815476
| 0.793155
| 0.780878
| 0.75
| 0.75
| 0.71131
| 0
| 0.003095
| 0.162736
| 3,859
| 74
| 118
| 52.148649
| 0.828846
| 0
| 0
| 0.507937
| 0
| 0
| 0.206539
| 0.164245
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.079365
| false
| 0.063492
| 0.047619
| 0
| 0.126984
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
26dd88e33b73beea9b85fce30ef069b7e0b693c9
| 157,697
|
py
|
Python
|
bccr/python/antchain_sdk_bccr/models.py
|
alipay/antchain-openapi-prod-sdk
|
f78549e5135d91756093bd88d191ca260b28e083
|
[
"MIT"
] | 6
|
2020-06-28T06:40:50.000Z
|
2022-02-25T11:02:18.000Z
|
bccr/python/antchain_sdk_bccr/models.py
|
alipay/antchain-openapi-prod-sdk
|
f78549e5135d91756093bd88d191ca260b28e083
|
[
"MIT"
] | null | null | null |
bccr/python/antchain_sdk_bccr/models.py
|
alipay/antchain-openapi-prod-sdk
|
f78549e5135d91756093bd88d191ca260b28e083
|
[
"MIT"
] | 6
|
2020-06-30T09:29:03.000Z
|
2022-01-07T10:42:22.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import List
class Config(TeaModel):
    """
    Model for initing client
    """
    def __init__(
        self,
        access_key_id: str = None,
        access_key_secret: str = None,
        security_token: str = None,
        protocol: str = None,
        read_timeout: int = None,
        connect_timeout: int = None,
        http_proxy: str = None,
        https_proxy: str = None,
        endpoint: str = None,
        no_proxy: str = None,
        max_idle_conns: int = None,
        user_agent: str = None,
        socks_5proxy: str = None,
        socks_5net_work: str = None,
        max_idle_time_millis: int = None,
        keep_alive_duration_millis: int = None,
        max_requests: int = None,
        max_requests_per_host: int = None,
    ):
        # accesskey id
        self.access_key_id = access_key_id
        # accesskey secret
        self.access_key_secret = access_key_secret
        # security token
        self.security_token = security_token
        # http protocol
        self.protocol = protocol
        # read timeout
        self.read_timeout = read_timeout
        # connect timeout
        self.connect_timeout = connect_timeout
        # http proxy
        self.http_proxy = http_proxy
        # https proxy
        self.https_proxy = https_proxy
        # endpoint
        self.endpoint = endpoint
        # proxy white list
        self.no_proxy = no_proxy
        # max idle conns
        self.max_idle_conns = max_idle_conns
        # user agent
        self.user_agent = user_agent
        # socks5 proxy
        self.socks_5proxy = socks_5proxy
        # socks5 network
        self.socks_5net_work = socks_5net_work
        # maximum idle time of a keep-alive connection, in milliseconds
        self.max_idle_time_millis = max_idle_time_millis
        # maximum lifetime of a keep-alive connection, in milliseconds
        self.keep_alive_duration_millis = keep_alive_duration_millis
        # maximum number of requests (total cap on keep-alive connections)
        self.max_requests = max_requests
        # maximum requests per target host (per-hostname cap on keep-alive connections)
        self.max_requests_per_host = max_requests_per_host

    def validate(self):
        """No required fields; every setting is optional."""
        pass

    def to_map(self):
        """Serialize the non-None settings into a camelCase-keyed dict."""
        result = dict()
        if self.access_key_id is not None:
            result['accessKeyId'] = self.access_key_id
        if self.access_key_secret is not None:
            result['accessKeySecret'] = self.access_key_secret
        if self.security_token is not None:
            result['securityToken'] = self.security_token
        if self.protocol is not None:
            result['protocol'] = self.protocol
        if self.read_timeout is not None:
            result['readTimeout'] = self.read_timeout
        if self.connect_timeout is not None:
            result['connectTimeout'] = self.connect_timeout
        if self.http_proxy is not None:
            result['httpProxy'] = self.http_proxy
        if self.https_proxy is not None:
            result['httpsProxy'] = self.https_proxy
        if self.endpoint is not None:
            result['endpoint'] = self.endpoint
        if self.no_proxy is not None:
            result['noProxy'] = self.no_proxy
        if self.max_idle_conns is not None:
            result['maxIdleConns'] = self.max_idle_conns
        if self.user_agent is not None:
            result['userAgent'] = self.user_agent
        if self.socks_5proxy is not None:
            result['socks5Proxy'] = self.socks_5proxy
        if self.socks_5net_work is not None:
            result['socks5NetWork'] = self.socks_5net_work
        if self.max_idle_time_millis is not None:
            result['maxIdleTimeMillis'] = self.max_idle_time_millis
        if self.keep_alive_duration_millis is not None:
            result['keepAliveDurationMillis'] = self.keep_alive_duration_millis
        if self.max_requests is not None:
            result['maxRequests'] = self.max_requests
        if self.max_requests_per_host is not None:
            result['maxRequestsPerHost'] = self.max_requests_per_host
        return result

    def from_map(self, m: dict = None):
        """Populate settings from a camelCase-keyed dict; absent/None keys are skipped."""
        m = m or dict()
        if m.get('accessKeyId') is not None:
            self.access_key_id = m.get('accessKeyId')
        if m.get('accessKeySecret') is not None:
            self.access_key_secret = m.get('accessKeySecret')
        if m.get('securityToken') is not None:
            self.security_token = m.get('securityToken')
        if m.get('protocol') is not None:
            self.protocol = m.get('protocol')
        if m.get('readTimeout') is not None:
            self.read_timeout = m.get('readTimeout')
        if m.get('connectTimeout') is not None:
            self.connect_timeout = m.get('connectTimeout')
        if m.get('httpProxy') is not None:
            self.http_proxy = m.get('httpProxy')
        if m.get('httpsProxy') is not None:
            self.https_proxy = m.get('httpsProxy')
        if m.get('endpoint') is not None:
            self.endpoint = m.get('endpoint')
        if m.get('noProxy') is not None:
            self.no_proxy = m.get('noProxy')
        if m.get('maxIdleConns') is not None:
            self.max_idle_conns = m.get('maxIdleConns')
        if m.get('userAgent') is not None:
            self.user_agent = m.get('userAgent')
        if m.get('socks5Proxy') is not None:
            self.socks_5proxy = m.get('socks5Proxy')
        if m.get('socks5NetWork') is not None:
            self.socks_5net_work = m.get('socks5NetWork')
        if m.get('maxIdleTimeMillis') is not None:
            self.max_idle_time_millis = m.get('maxIdleTimeMillis')
        if m.get('keepAliveDurationMillis') is not None:
            self.keep_alive_duration_millis = m.get('keepAliveDurationMillis')
        if m.get('maxRequests') is not None:
            self.max_requests = m.get('maxRequests')
        if m.get('maxRequestsPerHost') is not None:
            self.max_requests_per_host = m.get('maxRequestsPerHost')
        return self
class MonitorProviderCapability(TeaModel):
    """Capability entry describing a single monitoring service provider."""

    # Serialized keys are identical to the attribute names.
    _FIELDS = ('provider_id', 'provider_name', 'provider_description', 'is_provided')

    def __init__(
        self,
        provider_id: str = None,
        provider_name: str = None,
        provider_description: str = None,
        is_provided: bool = None,
    ):
        # provider id
        self.provider_id = provider_id
        # provider name
        self.provider_name = provider_name
        # provider description
        self.provider_description = provider_description
        # whether this is the recommended provider
        self.is_provided = is_provided

    def validate(self):
        """Require every field to be present."""
        for name in self._FIELDS:
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        return {name: getattr(self, name)
                for name in self._FIELDS
                if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent or None keys."""
        m = m or dict()
        for name in self._FIELDS:
            value = m.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class ScreenInfo(TeaModel):
    # File bundle produced by a screen-recording evidence job.
    def __init__(
        self,
        process_log_file: str = None,
        check_log_file: str = None,
        web_screenshot_file: str = None,
        web_source_file: str = None,
        video_file: str = None,
        process_log_file_tx_hash: str = None,
        check_log_file_tx_hash: str = None,
    ):
        # download link for the full-pipeline evidence-collection log file
        self.process_log_file = process_log_file
        # download link for the self-clean (integrity check) file
        self.check_log_file = check_log_file
        # download link for the web page screenshot file
        self.web_screenshot_file = web_screenshot_file
        # download link for the web page source file
        self.web_source_file = web_source_file
        # download link for the source video file
        self.video_file = video_file
        # transaction hash of the operation log
        self.process_log_file_tx_hash = process_log_file_tx_hash
        # transaction hash of the self-clean log
        self.check_log_file_tx_hash = check_log_file_tx_hash

    def validate(self):
        """No required fields; every file link is optional."""
        pass

    def to_map(self):
        """Serialize the non-None fields into a plain dict (keys == attribute names)."""
        result = dict()
        if self.process_log_file is not None:
            result['process_log_file'] = self.process_log_file
        if self.check_log_file is not None:
            result['check_log_file'] = self.check_log_file
        if self.web_screenshot_file is not None:
            result['web_screenshot_file'] = self.web_screenshot_file
        if self.web_source_file is not None:
            result['web_source_file'] = self.web_source_file
        if self.video_file is not None:
            result['video_file'] = self.video_file
        if self.process_log_file_tx_hash is not None:
            result['process_log_file_tx_hash'] = self.process_log_file_tx_hash
        if self.check_log_file_tx_hash is not None:
            result['check_log_file_tx_hash'] = self.check_log_file_tx_hash
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent or None keys."""
        m = m or dict()
        if m.get('process_log_file') is not None:
            self.process_log_file = m.get('process_log_file')
        if m.get('check_log_file') is not None:
            self.check_log_file = m.get('check_log_file')
        if m.get('web_screenshot_file') is not None:
            self.web_screenshot_file = m.get('web_screenshot_file')
        if m.get('web_source_file') is not None:
            self.web_source_file = m.get('web_source_file')
        if m.get('video_file') is not None:
            self.video_file = m.get('video_file')
        if m.get('process_log_file_tx_hash') is not None:
            self.process_log_file_tx_hash = m.get('process_log_file_tx_hash')
        if m.get('check_log_file_tx_hash') is not None:
            self.check_log_file_tx_hash = m.get('check_log_file_tx_hash')
        return self
class ScreenshotInfo(TeaModel):
    """File bundle produced by a web-page screenshot evidence job."""

    # Serialized keys are identical to the attribute names.
    _FIELDS = (
        'process_log_file',
        'check_log_file',
        'screenshot_file',
        'process_log_file_tx_hash',
        'check_log_file_tx_hash',
    )

    def __init__(
        self,
        process_log_file: str = None,
        check_log_file: str = None,
        screenshot_file: str = None,
        process_log_file_tx_hash: str = None,
        check_log_file_tx_hash: str = None,
    ):
        # download link for the full-pipeline evidence-collection log file
        self.process_log_file = process_log_file
        # download link for the self-clean (integrity check) file
        self.check_log_file = check_log_file
        # download link for the web page screenshot file
        self.screenshot_file = screenshot_file
        # transaction hash of the operation log file
        self.process_log_file_tx_hash = process_log_file_tx_hash
        # transaction hash of the self-clean log file
        self.check_log_file_tx_hash = check_log_file_tx_hash

    def validate(self):
        """Only the screenshot file itself is mandatory."""
        self.validate_required(self.screenshot_file, 'screenshot_file')

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        return {name: getattr(self, name)
                for name in self._FIELDS
                if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent or None keys."""
        m = m or dict()
        for name in self._FIELDS:
            value = m.get(name)
            if value is not None:
                setattr(self, name, value)
        return self
class NotaryPublicOffice(TeaModel):
    """A notary public office and its location."""

    # Serialized keys are identical to the attribute names; all are required.
    _FIELDS = ('code', 'name', 'province', 'city')

    def __init__(
        self,
        code: str = None,
        name: str = None,
        province: str = None,
        city: str = None,
    ):
        # notary office code
        self.code = code
        # notary office name
        self.name = name
        # province (or municipality) where the office is located
        self.province = province
        # city where the office is located
        self.city = city

    def validate(self):
        """Require every field to be present."""
        for field in self._FIELDS:
            self.validate_required(getattr(self, field), field)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        return {field: getattr(self, field)
                for field in self._FIELDS
                if getattr(self, field) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent or None keys."""
        m = m or dict()
        for field in self._FIELDS:
            value = m.get(field)
            if value is not None:
                setattr(self, field, value)
        return self
class ProxyData(TeaModel):
    """Tenant information for a proxied (financial-cloud) account."""

    # Serialized keys are identical to the attribute names.
    _FIELDS = ('tenant_id', 'tenant_name', 'if_measure')

    def __init__(
        self,
        tenant_id: str = None,
        tenant_name: str = None,
        if_measure: bool = None,
    ):
        # financial-cloud tenant id
        self.tenant_id = tenant_id
        # tenant name
        self.tenant_name = tenant_name
        # whether usage is metered
        self.if_measure = if_measure

    def validate(self):
        """tenant_id and tenant_name are mandatory; if_measure is optional."""
        for field in ('tenant_id', 'tenant_name'):
            self.validate_required(getattr(self, field), field)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        return {field: getattr(self, field)
                for field in self._FIELDS
                if getattr(self, field) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent or None keys."""
        m = m or dict()
        for field in self._FIELDS:
            value = m.get(field)
            if value is not None:
                setattr(self, field, value)
        return self
class MonitorType(TeaModel):
    """A (file type, submit type) pair accepted for monitoring."""

    # Serialized keys are identical to the attribute names; both are required.
    _FIELDS = ('file_type', 'submit_type')

    def __init__(
        self,
        file_type: str = None,
        submit_type: str = None,
    ):
        # file type
        self.file_type = file_type
        # submission type
        self.submit_type = submit_type

    def validate(self):
        """Require both fields to be present."""
        for field in self._FIELDS:
            self.validate_required(getattr(self, field), field)

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        return {field: getattr(self, field)
                for field in self._FIELDS
                if getattr(self, field) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent or None keys."""
        m = m or dict()
        for field in self._FIELDS:
            value = m.get(field)
            if value is not None:
                setattr(self, field, value)
        return self
class MonitorProviderType(TeaModel):
    # A monitored file/submit type together with the providers that support it.
    def __init__(
        self,
        file_type: str = None,
        submit_type: str = None,
        file_format: str = None,
        monitor_providers: List[MonitorProviderCapability] = None,
    ):
        # monitored file type
        self.file_type = file_type
        # submission type
        self.submit_type = submit_type
        # file format
        self.file_format = file_format
        # supported providers, already sorted
        self.monitor_providers = monitor_providers

    def validate(self):
        """Require the type fields and recursively validate each provider entry."""
        self.validate_required(self.file_type, 'file_type')
        self.validate_required(self.submit_type, 'submit_type')
        self.validate_required(self.monitor_providers, 'monitor_providers')
        if self.monitor_providers:
            for k in self.monitor_providers:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize to a dict; 'monitor_providers' is always emitted as a list."""
        result = dict()
        if self.file_type is not None:
            result['file_type'] = self.file_type
        if self.submit_type is not None:
            result['submit_type'] = self.submit_type
        if self.file_format is not None:
            result['file_format'] = self.file_format
        # NOTE: unlike the scalar fields, this key is present even when the
        # attribute is None (it then stays an empty list).
        result['monitor_providers'] = []
        if self.monitor_providers is not None:
            for k in self.monitor_providers:
                result['monitor_providers'].append(k.to_map() if k else None)
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict, rebuilding nested MonitorProviderCapability models."""
        m = m or dict()
        if m.get('file_type') is not None:
            self.file_type = m.get('file_type')
        if m.get('submit_type') is not None:
            self.submit_type = m.get('submit_type')
        if m.get('file_format') is not None:
            self.file_format = m.get('file_format')
        # Always reset to a fresh list, discarding any previous value.
        self.monitor_providers = []
        if m.get('monitor_providers') is not None:
            for k in m.get('monitor_providers'):
                temp_model = MonitorProviderCapability()
                self.monitor_providers.append(temp_model.from_map(k))
        return self
class RecordScreenData(TeaModel):
def __init__(
self,
error_reason: str = None,
file_hash: str = None,
gmt_end: int = None,
gmt_start: int = None,
os_version: str = None,
screen_info: ScreenInfo = None,
screen_zip: str = None,
sw_version: str = None,
tsr: str = None,
tx_hash: str = None,
file_size: int = None,
block_height: int = None,
timestamp: int = None,
certificate_url: str = None,
certificate_storage_no: str = None,
):
# 错误原因(状态为FAIL才有数据)
self.error_reason = error_reason
# 录屏文件Hash值
self.file_hash = file_hash
# 录屏结束时间
self.gmt_end = gmt_end
# 录屏开始时间
self.gmt_start = gmt_start
# OS版本号
self.os_version = os_version
# 录屏文件信息
self.screen_info = screen_info
# 证据包下载地址(状态为SUCCESS 才有数据)
self.screen_zip = screen_zip
# 录屏软件版本号
self.sw_version = sw_version
# 可信时间戳的返回对象
self.tsr = tsr
# 统一证据编号
self.tx_hash = tx_hash
# 录屏文件大小
self.file_size = file_size
# 录屏文件存证块高
self.block_height = block_height
# 录屏文件上链时间
self.timestamp = timestamp
# 录屏文件公证处证书下载链接
self.certificate_url = certificate_url
# 公证处证书编号
self.certificate_storage_no = certificate_storage_no
def validate(self):
self.validate_required(self.gmt_end, 'gmt_end')
self.validate_required(self.gmt_start, 'gmt_start')
self.validate_required(self.os_version, 'os_version')
if self.screen_info:
self.screen_info.validate()
self.validate_required(self.sw_version, 'sw_version')
def to_map(self):
result = dict()
if self.error_reason is not None:
result['error_reason'] = self.error_reason
if self.file_hash is not None:
result['file_hash'] = self.file_hash
if self.gmt_end is not None:
result['gmt_end'] = self.gmt_end
if self.gmt_start is not None:
result['gmt_start'] = self.gmt_start
if self.os_version is not None:
result['os_version'] = self.os_version
if self.screen_info is not None:
result['screen_info'] = self.screen_info.to_map()
if self.screen_zip is not None:
result['screen_zip'] = self.screen_zip
if self.sw_version is not None:
result['sw_version'] = self.sw_version
if self.tsr is not None:
result['tsr'] = self.tsr
if self.tx_hash is not None:
result['tx_hash'] = self.tx_hash
if self.file_size is not None:
result['file_size'] = self.file_size
if self.block_height is not None:
result['block_height'] = self.block_height
if self.timestamp is not None:
result['timestamp'] = self.timestamp
if self.certificate_url is not None:
result['certificate_url'] = self.certificate_url
if self.certificate_storage_no is not None:
result['certificate_storage_no'] = self.certificate_storage_no
return result
def from_map(self, m: dict = None):
    """Populate fields from a dict; only non-None values are applied."""
    m = m or dict()
    for key in ('error_reason', 'file_hash', 'gmt_end', 'gmt_start',
                'os_version'):
        value = m.get(key)
        if value is not None:
            setattr(self, key, value)
    # The nested model is rebuilt from its own map representation.
    if m.get('screen_info') is not None:
        self.screen_info = ScreenInfo().from_map(m['screen_info'])
    for key in ('screen_zip', 'sw_version', 'tsr', 'tx_hash', 'file_size',
                'block_height', 'timestamp', 'certificate_url',
                'certificate_storage_no'):
        value = m.get(key)
        if value is not None:
            setattr(self, key, value)
    return self
class AccountData(TeaModel):
    """An account held on a third-party platform."""

    # Serialization key order shared by to_map/from_map.
    _KEYS = ('account_id', 'account_name', 'account_platform')

    def __init__(
        self,
        account_id: str = None,
        account_name: str = None,
        account_platform: str = None,
    ):
        # Account id
        self.account_id = account_id
        # Account display name
        self.account_name = account_name
        # Hosting platform (e.g. Alipay, Taobao, WeChat, Douyin)
        self.account_platform = account_platform

    def validate(self):
        """All three fields are mandatory."""
        for key in self._KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class MonitorResult(TeaModel):
    """A single suspected-infringement hit produced by a monitoring task."""

    # Serialization key order shared by to_map/from_map.
    _KEYS = (
        'monitor_task_id', 'monitor_result_id', 'infr_url', 'infr_host',
        'infr_title', 'infr_time', 'detail', 'poster', 'post_date',
        'find_date', 'view_count', 'like_count', 'comment_count',
        'repost_count', 'download_url', 'infr_feedback_time',
    )

    def __init__(
        self,
        monitor_task_id: str = None,
        monitor_result_id: str = None,
        infr_url: str = None,
        infr_host: str = None,
        infr_title: str = None,
        infr_time: int = None,
        detail: str = None,
        poster: str = None,
        post_date: int = None,
        find_date: int = None,
        view_count: int = None,
        like_count: int = None,
        comment_count: int = None,
        repost_count: int = None,
        download_url: str = None,
        infr_feedback_time: int = None,
    ):
        # Monitoring task ID
        self.monitor_task_id = monitor_task_id
        # Monitoring result ID
        self.monitor_result_id = monitor_result_id
        # Infringing URL
        self.infr_url = infr_url
        # Infringing host / publisher entity
        self.infr_host = infr_host
        # Title of the infringing content
        self.infr_title = infr_title
        # When the infringing content was uploaded
        self.infr_time = infr_time
        # Infringement details
        self.detail = detail
        # Poster of the content
        self.poster = poster
        # When the infringing content was posted
        self.post_date = post_date
        # When the infringing content was discovered
        self.find_date = find_date
        # View count
        self.view_count = view_count
        # Like count
        self.like_count = like_count
        # Comment count
        self.comment_count = comment_count
        # Repost count
        self.repost_count = repost_count
        # Download link for the infringing file
        self.download_url = download_url
        # When the suspected infringement was reported back
        self.infr_feedback_time = infr_feedback_time

    def validate(self):
        """Only the two identifiers are mandatory."""
        for key in ('monitor_task_id', 'monitor_result_id'):
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class ScreenshotData(TeaModel):
    """Web-page screenshot evidence together with its on-chain attestation."""

    # Simple (non-nested) keys preceding / following screenshot_info,
    # in original serialization order.
    _KEYS_BEFORE_INFO = (
        'url', 'gmt_evidence', 'file_hash', 'file_size', 'tx_hash',
        'block_height', 'timestamp', 'tsr', 'certificate_url',
        'certificate_storage_no', 'tool_version', 'screenshot_zip',
    )

    def __init__(
        self,
        url: str = None,
        gmt_evidence: int = None,
        file_hash: str = None,
        file_size: int = None,
        tx_hash: str = None,
        block_height: int = None,
        timestamp: int = None,
        tsr: str = None,
        certificate_url: str = None,
        certificate_storage_no: str = None,
        tool_version: str = None,
        screenshot_zip: str = None,
        screenshot_info: ScreenshotInfo = None,
        head_title: str = None,
    ):
        # URL that evidence was captured from
        self.url = url
        # When the page was captured
        self.gmt_evidence = gmt_evidence
        # Hash of the captured file
        self.file_hash = file_hash
        # Size of the captured file
        self.file_size = file_size
        # Unified evidence number
        self.tx_hash = tx_hash
        # Block height at which the file was attested
        self.block_height = block_height
        # Time the file was written to the chain
        self.timestamp = timestamp
        # Trusted-timestamp (TSA) response object
        self.tsr = tsr
        # Download link for the notary-office certificate of the screenshot
        self.certificate_url = certificate_url
        # Notary-office certificate number
        self.certificate_storage_no = certificate_storage_no
        # Version of the capture tool
        self.tool_version = tool_version
        # Evidence-package download URL (only present on SUCCESS)
        self.screenshot_zip = screenshot_zip
        # Captured file details
        self.screenshot_info = screenshot_info
        # Page <title>
        self.head_title = head_title

    def validate(self):
        """url and gmt_evidence are mandatory; nested info validates itself."""
        self.validate_required(self.url, 'url')
        self.validate_required(self.gmt_evidence, 'gmt_evidence')
        if self.screenshot_info:
            self.screenshot_info.validate()

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS_BEFORE_INFO:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        # The nested model serializes through its own to_map().
        if self.screenshot_info is not None:
            result['screenshot_info'] = self.screenshot_info.to_map()
        if self.head_title is not None:
            result['head_title'] = self.head_title
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS_BEFORE_INFO:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        # The nested model is rebuilt from its own map representation.
        if m.get('screenshot_info') is not None:
            self.screenshot_info = ScreenshotInfo().from_map(m['screenshot_info'])
        if m.get('head_title') is not None:
            self.head_title = m.get('head_title')
        return self
class UserData(TeaModel):
    """Identity and registration details of a platform user."""

    # Serialization key order shared by to_map/from_map.
    _KEYS = (
        'account', 'account_type', 'cert_name', 'cert_no', 'cert_type',
        'contact_info', 'legal_person', 'reg_time', 'status', 'type',
    )
    # Fields enforced by validate(), in check order.
    _REQUIRED = (
        'account', 'account_type', 'cert_name', 'cert_no', 'cert_type',
        'reg_time', 'status', 'type',
    )

    def __init__(
        self,
        account: str = None,
        account_type: str = None,
        cert_name: str = None,
        cert_no: str = None,
        cert_type: str = None,
        contact_info: str = None,
        legal_person: str = None,
        reg_time: int = None,
        status: str = None,
        type: str = None,
    ):
        # User account
        self.account = account
        # Account type
        self.account_type = account_type
        # Certificate (ID document) name
        self.cert_name = cert_name
        # Certificate number
        self.cert_no = cert_no
        # Certificate type
        self.cert_type = cert_type
        # Contact information
        self.contact_info = contact_info
        # Legal representative (for企业 -> enterprise accounts)
        self.legal_person = legal_person
        # Registration time
        self.reg_time = reg_time
        # Account status
        self.status = status
        # User type
        self.type = type

    def validate(self):
        """Everything except contact_info and legal_person is mandatory."""
        for key in self._REQUIRED:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class GoodSkuInfo(TeaModel):
    """Price information for one authorization SKU."""

    _KEYS = ('sku_num', 'price')

    def __init__(
        self,
        sku_num: str = None,
        price: int = None,
    ):
        # Authorization SKU sequence number
        self.sku_num = sku_num
        # Authorization SKU price, in cents (CNY fen)
        self.price = price

    def validate(self):
        """Both fields are mandatory."""
        for key in self._KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class RightsInfo(TeaModel):
    """Copyright declaration details for a registered work."""

    # Serialization key order shared by to_map/from_map.
    _KEYS = (
        'work_name', 'work_type', 'completed_time', 'completed_place',
        'is_publish', 'publish_time', 'publish_place', 'publish_web',
        'is_author', 'author_cert_name', 'author_cert_no',
    )

    def __init__(
        self,
        work_name: str = None,
        work_type: str = None,
        completed_time: int = None,
        completed_place: str = None,
        is_publish: bool = None,
        publish_time: int = None,
        publish_place: str = None,
        publish_web: str = None,
        is_author: bool = None,
        author_cert_name: str = None,
        author_cert_no: str = None,
    ):
        # Work title (at most 128 chars)
        self.work_name = work_name
        # Work category
        self.work_type = work_type
        # When creation of the work was completed
        self.completed_time = completed_time
        # Where creation was completed (at most 128 chars)
        self.completed_place = completed_place
        # Whether the work has been published
        self.is_publish = is_publish
        # Publication time (required when is_publish is true)
        self.publish_time = publish_time
        # Publication place (required when is_publish is true; max 128 chars)
        self.publish_place = publish_place
        # Publication URL (required when is_publish is true; max 1024 chars)
        self.publish_web = publish_web
        # Whether the declarer is the author
        self.is_author = is_author
        # Author name (required when is_author is true; max 32 chars)
        self.author_cert_name = author_cert_name
        # Author ID-card number (required when is_author is true)
        self.author_cert_no = author_cert_no

    def validate(self):
        """Apply required / max-length checks in declaration order.

        Each row is (value, field name, required?, max length or None);
        the per-field order (required before length) matches the original
        so the first failure raised is unchanged.
        """
        checks = (
            (self.work_name, 'work_name', True, 128),
            (self.work_type, 'work_type', True, None),
            (self.completed_time, 'completed_time', True, None),
            (self.completed_place, 'completed_place', True, 128),
            (self.is_publish, 'is_publish', True, None),
            (self.publish_place, 'publish_place', False, 128),
            (self.publish_web, 'publish_web', False, 1024),
            (self.is_author, 'is_author', True, None),
            (self.author_cert_name, 'author_cert_name', False, 32),
            (self.author_cert_no, 'author_cert_no', False, 30),
        )
        for value, name, required, limit in checks:
            if required:
                self.validate_required(value, name)
            if limit is not None and value is not None:
                self.validate_max_length(value, name, limit)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class MonitorTask(TeaModel):
    """An infringement-monitoring task and its lifecycle state."""

    # Serialization key order shared by to_map/from_map.
    _KEYS = (
        'content_desc', 'content_title', 'file_length_in_second', 'keywords',
        'monitor_task_id', 'name', 'provider_id', 'result_count', 'scopes',
        'status', 'task_duration', 'task_end', 'task_start', 'file_type',
        'file_size', 'submit_content', 'submit_type', 'failure_msg',
    )
    # Fields enforced by validate(), in check order.
    _REQUIRED = (
        'keywords', 'monitor_task_id', 'name', 'scopes', 'status',
        'task_duration', 'file_type', 'submit_content',
    )

    def __init__(
        self,
        content_desc: str = None,
        content_title: str = None,
        file_length_in_second: int = None,
        keywords: List[str] = None,
        monitor_task_id: str = None,
        name: str = None,
        provider_id: str = None,
        result_count: int = None,
        scopes: List[str] = None,
        status: str = None,
        task_duration: int = None,
        task_end: int = None,
        task_start: int = None,
        file_type: str = None,
        file_size: int = None,
        submit_content: str = None,
        submit_type: str = None,
        failure_msg: str = None,
    ):
        # Content description
        self.content_desc = content_desc
        # Task title
        self.content_title = content_title
        # Duration of the monitored file, for video/audio monitoring
        self.file_length_in_second = file_length_in_second
        # Monitoring keywords
        self.keywords = keywords
        # Monitoring task ID
        self.monitor_task_id = monitor_task_id
        # Task name
        self.name = name
        # Monitoring provider id
        self.provider_id = provider_id
        # Number of results found so far
        self.result_count = result_count
        # Monitoring scopes
        self.scopes = scopes
        # Task status: INIT / WORKING / FINISH / FAILURE / DELETE
        self.status = status
        # Monitoring duration, in days
        self.task_duration = task_duration
        # Task end time
        self.task_end = task_end
        # Task start time
        self.task_start = task_start
        # File category: IMAGE / TEXT / VIDEO
        self.file_type = file_type
        # File size
        self.file_size = file_size
        # A URL when submit_type is URL, or an OSS fileId when it is FILE
        self.submit_content = submit_content
        # Submission type: FILE (default) for an uploaded asset, URL for a link
        self.submit_type = submit_type
        # Detailed error message when status is FAILURE
        self.failure_msg = failure_msg

    def validate(self):
        """Enforce the mandatory subset of fields."""
        for key in self._REQUIRED:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class VerifyStatementData(TeaModel):
    """Attestation data for a rights-declaration document."""

    _KEYS = ('statement_tx_hash', 'statement_file_id', 'statement_file_hash')

    def __init__(
        self,
        statement_tx_hash: str = None,
        statement_file_id: str = None,
        statement_file_hash: str = None,
    ):
        # Attestation transaction hash of the rights-declaration document
        self.statement_tx_hash = statement_tx_hash
        # Declaration file ID (one of file ID / file hash; hash wins if both set)
        self.statement_file_id = statement_file_id
        # Declaration file hash (one of file ID / file hash; hash wins if both set)
        self.statement_file_hash = statement_file_hash

    def validate(self):
        """No constraints: every field is optional."""

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class SecurityData(TeaModel):
    """Outcome of a content-security (moderation) scan."""

    _KEYS = ('result', 'risk_data')

    def __init__(
        self,
        result: str = None,
        risk_data: List[str] = None,
    ):
        # Overall content-security check result
        self.result = result
        # Content items flagged as risky
        self.risk_data = risk_data

    def validate(self):
        """Only the overall result is mandatory."""
        self.validate_required(self.result, 'result')

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class VerifyUserData(TeaModel):
    """Registrant identity data referenced during verification."""

    _KEYS = ('cert_name', 'cert_no', 'cert_type', 'register_person_tx_hash')

    def __init__(
        self,
        cert_name: str = None,
        cert_no: str = None,
        cert_type: str = None,
        register_person_tx_hash: str = None,
    ):
        # Certificate (ID document) name
        self.cert_name = cert_name
        # Certificate number
        self.cert_no = cert_no
        # Certificate type
        self.cert_type = cert_type
        # Attestation transaction hash of the registrant information
        self.register_person_tx_hash = register_person_tx_hash

    def validate(self):
        """No constraints: every field is optional."""

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class VerifyEvidenceData(TeaModel):
    """Transaction hashes of the audit-log attestations."""

    _KEYS = ('process_log_tx_hash', 'check_log_tx_hash')

    def __init__(
        self,
        process_log_tx_hash: str = None,
        check_log_tx_hash: str = None,
    ):
        # Transaction hash of the operation log
        self.process_log_tx_hash = process_log_tx_hash
        # Transaction hash of the self-check (self-cleaning) log
        self.check_log_tx_hash = check_log_tx_hash

    def validate(self):
        """No constraints: every field is optional."""

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class GetUploadurlRequest(TeaModel):
    """Request for a pre-signed OSS upload URL."""

    _KEYS = ('auth_token', 'product_instance_id', 'file_name', 'client_token')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        file_name: str = None,
        client_token: str = None,
    ):
        # Authorization token under the OAuth model
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # File name
        self.file_name = file_name
        # Idempotency token: generate a client-side value unique per request.
        # ASCII only, at most 64 characters.
        self.client_token = client_token

    def validate(self):
        """file_name is mandatory (<=128 chars); client_token is capped at 64."""
        checks = (
            (self.file_name, 'file_name', True, 128),
            (self.client_token, 'client_token', False, 64),
        )
        for value, name, required, limit in checks:
            if required:
                self.validate_required(value, name)
            if value is not None:
                self.validate_max_length(value, name, limit)

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class GetUploadurlResponse(TeaModel):
    """Response carrying the pre-signed OSS upload URL and file id."""

    _KEYS = ('req_msg_id', 'result_code', 'result_msg', 'url', 'file_id')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        url: str = None,
        file_id: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded
        self.result_code = result_code
        # Textual description of any error
        self.result_msg = result_msg
        # Authorized OSS access URL
        self.url = url
        # OSS file id
        self.file_id = file_id

    def validate(self):
        """No constraints: every field is optional."""

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class AddHashregisterRequest(TeaModel):
    """Request to register a file by its hash (without uploading content)."""

    # Simple keys before / after the nested proxy_info model,
    # in original serialization order.
    _KEYS_BEFORE_PROXY = (
        'auth_token', 'product_instance_id', 'hash', 'length', 'hash_type',
        'type', 'name', 'memo', 'cert_name', 'cert_no', 'cert_type',
        'create_certificate', 'certificate_type', 'create_package', 'org_id',
    )
    _KEYS_AFTER_PROXY = ('phone_num', 'feature_file_id', 'client_token')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        hash: str = None,
        length: int = None,
        hash_type: str = None,
        type: str = None,
        name: str = None,
        memo: str = None,
        cert_name: str = None,
        cert_no: str = None,
        cert_type: str = None,
        create_certificate: bool = None,
        certificate_type: str = None,
        create_package: bool = None,
        org_id: str = None,
        proxy_info: ProxyData = None,
        phone_num: str = None,
        feature_file_id: str = None,
        client_token: str = None,
    ):
        # Authorization token under the OAuth model
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # SHA256 hash of the file
        self.hash = hash
        # File size
        self.length = length
        # Hash algorithm
        self.hash_type = hash_type
        # File type
        self.type = type
        # File name
        self.name = name
        # Remarks
        self.memo = memo
        # Certificate (ID document) name
        self.cert_name = cert_name
        # Certificate number
        self.cert_no = cert_no
        # Certificate type
        self.cert_type = cert_type
        # Whether to auto-create a notarized custody letter
        self.create_certificate = create_certificate
        # Certificate type to create
        self.certificate_type = certificate_type
        # Whether to create an evidence package (default: no)
        self.create_package = create_package
        # Notary office id
        self.org_id = org_id
        # Proxy (agent) information
        self.proxy_info = proxy_info
        # Phone number
        self.phone_num = phone_num
        # OSS id of the feature file
        self.feature_file_id = feature_file_id
        # Idempotency token
        self.client_token = client_token

    def validate(self):
        """Apply required / max-length checks in declaration order.

        Each row is (value, field name, required?, max length or None);
        ordering matches the original so the first failure raised is
        unchanged. The nested proxy model validates itself when present.
        """
        checks = (
            (self.hash, 'hash', True, None),
            (self.length, 'length', True, None),
            (self.hash_type, 'hash_type', True, None),
            (self.type, 'type', True, None),
            (self.name, 'name', True, 128),
            (self.memo, 'memo', False, 512),
            (self.cert_name, 'cert_name', True, None),
            (self.cert_no, 'cert_no', True, None),
            (self.cert_type, 'cert_type', True, None),
        )
        for value, name, required, limit in checks:
            if required:
                self.validate_required(value, name)
            if limit is not None and value is not None:
                self.validate_max_length(value, name, limit)
        if self.proxy_info:
            self.proxy_info.validate()

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS_BEFORE_PROXY:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        # The nested model serializes through its own to_map().
        if self.proxy_info is not None:
            result['proxy_info'] = self.proxy_info.to_map()
        for key in self._KEYS_AFTER_PROXY:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS_BEFORE_PROXY:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        # The nested model is rebuilt from its own map representation.
        if m.get('proxy_info') is not None:
            self.proxy_info = ProxyData().from_map(m['proxy_info'])
        for key in self._KEYS_AFTER_PROXY:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class AddHashregisterResponse(TeaModel):
    """Response to a hash-registration request."""

    _KEYS = ('req_msg_id', 'result_code', 'result_msg', 'register_id')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        register_id: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded
        self.result_code = result_code
        # Textual description of any error
        self.result_msg = result_msg
        # Registration id
        self.register_id = register_id

    def validate(self):
        """No constraints: every field is optional."""

    def to_map(self):
        """Serialize to a dict, omitting unset (None) fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; only non-None values are applied."""
        m = m or dict()
        for key in self._KEYS:
            value = m.get(key)
            if value is not None:
                setattr(self, key, value)
        return self
class AddRegisterRequest(TeaModel):
    """Request model for adding a copyright registration."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        file_id: str = None,
        create_certificate: bool = None,
        certificate_type: str = None,
        create_package: bool = None,
        org_id: str = None,
        name: str = None,
        type: str = None,
        memo: str = None,
        cert_name: str = None,
        cert_no: str = None,
        cert_type: str = None,
        phone_num: str = None,
        client_token: str = None,
        proxy_info: ProxyData = None,
        sync_info: AccountData = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # OSS id of the file.
        self.file_id = file_id
        # Whether to automatically create a notary certificate; defaults to no.
        self.create_certificate = create_certificate
        # Certificate type.
        self.certificate_type = certificate_type
        # Whether to create an evidence package; defaults to no.
        self.create_package = create_package
        # Notary office id.
        self.org_id = org_id
        # File name.
        self.name = name
        # File type.
        self.type = type
        # Remark.
        self.memo = memo
        # ID certificate name.
        self.cert_name = cert_name
        # ID certificate number.
        self.cert_no = cert_no
        # ID certificate type.
        self.cert_type = cert_type
        # Phone number.
        self.phone_num = phone_num
        # Idempotency token.
        self.client_token = client_token
        # Proxy (agent) information.
        self.proxy_info = proxy_info
        # Account information to synchronize.
        self.sync_info = sync_info
    def validate(self):
        """Check required fields, length limits and nested models."""
        self.validate_required(self.file_id, 'file_id')
        self.validate_required(self.name, 'name')
        if self.name is not None:
            self.validate_max_length(self.name, 'name', 128)
        self.validate_required(self.type, 'type')
        if self.memo is not None:
            self.validate_max_length(self.memo, 'memo', 512)
        self.validate_required(self.cert_name, 'cert_name')
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.cert_type, 'cert_type')
        if self.proxy_info:
            self.proxy_info.validate()
        if self.sync_info:
            self.sync_info.validate()
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'file_id',
            'create_certificate', 'certificate_type', 'create_package',
            'org_id', 'name', 'type', 'memo', 'cert_name', 'cert_no',
            'cert_type', 'phone_num', 'client_token',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        # Nested models serialize through their own to_map().
        if self.proxy_info is not None:
            result['proxy_info'] = self.proxy_info.to_map()
        if self.sync_info is not None:
            result['sync_info'] = self.sync_info.to_map()
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'file_id',
            'create_certificate', 'certificate_type', 'create_package',
            'org_id', 'name', 'type', 'memo', 'cert_name', 'cert_no',
            'cert_type', 'phone_num', 'client_token',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('proxy_info') is not None:
            self.proxy_info = ProxyData().from_map(m['proxy_info'])
        if m.get('sync_info') is not None:
            self.sync_info = AccountData().from_map(m['sync_info'])
        return self
class AddRegisterResponse(TeaModel):
    """Response model for the add-register operation."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        register_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Registration id.
        self.register_id = register_id
    def validate(self):
        """No constraints to enforce on this model."""
        pass
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'register_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'register_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryRegisterstatusRequest(TeaModel):
    """Request model for querying the status of a registration."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        register_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Registration id.
        self.register_id = register_id
    def validate(self):
        """register_id is the only mandatory field."""
        self.validate_required(self.register_id, 'register_id')
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'register_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'register_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryRegisterstatusResponse(TeaModel):
    """Response model describing the status of a registration."""
    # Wire keys whose values are copied verbatim (no nested model involved).
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        status: str = None,
        timestamp: int = None,
        hash: str = None,
        tx_hash: str = None,
        block_height: int = None,
        tsr: str = None,
        certificate_url: str = None,
        certificate_storage_no: str = None,
        certificate_time_url: str = None,
        package_url: str = None,
        statement_url: str = None,
        statement_tx_hash: str = None,
        register_person_tx_hash: str = None,
        security: SecurityData = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Registration status.
        self.status = status
        # Evidence-deposit timestamp.
        self.timestamp = timestamp
        # File hash.
        self.hash = hash
        # Unified evidence number (deposit transaction hash).
        self.tx_hash = tx_hash
        # Block height of the deposit.
        self.block_height = block_height
        # TSR information.
        self.tsr = tsr
        # Download URL of the notary-office certificate.
        self.certificate_url = certificate_url
        # Notary-office certificate number.
        self.certificate_storage_no = certificate_storage_no
        # Download URL of the time-authority certificate.
        self.certificate_time_url = certificate_time_url
        # Evidence-package download URL (present only when status is SUCCESS
        # and the request asked for a package).
        self.package_url = package_url
        # Rights-statement download URL; returned only if rights info was
        # supplied and a statement was generated.
        self.statement_url = statement_url
        # Rights-statement deposit transaction hash; returned only if rights
        # info was supplied and a statement was generated.
        self.statement_tx_hash = statement_tx_hash
        # Deposit transaction hash of the registrant information.
        self.register_person_tx_hash = register_person_tx_hash
        # Security information.
        self.security = security
    def validate(self):
        """Delegate validation to the nested security model, if present."""
        if self.security:
            self.security.validate()
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in (
            'req_msg_id', 'result_code', 'result_msg', 'status', 'timestamp',
            'hash', 'tx_hash', 'block_height', 'tsr', 'certificate_url',
            'certificate_storage_no', 'certificate_time_url', 'package_url',
            'statement_url', 'statement_tx_hash', 'register_person_tx_hash',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.security is not None:
            result['security'] = self.security.to_map()
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in (
            'req_msg_id', 'result_code', 'result_msg', 'status', 'timestamp',
            'hash', 'tx_hash', 'block_height', 'tsr', 'certificate_url',
            'certificate_storage_no', 'certificate_time_url', 'package_url',
            'statement_url', 'statement_tx_hash', 'register_person_tx_hash',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('security') is not None:
            self.security = SecurityData().from_map(m['security'])
        return self
class CreateCertificateRequest(TeaModel):
    """Request model for generating a certificate for a registration."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        register_id: str = None,
        certificate_type: str = None,
        features_type: str = None,
        org_id: str = None,
        proxy_info: ProxyData = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Copyright registration id.
        self.register_id = register_id
        # Certificate type.
        self.certificate_type = certificate_type
        # Feature type.
        self.features_type = features_type
        # Notary office id.
        self.org_id = org_id
        # Proxy (agent) information.
        self.proxy_info = proxy_info
    def validate(self):
        """Check required fields and the nested proxy model."""
        self.validate_required(self.register_id, 'register_id')
        self.validate_required(self.certificate_type, 'certificate_type')
        self.validate_required(self.features_type, 'features_type')
        if self.proxy_info:
            self.proxy_info.validate()
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'register_id',
            'certificate_type', 'features_type', 'org_id',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.proxy_info is not None:
            result['proxy_info'] = self.proxy_info.to_map()
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'register_id',
            'certificate_type', 'features_type', 'org_id',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('proxy_info') is not None:
            self.proxy_info = ProxyData().from_map(m['proxy_info'])
        return self
class CreateCertificateResponse(TeaModel):
    """Response model for the create-certificate operation."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        status: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Result of the certificate generation.
        self.status = status
    def validate(self):
        """No constraints to enforce on this model."""
        pass
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'status'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'status'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetCertificateRequest(TeaModel):
    """Request model for downloading a generated certificate."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        certificate_type: str = None,
        features_type: str = None,
        register_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Certificate type.
        self.certificate_type = certificate_type
        # Feature type.
        self.features_type = features_type
        # Copyright registration id.
        self.register_id = register_id
    def validate(self):
        """All three business fields are mandatory."""
        self.validate_required(self.certificate_type, 'certificate_type')
        self.validate_required(self.features_type, 'features_type')
        self.validate_required(self.register_id, 'register_id')
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'certificate_type',
            'features_type', 'register_id',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'certificate_type',
            'features_type', 'register_id',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class GetCertificateResponse(TeaModel):
    """Response model carrying the certificate download link and status."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        url: str = None,
        status: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Certificate download URL.
        self.url = url
        # Certificate generation status.
        self.status = status
    def validate(self):
        """No constraints to enforce on this model."""
        pass
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'url', 'status'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'url', 'status'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CreateMonitorTaskRequest(TeaModel):
    """Request model for creating an infringement-monitoring task."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        submit_content: str = None,
        submit_type: str = None,
        file_type: str = None,
        task_name: str = None,
        content_title: str = None,
        keywords: List[str] = None,
        content_desc: str = None,
        provider_id: str = None,
        scopes: List[str] = None,
        task_start: int = None,
        task_duration: int = None,
        proxy_info: ProxyData = None,
        client_token: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # A URL when submit_type is URL, otherwise an OSS file id.
        self.submit_content = submit_content
        # Submission type: FILE or URL; defaults to FILE.
        # FILE means the material was uploaded as a file, URL means a URL.
        self.submit_type = submit_type
        # File category: IMAGE, TEXT or VIDEO.
        self.file_type = file_type
        # Task name (at most 100 characters).
        self.task_name = task_name
        # Title of the monitored content (at most 100 characters).
        self.content_title = content_title
        # Keywords (at most 200 characters).
        self.keywords = keywords
        # Content description (at most 1000 characters).
        self.content_desc = content_desc
        # Monitoring provider id.
        self.provider_id = provider_id
        # Monitoring scope; defaults to ALL. SITE = PC websites, APP = apps.
        self.scopes = scopes
        # Task start time in epoch milliseconds; server time when omitted.
        self.task_start = task_start
        # Task duration in days; defaults to 90. 0 means a one-off check.
        self.task_duration = task_duration
        # Proxy (agent) information.
        self.proxy_info = proxy_info
        # Idempotency token generated by the client: unique per request,
        # ASCII only, at most 64 characters.
        self.client_token = client_token
    def validate(self):
        """Check required fields and the nested proxy model."""
        self.validate_required(self.submit_content, 'submit_content')
        self.validate_required(self.file_type, 'file_type')
        self.validate_required(self.task_name, 'task_name')
        self.validate_required(self.content_title, 'content_title')
        self.validate_required(self.keywords, 'keywords')
        if self.proxy_info:
            self.proxy_info.validate()
        self.validate_required(self.client_token, 'client_token')
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'submit_content',
            'submit_type', 'file_type', 'task_name', 'content_title',
            'keywords', 'content_desc', 'provider_id', 'scopes',
            'task_start', 'task_duration', 'client_token',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.proxy_info is not None:
            result['proxy_info'] = self.proxy_info.to_map()
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'submit_content',
            'submit_type', 'file_type', 'task_name', 'content_title',
            'keywords', 'content_desc', 'provider_id', 'scopes',
            'task_start', 'task_duration', 'client_token',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('proxy_info') is not None:
            self.proxy_info = ProxyData().from_map(m['proxy_info'])
        return self
class CreateMonitorTaskResponse(TeaModel):
    """Response model for the create-monitor-task operation."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        monitor_task_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Monitoring task id.
        self.monitor_task_id = monitor_task_id
    def validate(self):
        """No constraints to enforce on this model."""
        pass
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'monitor_task_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'monitor_task_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class StopMonitorTaskRequest(TeaModel):
    """Request model for stopping a monitoring task."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        monitor_task_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Monitoring task id.
        self.monitor_task_id = monitor_task_id
    def validate(self):
        """monitor_task_id is the only mandatory field."""
        self.validate_required(self.monitor_task_id, 'monitor_task_id')
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'monitor_task_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'monitor_task_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class StopMonitorTaskResponse(TeaModel):
    """Response model for the stop-monitor-task operation."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        result: bool = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Outcome of the stop operation.
        self.result = result
    def validate(self):
        """No constraints to enforce on this model."""
        pass
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        # Note: the 'result' wire key refers to self.result, not the local dict.
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'result'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'result'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryMonitorTaskRequest(TeaModel):
    """Request model for querying a monitoring task."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        monitor_task_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Monitoring task id.
        self.monitor_task_id = monitor_task_id
    def validate(self):
        """monitor_task_id is the only mandatory field."""
        self.validate_required(self.monitor_task_id, 'monitor_task_id')
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'monitor_task_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'monitor_task_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryMonitorTaskResponse(TeaModel):
    """Response model carrying the queried monitoring task."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        monitor_task: MonitorTask = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Monitoring task details.
        self.monitor_task = monitor_task
    def validate(self):
        """Delegate validation to the nested task model, if present."""
        if self.monitor_task:
            self.monitor_task.validate()
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.monitor_task is not None:
            result['monitor_task'] = self.monitor_task.to_map()
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('monitor_task') is not None:
            self.monitor_task = MonitorTask().from_map(m['monitor_task'])
        return self
class QueryMonitorResultRequest(TeaModel):
    """Request model for paging through monitoring results."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        monitor_task_id: str = None,
        page_index: int = None,
        page_size: int = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Monitoring task id.
        self.monitor_task_id = monitor_task_id
        # Page index (> 0).
        self.page_index = page_index
        # Page size (1-200).
        self.page_size = page_size
    def validate(self):
        """Task id and both paging parameters are mandatory."""
        self.validate_required(self.monitor_task_id, 'monitor_task_id')
        self.validate_required(self.page_index, 'page_index')
        self.validate_required(self.page_size, 'page_size')
    def to_map(self):
        """Serialize every non-None attribute into a plain dict."""
        result = dict()
        for key in (
            'auth_token', 'product_instance_id', 'monitor_task_id',
            'page_index', 'page_size',
        ):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict, ignoring absent/None keys."""
        m = m or dict()
        for key in (
            'auth_token', 'product_instance_id', 'monitor_task_id',
            'page_index', 'page_size',
        ):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryMonitorResultResponse(TeaModel):
    """Response model carrying a page of monitoring results."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        monitor_result_list: List[MonitorResult] = None,
        page_index: int = None,
        page_size: int = None,
        result_count: int = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Monitoring results for this page.
        self.monitor_result_list = monitor_result_list
        # Page index (> 0).
        self.page_index = page_index
        # Page size (1-200).
        self.page_size = page_size
        # Total number of monitoring results.
        self.result_count = result_count
    def validate(self):
        """Validate each non-empty entry in the result list."""
        if self.monitor_result_list:
            for item in self.monitor_result_list:
                if item:
                    item.validate()
    def to_map(self):
        """Serialize attributes into a plain dict.

        The list key is always emitted (as []) even when the field is None,
        mirroring the generated-SDK convention for repeated fields.
        """
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['monitor_result_list'] = []
        if self.monitor_result_list is not None:
            result['monitor_result_list'] = [
                item.to_map() if item else None
                for item in self.monitor_result_list
            ]
        for key in ('page_index', 'page_size', 'result_count'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict; the list always resets to []."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.monitor_result_list = []
        raw_list = m.get('monitor_result_list')
        if raw_list is not None:
            self.monitor_result_list = [
                MonitorResult().from_map(item) for item in raw_list
            ]
        for key in ('page_index', 'page_size', 'result_count'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListMonitorProviderRequest(TeaModel):
    """Request model for listing monitoring providers by file type."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        monitor_types: List[MonitorType] = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # File types that need monitoring.
        self.monitor_types = monitor_types
    def validate(self):
        """monitor_types is mandatory; validate each non-empty entry."""
        self.validate_required(self.monitor_types, 'monitor_types')
        if self.monitor_types:
            for item in self.monitor_types:
                if item:
                    item.validate()
    def to_map(self):
        """Serialize attributes into a plain dict.

        The list key is always emitted (as []) even when the field is None,
        mirroring the generated-SDK convention for repeated fields.
        """
        result = dict()
        for key in ('auth_token', 'product_instance_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['monitor_types'] = []
        if self.monitor_types is not None:
            result['monitor_types'] = [
                item.to_map() if item else None for item in self.monitor_types
            ]
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict; the list always resets to []."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.monitor_types = []
        raw_types = m.get('monitor_types')
        if raw_types is not None:
            self.monitor_types = [
                MonitorType().from_map(item) for item in raw_types
            ]
        return self
class ListMonitorProviderResponse(TeaModel):
    """Response model listing available monitoring providers."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        provider_list: List[MonitorProviderType] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally indicates a successful call.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # List of monitoring-capability provider types.
        self.provider_list = provider_list
    def validate(self):
        """Validate each non-empty entry in the provider list."""
        if self.provider_list:
            for item in self.provider_list:
                if item:
                    item.validate()
    def to_map(self):
        """Serialize attributes into a plain dict.

        The list key is always emitted (as []) even when the field is None,
        mirroring the generated-SDK convention for repeated fields.
        """
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['provider_list'] = []
        if self.provider_list is not None:
            result['provider_list'] = [
                item.to_map() if item else None for item in self.provider_list
            ]
        return result
    def from_map(self, m: dict = None):
        """Rehydrate from a plain dict; the list always resets to []."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.provider_list = []
        raw_providers = m.get('provider_list')
        if raw_providers is not None:
            self.provider_list = [
                MonitorProviderType().from_map(item) for item in raw_providers
            ]
        return self
class CreateRecodescreenRequest(TeaModel):
    """Request model for creating a screen-recording evidence task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        name: str = None,
        memo: str = None,
        url: List[str] = None,
        type: str = None,
        area: str = None,
        screen_code: str = None,
        max_time_in_min: int = None,
        wait_in_min: int = None,
        cross_region: str = None,
        ip_type: str = None,
        callback_url: str = None,
        org_id: str = None,
        cert_name: str = None,
        cert_no: str = None,
        cert_type: str = None,
        legal_person_name: str = None,
        legal_person_no: str = None,
        agent: str = None,
        agent_no: str = None,
        phone_num: str = None,
        proxy_info: ProxyData = None,
        client_token: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Evidence task name (at most 128 characters)
        self.name = name
        # Evidence memo (at most 512 characters)
        self.memo = memo
        # URLs to pre-check and pre-open (no more than 10 entries)
        self.url = url
        # Evidence type; see appendix RecordScreenType
        self.type = type
        # Evidence server region; see appendix RecordScreenArea
        self.area = area
        # Custom recording-desktop identifier; currently only the default DEFAULT
        self.screen_code = screen_code
        # Maximum recording time in minutes; must be within [6, 300]
        self.max_time_in_min = max_time_in_min
        # Minutes to wait after the server enters the ready state before it
        # aborts if recording has not started; default 1, must be within [1, 10]
        self.wait_in_min = wait_in_min
        # Whether other regions may be used when the selected one has no
        # machines; currently only the default FORBID
        self.cross_region = cross_region
        # IP region; currently only the default ANY
        self.ip_type = ip_type
        # Callback URL
        self.callback_url = callback_url
        # Notary office id
        self.org_id = org_id
        # Evidence collector's certificate name
        self.cert_name = cert_name
        # Evidence collector's certificate number
        self.cert_no = cert_no
        # Evidence collector's certificate type; see appendix CertType
        self.cert_type = cert_type
        # Legal representative name (required when cert_type is BUSINESS_LICENSE)
        self.legal_person_name = legal_person_name
        # Legal representative id number (required when cert_type is BUSINESS_LICENSE)
        self.legal_person_no = legal_person_no
        # Enterprise collector name (required when cert_type is BUSINESS_LICENSE)
        self.agent = agent
        # Enterprise collector id number (required when cert_type is BUSINESS_LICENSE)
        self.agent_no = agent_no
        # Collector's phone number, needed for the notary certificate and
        # registered with the notary office; e.g. (86-573)2651630 or (86)13738258505
        self.phone_num = phone_num
        # Proxy information
        self.proxy_info = proxy_info
        # Client-generated idempotency token, unique per request; ASCII only,
        # at most 64 characters
        self.client_token = client_token

    # Keys whose values serialize/deserialize verbatim (no nested model).
    _PLAIN_KEYS = (
        'auth_token', 'product_instance_id', 'name', 'memo', 'url', 'type',
        'area', 'screen_code', 'max_time_in_min', 'wait_in_min',
        'cross_region', 'ip_type', 'callback_url', 'org_id', 'cert_name',
        'cert_no', 'cert_type', 'legal_person_name', 'legal_person_no',
        'agent', 'agent_no', 'phone_num',
    )

    def validate(self):
        """Validate required fields and the documented length limits."""
        self.validate_required(self.name, 'name')
        if self.name is not None:
            self.validate_max_length(self.name, 'name', 128)
        if self.memo is not None:
            self.validate_max_length(self.memo, 'memo', 512)
        self.validate_required(self.type, 'type')
        self.validate_required(self.area, 'area')
        self.validate_required(self.max_time_in_min, 'max_time_in_min')
        self.validate_required(self.cert_name, 'cert_name')
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.cert_type, 'cert_type')
        if self.proxy_info:
            self.proxy_info.validate()
        self.validate_required(self.client_token, 'client_token')
        # Fix: the field is documented as "at most 64 characters" and the
        # sibling CreateScreenshotRequest enforces this limit; this model
        # previously omitted the check.
        if self.client_token is not None:
            self.validate_max_length(self.client_token, 'client_token', 64)

    def to_map(self):
        """Serialize non-None fields; proxy_info via its own to_map()."""
        result = dict()
        for key in self._PLAIN_KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        # Nested model and trailing key kept in the original dict order.
        if self.proxy_info is not None:
            result['proxy_info'] = self.proxy_info.to_map()
        if self.client_token is not None:
            result['client_token'] = self.client_token
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in self._PLAIN_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('proxy_info') is not None:
            self.proxy_info = ProxyData().from_map(m['proxy_info'])
        if m.get('client_token') is not None:
            self.client_token = m.get('client_token')
        return self
class CreateRecodescreenResponse(TeaModel):
    """Response model for creating a screen-recording evidence task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        evidence_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Evidence id
        self.evidence_id = evidence_id

    def validate(self):
        """Nothing to validate for this model."""
        pass

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'evidence_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'evidence_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryRecodescreenRequest(TeaModel):
    """Request model for querying a screen-recording evidence task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        evidence_id: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Evidence id
        self.evidence_id = evidence_id

    def validate(self):
        """evidence_id is mandatory."""
        self.validate_required(self.evidence_id, 'evidence_id')

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'evidence_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'evidence_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryRecodescreenResponse(TeaModel):
    """Response model for querying a screen-recording evidence task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        evidence_id: str = None,
        status: str = None,
        operate_url: str = None,
        data: RecordScreenData = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Evidence id
        self.evidence_id = evidence_id
        # Evidence status; see appendix RecordScreenStatus
        self.status = status
        # Operation URL (returned while status is INIT or DOING; may be reopened)
        self.operate_url = operate_url
        # Evidence detail (populated only when status is PACKAGING, SUCCESS or FAIL)
        self.data = data

    def validate(self):
        """Only the nested data model needs validation."""
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize non-None fields; nested data via its own to_map()."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'evidence_id', 'status', 'operate_url'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.data is not None:
            result['data'] = self.data.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'evidence_id', 'status', 'operate_url'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('data') is not None:
            self.data = RecordScreenData().from_map(m['data'])
        return self
class QueryUserRequest(TeaModel):
    """Request model for querying a user by token."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        token: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # User token, obtained from a cookie or the redirect link
        self.token = token

    def validate(self):
        """token is mandatory."""
        self.validate_required(self.token, 'token')

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'token'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'token'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryUserResponse(TeaModel):
    """Response model for querying a user."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        login: bool = None,
        user: UserData = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Whether the user is logged in
        self.login = login
        # User information
        self.user = user

    def validate(self):
        """Only the nested user model needs validation."""
        if self.user:
            self.user.validate()

    def to_map(self):
        """Serialize non-None fields; nested user via its own to_map()."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'login'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.user is not None:
            result['user'] = self.user.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'login'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('user') is not None:
            self.user = UserData().from_map(m['user'])
        return self
class QueryUserListRequest(TeaModel):
    """Request model for paging through users."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        page_index: int = None,
        page_size: int = None,
        status: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Page number, starting from 1
        self.page_index = page_index
        # Page size, 1~100
        self.page_size = page_size
        # User status
        self.status = status

    def validate(self):
        """page_index >= 1 and 1 <= page_size <= 100 are required."""
        self.validate_required(self.page_index, 'page_index')
        if self.page_index is not None:
            self.validate_minimum(self.page_index, 'page_index', 1)
        self.validate_required(self.page_size, 'page_size')
        if self.page_size is not None:
            self.validate_maximum(self.page_size, 'page_size', 100)
            self.validate_minimum(self.page_size, 'page_size', 1)

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'page_index',
                    'page_size', 'status'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'page_index',
                    'page_size', 'status'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryUserListResponse(TeaModel):
    """Response model for paging through users."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        page_index: int = None,
        page_size: int = None,
        total_count: int = None,
        user_list: List[UserData] = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Page number, starting from 1
        self.page_index = page_index
        # Page size, 1~100
        self.page_size = page_size
        # Total number of records
        self.total_count = total_count
        # List of user records
        self.user_list = user_list

    def validate(self):
        """Each user entry validates itself."""
        for entry in self.user_list or []:
            if entry:
                entry.validate()

    def to_map(self):
        """Serialize non-None fields; user_list always maps to a list."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'page_index', 'page_size', 'total_count'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['user_list'] = [
            entry.to_map() if entry else None
            for entry in (self.user_list or [])
        ]
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'page_index', 'page_size', 'total_count'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        raw_users = m.get('user_list')
        self.user_list = (
            [UserData().from_map(item) for item in raw_users]
            if raw_users is not None else []
        )
        return self
class CreateScreenshotRequest(TeaModel):
    """Request model for creating a web-page screenshot evidence task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        name: str = None,
        memo: str = None,
        url: str = None,
        org_id: str = None,
        cert_name: str = None,
        cert_no: str = None,
        cert_type: str = None,
        legal_person_name: str = None,
        legal_person_no: str = None,
        agent_name: str = None,
        agent_no: str = None,
        phone_num: str = None,
        proxy_info: ProxyData = None,
        client_token: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Evidence task name (at most 128 characters)
        self.name = name
        # Evidence memo (at most 512 characters)
        self.memo = memo
        # Evidence URL (at most 2048 characters)
        self.url = url
        # Notary office id
        self.org_id = org_id
        # Evidence collector's certificate name
        self.cert_name = cert_name
        # Evidence collector's certificate number
        self.cert_no = cert_no
        # Evidence collector's certificate type
        self.cert_type = cert_type
        # Legal representative name (required when cert_type is BUSINESS_LICENSE)
        self.legal_person_name = legal_person_name
        # Legal representative id number (required when cert_type is BUSINESS_LICENSE)
        self.legal_person_no = legal_person_no
        # Enterprise collector name (required when cert_type is BUSINESS_LICENSE)
        self.agent_name = agent_name
        # Enterprise collector id number (required when cert_type is BUSINESS_LICENSE)
        self.agent_no = agent_no
        # Collector's phone number, needed for the notary certificate and
        # registered with the notary office; e.g. (86-573)2651630 or (86)13738258505
        self.phone_num = phone_num
        # Proxy information
        self.proxy_info = proxy_info
        # Client-generated idempotency token, unique per request; ASCII only,
        # at most 64 characters
        self.client_token = client_token

    def validate(self):
        """Validate required fields and documented length limits."""
        self.validate_required(self.name, 'name')
        if self.name is not None:
            self.validate_max_length(self.name, 'name', 128)
        if self.memo is not None:
            self.validate_max_length(self.memo, 'memo', 512)
        self.validate_required(self.url, 'url')
        if self.url is not None:
            self.validate_max_length(self.url, 'url', 2048)
        self.validate_required(self.cert_name, 'cert_name')
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.cert_type, 'cert_type')
        if self.proxy_info:
            self.proxy_info.validate()
        self.validate_required(self.client_token, 'client_token')
        if self.client_token is not None:
            self.validate_max_length(self.client_token, 'client_token', 64)

    def to_map(self):
        """Serialize non-None fields; proxy_info via its own to_map()."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'name', 'memo',
                    'url', 'org_id', 'cert_name', 'cert_no', 'cert_type',
                    'legal_person_name', 'legal_person_no', 'agent_name',
                    'agent_no', 'phone_num'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.proxy_info is not None:
            result['proxy_info'] = self.proxy_info.to_map()
        if self.client_token is not None:
            result['client_token'] = self.client_token
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'name', 'memo',
                    'url', 'org_id', 'cert_name', 'cert_no', 'cert_type',
                    'legal_person_name', 'legal_person_no', 'agent_name',
                    'agent_no', 'phone_num'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('proxy_info') is not None:
            self.proxy_info = ProxyData().from_map(m['proxy_info'])
        if m.get('client_token') is not None:
            self.client_token = m.get('client_token')
        return self
class CreateScreenshotResponse(TeaModel):
    """Response model for creating a web-page screenshot evidence task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        evidence_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Evidence id
        self.evidence_id = evidence_id

    def validate(self):
        """Nothing to validate for this model."""
        pass

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'evidence_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'evidence_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryScreenshotRequest(TeaModel):
    """Request model for querying a screenshot evidence task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        evidence_id: str = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Evidence id
        self.evidence_id = evidence_id

    def validate(self):
        """evidence_id is mandatory."""
        self.validate_required(self.evidence_id, 'evidence_id')

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'evidence_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'evidence_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class QueryScreenshotResponse(TeaModel):
    """Response model for querying a screenshot evidence task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        evidence_id: str = None,
        status: str = None,
        data: ScreenshotData = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Evidence id
        self.evidence_id = evidence_id
        # Evidence status
        self.status = status
        # Web-page screenshot detail
        self.data = data

    def validate(self):
        """Only the nested data model needs validation."""
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize non-None fields; nested data via its own to_map()."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'evidence_id', 'status'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        if self.data is not None:
            result['data'] = self.data.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg',
                    'evidence_id', 'status'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        if m.get('data') is not None:
            self.data = ScreenshotData().from_map(m['data'])
        return self
class VerifyBlockchainRequest(TeaModel):
    """Request model for verifying evidence data against the blockchain."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        tx_hash: str = None,
        file_hash: str = None,
        file_id: str = None,
        user_data: VerifyUserData = None,
        statement_data: VerifyStatementData = None,
        evidence_data: VerifyEvidenceData = None,
    ):
        # Authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unified evidence number (deposit transaction hash)
        self.tx_hash = tx_hash
        # File hash in full HEX form, e.g. 6460c949f8a347eb...
        # (one of file_hash / file_id is required; file_hash wins if both given)
        self.file_hash = file_hash
        # File OSS id (upload size limits: text 100M, image 1G, video/archive 2G)
        # (one of file_hash / file_id is required; file_hash wins if both given)
        self.file_id = file_id
        # User info to verify
        self.user_data = user_data
        # Rights-statement info to verify
        self.statement_data = statement_data
        # Evidence info to verify
        self.evidence_data = evidence_data

    def validate(self):
        """tx_hash is mandatory; nested models validate themselves."""
        self.validate_required(self.tx_hash, 'tx_hash')
        for nested in (self.user_data, self.statement_data, self.evidence_data):
            if nested:
                nested.validate()

    def to_map(self):
        """Serialize non-None fields; nested models via their own to_map()."""
        result = dict()
        for key in ('auth_token', 'product_instance_id', 'tx_hash',
                    'file_hash', 'file_id'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        for key in ('user_data', 'statement_data', 'evidence_data'):
            nested = getattr(self, key)
            if nested is not None:
                result[key] = nested.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('auth_token', 'product_instance_id', 'tx_hash',
                    'file_hash', 'file_id'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        for key, model_cls in (
            ('user_data', VerifyUserData),
            ('statement_data', VerifyStatementData),
            ('evidence_data', VerifyEvidenceData),
        ):
            if m.get(key) is not None:
                setattr(self, key, model_cls().from_map(m[key]))
        return self
class VerifyBlockchainResponse(TeaModel):
    """Response model for a blockchain evidence verification call."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        tx_hash: str = None,
        status: str = None,
        block_hash: str = None,
        block_height: int = None,
        data_type: str = None,
        timestamp: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally means success
        self.result_code = result_code
        # Textual description of the error, if any
        self.result_msg = result_msg
        # Unified evidence number (deposit transaction hash)
        self.tx_hash = tx_hash
        # Verification result
        self.status = status
        # Hash of the containing block
        self.block_hash = block_hash
        # Block height
        self.block_height = block_height
        # Deposit data type
        self.data_type = data_type
        # Deposit timestamp
        self.timestamp = timestamp

    def validate(self):
        """Nothing to validate for this model."""
        pass

    def to_map(self):
        """Serialize non-None fields into a plain dict."""
        result = dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'tx_hash',
                    'status', 'block_hash', 'block_height', 'data_type',
                    'timestamp'):
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, skipping absent keys."""
        m = m or dict()
        for key in ('req_msg_id', 'result_code', 'result_msg', 'tx_hash',
                    'status', 'block_hash', 'block_height', 'data_type',
                    'timestamp'):
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CreateRecordscreenRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
name: str = None,
memo: str = None,
url: List[str] = None,
type: str = None,
area: str = None,
screen_code: str = None,
max_time_in_min: int = None,
wait_in_min: int = None,
cross_region: str = None,
ip_type: str = None,
callback_url: str = None,
org_id: str = None,
cert_name: str = None,
cert_no: str = None,
cert_type: str = None,
legal_person_name: str = None,
legal_person_no: str = None,
agent_name: str = None,
agent_no: str = None,
phone_num: str = None,
proxy_info: ProxyData = None,
client_token: str = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 取证名称(最长128个字符)
self.name = name
# 取证备注(最长512个字符)
self.memo = memo
# 预校验网址和预打开网页(不能大于10条),如果automatic为true 则必填
self.url = url
# 取证类型 具体见附录 RecordScreenType
self.type = type
# 取证服务器地域 具体见附录 RecordScreenArea
self.area = area
# 定制录屏桌面识别码,目前取值只有默认值:DEFAULT
self.screen_code = screen_code
# 最大录屏时间,maxTimeInMin不能小于6min,不能大于300min
self.max_time_in_min = max_time_in_min
# 服务器进入ready状态,指定时间未开始进入abort状态的等待时间,单位为分钟,默认1min,waitInMin不能小于1,不能大于10min
self.wait_in_min = wait_in_min
# 当所选地域无机器资源时,是否使用其它地域 目前取值只有默认值:FORBID
self.cross_region = cross_region
# ip地域取值 目前取值只有默认值:ANY
self.ip_type = ip_type
# 回调地址
self.callback_url = callback_url
# 公证处Id
self.org_id = org_id
# 取证人证件名称
self.cert_name = cert_name
# 取证人证件号码
self.cert_no = cert_no
# 取证人证件类型 具体见附录CertType
self.cert_type = cert_type
# 企业法人代表姓名(如果certType为BUSINESS_LICENSE 则必传)
self.legal_person_name = legal_person_name
# 企业法人代表身份证号(如果certType为BUSINESS_LICENSE 则必传)
self.legal_person_no = legal_person_no
# 企业用户取证人姓名(如果certType为BUSINESS_LICENSE 则必传)
self.agent_name = agent_name
# 企业用户取证人身份证号(如果certType为BUSINESS_LICENSE 则必传)
self.agent_no = agent_no
# 取证人电话号码,生成公证处证书需要,公证处需要作登记 格式范例:(86-573)2651630 或 (86)13738258505
self.phone_num = phone_num
# 代理信息
self.proxy_info = proxy_info
# 保证请求幂等性。从您的客户端生成一个参数值,确保不同请求间该参数值唯一。clientToken只支持ASCII字符,且不能超过64个字符。更多详情,请参见如何保证幂等性。
self.client_token = client_token
def validate(self):
self.validate_required(self.name, 'name')
if self.name is not None:
self.validate_max_length(self.name, 'name', 128)
if self.memo is not None:
self.validate_max_length(self.memo, 'memo', 512)
self.validate_required(self.type, 'type')
self.validate_required(self.area, 'area')
self.validate_required(self.max_time_in_min, 'max_time_in_min')
self.validate_required(self.cert_name, 'cert_name')
self.validate_required(self.cert_no, 'cert_no')
self.validate_required(self.cert_type, 'cert_type')
if self.proxy_info:
self.proxy_info.validate()
self.validate_required(self.client_token, 'client_token')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.name is not None:
result['name'] = self.name
if self.memo is not None:
result['memo'] = self.memo
if self.url is not None:
result['url'] = self.url
if self.type is not None:
result['type'] = self.type
if self.area is not None:
result['area'] = self.area
if self.screen_code is not None:
result['screen_code'] = self.screen_code
if self.max_time_in_min is not None:
result['max_time_in_min'] = self.max_time_in_min
if self.wait_in_min is not None:
result['wait_in_min'] = self.wait_in_min
if self.cross_region is not None:
result['cross_region'] = self.cross_region
if self.ip_type is not None:
result['ip_type'] = self.ip_type
if self.callback_url is not None:
result['callback_url'] = self.callback_url
if self.org_id is not None:
result['org_id'] = self.org_id
if self.cert_name is not None:
result['cert_name'] = self.cert_name
if self.cert_no is not None:
result['cert_no'] = self.cert_no
if self.cert_type is not None:
result['cert_type'] = self.cert_type
if self.legal_person_name is not None:
result['legal_person_name'] = self.legal_person_name
if self.legal_person_no is not None:
result['legal_person_no'] = self.legal_person_no
if self.agent_name is not None:
result['agent_name'] = self.agent_name
if self.agent_no is not None:
result['agent_no'] = self.agent_no
if self.phone_num is not None:
result['phone_num'] = self.phone_num
if self.proxy_info is not None:
result['proxy_info'] = self.proxy_info.to_map()
if self.client_token is not None:
result['client_token'] = self.client_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('memo') is not None:
self.memo = m.get('memo')
if m.get('url') is not None:
self.url = m.get('url')
if m.get('type') is not None:
self.type = m.get('type')
if m.get('area') is not None:
self.area = m.get('area')
if m.get('screen_code') is not None:
self.screen_code = m.get('screen_code')
if m.get('max_time_in_min') is not None:
self.max_time_in_min = m.get('max_time_in_min')
if m.get('wait_in_min') is not None:
self.wait_in_min = m.get('wait_in_min')
if m.get('cross_region') is not None:
self.cross_region = m.get('cross_region')
if m.get('ip_type') is not None:
self.ip_type = m.get('ip_type')
if m.get('callback_url') is not None:
self.callback_url = m.get('callback_url')
if m.get('org_id') is not None:
self.org_id = m.get('org_id')
if m.get('cert_name') is not None:
self.cert_name = m.get('cert_name')
if m.get('cert_no') is not None:
self.cert_no = m.get('cert_no')
if m.get('cert_type') is not None:
self.cert_type = m.get('cert_type')
if m.get('legal_person_name') is not None:
self.legal_person_name = m.get('legal_person_name')
if m.get('legal_person_no') is not None:
self.legal_person_no = m.get('legal_person_no')
if m.get('agent_name') is not None:
self.agent_name = m.get('agent_name')
if m.get('agent_no') is not None:
self.agent_no = m.get('agent_no')
if m.get('phone_num') is not None:
self.phone_num = m.get('phone_num')
if m.get('proxy_info') is not None:
temp_model = ProxyData()
self.proxy_info = temp_model.from_map(m['proxy_info'])
if m.get('client_token') is not None:
self.client_token = m.get('client_token')
return self
class CreateRecordscreenResponse(TeaModel):
    """Response for creating a screen-recording evidence task."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        evidence_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Evidence id of the created task.
        self.evidence_id = evidence_id
    def validate(self):
        """Responses carry no constraints to enforce."""
        pass
    def to_map(self):
        """Serialize set fields into a plain dict."""
        result = dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg',
                           'evidence_id'):
            value = getattr(self, field_name)
            if value is not None:
                result[field_name] = value
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg',
                           'evidence_id'):
            if m.get(field_name) is not None:
                setattr(self, field_name, m.get(field_name))
        return self
class QueryRecordscreenRequest(TeaModel):
    """Request to query the state of a screen-recording evidence task."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        evidence_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Evidence id to look up.
        self.evidence_id = evidence_id
    def validate(self):
        """evidence_id is the only mandatory field."""
        self.validate_required(self.evidence_id, 'evidence_id')
    def to_map(self):
        """Serialize set fields into a plain dict."""
        result = dict()
        for field_name in ('auth_token', 'product_instance_id', 'evidence_id'):
            value = getattr(self, field_name)
            if value is not None:
                result[field_name] = value
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        for field_name in ('auth_token', 'product_instance_id', 'evidence_id'):
            if m.get(field_name) is not None:
                setattr(self, field_name, m.get(field_name))
        return self
class QueryRecordscreenResponse(TeaModel):
    """Response describing the state of a screen-recording evidence task."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        evidence_id: str = None,
        status: str = None,
        operate_url: str = None,
        data: RecordScreenData = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Evidence id.
        self.evidence_id = evidence_id
        # Evidence status; see appendix RecordScreenStatus.
        self.status = status
        # Operation URL (returned while status is INIT or DOING; reusable).
        self.operate_url = operate_url
        # Evidence detail (present only for PACKAGING, SUCCESS or FAIL).
        self.data = data
    def validate(self):
        """Delegate validation to the nested detail model when present."""
        if self.data:
            self.data.validate()
    def to_map(self):
        """Serialize set fields into a plain dict."""
        result = dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg',
                           'evidence_id', 'status', 'operate_url'):
            value = getattr(self, field_name)
            if value is not None:
                result[field_name] = value
        # Nested model serializes through its own to_map().
        if self.data is not None:
            result['data'] = self.data.to_map()
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg',
                           'evidence_id', 'status', 'operate_url'):
            if m.get(field_name) is not None:
                setattr(self, field_name, m.get(field_name))
        # Nested detail is rebuilt through its model class.
        if m.get('data') is not None:
            temp_model = RecordScreenData()
            self.data = temp_model.from_map(m['data'])
        return self
class ListNotaryRequest(TeaModel):
    """Request to list available notary public offices."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
    def validate(self):
        """No constraints to enforce."""
        pass
    def to_map(self):
        """Serialize set fields into a plain dict."""
        result = dict()
        for field_name in ('auth_token', 'product_instance_id'):
            value = getattr(self, field_name)
            if value is not None:
                result[field_name] = value
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        for field_name in ('auth_token', 'product_instance_id'):
            if m.get(field_name) is not None:
                setattr(self, field_name, m.get(field_name))
        return self
class ListNotaryResponse(TeaModel):
    """Response carrying the list of notary public offices."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        notary_list: List[NotaryPublicOffice] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Notary public office list.
        self.notary_list = notary_list
    def validate(self):
        """Validate every non-empty entry of the notary list."""
        if self.notary_list:
            for office in self.notary_list:
                if office:
                    office.validate()
    def to_map(self):
        """Serialize set fields into a plain dict.

        The 'notary_list' key is always present (possibly empty), matching
        the original wire format.
        """
        result = dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, field_name)
            if value is not None:
                result[field_name] = value
        result['notary_list'] = []
        if self.notary_list is not None:
            for office in self.notary_list:
                result['notary_list'].append(office.to_map() if office else None)
        return result
    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        m = m or dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(field_name) is not None:
                setattr(self, field_name, m.get(field_name))
        # notary_list is always reset, then rebuilt entry by entry.
        self.notary_list = []
        if m.get('notary_list') is not None:
            for item in m.get('notary_list'):
                temp_model = NotaryPublicOffice()
                self.notary_list.append(temp_model.from_map(item))
        return self
| 35.581453
| 95
| 0.590442
| 21,094
| 157,697
| 4.166825
| 0.029155
| 0.051425
| 0.092565
| 0.064782
| 0.832744
| 0.747073
| 0.713647
| 0.700688
| 0.684749
| 0.672803
| 0
| 0.002591
| 0.312168
| 157,697
| 4,431
| 96
| 35.589483
| 0.807733
| 0.041973
| 0
| 0.761747
| 1
| 0
| 0.104063
| 0.004941
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068546
| false
| 0.004699
| 0.000553
| 0
| 0.120509
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
26ed43cf5a804d638bf38a6e307d345cfc70759c
| 7,380
|
py
|
Python
|
AnimeList/migrations/0002_auto_20200610_1429.py
|
habelash/AnimeView
|
893620c318b25a549479c781f002e6eed326705c
|
[
"CC0-1.0"
] | null | null | null |
AnimeList/migrations/0002_auto_20200610_1429.py
|
habelash/AnimeView
|
893620c318b25a549479c781f002e6eed326705c
|
[
"CC0-1.0"
] | 7
|
2021-03-30T14:17:35.000Z
|
2022-01-13T02:55:42.000Z
|
AnimeList/migrations/0002_auto_20200610_1429.py
|
habelash/AnimeView
|
893620c318b25a549479c781f002e6eed326705c
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-06-10 08:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the AnimeList app (Django 3.0.6).

    Creates one table per anime series, each with an identical episode
    schema, plus a shared `animelist` index table, and renames the plural
    display name of the pre-existing `onepiece` model.

    NOTE(review): the field name 'Discription' is a typo carried over from
    the model definitions — do not rename it here without a matching model
    change and follow-up migration, or the schema and models will diverge.
    """
    dependencies = [
        ('AnimeList', '0001_initial'),
    ]
    operations = [
        # Per-series episode tables: auto 'id' PK, episode number, link,
        # description text, and a cover image stored under 'media'.
        migrations.CreateModel(
            name='afrosamurai',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Afro Samurai',
            },
        ),
        # Index table for all series: name + description + cover image
        # (no per-episode fields).
        migrations.CreateModel(
            name='animelist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Anime List',
            },
        ),
        migrations.CreateModel(
            name='aot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Attack On Titans',
            },
        ),
        migrations.CreateModel(
            name='bleach',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Bleach',
            },
        ),
        migrations.CreateModel(
            name='bnha',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'My Hero Accadamy',
            },
        ),
        migrations.CreateModel(
            name='deathnote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Death Note',
            },
        ),
        migrations.CreateModel(
            name='fairytail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Fairy Tail',
            },
        ),
        migrations.CreateModel(
            name='fmab',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Full Metal Alchemist Brotherhood',
            },
        ),
        migrations.CreateModel(
            name='haikyu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Haikyu',
            },
        ),
        migrations.CreateModel(
            name='hxh',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Hunter X Hunter',
            },
        ),
        migrations.CreateModel(
            name='kny',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Demon Slayer',
            },
        ),
        migrations.CreateModel(
            name='naruto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'Naruto',
            },
        ),
        migrations.CreateModel(
            name='onepunchman',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Episode', models.PositiveIntegerField()),
                ('EpisodeLink', models.URLField()),
                ('Discription', models.TextField()),
                ('image', models.ImageField(upload_to='media')),
            ],
            options={
                'verbose_name_plural': 'One Punch Man',
            },
        ),
        # Display-name fix only; no schema change.
        migrations.AlterModelOptions(
            name='onepiece',
            options={'verbose_name_plural': 'One Piece'},
        ),
    ]
| 39.677419
| 114
| 0.501084
| 560
| 7,380
| 6.457143
| 0.166071
| 0.082135
| 0.06969
| 0.09292
| 0.786781
| 0.778485
| 0.778485
| 0.778485
| 0.778485
| 0.778485
| 0
| 0.004412
| 0.355014
| 7,380
| 185
| 115
| 39.891892
| 0.755252
| 0.006098
| 0
| 0.72067
| 1
| 0
| 0.149598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005587
| 0
| 0.022346
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f85f064963a44d0ccb1270bc00e6b3900073034f
| 16,452
|
py
|
Python
|
engine/tests/test_form_service.py
|
ainterr/scoring_engine
|
d986eef08dcb819add20ed87d91239f887f62daa
|
[
"MIT"
] | null | null | null |
engine/tests/test_form_service.py
|
ainterr/scoring_engine
|
d986eef08dcb819add20ed87d91239f887f62daa
|
[
"MIT"
] | 7
|
2016-02-24T21:01:22.000Z
|
2017-01-04T03:22:44.000Z
|
engine/tests/test_form_service.py
|
ainterr/scoring_engine
|
d986eef08dcb819add20ed87d91239f887f62daa
|
[
"MIT"
] | 2
|
2016-03-04T17:04:48.000Z
|
2020-01-30T21:03:49.000Z
|
from django.test import TransactionTestCase
from django.core.exceptions import ValidationError
from django.core.management import call_command
from .. import models, forms
class ServiceTests(TransactionTestCase):
    """Validation and persistence tests for the Service model form.

    The original body repeated the same bind/validate/assert boilerplate
    ~40 times (and contained a redundant re-assignment of data['name']);
    the repetition is factored into `_assert_invalid` / `_save_valid`
    helpers and data-driven case tables. Every original assertion is kept.
    """

    form_class = forms.ModelFormFactory(models.Service)

    # Exact error strings the form emits for the cases below.
    REQUIRED = 'This field is required.'
    NOT_WHOLE = 'Enter a whole number.'
    NOT_POSITIVE = 'Ensure this value is greater than or equal to 0.'
    TOO_LONG = 'Ensure this value has at most 20 characters (it has 21).'
    BAD_PORT = 'Service port not in valid range 1-65535.'
    BAD_CHOICE = ('Select a valid choice. That choice is not one of the '
                  'available choices.')
    DUP_NAME = 'Service with this Name already exists.'
    DUP_HOST_PORT = 'Service already exists on host 1 port 30.'

    def setUp(self):
        # Plugins must be registered so the form's plugin choices exist.
        call_command('registerplugins')
        self.http = models.Plugin.objects.get(name='http')
        self.smb = models.Plugin.objects.get(name='smb')

    def _assert_invalid(self, data, field, message, instance=None):
        """Bind the form to `data` and assert `field` fails with `message`."""
        form = self.form_class(data, instance=instance)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors[field], [message])

    def _save_valid(self, data, instance=None):
        """Bind the form to `data`, assert it validates, and save it."""
        form = self.form_class(data, instance=instance)
        self.assertTrue(form.is_valid())
        form.save()

    def _malformed_cases(self):
        """(field, bad value, errored field, message) tuples covering every
        per-field validation failure; applied over otherwise-valid data."""
        return [
            ('name', None, 'name', self.REQUIRED),
            ('name', '', 'name', self.REQUIRED),
            ('name', 'a' * 21, 'name', self.TOO_LONG),
            ('subnet_host', None, 'subnet_host', self.REQUIRED),
            ('subnet_host', 'hi', 'subnet_host', self.NOT_WHOLE),
            ('subnet_host', -1, 'subnet_host', self.NOT_POSITIVE),
            ('port', None, 'port', self.REQUIRED),
            ('port', 'oh', 'port', self.NOT_WHOLE),
            ('port', -1, 'port', self.NOT_POSITIVE),
            ('port', 0, '__all__', self.BAD_PORT),
            ('port', 65536, '__all__', self.BAD_PORT),
            ('plugin', None, 'plugin', self.REQUIRED),
            ('plugin', 'http', 'plugin', self.BAD_CHOICE),
        ]

    def test_malformed_service_form(self):
        """Should not be able to submit service forms with malformed data"""
        # Too little data: (submitted data, first field reported missing).
        missing = [
            ({}, 'name'),
            ({'name': 'Service1'}, 'subnet_host'),
            ({'subnet_host': 2}, 'name'),
            ({'port': 80}, 'name'),
            ({'plugin': self.http.pk}, 'name'),
            ({'name': 'Service1', 'subnet_host': 1}, 'port'),
            ({'name': 'Service1', 'port': 90}, 'subnet_host'),
            ({'name': 'Service1', 'plugin': self.http.pk}, 'subnet_host'),
            ({'subnet_host': 2, 'port': 93}, 'name'),
            ({'subnet_host': 2, 'plugin': self.smb.pk}, 'name'),
            ({'port': 2, 'plugin': self.smb.pk}, 'name'),
            ({'name': 'Service1', 'subnet_host': 2, 'port': 44}, 'plugin'),
            ({'name': 'Service1', 'subnet_host': 2,
              'plugin': self.smb.pk}, 'port'),
            ({'name': 'Service1', 'port': 32,
              'plugin': self.smb.pk}, 'subnet_host'),
            ({'subnet_host': 38, 'port': 32, 'plugin': self.smb.pk}, 'name'),
        ]
        for data, field in missing:
            self._assert_invalid(data, field, self.REQUIRED)
        # Malformed arguments, each applied over an otherwise-valid form.
        good = {'name': 'Service', 'subnet_host': 1, 'port': 93,
                'plugin': self.http.pk}
        for field, bad_value, errored, message in self._malformed_cases():
            data = dict(good, **{field: bad_value})
            self._assert_invalid(data, errored, message)

    def test_service_same_names_form(self):
        """Service form submissions with the same name are not allowed"""
        models.Service.objects.create(
            name='Service1', subnet_host=1, port=30, plugin=self.http)
        data = {'name': 'Service1', 'subnet_host': 2, 'port': 38,
                'plugin': self.http.pk}
        self._assert_invalid(data, 'name', self.DUP_NAME)

    def test_service_same_host_port_form(self):
        """Service form submissions with the same host/port combo
        are not allowed"""
        models.Service.objects.create(
            name='Service1', subnet_host=1, port=30, plugin=self.http)
        data = {'name': 'Service2', 'subnet_host': 1, 'port': 30,
                'plugin': self.smb.pk}
        self._assert_invalid(data, '__all__', self.DUP_HOST_PORT)

    def test_service_correct_form(self):
        """Correctly submitted service forms should be allowed"""
        self.assertEqual(models.Service.objects.count(), 0)
        submissions = [
            {'name': 'Service1', 'subnet_host': 1, 'port': 80,
             'plugin': self.http.pk},
            {'name': 'Service2', 'subnet_host': 4, 'port': 38,
             'plugin': self.smb.pk},
            # Same host, different port
            {'name': 'Service3', 'subnet_host': 1, 'port': 38,
             'plugin': self.http.pk},
            # Different host, same port
            {'name': 'Service4', 'subnet_host': 2, 'port': 80,
             'plugin': self.http.pk},
        ]
        for expected_count, data in enumerate(submissions, start=1):
            self._save_valid(data)
            self.assertEqual(models.Service.objects.count(), expected_count)

    def test_service_malformed_edit_form(self):
        """Service forms should raise an error when
        edited with malformed data"""
        s = models.Service.objects.create(
            name='Service', subnet_host=1, port=93, plugin=self.http)
        good = {'name': 'Service', 'subnet_host': 1, 'port': 93,
                'plugin': self.http.pk}
        for field, bad_value, errored, message in self._malformed_cases():
            data = dict(good, **{field: bad_value})
            self._assert_invalid(data, errored, message, instance=s)

    def test_service_edit_form(self):
        """Service fields should be updated when properly edited
        through a form"""
        s = models.Service.objects.create(
            name='Service', subnet_host=1, port=93, plugin=self.http)
        data = {'name': 'kdkdkdkd', 'subnet_host': 1, 'port': 93,
                'plugin': self.http.pk}
        self._save_valid(data, instance=s)
        s = models.Service.objects.get(pk=s.pk)
        self.assertEqual(s.name, 'kdkdkdkd')
        data['subnet_host'] = 5
        self._save_valid(data, instance=s)
        s = models.Service.objects.get(pk=s.pk)
        self.assertEqual(s.subnet_host, 5)
        data['port'] = 5
        self._save_valid(data, instance=s)
        s = models.Service.objects.get(pk=s.pk)
        self.assertEqual(s.port, 5)
        data['plugin'] = self.smb.pk
        self._save_valid(data, instance=s)
        s = models.Service.objects.get(pk=s.pk)
        self.assertEqual(s.plugin, self.smb)

    def test_service_edit_same_names_form(self):
        """Services with the same name are not allowed when editing
        through forms"""
        models.Service.objects.create(
            name='Service1', subnet_host=1, port=30, plugin=self.http)
        s = models.Service.objects.create(
            name='Service2', subnet_host=2, port=38, plugin=self.http)
        data = {'name': 'Service1', 'subnet_host': 2, 'port': 38,
                'plugin': self.http.pk}
        self._assert_invalid(data, 'name', self.DUP_NAME, instance=s)

    def test_service_edit_same_host_port_form(self):
        """Services with the same host/port combo are not allowed
        when editing through forms"""
        models.Service.objects.create(
            name='Service1', subnet_host=1, port=30, plugin=self.http)
        s = models.Service.objects.create(
            name='Service2', subnet_host=2, port=30, plugin=self.smb)
        # Editing the host into a clash.
        data = {'name': 'Service2', 'subnet_host': 1, 'port': 30,
                'plugin': self.smb.pk}
        self._assert_invalid(data, '__all__', self.DUP_HOST_PORT, instance=s)
        # Editing the port into a clash.
        s = models.Service.objects.create(
            name='Service3', subnet_host=1, port=50, plugin=self.smb)
        data = {'name': 'Service3', 'subnet_host': 1, 'port': 30,
                'plugin': self.smb.pk}
        self._assert_invalid(data, '__all__', self.DUP_HOST_PORT, instance=s)
| 38.529274
| 88
| 0.585218
| 2,004
| 16,452
| 4.689122
| 0.067864
| 0.052783
| 0.068958
| 0.097691
| 0.904863
| 0.889539
| 0.862935
| 0.843035
| 0.831329
| 0.812813
| 0
| 0.017441
| 0.264649
| 16,452
| 426
| 89
| 38.619718
| 0.759299
| 0.042487
| 0
| 0.730769
| 0
| 0
| 0.192954
| 0
| 0
| 0
| 0
| 0
| 0.322485
| 1
| 0.026627
| false
| 0
| 0.011834
| 0
| 0.044379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8bdc69b51e83336909893405e8711f3978dd1cc
| 152
|
py
|
Python
|
Dataset/Leetcode/train/70/43.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/70/43.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/train/70/43.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
    def XXX(self, x: int) -> int:
        """Return the number of distinct ways to climb `x` stairs taking
        1 or 2 steps at a time (LeetCode 70), i.e. Fibonacci(x + 1).

        The original closed-form trick computed Fibonacci(x + 1) modulo
        7841400319, which silently returns a wrong (reduced) value once
        the true answer reaches that modulus (x >= 49). This iterative
        version is exact for every non-negative x and O(x) time, O(1) space.
        """
        # Invariant: after k steps of the loop, ways == Fib(k + 1).
        ways, nxt = 1, 1
        for _ in range(x):
            ways, nxt = nxt, ways + nxt
        return ways
| 25.333333
| 99
| 0.690789
| 21
| 152
| 5
| 0.666667
| 0.038095
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.476563
| 0.157895
| 152
| 5
| 100
| 30.4
| 0.34375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
3e04023ad53632604928a2a7b4d158dc105d9bf8
| 5,200
|
py
|
Python
|
voter/tests/test_models.py
|
NCVotes/voters-ingestor
|
af7bc9db151cc65eae85412f7c1d36d4d301b158
|
[
"MIT"
] | 10
|
2018-02-15T16:40:03.000Z
|
2020-06-13T19:50:16.000Z
|
voter/tests/test_models.py
|
NCVotes/voters-ingestor
|
af7bc9db151cc65eae85412f7c1d36d4d301b158
|
[
"MIT"
] | 17
|
2017-09-11T19:57:58.000Z
|
2018-06-19T13:31:09.000Z
|
voter/tests/test_models.py
|
NCVotes/voters-ingestor
|
af7bc9db151cc65eae85412f7c1d36d4d301b158
|
[
"MIT"
] | 1
|
2017-11-02T23:42:16.000Z
|
2017-11-02T23:42:16.000Z
|
from unittest.mock import patch
from django.test import TestCase
from voter.models import FileTracker, BadLineTracker, BadLineRange, NCVoter, NCVoterQueryView
from voter.tests import factories
class FileTrackerTest(TestCase):
    """Unit tests for FileTracker.short_filename."""

    def test_short_filename_for_path_is_correct(self):
        # A full path is reduced to its final component.
        tracker = FileTracker(filename='/foo/bar.txt')
        self.assertEqual(tracker.short_filename, 'bar.txt')

    def test_short_filename_if_no_parent(self):
        # A bare filename passes through unchanged.
        tracker = FileTracker(filename='bar.txt')
        self.assertEqual(tracker.short_filename, 'bar.txt')
class BadLineTrackerTest(TestCase):
    """Unit tests for how BadLineTracker groups bad lines into ranges."""

    def setUp(self):
        self.blr = BadLineTracker('filename')

    def _assert_range(self, rng, first, last, example, message, warning=False):
        # Single place for the repeated field-by-field range checks.
        self.assertEqual(first, rng.first_line_no)
        self.assertEqual(last, rng.last_line_no)
        self.assertEqual(example, rng.example_line)
        self.assertEqual(message, rng.message)
        self.assertEqual('filename', rng.filename)
        if warning:
            self.assertTrue(rng.is_warning)
        else:
            self.assertFalse(rng.is_warning)

    def test_just_one_error(self):
        self.blr.error(27, 'bad line', 'the line was bad')
        self.blr.flush()
        self.assertEqual(1, BadLineRange.objects.count())
        self._assert_range(BadLineRange.objects.first(),
                           27, 27, 'bad line', 'the line was bad')

    def test_adjacent_errors(self):
        # Consecutive line numbers with the same message collapse into a
        # single range; the example line is kept from the first error.
        self.blr.error(27, 'bad line', 'the line was bad')
        self.blr.error(28, 'worse line', 'the line was bad')
        self.blr.flush()
        self.assertEqual(1, BadLineRange.objects.count())
        self._assert_range(BadLineRange.objects.first(),
                           27, 28, 'bad line', 'the line was bad')

    def test_gap_in_errors(self):
        # A gap in line numbers forces a new range.
        self.blr.error(27, 'bad line', 'the line was bad')
        self.blr.error(29, 'worse line', 'the line was bad')
        self.blr.flush()
        self.assertEqual(2, BadLineRange.objects.count())
        self._assert_range(BadLineRange.objects.order_by('first_line_no').first(),
                           27, 27, 'bad line', 'the line was bad')
        self._assert_range(BadLineRange.objects.order_by('-first_line_no').first(),
                           29, 29, 'worse line', 'the line was bad')

    def test_different_messages(self):
        # Adjacent lines with different messages also start a new range.
        self.blr.error(27, 'bad line', 'the line was bad')
        self.blr.error(28, 'worse line', 'the line was worse')
        self.blr.flush()
        self.assertEqual(2, BadLineRange.objects.count())
        self._assert_range(BadLineRange.objects.order_by('first_line_no').first(),
                           27, 27, 'bad line', 'the line was bad')
        self._assert_range(BadLineRange.objects.order_by('-first_line_no').first(),
                           28, 28, 'worse line', 'the line was worse')

    def test_warning_error(self):
        # A warning and an error never merge, even when adjacent with the
        # same message.
        self.blr.warning(27, 'bad line', 'the line was bad')
        self.blr.error(28, 'worse line', 'the line was bad')
        self.blr.flush()
        self.assertEqual(2, BadLineRange.objects.count())
        self._assert_range(BadLineRange.objects.order_by('first_line_no').first(),
                           27, 27, 'bad line', 'the line was bad', warning=True)
        self._assert_range(BadLineRange.objects.order_by('-first_line_no').first(),
                           28, 28, 'worse line', 'the line was bad')
class NCVoterTest(TestCase):
    """Unit tests for NCVoter.get_count."""

    def test_get_count_uses_materialized_view(self):
        factories.NCVoter()
        # Until the materialized view is refreshed it holds no rows, so
        # the count is zero even though a voter record exists.
        self.assertEqual(NCVoter.get_count({}), 0)
        NCVoterQueryView.refresh()
        self.assertEqual(NCVoter.get_count({}), 1)

    def test_get_count_is_cached_in_db(self):
        factories.NCVoterQueryCache(qs_filters={}, count=23)
        # With a cache row present, the view must never be queried.
        with patch('voter.models.NCVoterQueryView.objects.filter') as mock_filter:
            self.assertEqual(NCVoter.get_count({}), 23)
            mock_filter.assert_not_called()
| 41.935484
| 93
| 0.668462
| 689
| 5,200
| 4.885341
| 0.139332
| 0.222816
| 0.050505
| 0.099822
| 0.758467
| 0.731729
| 0.723708
| 0.723708
| 0.723708
| 0.699346
| 0
| 0.014809
| 0.207885
| 5,200
| 123
| 94
| 42.276423
| 0.802379
| 0.012692
| 0
| 0.666667
| 0
| 0
| 0.127825
| 0.008574
| 0
| 0
| 0
| 0
| 0.561905
| 1
| 0.095238
| false
| 0
| 0.038095
| 0
| 0.161905
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3e1f0da69364a25bac03e1db4d8e2a8fa7dc7496
| 199
|
py
|
Python
|
src/infi/instruct/macros.py
|
jasonjorge/infi.asi
|
78a4c34a421102f99b959a659cf7303804627d9b
|
[
"BSD-3-Clause"
] | 2
|
2015-01-12T21:16:06.000Z
|
2019-12-12T05:59:56.000Z
|
src/infi/instruct/macros.py
|
jasonjorge/infi.asi
|
78a4c34a421102f99b959a659cf7303804627d9b
|
[
"BSD-3-Clause"
] | 4
|
2015-02-24T09:18:00.000Z
|
2021-06-16T12:55:19.000Z
|
src/infi/instruct/macros.py
|
jasonjorge/infi.asi
|
78a4c34a421102f99b959a659cf7303804627d9b
|
[
"BSD-3-Clause"
] | 4
|
2015-01-07T12:37:54.000Z
|
2018-02-08T15:07:17.000Z
|
# flake8: noqa
import functools
from .base import *
from .numeric_macros import *
from .string_macros import *
from .mapping_macros import *
from .array_macros import *
from .struct_macros import *
| 19.9
| 29
| 0.778894
| 27
| 199
| 5.555556
| 0.444444
| 0.333333
| 0.426667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005917
| 0.150754
| 199
| 9
| 30
| 22.111111
| 0.881657
| 0.060302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3e581e081d76ee14f7e27d39df5f97d39360e7a4
| 83
|
py
|
Python
|
apps/oob/forms/__init__.py
|
dbjennings/order-of-business
|
8f424bd36b323e4c102b3d8704bb576c150d585a
|
[
"MIT"
] | null | null | null |
apps/oob/forms/__init__.py
|
dbjennings/order-of-business
|
8f424bd36b323e4c102b3d8704bb576c150d585a
|
[
"MIT"
] | null | null | null |
apps/oob/forms/__init__.py
|
dbjennings/order-of-business
|
8f424bd36b323e4c102b3d8704bb576c150d585a
|
[
"MIT"
] | null | null | null |
from apps.oob.forms.task_forms import *
from apps.oob.forms.project_forms import *
| 27.666667
| 42
| 0.807229
| 14
| 83
| 4.642857
| 0.5
| 0.246154
| 0.338462
| 0.492308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 83
| 2
| 43
| 41.5
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3e5f713cd9053c7d0f69c12cffe0497b46614552
| 5,033
|
py
|
Python
|
metrics/write_excel_new.py
|
IceClear/MW-GAN
|
acb962468c984681c4a21f7b5c14588ca8f58c00
|
[
"MIT"
] | 36
|
2020-08-12T05:17:42.000Z
|
2022-03-22T03:02:21.000Z
|
metrics/write_excel_new.py
|
IceClear/MW-GAN
|
acb962468c984681c4a21f7b5c14588ca8f58c00
|
[
"MIT"
] | 15
|
2021-01-19T08:24:58.000Z
|
2021-11-16T15:52:58.000Z
|
metrics/write_excel_new.py
|
RyanXingQL/MW-GAN
|
562199344e322919a108048acd55b0dd8820df55
|
[
"MIT"
] | 8
|
2020-10-23T14:15:15.000Z
|
2021-12-23T02:18:23.000Z
|
#!/usr/bin/env python
# coding=utf-8
from xlwt import *
from glob import glob
import os
import random
# requires the xlwt library
#import xlwt
# the file is opened with utf-8 encoding
# name of the file to open
video_file = glob('/home/iceclear/Video-compression/video-PI42/*')
file_path = 'C:/Users/IRC207/Desktop/video-PI42'


def _write_sheet(resolution, out_name):
    """Write one .xls workbook listing, per video directory, the first video
    matching *resolution* under each method's folder (LQ first, the four
    GAN methods in random order), with the LQ path repeated in column 1.

    Args:
        resolution: resolution token that must end the video basename,
            e.g. '416x240'.
        out_name: output spreadsheet filename, e.g. 'TEST_1.xls'.
    """
    method_list = ['LQ']
    workbook = Workbook(encoding = 'utf-8')
    table = workbook.add_sheet('data')
    data = ['File name']
    for exp in video_file:
        video_list = glob(exp + '/*.mp4')
        for video_item in video_list:
            # Match the resolution token immediately before the '.mp4'
            # extension (equivalent to the original fixed-width slice test).
            if os.path.basename(video_item)[:-4].endswith(resolution):
                # Randomize the presentation order of the four methods,
                # keeping LQ always first.
                addition_method = ['MW-GAN', 'CVRGAN', 'MFQE2.0', 'SEVGAN']
                random.shuffle(addition_method)
                method_list += addition_method
                for method in method_list:
                    data.append(os.path.join(file_path, method,
                                             os.path.basename(video_item)))
                method_list = ['LQ']
                break  # only the first matching video per directory
    # Column 0: the collected paths (header row included).
    for i, p in enumerate(data):
        table.write(i, 0, p)
    # Column 1: the LQ counterpart of each path; header row is passed through.
    for i, p in enumerate(data):
        if i > 0:
            table.write(i, 1, os.path.join(file_path, 'LQ', os.path.basename(p)))
        else:
            table.write(i, 1, os.path.basename(p))
    workbook.save(out_name)


# One spreadsheet per resolution; this replaces five copy-pasted passes
# that differed only in the resolution token and the output filename.
for sheet_no, resolution in enumerate(
        ['416x240', '832x480', '1280x720', '1920x1080', '2560x1600'], start=1):
    _write_sheet(resolution, 'TEST_%d.xls' % sheet_no)
# data = {}
# # dictionary data
#
# ldata = []
# num = [a for a in data]
# # the for loop collects the dict keys into num
# num.sort()
# # the keys come out of the dict unordered, so sort them first
#
# for x in num:
# # the for loop stores each key and its values from data into ldata
# t = [int(x)]
# for a in data[x]:
# t.append(a)
# ldata.append(t)
#
# for i,p in enumerate(ldata):
# # write the data to the file; i is the row index returned by enumerate()
# for j,q in enumerate(p):
# # print i,j,q
# table.write(i,j,q)
| 32.262821
| 97
| 0.606398
| 756
| 5,033
| 3.92328
| 0.12963
| 0.060688
| 0.094403
| 0.025961
| 0.8412
| 0.835806
| 0.835806
| 0.835806
| 0.835806
| 0.835806
| 0.001192
| 0.026811
| 0.229287
| 5,033
| 155
| 98
| 32.470968
| 0.736272
| 0.083449
| 0
| 0.867769
| 0
| 0
| 0.100339
| 0.017853
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033058
| 0
| 0.033058
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3e7d72fb76d355aad5f25af9ffd96b3951fa8458
| 20,046
|
py
|
Python
|
model.py
|
Arenops/ImprovedFCN
|
49b4322d4ac5179de97c27f0f540432084782322
|
[
"MIT"
] | 12
|
2019-11-21T09:20:39.000Z
|
2022-03-10T08:58:21.000Z
|
model.py
|
Arenops/ImprovedFCN
|
49b4322d4ac5179de97c27f0f540432084782322
|
[
"MIT"
] | 12
|
2020-01-28T22:03:53.000Z
|
2022-03-11T23:35:52.000Z
|
model.py
|
Arenops/ImprovedFCN
|
49b4322d4ac5179de97c27f0f540432084782322
|
[
"MIT"
] | 3
|
2020-05-25T15:35:04.000Z
|
2021-12-29T07:41:13.000Z
|
import numpy as np
import os
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
def focal_loss(gamma=2., alpha=.25):
    """Build a binary focal loss for use as a Keras loss function.

    Args:
        gamma: focusing exponent; larger values down-weight easy examples.
        alpha: weight of the positive class (the negative class gets 1 - alpha).

    Returns:
        A loss callable with the Keras (y_true, y_pred) signature.
    """
    # BUG FIX: `tf` was referenced below but never imported anywhere in this
    # module (only numpy/os/keras are imported), so calling the returned loss
    # raised NameError. Import it locally so the fix is self-contained.
    import tensorflow as tf

    def focal_loss_fixed(y_true, y_pred):
        # pt_1: prediction where the label is 1 (elsewhere 1.0, which
        # contributes log(1) = 0); pt_0: prediction where the label is 0.
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        return -keras.sum(alpha * keras.pow(1. - pt_1, gamma) * keras.log(pt_1)) - keras.sum((1 - alpha) * keras.pow(pt_0, gamma) * keras.log(1. - pt_0))
    return focal_loss_fixed
# Dilated Convolutions & Focal Loss
def unet3s2(pretrained_weights = None, input_size = (512, 512, 3)):
    """Build the FCN variant with dilated convolutions, compiled with focal loss.

    Args:
        pretrained_weights: optional path to a weights file to load.
        input_size: input tensor shape, (height, width, channels).

    Returns:
        A compiled Keras Model producing a 1-channel sigmoid mask.
    """
    # Renamed the local from `input` to `inputs` so the builtin is not
    # shadowed; the network wiring is unchanged.
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    # conv1 = BatchNormalization()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # conv12/conv13/conv14 are skip connections upsampled back to full size.
    conv12 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(pool1))
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    # conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv13 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (4,4))(pool2))
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    # conv3 = BatchNormalization()(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv14 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv4))
    # Dilated convolutions enlarge the receptive field without extra pooling.
    conv4 = Conv2D(512, 3, activation = 'relu', dilation_rate = (2, 2), padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv4 = Conv2D(512, 3, activation = 'relu', dilation_rate = (2, 2), padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv4 = Conv2D(512, 3, activation = 'relu', dilation_rate = (4, 4), padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv4 = Conv2D(512, 3, activation = 'relu', dilation_rate = (4, 4), padding = 'same', kernel_initializer = 'he_normal')(conv4)
    # conv4 = BatchNormalization()(conv4)
    # Multi-scale spatial pooling branches, each upsampled back to 512x512.
    pool4 = MaxPooling2D(pool_size=(64, 64))(conv4)
    conv5 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    up5 = UpSampling2D(size = (512,512))(conv5)
    pool5 = MaxPooling2D(pool_size=(32, 32))(conv4)
    conv6 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool5)
    up6 = UpSampling2D(size = (256,256))(conv6)
    pool6 = MaxPooling2D(pool_size=(16, 16))(conv4)
    conv7 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool6)
    up7 = UpSampling2D(size = (128,128))(conv7)
    pool7 = MaxPooling2D(pool_size=(8, 8))(conv4)
    conv8 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool7)
    up8 = UpSampling2D(size = (64,64))(conv8)
    merge1 = concatenate([conv12, conv13, conv14, up5, up6, up7, up8], axis=3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1)
    # conv9 = BatchNormalization()(conv9)
    conv10 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv11 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    model = Model(inputs = inputs, outputs = conv11)
    print('model compile')
    # model.compile(optimizer = Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
    model.compile(optimizer = Adam(lr = 1e-5), loss = [focal_loss(alpha=.25, gamma=2)], metrics = ['accuracy'])
    model.summary()
    if(pretrained_weights):
        model.load_weights(pretrained_weights)
        print ('loaded pretrained_weights ... {}'.format(pretrained_weights))
    return model
# Skip Connections
def unet3s1(pretrained_weights = None, input_size = (512, 512, 3)):
    """Build the FCN variant with skip connections (no dilated convolutions),
    compiled with binary cross-entropy.

    Args:
        pretrained_weights: optional path to a weights file to load.
        input_size: input tensor shape, (height, width, channels).

    Returns:
        A compiled Keras Model producing a 1-channel sigmoid mask.
    """
    # Renamed the local from `input` to `inputs` so the builtin is not
    # shadowed; the network wiring is unchanged.
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    # conv1 = BatchNormalization()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # conv12/conv13/conv14 are skip connections upsampled back to full size.
    conv12 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(pool1))
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    # conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv13 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (4,4))(pool2))
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    # conv3 = BatchNormalization()(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv14 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv4))
    # Multi-scale spatial pooling branches, each upsampled back to 512x512.
    pool4 = MaxPooling2D(pool_size=(64, 64))(conv4)
    conv5 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    up5 = UpSampling2D(size = (512,512))(conv5)
    pool5 = MaxPooling2D(pool_size=(32, 32))(conv4)
    conv6 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool5)
    up6 = UpSampling2D(size = (256,256))(conv6)
    pool6 = MaxPooling2D(pool_size=(16, 16))(conv4)
    conv7 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool6)
    up7 = UpSampling2D(size = (128,128))(conv7)
    pool7 = MaxPooling2D(pool_size=(8, 8))(conv4)
    conv8 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool7)
    up8 = UpSampling2D(size = (64,64))(conv8)
    merge1 = concatenate([conv12, conv13, conv14, up5, up6, up7, up8], axis=3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1)
    # conv9 = BatchNormalization()(conv9)
    conv10 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv11 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    model = Model(inputs = inputs, outputs = conv11)
    print('model compile')
    model.compile(optimizer = Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
    # model.compile(optimizer = Adam(lr = 1e-6), loss = [focal_loss(alpha=.25, gamma=2)], metrics = ['accuracy'])
    model.summary()
    if(pretrained_weights):
        model.load_weights(pretrained_weights)
        print ('loaded pretrained_weights ... {}'.format(pretrained_weights))
    return model
# Spatial Max Pooling
def unet2s(pretrained_weights = None, input_size = (512, 512, 3)):
    """Build the FCN variant whose multi-scale branches use spatial max
    pooling, compiled with binary cross-entropy.

    Args:
        pretrained_weights: optional path to a weights file to load.
        input_size: input tensor shape, (height, width, channels).

    Returns:
        A compiled Keras Model producing a 1-channel sigmoid mask.
    """
    # Renamed the local from `input` to `inputs` so the builtin is not
    # shadowed; the network wiring is unchanged.
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)#512
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv12 = UpSampling2D(size=(8, 8))(conv4)
    # Multi-scale max-pooling branches, each upsampled back to 512x512.
    pool4 = MaxPooling2D(pool_size=(64, 64))(conv4)
    conv5 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    # up5 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (64,64))(conv5))
    up5 = UpSampling2D(size = (512,512))(conv5)
    pool5 = MaxPooling2D(pool_size=(32, 32))(conv4)
    conv6 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool5)
    # up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (32,32))(conv6))
    up6 = UpSampling2D(size = (256,256))(conv6)
    pool6 = MaxPooling2D(pool_size=(16, 16))(conv4)
    conv7 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool6)
    # up7 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (16,16))(conv7))
    up7 = UpSampling2D(size = (128,128))(conv7)
    pool7 = MaxPooling2D(pool_size=(8, 8))(conv4)
    conv8 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool7)
    # up8 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv8))
    up8 = UpSampling2D(size = (64,64))(conv8)
    merge1 = concatenate([conv12, up5, up6, up7, up8], axis=3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1)
    conv10 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv11 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    model = Model(inputs = inputs, outputs = conv11)
    print('model compile')
    model.compile(optimizer = Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
    model.summary()
    if(pretrained_weights):
        model.load_weights(pretrained_weights)
        print ('loaded pretrained_weights ... {}'.format(pretrained_weights))
    return model
# Spatial Average Pooling
def unet2(pretrained_weights = None, input_size = (512, 512, 3)):
    """Build the FCN variant whose multi-scale branches use spatial average
    pooling, compiled with binary cross-entropy.

    Args:
        pretrained_weights: optional path to a weights file to load.
        input_size: input tensor shape, (height, width, channels).

    Returns:
        A compiled Keras Model producing a 1-channel sigmoid mask.
    """
    # Renamed the local from `input` to `inputs` so the builtin is not
    # shadowed; the network wiring is unchanged.
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)#512
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv12 = UpSampling2D(size=(8, 8))(conv4)
    # Multi-scale average-pooling branches, each upsampled back to 512x512.
    pool4 = AveragePooling2D(pool_size=(64, 64))(conv4)
    conv5 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    # up5 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (64,64))(conv5))
    up5 = UpSampling2D(size = (512,512))(conv5)
    pool5 = AveragePooling2D(pool_size=(32, 32))(conv4)
    conv6 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool5)
    # up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (32,32))(conv6))
    up6 = UpSampling2D(size = (256,256))(conv6)
    pool6 = AveragePooling2D(pool_size=(16, 16))(conv4)
    conv7 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool6)
    # up7 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (16,16))(conv7))
    up7 = UpSampling2D(size = (128,128))(conv7)
    pool7 = AveragePooling2D(pool_size=(8, 8))(conv4)
    conv8 = Conv2D(128, 1, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool7)
    # up8 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv8))
    up8 = UpSampling2D(size = (64,64))(conv8)
    merge1 = concatenate([conv12, up5, up6, up7, up8], axis=3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1)
    conv10 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv11 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    model = Model(inputs = inputs, outputs = conv11)
    print('model compile')
    model.compile(optimizer = Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
    model.summary()
    if(pretrained_weights):
        model.load_weights(pretrained_weights)
        print ('loaded pretrained_weights ... {}'.format(pretrained_weights))
    return model
# Backbone
def unet0(pretrained_weights = None, input_size = (512, 512, 3)):
    """Build the plain backbone FCN (encoder + single 8x upsample head),
    compiled with binary cross-entropy.

    Args:
        pretrained_weights: optional path to a weights file to load.
        input_size: input tensor shape, (height, width, channels).

    Returns:
        A compiled Keras Model producing a 1-channel sigmoid mask.
    """
    # Renamed the local from `input` to `inputs` so the builtin is not
    # shadowed; the network wiring is unchanged.
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    # Single 8x upsample back to the input resolution.
    conv14 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (8,8))(conv4))
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv14)
    conv10 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv11 = Conv2D(1, 1, activation = 'sigmoid')(conv10)
    model = Model(inputs = inputs, outputs = conv11)
    print('model compile')
    model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
    # model.compile(optimizer = Adam(lr = 1e-5), loss = [focal_loss(alpha=.25, gamma=2)], metrics = ['accuracy'])
    model.summary()
    if(pretrained_weights):
        model.load_weights(pretrained_weights)
        print ('loaded pretrained_weights ... {}'.format(pretrained_weights))
    return model
# U-net
def unet(pretrained_weights=None, input_size=(512, 512, 3)):
    """Standard U-Net for binary segmentation.

    Four max-pooled encoder stages (64 -> 128 -> 256 -> 512 filters), a
    1024-filter bottleneck, and a symmetric decoder whose stages concatenate
    the matching encoder feature maps (skip connections) before two 3x3 convs.
    A final 1x1 sigmoid conv produces a per-pixel binary mask at the input
    resolution.

    Args:
        pretrained_weights: Optional path to an HDF5 weights file to load
            after compilation.
        input_size: Input tensor shape as ``(height, width, channels)``.

    Returns:
        A compiled Keras ``Model`` (Adam, lr=1e-5, binary cross-entropy,
        accuracy metric).
    """
    inputs = Input(input_size)
    # Encoder.
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)  # 512x512
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    # Bottleneck.
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    # Decoder: each stage upsamples 2x, then concatenates the encoder feature
    # map of the same resolution (skip connection) along the channel axis.
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv5))
    merge6 = concatenate([conv4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # 1x1 conv + sigmoid collapses to a single-channel probability mask.
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    # Fix: use the plural `inputs=`/`outputs=` keywords. The singular
    # `input=`/`output=` forms were removed from Keras 2.x / tf.keras
    # (TypeError on modern versions), and the sibling `unet0` already uses
    # the plural form.
    model = Model(inputs=inputs, outputs=conv10)
    print('model compile')
    model.compile(optimizer=Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
| 62.06192
| 153
| 0.666018
| 2,490
| 20,046
| 5.228514
| 0.056627
| 0.116138
| 0.141025
| 0.232276
| 0.930793
| 0.926799
| 0.92388
| 0.92388
| 0.921038
| 0.920654
| 0
| 0.083134
| 0.167116
| 20,046
| 322
| 154
| 62.254658
| 0.696634
| 0.095131
| 0
| 0.790795
| 0
| 0
| 0.117254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033473
| false
| 0
| 0.029289
| 0
| 0.096234
| 0.046025
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3e7e4eb3036bd85a1c44f699c1763067894174e5
| 533,451
|
py
|
Python
|
intersight/api/vnic_api.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/api/vnic_api.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/api/vnic_api.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.api_client import ApiClient, Endpoint as _Endpoint
from intersight.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from intersight.model.error import Error
from intersight.model.vnic_eth_adapter_policy import VnicEthAdapterPolicy
from intersight.model.vnic_eth_adapter_policy_response import VnicEthAdapterPolicyResponse
from intersight.model.vnic_eth_if import VnicEthIf
from intersight.model.vnic_eth_if_response import VnicEthIfResponse
from intersight.model.vnic_eth_network_policy import VnicEthNetworkPolicy
from intersight.model.vnic_eth_network_policy_response import VnicEthNetworkPolicyResponse
from intersight.model.vnic_eth_qos_policy import VnicEthQosPolicy
from intersight.model.vnic_eth_qos_policy_response import VnicEthQosPolicyResponse
from intersight.model.vnic_fc_adapter_policy import VnicFcAdapterPolicy
from intersight.model.vnic_fc_adapter_policy_response import VnicFcAdapterPolicyResponse
from intersight.model.vnic_fc_if import VnicFcIf
from intersight.model.vnic_fc_if_response import VnicFcIfResponse
from intersight.model.vnic_fc_network_policy import VnicFcNetworkPolicy
from intersight.model.vnic_fc_network_policy_response import VnicFcNetworkPolicyResponse
from intersight.model.vnic_fc_qos_policy import VnicFcQosPolicy
from intersight.model.vnic_fc_qos_policy_response import VnicFcQosPolicyResponse
from intersight.model.vnic_iscsi_adapter_policy import VnicIscsiAdapterPolicy
from intersight.model.vnic_iscsi_adapter_policy_response import VnicIscsiAdapterPolicyResponse
from intersight.model.vnic_iscsi_boot_policy import VnicIscsiBootPolicy
from intersight.model.vnic_iscsi_boot_policy_response import VnicIscsiBootPolicyResponse
from intersight.model.vnic_iscsi_static_target_policy import VnicIscsiStaticTargetPolicy
from intersight.model.vnic_iscsi_static_target_policy_response import VnicIscsiStaticTargetPolicyResponse
from intersight.model.vnic_lan_connectivity_policy import VnicLanConnectivityPolicy
from intersight.model.vnic_lan_connectivity_policy_response import VnicLanConnectivityPolicyResponse
from intersight.model.vnic_lcp_status import VnicLcpStatus
from intersight.model.vnic_lcp_status_response import VnicLcpStatusResponse
from intersight.model.vnic_san_connectivity_policy import VnicSanConnectivityPolicy
from intersight.model.vnic_san_connectivity_policy_response import VnicSanConnectivityPolicyResponse
from intersight.model.vnic_scp_status import VnicScpStatus
from intersight.model.vnic_scp_status_response import VnicScpStatusResponse
class VnicApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_vnic_eth_adapter_policy(
self,
vnic_eth_adapter_policy,
**kwargs
):
"""Create a 'vnic.EthAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_eth_adapter_policy(vnic_eth_adapter_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_eth_adapter_policy (VnicEthAdapterPolicy): The 'vnic.EthAdapterPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthAdapterPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_eth_adapter_policy'] = \
vnic_eth_adapter_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_eth_adapter_policy = _Endpoint(
settings={
'response_type': (VnicEthAdapterPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthAdapterPolicies',
'operation_id': 'create_vnic_eth_adapter_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_eth_adapter_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_eth_adapter_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_eth_adapter_policy':
(VnicEthAdapterPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_eth_adapter_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_eth_adapter_policy
)
def __create_vnic_eth_if(
self,
vnic_eth_if,
**kwargs
):
"""Create a 'vnic.EthIf' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_eth_if(vnic_eth_if, async_req=True)
>>> result = thread.get()
Args:
vnic_eth_if (VnicEthIf): The 'vnic.EthIf' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthIf
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_eth_if'] = \
vnic_eth_if
return self.call_with_http_info(**kwargs)
self.create_vnic_eth_if = _Endpoint(
settings={
'response_type': (VnicEthIf,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthIfs',
'operation_id': 'create_vnic_eth_if',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_eth_if',
'if_match',
'if_none_match',
],
'required': [
'vnic_eth_if',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_eth_if':
(VnicEthIf,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_eth_if': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_eth_if
)
def __create_vnic_eth_network_policy(
self,
vnic_eth_network_policy,
**kwargs
):
"""Create a 'vnic.EthNetworkPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_eth_network_policy(vnic_eth_network_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_eth_network_policy (VnicEthNetworkPolicy): The 'vnic.EthNetworkPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthNetworkPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_eth_network_policy'] = \
vnic_eth_network_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_eth_network_policy = _Endpoint(
settings={
'response_type': (VnicEthNetworkPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthNetworkPolicies',
'operation_id': 'create_vnic_eth_network_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_eth_network_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_eth_network_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_eth_network_policy':
(VnicEthNetworkPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_eth_network_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_eth_network_policy
)
def __create_vnic_eth_qos_policy(
self,
vnic_eth_qos_policy,
**kwargs
):
"""Create a 'vnic.EthQosPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_eth_qos_policy(vnic_eth_qos_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_eth_qos_policy (VnicEthQosPolicy): The 'vnic.EthQosPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthQosPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_eth_qos_policy'] = \
vnic_eth_qos_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_eth_qos_policy = _Endpoint(
settings={
'response_type': (VnicEthQosPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthQosPolicies',
'operation_id': 'create_vnic_eth_qos_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_eth_qos_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_eth_qos_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_eth_qos_policy':
(VnicEthQosPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_eth_qos_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_eth_qos_policy
)
def __create_vnic_fc_adapter_policy(
self,
vnic_fc_adapter_policy,
**kwargs
):
"""Create a 'vnic.FcAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_fc_adapter_policy(vnic_fc_adapter_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_fc_adapter_policy (VnicFcAdapterPolicy): The 'vnic.FcAdapterPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcAdapterPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_fc_adapter_policy'] = \
vnic_fc_adapter_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_fc_adapter_policy = _Endpoint(
settings={
'response_type': (VnicFcAdapterPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcAdapterPolicies',
'operation_id': 'create_vnic_fc_adapter_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_fc_adapter_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_fc_adapter_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_fc_adapter_policy':
(VnicFcAdapterPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_fc_adapter_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_fc_adapter_policy
)
def __create_vnic_fc_if(
self,
vnic_fc_if,
**kwargs
):
"""Create a 'vnic.FcIf' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_fc_if(vnic_fc_if, async_req=True)
>>> result = thread.get()
Args:
vnic_fc_if (VnicFcIf): The 'vnic.FcIf' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcIf
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_fc_if'] = \
vnic_fc_if
return self.call_with_http_info(**kwargs)
self.create_vnic_fc_if = _Endpoint(
settings={
'response_type': (VnicFcIf,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcIfs',
'operation_id': 'create_vnic_fc_if',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_fc_if',
'if_match',
'if_none_match',
],
'required': [
'vnic_fc_if',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_fc_if':
(VnicFcIf,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_fc_if': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_fc_if
)
def __create_vnic_fc_network_policy(
self,
vnic_fc_network_policy,
**kwargs
):
"""Create a 'vnic.FcNetworkPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_fc_network_policy(vnic_fc_network_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_fc_network_policy (VnicFcNetworkPolicy): The 'vnic.FcNetworkPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcNetworkPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_fc_network_policy'] = \
vnic_fc_network_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_fc_network_policy = _Endpoint(
settings={
'response_type': (VnicFcNetworkPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcNetworkPolicies',
'operation_id': 'create_vnic_fc_network_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_fc_network_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_fc_network_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_fc_network_policy':
(VnicFcNetworkPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_fc_network_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_fc_network_policy
)
def __create_vnic_fc_qos_policy(
self,
vnic_fc_qos_policy,
**kwargs
):
"""Create a 'vnic.FcQosPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_fc_qos_policy(vnic_fc_qos_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_fc_qos_policy (VnicFcQosPolicy): The 'vnic.FcQosPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcQosPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_fc_qos_policy'] = \
vnic_fc_qos_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_fc_qos_policy = _Endpoint(
settings={
'response_type': (VnicFcQosPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcQosPolicies',
'operation_id': 'create_vnic_fc_qos_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_fc_qos_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_fc_qos_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_fc_qos_policy':
(VnicFcQosPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_fc_qos_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_fc_qos_policy
)
def __create_vnic_iscsi_adapter_policy(
self,
vnic_iscsi_adapter_policy,
**kwargs
):
"""Create a 'vnic.IscsiAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_iscsi_adapter_policy(vnic_iscsi_adapter_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_iscsi_adapter_policy (VnicIscsiAdapterPolicy): The 'vnic.IscsiAdapterPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiAdapterPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_iscsi_adapter_policy'] = \
vnic_iscsi_adapter_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_iscsi_adapter_policy = _Endpoint(
settings={
'response_type': (VnicIscsiAdapterPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiAdapterPolicies',
'operation_id': 'create_vnic_iscsi_adapter_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_iscsi_adapter_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_iscsi_adapter_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_iscsi_adapter_policy':
(VnicIscsiAdapterPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_iscsi_adapter_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_iscsi_adapter_policy
)
def __create_vnic_iscsi_boot_policy(
self,
vnic_iscsi_boot_policy,
**kwargs
):
"""Create a 'vnic.IscsiBootPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_iscsi_boot_policy(vnic_iscsi_boot_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_iscsi_boot_policy (VnicIscsiBootPolicy): The 'vnic.IscsiBootPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiBootPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_iscsi_boot_policy'] = \
vnic_iscsi_boot_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_iscsi_boot_policy = _Endpoint(
settings={
'response_type': (VnicIscsiBootPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiBootPolicies',
'operation_id': 'create_vnic_iscsi_boot_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_iscsi_boot_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_iscsi_boot_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_iscsi_boot_policy':
(VnicIscsiBootPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_iscsi_boot_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_iscsi_boot_policy
)
def __create_vnic_iscsi_static_target_policy(
self,
vnic_iscsi_static_target_policy,
**kwargs
):
"""Create a 'vnic.IscsiStaticTargetPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_vnic_iscsi_static_target_policy(vnic_iscsi_static_target_policy, async_req=True)
>>> result = thread.get()
Args:
vnic_iscsi_static_target_policy (VnicIscsiStaticTargetPolicy): The 'vnic.IscsiStaticTargetPolicy' resource to create.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
if_none_match (str): For methods that apply server-side changes, If-None-Match used with the * value can be used to create a resource not known to exist, guaranteeing that another resource creation didn't happen before, losing the data of the previous put. The request will be processed only if the eventually existing resource's ETag doesn't match any of the values listed. Otherwise, the status code 412 (Precondition Failed) is used. The asterisk is a special value representing any resource. It is only useful when creating a resource, usually with PUT, to check if another resource with the identity has already been created before. The comparison with the stored ETag uses the weak comparison algorithm, meaning two resources are considered identical if the content is equivalent - they don't have to be identical byte for byte.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiStaticTargetPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['vnic_iscsi_static_target_policy'] = \
vnic_iscsi_static_target_policy
return self.call_with_http_info(**kwargs)
self.create_vnic_iscsi_static_target_policy = _Endpoint(
settings={
'response_type': (VnicIscsiStaticTargetPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiStaticTargetPolicies',
'operation_id': 'create_vnic_iscsi_static_target_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'vnic_iscsi_static_target_policy',
'if_match',
'if_none_match',
],
'required': [
'vnic_iscsi_static_target_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'vnic_iscsi_static_target_policy':
(VnicIscsiStaticTargetPolicy,),
'if_match':
(str,),
'if_none_match':
(str,),
},
'attribute_map': {
'if_match': 'If-Match',
'if_none_match': 'If-None-Match',
},
'location_map': {
'vnic_iscsi_static_target_policy': 'body',
'if_match': 'header',
'if_none_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_vnic_iscsi_static_target_policy
)
def __create_vnic_lan_connectivity_policy(self, vnic_lan_connectivity_policy, **kwargs):
    """Create a 'vnic.LanConnectivityPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.create_vnic_lan_connectivity_policy(vnic_lan_connectivity_policy, async_req=True)
    >>> result = thread.get()

    Args:
        vnic_lan_connectivity_policy (VnicLanConnectivityPolicy): The
            'vnic.LanConnectivityPolicy' resource to create.

    Keyword Args:
        if_match (str): value of the resource ModTime property; lets
            the server reject the request with 412 (Precondition
            Failed) if the resource changed since it was fetched,
            preventing lost updates. [optional]
        if_none_match (str): used with the '*' value to create the
            resource only if it does not already exist; 412 is
            returned otherwise. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        VnicLanConnectivityPolicy. If called asynchronously, returns
        the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['vnic_lan_connectivity_policy'] = vnic_lan_connectivity_policy
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the create wrapper to the REST operation.
self.create_vnic_lan_connectivity_policy = _Endpoint(
    settings={
        'response_type': (VnicLanConnectivityPolicy,),
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/LanConnectivityPolicies',
        'operation_id': 'create_vnic_lan_connectivity_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'vnic_lan_connectivity_policy',
            'if_match',
            'if_none_match',
        ],
        'required': ['vnic_lan_connectivity_policy'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'vnic_lan_connectivity_policy':
                (VnicLanConnectivityPolicy,),
            'if_match': (str,),
            'if_none_match': (str,),
        },
        'attribute_map': {
            'if_match': 'If-Match',
            'if_none_match': 'If-None-Match',
        },
        'location_map': {
            'vnic_lan_connectivity_policy': 'body',
            'if_match': 'header',
            'if_none_match': 'header',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json'],
    },
    api_client=api_client,
    callable=__create_vnic_lan_connectivity_policy,
)
def __create_vnic_san_connectivity_policy(self, vnic_san_connectivity_policy, **kwargs):
    """Create a 'vnic.SanConnectivityPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.create_vnic_san_connectivity_policy(vnic_san_connectivity_policy, async_req=True)
    >>> result = thread.get()

    Args:
        vnic_san_connectivity_policy (VnicSanConnectivityPolicy): The
            'vnic.SanConnectivityPolicy' resource to create.

    Keyword Args:
        if_match (str): value of the resource ModTime property; lets
            the server reject the request with 412 (Precondition
            Failed) if the resource changed since it was fetched,
            preventing lost updates. [optional]
        if_none_match (str): used with the '*' value to create the
            resource only if it does not already exist; 412 is
            returned otherwise. [optional]
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        VnicSanConnectivityPolicy. If called asynchronously, returns
        the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['vnic_san_connectivity_policy'] = vnic_san_connectivity_policy
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the create wrapper to the REST operation.
self.create_vnic_san_connectivity_policy = _Endpoint(
    settings={
        'response_type': (VnicSanConnectivityPolicy,),
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/SanConnectivityPolicies',
        'operation_id': 'create_vnic_san_connectivity_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'vnic_san_connectivity_policy',
            'if_match',
            'if_none_match',
        ],
        'required': ['vnic_san_connectivity_policy'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'vnic_san_connectivity_policy':
                (VnicSanConnectivityPolicy,),
            'if_match': (str,),
            'if_none_match': (str,),
        },
        'attribute_map': {
            'if_match': 'If-Match',
            'if_none_match': 'If-None-Match',
        },
        'location_map': {
            'vnic_san_connectivity_policy': 'body',
            'if_match': 'header',
            'if_none_match': 'header',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json'],
    },
    api_client=api_client,
    callable=__create_vnic_san_connectivity_policy,
)
def __delete_vnic_eth_adapter_policy(self, moid, **kwargs):
    """Delete a 'vnic.EthAdapterPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_eth_adapter_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_eth_adapter_policy = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthAdapterPolicies/{Moid}',
        'operation_id': 'delete_vnic_eth_adapter_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_eth_adapter_policy,
)
def __delete_vnic_eth_if(self, moid, **kwargs):
    """Delete a 'vnic.EthIf' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_eth_if(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_eth_if = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthIfs/{Moid}',
        'operation_id': 'delete_vnic_eth_if',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_eth_if,
)
def __delete_vnic_eth_network_policy(self, moid, **kwargs):
    """Delete a 'vnic.EthNetworkPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_eth_network_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_eth_network_policy = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthNetworkPolicies/{Moid}',
        'operation_id': 'delete_vnic_eth_network_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_eth_network_policy,
)
def __delete_vnic_eth_qos_policy(self, moid, **kwargs):
    """Delete a 'vnic.EthQosPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_eth_qos_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_eth_qos_policy = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthQosPolicies/{Moid}',
        'operation_id': 'delete_vnic_eth_qos_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_eth_qos_policy,
)
def __delete_vnic_fc_adapter_policy(self, moid, **kwargs):
    """Delete a 'vnic.FcAdapterPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_fc_adapter_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_fc_adapter_policy = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/FcAdapterPolicies/{Moid}',
        'operation_id': 'delete_vnic_fc_adapter_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_fc_adapter_policy,
)
def __delete_vnic_fc_if(self, moid, **kwargs):
    """Delete a 'vnic.FcIf' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_fc_if(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_fc_if = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/FcIfs/{Moid}',
        'operation_id': 'delete_vnic_fc_if',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_fc_if,
)
def __delete_vnic_fc_network_policy(self, moid, **kwargs):
    """Delete a 'vnic.FcNetworkPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_fc_network_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_fc_network_policy = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/FcNetworkPolicies/{Moid}',
        'operation_id': 'delete_vnic_fc_network_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_fc_network_policy,
)
def __delete_vnic_fc_qos_policy(self, moid, **kwargs):
    """Delete a 'vnic.FcQosPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    HTTP request:

    >>> thread = api.delete_vnic_fc_qos_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response body,
            without status code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding the
            response data. Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Apply the standard request options only where the caller did not
    # supply a value; setdefault leaves explicit choices untouched.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor binding the delete wrapper to the REST operation.
self.delete_vnic_fc_qos_policy = _Endpoint(
    settings={
        'response_type': None,
        # NOTE(review): 'oAuth2' appears twice in the generated auth
        # list; preserved verbatim.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/FcQosPolicies/{Moid}',
        'operation_id': 'delete_vnic_fc_qos_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_fc_qos_policy,
)
def __delete_vnic_iscsi_adapter_policy(
    self,
    moid,
    **kwargs
):
    """Delete a 'vnic.IscsiAdapterPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.delete_vnic_iscsi_adapter_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.delete_vnic_iscsi_adapter_policy = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/IscsiAdapterPolicies/{Moid}',
        'operation_id': 'delete_vnic_iscsi_adapter_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_iscsi_adapter_policy
)
def __delete_vnic_iscsi_boot_policy(
    self,
    moid,
    **kwargs
):
    """Delete a 'vnic.IscsiBootPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.delete_vnic_iscsi_boot_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.delete_vnic_iscsi_boot_policy = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/IscsiBootPolicies/{Moid}',
        'operation_id': 'delete_vnic_iscsi_boot_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_iscsi_boot_policy
)
def __delete_vnic_iscsi_static_target_policy(
    self,
    moid,
    **kwargs
):
    """Delete a 'vnic.IscsiStaticTargetPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.delete_vnic_iscsi_static_target_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.delete_vnic_iscsi_static_target_policy = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/IscsiStaticTargetPolicies/{Moid}',
        'operation_id': 'delete_vnic_iscsi_static_target_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_iscsi_static_target_policy
)
def __delete_vnic_lan_connectivity_policy(
    self,
    moid,
    **kwargs
):
    """Delete a 'vnic.LanConnectivityPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.delete_vnic_lan_connectivity_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.delete_vnic_lan_connectivity_policy = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/LanConnectivityPolicies/{Moid}',
        'operation_id': 'delete_vnic_lan_connectivity_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_lan_connectivity_policy
)
def __delete_vnic_san_connectivity_policy(
    self,
    moid,
    **kwargs
):
    """Delete a 'vnic.SanConnectivityPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.delete_vnic_san_connectivity_policy(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None. If called asynchronously, returns the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.delete_vnic_san_connectivity_policy = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/SanConnectivityPolicies/{Moid}',
        'operation_id': 'delete_vnic_san_connectivity_policy',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__delete_vnic_san_connectivity_policy
)
def __get_vnic_eth_adapter_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.EthAdapterPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.get_vnic_eth_adapter_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        VnicEthAdapterPolicy. If called asynchronously, returns the
        request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.get_vnic_eth_adapter_policy_by_moid = _Endpoint(
    settings={
        'response_type': (VnicEthAdapterPolicy,),
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthAdapterPolicies/{Moid}',
        'operation_id': 'get_vnic_eth_adapter_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_adapter_policy_by_moid
)
def __get_vnic_eth_adapter_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.EthAdapterPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.get_vnic_eth_adapter_policy_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        filter (str): "$filter" predicate expression selecting the subset
            of entries to return; supports property references and
            literals (single-quoted strings, numbers, booleans).
            [optional] if omitted the server uses the default value ""
        orderby (str): Properties used to sort the collection of
            resources. [optional]
        top (int): Maximum number of resources to return in the response.
            [optional] if omitted the server uses the default value 100
        skip (int): Number of resources to skip in the response.
            [optional] if omitted the server uses the default value 0
        select (str): Subset of properties to return. [optional] if
            omitted the server uses the default value ""
        expand (str): Additional attributes or related resources to
            return with the primary resources. [optional]
        apply (str): Sequence of "$apply" set transformations, separated
            by forward slashes and applied consecutively. Supported
            aggregation methods are "aggregate" (comma-separated
            aggregate expressions, yielding a single aggregated instance)
            and "groupby" (splits the set by grouping properties, applies
            transformations per subset, and concatenates the results).
            [optional]
        count (bool): Return the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): Request an inline count of matching resources
            alongside the resources in the response. [optional] if
            omitted the server uses the default value "allpages"
        at (str): Like "$filter", but specifically filters
            versioning-information properties; the predicate expression
            supports property references and literals. [optional]
        tags (str): Request a summary of Tag utilization for this
            resource: tag keys, usage counts, and assigned values.
            [optional]
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        VnicEthAdapterPolicyResponse. If called asynchronously, returns
        the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    return self.call_with_http_info(**kwargs)
self.get_vnic_eth_adapter_policy_list = _Endpoint(
    settings={
        'response_type': (VnicEthAdapterPolicyResponse,),
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthAdapterPolicies',
        'operation_id': 'get_vnic_eth_adapter_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_adapter_policy_list
)
def __get_vnic_eth_if_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.EthIf' resource.  # noqa: E501

    Synchronous by default; pass async_req=True to issue the request
    asynchronously and receive a thread whose .get() yields the result.

    >>> thread = api.get_vnic_eth_if_by_moid(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return only the response data,
            without status code and headers. Default is True.
        _preload_content (bool): if False, the raw urllib3.HTTPResponse
            object is returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) timeout pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        VnicEthIf. If called asynchronously, returns the request thread.
    """
    # Fill in client-option defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
self.get_vnic_eth_if_by_moid = _Endpoint(
    settings={
        'response_type': (VnicEthIf,),
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/EthIfs/{Moid}',
        'operation_id': 'get_vnic_eth_if_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_if_by_moid
)
def __get_vnic_eth_if_list(
    self,
    **kwargs
):
    """Read a 'vnic.EthIf' resource. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_vnic_eth_if_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
        orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
        top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
        skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
        select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
        expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
        apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
        count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
        inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
        at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
        tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicEthIfResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults only where the caller did not supply a
    # value; dict.setdefault replaces the repetitive
    # kwargs['k'] = kwargs.get('k', default) pattern.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/EthIfs (collection query):
# binds the private list helper to the operation's metadata.
self.get_vnic_eth_if_list = _Endpoint(
    settings={
        'response_type': (VnicEthIfResponse,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            # NOTE(review): 'oAuth2' listed twice (generator artifact).
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthIfs',
        'operation_id': 'get_vnic_eth_if_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        # 'inlinecount' is restricted to the allowed_values below.
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        # Python name -> OData query-parameter name on the wire.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_if_list
)
def __get_vnic_eth_network_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.EthNetworkPolicy' resource. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_vnic_eth_network_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicEthNetworkPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults only where the caller did not supply a
    # value; dict.setdefault replaces the repetitive
    # kwargs['k'] = kwargs.get('k', default) pattern.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    # The positional Moid always overrides anything passed via kwargs.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/EthNetworkPolicies/{Moid}:
# binds the private request helper to the operation's metadata.
self.get_vnic_eth_network_policy_by_moid = _Endpoint(
    settings={
        # Response body is deserialized into a VnicEthNetworkPolicy model.
        'response_type': (VnicEthNetworkPolicy,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            # NOTE(review): 'oAuth2' listed twice (generator artifact).
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthNetworkPolicies/{Moid}',
        'operation_id': 'get_vnic_eth_network_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        # Python name -> wire (path-template) name.
        'attribute_map': {
            'moid': 'Moid',
        },
        'location_map': {
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_network_policy_by_moid
)
def __get_vnic_eth_network_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.EthNetworkPolicy' resource. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_vnic_eth_network_policy_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
        orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
        top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
        skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
        select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
        expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
        apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
        count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
        inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
        at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
        tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicEthNetworkPolicyResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults only where the caller did not supply a
    # value; dict.setdefault replaces the repetitive
    # kwargs['k'] = kwargs.get('k', default) pattern.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/EthNetworkPolicies
# (collection query): binds the private list helper to the
# operation's metadata.
self.get_vnic_eth_network_policy_list = _Endpoint(
    settings={
        'response_type': (VnicEthNetworkPolicyResponse,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            # NOTE(review): 'oAuth2' listed twice (generator artifact).
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthNetworkPolicies',
        'operation_id': 'get_vnic_eth_network_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        # 'inlinecount' is restricted to the allowed_values below.
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        # Python name -> OData query-parameter name on the wire.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_network_policy_list
)
def __get_vnic_eth_qos_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.EthQosPolicy' resource. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_vnic_eth_qos_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicEthQosPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults only where the caller did not supply a
    # value; dict.setdefault replaces the repetitive
    # kwargs['k'] = kwargs.get('k', default) pattern.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    # The positional Moid always overrides anything passed via kwargs.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/EthQosPolicies/{Moid}:
# binds the private request helper to the operation's metadata.
self.get_vnic_eth_qos_policy_by_moid = _Endpoint(
    settings={
        # Response body is deserialized into a VnicEthQosPolicy model.
        'response_type': (VnicEthQosPolicy,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            # NOTE(review): 'oAuth2' listed twice (generator artifact).
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthQosPolicies/{Moid}',
        'operation_id': 'get_vnic_eth_qos_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        # Python name -> wire (path-template) name.
        'attribute_map': {
            'moid': 'Moid',
        },
        'location_map': {
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_qos_policy_by_moid
)
def __get_vnic_eth_qos_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.EthQosPolicy' resource. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_vnic_eth_qos_policy_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
        orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
        top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
        skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
        select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
        expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
        apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
        count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
        inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
        at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
        tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicEthQosPolicyResponse
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults only where the caller did not supply a
    # value; dict.setdefault replaces the repetitive
    # kwargs['k'] = kwargs.get('k', default) pattern.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/EthQosPolicies
# (collection query): binds the private list helper to the
# operation's metadata.
self.get_vnic_eth_qos_policy_list = _Endpoint(
    settings={
        'response_type': (VnicEthQosPolicyResponse,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            # NOTE(review): 'oAuth2' listed twice (generator artifact).
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthQosPolicies',
        'operation_id': 'get_vnic_eth_qos_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        # 'inlinecount' is restricted to the allowed_values below.
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        # Python name -> OData query-parameter name on the wire.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_eth_qos_policy_list
)
def __get_vnic_fc_adapter_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.FcAdapterPolicy' resource. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_vnic_fc_adapter_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicFcAdapterPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework defaults only where the caller did not supply a
    # value; dict.setdefault replaces the repetitive
    # kwargs['k'] = kwargs.get('k', default) pattern.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index', None)
    # The positional Moid always overrides anything passed via kwargs.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcAdapterPolicies/{Moid}:
# binds the private request helper to the operation's metadata.
self.get_vnic_fc_adapter_policy_by_moid = _Endpoint(
    settings={
        # Response body is deserialized into a VnicFcAdapterPolicy model.
        'response_type': (VnicFcAdapterPolicy,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            # NOTE(review): 'oAuth2' listed twice (generator artifact).
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcAdapterPolicies/{Moid}',
        'operation_id': 'get_vnic_fc_adapter_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        # Python name -> wire (path-template) name.
        'attribute_map': {
            'moid': 'Moid',
        },
        'location_map': {
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_adapter_policy_by_moid
)
def __get_vnic_fc_adapter_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.FcAdapterPolicy' resource.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread instead of the response.

    >>> thread = api.get_vnic_fc_adapter_policy_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        filter (str): "$filter" query expression selecting the subset of
            entries to return. [optional]
        orderby (str): properties used to sort the collection. [optional]
        top (int): maximum number of resources to return. [optional]
        skip (int): number of resources to skip. [optional]
        select (str): subset of properties to return. [optional]
        expand (str): additional attributes or related resources to
            include. [optional]
        apply (str): "$apply" expression chaining "aggregate"/"groupby"
            set transformations. [optional]
        count (bool): return the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): request an inline count alongside the
            resources in the response. [optional]
        at (str): version-filter expression (the "at" query option).
            [optional]
        tags (str): request a summary of Tag utilization for this
            resource. [optional]
        _return_http_data_only (bool): return the body only, without
            status code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute the request asynchronously.

    Returns:
        VnicFcAdapterPolicyResponse, or the request thread when called
        asynchronously.
    """
    # Fill in the client-level options only where the caller did not
    # supply them; equivalent to kwargs[k] = kwargs.get(k, default).
    client_options = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in client_options.items():
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcAdapterPolicies:
# reads the collection of 'vnic.FcAdapterPolicy' resources, with the
# standard OData-style query options.
self.get_vnic_fc_adapter_policy_list = _Endpoint(
    settings={
        'response_type': (VnicFcAdapterPolicyResponse,),
        # 'oAuth2' was emitted twice by the generator; applying the same
        # auth scheme a second time is redundant, so the duplicate entry
        # is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcAdapterPolicies',
        'operation_id': 'get_vnic_fc_adapter_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            # 'inlinecount' is an enum restricted to these two values.
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        # Python names map to the '$'-prefixed OData query parameters.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_adapter_policy_list
)
def __get_vnic_fc_if_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.FcIf' resource.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread instead of the response.

    >>> thread = api.get_vnic_fc_if_by_moid(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return the body only, without
            status code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute the request asynchronously.

    Returns:
        VnicFcIf, or the request thread when called asynchronously.
    """
    # Fill in the client-level options only where the caller did not
    # supply them; equivalent to kwargs[k] = kwargs.get(k, default).
    client_options = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in client_options.items():
        kwargs.setdefault(option, default)
    # The positional 'moid' always takes precedence over kwargs.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcIfs/{Moid}:
# reads a single 'vnic.FcIf' resource by its Moid.
self.get_vnic_fc_if_by_moid = _Endpoint(
    settings={
        'response_type': (VnicFcIf,),
        # 'oAuth2' was emitted twice by the generator; applying the same
        # auth scheme a second time is redundant, so the duplicate entry
        # is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcIfs/{Moid}',
        'operation_id': 'get_vnic_fc_if_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        # 'moid' is sent as the 'Moid' path segment of the URL.
        'attribute_map': {
            'moid': 'Moid',
        },
        'location_map': {
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_if_by_moid
)
def __get_vnic_fc_if_list(
    self,
    **kwargs
):
    """Read a 'vnic.FcIf' resource.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread instead of the response.

    >>> thread = api.get_vnic_fc_if_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        filter (str): "$filter" query expression selecting the subset of
            entries to return. [optional]
        orderby (str): properties used to sort the collection. [optional]
        top (int): maximum number of resources to return. [optional]
        skip (int): number of resources to skip. [optional]
        select (str): subset of properties to return. [optional]
        expand (str): additional attributes or related resources to
            include. [optional]
        apply (str): "$apply" expression chaining "aggregate"/"groupby"
            set transformations. [optional]
        count (bool): return the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): request an inline count alongside the
            resources in the response. [optional]
        at (str): version-filter expression (the "at" query option).
            [optional]
        tags (str): request a summary of Tag utilization for this
            resource. [optional]
        _return_http_data_only (bool): return the body only, without
            status code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute the request asynchronously.

    Returns:
        VnicFcIfResponse, or the request thread when called
        asynchronously.
    """
    # Fill in the client-level options only where the caller did not
    # supply them; equivalent to kwargs[k] = kwargs.get(k, default).
    client_options = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in client_options.items():
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcIfs:
# reads the collection of 'vnic.FcIf' resources, with the standard
# OData-style query options.
self.get_vnic_fc_if_list = _Endpoint(
    settings={
        'response_type': (VnicFcIfResponse,),
        # 'oAuth2' was emitted twice by the generator; applying the same
        # auth scheme a second time is redundant, so the duplicate entry
        # is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcIfs',
        'operation_id': 'get_vnic_fc_if_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            # 'inlinecount' is an enum restricted to these two values.
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        # Python names map to the '$'-prefixed OData query parameters.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_if_list
)
def __get_vnic_fc_network_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.FcNetworkPolicy' resource.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread instead of the response.

    >>> thread = api.get_vnic_fc_network_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return the body only, without
            status code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute the request asynchronously.

    Returns:
        VnicFcNetworkPolicy, or the request thread when called
        asynchronously.
    """
    # Fill in the client-level options only where the caller did not
    # supply them; equivalent to kwargs[k] = kwargs.get(k, default).
    client_options = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in client_options.items():
        kwargs.setdefault(option, default)
    # The positional 'moid' always takes precedence over kwargs.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcNetworkPolicies/{Moid}:
# reads a single 'vnic.FcNetworkPolicy' resource by its Moid.
self.get_vnic_fc_network_policy_by_moid = _Endpoint(
    settings={
        'response_type': (VnicFcNetworkPolicy,),
        # 'oAuth2' was emitted twice by the generator; applying the same
        # auth scheme a second time is redundant, so the duplicate entry
        # is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcNetworkPolicies/{Moid}',
        'operation_id': 'get_vnic_fc_network_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        # 'moid' is sent as the 'Moid' path segment of the URL.
        'attribute_map': {
            'moid': 'Moid',
        },
        'location_map': {
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_network_policy_by_moid
)
def __get_vnic_fc_network_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.FcNetworkPolicy' resource.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread instead of the response.

    >>> thread = api.get_vnic_fc_network_policy_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        filter (str): "$filter" query expression selecting the subset of
            entries to return. [optional]
        orderby (str): properties used to sort the collection. [optional]
        top (int): maximum number of resources to return. [optional]
        skip (int): number of resources to skip. [optional]
        select (str): subset of properties to return. [optional]
        expand (str): additional attributes or related resources to
            include. [optional]
        apply (str): "$apply" expression chaining "aggregate"/"groupby"
            set transformations. [optional]
        count (bool): return the count of matching resources instead of
            the resources themselves. [optional]
        inlinecount (str): request an inline count alongside the
            resources in the response. [optional]
        at (str): version-filter expression (the "at" query option).
            [optional]
        tags (str): request a summary of Tag utilization for this
            resource. [optional]
        _return_http_data_only (bool): return the body only, without
            status code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute the request asynchronously.

    Returns:
        VnicFcNetworkPolicyResponse, or the request thread when called
        asynchronously.
    """
    # Fill in the client-level options only where the caller did not
    # supply them; equivalent to kwargs[k] = kwargs.get(k, default).
    client_options = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in client_options.items():
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcNetworkPolicies:
# reads the collection of 'vnic.FcNetworkPolicy' resources, with the
# standard OData-style query options.
self.get_vnic_fc_network_policy_list = _Endpoint(
    settings={
        'response_type': (VnicFcNetworkPolicyResponse,),
        # 'oAuth2' was emitted twice by the generator; applying the same
        # auth scheme a second time is redundant, so the duplicate entry
        # is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcNetworkPolicies',
        'operation_id': 'get_vnic_fc_network_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            # 'inlinecount' is an enum restricted to these two values.
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        # Python names map to the '$'-prefixed OData query parameters.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_network_policy_list
)
def __get_vnic_fc_qos_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.FcQosPolicy' resource.  # noqa: E501

    This method is synchronous by default; pass async_req=True to get a
    request thread instead of the response.

    >>> thread = api.get_vnic_fc_qos_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): return the body only, without
            status code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total request timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): index of the server to use. Default is
            read from the configuration.
        async_req (bool): execute the request asynchronously.

    Returns:
        VnicFcQosPolicy, or the request thread when called
        asynchronously.
    """
    # Fill in the client-level options only where the caller did not
    # supply them; equivalent to kwargs[k] = kwargs.get(k, default).
    client_options = {
        'async_req': False,
        '_return_http_data_only': True,
        '_preload_content': True,
        '_request_timeout': None,
        '_check_input_type': True,
        '_check_return_type': True,
        '_host_index': None,
    }
    for option, default in client_options.items():
        kwargs.setdefault(option, default)
    # The positional 'moid' always takes precedence over kwargs.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# Endpoint descriptor for GET /api/v1/vnic/FcQosPolicies/{Moid}:
# reads a single 'vnic.FcQosPolicy' resource by its Moid.
self.get_vnic_fc_qos_policy_by_moid = _Endpoint(
    settings={
        'response_type': (VnicFcQosPolicy,),
        # 'oAuth2' was emitted twice by the generator; applying the same
        # auth scheme a second time is redundant, so the duplicate entry
        # is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcQosPolicies/{Moid}',
        'operation_id': 'get_vnic_fc_qos_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        # 'moid' is sent as the 'Moid' path segment of the URL.
        'attribute_map': {
            'moid': 'Moid',
        },
        'location_map': {
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_fc_qos_policy_by_moid
)
def __get_vnic_fc_qos_policy_list(
self,
**kwargs
):
"""Read a 'vnic.FcQosPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vnic_fc_qos_policy_list(async_req=True)
>>> result = thread.get()
Keyword Args:
filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcQosPolicyResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_vnic_fc_qos_policy_list = _Endpoint(
settings={
'response_type': (VnicFcQosPolicyResponse,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcQosPolicies',
'operation_id': 'get_vnic_fc_qos_policy_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'filter',
'orderby',
'top',
'skip',
'select',
'expand',
'apply',
'count',
'inlinecount',
'at',
'tags',
],
'required': [],
'nullable': [
],
'enum': [
'inlinecount',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('inlinecount',): {
"ALLPAGES": "allpages",
"NONE": "none"
},
},
'openapi_types': {
'filter':
(str,),
'orderby':
(str,),
'top':
(int,),
'skip':
(int,),
'select':
(str,),
'expand':
(str,),
'apply':
(str,),
'count':
(bool,),
'inlinecount':
(str,),
'at':
(str,),
'tags':
(str,),
},
'attribute_map': {
'filter': '$filter',
'orderby': '$orderby',
'top': '$top',
'skip': '$skip',
'select': '$select',
'expand': '$expand',
'apply': '$apply',
'count': '$count',
'inlinecount': '$inlinecount',
'at': 'at',
'tags': 'tags',
},
'location_map': {
'filter': 'query',
'orderby': 'query',
'top': 'query',
'skip': 'query',
'select': 'query',
'expand': 'query',
'apply': 'query',
'count': 'query',
'inlinecount': 'query',
'at': 'query',
'tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vnic_fc_qos_policy_list
)
def __get_vnic_iscsi_adapter_policy_by_moid(
self,
moid,
**kwargs
):
"""Read a 'vnic.IscsiAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vnic_iscsi_adapter_policy_by_moid(moid, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiAdapterPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.get_vnic_iscsi_adapter_policy_by_moid = _Endpoint(
settings={
'response_type': (VnicIscsiAdapterPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiAdapterPolicies/{Moid}',
'operation_id': 'get_vnic_iscsi_adapter_policy_by_moid',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vnic_iscsi_adapter_policy_by_moid
)
def __get_vnic_iscsi_adapter_policy_list(
self,
**kwargs
):
"""Read a 'vnic.IscsiAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vnic_iscsi_adapter_policy_list(async_req=True)
>>> result = thread.get()
Keyword Args:
filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiAdapterPolicyResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_vnic_iscsi_adapter_policy_list = _Endpoint(
settings={
'response_type': (VnicIscsiAdapterPolicyResponse,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiAdapterPolicies',
'operation_id': 'get_vnic_iscsi_adapter_policy_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'filter',
'orderby',
'top',
'skip',
'select',
'expand',
'apply',
'count',
'inlinecount',
'at',
'tags',
],
'required': [],
'nullable': [
],
'enum': [
'inlinecount',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('inlinecount',): {
"ALLPAGES": "allpages",
"NONE": "none"
},
},
'openapi_types': {
'filter':
(str,),
'orderby':
(str,),
'top':
(int,),
'skip':
(int,),
'select':
(str,),
'expand':
(str,),
'apply':
(str,),
'count':
(bool,),
'inlinecount':
(str,),
'at':
(str,),
'tags':
(str,),
},
'attribute_map': {
'filter': '$filter',
'orderby': '$orderby',
'top': '$top',
'skip': '$skip',
'select': '$select',
'expand': '$expand',
'apply': '$apply',
'count': '$count',
'inlinecount': '$inlinecount',
'at': 'at',
'tags': 'tags',
},
'location_map': {
'filter': 'query',
'orderby': 'query',
'top': 'query',
'skip': 'query',
'select': 'query',
'expand': 'query',
'apply': 'query',
'count': 'query',
'inlinecount': 'query',
'at': 'query',
'tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vnic_iscsi_adapter_policy_list
)
def __get_vnic_iscsi_boot_policy_by_moid(
self,
moid,
**kwargs
):
"""Read a 'vnic.IscsiBootPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vnic_iscsi_boot_policy_by_moid(moid, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiBootPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.get_vnic_iscsi_boot_policy_by_moid = _Endpoint(
settings={
'response_type': (VnicIscsiBootPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiBootPolicies/{Moid}',
'operation_id': 'get_vnic_iscsi_boot_policy_by_moid',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vnic_iscsi_boot_policy_by_moid
)
def __get_vnic_iscsi_boot_policy_list(
self,
**kwargs
):
"""Read a 'vnic.IscsiBootPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vnic_iscsi_boot_policy_list(async_req=True)
>>> result = thread.get()
Keyword Args:
filter (str): Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).. [optional] if omitted the server will use the default value of ""
orderby (str): Determines what properties are used to sort the collection of resources.. [optional]
top (int): Specifies the maximum number of resources to return in the response.. [optional] if omitted the server will use the default value of 100
skip (int): Specifies the number of resources to skip in the response.. [optional] if omitted the server will use the default value of 0
select (str): Specifies a subset of properties to return.. [optional] if omitted the server will use the default value of ""
expand (str): Specify additional attributes or related resources to return in addition to the primary resources.. [optional]
apply (str): Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.. [optional]
count (bool): The $count query specifies the service should return the count of the matching resources, instead of returning the resources.. [optional]
inlinecount (str): The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.. [optional] if omitted the server will use the default value of "allpages"
at (str): Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.. [optional]
tags (str): The 'tags' parameter is used to request a summary of the Tag utilization for this resource. When the 'tags' parameter is specified, the response provides a list of tag keys, the number of times the key has been used across all documents, and the tag values that have been assigned to the tag key.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiBootPolicyResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_vnic_iscsi_boot_policy_list = _Endpoint(
settings={
'response_type': (VnicIscsiBootPolicyResponse,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiBootPolicies',
'operation_id': 'get_vnic_iscsi_boot_policy_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'filter',
'orderby',
'top',
'skip',
'select',
'expand',
'apply',
'count',
'inlinecount',
'at',
'tags',
],
'required': [],
'nullable': [
],
'enum': [
'inlinecount',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('inlinecount',): {
"ALLPAGES": "allpages",
"NONE": "none"
},
},
'openapi_types': {
'filter':
(str,),
'orderby':
(str,),
'top':
(int,),
'skip':
(int,),
'select':
(str,),
'expand':
(str,),
'apply':
(str,),
'count':
(bool,),
'inlinecount':
(str,),
'at':
(str,),
'tags':
(str,),
},
'attribute_map': {
'filter': '$filter',
'orderby': '$orderby',
'top': '$top',
'skip': '$skip',
'select': '$select',
'expand': '$expand',
'apply': '$apply',
'count': '$count',
'inlinecount': '$inlinecount',
'at': 'at',
'tags': 'tags',
},
'location_map': {
'filter': 'query',
'orderby': 'query',
'top': 'query',
'skip': 'query',
'select': 'query',
'expand': 'query',
'apply': 'query',
'count': 'query',
'inlinecount': 'query',
'at': 'query',
'tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vnic_iscsi_boot_policy_list
)
def __get_vnic_iscsi_static_target_policy_by_moid(
self,
moid,
**kwargs
):
"""Read a 'vnic.IscsiStaticTargetPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_vnic_iscsi_static_target_policy_by_moid(moid, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicIscsiStaticTargetPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
return self.call_with_http_info(**kwargs)
self.get_vnic_iscsi_static_target_policy_by_moid = _Endpoint(
settings={
'response_type': (VnicIscsiStaticTargetPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/IscsiStaticTargetPolicies/{Moid}',
'operation_id': 'get_vnic_iscsi_static_target_policy_by_moid',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'moid',
],
'required': [
'moid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
},
'attribute_map': {
'moid': 'Moid',
},
'location_map': {
'moid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json',
'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
],
'content_type': [],
},
api_client=api_client,
callable=__get_vnic_iscsi_static_target_policy_by_moid
)
def __get_vnic_iscsi_static_target_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.IscsiStaticTargetPolicy' resource.  # noqa: E501
    Queries the collection endpoint. Synchronous by default; pass
    async_req=True to receive the request thread instead of the response.
    >>> thread = api.get_vnic_iscsi_static_target_policy_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): OData-style $filter predicate selecting which
            resources to return. [optional]
        orderby (str): Properties used to sort the collection. [optional]
        top (int): Maximum number of resources to return. [optional]
        skip (int): Number of resources to skip. [optional]
        select (str): Subset of properties to return. [optional]
        expand (str): Additional attributes or related resources to
            include. [optional]
        apply (str): $apply aggregation/transformation pipeline. [optional]
        count (bool): Return only the count of matching resources. [optional]
        inlinecount (str): Include an inline count with the results;
            "allpages" or "none". [optional]
        at (str): Like $filter but applied to versioning properties. [optional]
        tags (str): Request a summary of tag-key utilization. [optional]
        _return_http_data_only (bool): omit status code and headers.
            Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default is True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use; defaults to
            the configured host.
        async_req (bool): execute request asynchronously
    Returns:
        VnicIscsiStaticTargetPolicyResponse, or the request thread when
        async_req is True.
    """
    # Fill in the standard request-control options without clobbering
    # anything the caller supplied explicitly.
    request_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    )
    for option, default in request_defaults:
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# GET /api/v1/vnic/IscsiStaticTargetPolicies — collection query endpoint
# with the standard OData-style query options.
self.get_vnic_iscsi_static_target_policy_list = _Endpoint(
    settings={
        'response_type': (VnicIscsiStaticTargetPolicyResponse,),
        # NOTE(review): 'oAuth2' listed twice in the generated spec —
        # redundant but preserved as-is.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/IscsiStaticTargetPolicies',
        'operation_id': 'get_vnic_iscsi_static_target_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {"ALLPAGES": "allpages", "NONE": "none"},
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        # OData query options carry a '$' prefix on the wire.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_iscsi_static_target_policy_list
)
def __get_vnic_lan_connectivity_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.LanConnectivityPolicy' resource.  # noqa: E501
    Synchronous by default; pass async_req=True to receive the request
    thread instead of the response.
    >>> thread = api.get_vnic_lan_connectivity_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): omit status code and headers.
            Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default is True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use; defaults to
            the configured host.
        async_req (bool): execute request asynchronously
    Returns:
        VnicLanConnectivityPolicy, or the request thread when async_req
        is True.
    """
    # Fill in the standard request-control options without clobbering
    # anything the caller supplied explicitly.
    request_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    )
    for option, default in request_defaults:
        kwargs.setdefault(option, default)
    # The path parameter travels with the rest of the options.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# GET /api/v1/vnic/LanConnectivityPolicies/{Moid} — single-resource read.
self.get_vnic_lan_connectivity_policy_by_moid = _Endpoint(
    settings={
        'response_type': (VnicLanConnectivityPolicy,),
        # NOTE(review): 'oAuth2' listed twice in the generated spec —
        # redundant but preserved as-is.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/LanConnectivityPolicies/{Moid}',
        'operation_id': 'get_vnic_lan_connectivity_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        # 'moid' is sent as the 'Moid' path segment.
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_lan_connectivity_policy_by_moid
)
def __get_vnic_lan_connectivity_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.LanConnectivityPolicy' resource.  # noqa: E501
    Queries the collection endpoint. Synchronous by default; pass
    async_req=True to receive the request thread instead of the response.
    >>> thread = api.get_vnic_lan_connectivity_policy_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): OData-style $filter predicate selecting which
            resources to return. [optional]
        orderby (str): Properties used to sort the collection. [optional]
        top (int): Maximum number of resources to return. [optional]
        skip (int): Number of resources to skip. [optional]
        select (str): Subset of properties to return. [optional]
        expand (str): Additional attributes or related resources to
            include. [optional]
        apply (str): $apply aggregation/transformation pipeline. [optional]
        count (bool): Return only the count of matching resources. [optional]
        inlinecount (str): Include an inline count with the results;
            "allpages" or "none". [optional]
        at (str): Like $filter but applied to versioning properties. [optional]
        tags (str): Request a summary of tag-key utilization. [optional]
        _return_http_data_only (bool): omit status code and headers.
            Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default is True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use; defaults to
            the configured host.
        async_req (bool): execute request asynchronously
    Returns:
        VnicLanConnectivityPolicyResponse, or the request thread when
        async_req is True.
    """
    # Fill in the standard request-control options without clobbering
    # anything the caller supplied explicitly.
    request_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    )
    for option, default in request_defaults:
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# GET /api/v1/vnic/LanConnectivityPolicies — collection query endpoint
# with the standard OData-style query options.
self.get_vnic_lan_connectivity_policy_list = _Endpoint(
    settings={
        'response_type': (VnicLanConnectivityPolicyResponse,),
        # NOTE(review): 'oAuth2' listed twice in the generated spec —
        # redundant but preserved as-is.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/LanConnectivityPolicies',
        'operation_id': 'get_vnic_lan_connectivity_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {"ALLPAGES": "allpages", "NONE": "none"},
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        # OData query options carry a '$' prefix on the wire.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_lan_connectivity_policy_list
)
def __get_vnic_lcp_status_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.LcpStatus' resource.  # noqa: E501
    Synchronous by default; pass async_req=True to receive the request
    thread instead of the response.
    >>> thread = api.get_vnic_lcp_status_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): omit status code and headers.
            Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default is True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use; defaults to
            the configured host.
        async_req (bool): execute request asynchronously
    Returns:
        VnicLcpStatus, or the request thread when async_req is True.
    """
    # Fill in the standard request-control options without clobbering
    # anything the caller supplied explicitly.
    request_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    )
    for option, default in request_defaults:
        kwargs.setdefault(option, default)
    # The path parameter travels with the rest of the options.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# GET /api/v1/vnic/LcpStatuses/{Moid} — single-resource read.
self.get_vnic_lcp_status_by_moid = _Endpoint(
    settings={
        'response_type': (VnicLcpStatus,),
        # NOTE(review): 'oAuth2' listed twice in the generated spec —
        # redundant but preserved as-is.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/LcpStatuses/{Moid}',
        'operation_id': 'get_vnic_lcp_status_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        # 'moid' is sent as the 'Moid' path segment.
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_lcp_status_by_moid
)
def __get_vnic_lcp_status_list(
    self,
    **kwargs
):
    """Read a 'vnic.LcpStatus' resource.  # noqa: E501
    Queries the collection endpoint. Synchronous by default; pass
    async_req=True to receive the request thread instead of the response.
    >>> thread = api.get_vnic_lcp_status_list(async_req=True)
    >>> result = thread.get()
    Keyword Args:
        filter (str): OData-style $filter predicate selecting which
            resources to return. [optional]
        orderby (str): Properties used to sort the collection. [optional]
        top (int): Maximum number of resources to return. [optional]
        skip (int): Number of resources to skip. [optional]
        select (str): Subset of properties to return. [optional]
        expand (str): Additional attributes or related resources to
            include. [optional]
        apply (str): $apply aggregation/transformation pipeline. [optional]
        count (bool): Return only the count of matching resources. [optional]
        inlinecount (str): Include an inline count with the results;
            "allpages" or "none". [optional]
        at (str): Like $filter but applied to versioning properties. [optional]
        tags (str): Request a summary of tag-key utilization. [optional]
        _return_http_data_only (bool): omit status code and headers.
            Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default is True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use; defaults to
            the configured host.
        async_req (bool): execute request asynchronously
    Returns:
        VnicLcpStatusResponse, or the request thread when async_req is
        True.
    """
    # Fill in the standard request-control options without clobbering
    # anything the caller supplied explicitly.
    request_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    )
    for option, default in request_defaults:
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# GET /api/v1/vnic/LcpStatuses — collection query endpoint with the
# standard OData-style query options.
self.get_vnic_lcp_status_list = _Endpoint(
    settings={
        'response_type': (VnicLcpStatusResponse,),
        # NOTE(review): 'oAuth2' listed twice in the generated spec —
        # redundant but preserved as-is.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/LcpStatuses',
        'operation_id': 'get_vnic_lcp_status_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand',
            'apply', 'count', 'inlinecount', 'at', 'tags',
        ],
        'required': [],
        'nullable': [],
        'enum': ['inlinecount'],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {
            ('inlinecount',): {"ALLPAGES": "allpages", "NONE": "none"},
        },
        'openapi_types': {
            'filter': (str,),
            'orderby': (str,),
            'top': (int,),
            'skip': (int,),
            'select': (str,),
            'expand': (str,),
            'apply': (str,),
            'count': (bool,),
            'inlinecount': (str,),
            'at': (str,),
            'tags': (str,),
        },
        # OData query options carry a '$' prefix on the wire.
        'attribute_map': {
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_lcp_status_list
)
def __get_vnic_san_connectivity_policy_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.SanConnectivityPolicy' resource.  # noqa: E501
    Synchronous by default; pass async_req=True to receive the request
    thread instead of the response.
    >>> thread = api.get_vnic_san_connectivity_policy_by_moid(moid, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
    Keyword Args:
        _return_http_data_only (bool): omit status code and headers.
            Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default is True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the
            server. Default is True.
        _host_index (int/None): index of the server to use; defaults to
            the configured host.
        async_req (bool): execute request asynchronously
    Returns:
        VnicSanConnectivityPolicy, or the request thread when async_req
        is True.
    """
    # Fill in the standard request-control options without clobbering
    # anything the caller supplied explicitly.
    request_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    )
    for option, default in request_defaults:
        kwargs.setdefault(option, default)
    # The path parameter travels with the rest of the options.
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# GET /api/v1/vnic/SanConnectivityPolicies/{Moid} — single-resource read.
self.get_vnic_san_connectivity_policy_by_moid = _Endpoint(
    settings={
        'response_type': (VnicSanConnectivityPolicy,),
        # NOTE(review): 'oAuth2' listed twice in the generated spec —
        # redundant but preserved as-is.
        'auth': ['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2'],
        'endpoint_path': '/api/v1/vnic/SanConnectivityPolicies/{Moid}',
        'operation_id': 'get_vnic_san_connectivity_policy_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['moid'],
        'required': ['moid'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'moid': (str,)},
        # 'moid' is sent as the 'Moid' path segment.
        'attribute_map': {'moid': 'Moid'},
        'location_map': {'moid': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_san_connectivity_policy_by_moid
)
def __get_vnic_san_connectivity_policy_list(
    self,
    **kwargs
):
    """Read a 'vnic.SanConnectivityPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case a request thread is returned.
    >>> thread = api.get_vnic_san_connectivity_policy_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        filter (str): OData-style "$filter" predicate selecting which
            resources to return. [optional, server default ""]
        orderby (str): Properties used to sort the collection. [optional]
        top (int): Maximum number of resources to return. [optional,
            server default 100]
        skip (int): Number of resources to skip. [optional, server
            default 0]
        select (str): Subset of properties to return. [optional, server
            default ""]
        expand (str): Additional attributes or related resources to
            return with the primary resources. [optional]
        apply (str): Sequence of set transformations ("aggregate",
            "groupby") separated by forward slashes; each result feeds
            the next transformation. [optional]
        count (bool): Return only the count of matching resources
            instead of the resources themselves. [optional]
        inlinecount (str): Request an inline count of matching resources
            in the response. [optional, server default "allpages"]
        at (str): Like "$filter", but applied to versioning information
            properties. [optional]
        tags (str): Request a summary of Tag utilization for this
            resource. [optional]
        _return_http_data_only (bool): Return the response data without
            status code and headers. Default is True.
        _preload_content (bool): If False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (float/tuple): Total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): Type-check data sent to the server.
            Default is True.
        _check_return_type (bool): Type-check data received from the
            server. Default is True.
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        VnicSanConnectivityPolicyResponse, or the request thread when
        called asynchronously.
    """
    # Fill in the standard request options, keeping any values the
    # caller supplied explicitly.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# _Endpoint binding for GET /api/v1/vnic/SanConnectivityPolicies (query a
# collection). Every parameter is an optional query option; 'inlinecount'
# is constrained to the enum values declared under 'allowed_values'.
# NOTE(review): 'oAuth2' appears twice in the 'auth' list — looks like
# generator duplication; harmless at runtime but worth confirming.
self.get_vnic_san_connectivity_policy_list = _Endpoint(
    settings={
        'response_type': (VnicSanConnectivityPolicyResponse,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/SanConnectivityPolicies',
        'operation_id': 'get_vnic_san_connectivity_policy_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            # Permitted values for the 'inlinecount' query option.
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        'attribute_map': {
            # Python parameter name -> wire query-option name; most map
            # to the '$'-prefixed OData options.
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            # Every parameter of this operation is sent in the query string.
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_san_connectivity_policy_list
)
def __get_vnic_scp_status_by_moid(
    self,
    moid,
    **kwargs
):
    """Read a 'vnic.ScpStatus' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case a request thread is returned.
    >>> thread = api.get_vnic_scp_status_by_moid(moid, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.

    Keyword Args:
        _return_http_data_only (bool): Return the response data without
            status code and headers. Default is True.
        _preload_content (bool): If False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (float/tuple): Total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): Type-check data sent to the server.
            Default is True.
        _check_return_type (bool): Type-check data received from the
            server. Default is True.
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        VnicScpStatus, or the request thread when called asynchronously.
    """
    # Fill in the standard request options without overriding anything
    # the caller supplied, then forward the positional argument.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    return self.call_with_http_info(**kwargs)
# _Endpoint binding for GET /api/v1/vnic/ScpStatuses/{Moid}.
# Declares the response type, accepted auth schemes, parameter typing and
# locations, and accepted media types, and dispatches to the private
# implementation function defined above.
# NOTE(review): 'oAuth2' appears twice in the 'auth' list — looks like
# generator duplication; harmless at runtime but worth confirming.
self.get_vnic_scp_status_by_moid = _Endpoint(
    settings={
        'response_type': (VnicScpStatus,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/ScpStatuses/{Moid}',
        'operation_id': 'get_vnic_scp_status_by_moid',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
        ],
        'required': [
            'moid',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
        },
        'attribute_map': {
            # Python parameter name -> wire (REST) name.
            'moid': 'Moid',
        },
        'location_map': {
            # 'moid' is substituted into the {Moid} path segment.
            'moid': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_scp_status_by_moid
)
def __get_vnic_scp_status_list(
    self,
    **kwargs
):
    """Read a 'vnic.ScpStatus' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case a request thread is returned.
    >>> thread = api.get_vnic_scp_status_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        filter (str): OData-style "$filter" predicate selecting which
            resources to return. [optional, server default ""]
        orderby (str): Properties used to sort the collection. [optional]
        top (int): Maximum number of resources to return. [optional,
            server default 100]
        skip (int): Number of resources to skip. [optional, server
            default 0]
        select (str): Subset of properties to return. [optional, server
            default ""]
        expand (str): Additional attributes or related resources to
            return with the primary resources. [optional]
        apply (str): Sequence of set transformations ("aggregate",
            "groupby") separated by forward slashes; each result feeds
            the next transformation. [optional]
        count (bool): Return only the count of matching resources
            instead of the resources themselves. [optional]
        inlinecount (str): Request an inline count of matching resources
            in the response. [optional, server default "allpages"]
        at (str): Like "$filter", but applied to versioning information
            properties. [optional]
        tags (str): Request a summary of Tag utilization for this
            resource. [optional]
        _return_http_data_only (bool): Return the response data without
            status code and headers. Default is True.
        _preload_content (bool): If False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (float/tuple): Total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): Type-check data sent to the server.
            Default is True.
        _check_return_type (bool): Type-check data received from the
            server. Default is True.
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        VnicScpStatusResponse, or the request thread when called
        asynchronously.
    """
    # Fill in the standard request options, keeping any values the
    # caller supplied explicitly.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    return self.call_with_http_info(**kwargs)
# _Endpoint binding for GET /api/v1/vnic/ScpStatuses (query a collection).
# Every parameter is an optional query option; 'inlinecount' is
# constrained to the enum values declared under 'allowed_values'.
# NOTE(review): 'oAuth2' appears twice in the 'auth' list — looks like
# generator duplication; harmless at runtime but worth confirming.
self.get_vnic_scp_status_list = _Endpoint(
    settings={
        'response_type': (VnicScpStatusResponse,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/ScpStatuses',
        'operation_id': 'get_vnic_scp_status_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'filter',
            'orderby',
            'top',
            'skip',
            'select',
            'expand',
            'apply',
            'count',
            'inlinecount',
            'at',
            'tags',
        ],
        'required': [],
        'nullable': [
        ],
        'enum': [
            'inlinecount',
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
            # Permitted values for the 'inlinecount' query option.
            ('inlinecount',): {
                "ALLPAGES": "allpages",
                "NONE": "none"
            },
        },
        'openapi_types': {
            'filter':
                (str,),
            'orderby':
                (str,),
            'top':
                (int,),
            'skip':
                (int,),
            'select':
                (str,),
            'expand':
                (str,),
            'apply':
                (str,),
            'count':
                (bool,),
            'inlinecount':
                (str,),
            'at':
                (str,),
            'tags':
                (str,),
        },
        'attribute_map': {
            # Python parameter name -> wire query-option name; most map
            # to the '$'-prefixed OData options.
            'filter': '$filter',
            'orderby': '$orderby',
            'top': '$top',
            'skip': '$skip',
            'select': '$select',
            'expand': '$expand',
            'apply': '$apply',
            'count': '$count',
            'inlinecount': '$inlinecount',
            'at': 'at',
            'tags': 'tags',
        },
        'location_map': {
            # Every parameter of this operation is sent in the query string.
            'filter': 'query',
            'orderby': 'query',
            'top': 'query',
            'skip': 'query',
            'select': 'query',
            'expand': 'query',
            'apply': 'query',
            'count': 'query',
            'inlinecount': 'query',
            'at': 'query',
            'tags': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_vnic_scp_status_list
)
def __patch_vnic_eth_adapter_policy(
    self,
    moid,
    vnic_eth_adapter_policy,
    **kwargs
):
    """Update a 'vnic.EthAdapterPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case a request thread is returned.
    >>> thread = api.patch_vnic_eth_adapter_policy(moid, vnic_eth_adapter_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_eth_adapter_policy (VnicEthAdapterPolicy): The
            'vnic.EthAdapterPolicy' resource to update.

    Keyword Args:
        if_match (str): If-Match precondition guarding against lost
            updates: set it to the ModTime obtained from a prior GET; the
            server answers 412 (Precondition Failed) if the resource has
            changed since. [optional]
        _return_http_data_only (bool): Return the response data without
            status code and headers. Default is True.
        _preload_content (bool): If False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (float/tuple): Total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): Type-check data sent to the server.
            Default is True.
        _check_return_type (bool): Type-check data received from the
            server. Default is True.
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        VnicEthAdapterPolicy, or the request thread when called
        asynchronously.
    """
    # Fill in the standard request options without overriding anything
    # the caller supplied, then forward the positional arguments.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    kwargs['vnic_eth_adapter_policy'] = vnic_eth_adapter_policy
    return self.call_with_http_info(**kwargs)
# _Endpoint binding for PATCH /api/v1/vnic/EthAdapterPolicies/{Moid}.
# 'moid' travels in the path, the policy payload in the request body, and
# the optional 'if_match' precondition in the If-Match header.
# NOTE(review): 'oAuth2' appears twice in the 'auth' list — looks like
# generator duplication; harmless at runtime but worth confirming.
self.patch_vnic_eth_adapter_policy = _Endpoint(
    settings={
        'response_type': (VnicEthAdapterPolicy,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthAdapterPolicies/{Moid}',
        'operation_id': 'patch_vnic_eth_adapter_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_eth_adapter_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_eth_adapter_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_eth_adapter_policy':
                (VnicEthAdapterPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            # Python parameter name -> wire (REST) name; the body
            # parameter needs no mapping.
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_eth_adapter_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_eth_adapter_policy
)
def __patch_vnic_eth_if(
    self,
    moid,
    vnic_eth_if,
    **kwargs
):
    """Update a 'vnic.EthIf' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case a request thread is returned.
    >>> thread = api.patch_vnic_eth_if(moid, vnic_eth_if, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_eth_if (VnicEthIf): The 'vnic.EthIf' resource to update.

    Keyword Args:
        if_match (str): If-Match precondition guarding against lost
            updates: set it to the ModTime obtained from a prior GET; the
            server answers 412 (Precondition Failed) if the resource has
            changed since. [optional]
        _return_http_data_only (bool): Return the response data without
            status code and headers. Default is True.
        _preload_content (bool): If False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (float/tuple): Total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): Type-check data sent to the server.
            Default is True.
        _check_return_type (bool): Type-check data received from the
            server. Default is True.
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        VnicEthIf, or the request thread when called asynchronously.
    """
    # Fill in the standard request options without overriding anything
    # the caller supplied, then forward the positional arguments.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    kwargs['vnic_eth_if'] = vnic_eth_if
    return self.call_with_http_info(**kwargs)
# _Endpoint binding for PATCH /api/v1/vnic/EthIfs/{Moid}.
# 'moid' travels in the path, the interface payload in the request body,
# and the optional 'if_match' precondition in the If-Match header.
# NOTE(review): 'oAuth2' appears twice in the 'auth' list — looks like
# generator duplication; harmless at runtime but worth confirming.
self.patch_vnic_eth_if = _Endpoint(
    settings={
        'response_type': (VnicEthIf,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthIfs/{Moid}',
        'operation_id': 'patch_vnic_eth_if',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_eth_if',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_eth_if',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_eth_if':
                (VnicEthIf,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            # Python parameter name -> wire (REST) name; the body
            # parameter needs no mapping.
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_eth_if': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_eth_if
)
def __patch_vnic_eth_network_policy(
    self,
    moid,
    vnic_eth_network_policy,
    **kwargs
):
    """Update a 'vnic.EthNetworkPolicy' resource.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case a request thread is returned.
    >>> thread = api.patch_vnic_eth_network_policy(moid, vnic_eth_network_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_eth_network_policy (VnicEthNetworkPolicy): The
            'vnic.EthNetworkPolicy' resource to update.

    Keyword Args:
        if_match (str): If-Match precondition guarding against lost
            updates: set it to the ModTime obtained from a prior GET; the
            server answers 412 (Precondition Failed) if the resource has
            changed since. [optional]
        _return_http_data_only (bool): Return the response data without
            status code and headers. Default is True.
        _preload_content (bool): If False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default True.
        _request_timeout (float/tuple): Total request timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): Type-check data sent to the server.
            Default is True.
        _check_return_type (bool): Type-check data received from the
            server. Default is True.
        _host_index (int/None): Index of the server to use. Default is
            read from the configuration.
        async_req (bool): Execute the request asynchronously.

    Returns:
        VnicEthNetworkPolicy, or the request thread when called
        asynchronously.
    """
    # Fill in the standard request options without overriding anything
    # the caller supplied, then forward the positional arguments.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['moid'] = moid
    kwargs['vnic_eth_network_policy'] = vnic_eth_network_policy
    return self.call_with_http_info(**kwargs)
# _Endpoint binding for PATCH /api/v1/vnic/EthNetworkPolicies/{Moid}.
# 'moid' travels in the path, the policy payload in the request body, and
# the optional 'if_match' precondition in the If-Match header.
# NOTE(review): 'oAuth2' appears twice in the 'auth' list — looks like
# generator duplication; harmless at runtime but worth confirming.
self.patch_vnic_eth_network_policy = _Endpoint(
    settings={
        'response_type': (VnicEthNetworkPolicy,),
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthNetworkPolicies/{Moid}',
        'operation_id': 'patch_vnic_eth_network_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_eth_network_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_eth_network_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_eth_network_policy':
                (VnicEthNetworkPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            # Python parameter name -> wire (REST) name; the body
            # parameter needs no mapping.
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_eth_network_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_eth_network_policy
)
def __patch_vnic_eth_qos_policy(
    self,
    moid,
    vnic_eth_qos_policy,
    **kwargs
):
    """Update a 'vnic.EthQosPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_eth_qos_policy(moid, vnic_eth_qos_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_eth_qos_policy (VnicEthQosPolicy): The 'vnic.EthQosPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicEthQosPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_eth_qos_policy'] = \
        vnic_eth_qos_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_eth_qos_policy = _Endpoint(
    settings={
        'response_type': (VnicEthQosPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/EthQosPolicies/{Moid}',
        'operation_id': 'patch_vnic_eth_qos_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_eth_qos_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_eth_qos_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_eth_qos_policy':
                (VnicEthQosPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_eth_qos_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_eth_qos_policy
)
def __patch_vnic_fc_adapter_policy(
    self,
    moid,
    vnic_fc_adapter_policy,
    **kwargs
):
    """Update a 'vnic.FcAdapterPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_fc_adapter_policy(moid, vnic_fc_adapter_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_fc_adapter_policy (VnicFcAdapterPolicy): The 'vnic.FcAdapterPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicFcAdapterPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_fc_adapter_policy'] = \
        vnic_fc_adapter_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_fc_adapter_policy = _Endpoint(
    settings={
        'response_type': (VnicFcAdapterPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcAdapterPolicies/{Moid}',
        'operation_id': 'patch_vnic_fc_adapter_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_fc_adapter_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_fc_adapter_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_fc_adapter_policy':
                (VnicFcAdapterPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_fc_adapter_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_fc_adapter_policy
)
def __patch_vnic_fc_if(
    self,
    moid,
    vnic_fc_if,
    **kwargs
):
    """Update a 'vnic.FcIf' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_fc_if(moid, vnic_fc_if, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_fc_if (VnicFcIf): The 'vnic.FcIf' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicFcIf
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_fc_if'] = \
        vnic_fc_if
    return self.call_with_http_info(**kwargs)

self.patch_vnic_fc_if = _Endpoint(
    settings={
        'response_type': (VnicFcIf,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcIfs/{Moid}',
        'operation_id': 'patch_vnic_fc_if',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_fc_if',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_fc_if',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_fc_if':
                (VnicFcIf,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_fc_if': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_fc_if
)
def __patch_vnic_fc_network_policy(
    self,
    moid,
    vnic_fc_network_policy,
    **kwargs
):
    """Update a 'vnic.FcNetworkPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_fc_network_policy(moid, vnic_fc_network_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_fc_network_policy (VnicFcNetworkPolicy): The 'vnic.FcNetworkPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicFcNetworkPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_fc_network_policy'] = \
        vnic_fc_network_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_fc_network_policy = _Endpoint(
    settings={
        'response_type': (VnicFcNetworkPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcNetworkPolicies/{Moid}',
        'operation_id': 'patch_vnic_fc_network_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_fc_network_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_fc_network_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_fc_network_policy':
                (VnicFcNetworkPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_fc_network_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_fc_network_policy
)
def __patch_vnic_fc_qos_policy(
    self,
    moid,
    vnic_fc_qos_policy,
    **kwargs
):
    """Update a 'vnic.FcQosPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_fc_qos_policy(moid, vnic_fc_qos_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_fc_qos_policy (VnicFcQosPolicy): The 'vnic.FcQosPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicFcQosPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_fc_qos_policy'] = \
        vnic_fc_qos_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_fc_qos_policy = _Endpoint(
    settings={
        'response_type': (VnicFcQosPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcQosPolicies/{Moid}',
        'operation_id': 'patch_vnic_fc_qos_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_fc_qos_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_fc_qos_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_fc_qos_policy':
                (VnicFcQosPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_fc_qos_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_fc_qos_policy
)
def __patch_vnic_iscsi_adapter_policy(
    self,
    moid,
    vnic_iscsi_adapter_policy,
    **kwargs
):
    """Update a 'vnic.IscsiAdapterPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_iscsi_adapter_policy(moid, vnic_iscsi_adapter_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_iscsi_adapter_policy (VnicIscsiAdapterPolicy): The 'vnic.IscsiAdapterPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicIscsiAdapterPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_iscsi_adapter_policy'] = \
        vnic_iscsi_adapter_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_iscsi_adapter_policy = _Endpoint(
    settings={
        'response_type': (VnicIscsiAdapterPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/IscsiAdapterPolicies/{Moid}',
        'operation_id': 'patch_vnic_iscsi_adapter_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_iscsi_adapter_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_iscsi_adapter_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_iscsi_adapter_policy':
                (VnicIscsiAdapterPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_iscsi_adapter_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_iscsi_adapter_policy
)
def __patch_vnic_iscsi_boot_policy(
    self,
    moid,
    vnic_iscsi_boot_policy,
    **kwargs
):
    """Update a 'vnic.IscsiBootPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_iscsi_boot_policy(moid, vnic_iscsi_boot_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_iscsi_boot_policy (VnicIscsiBootPolicy): The 'vnic.IscsiBootPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicIscsiBootPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_iscsi_boot_policy'] = \
        vnic_iscsi_boot_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_iscsi_boot_policy = _Endpoint(
    settings={
        'response_type': (VnicIscsiBootPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/IscsiBootPolicies/{Moid}',
        'operation_id': 'patch_vnic_iscsi_boot_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_iscsi_boot_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_iscsi_boot_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_iscsi_boot_policy':
                (VnicIscsiBootPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_iscsi_boot_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_iscsi_boot_policy
)
def __patch_vnic_iscsi_static_target_policy(
    self,
    moid,
    vnic_iscsi_static_target_policy,
    **kwargs
):
    """Update a 'vnic.IscsiStaticTargetPolicy' resource.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_vnic_iscsi_static_target_policy(moid, vnic_iscsi_static_target_policy, async_req=True)
    >>> result = thread.get()
    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_iscsi_static_target_policy (VnicIscsiStaticTargetPolicy): The 'vnic.IscsiStaticTargetPolicy' resource to update.
    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously
    Returns:
        VnicIscsiStaticTargetPolicy
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the generator's standard request-control options without
    # clobbering caller-supplied values.
    kwargs['async_req'] = kwargs.get(
        'async_req', False
    )
    kwargs['_return_http_data_only'] = kwargs.get(
        '_return_http_data_only', True
    )
    kwargs['_preload_content'] = kwargs.get(
        '_preload_content', True
    )
    kwargs['_request_timeout'] = kwargs.get(
        '_request_timeout', None
    )
    kwargs['_check_input_type'] = kwargs.get(
        '_check_input_type', True
    )
    kwargs['_check_return_type'] = kwargs.get(
        '_check_return_type', True
    )
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['moid'] = \
        moid
    kwargs['vnic_iscsi_static_target_policy'] = \
        vnic_iscsi_static_target_policy
    return self.call_with_http_info(**kwargs)

self.patch_vnic_iscsi_static_target_policy = _Endpoint(
    settings={
        'response_type': (VnicIscsiStaticTargetPolicy,),
        # 'oAuth2' was listed twice by the generator; the duplicate is removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/IscsiStaticTargetPolicies/{Moid}',
        'operation_id': 'patch_vnic_iscsi_static_target_policy',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_iscsi_static_target_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_iscsi_static_target_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_iscsi_static_target_policy':
                (VnicIscsiStaticTargetPolicy,),
            'if_match':
                (str,),
        },
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_iscsi_static_target_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__patch_vnic_iscsi_static_target_policy
)
def __patch_vnic_lan_connectivity_policy(
self,
moid,
vnic_lan_connectivity_policy,
**kwargs
):
"""Update a 'vnic.LanConnectivityPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_vnic_lan_connectivity_policy(moid, vnic_lan_connectivity_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_lan_connectivity_policy (VnicLanConnectivityPolicy): The 'vnic.LanConnectivityPolicy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicLanConnectivityPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_lan_connectivity_policy'] = \
vnic_lan_connectivity_policy
return self.call_with_http_info(**kwargs)
self.patch_vnic_lan_connectivity_policy = _Endpoint(
settings={
'response_type': (VnicLanConnectivityPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/LanConnectivityPolicies/{Moid}',
'operation_id': 'patch_vnic_lan_connectivity_policy',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_lan_connectivity_policy',
'if_match',
],
'required': [
'moid',
'vnic_lan_connectivity_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_lan_connectivity_policy':
(VnicLanConnectivityPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_lan_connectivity_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__patch_vnic_lan_connectivity_policy
)
def __patch_vnic_san_connectivity_policy(
self,
moid,
vnic_san_connectivity_policy,
**kwargs
):
"""Update a 'vnic.SanConnectivityPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_vnic_san_connectivity_policy(moid, vnic_san_connectivity_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_san_connectivity_policy (VnicSanConnectivityPolicy): The 'vnic.SanConnectivityPolicy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicSanConnectivityPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_san_connectivity_policy'] = \
vnic_san_connectivity_policy
return self.call_with_http_info(**kwargs)
self.patch_vnic_san_connectivity_policy = _Endpoint(
settings={
'response_type': (VnicSanConnectivityPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/SanConnectivityPolicies/{Moid}',
'operation_id': 'patch_vnic_san_connectivity_policy',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_san_connectivity_policy',
'if_match',
],
'required': [
'moid',
'vnic_san_connectivity_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_san_connectivity_policy':
(VnicSanConnectivityPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_san_connectivity_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__patch_vnic_san_connectivity_policy
)
def __update_vnic_eth_adapter_policy(
self,
moid,
vnic_eth_adapter_policy,
**kwargs
):
"""Update a 'vnic.EthAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vnic_eth_adapter_policy(moid, vnic_eth_adapter_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_eth_adapter_policy (VnicEthAdapterPolicy): The 'vnic.EthAdapterPolicy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthAdapterPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_eth_adapter_policy'] = \
vnic_eth_adapter_policy
return self.call_with_http_info(**kwargs)
self.update_vnic_eth_adapter_policy = _Endpoint(
settings={
'response_type': (VnicEthAdapterPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthAdapterPolicies/{Moid}',
'operation_id': 'update_vnic_eth_adapter_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_eth_adapter_policy',
'if_match',
],
'required': [
'moid',
'vnic_eth_adapter_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_eth_adapter_policy':
(VnicEthAdapterPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_eth_adapter_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_vnic_eth_adapter_policy
)
def __update_vnic_eth_if(
self,
moid,
vnic_eth_if,
**kwargs
):
"""Update a 'vnic.EthIf' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vnic_eth_if(moid, vnic_eth_if, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_eth_if (VnicEthIf): The 'vnic.EthIf' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthIf
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_eth_if'] = \
vnic_eth_if
return self.call_with_http_info(**kwargs)
self.update_vnic_eth_if = _Endpoint(
settings={
'response_type': (VnicEthIf,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthIfs/{Moid}',
'operation_id': 'update_vnic_eth_if',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_eth_if',
'if_match',
],
'required': [
'moid',
'vnic_eth_if',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_eth_if':
(VnicEthIf,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_eth_if': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_vnic_eth_if
)
def __update_vnic_eth_network_policy(
self,
moid,
vnic_eth_network_policy,
**kwargs
):
"""Update a 'vnic.EthNetworkPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vnic_eth_network_policy(moid, vnic_eth_network_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_eth_network_policy (VnicEthNetworkPolicy): The 'vnic.EthNetworkPolicy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthNetworkPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_eth_network_policy'] = \
vnic_eth_network_policy
return self.call_with_http_info(**kwargs)
self.update_vnic_eth_network_policy = _Endpoint(
settings={
'response_type': (VnicEthNetworkPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthNetworkPolicies/{Moid}',
'operation_id': 'update_vnic_eth_network_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_eth_network_policy',
'if_match',
],
'required': [
'moid',
'vnic_eth_network_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_eth_network_policy':
(VnicEthNetworkPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_eth_network_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_vnic_eth_network_policy
)
def __update_vnic_eth_qos_policy(
self,
moid,
vnic_eth_qos_policy,
**kwargs
):
"""Update a 'vnic.EthQosPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vnic_eth_qos_policy(moid, vnic_eth_qos_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_eth_qos_policy (VnicEthQosPolicy): The 'vnic.EthQosPolicy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicEthQosPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_eth_qos_policy'] = \
vnic_eth_qos_policy
return self.call_with_http_info(**kwargs)
self.update_vnic_eth_qos_policy = _Endpoint(
settings={
'response_type': (VnicEthQosPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/EthQosPolicies/{Moid}',
'operation_id': 'update_vnic_eth_qos_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_eth_qos_policy',
'if_match',
],
'required': [
'moid',
'vnic_eth_qos_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_eth_qos_policy':
(VnicEthQosPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_eth_qos_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_vnic_eth_qos_policy
)
def __update_vnic_fc_adapter_policy(
self,
moid,
vnic_fc_adapter_policy,
**kwargs
):
"""Update a 'vnic.FcAdapterPolicy' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vnic_fc_adapter_policy(moid, vnic_fc_adapter_policy, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_fc_adapter_policy (VnicFcAdapterPolicy): The 'vnic.FcAdapterPolicy' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcAdapterPolicy
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_fc_adapter_policy'] = \
vnic_fc_adapter_policy
return self.call_with_http_info(**kwargs)
self.update_vnic_fc_adapter_policy = _Endpoint(
settings={
'response_type': (VnicFcAdapterPolicy,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcAdapterPolicies/{Moid}',
'operation_id': 'update_vnic_fc_adapter_policy',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_fc_adapter_policy',
'if_match',
],
'required': [
'moid',
'vnic_fc_adapter_policy',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_fc_adapter_policy':
(VnicFcAdapterPolicy,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_fc_adapter_policy': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_vnic_fc_adapter_policy
)
def __update_vnic_fc_if(
self,
moid,
vnic_fc_if,
**kwargs
):
"""Update a 'vnic.FcIf' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vnic_fc_if(moid, vnic_fc_if, async_req=True)
>>> result = thread.get()
Args:
moid (str): The unique Moid identifier of a resource instance.
vnic_fc_if (VnicFcIf): The 'vnic.FcIf' resource to update.
Keyword Args:
if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client send a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VnicFcIf
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['moid'] = \
moid
kwargs['vnic_fc_if'] = \
vnic_fc_if
return self.call_with_http_info(**kwargs)
self.update_vnic_fc_if = _Endpoint(
settings={
'response_type': (VnicFcIf,),
'auth': [
'cookieAuth',
'http_signature',
'oAuth2',
'oAuth2'
],
'endpoint_path': '/api/v1/vnic/FcIfs/{Moid}',
'operation_id': 'update_vnic_fc_if',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'moid',
'vnic_fc_if',
'if_match',
],
'required': [
'moid',
'vnic_fc_if',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'moid':
(str,),
'vnic_fc_if':
(VnicFcIf,),
'if_match':
(str,),
},
'attribute_map': {
'moid': 'Moid',
'if_match': 'If-Match',
},
'location_map': {
'moid': 'path',
'vnic_fc_if': 'body',
'if_match': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'application/json-patch+json'
]
},
api_client=api_client,
callable=__update_vnic_fc_if
)
def __update_vnic_fc_network_policy(
    self,
    moid,
    vnic_fc_network_policy,
    **kwargs
):
    """Update a 'vnic.FcNetworkPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_fc_network_policy(moid, vnic_fc_network_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_fc_network_policy (VnicFcNetworkPolicy): The 'vnic.FcNetworkPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicFcNetworkPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_fc_network_policy'] = vnic_fc_network_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_fc_network_policy = _Endpoint(
    settings={
        'response_type': (VnicFcNetworkPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcNetworkPolicies/{Moid}',
        'operation_id': 'update_vnic_fc_network_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_fc_network_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_fc_network_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_fc_network_policy':
                (VnicFcNetworkPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_fc_network_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_fc_network_policy
)
def __update_vnic_fc_qos_policy(
    self,
    moid,
    vnic_fc_qos_policy,
    **kwargs
):
    """Update a 'vnic.FcQosPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_fc_qos_policy(moid, vnic_fc_qos_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_fc_qos_policy (VnicFcQosPolicy): The 'vnic.FcQosPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicFcQosPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_fc_qos_policy'] = vnic_fc_qos_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_fc_qos_policy = _Endpoint(
    settings={
        'response_type': (VnicFcQosPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/FcQosPolicies/{Moid}',
        'operation_id': 'update_vnic_fc_qos_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_fc_qos_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_fc_qos_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_fc_qos_policy':
                (VnicFcQosPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_fc_qos_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_fc_qos_policy
)
def __update_vnic_iscsi_adapter_policy(
    self,
    moid,
    vnic_iscsi_adapter_policy,
    **kwargs
):
    """Update a 'vnic.IscsiAdapterPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_iscsi_adapter_policy(moid, vnic_iscsi_adapter_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_iscsi_adapter_policy (VnicIscsiAdapterPolicy): The 'vnic.IscsiAdapterPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicIscsiAdapterPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_iscsi_adapter_policy'] = vnic_iscsi_adapter_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_iscsi_adapter_policy = _Endpoint(
    settings={
        'response_type': (VnicIscsiAdapterPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/IscsiAdapterPolicies/{Moid}',
        'operation_id': 'update_vnic_iscsi_adapter_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_iscsi_adapter_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_iscsi_adapter_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_iscsi_adapter_policy':
                (VnicIscsiAdapterPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_iscsi_adapter_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_iscsi_adapter_policy
)
def __update_vnic_iscsi_boot_policy(
    self,
    moid,
    vnic_iscsi_boot_policy,
    **kwargs
):
    """Update a 'vnic.IscsiBootPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_iscsi_boot_policy(moid, vnic_iscsi_boot_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_iscsi_boot_policy (VnicIscsiBootPolicy): The 'vnic.IscsiBootPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicIscsiBootPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_iscsi_boot_policy'] = vnic_iscsi_boot_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_iscsi_boot_policy = _Endpoint(
    settings={
        'response_type': (VnicIscsiBootPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/IscsiBootPolicies/{Moid}',
        'operation_id': 'update_vnic_iscsi_boot_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_iscsi_boot_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_iscsi_boot_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_iscsi_boot_policy':
                (VnicIscsiBootPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_iscsi_boot_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_iscsi_boot_policy
)
def __update_vnic_iscsi_static_target_policy(
    self,
    moid,
    vnic_iscsi_static_target_policy,
    **kwargs
):
    """Update a 'vnic.IscsiStaticTargetPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_iscsi_static_target_policy(moid, vnic_iscsi_static_target_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_iscsi_static_target_policy (VnicIscsiStaticTargetPolicy): The 'vnic.IscsiStaticTargetPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicIscsiStaticTargetPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_iscsi_static_target_policy'] = vnic_iscsi_static_target_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_iscsi_static_target_policy = _Endpoint(
    settings={
        'response_type': (VnicIscsiStaticTargetPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/IscsiStaticTargetPolicies/{Moid}',
        'operation_id': 'update_vnic_iscsi_static_target_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_iscsi_static_target_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_iscsi_static_target_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_iscsi_static_target_policy':
                (VnicIscsiStaticTargetPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_iscsi_static_target_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_iscsi_static_target_policy
)
def __update_vnic_lan_connectivity_policy(
    self,
    moid,
    vnic_lan_connectivity_policy,
    **kwargs
):
    """Update a 'vnic.LanConnectivityPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_lan_connectivity_policy(moid, vnic_lan_connectivity_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_lan_connectivity_policy (VnicLanConnectivityPolicy): The 'vnic.LanConnectivityPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicLanConnectivityPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_lan_connectivity_policy'] = vnic_lan_connectivity_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_lan_connectivity_policy = _Endpoint(
    settings={
        'response_type': (VnicLanConnectivityPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/LanConnectivityPolicies/{Moid}',
        'operation_id': 'update_vnic_lan_connectivity_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_lan_connectivity_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_lan_connectivity_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_lan_connectivity_policy':
                (VnicLanConnectivityPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_lan_connectivity_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_lan_connectivity_policy
)
def __update_vnic_san_connectivity_policy(
    self,
    moid,
    vnic_san_connectivity_policy,
    **kwargs
):
    """Update a 'vnic.SanConnectivityPolicy' resource. # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_vnic_san_connectivity_policy(moid, vnic_san_connectivity_policy, async_req=True)
    >>> result = thread.get()

    Args:
        moid (str): The unique Moid identifier of a resource instance.
        vnic_san_connectivity_policy (VnicSanConnectivityPolicy): The 'vnic.SanConnectivityPolicy' resource to update.

    Keyword Args:
        if_match (str): For methods that apply server-side changes, and in particular for PUT, If-Match can be used to prevent the lost update problem. It can check if the modification of a resource that the user wants to upload will not override another change that has been done since the original resource was fetched. If the request cannot be fulfilled, the 412 (Precondition Failed) response is returned. When modifying a resource using POST or PUT, the If-Match header must be set to the value of the resource ModTime property after which no lost update problem should occur. For example, a client sends a GET request to obtain a resource, which includes the ModTime property. The ModTime indicates the last time the resource was created or modified. The client then sends a POST or PUT request with the If-Match header set to the ModTime property of the resource as obtained in the GET request. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        VnicSanConnectivityPolicy
            If the method is called asynchronously, returns the request
            thread.
    """
    # Fill in framework-level defaults without overriding anything the
    # caller supplied explicitly.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Required positional parameters are forwarded through kwargs so the
    # generic endpoint machinery can validate and serialize them.
    kwargs['moid'] = moid
    kwargs['vnic_san_connectivity_policy'] = vnic_san_connectivity_policy
    return self.call_with_http_info(**kwargs)
self.update_vnic_san_connectivity_policy = _Endpoint(
    settings={
        'response_type': (VnicSanConnectivityPolicy,),
        # NOTE: the code generator emitted 'oAuth2' twice; the redundant
        # duplicate entry has been removed.
        'auth': [
            'cookieAuth',
            'http_signature',
            'oAuth2'
        ],
        'endpoint_path': '/api/v1/vnic/SanConnectivityPolicies/{Moid}',
        'operation_id': 'update_vnic_san_connectivity_policy',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'moid',
            'vnic_san_connectivity_policy',
            'if_match',
        ],
        'required': [
            'moid',
            'vnic_san_connectivity_policy',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'moid':
                (str,),
            'vnic_san_connectivity_policy':
                (VnicSanConnectivityPolicy,),
            'if_match':
                (str,),
        },
        # Maps python_snake_case parameter names to their wire names.
        'attribute_map': {
            'moid': 'Moid',
            'if_match': 'If-Match',
        },
        'location_map': {
            'moid': 'path',
            'vnic_san_connectivity_policy': 'body',
            'if_match': 'header',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json'
        ],
        'content_type': [
            'application/json',
            'application/json-patch+json'
        ]
    },
    api_client=api_client,
    callable=__update_vnic_san_connectivity_policy
)
| 46.034777
| 1,678
| 0.512131
| 51,812
| 533,451
| 5.086003
| 0.011465
| 0.019604
| 0.016181
| 0.016804
| 0.985515
| 0.979941
| 0.973808
| 0.96637
| 0.961566
| 0.95908
| 0
| 0.002781
| 0.41365
| 533,451
| 11,587
| 1,679
| 46.03875
| 0.839691
| 0.43371
| 0
| 0.763297
| 0
| 0
| 0.231417
| 0.060725
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010195
| false
| 0
| 0.004299
| 0
| 0.02469
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e48267dcbc986936fd4644351aeb92e20dfb2c95
| 7,141
|
py
|
Python
|
frontend_tests/happy_scenarios.py
|
mickypaganini/everware
|
7f86838b4d3692336f191ea62dc4a7f00951503a
|
[
"BSD-3-Clause"
] | 1
|
2021-01-06T00:31:10.000Z
|
2021-01-06T00:31:10.000Z
|
frontend_tests/happy_scenarios.py
|
mickypaganini/everware
|
7f86838b4d3692336f191ea62dc4a7f00951503a
|
[
"BSD-3-Clause"
] | null | null | null |
frontend_tests/happy_scenarios.py
|
mickypaganini/everware
|
7f86838b4d3692336f191ea62dc4a7f00951503a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
def scenario_x(user):
    """Minimal smoke scenario: obtain a webdriver and print the driver type.

    The driver is created for its side effect only; the original bound it to
    an unused local, which has been removed.
    """
    user.get_driver()
    print(user.driver_type)
def scenario_short(user):
    """Happy path, short form: log in via the hub login page, wait for the
    start button to appear, then immediately log out."""
    browser = user.get_driver()
    browser.get(user.base_url + "/hub/login")
    user.log("login")
    # Clear and fill each credential field; each action re-locates the
    # element, matching the original call sequence exactly.
    for field_id, value in (("username_input", user.login),
                            ("password_input", user.password)):
        browser.find_element_by_id(field_id).clear()
        browser.find_element_by_id(field_id).send_keys(value)
    browser.find_element_by_id("login_submit").click()
    user.wait_for_element_present(By.ID, "start")
    browser.find_element_by_id("logout").click()
    user.log("logout clicked")
def scenario_short_bad(user):
    """Negative variant of scenario_short: logs in, then waits for element
    id "start1" instead of "start".

    NOTE(review): "start1" presumably does not exist, so the wait is expected
    to fail/time out — this appears intentional given the "_bad" name; confirm
    against the test harness before "fixing" it.
    """
    browser = user.get_driver()
    browser.get(user.base_url + "/hub/login")
    user.log("login")
    # Clear and fill each credential field, re-locating the element for each
    # action to mirror the original call sequence.
    for field_id, value in (("username_input", user.login),
                            ("password_input", user.password)):
        browser.find_element_by_id(field_id).clear()
        browser.find_element_by_id(field_id).send_keys(value)
    browser.find_element_by_id("login_submit").click()
    user.wait_for_element_present(By.ID, "start1")
    browser.find_element_by_id("logout").click()
    user.log("logout clicked")
def scenario_full(user):
    """Full happy path: log in, spawn a server from the user's repository,
    stop it from the control panel, and log out."""
    browser = user.get_driver()
    browser.get(user.base_url + "/hub/login")
    user.log("login")
    # Fill in the login form, re-locating each field per action as the
    # original did.
    for field_id, value in (("username_input", user.login),
                            ("password_input", user.password)):
        browser.find_element_by_id(field_id).clear()
        browser.find_element_by_id(field_id).send_keys(value)
    browser.find_element_by_id("login_submit").click()
    # Spawn a server from the configured repository.
    user.wait_for_element_present(By.ID, "start")
    browser.find_element_by_id("start").click()
    user.wait_for_element_present(By.ID, "repository_input")
    browser.find_element_by_id("repository_input").clear()
    browser.find_element_by_id("repository_input").send_keys(user.repo)
    browser.find_element_by_xpath("//input[@value='Spawn']").click()
    user.log("spawn clicked")
    # Stop the server from the control panel and log out.
    user.wait_for_element_present(By.LINK_TEXT, "Control Panel")
    browser.find_element_by_link_text("Control Panel").click()
    user.wait_for_element_present(By.ID, "stop")
    browser.find_element_by_id("stop").click()
    user.log("stop clicked")
    user.wait_for_pattern_in_page(r"Start\s+My\s+Server")
    browser.find_element_by_id("logout").click()
    user.log("logout clicked")
def scenario_no_jupyter(user):
    """Spawn a container lacking jupyter-singleuser, check the error message,
    then retry with the user's good repo and shut the server down."""
    driver = user.get_driver()
    by_id = driver.find_element_by_id

    # Log in through the hub form.
    driver.get(user.base_url + "/hub/login")
    user.log("login")
    for field_id, text in (("username_input", user.login),
                           ("password_input", user.password)):
        by_id(field_id).clear()
        by_id(field_id).send_keys(text)
    by_id("login_submit").click()

    # First spawn attempt: a bare busybox image with no jupyter inside.
    user.wait_for_element_present(By.ID, "start")
    by_id("start").click()
    user.wait_for_element_present(By.ID, "repository_input")
    by_id("repository_input").clear()
    by_id("repository_input").send_keys('docker:busybox')
    driver.find_element_by_xpath("//input[@value='Spawn']").click()
    user.log("spawn clicked")

    # The hub must report the missing jupyter-singleuser binary.
    user.wait_for_element_present(By.ID, "resist")
    expected = ("Something went wrong during building."
                " Error: Container doesn't have jupyter-singleuser inside")
    assert expected in driver.page_source
    user.log("correct, no jupyter in container")

    # Second attempt with the user's real repository should succeed.
    by_id("resist").click()
    user.wait_for_element_present(By.ID, "repository_input")
    by_id("repository_input").clear()
    by_id("repository_input").send_keys(user.repo)
    driver.find_element_by_xpath("//input[@value='Spawn']").click()
    user.log("spawn clicked (second try)")

    # Stop the server and log out.
    user.wait_for_element_present(By.LINK_TEXT, "Control Panel")
    driver.find_element_by_link_text("Control Panel").click()
    user.wait_for_element_present(By.ID, "stop")
    by_id("stop").click()
    user.log("stop clicked")
    user.wait_for_pattern_in_page(r"Start\s+My\s+Server")
    by_id("logout").click()
    user.log("logout clicked")
def scenario_timeout(user):
    """Spawn a repository whose image build is too slow, twice, and verify the
    timeout / too-heavy error messages shown on each attempt."""
    slow_repo = 'https://github.com/everware/test_long_creation'
    driver = user.get_driver()
    by_id = driver.find_element_by_id

    # Log in through the hub form.
    driver.get(user.base_url + "/hub/login")
    user.log("login")
    for field_id, text in (("username_input", user.login),
                           ("password_input", user.password)):
        by_id(field_id).clear()
        by_id(field_id).send_keys(text)
    by_id("login_submit").click()

    # First spawn attempt with the slow-to-build repository.
    user.wait_for_element_present(By.ID, "start")
    by_id("start").click()
    user.wait_for_element_present(By.ID, "repository_input")
    by_id("repository_input").clear()
    by_id("repository_input").send_keys(slow_repo)
    driver.find_element_by_xpath("//input[@value='Spawn']").click()
    user.log("spawn clicked")

    # Either timeout message is acceptable on the first attempt.
    user.wait_for_element_present(By.ID, "resist")
    assert ("Building took too long" in driver.page_source or
            "This image is too heavy to build" in driver.page_source)
    user.log('correct, timeout happened')

    # Second attempt: the hub must now flag the image as too heavy.
    by_id("resist").click()
    user.log("resist clicked")
    user.wait_for_element_present(By.ID, "repository_input")
    by_id("repository_input").clear()
    by_id("repository_input").send_keys(slow_repo)
    driver.find_element_by_xpath("//input[@value='Spawn']").click()
    user.log("spawn clicked (second try)")
    user.wait_for_element_present(By.ID, "resist")
    assert "This image is too heavy to build" in driver.page_source
def scenario_no_dockerfile(user):
    """Spawn a repository with no Dockerfile and verify the build error."""
    driver = user.get_driver()
    by_id = driver.find_element_by_id

    # Log in through the hub form.
    driver.get(user.base_url + "/hub/login")
    user.log("login")
    for field_id, text in (("username_input", user.login),
                           ("password_input", user.password)):
        by_id(field_id).clear()
        by_id(field_id).send_keys(text)
    by_id("login_submit").click()

    # Attempt to spawn a repo that lacks a Dockerfile.
    user.wait_for_element_present(By.ID, "start")
    by_id("start").click()
    user.wait_for_element_present(By.ID, "repository_input")
    by_id("repository_input").clear()
    by_id("repository_input").send_keys(
        'https://github.com/everware/runnable_examples'
    )
    driver.find_element_by_xpath("//input[@value='Spawn']").click()
    user.log("spawn clicked")

    # The hub must report the missing Dockerfile.
    user.wait_for_element_present(By.ID, "resist")
    expected = ("Something went wrong during building."
                " Error: Your repo doesn't include Dockerfile")
    assert expected in driver.page_source
    user.log("correct, no dockerfile")
| 44.91195
| 77
| 0.734631
| 1,039
| 7,141
| 4.704524
| 0.101059
| 0.05892
| 0.21563
| 0.240998
| 0.921236
| 0.91653
| 0.91653
| 0.913666
| 0.899345
| 0.895254
| 0
| 0.000319
| 0.123372
| 7,141
| 158
| 78
| 45.196203
| 0.780511
| 0.002941
| 0
| 0.826389
| 0
| 0
| 0.25699
| 0.01939
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.048611
| false
| 0.083333
| 0.013889
| 0
| 0.0625
| 0.006944
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
e4abf741a85651dcc3de7b239afae48020db96df
| 10,314
|
py
|
Python
|
dialogue-engine/test/programytest/parser/pattern/matching/test_set.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 104
|
2020-03-30T09:40:00.000Z
|
2022-03-06T22:34:25.000Z
|
dialogue-engine/test/programytest/parser/pattern/matching/test_set.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 25
|
2020-06-12T01:36:35.000Z
|
2022-02-19T07:30:44.000Z
|
dialogue-engine/test/programytest/parser/pattern/matching/test_set.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 10
|
2020-04-02T23:43:56.000Z
|
2021-05-14T13:47:01.000Z
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programytest.parser.pattern.matching.base import PatternMatcherBaseClass
class PatternMatcherSetTests(PatternMatcherBaseClass):
    """Matching tests for <set> pattern nodes: text form, name-attribute form,
    multi-word entries, Japanese sets and a dynamic numeric set."""

    def _assert_matches(self, sentence, topic, that, stars):
        """Match *sentence* and check template "1" matched with the given stars."""
        context = self.match_sentence(sentence, topic=topic, that=that)
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.template_node())
        self.assertEqual("1", context.template_node().template.word)
        for position, expected in enumerate(stars, start=1):
            self.assertEqual(expected, context.star(position))

    def _ensure_sex_set(self):
        """Register the English SEX set unless the brain already has one."""
        if self._client_context.brain.sets.contains("SEX") is False:
            set_dict = {"MAN": [["MAN"]], "WOMAN": [["WOMAN"]]}
            values = {"MAN": "MAN", "WOMAN": "WOMAN"}
            self._client_context.brain._sets_collection.add_set("SEX", set_dict, "teststore", False, values)

    def _ensure_sex_set_jp(self):
        """Register the Japanese SEX set unless the brain already has one."""
        if self._client_context.brain.sets.contains("SEX") is False:
            set_dict = {"男": ["男", "男性"], "女": ["女", "女性"]}
            values = {"男": "男", "男性": "男性", "女": "女", "女性": "女性"}
            self._client_context.brain._sets_collection.add_set("SEX", set_dict, "teststore", True, values)

    def test_basic_set_match_as_text(self):
        self._ensure_sex_set()
        self.add_pattern_to_graph(pattern="I AM A <set>sex</set>", topic="X", that="Y", template="1")
        self._assert_matches("I AM A MAN", "X", "Y", ["MAN"])
        self._assert_matches("I AM A WOMAN", "X", "Y", ["WOMAN"])

    def test_basic_set_match_as_name(self):
        self._ensure_sex_set()
        self.add_pattern_to_graph(pattern='I AM A <set name="sex" />', topic="X", that="Y", template="1")
        self._assert_matches("I AM A MAN", "X", "Y", ["MAN"])
        self._assert_matches("I AM A WOMAN", "X", "Y", ["WOMAN"])

    def test_multi_word_set_match(self):
        # Multi-word entries: the longest matching entry should win as star(1).
        set_dict = {"RED": [["RED"], ["RED", "AMBER"], ["RED", "BURNT", "OAK"], ["RED", "ORANGE"]]}
        values = {"RED": "RED", "RED AMBER": "RED AMBER", "RED BURNT OAK": "RED BURNT OAK", "RED ORANGE": "RED ORANGE"}
        self._client_context.brain._sets_collection.add_set("COLOR", set_dict, "teststore", False, values)
        self.add_pattern_to_graph(pattern="I LIKE <set>color</set> *", topic="*", that="*", template="1")
        self._assert_matches("I LIKE RED PAINT", "*", "*", ["RED", "PAINT"])
        self._assert_matches("I LIKE RED AMBER CARS", "*", "*", ["RED AMBER", "CARS"])
        self._assert_matches("I LIKE RED BURNT OAK MOTOR BIKES", "*", "*", ["RED BURNT OAK", "MOTOR BIKES"])

    def test_multi_word_match_value(self):
        # Same shapes as above, but stars carry the mapped (lowercase) values.
        set_dict = {"RED": [["RED"], ["RED", "AMBER"], ["RED", "BURNT", "OAK"], ["RED", "ORANGE"]]}
        values = {"RED": "red", "RED AMBER": "red amber", "RED BURNT OAK": "red burnt oak", "RED ORANGE": "red orange"}
        self._client_context.brain._sets_collection.add_set("COLOR", set_dict, "teststore", False, values)
        self.add_pattern_to_graph(pattern="i like <set>color</set> *", topic="*", that="*", template="1")
        self._assert_matches("I LIKE RED PAINT", "*", "*", ["red", "PAINT"])
        self._assert_matches("I LIKE RED AMBER CARS", "*", "*", ["red amber", "CARS"])
        self._assert_matches("I LIKE RED BURNT OAK MOTOR BIKES", "*", "*", ["red burnt oak", "MOTOR BIKES"])

    def test_basic_set_match_as_text_jp(self):
        self._ensure_sex_set_jp()
        self.add_pattern_to_graph(pattern="私は <set>sex</set>", topic="X", that="Y", template="1")
        self._assert_matches("私は男性", "X", "Y", ["男性"])
        self._assert_matches("私は女", "X", "Y", ["女"])

    def test_basic_set_match_as_name_jp(self):
        self._ensure_sex_set_jp()
        self.add_pattern_to_graph(pattern='私は <set name="sex" />', topic="X", that="Y", template="1")
        self._assert_matches("私は男", "X", "Y", ["男"])
        self._assert_matches("私は女性", "X", "Y", ["女性"])

    def test_multi_word_set_match_jp(self):
        set_dict = {"赤": ["赤", "赤色", "赤黒い", "赤面", "赤に塗った"]}
        values = {"赤": "赤", "赤色": "赤色", "赤黒い": "赤黒い", "赤面": "赤面", "赤に塗った": "赤に塗った"}
        self._client_context.brain._sets_collection.add_set("COLOR", set_dict, "teststore", True, values)
        self.add_pattern_to_graph(pattern="私が好きなのは<set>color</set> *", topic="*", that="*", template="1")
        self._assert_matches("私が好きなのは赤系統", "*", "*", ["赤", "系統"])
        self._assert_matches("私が好きなのは赤黒い車", "*", "*", ["赤黒い", "車"])
        self._assert_matches("私が好きなのは赤に塗ったバイク", "*", "*", ["赤に塗った", "バイク"])

    def test_basic_set_number_match(self):
        # Dynamic set backed by the IsNumeric implementation instead of a word list.
        self._client_context.brain.dynamics.add_dynamic_set('number', "programy.dynamic.sets.numeric.IsNumeric", None)
        self.add_pattern_to_graph(pattern="I AM <set>number</set> YEARS OLD", topic="X", that="Y", template="1")
        self._assert_matches("I AM 49 YEARS OLD", "X", "Y", ["49"])
| 50.558824
| 126
| 0.656292
| 1,302
| 10,314
| 5.062212
| 0.145929
| 0.102412
| 0.142012
| 0.065544
| 0.808527
| 0.796237
| 0.79381
| 0.761038
| 0.756334
| 0.748293
| 0
| 0.007247
| 0.183925
| 10,314
| 203
| 127
| 50.807882
| 0.775811
| 0.102967
| 0
| 0.657343
| 0
| 0
| 0.123972
| 0.006707
| 0
| 0
| 0
| 0
| 0.566434
| 1
| 0.055944
| false
| 0
| 0.006993
| 0
| 0.06993
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
90025a1386c5f9213d120225a31f82cdb82e676d
| 5,463
|
py
|
Python
|
cifar_train/genotypes.py
|
sunsmarterjie/GOLD_NAS
|
3b40c2a0700b1d07b96c2f9351057623efd0488f
|
[
"Apache-2.0"
] | 20
|
2020-08-30T13:39:08.000Z
|
2021-12-10T08:56:07.000Z
|
cifar_train/genotypes.py
|
sunsmarterjie/GOLD_NAS
|
3b40c2a0700b1d07b96c2f9351057623efd0488f
|
[
"Apache-2.0"
] | 3
|
2020-10-25T23:04:18.000Z
|
2021-03-16T07:58:17.000Z
|
cifar_train/genotypes.py
|
sunsmarterjie/GOLD_NAS
|
3b40c2a0700b1d07b96c2f9351057623efd0488f
|
[
"Apache-2.0"
] | 2
|
2020-11-02T04:54:09.000Z
|
2020-11-21T14:49:04.000Z
|
# 2019.11.25-Changed architecture-coding form
# Huawei Technologies Co., Ltd. <foss@huawei.com>
from collections import namedtuple
# A Genotype holds the searched architecture: `gene` is the per-cell edge
# description (see `s1` below) and `concat` lists which node outputs are
# concatenated to form each cell's output.
Genotype = namedtuple('Genotype', ['gene', 'concat'])

# The operation vocabulary available on every edge of a searched cell.
PRIMITIVES = ['skip_connect', 'sep_conv_3x3']
# s1: a hand-transcribed searched architecture. `gene` is a list with one entry
# per cell; each cell is a list of edges (op_name, source_node, target_node).
# Judging from the indices used, nodes 0 and 1 appear to be the cell inputs and
# nodes 2-5 the intermediate nodes whose outputs are concatenated
# (concat=range(2, 6)) — TODO confirm against the network-builder code.
s1 = Genotype(gene=[[('skip_connect', 0, 2), ('sep_conv_3x3', 0, 2), ('skip_connect', 1, 2), ('skip_connect', 1, 3),
                     ('sep_conv_3x3', 1, 3), ('sep_conv_3x3', 2, 3), ('skip_connect', 0, 4), ('sep_conv_3x3', 1, 4),
                     ('skip_connect', 2, 4), ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5)],
                    [('skip_connect', 0, 2), ('sep_conv_3x3', 0, 2), ('skip_connect', 0, 3), ('sep_conv_3x3', 0, 3),
                     ('skip_connect', 1, 3), ('sep_conv_3x3', 1, 3), ('skip_connect', 0, 4), ('sep_conv_3x3', 0, 4),
                     ('skip_connect', 2, 4), ('sep_conv_3x3', 3, 4), ('sep_conv_3x3', 2, 5), ('skip_connect', 3, 5),
                     ('sep_conv_3x3', 3, 5), ('sep_conv_3x3', 4, 5)],
                    [('skip_connect', 0, 2), ('sep_conv_3x3', 0, 2), ('skip_connect', 1, 2), ('sep_conv_3x3', 0, 3),
                     ('skip_connect', 1, 3), ('sep_conv_3x3', 1, 3), ('skip_connect', 0, 4), ('sep_conv_3x3', 0, 4),
                     ('sep_conv_3x3', 0, 5), ('skip_connect', 1, 5), ('sep_conv_3x3', 1, 5), ('sep_conv_3x3', 3, 5)],
                    [('skip_connect', 1, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3), ('sep_conv_3x3', 0, 3),
                     ('skip_connect', 0, 4), ('sep_conv_3x3', 0, 4), ('sep_conv_3x3', 3, 4), ('sep_conv_3x3', 1, 5),
                     ('sep_conv_3x3', 2, 5)],
                    [('sep_conv_3x3', 0, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3), ('sep_conv_3x3', 0, 3),
                     ('sep_conv_3x3', 1, 3), ('skip_connect', 1, 4), ('sep_conv_3x3', 1, 4), ('sep_conv_3x3', 2, 4),
                     ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5), ('sep_conv_3x3', 3, 5)],
                    [('sep_conv_3x3', 0, 2), ('skip_connect', 1, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3),
                     ('sep_conv_3x3', 0, 3), ('skip_connect', 1, 3), ('sep_conv_3x3', 2, 3), ('skip_connect', 0, 4),
                     ('skip_connect', 1, 4), ('sep_conv_3x3', 3, 4), ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5),
                     ('sep_conv_3x3', 3, 5), ('sep_conv_3x3', 4, 5)],
                    [('sep_conv_3x3', 0, 2), ('skip_connect', 1, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3),
                     ('sep_conv_3x3', 0, 3), ('sep_conv_3x3', 1, 3), ('sep_conv_3x3', 2, 3), ('skip_connect', 0, 4),
                     ('sep_conv_3x3', 0, 4), ('sep_conv_3x3', 1, 4), ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5),
                     ('sep_conv_3x3', 1, 5), ('sep_conv_3x3', 3, 5)],
                    [('skip_connect', 0, 2), ('sep_conv_3x3', 0, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3),
                     ('sep_conv_3x3', 0, 3), ('skip_connect', 1, 3), ('sep_conv_3x3', 1, 3), ('sep_conv_3x3', 0, 4),
                     ('skip_connect', 1, 4), ('sep_conv_3x3', 1, 4), ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5),
                     ('sep_conv_3x3', 1, 5), ('sep_conv_3x3', 3, 5)],
                    [('sep_conv_3x3', 0, 2), ('skip_connect', 1, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3),
                     ('sep_conv_3x3', 0, 3), ('skip_connect', 1, 3), ('sep_conv_3x3', 1, 3), ('skip_connect', 0, 4),
                     ('sep_conv_3x3', 0, 4), ('sep_conv_3x3', 1, 4), ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5),
                     ('sep_conv_3x3', 1, 5)],
                    [('skip_connect', 0, 2), ('sep_conv_3x3', 0, 2), ('skip_connect', 1, 2), ('sep_conv_3x3', 1, 2),
                     ('sep_conv_3x3', 0, 3), ('skip_connect', 1, 3), ('sep_conv_3x3', 1, 3), ('sep_conv_3x3', 2, 3),
                     ('sep_conv_3x3', 0, 4), ('sep_conv_3x3', 1, 4), ('sep_conv_3x3', 2, 4), ('sep_conv_3x3', 3, 4),
                     ('sep_conv_3x3', 0, 5), ('sep_conv_3x3', 1, 5), ('sep_conv_3x3', 2, 5), ('sep_conv_3x3', 3, 5),
                     ('sep_conv_3x3', 4, 5)],
                    [('sep_conv_3x3', 0, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3), ('sep_conv_3x3', 1, 3),
                     ('skip_connect', 0, 4), ('skip_connect', 1, 4), ('sep_conv_3x3', 1, 4), ('skip_connect', 0, 5),
                     ('sep_conv_3x3', 1, 5), ('sep_conv_3x3', 2, 5), ('sep_conv_3x3', 4, 5)],
                    [('sep_conv_3x3', 0, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3), ('sep_conv_3x3', 0, 3),
                     ('sep_conv_3x3', 1, 3), ('sep_conv_3x3', 2, 3), ('skip_connect', 0, 4), ('sep_conv_3x3', 0, 4),
                     ('sep_conv_3x3', 3, 4), ('skip_connect', 0, 5), ('skip_connect', 1, 5), ('sep_conv_3x3', 3, 5),
                     ('sep_conv_3x3', 4, 5)],
                    [('skip_connect', 0, 2), ('sep_conv_3x3', 1, 2), ('skip_connect', 0, 3), ('sep_conv_3x3', 1, 3),
                     ('skip_connect', 0, 4), ('skip_connect', 1, 4), ('sep_conv_3x3', 1, 4), ('skip_connect', 0, 5),
                     ('sep_conv_3x3', 2, 5), ('sep_conv_3x3', 3, 5)],
                    [('sep_conv_3x3', 0, 2), ('sep_conv_3x3', 1, 2), ('sep_conv_3x3', 0, 3), ('sep_conv_3x3', 1, 3),
                     ('skip_connect', 1, 4), ('sep_conv_3x3', 1, 4), ('skip_connect', 0, 5), ('sep_conv_3x3', 0, 5),
                     ('sep_conv_3x3', 1, 5)]], concat=range(2, 6))
| 89.557377
| 117
| 0.477576
| 835
| 5,463
| 2.782036
| 0.041916
| 0.337495
| 0.482135
| 0.18941
| 0.91003
| 0.908739
| 0.908739
| 0.904003
| 0.904003
| 0.887645
| 0
| 0.149227
| 0.289768
| 5,463
| 61
| 118
| 89.557377
| 0.449485
| 0.018671
| 0
| 0.339286
| 0
| 0
| 0.393244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017857
| 0
| 0.017857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
9008aac69583e7754b1fe2c22fae49141bdb7d05
| 11,083
|
py
|
Python
|
rdmo/questions/migrations/0040_add_language_fields.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/questions/migrations/0040_add_language_fields.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/questions/migrations/0040_add_language_fields.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-05 14:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding tertiary/quaternary/quinary language
    fields (``*_lang3``/``*_lang4``/``*_lang5``) to catalog, question,
    questionset and section, plus AlterField updates to the existing
    lang1/lang2 fields (the prior field state is not visible here —
    presumably only help_text/verbose_name metadata changed; confirm
    against migration 0039 if it matters)."""

    dependencies = [
        ('questions', '0039_meta'),
    ]

    operations = [
        # New language slots: lang3 = tertiary, lang4 = quaternary, lang5 = quinary.
        migrations.AddField(
            model_name='catalog',
            name='title_lang3',
            field=models.CharField(blank=True, help_text='The title for this catalog in the tertiary language.', max_length=256, null=True, verbose_name='Title (tertiary)'),
        ),
        migrations.AddField(
            model_name='catalog',
            name='title_lang4',
            field=models.CharField(blank=True, help_text='The title for this catalog in the quaternary language.', max_length=256, null=True, verbose_name='Title (quaternary)'),
        ),
        migrations.AddField(
            model_name='catalog',
            name='title_lang5',
            field=models.CharField(blank=True, help_text='The title for this catalog in the quinary language.', max_length=256, null=True, verbose_name='Title (quinary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='help_lang3',
            field=models.TextField(blank=True, help_text='The help text for this question in the tertiary language.', null=True, verbose_name='Help (tertiary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='help_lang4',
            field=models.TextField(blank=True, help_text='The help text for this question in the quaternary language.', null=True, verbose_name='Help (quaternary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='help_lang5',
            field=models.TextField(blank=True, help_text='The help text for this question in the quinary language.', null=True, verbose_name='Help (quinary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='text_lang3',
            field=models.TextField(blank=True, help_text='The text for this question in the tertiary language.', null=True, verbose_name='Text (tertiary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='text_lang4',
            field=models.TextField(blank=True, help_text='The text for this question in the quaternary language.', null=True, verbose_name='Text (quaternary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='text_lang5',
            field=models.TextField(blank=True, help_text='The text for this question in the quinary language.', null=True, verbose_name='Text (quinary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='verbose_name_lang3',
            field=models.CharField(blank=True, help_text='The name displayed for this question in the tertiary language.', max_length=256, verbose_name='Name (tertiary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='verbose_name_lang4',
            field=models.CharField(blank=True, help_text='The name displayed for this question in the quaternary language.', max_length=256, verbose_name='Name (quaternary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='verbose_name_lang5',
            field=models.CharField(blank=True, help_text='The name displayed for this question in the quinary language.', max_length=256, verbose_name='Name (quinary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='verbose_name_plural_lang3',
            field=models.CharField(blank=True, help_text='The plural name displayed for this question in the tertiary language.', max_length=256, verbose_name='Plural name (tertiary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='verbose_name_plural_lang4',
            field=models.CharField(blank=True, help_text='The plural name displayed for this question in the quaternary language.', max_length=256, verbose_name='Plural name (quaternary)'),
        ),
        migrations.AddField(
            model_name='question',
            name='verbose_name_plural_lang5',
            field=models.CharField(blank=True, help_text='The plural name displayed for this question in the quinary language.', max_length=256, verbose_name='Plural name (quinary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='help_lang3',
            field=models.TextField(blank=True, help_text='The help text for this questionset in the tertiary language.', null=True, verbose_name='Help (tertiary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='help_lang4',
            field=models.TextField(blank=True, help_text='The help text for this questionset in the quaternary language.', null=True, verbose_name='Help (quaternary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='help_lang5',
            field=models.TextField(blank=True, help_text='The help text for this questionset in the quinary language.', null=True, verbose_name='Help (quinary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='title_lang3',
            field=models.CharField(blank=True, help_text='The title for this questionset in the tertiary language.', max_length=256, null=True, verbose_name='Title (tertiary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='title_lang4',
            field=models.CharField(blank=True, help_text='The title for this questionset in the quaternary language.', max_length=256, null=True, verbose_name='Title (quaternary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='title_lang5',
            field=models.CharField(blank=True, help_text='The title for this questionset in the quinary language.', max_length=256, null=True, verbose_name='Title (quinary)'),
        ),
        # NOTE(review): these questionset fields reuse "this question" in their
        # help_text — likely a copy-paste in the model definitions; the
        # migration faithfully mirrors the models, so it is not fixed here.
        migrations.AddField(
            model_name='questionset',
            name='verbose_name_lang3',
            field=models.CharField(blank=True, help_text='The name displayed for this question in the tertiary language.', max_length=256, verbose_name='Name (tertiary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='verbose_name_lang4',
            field=models.CharField(blank=True, help_text='The name displayed for this question in the quaternary language.', max_length=256, verbose_name='Name (quaternary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='verbose_name_lang5',
            field=models.CharField(blank=True, help_text='The name displayed for this question in the quinary language.', max_length=256, verbose_name='Name (quinary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='verbose_name_plural_lang3',
            field=models.CharField(blank=True, help_text='The plural name displayed for this question in the tertiary language.', max_length=256, verbose_name='Plural name (tertiary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='verbose_name_plural_lang4',
            field=models.CharField(blank=True, help_text='The plural name displayed for this question in the quaternary language.', max_length=256, verbose_name='Plural name (quaternary)'),
        ),
        migrations.AddField(
            model_name='questionset',
            name='verbose_name_plural_lang5',
            field=models.CharField(blank=True, help_text='The plural name displayed for this question in the quinary language.', max_length=256, verbose_name='Plural name (quinary)'),
        ),
        migrations.AddField(
            model_name='section',
            name='title_lang3',
            field=models.CharField(blank=True, help_text='The title for this section in the tertiary language.', max_length=256, null=True, verbose_name='Title (tertiary)'),
        ),
        migrations.AddField(
            model_name='section',
            name='title_lang4',
            field=models.CharField(blank=True, help_text='The title for this section in the quaternary language.', max_length=256, null=True, verbose_name='Title (quaternary)'),
        ),
        migrations.AddField(
            model_name='section',
            name='title_lang5',
            field=models.CharField(blank=True, help_text='The title for this section in the quinary language.', max_length=256, null=True, verbose_name='Title (quinary)'),
        ),
        # Updated definitions of the pre-existing primary/secondary fields.
        migrations.AlterField(
            model_name='catalog',
            name='title_lang1',
            field=models.CharField(blank=True, help_text='The title for this catalog in the primary language.', max_length=256, null=True, verbose_name='Title (primary)'),
        ),
        migrations.AlterField(
            model_name='catalog',
            name='title_lang2',
            field=models.CharField(blank=True, help_text='The title for this catalog in the secondary language.', max_length=256, null=True, verbose_name='Title (secondary)'),
        ),
        migrations.AlterField(
            model_name='question',
            name='text_lang1',
            field=models.TextField(blank=True, help_text='The text for this question in the primary language.', null=True, verbose_name='Text (primary)'),
        ),
        migrations.AlterField(
            model_name='question',
            name='text_lang2',
            field=models.TextField(blank=True, help_text='The text for this question in the secondary language.', null=True, verbose_name='Text (secondary)'),
        ),
        migrations.AlterField(
            model_name='questionset',
            name='title_lang1',
            field=models.CharField(blank=True, help_text='The title for this questionset in the primary language.', max_length=256, null=True, verbose_name='Title (primary)'),
        ),
        migrations.AlterField(
            model_name='questionset',
            name='title_lang2',
            field=models.CharField(blank=True, help_text='The title for this questionset in the secondary language.', max_length=256, null=True, verbose_name='Title (secondary)'),
        ),
        migrations.AlterField(
            model_name='section',
            name='title_lang1',
            field=models.CharField(blank=True, help_text='The title for this section in the primary language.', max_length=256, null=True, verbose_name='Title (primary)'),
        ),
        migrations.AlterField(
            model_name='section',
            name='title_lang2',
            field=models.CharField(blank=True, help_text='The title for this section in the secondary language.', max_length=256, null=True, verbose_name='Title (secondary)'),
        ),
    ]
| 53.800971
| 189
| 0.641252
| 1,280
| 11,083
| 5.39375
| 0.053125
| 0.079664
| 0.071553
| 0.093569
| 0.969728
| 0.96741
| 0.956692
| 0.894844
| 0.89325
| 0.880649
| 0
| 0.016886
| 0.246594
| 11,083
| 205
| 190
| 54.063415
| 0.80994
| 0.006226
| 0
| 0.80303
| 1
| 0
| 0.339115
| 0.013623
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010101
| 0
| 0.025253
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
902b61b253f04ef093e9d19b3fe2dc3f65308050
| 4,250
|
py
|
Python
|
compuG/spritesIntro.py
|
alejoso76/Computaci-n-gr-fica
|
474a498a328b8951aa0bfa1db2d0d1f3d8cc914b
|
[
"MIT"
] | null | null | null |
compuG/spritesIntro.py
|
alejoso76/Computaci-n-gr-fica
|
474a498a328b8951aa0bfa1db2d0d1f3d8cc914b
|
[
"MIT"
] | null | null | null |
compuG/spritesIntro.py
|
alejoso76/Computaci-n-gr-fica
|
474a498a328b8951aa0bfa1db2d0d1f3d8cc914b
|
[
"MIT"
] | null | null | null |
import pygame
import math
# Window dimensions in pixels.
ANCHO = 640
ALTO = 480


def _mover_nave(pantalla, nave, pos_nave, eje, paso, limite, envoltura, etiqueta):
    """Move the sprite one step along one axis, wrapping at the screen edge,
    and redraw it on a black background.

    pantalla/nave -- pygame surface and sprite image
    pos_nave      -- current [x, y] position (not mutated; a new list is returned)
    eje           -- 0 for x, 1 for y
    paso          -- signed step in pixels
    limite        -- coordinate at which wrapping kicks in
    envoltura     -- coordinate the sprite jumps to when it wraps
    etiqueta      -- label printed for the key press ('Left', 'Right', ...)
    """
    print(etiqueta)
    # Negative steps wrap once the coordinate falls below `limite`;
    # positive steps wrap once it reaches `limite`.
    dentro = pos_nave[eje] >= limite if paso < 0 else pos_nave[eje] < limite
    nueva_pos = list(pos_nave)
    if dentro:
        nueva_pos[eje] += paso
    else:
        nueva_pos[eje] = envoltura
    # Single clear+blit+flip per move (the original flipped an all-black frame
    # first, which flashed the screen).
    pantalla.fill([0, 0, 0])
    pantalla.blit(nave, nueva_pos)
    pygame.display.flip()
    return nueva_pos


if __name__ == '__main__':
    pygame.init()
    pantalla = pygame.display.set_mode([ANCHO, ALTO])  # create the window

    # Load the sprite and draw it at its starting position.
    nave = pygame.image.load('thor.png')
    posNave = [10, 10]
    pantalla.blit(nave, posNave)
    pygame.display.flip()
    print('Funciona')

    # key -> (axis, step, wrap limit, wrap position, label); replaces four
    # nearly identical if/else blocks from the original.
    movimientos = {
        pygame.K_LEFT: (0, -5, -115, 625, 'Left'),
        pygame.K_RIGHT: (0, 5, 625, -115, 'Right'),
        pygame.K_UP: (1, -5, -110, 470, 'Up'),
        pygame.K_DOWN: (1, 5, 470, 0, 'Down'),
    }

    fin = False
    while not fin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                fin = True
            if event.type == pygame.KEYDOWN and event.key in movimientos:
                eje, paso, limite, envoltura, etiqueta = movimientos[event.key]
                posNave = _mover_nave(pantalla, nave, posNave, eje, paso,
                                      limite, envoltura, etiqueta)
                print(posNave)
| 36.324786
| 69
| 0.400706
| 355
| 4,250
| 4.760563
| 0.2
| 0.146154
| 0.181065
| 0.136095
| 0.752071
| 0.733136
| 0.733136
| 0.709467
| 0.679882
| 0.651479
| 0
| 0.040662
| 0.502353
| 4,250
| 116
| 70
| 36.637931
| 0.758392
| 0.093412
| 0
| 0.582278
| 0
| 0
| 0.014766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.025316
| null | null | 0.126582
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5f4693d29f821f396de3761f73f3b81a69a66971
| 204
|
py
|
Python
|
cognite/async_client/jobs/__init__.py
|
cognitedata/cognite-async
|
256223b6b4f3e1630a8ab289ac4295a20d24437b
|
[
"Apache-2.0"
] | 1
|
2020-01-24T13:35:21.000Z
|
2020-01-24T13:35:21.000Z
|
cognite/async_client/jobs/__init__.py
|
cognitedata/cognite-async
|
256223b6b4f3e1630a8ab289ac4295a20d24437b
|
[
"Apache-2.0"
] | null | null | null |
cognite/async_client/jobs/__init__.py
|
cognitedata/cognite-async
|
256223b6b4f3e1630a8ab289ac4295a20d24437b
|
[
"Apache-2.0"
] | null | null | null |
from cognite.async_client.jobs.base import Job
from cognite.async_client.jobs.create import CreateJob
from cognite.async_client.jobs.datapoints import CountDatapointsJob, DatapointsJob, DatapointsListJob
| 51
| 101
| 0.877451
| 26
| 204
| 6.769231
| 0.538462
| 0.1875
| 0.272727
| 0.375
| 0.443182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068627
| 204
| 3
| 102
| 68
| 0.926316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5f57ff9c79f42f14061285a04b9fc8636bd80c73
| 18,282
|
py
|
Python
|
app/models/samples/schedule_ruleset.py
|
tanushree04/energy-model-schema-1
|
ea92b0d300f1bf97633ff19ae64006d0ea2fed21
|
[
"BSD-3-Clause"
] | null | null | null |
app/models/samples/schedule_ruleset.py
|
tanushree04/energy-model-schema-1
|
ea92b0d300f1bf97633ff19ae64006d0ea2fed21
|
[
"BSD-3-Clause"
] | null | null | null |
app/models/samples/schedule_ruleset.py
|
tanushree04/energy-model-schema-1
|
ea92b0d300f1bf97633ff19ae64006d0ea2fed21
|
[
"BSD-3-Clause"
] | null | null | null |
# Sample request payload for a ScheduleRuleset: a continuous schedule typed
# as Temperature with limits 0-20, whose default, summer-design and
# winter-design days all hold a constant value of 20 until 24:00, plus a
# single rule that applies the same constant day to every day of the year
# (Jan 1 - Dec 31, all weekdays and holidays).
schedule_ruleset = {
    'type': 'ScheduleRuleset',
    'name': 'Schedule Ruleset',
    # Value type and the allowed numeric range for every value below.
    'schedule_type': {
        'type': 'ScheduleTypeLimit',
        'numeric_type': 'ScheduleContinuous',
        'unit_type': 'Temperature',
        'name': 'Numeric Type',
        'lower_limit_value': 0,
        'upper_limit_value': 20
    },
    # Day profile used when no schedule rule matches: constant 20 all day.
    'default_day_schedule': {
        'type': 'ScheduleDay',
        'name': 'Default Day 1',
        'interpolate_to_timestep': False,
        'day_values': [
            {
                'time': {
                    'hour': 24,
                    'minute': 00
                },
                'value_until_time': 20
            }
        ],
    },
    # Day profile for the summer design day (same constant profile).
    'summer_designday_schedule': {
        'type': 'ScheduleDay',
        'name': 'Default Day 2',
        'interpolate_to_timestep': False,
        'day_values': [
            {
                'time': {
                    'hour': 24,
                    'minute': 00
                },
                'value_until_time': 20
            }
        ],
    },
    # Day profile for the winter design day (same constant profile).
    'winter_designday_schedule': {
        'type': 'ScheduleDay',
        'name': 'Default Day 3',
        'interpolate_to_timestep': False,
        'day_values': [
            {
                'time': {
                    'hour': 24,
                    'minute': 00
                },
                'value_until_time': 20
            }
        ]
    },
    # One rule covering the whole year and all seven days plus holidays.
    'schedule_rules': [
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Default Day 4',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 20
                    }
                ]
            },
            'start_period': {
                'month': 1,
                'day': 1,
                'is_leap_year': False
            },
            'end_period': {
                'month': 12,
                'day': 31,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 1',
            'apply_sunday': True,
            'apply_monday': True,
            'apply_tuesday': True,
            'apply_wednesday': True,
            'apply_thursday': True,
            'apply_friday': True,
            'apply_saturday': True,
            'apply_holiday': True
        }
    ]
}
# Sample request payload for a second ScheduleRuleset: a dimensionless
# fractional schedule (limits 0-1).  The default day is 1 from 07:00 to
# 19:00 and 0 otherwise; both design days are 0 all day.  The nine rules
# form three date ranges (Aug 17 - Dec 11, Apr 13 - Jun 12, Jan 5 - Apr 3),
# each with a Sunday, a Saturday and a weekday profile.
schedule_ruleset_1 = {
    'type': 'ScheduleRuleset',
    'name': 'Schedule Ruleset 1',
    # Value type and allowed numeric range (fractional, 0-1).
    'schedule_type': {
        'type': 'ScheduleTypeLimit',
        'numeric_type': 'ScheduleContinuous',
        'unit_type': 'Dimensionless',
        'name': 'Numeric Type',
        'lower_limit_value': 0,
        'upper_limit_value': 1
    },
    # Fallback day: 0 until 07:00, 1 until 19:00, 0 until 23:59.
    'default_day_schedule': {
        'type': 'ScheduleDay',
        'name': 'Default Day 1',
        'interpolate_to_timestep': False,
        'day_values': [
            {
                'time': {
                    'hour': 7,
                    'minute': 00
                },
                'value_until_time': 0
            },
            {
                'time': {
                    'hour': 19,
                    'minute': 00
                },
                'value_until_time': 1
            },
            {
                'time': {
                    'hour': 23,
                    'minute': 59
                },
                'value_until_time': 0
            }
        ]
    },
    # Summer design day: 0 all day.
    'summer_designday_schedule': {
        'type': 'ScheduleDay',
        'name': 'Default Day 2',
        'interpolate_to_timestep': False,
        'day_values': [
            {
                'time': {
                    'hour': 23,
                    'minute': 59
                },
                'value_until_time': 0
            }
        ]
    },
    # Winter design day: 0 all day.
    'winter_designday_schedule': {
        'type': 'ScheduleDay',
        'name': 'Default Day 3',
        'interpolate_to_timestep': False,
        'day_values': [
            {
                'time': {
                    'hour': 23,
                    'minute': 59
                },
                'value_until_time': 0
            }
        ]
    },
    'schedule_rules': [
        # Rule 180: Aug 17 - Dec 11, Sundays only, 0 all day.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 180 Day 1',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 23,
                            'minute': 59
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 8,
                'day': 17,
                'is_leap_year': False
            },
            'end_period': {
                'month': 12,
                'day': 11,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 180',
            'apply_sunday': True,
            'apply_monday': False,
            'apply_tuesday': False,
            'apply_wednesday': False,
            'apply_thursday': False,
            'apply_friday': False,
            'apply_saturday': False,
            'apply_holiday': False
        },
        # Rule 181: Aug 17 - Dec 11, Saturdays only, 0.3 from 07:00 to 16:00.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 181 Day 2',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 7,
                            'minute': 00
                        },
                        'value_until_time': 0
                    },
                    {
                        'time': {
                            'hour': 16,
                            'minute': 00
                        },
                        'value_until_time': 0.3
                    },
                    {
                        'time': {
                            'hour': 23,
                            'minute': 59
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 8,
                'day': 17,
                'is_leap_year': False
            },
            'end_period': {
                'month': 12,
                'day': 11,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 181',
            'apply_sunday': False,
            'apply_monday': False,
            'apply_tuesday': False,
            'apply_wednesday': False,
            'apply_thursday': False,
            'apply_friday': False,
            'apply_saturday': True,
            'apply_holiday': False
        },
        # Rule 182: Aug 17 - Dec 11, weekdays, 1 from 07:00 to 19:00.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 182 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 7,
                            'minute': 00
                        },
                        'value_until_time': 0
                    },
                    {
                        'time': {
                            'hour': 19,
                            'minute': 00
                        },
                        'value_until_time': 1
                    },
                    {
                        'time': {
                            'hour': 23,
                            'minute': 59,
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 8,
                'day': 17,
                'is_leap_year': False
            },
            'end_period': {
                'month': 12,
                'day': 11,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 182',
            'apply_sunday': False,
            'apply_monday': True,
            'apply_tuesday': True,
            'apply_wednesday': True,
            'apply_thursday': True,
            'apply_friday': True,
            'apply_saturday': False,
            'apply_holiday': False
        },
        # Rule 183: Apr 13 - Jun 12, Sundays only, 0 all day.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 183 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 0
                    }
                ],
            },
            'start_period': {
                'month': 4,
                'day': 13,
                'is_leap_year': False
            },
            'end_period': {
                'month': 6,
                'day': 12,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 183',
            'apply_sunday': True,
            'apply_monday': False,
            'apply_tuesday': False,
            'apply_wednesday': False,
            'apply_thursday': False,
            'apply_friday': False,
            'apply_saturday': False,
            'apply_holiday': False
        },
        # Rule 184: Apr 13 - Jun 12, Saturdays only, 0.29 from 07:00 to 16:00.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 184 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 7,
                            'minute': 00
                        },
                        'value_until_time': 0,
                    },
                    {
                        'time': {
                            'hour': 16,
                            'minute': 00
                        },
                        'value_until_time': 0.29
                    },
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 4,
                'day': 13,
                'is_leap_year': False
            },
            'end_period': {
                'month': 6,
                'day': 12,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 184',
            'apply_sunday': False,
            'apply_monday': False,
            'apply_tuesday': False,
            'apply_wednesday': False,
            'apply_thursday': False,
            'apply_friday': False,
            'apply_saturday': True,
            'apply_holiday': False
        },
        # Rule 185: Apr 13 - Jun 12, weekdays, 0.57 from 07:00 to 20:00.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 185 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 7,
                            'minute': 00
                        },
                        'value_until_time': 0
                    },
                    {
                        'time': {
                            'hour': 20,
                            'minute': 00
                        },
                        'value_until_time': 0.57
                    },
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 4,
                'day': 13,
                'is_leap_year': False
            },
            'end_period': {
                'month': 6,
                'day': 12,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 185',
            'apply_sunday': False,
            'apply_monday': True,
            'apply_tuesday': True,
            'apply_wednesday': True,
            'apply_thursday': True,
            'apply_friday': True,
            'apply_saturday': False,
            'apply_holiday': False
        },
        # Rule 186: Jan 5 - Apr 3, Sundays only, 0 all day.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 186 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 1,
                'day': 5,
                'is_leap_year': False
            },
            'end_period': {
                'month': 4,
                'day': 3,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 186',
            'apply_sunday': True,
            'apply_monday': False,
            'apply_tuesday': False,
            'apply_wednesday': False,
            'apply_thursday': False,
            'apply_friday': False,
            'apply_saturday': False,
            'apply_holiday': False
        },
        # Rule 187: Jan 5 - Apr 3, Saturdays only, 0.29 from 07:00 to 16:00.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 187 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 7,
                            'minute': 00
                        },
                        'value_until_time': 0
                    },
                    {
                        'time': {
                            'hour': 16,
                            'minute': 00
                        },
                        'value_until_time': 0.29
                    },
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 1,
                'day': 5,
                'is_leap_year': False
            },
            'end_period': {
                'month': 4,
                'day': 3,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 187',
            'apply_sunday': False,
            'apply_monday': False,
            'apply_tuesday': False,
            'apply_wednesday': False,
            'apply_thursday': False,
            'apply_friday': False,
            'apply_saturday': True,
            'apply_holiday': False
        },
        # Rule 188: Jan 5 - Apr 3, weekdays, 0.57 from 07:00 to 18:00.
        {
            'type': 'ScheduleRule',
            'schedule_day': {
                'type': 'ScheduleDay',
                'name': 'Schedule Rule 188 Day',
                'interpolate_to_timestep': False,
                'day_values': [
                    {
                        'time': {
                            'hour': 7,
                            'minute': 00
                        },
                        'value_until_time': 0
                    },
                    {
                        'time': {
                            'hour': 18,
                            'minute': 00
                        },
                        'value_until_time': 0.57
                    },
                    {
                        'time': {
                            'hour': 24,
                            'minute': 00
                        },
                        'value_until_time': 0
                    }
                ]
            },
            'start_period': {
                'month': 1,
                'day': 5,
                'is_leap_year': False
            },
            'end_period': {
                'month': 4,
                'day': 3,
                'is_leap_year': False
            },
            'name': 'Schedule Rule 188',
            'apply_sunday': False,
            'apply_monday': True,
            'apply_tuesday': True,
            'apply_wednesday': True,
            'apply_thursday': True,
            'apply_friday': True,
            'apply_saturday': False,
            'apply_holiday': False
        }
    ]
}
| 31.961538
| 54
| 0.296849
| 1,078
| 18,282
| 4.766234
| 0.076994
| 0.081744
| 0.081744
| 0.084079
| 0.975282
| 0.948618
| 0.948618
| 0.942585
| 0.942585
| 0.883418
| 0
| 0.038718
| 0.598786
| 18,282
| 571
| 55
| 32.017513
| 0.661759
| 0
| 0
| 0.647368
| 0
| 0
| 0.256646
| 0.025599
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5fc65d07436a36562558424b717923385efb4e0c
| 34,287
|
py
|
Python
|
paragen/modules/search/beam_search.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | 50
|
2022-01-18T07:25:46.000Z
|
2022-03-14T13:06:18.000Z
|
paragen/modules/search/beam_search.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 2
|
2022-01-19T09:36:42.000Z
|
2022-02-23T07:16:02.000Z
|
paragen/modules/search/beam_search.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 6
|
2022-01-19T09:28:53.000Z
|
2022-03-10T10:20:08.000Z
|
import torch
import torch.nn.functional as F
from paragen.modules.search import register_search
from paragen.modules.search.sequence_search import SequenceSearch
from paragen.modules.utils import create_init_scores
from paragen.utils.ops import recursive
@register_search
class BeamSearch(SequenceSearch):
    """
    BeamSearch is beam search on sequence generation.

    Args:
        beam: beam size
        lenpen: length penalty
        maxlen_coef (a, b): maxlen computation coefficient.
            The max length is computed as `(S * a + b)`, where S is source sequence length.
        maxlen: maximum length
        num_return_sequence: the number of return generated sequence for each input sample,
            which must smaller or equal to beam size, default as 1 to return the sequence
            with the highest log probability
        keepdim: bool, squeeze the output sequence if set True
    """

    def __init__(self, beam=4, lenpen=0.1, maxlen_coef=(1.2, 10), maxlen=1000, num_return_sequence=1, keepdim=False):
        super().__init__()
        self._beam = beam
        self._lenpen = lenpen
        self._maxlen = maxlen
        self._maxlen_a, self._maxlen_b = maxlen_coef
        self._num_return_sequence = num_return_sequence
        self._keepdim = keepdim
        self._expand_cache = recursive(self._expand_tensor)
        self._update_cache = recursive(self._update_tensor)
        # Bug fix: the message must read `self._num_return_sequence` — the
        # previous `self.num_return_sequence` was a non-existent attribute, so
        # a failing check raised AttributeError instead of AssertionError.
        assert self._num_return_sequence <= self._beam, \
            f"number of return sequence <= beam size, got {self._num_return_sequence} > {self._beam}"

    def forward(self,
                tokens,
                memory,
                memory_padding_mask,
                target_mask=None,
                prev_scores=None):
        """
        Decoding full-step sequence with beam search

        Args:
            tokens: previous tokens or prefix of sequence
                :math:`(N, T)` where N is batch size and T is current sequence length
            memory: memory for attention.
                :math:`(M, N, E)`, where M is the memory sequence length, N is the batch size,
            memory_padding_mask: memory sequence padding mask.
                :math:`(N, M)` where M is the memory sequence length, N is the batch size.
            target_mask: target mask indicating blacklist tokens
                :math:`(N, V)` where N is batch size and V is vocab size
            prev_scores: scores of previous tokens
                :math:`(N)` where N is batch size

        Returns:
            - log probability of generated sequence
                :math: `(N, S)` where N is batch size and S is the number of return sequence
            - generated sequence
                :math: `(N, S, maxT)` where N is batch size, maxT is the maximum sequence length
                and S is the number of return sequence
                `(N, maxT)` if self.keepdim=True and S = 1
        """
        batch_size, curlen = tokens.size(0), tokens.size(1)
        memory_size = memory_padding_mask.size(-1)
        scores = create_init_scores(tokens, memory) if prev_scores is None else prev_scores
        # produce the first token
        logits = self._decoder(tokens, memory,
                               memory_padding_mask=memory_padding_mask)[:, -1, :]
        logits = F.log_softmax(logits, dim=-1)
        next_token_score, next_token = logits.topk(self._beam, dim=-1)
        scores = scores.unsqueeze(-1) + next_token_score
        next_token = next_token.unsqueeze(dim=-1)
        tokens = torch.cat([tokens.unsqueeze(dim=1).repeat(1, self._beam, 1), next_token], dim=-1)
        # copy cache and memory for 'beam' times
        memory, memory_padding_mask = self._expand(memory, memory_padding_mask)
        # generate rest of tokens
        maxlen = min(int(memory_size * self._maxlen_a + self._maxlen_b) - curlen, self._maxlen)
        finished = tokens.data.new(batch_size, 1, maxlen + curlen + 1).fill_(self._eos)
        finished_scores = scores.data.new(batch_size, 1).fill_(float('-inf'))
        for _ in range(maxlen - 1):
            tokens, scores = self._produce_candidates(tokens, memory, memory_padding_mask, scores)
            finished_mask = tokens[:, :, -1] == self._eos
            finished, finished_scores = self._add_finished(finished, finished_scores, tokens, scores, finished_mask)
            tokens, scores = self._update_states(tokens, scores, finished_mask)
        if not self._keepdim and self._num_return_sequence == 1:
            finished = finished.squeeze(dim=1)
        return finished_scores, finished

    def _produce_candidates(self, tokens, memory, memory_padding_mask, scores):
        """
        Predict `beam**2 * 2` candidates at next step

        Args:
            tokens: previous tokens or prefix of sequence at time step T
                :math:`(N, B, T)` where N is batch size, B is beam size and T is current sequence length
            memory: memory for attention.
                :math:`(M, N * B, E)`, where M is the memory sequence length, N is the batch size,
                B is beam size and E is feature size
            memory_padding_mask: memory sequence padding mask.
                :math:`(N * B, M)` where M is the memory sequence length, N is the batch size,
                B is beam size.
            scores: scores of previous tokens
                :math:`(N * B)` where N is batch size and B is beam size.

        Returns:
            - candidate tokens at time step T+1
                :math:`(N, B * B * 2, T+1)` where N is batch size, B is beam size and T is current sequence length
            - log probablity of candidate tokens
                :math:`(N, B * B * 2)` where N is batch size and B is beam size.
        """
        seqlen = tokens.size(-1)
        logits = self._decoder(tokens.view(-1, seqlen), memory,
                               memory_padding_mask=memory_padding_mask)[:, -1, :]
        logits = F.log_softmax(logits, dim=-1)
        next_token_score, next_token = logits.topk(self._beam * 2, dim=-1)
        next_token_score = next_token_score.view(-1, self._beam, self._beam * 2)
        scores = scores.unsqueeze(dim=2) + next_token_score
        scores = scores.view(-1, self._beam ** 2 * 2)
        tokens = torch.cat([tokens.unsqueeze(dim=2).repeat(1, 1, self._beam * 2, 1),
                            next_token.view(-1, self._beam, self._beam * 2, 1)],
                           dim=-1)
        tokens = tokens.view(-1, self._beam ** 2 * 2, seqlen + 1)
        return tokens, scores

    def _add_finished(self, finished, finished_scores, tokens, scores, finished_mask):
        """
        Select new finished sequences and add them to finished list.

        Args:
            finished: finished sequences ending with eos
                :math:`(N, S, maxT)` where N is batch size, maxT is the maximum sequence length
                S is the number of return sequence
            finished_scores: log probability of finished sequence
                :math:`(N, S)` where N is batch size and B is beam size
                S is the number of return sequence
            tokens: newly-generated sequences
                :math:`(N, B * B * 2, T)` where N is batch size, B is beam size and T is decoded sequence length
            scores: log probability of newly-generated sequences
                :math:`(N, B)` where N is batch size, B is beam size.
            finished_mask: mask indicating which sequences in `tokens` ends with eos
                :math:`(N, B)` where N is batch size and B is beam size

        Returns:
            - updated finished sequences
                :math:`(N, S, maxT)` where N is batch size, B is beam size, maxT is the maximum sequence length
                S is the number of return sequence
            - log probability of updated finished sequence
                :math:`(N, S)` where N is batch size and B is beam size.
                S is the number of return sequence
        """
        maxlen = finished.size(dim=-1)
        curlen = tokens.size(dim=-1)
        non_finished_mask = ~finished_mask
        # Length-normalize scores (GNMT-style penalty), then mask out
        # candidates that have not finished yet.
        scores = scores / ((5 + curlen) / 6)**self._lenpen
        scores = scores.masked_fill(non_finished_mask, float('-inf'))
        tokens = torch.cat([tokens, tokens.data.new(tokens.size()[:-1] + ((maxlen - curlen),)).fill_(self._eos)],
                           dim=-1)
        finished_scores = torch.cat([finished_scores, scores], dim=1)
        finished = torch.cat([finished, tokens], dim=1)
        # Keep only the top `num_return_sequence` finished hypotheses.
        finished_scores, idx = finished_scores.topk(self._num_return_sequence, dim=1)
        finished = finished.gather(1, index=idx.unsqueeze(dim=-1).repeat(1, 1, maxlen))
        return finished, finished_scores

    def _update_states(self, tokens, scores, finished_mask):
        """
        Update decoder internal states with results preferred by beam search algorithm

        Args:
            tokens: newly-generated sequences
                :math:`(N, B * B * 2, T)` where N is batch size, B is beam size and T is sequence length
            scores: log probability of newly-generated sequences
                :math:`(N, B * B * 2)` where N is batch size, B is beam size.
            finished_mask: mask indicating which sequences in `tokens` ends with eos
                :math:`(N, B * B * 2)` where N is batch size and B is beam size

        Returns:
            - updated sequences
                :math: `(N, B, T)` where N is batch size, B is beam size and T is sequence length
            - scores of updated sequences
                :math: `(N, B)` where N is batch size, B is beam size
        """
        curlen = tokens.size(dim=-1)
        # Finished candidates are excluded from the surviving beams.
        scores = scores.masked_fill(finished_mask, float('-inf'))
        scores, idx = scores.topk(self._beam, dim=-1)
        tokens = tokens.gather(1, index=idx.unsqueeze(2).repeat(1, 1, curlen))
        cache = self._decoder.get_cache()
        cache = self._update_cache(cache, idx)
        self._decoder.set_cache(cache)
        return tokens, scores

    def _expand(self, memory, memory_padding_mask):
        """
        Expand encoder states with `beam` times

        Args:
            memory: memory for attention.
                :math:`(M, N, E)`, where M is the memory sequence length, N is the batch size,
                B is beam size and E is feature size
            memory_padding_mask: memory sequence padding mask.
                :math:`(N, M)` where M is the memory sequence length, N is the batch size,
                B is beam size.

        Returns:
            - expanded memory
                :math:`(M, N * B, E)`, where M is the memory sequence length, N is the batch size,
                B is beam size and E is feature size
            - expanded memory padding mask
                :math:`(N * B, M)` where M is the memory sequence length, N is the batch size,
                B is beam size.
        """
        batch_size, memory_size = memory_padding_mask.size()
        cache = self._decoder.get_cache()
        cache = self._expand_cache(cache)
        self._decoder.set_cache(cache)
        memory = memory.unsqueeze(dim=2).repeat(1, 1, self._beam, 1)
        memory = memory.view(memory_size, batch_size * self._beam, -1)
        memory_padding_mask = memory_padding_mask.unsqueeze(dim=1).repeat(1, self._beam, 1)
        memory_padding_mask = memory_padding_mask.view(batch_size * self._beam, memory_size)
        return memory, memory_padding_mask

    def _expand_tensor(self, cache: torch.Tensor):
        """
        Expand tensor with `beam` times after batch dimension

        Args:
            cache: torch tensor
                :math:`(M, N, *)` where M is the memory sequence length, N is the batch size.

        Returns:
            - expanded cache
                :math:`(M, N * B, *)` where M is the memory sequence length, N is the batch size,
                B is beam size.
        """
        size = cache.size()
        repeat = (1, 1, self._beam) + tuple(1 for _ in size[2:])
        cache = cache.unsqueeze(2).repeat(repeat).view((size[0], size[1] * self._beam,) + size[2:])
        return cache

    def _update_tensor(self, cache: torch.Tensor, idx):
        """
        Update tensor by indexing with idx

        Args:
            cache: torch tensor
                :math:`(M, N * B * B, E)` where M is the memory sequence length, N is the batch size,
                B is beam size, and E is feature dimension.
            idx: index to select
                :math:`(N, B)` where N is the batch size and B is beam size.

        Returns:
            - indexed cache
                :math:`(M, N * B, E)` where N is the batch size, B is beam size and E is feature dimension.
        """
        size = cache.size()
        # Map candidate index (over B * 2 expansions) back to its source beam.
        idx = idx // (self._beam * 2)
        cache = cache.view(size[0], size[1] // self._beam, self._beam, size[-1])
        idx = idx.unsqueeze(0).unsqueeze(-1).repeat((size[0], 1, 1, size[-1]))
        cache = cache.gather(2, index=idx)
        cache = cache.view(size[0], size[1], size[-1])
        return cache
@register_search
class BeamSearchV2(SequenceSearch):
"""
BeamSearchV2 is beam search on sequence generation. Different from V1, V2 is another implementation of beam search,
reducing beam search candidates at next step from beam * beam * 2 to beam * 2.
Args:
beam: beam size
lenpen: length penalty
maxlen_coef (a, b): maxlen computation coefficient.
The max length is computed as `(S * a + b)`, where S is source sequence length.
maxlen: maximum length
num_return_sequence: the number of return generated sequence for each input sample,
which must smaller or equal to beam size, default as 1 to return the sequence
with the highest log probability
keepdim: bool, squeeze the output sequence if set True
"""
def __init__(self, beam=4, lenpen=0.1, maxlen_coef=(1.2, 10), maxlen=1000, num_return_sequence=1, keepdim=False):
super().__init__()
self._beam = beam
self._lenpen = lenpen
self._maxlen = maxlen
self._maxlen_a, self._maxlen_b = maxlen_coef
self._num_return_sequence = num_return_sequence
self._keepdim = keepdim
self._update_cache = recursive(self._update_tensor)
assert self._num_return_sequence <= self._beam, \
f"number of return sequence <= beam size, got {self.num_return_sequence} > {self._beam}"
def forward(self,
tokens,
memory,
memory_padding_mask,
target_mask=None,
prev_scores=None):
"""
Decoding full-step sequence with beam search
Args:
tokens: previous tokens or prefix of sequence
:math:`(N, T)` where N is batch size and T is current sequence length
memory: memory for attention.
:math:`(M, N, E)`, where M is the memory sequence length, N is the batch size,
memory_padding_mask: memory sequence padding mask.
:math:`(N, M)` where M is the memory sequence length, N is the batch size.
target_mask: target mask indicating blacklist tokens
:math:`(N, V)` where N is batch size and V is vocab size
prev_scores: scores of previous tokens
:math:`(N)` where N is batch size
Returns:
- log probability of generated sequence
:math: `(N, S)` where N is batch size and S is the number of return sequence
- generated sequence
:math: `(N, S, maxT)` where N is batch size, maxT is the maximum sequence length
and S is the number of return sequence
`(N, maxT)` if self.keepdim=True and S = 1
"""
batch_size, curlen = tokens.size(0), tokens.size(1)
memory_size = memory_padding_mask.size(-1)
# copy token and memory for 'beam' times
tokens = self._expand(tokens, dim=0) # [N * B, T]
memory = self._expand(memory, dim=1) # [M, N * B, E]
memory_padding_mask = self._expand(memory_padding_mask, dim=0) # [N * B, M]
if prev_scores is None:
scores = create_init_scores(tokens, memory) # [N * B]
else:
scores = self._expand(prev_scores, dim=1) # [N * B]
# generate rest of tokens
maxlen = min(int(memory_size * self._maxlen_a + self._maxlen_b) - curlen, self._maxlen)
finished = tokens.data.new(batch_size, 1, maxlen + curlen + 1).fill_(self._eos)
finished_scores = scores.data.new(batch_size, 1).fill_(float('-inf'))
for step in range(maxlen - 1):
tokens, scores, idx = self._produce_candidates(tokens, memory, memory_padding_mask, scores, step)
finished_mask = tokens[:, :, -1] == self._eos
finished, finished_scores = self._add_finished(finished, finished_scores, tokens, scores, finished_mask)
tokens, scores = self._update_states(tokens, scores, idx, finished_mask)
if not self._keepdim and self._num_return_sequence == 1:
finished = finished.squeeze(dim=1)
return finished_scores, finished
def _produce_candidates(self, tokens, memory, memory_padding_mask, scores, step):
"""
Predict `beam * 2` candidates at next step
Args:
tokens: previous tokens or prefix of sequence at time step T
:math:`(N * B, T)` where N is batch size, B is beam size and T is current sequence length
memory: memory for attention.
:math:`(M, N * B, E)`, where M is the memory sequence length, N is the batch size,
B is beam size and E is feature size
memory_padding_mask: memory sequence padding mask.
:math:`(N * B, M)` where M is the memory sequence length, N is the batch size,
B is beam size.
scores: scores of previous tokens
:math:`(N * B)` where N is batch size and B is beam size.
step: decoding steps
Returns:
- candidate tokens at time step T+1
:math:`(N, B * B * 2, T+1)` where N is batch size, B is beam size and T is current sequence length
- log probablity of candidate tokens
:math:`(N, B * B * 2)` where N is batch size and B is beam size.
"""
batch_size, seqlen = tokens.size(0) // self._beam, tokens.size(-1)
logits = self._decoder(tokens, memory,
memory_padding_mask=memory_padding_mask)[:, -1, :]
logits = F.log_softmax(logits, dim=-1)
vocab_size = logits.size(-1)
tokens = tokens.view(batch_size, self._beam, seqlen)
scores = scores.unsqueeze(dim=-1) + logits
scores = scores.view(batch_size, -1)
if step == 0:
scores = scores[:, :vocab_size]
scores, next_id = scores.topk(self._beam * 2, dim=-1)
next_token_batch_id, next_token = next_id // vocab_size, next_id % vocab_size
prev_tokens = tokens.gather(1, next_token_batch_id.unsqueeze(dim=-1).repeat(1, 1, seqlen))
tokens = torch.cat([prev_tokens, next_token.unsqueeze(dim=-1)], dim=-1)
return tokens, scores, next_token_batch_id
def _add_finished(self, finished, finished_scores, tokens, scores, finished_mask):
"""
Select new finished sequences and add them to finished list.
Args:
finished: finished sequences ending with eos
:math:`(N, S, maxT)` where N is batch size, maxT is the maximum sequence length
S is the number of return sequence
finished_scores: log probability of finished sequence
:math:`(N, S)` where N is batch size and B is beam size
S is the number of return sequence
tokens: newly-generated sequences
:math:`(N, B * B * 2, T)` where N is batch size, B is beam size and T is decoded sequence length
scores: log probability of newly-generated sequences
:math:`(N, B)` where N is batch size, B is beam size.
finished_mask: mask indicating which sequences in `tokens` ends with eos
:math:`(N, B)` where N is batch size and B is beam size
Returns:
- updated finished sequences
:math:`(N, S, maxT)` where N is batch size, B is beam size, maxT is the maximum sequence length
S is the number of return sequence
- log probability of updated finished sequence
:math:`(N, S)` where N is batch size and B is beam size.
S is the number of return sequence
"""
maxlen = finished.size(dim=-1)
curlen = tokens.size(dim=-1)
non_finished_mask = ~finished_mask
scores = scores / ((5 + curlen) / 6) ** self._lenpen
scores = scores.masked_fill(non_finished_mask, float('-inf'))
tokens = torch.cat([tokens, tokens.data.new(tokens.size()[:-1] + ((maxlen - curlen),)).fill_(self._eos)],
dim=-1)
finished_scores = torch.cat([finished_scores, scores], dim=1)
finished = torch.cat([finished, tokens], dim=1)
finished_scores, idx = finished_scores.topk(self._num_return_sequence, dim=1)
finished = finished.gather(1, index=idx.unsqueeze(dim=-1).repeat(1, 1, maxlen))
return finished, finished_scores
def _update_states(self, tokens, scores, index, finished_mask):
"""
Update decoder internal states with results preferred by beam search algorithm
Args:
tokens: newly-generated sequences
:math:`(N, B * 2, T)` where N is batch size, B is beam size and T is sequence length
scores: log probability of newly-generated sequences
:math:`(N, B * 2)` where N is batch size, B is beam size.
index: selected beam indices
:math:`(N, B * 2)` where N is batch size, B is beam size
finished_mask: mask indicating which sequences in `tokens` ends with eos
:math:`(N, B * 2)` where N is batch size and B is beam size
Returns:
- updated sequences
:math: `(N, B, T)` where N is batch size, B is beam size and T is sequence length
- scores of updated sequences
:math: `(N, B)` where N is batch size, B is beam size
"""
curlen = tokens.size(dim=-1)
scores = scores.masked_fill(finished_mask, float('-inf'))
scores, idx = scores.topk(self._beam, dim=-1)
tokens = tokens.gather(1, index=idx.unsqueeze(2).repeat(1, 1, curlen))
cache = self._decoder.get_cache()
cache = self._update_cache(cache, index, idx)
self._decoder.set_cache(cache)
return tokens.view(-1, curlen), scores.view(-1)
def _expand(self, tensor, dim):
    """
    Tile `tensor` `beam` times along dimension `dim`, keeping the copies of
    each element adjacent (element 0's copies first, then element 1's, ...).

    Args:
        tensor: a torch tensor
        dim: dimension along which to replicate

    Returns:
        - expanded tensor whose size along `dim` is multiplied by `beam`
    """
    # repeat_interleave performs the original unsqueeze/repeat/view sequence
    # in a single call with identical element ordering.
    return torch.repeat_interleave(tensor, self._beam, dim=dim)
def _update_tensor(self, cache: torch.Tensor, beam_index, non_finished_index):
    """
    Reorder a cached tensor along its beam axis with two successive gathers.

    Args:
        cache: torch tensor
            :math:`(M, N * B * B, E)` where M is the memory sequence length, N is the batch size,
            B is beam size, and E is feature dimension.
        beam_index: index to select in top B * 2
            :math:`(N, B)` where N is the batch size and B is beam size.
        non_finished_index: index to select in top B
            :math:`(N, B)` where N is the batch size and B is beam size.

    Returns:
        - indexed cache
            :math:`(M, N * B, E)` where N is the batch size, B is beam size and E is feature dimension.
    """
    size = cache.size()
    # Expose the beam axis so gather(dim=2) can pick hypotheses per group.
    cache = cache.view(size[0], size[1] // self._beam, self._beam, size[-1])
    # Broadcast the (N, B) index over memory length and feature dims.
    idx = beam_index.unsqueeze(0).unsqueeze(-1).repeat((size[0], 1, 1, size[-1]))
    cache = cache.gather(2, index=idx)
    # NOTE(review): dividing by (beam * 2) presumably maps a flat index over the
    # 2B-candidate pool back to a beam slot -- confirm against the caller that
    # produces `non_finished_index`.
    idx = non_finished_index // (self._beam * 2)
    idx = idx.unsqueeze(0).unsqueeze(-1).repeat((size[0], 1, 1, size[-1]))
    cache = cache.gather(2, index=idx)
    # Collapse back to the original flat layout.
    cache = cache.view(size[0], size[1], size[-1])
    return cache
@register_search
class BeamSearchV3(SequenceSearch):
    """
    BeamSearchV3 is beam search on sequence generation. Different from V1 and V2, V3 use another strategy to implement
    beam search by ranking finished and unfinished candidates all together.

    Args:
        beam: beam size
        lenpen: length penalty
        maxlen_coef (a, b): maxlen computation coefficient.
            The max length is computed as `(S * a + b)`, where S is source sequence length.
        maxlen: maximum length
        num_return_sequence: the number of return generated sequence for each input sample,
            which must smaller or equal to beam size, default as 1 to return the sequence
            with the highest log probability
        keepdim: bool, squeeze the output sequence if set True
    """

    def __init__(self, beam=4, lenpen=0.1, maxlen_coef=(1.2, 10), maxlen=1000, num_return_sequence=1, keepdim=False):
        super().__init__()
        self._beam = beam
        self._lenpen = lenpen
        self._maxlen = maxlen
        self._maxlen_a, self._maxlen_b = maxlen_coef
        self._num_return_sequence = num_return_sequence
        self._keepdim = keepdim
        # Apply the beam-reordering gather to every tensor nested inside the
        # decoder's cache structure.
        self._update_cache = recursive(self._update_tensor)
        # Bug fix: the message previously read `self.num_return_sequence`
        # (missing underscore), which raised AttributeError instead of the
        # intended assertion message when the check failed.
        assert self._num_return_sequence <= self._beam, \
            f"number of return sequence <= beam size, got {self._num_return_sequence} > {self._beam}"

    def forward(self,
                tokens,
                memory,
                memory_padding_mask,
                target_mask=None,
                prev_scores=None):
        """
        Decoding full-step sequence with beam search

        Args:
            tokens: previous tokens or prefix of sequence
                :math:`(N, T)` where N is batch size and T is current sequence length
            memory: memory for attention.
                :math:`(M, N, E)`, where M is the memory sequence length, N is the batch size,
            memory_padding_mask: memory sequence padding mask.
                :math:`(N, M)` where M is the memory sequence length, N is the batch size.
            target_mask: target mask indicating blacklist tokens
                :math:`(N, V)` where N is batch size and V is vocab size
            prev_scores: scores of previous tokens
                :math:`(N)` where N is batch size

        Returns:
            - log probability of generated sequence
                :math: `(N, S)` where N is batch size and S is the number of return sequence
            - generated sequence
                :math: `(N, S, maxT)` where N is batch size, maxT is the maximum sequence length
                and S is the number of return sequence
                `(N, maxT)` if self.keepdim=True and S = 1
        """
        batch_size, curlen = tokens.size(0), tokens.size(1)
        memory_size = memory_padding_mask.size(-1)
        # copy token and memory for 'beam' times
        tokens = self._expand(tokens, dim=0)  # [N * B, T]
        memory = self._expand(memory, dim=1)  # [M, N * B, E]
        memory_padding_mask = self._expand(memory_padding_mask, dim=0)  # [N * B, M]
        if prev_scores is None:
            scores = create_init_scores(tokens, memory)  # [N * B]
        else:
            # NOTE(review): `prev_scores` is documented as 1-D `(N)`, but
            # `_expand(..., dim=1)` unsqueezes at dim 2 which requires a 2-D
            # input -- confirm the shape callers actually pass here.
            scores = self._expand(prev_scores, dim=1)  # [N * B]
        # generate rest of tokens
        maxlen = min(int(memory_size * self._maxlen_a + self._maxlen_b) - curlen, self._maxlen)
        for step in range(maxlen - 1):
            tokens, scores, idx = self._produce_candidates(tokens, memory, memory_padding_mask, scores, step)
            self._update_states(idx)
        # Candidates come out of topk sorted by normalized score, so the first
        # `num_return_sequence` beams are the best ones.
        scores = scores.view(batch_size, self._beam)[:, :self._num_return_sequence]
        tokens = tokens.view(batch_size, self._beam, -1)[:, :self._num_return_sequence, :]
        if not self._keepdim and self._num_return_sequence == 1:
            tokens = tokens.squeeze(dim=1)
        return scores, tokens

    def _produce_candidates(self, tokens, memory, memory_padding_mask, scores, step):
        """
        Predict the top `beam` candidates for the next step, ranking finished
        and unfinished hypotheses together on length-normalized scores.

        Args:
            tokens: previous tokens or prefix of sequence at time step T
                :math:`(N * B, T)` where N is batch size, B is beam size and T is current sequence length
            memory: memory for attention.
                :math:`(M, N * B, E)`, where M is the memory sequence length, N is the batch size,
                B is beam size and E is feature size
            memory_padding_mask: memory sequence padding mask.
                :math:`(N * B, M)` where M is the memory sequence length, N is the batch size,
                B is beam size.
            scores: scores of previous tokens
                :math:`(N * B)` where N is batch size and B is beam size.
            step: decoding step (0-based)

        Returns:
            - candidate tokens at time step T+1, flattened
                :math:`(N * B, T+1)` where N is batch size, B is beam size and T is current sequence length
            - log probability of candidate tokens, flattened
                :math:`(N * B)` where N is batch size and B is beam size.
            - beam indices the candidates came from
                :math:`(N, B)` where N is batch size and B is beam size.
        """
        batch_size, seqlen = tokens.size(0) // self._beam, tokens.size(-1)
        logits = self._decoder(tokens, memory,
                               memory_padding_mask=memory_padding_mask)[:, -1, :]
        logits = F.log_softmax(logits, dim=-1)
        # A hypothesis that already ended with eos may only continue with eos
        # at zero additional cost, freezing its cumulative score.
        finished = tokens[:, -1].eq(self._eos).type_as(logits)
        logits[:, self._eos] *= 1 - finished
        vocab_size = logits.size(-1)
        tokens = tokens.view(batch_size, self._beam, seqlen)
        scores = scores.unsqueeze(dim=-1) + logits
        scores = scores.view(batch_size, self._beam, -1)
        normalized_scores = self._normalize(tokens, scores)
        normalized_scores = normalized_scores.view(batch_size, -1)
        if step == 0:
            # All beams share the same prefix at step 0; restrict ranking to
            # the first beam's vocabulary to avoid selecting duplicates.
            normalized_scores = normalized_scores[:, :vocab_size]
        _, next_id = normalized_scores.topk(self._beam, dim=-1)
        # Decompose the flat (beam * vocab) index into beam slot and token id.
        next_token_batch_id, next_token = next_id // vocab_size, next_id % vocab_size
        scores = scores.view(batch_size, -1)
        scores = scores.gather(1, next_id)
        prev_tokens = tokens.gather(1, next_token_batch_id.unsqueeze(dim=-1).repeat(1, 1, seqlen))
        tokens = torch.cat([prev_tokens, next_token.unsqueeze(dim=-1)], dim=-1)
        return tokens.view(-1, seqlen + 1), scores.view(-1), next_token_batch_id

    def _normalize(self, tokens, scores):
        """
        Length-normalize scores with the penalty ``((5 + len) / 6) ** lenpen``.

        Args:
            tokens: candidate sequences used to derive effective lengths
                :math:`(N, B, T)` where N is batch size, B is beam size and T is decoded sequence length
            scores: raw cumulative log probabilities
                :math:`(N, B, V)` where N is batch size, B is beam size and V is vocab size.

        Returns:
            - length-normalized scores, same shape as `scores`
        """
        # Effective length counts non-eos tokens; the -1 presumably discounts
        # the start token -- confirm against how `tokens` is initialized.
        length = tokens.ne(self._eos).type_as(scores).sum(dim=-1, keepdim=True) - 1
        scores = scores / ((5 + length) / 6) ** self._lenpen
        return scores

    def _update_states(self, index):
        """
        Reorder the decoder's incremental cache to follow the beams selected
        by `_produce_candidates`.

        Args:
            index: selected beam indices
                :math:`(N, B)` where N is batch size, B is beam size
        """
        cache = self._decoder.get_cache()
        cache = self._update_cache(cache, index)
        self._decoder.set_cache(cache)

    def _expand(self, tensor, dim):
        """
        Tile `tensor` `beam` times along dimension `dim`, keeping the copies
        of each element adjacent.

        Args:
            tensor: a torch tensor
            dim: dimension along which to replicate

        Returns:
            - expanded tensor whose size along `dim` is multiplied by `beam`
        """
        tensor_shape = tensor.size()
        tensor = tensor.unsqueeze(dim=dim+1).repeat(
            (1,) * (dim+1) + (self._beam, ) + (1, ) * (len(tensor_shape) - (dim+1)))
        # Merge the inserted beam axis back into `dim`.
        tensor = tensor.view(
            tuple(tensor_shape[:dim]) + (tensor_shape[dim] * self._beam,) + tuple(tensor_shape[dim+1:]))
        return tensor

    def _update_tensor(self, cache: torch.Tensor, index):
        """
        Gather cache entries along the beam axis according to `index`.

        Args:
            cache: torch tensor
                :math:`(M, N * B, E)` where M is the memory sequence length, N is the batch size,
                B is beam size, and E is feature dimension.
            index: index to select in top B
                :math:`(N, B)` where N is the batch size and B is beam size.

        Returns:
            - indexed cache, same shape as the input
        """
        size = cache.size()
        # Expose the beam axis so gather(dim=2) can pick hypotheses per sample.
        cache = cache.view(size[0], size[1] // self._beam, self._beam, size[-1])
        idx = index.unsqueeze(0).unsqueeze(-1).repeat((size[0], 1, 1, size[-1]))
        cache = cache.gather(2, index=idx)
        cache = cache.view(size[0], size[1], size[-1])
        return cache
| 46.712534
| 119
| 0.59594
| 4,650
| 34,287
| 4.268602
| 0.046022
| 0.048516
| 0.026601
| 0.034914
| 0.911734
| 0.898383
| 0.883218
| 0.863721
| 0.849413
| 0.83576
| 0
| 0.012812
| 0.307959
| 34,287
| 733
| 120
| 46.776262
| 0.82371
| 0.4558
| 0
| 0.730496
| 0
| 0
| 0.017385
| 0.00486
| 0
| 0
| 0
| 0
| 0.010638
| 1
| 0.078014
| false
| 0
| 0.021277
| 0
| 0.173759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5fd04d83cd065c4acf385a9fee056a5fee9b4ad6
| 1,126
|
py
|
Python
|
teaser/data/output/citygml_classes.py
|
linuscuy/TEASER
|
5bba638a6df0dd9c41de9036d42490c24497e04b
|
[
"MIT"
] | null | null | null |
teaser/data/output/citygml_classes.py
|
linuscuy/TEASER
|
5bba638a6df0dd9c41de9036d42490c24497e04b
|
[
"MIT"
] | null | null | null |
teaser/data/output/citygml_classes.py
|
linuscuy/TEASER
|
5bba638a6df0dd9c41de9036d42490c24497e04b
|
[
"MIT"
] | null | null | null |
# collection of CityGML namespaces for file creation and getting versions of inputed files
class CGML1:
    """XML namespace URIs for CityGML 1.0 documents."""
    core = 'http://www.opengis.net/citygml/1.0'
    gen = 'http://www.opengis.net/citygml/generics/1.0'
    grp = 'http://www.opengis.net/citygml/cityobjectgroup/1.0'
    app = 'http://www.opengis.net/citygml/appearance/1.0'
    bldg = 'http://www.opengis.net/citygml/building/1.0'
    gml = 'http://www.opengis.net/gml'
    xal = 'urn:oasis:names:tc:ciq:xsdschema:xAL:2.0'
    xlink = 'http://www.w3.org/1999/xlink'
    xsi = 'http://www.w3.org/2001/XMLSchema-instance'
class CGML2:
    """XML namespace URIs for CityGML 2.0 documents (includes the Energy ADE)."""
    xsi = 'http://www.w3.org/2001/XMLSchema-instance'
    core = 'http://www.opengis.net/citygml/2.0'
    energy = 'http://www.sig3d.org/citygml/2.0/energy/1.0'
    gen = 'http://www.opengis.net/citygml/generics/2.0'
    grp = 'http://www.opengis.net/citygml/cityobjectgroup/2.0'
    app = 'http://www.opengis.net/citygml/appearance/2.0'
    bldg = 'http://www.opengis.net/citygml/building/2.0'
    gml = 'http://www.opengis.net/gml'
    xal = 'urn:oasis:names:tc:ciq:xsdschema:xAL:2.0'
    xlink = 'http://www.w3.org/1999/xlink'
| 36.322581
| 90
| 0.666075
| 178
| 1,126
| 4.213483
| 0.275281
| 0.158667
| 0.224
| 0.272
| 0.810667
| 0.810667
| 0.736
| 0.736
| 0.325333
| 0.226667
| 0
| 0.05274
| 0.141208
| 1,126
| 30
| 91
| 37.533333
| 0.722854
| 0.078153
| 0
| 0.380952
| 0
| 0
| 0.718569
| 0.077369
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
398c632b20d559fe36d3db2fb23bb026f421ab81
| 6,554
|
py
|
Python
|
data/datasets.py
|
linyq17/IV-REID
|
6d9eec0f1f086e9bd55fbbe01647f31571df224c
|
[
"MIT"
] | 2
|
2019-12-12T10:53:17.000Z
|
2020-11-17T11:38:28.000Z
|
data/datasets.py
|
linyq17/IV-REID
|
6d9eec0f1f086e9bd55fbbe01647f31571df224c
|
[
"MIT"
] | 2
|
2020-11-30T15:43:48.000Z
|
2021-06-29T07:52:16.000Z
|
data/datasets.py
|
linyq17/IV-REID
|
6d9eec0f1f086e9bd55fbbe01647f31571df224c
|
[
"MIT"
] | 1
|
2020-01-30T08:17:39.000Z
|
2020-01-30T08:17:39.000Z
|
import os.path as osp
import configparser
from PIL import Image
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
class ImageDataset(Dataset):
    """Person re-identification image dataset.

    Wraps a sequence of ``(img_path, pid, camid)`` records and loads the image
    lazily on access.
    """

    def __init__(self, dataset, transform=None):
        # dataset: sequence of (img_path, pid, camid) triples
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        if not osp.exists(img_path):
            raise IOError("{} does not exist".format(img_path))
        # Retry until the image opens cleanly (guards against transient
        # read failures, e.g. on networked storage).
        while True:
            try:
                img = Image.open(img_path).convert('RGB')
                break
            except IOError:
                print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid, img_path
class SYSU_ImageDataset(Dataset):
    """Person re-identification image dataset for SYSU-MM.

    Items are ``(img_path, pid, camid)`` records; ``__getitem__`` returns the
    loaded image wrapped in a ``{'real': ...}`` dict alongside its labels.
    """

    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform
        # Cameras listed here are presumably the visible-light (RGB) ones --
        # confirm against the SYSU-MM camera layout.
        self.RGB_cam = [1, 2, 4, 5]
        # Roots for auxiliary (generated / greyscale) copies of the data;
        # currently unused by __getitem__.
        self.fake_data_root = '/home/lyq/Desktop/dataset/Reid/SYSU-MM/fake/'
        self.grey_data_root = '/home/lyq/Desktop/dataset/Reid/SYSU-MM/grey/'

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        if not osp.exists(img_path):
            raise IOError("{} does not exist".format(img_path))
        # Retry until the image opens cleanly (transient IOErrors happen on
        # slow or networked storage).
        while True:
            try:
                img = Image.open(img_path).convert('RGB')
                break
            except IOError:
                print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
        if self.transform is not None:
            img = self.transform(img)
        return {'real': img}, pid, camid, {'real': img_path}
class RegDB_ImageDataset(Dataset):
    """Person re-identification image dataset for RegDB.

    Items are ``(img_path, pid, camid)`` records; ``__getitem__`` returns the
    loaded image wrapped in a ``{'real': ...}`` dict alongside its labels.
    """

    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform
        # Camera 2 is presumably the visible-light (RGB) camera -- confirm
        # against the RegDB camera layout.
        self.RGB_cam = [2]
        # Roots for auxiliary (generated / greyscale) copies of the data;
        # currently unused by __getitem__.
        self.fake_data_root = '/home/lyq/Desktop/dataset/Reid/RegDB/fake/'
        self.grey_data_root = '/home/lyq/Desktop/dataset/Reid/RegDB/Visible_grey/'

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        if not osp.exists(img_path):
            raise IOError("{} does not exist".format(img_path))
        # Retry until the image opens cleanly (transient IOErrors happen on
        # slow or networked storage).
        while True:
            try:
                img = Image.open(img_path).convert('RGB')
                break
            except IOError:
                print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
        if self.transform is not None:
            img = self.transform(img)
        return {'real': img}, pid, camid, {'real': img_path}
| 43.693333
| 148
| 0.556454
| 807
| 6,554
| 4.287485
| 0.105328
| 0.099133
| 0.037572
| 0.028324
| 0.949711
| 0.934971
| 0.934971
| 0.934971
| 0.932081
| 0.900289
| 0
| 0.002929
| 0.322704
| 6,554
| 149
| 149
| 43.986577
| 0.776526
| 0.50412
| 0
| 0.746667
| 0
| 0
| 0.14767
| 0.056675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0.04
| 0.08
| 0.04
| 0.32
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
39b465680722226350d311ca5b6c1e9a69db9d1b
| 274
|
py
|
Python
|
Python/t908.py
|
manhong2112/CodeColle
|
9233376608ee57005d563bab5b05541dc1262d90
|
[
"WTFPL"
] | null | null | null |
Python/t908.py
|
manhong2112/CodeColle
|
9233376608ee57005d563bab5b05541dc1262d90
|
[
"WTFPL"
] | null | null | null |
Python/t908.py
|
manhong2112/CodeColle
|
9233376608ee57005d563bab5b05541dc1262d90
|
[
"WTFPL"
] | null | null | null |
# Nine-times-table demo: a tab-separated header row, a labelled 9x9 product
# grid, and finally every product spelled out as "i * j = k".
header = "\t".join(str(n) for n in range(1, 10))
print("\t" + header + "\t")
for row in range(1, 10):
    cells = [str(row)] + [str(row * col) for col in range(1, 10)]
    print("\t".join(cells), end="\t\n")
for a in range(1, 10):
    for b in range(1, 10):
        print(f"{a} * {b} = {a * b}")
| 19.571429
| 36
| 0.470803
| 53
| 274
| 2.433962
| 0.188679
| 0.271318
| 0.310078
| 0.387597
| 0.914729
| 0.914729
| 0.914729
| 0.914729
| 0.914729
| 0.372093
| 0
| 0.07732
| 0.291971
| 274
| 13
| 37
| 21.076923
| 0.587629
| 0
| 0
| 0.75
| 0
| 0
| 0.036496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.583333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
f2f35b4608624f7f49b50c5c2f13c1bba67b7ae2
| 347,202
|
py
|
Python
|
hearthstonecarddetector/__init__.py
|
russon77/hearthstonecarddetector
|
602968c784834b6f06830bc63014fa2fdba9893d
|
[
"MIT"
] | null | null | null |
hearthstonecarddetector/__init__.py
|
russon77/hearthstonecarddetector
|
602968c784834b6f06830bc63014fa2fdba9893d
|
[
"MIT"
] | null | null | null |
hearthstonecarddetector/__init__.py
|
russon77/hearthstonecarddetector
|
602968c784834b6f06830bc63014fa2fdba9893d
|
[
"MIT"
] | null | null | null |
from imagehash import ImageHash, phash
import numpy
class ErrorThresholdReachedException(Exception):
    """Raised when the closest match's relative error exceeds ``max_error``."""
# thanks to https://github.com/JohannesBuchner/imagehash/issues/20
def _hex_to_hash(hexstr):
    """
    Convert a stored hash (hex, as retrieved from str(Imagehash))
    back to a Imagehash object.

    :param hexstr: 64-character hex string encoding a 16x16 bit hash
        (2 hex chars per byte, least-significant bit first within each byte)
    :return: ImageHash backed by the decoded 16x16 boolean array
    :raises ValueError: if `hexstr` has the wrong length
    """
    hash_size = 16
    # 16*16 bits == 32 bytes == 64 hex characters. Using integer arithmetic
    # avoids the original int-vs-float comparison (2*(16*16)/8 is a float).
    num_bytes = hash_size * hash_size // 8
    if len(hexstr) != 2 * num_bytes:
        raise ValueError('The hex string has the wrong length')
    bits = []
    for i in range(num_bytes):
        byte = int(hexstr[i * 2:i * 2 + 2], 16)
        # Least-significant bit first, matching the original encoding.
        bits.append([byte & (1 << bit) > 0 for bit in range(8)])
    return ImageHash(numpy.array(bits).reshape((hash_size, hash_size)))
def card_image_to_id(card_image, preferred_hash=phash, max_error=None):
    """
    Given a PIL Image object, find the closest Hearthstone card match and return its card id.

    :param card_image: PIL Image object
    :param preferred_hash: one of imagehash's phash, dhash, whash, or average_hash
    :param max_error: percent error calculated by (diff(input, closest) / diff(input, furthest)).
        If exceeded, raise ErrorThresholdReachedException.
    :return: card id of closest match found
    """
    query_hash = preferred_hash(card_image, 16)
    hash_key = preferred_hash.__name__

    def distance(card):
        # Difference between the query hash and this card's stored hash
        # under the chosen hashing algorithm.
        return query_hash - _hex_to_hash(card[hash_key])

    ranked = sorted(db["cards"], key=distance)
    if max_error is not None:
        # Ratio of the best match's distance to the worst match's distance.
        error_ratio = abs(distance(ranked[0])) / abs(distance(ranked[-1]))
        if error_ratio > max_error:
            raise ErrorThresholdReachedException
    return ranked[0]['card_id']
"""
when the module is loaded, load our database file into memory. the database is essential because it contains the
different hash values for each card.
"""
db = {"cards": [{"dhash": "468f087c05ef1c98bb605f0fbedc7401c1624084b4827d1ddb3a66560c87d13e", "average_hash": "f921fb03f80ff80ff00fc42ffc7efc7d9833b80018101816980798030108011c", "name": "Pompous Thespian", "phash": "c3f3735ada4bb6a5b7951dcf6ef992d1499266ce818c186609c960ba4ce40ca7", "mana": 2, "whash": "f9a1f987f80ff84ff40fe43ffc7ffc7fbc33bc101c10981698079803010e013c", "card_id": "KAR_011"}, {"dhash": "cae3ccce111c0a3135cf584bb0b6404d86b2314de3aa7c05dd390a708485652a", "average_hash": "779cffb8fbb8dcb18c79e425b56465506d404100c10458465806080101020100", "name": "Cyclopian Horror", "phash": "77c76731ef6173867e98cbb86cb218988c8c1cc40e93cbc473846be49ca1736e", "mana": 4, "whash": "7f9cffb8fbb8ddb98cb9ed75edf665516f414944c154d946780e448301820320", "card_id": "OG_337"}, {"dhash": "88810d1f30c4030074c2703d83fa3ff575c26f84df08fe3fe02f0c7d5bf8ffc7", "average_hash": "0f7c47e0e381c300000000000000f800fc00f801f019f87f807fa03ff077f8cf", "name": "Headcrack", "phash": "f3f53e443d027328c7e18c92032b8cf80b0b3a954b50933c7bec9ccaf39e36f8", "mana": 3, "whash": "ffff47f0e383c330002000801000f803fc01f801f91bf9ffa07fe07ff0fff8df", "card_id": "EX1_137"}, {"dhash": "fcccad39d2f3044679c086de09015356aeac5c1d8932714923b086e06947139e", "average_hash": "07110b632be7384fbccf87dbc343c3734316230723c313cf10c310c212061618", "name": "Trade Prince Gallywix", "phash": "9989bf0ec673fe983e0416911bc9462601e192a1ebeb5bd07c2c36b5db1ac76c", "mana": 6, "whash": "0f530f6f2be7386fbcdf87dbcb43c2534307031f239331df10c3384212879e1c", "card_id": "GVG_028"}, {"dhash": "feb8e84520be54eda3522f93d4c38d171b4d829a04a43f58d923b05748a1f16f", "average_hash": "07c6038e034fc04fc00fe00fe01fc00fc30df001f001f400fc00fc1039001900", "name": "Poisoned Blade", "phash": "7bd35d48ce2636335d1167ec36e4b1b61be784eda22c36b40df926c93324d620", "mana": 4, "whash": "07c60feef7cfc74fc10fe0bfe09fc28fe38ff001f001fc00fc00fd5039007ba4", "card_id": "AT_034"}, {"dhash": "3cc0ec3f217f4afeb5f853f1afe259c7b78e7f1dc62b3c4730b7e16f8389053e", "average_hash": 
"070103808101f007e007e00ff00ff00ff01ff01fe01fe01fe01fc01f801f811f", "name": "Wisp", "phash": "79f8d6508e172f819e810688290001a0129293e1ebf0d3e17f89ffd9ffcabeed", "mana": 0, "whash": "0f110ba093a3f087e007f00ff05ff05ff05ff01fe03fe01fe01fe01fc11f811f", "card_id": "CS2_231"}, {"dhash": "08f20c8ec1598bc5363a4d76ba0c74f1d992b36548cb18163734ea6884813137", "average_hash": "371f9b9dc1ade835783cec3e7c3e1c3e7c3e6c3d183918280020000000002102", "name": "Clockwork Knight", "phash": "e9976b96fe205f4b4b6887a08f0441acc3b086f0329c3a6cbe61dce1cf83ebe4", "mana": 5, "whash": "ff1fdfbdc3bde8b57c3cfc3efc7e3c7efc7f6c3d182918281020000001032107", "card_id": "AT_096"}, {"dhash": "f88ffdff20f00f00fc06904b30b6447cfbd0e6019827326c67c00c0e1f58f8b3", "average_hash": "0700e303f31f807f807f022c06640e003e090e300678067f023f263f1c39f83f", "name": "Inner Rage", "phash": "adf05612f9e5bf8806135e7034220651494804ac4b164fd95b623eb77bcdf73f", "mana": 0, "whash": "0700e747fb9fb57f807f127c06640e003e480e7906f806ff223f263f1c39f83f", "card_id": "EX1_607"}, {"dhash": "4afc8d641291040e387c72f88de85ad1b702e98406ab3d44c2338c6f5a8c8130", "average_hash": "f703f76ffbcd7049784078c0fc02fc00fc03d036000010033017a01ea01c0110", "name": "Baron Rivendare", "phash": "e369f7dafd199be03b16038d96e00958928c28699aa6d34d371936339b27b34d", "mana": 4, "whash": "ff13ff6ffbed705f7c40fcc0fc42fc40fd03d83600217013381fe03ea11e8114", "card_id": "FP1_031"}, {"dhash": "98ffed3c82f104e0bb0b673788d8536db7f263c5fc0ab01508322f64cc899133", "average_hash": "6707c367934ff04f004f00961c109001f801fc01f818301f801fb813b8033101", "name": "Ragnaros the Firelord", "phash": "cbad1dcadd906369b6430ca46f8c13965898c3c926d9ceedcd711e370ca3ccc9", "mana": 8, "whash": "ef17c37f93cff04f045f24be1c50b851f809fc01fc18701fa01fb817f9133903", "card_id": "EX1_298"}, {"dhash": "e83ffde3b091af6f1b9a9e003f63fc80c8d503fd7f02fa0606c81e33f57fec7f", "average_hash": "07008307f11f781efa13f80b726e767c006c00603c61306d10607e007e7df87f", "name": "Burgle", "phash": 
"e34a7b651fc1be382c121680930916ec8c46c738e6660cd363f933c71fc3ddbd", "mana": 3, "whash": "07008307f11d795efe1bfc0f7a7e727c007c2060b461b06d30607e007e7ffa7f", "card_id": "AT_033"}, {"dhash": "1adc6c6fe1864a79b5d6526da49c4f71ba6235c5ca0abf077037846c1899213e", "average_hash": "071183804389400050015003b809bc0b7c03fc0bf808f83bf83bb03f201f011e", "name": "Eater of Secrets", "phash": "eb693ec63c14b749deba993166e082e6a98866202a72b3ecce889b49bf817ea6", "mana": 4, "whash": "8f13cfa1c3a940a05009500ffc4bbc4ffc47fc0bf80af83bf83bb03fa11f211f", "card_id": "OG_254"}, {"dhash": "521cb933a6e778cfd1f4a388c7119c233047678c9e04b919de7346c68d0c737f", "average_hash": "07e001c200c600c293c3d38f9387338e33983389b383f887f0cff18b3702377f", "name": "Keeper of Uldaman", "phash": "d761dc3ea6fea43f6e1a33db3f9126c921bb8680119b08c431cb46d419338fb6", "mana": 4, "whash": "0de001c200c620c29383d38f8387b38e339c3399bb83f897f8dff14b7702f77f", "card_id": "LOE_017"}, {"dhash": "566389d7440cfb1876cbacc65cbcb8295072a0cc07011f0a9c34b8eb725363be", "average_hash": "635e21dfe09fe03e40384b38083818382d383d3838383c2c28a4603e600fe01f", "name": "Wicked Witchdoctor", "phash": "69573d2f59aec77bef6bde8eefc23e4f3036a3400c675b0e21d213c731281040", "mana": 4, "whash": "e15e31dfe0bfe0bf48b84a381a781cfc2d783d383c383c3c68a460bee02fc03f", "card_id": "KAR_021"}, {"dhash": "caf48ccd11828b7d373b4c22b8cc713df3f2868599cb411686376e6edc983931", "average_hash": "f70f3b1df91948186c1ccc1ccc3884382c383c087c03382138383819b8197900", "name": "Enhance-o Mechano", "phash": "639d7f4ee389d758fef039b05ae609a824de184426c69311fbc49b26cf027b2c", "mana": 4, "whash": "f71f799df999d8386c3cec3ecc3884382c38bc0d7c36bc2578393839f8197900", "card_id": "GVG_107"}, {"dhash": "0a81cd3f70fde3e4dfb9b7c27e0cfb01e0038007180f339f423e8efc34f0fccf", "average_hash": "07180340c387e007e007e01fe83ff87ff07f703330232072307218720060e867", "name": "Mark of the Wild", "phash": "b97e7b99fe63e6f52fcb8da14a18c49041380acc0bc132225b137e24fbd27e23", "mana": 2, "whash": 
"071c0360c387e107e20fea5ffa7ffc7ff87f707730722072307218720860f86f", "card_id": "CS2_009"}, {"dhash": "f280fd3f006e0f9c19b2b3600ce139c2d78c8e891c1031277302c6078957fabf", "average_hash": "070073407b84fc46fc4cfe0cfa00f829f271b273ba331803980310031003f00f", "name": "Soul of the Forest", "phash": "e300778edcffbe31fd581e471f3319f821ce0c5c03261ab44bd5135963a8fe04", "mana": 4, "whash": "070c73607b86fc47fe4dfe0ffa08fa29fa71fa73ba333a13980318031007f80f", "card_id": "EX1_158"}, {"dhash": "68cacd26121809e1fbd3b4ece111863f144719cac115b048c1bbd8f6ef0e9f9f", "average_hash": "f71fff76fbf5c8f140f85efa82efb3ce3f821b8202a0000000d800ce820f841f", "name": "Alexstrasza's Champion", "phash": "f9a1b7a63bbbf652db7068e82a8c06cc20e9321039d196016f417f11df13efb1", "mana": 2, "whash": "f71ffb76fbd1d0f144785e7b9effa1ce3f861b820ba0009000d800de828fc41f", "card_id": "AT_071"}, {"dhash": "0cfc0d09130004637896a21c45b086cb1bd37caef144c5089b3126e74c4cf3bb", "average_hash": "f703fb3ffb49f84038c01bc03fc02cc02d802d86258e188e98cc385cb801f603", "name": "SI:7 Agent", "phash": "c32bb68cbbe14916b38c4ce3db648c6d12c6a6b453b6b6e1798c9692e1ad94b1", "mana": 3, "whash": "f713fb5ffb49f8403cc03fc03e802ec02e822d862f8ebc8eb8ceb85cba01fc0b", "card_id": "EX1_134"}, {"dhash": "82c0ed77e0c7039f8c9f17243890402886f05dc1624a02cc37986c21d871feff", "average_hash": "f7bf21fc798e7c3efe0e7e1e3e743e601c60c878681830403c603c21b860fc7f", "name": "Competitive Spirit", "phash": "c7de4be6f9f9fcffbd6796b8694e1a648e522a7109d34be44b1452005b105680", "mana": 1, "whash": "f79f63f4799c7c3efe0f7e1e3e543e401e508c78681838403c603c01b860fc7f", "card_id": "AT_073"}, {"dhash": "00838d1fe0ecc1d919f805e087006f095c68bcd01921e0230ecc03800d82f02f", "average_hash": "f7ffc3e1e183fc03fc01fc01fc03dc478e279627f4030606632019001800fc0f", "name": "Enter the Coliseum", "phash": "935431ae6681bcc4cd4eed0123998e81091642f57b544ad753ad73685bebf39d", "mana": 6, "whash": "ffffc3e3f183fc03fe01fe01fc07fe4f8e27c627f6031607622018003800fe0f", "card_id": "AT_078"}, {"dhash": 
"28ccad18432b0446f81ba6f6490c92fb21dbc73610c56c0820b1cde603403fac", "average_hash": "d73bcf7ba373b467d4c24fd04fd30ff2bff1e7f1cfe00dc004c000c006800600", "name": "Shado-Pan Rider", "phash": "972cbb29fae83b772f904e8c27c630f040d18b06b9bc5799e6247c64d322d673", "mana": 5, "whash": "ff3bcf7bab73bd73d4e34fd06fd30ef2bff0e7b1cfa00fc004c004c002800620", "card_id": "AT_028"}, {"dhash": "8a7f4df8f3863f1ad965d08301c3203c60fa9e4cb91b6036d4e96f938ce43f4f", "average_hash": "0700c71f837fc0ffe3ffa1f30301c7000300390319e701e7036783c720c3f94f", "name": "Shiv", "phash": "b90a2ff5d9ebdede66f51eb0c39c32b92c4294c20bd4a47cbb019e084f24fc84", "mana": 2, "whash": "0700c39f837fc1ffe2ffe3f34301c70007003d0719e703e703ef83c721c3f94f", "card_id": "EX1_278"}, {"dhash": "0a005d1ef0e90cc3392c735ae684cd7192a30b4716be791c7638c8f0b1f1ffe7", "average_hash": "077c07c0038003c420c020c0a0c100e100e000e080e000e600e300f340f9faff", "name": "Conceal", "phash": "b5aa5ef5afd49ee62fd9939c633224ecc9601c9353506e344bc238c8cbc43c2d", "mana": 1, "whash": "ffff0ff087c083c420c630eca0e188e100e051e099e01de781f341f3e1f9ffff", "card_id": "EX1_128"}, {"dhash": "0adc2cf1d1fa8b0535324eecbcc87111a3324c6548cb9297a93676692c92f13c", "average_hash": "f713b3976b9e083f582adc38fc3df839ec195c191c090c20d838180408042104", "name": "Dragonkin Sorcerer", "phash": "23d7fb4cde349f9c33a433e34e6288e8318c99d009e196a47e496666778e6792", "mana": 4, "whash": "f713fb9f6b9e083f5c3edc38fc3df879fc1bfc1b1c290c20d83898142904211c", "card_id": "BRM_020"}, {"dhash": "4af88ce111870b5a36244ec4b84873f1e4b25965868b38174d36d36e209d153a", "average_hash": "f70ff11e791df81ef83cf8181c301c3adc36d81ed8104813c018c01e001a011a", "name": "Murloc Tidecaller", "phash": "eb67e3f0fb387b86dfe43ce01a333126985089a4329af6886b8358dbddc13690", "mana": 1, "whash": "f71ffb9efb9ff83ef83ffc381c381c3efc3ad81ed815c813c018c01e001a001a", "card_id": "EX1_509"}, {"dhash": "88f82d60c283450ebb2976de8d3c707966e2c1049b8b94177937f06e6a9c9533", "average_hash": 
"e70fe37f01df305e30d80c981c181c30383878387c3858385818801a8019a111", "name": "Hemet Nesingwary", "phash": "adaddf8a5b3263c6bf951c061240626083c4f274dba84e691c1eb639cfe3d3a9", "mana": 5, "whash": "ef1fef7f03ff707e3c780cd81c593c783c387c387c385c38d81a801aa11ba113", "card_id": "GVG_120"}, {"dhash": "ea83fdfdc01e86e31f443ff370cc7736d8cce4d91bf3810610c963b00d39fbff", "average_hash": "070003003b1e9c0b9c7d3e7fee71d677127f027a927f1e6e065996401670f27f", "name": "Execute", "phash": "afe0ce5f76bffeeb7cf03ebe215a8c51894393a2130c4e0cc9706d8403c27383", "mana": 1, "whash": "07000300391e1d0b9c7d3e7dee7dd677127b127a127f1e6f065b96401270fa7f", "card_id": "CS2_108"}, {"dhash": "cac2acfc11fe8bcf370f4f32bee07001c00ee4f5c08b0d17b7346a6f949c3939", "average_hash": "f700f1813b19c81ff83ffc3ffc3ff83f6018103818381830180d081d0819111c", "name": "Bloodsail Raider", "phash": "e999f3367e89b6b176e8639a4f6638c818660926cf02fec0ed44cf02ff016f12", "mana": 2, "whash": "f714f991bb9fc83ff83ffc3ffc3ffc3f7039103818381830382d181d1019111c", "card_id": "NEW1_018"}, {"dhash": "ea87fdff60a58f791a07341e68fcd0d8b1f179e3c34287870b0e1e19fc64f2df", "average_hash": "07200100f31a4c1e8e531c511c501c580c583c5c3c1c1c3e1e7e1c6cf04ff04f", "name": "Reincarnate", "phash": "4f2a3e3dfbf06e65f2c28e9283b911c20a6661ea4f503a8d0f07fb017f63b305", "mana": 2, "whash": "07300340fb1e6c1e9e5b3e511c501c584c583e5c3e3c1e3e1e7e5c7ef04ff05f", "card_id": "FP1_025"}, {"dhash": "c8f80d5302c6c901fb3ab7c5e877838f1c47231a463498c820b987f03701ffaf", "average_hash": "f73f7f7ffbffe5ff0cffc6dda6c363c263c8a3c883c002c000c000c003800623", "name": "Siege Engine", "phash": "7d68e9868b94ff62fb449ea00b812c4382dcb604b9f93e9173867686cdb99e61", "mana": 5, "whash": "ff3f7f7ffbffe7ff8cfbc6dd26c363c223c883c883c005c004c004c007a00623", "card_id": "GVG_086"}, {"dhash": "aacb2c27919d4233b5e073e6a79c5373bc4e4015886acbc5ffb935722b86472c", "average_hash": "d797efa3fba1f5a1fc40f8019d438943015301180114005c804e8007c1064104", "name": "Wailing Soul", "phash": 
"7bf3b7057f566cac9f84d94e26a441e0016190b00d736cc3de866f13f708e7ca", "mana": 4, "whash": "dff7efa3fba1f7a1fce0bcc19dc789c74b538150015401dc804f858fc1864325", "card_id": "FP1_016"}, {"dhash": "663e18f001e097012f0e5e1c8c7839a5714aa2c4d499213306660cc0f887ff3f", "average_hash": "cff7e39f803f003f181f005f0038d01cd03c801c000e000020002000e006fd3f", "name": "Forgotten Torch", "phash": "e9f7d98adb0c963237f3b688970db6b38576cc8ea518a632a746a200a6ddaa15", "mana": 3, "whash": "cfffe39fe33f013f197f117f017df17cf13dd01d881e200220002100e107fdbf", "card_id": "LOE_002"}, {"dhash": "8ae40ccd41308b4cb6b84c87bd7a7b85827a6495cdea33954532fa64e6891933", "average_hash": "f71fff99eb9b603b203040110438403d8001000000117c137810f81098111911", "name": "Tentacle of N'Zoth", "phash": "a3ad797cf395dbd87bd313c29ef112b2cce00918cc2c1b2c3b4863076722be63", "mana": 1, "whash": "ff1fffb9ebbb6dbbc43244310578517f814931509d517c537c11f81099113911", "card_id": "OG_151"}, {"dhash": "ccd86d60938d0d3376ec2cd15906e7fc1adb673182cc3c0b413cf4e0c345072f", "average_hash": "ff3f7f7e3b7c1df81cf0aff187f107f067f8f7d86fd01de01ce000c004800620", "name": "Void Crusher", "phash": "a7bceb81ca03636e79509c949314716084c31c69f9f1839b5c3be7d1cbc27cbc", "mana": 6, "whash": "ff1f7f7e3b7c1ff81cf0aff187f146f067f8f7d86fc01de01ce004c000800600", "card_id": "AT_023"}, {"dhash": "7a80fd3f00f701c09bbe375f78e451f8bfe0e0c1170b87f40f0ffc12e06fc07f", "average_hash": "070013403f01fc030604661c7e38be107c0c3838b0067042fc60fc10fc1cfc3f", "name": "Quick Shot", "phash": "63ee1ebdff77fc438dd137ac3ce028e01a4a0374235c4b8c5b039e255f0c6712", "mana": 2, "whash": "070813407f85fd030e44661d7e38fe18fc0d783ab826f042fc60fc51fc1ffc3f", "card_id": "BRM_013"}, {"dhash": "2ac4255a42f6c92c7fb3a45c5979b2cc64d2c9e61fc57c0af1309ae334476b38", "average_hash": "87100d68011340bd40de82f3d2f573fe7d327db1ffa1fca1dcc1688260084600", "name": "Brave Archer", "phash": "a7cebdf95ecab757eef41ba32791704c08f261101c918ac19b045f896f6293cc", "mana": 1, "whash": 
"87100b78035340bdc0fe8af7dab7f1fe7d327fb1ffa1fca1fc83688262084600", "card_id": "AT_059"}, {"dhash": "5afe24f0d1c40b39b7e44c83b9165209849a40f5a7cbe1167235786b2e92353c", "average_hash": "8703e58fc99fd83fc03fe03b8037601d000a0000802030281800c80968043114", "name": "Secretkeeper", "phash": "b9fd19655b97d6f376bcb2e8b33010c346a126e78e84c640bd09de8c3be29823", "mana": 1, "whash": "cf13efafcbbfd8bfc43fe43fc57f615f014a004091303c683c18d82d6905391c", "card_id": "EX1_080"}, {"dhash": "88f80cf2010c8b1136e34cccb9187637ef0e441d98ea6f4410b8e1634b869738", "average_hash": "f71ff39ffb9fd83fe831983390339036803b001b0002000100000004000c0110", "name": "Mind Control Tech", "phash": "f9b5eb244b1adbccefe016cab60d30c833a65a7299e11bcbcde366e061099664", "mana": 3, "whash": "ff9fffbffbbffcbffc339c339d779177d17f015b0142094100000084818c0118", "card_id": "EX1_085"}, {"dhash": "8affedff33628f853c41780cf890d67e81f9eac171130741638fc41f12fc7cfe", "average_hash": "0700010ef33f487f387718793078007c8e1d3e3e3a1f320fb80b0c0f0c12907f", "name": "Wrath", "phash": "89e8fdba7ade5b402e372ef9a6c006de30924cace32116654943eb72f3d07629", "mana": 2, "whash": "07000347f33f787f787f18791c78007e0e3f3e3e3e1f320fb80b0c0f0c16d87f", "card_id": "EX1_154"}, {"dhash": "cade6c7181460ab8360f57dcbff053a1ac02fb25e4cb46979d373a6d64968939", "average_hash": "370023835b0e180608073819381ff813dc17c01f483f683ee83ef83c501d1119", "name": "Evolved Kobold", "phash": "69c7de307ec97f36bf69a7a42c1930f282a4c4d061f247c2be84196573863ccb", "mana": 4, "whash": "a7122b8b5b8f38060817b81bb81ffc57dc17e01fc83fe83ee83ef03c501d511d", "card_id": "OG_082"}, {"dhash": "4cf08cc681990b33377e4cf8b9006229cc529be5318b4f12df34f46cdc997932", "average_hash": "f71c7b9cbb9838381838fc39fc33fc323c203c20382018203830383030137918", "name": "Emperor Cobra", "phash": "a7757f2179a15e127336b3e4338b0ca92d72999433639af93f879e391882a318", "mana": 3, "whash": "ff1fbfbcbbb838b83c38fc39fd73fc723d603c2039201820383038303933f919", "card_id": "EX1_170"}, {"dhash": 
"18b02d467299fd03723f846fc19d987b1fd779acc24005891e30bee07c88311f", "average_hash": "076f81ee83cd80d000d003d08bc01fc00bc06fa0ff987dc878c078c07b003f10", "name": "Shielded Minibot", "phash": "572f1df2e3c0597c9fc6c3826f60c29488c093251be6bac9df09a7853b1adb31", "mana": 2, "whash": "877f8bee83dd80f800f003d08fc81fc02f826facff9cff987cc079c07b007f10", "card_id": "GVG_058"}, {"dhash": "eaf20dc552080d70fcc8e1bd0701e43a0de5621e07781cc07239c1f20f449f1c", "average_hash": "f73fff7feb7dfdf914e203c203e303e41bc413c0e3c187c104c000c000800600", "name": "Druid of the Saber", "phash": "f7b3eb912b797bbb4f8298481ec0c4e002d136187e0c5f217e8e3c84bf03dbc3", "mana": 2, "whash": "ff3fff7ffbfdfdf914e207e60fe70af41bc41bc0e7c187c104c004c204800620", "card_id": "AT_042"}, {"dhash": "d8e4adc9c29005333b6e6ed89808709184e200c5d00b3907f6b67869bf96092c", "average_hash": "c71fe37fe1db987d9879bcbd343e583f8803f0017830383000300000c0084908", "name": "Captain Greenskin", "phash": "a99b39297e9bb6285b4a4d9067911a498c04c7e66ab2339b9f8976e69e234db2", "mana": 5, "whash": "8f1fe77ff3fb987f9c7dfcfd7c7f587f881ff801f830383000300001d1084908", "card_id": "NEW1_024"}, {"dhash": "8cf80d61d28f8d2ffb7de6e10984f7443f1df87ef0f9dbc3273f67f4ce481910", "average_hash": "7706134e0b4c081c209ef4bfc723a017e09f40be013e103d98bf881998181818", "name": "Dark Arakkoa", "phash": "4909ff68e683cd90f3a4933966e0712401d83e3626463b89efb944e77b1b3e66", "mana": 6, "whash": "77161b4e0b5c085c80fef4bfc737e4b7e0bf42bf01be103d98bf887d98181818", "card_id": "OG_293"}, {"dhash": "01c0cf1fb0b80af638e4519b9f0636c9601a933006e69c0ee0df437e8718fe33", "average_hash": "ffffcbf0f1c3f823e803f0077d1f0d1ec530072006400660023806f80ff00f38", "name": "Holy Fire", "phash": "2d7cd303b5093cc24fb6cd08d35824ac23060faee94197f553f0791b73ee39a5", "mana": 6, "whash": "ffffcbf1f1e1f863e803f807fcbf0d06c534079006c006c002f807f80ff00fbb", "card_id": "EX1_624"}, {"dhash": "9af8add302c63d187f75cec698097107623ecced1153038b723287e11c0c7330", "average_hash": 
"470a215e215eb01e80bcc33fcb3fc13f033b033b23be3198088018cc32003200", "name": "Archmage Antonidas", "phash": "ed0d39ef46eade7cbfcc11db3f100cc7107143829b86336d15db13654f225614", "mana": 7, "whash": "470f235e235eb07ea83fcbbfeb3fc33f033b033b23be39b818c318cc3b003300", "card_id": "EX1_559"}, {"dhash": "08f7ac0c6197423cb1f85669acc25aa5b7422485db2a91456cbaf46403839738", "average_hash": "c710c39943816009e019e01ef01af81bfc1f3809e00630132002a00200068110", "name": "Southsea Captain", "phash": "736977ca9e9696c58f3771a078b0031a23cd74cce2e4dbbe9b368c9b828918b4", "mana": 3, "whash": "cf96cfb963a17589f419e01ff15af95bfd5f7d59e1567153a012a092a1068110", "card_id": "NEW1_027"}, {"dhash": "dadc243181800a3f351b5ca6b95c63f9c4f201c5028b3f16b9361b6f769e7938", "average_hash": "37936db739ac18280c184c100c301c721c6018001c20dc20dc38783c791c791c", "name": "Bog Creeper", "phash": "e7df9efc6b9996f3dfa83986b63048889263cc612e4036926f01db44db226ec8", "mana": 7, "whash": "3f977fb73bac1db80c384c300d701d721d701d401d60dd60dc3c78bc79be791c", "card_id": "OG_153"}, {"dhash": "e8e1cdcb9216846939c7703c8ff85c69a382d04463cb4e148c315873b085413a", "average_hash": "37003b787bd8f840fc403c82d81ef01ff8177038780c780478047000e0000100", "name": "Eydis Darkbane", "phash": "e3ce8f2136616e98f93859368e13925cd33079c22ce6d394d6996cdb0cad09e7", "mana": 3, "whash": "3f1c3f787bf8f860fc423cc0dd7ef97ffd1f7d30790c78447c04f001f1009100", "card_id": "AT_131"}, {"dhash": "aabfddfc31852f105c00b03d2767ecdc80b11be32d5273a48c8f2482911dec2f", "average_hash": "0700870ff33ff07f807f0262466646640e601e606e616e1ef27830086400f00f", "name": "Commanding Shout", "phash": "bf4abdf26b7d179d16de7e6890868633213e269c61614e0c9b093cd24ba49f31", "mana": 2, "whash": "0710c75ff3bff1ffc07f026346664e644ee01ee0cf716e5ef668b0086600f00f", "card_id": "NEW1_036"}, {"dhash": "9cce8d1d023bc464b9a3716183da4715b4320e753cead9c5bbb163608787072a", "average_hash": "ef50ffe1bfe1f5c1ecc0ffc29f41d7c18f400701874181c584c1c1c305830320", "name": "Sylvanas Windrunner", "phash": 
"973be728ff189f816e6411614e94024c218c12f83bf3cecc5fe7ce636c632627", "mana": 6, "whash": "ffd1fff1bfe1f761fcc0fdc29f41dfc38f400741874181c584c7c58305a20320", "card_id": "EX1_016"}, {"dhash": "68dfec2e91f14b9eb7f84ca5a966560999f233e5c78a8f153536de6c689bf93e", "average_hash": "0700818093813010e018c03bc017e007f80ffc0cfc1cf8187831b812b816f91c", "name": "River Crocolisk", "phash": "e3b73cdcae35479b5e62948429eb0c60659846b486449ec137493f9b7742fe2c", "mana": 2, "whash": "0f108fa193a130b0e038c03be457e84ff84ffc0efc1dfc18fc3db812b916f91e", "card_id": "CS2_120"}, {"dhash": "0383cd3e70e891d03f014e4c9c98392173c28904030d261b1036626ecd98fa3f", "average_hash": "ffffd9ffe3cff81ff81f7dbe391c19381118410040004000000000000100e183", "name": "Resurrect", "phash": "61df192a399e96e4cdf9a6e9636c2c3c4b3222914b945e955bc97ba05ba25b28", "mana": 2, "whash": "ffffdbfff3cff91ff87f7dbe39bc19b9199d719040804180008001800180f18f", "card_id": "BRM_017"}, {"dhash": "9cdf2c3ee1fd4ac3b7804f53ba3e7971c2c20485730acb14163b68767088c136", "average_hash": "c713e7b783afc02f703e003c043f1c7f783978037804780c701878107910c110", "name": "Silverback Patriarch", "phash": "63dd19443e06d38a9f852d8d5eb2e2e021499be618eb36fbedb36d25522a6360", "mana": 3, "whash": "cf97cfbf83afc4aff03e003c057f1d7e7d79794179447c4c781878107910c110", "card_id": "CS2_127"}, {"dhash": "b88d6f3bc0ed38b37344e71886e11e436996022cc8d090892332cfe5998d3310", "average_hash": "47e043e243c1b8c1f8dd3bcc3bccebbea3bd439d0399019800c011d6131f1719", "name": "Twilight Darkmender", "phash": "dd9537c696e03c0c3f1446462ec9a19921314a2756ba2e19ff373a856b639ed5", "mana": 5, "whash": "4fd04be043c1b8c5f8dd3fdd3fccebfea3bf439d0399099800c011de131f1318", "card_id": "OG_096"}, {"dhash": "1cd9ed3e022344ccb91963f19f06772fa46ae0dd1babeb571637266ccc98f937", "average_hash": "e707e366bbc7f043e0c64084e033a037801b80187024703e3838183018100913", "name": "Illidan Stormrage", "phash": "fba45b2a87076f129e01259072cc06c3192c921cab49c7b9dfd48dadf9b8b35b", "mana": 6, "whash": 
"ef17ef7ebbf7f443e44744e5e173a17781378019f126707e3c38383019104913", "card_id": "EX1_614"}, {"dhash": "c8fc8d6103c74c007b26a01d4cf799022705089e1174afc8cb3326e4c443bfb4", "average_hash": "771f3f7f737f707e18f813c073c8fbd983db03c223c003c018d008c002840604", "name": "Dark Iron Skulker", "phash": "dda9eb58cb9afe0c3b00268116309c8521e5720a66e65f72ffa639e73632cd32", "mana": 5, "whash": "ff3f3f7f7b7f747f9cfd17c077d8fad983df0382238001c01cf00c500a8c0600", "card_id": "BRM_008"}, {"dhash": "6ce44998923905f33a84650a9b7c6a91c742f704e84b9897e037136f2c9c1938", "average_hash": "930f130b99179813b017b00fcc06f802f803f00be80df81fc81fc81ff81f701e", "name": "Icehowl", "phash": "c9ef96b0cd813b49f6480d7e6b1899b1c26463361e7e1b356e1263cb93191d96", "mana": 9, "whash": "870f115399179853b817b81fcc0ff802f803f80be81ff81fc81fd81ff81f701e", "card_id": "AT_125"}, {"dhash": "8af905e202800532bbc86c119b0044e7980e3c1d002a8cc4c8b181636383373c", "average_hash": "f70ff15ff15fe03fe01ff03fe03fe00fe00fe00fc007c003c000400020042104", "name": "Baron Geddon", "phash": "79fce9a967b7726acf5236d69a90661834698e842b9613c6d624dab03c09772b", "mana": 7, "whash": "ff1ffb7ffbdff07fe07ff0bff87fe01fe00fe00fe007c003c00060002104210c", "card_id": "EX1_249"}, {"dhash": "6afb4cf491e38b05363f4cb2bc747ec9f092c125a6c90e937936e36c86991532", "average_hash": "d702f31d191e383e383c78387c386c3c6c3ecc3ecc3ce831c833881380112000", "name": "Chillwind Yeti", "phash": "2935cfc2dae133f26f6833cd36d81826c2980cce9b649ae4cb249e37b6619e66", "mana": 4, "whash": "f712f99d199f383e383c7c387c3a6c3e6c3eec3eec3cec31c833c833881b2010", "card_id": "CS2_182"}, {"dhash": "7a00fd1e30ffe3ce076f0cbc143e6b608ec01c001b80f732df8f3cf3fde1d087", "average_hash": "073013803b003423fe1dfe09fe0b7e033e033603c003e003f065f070f800fc07", "name": "Ancestral Spirit", "phash": "c3700e55f4fffcea8f2d65821a4f2db00938384dc9f0c26c9ec7c7036b017727", "mana": 2, "whash": "073013c03f003d23fe1fff1ffe0ffe073f073e03c003f003f065f071f810fc0f", "card_id": "CS2_038"}, {"dhash": 
"eadf8527027c3d187ba2c5608c9971735f46b14d62934648bcb1d8e2ff07fb2f", "average_hash": "f703f163f941f045f0b87b9c8b1d0301b32b4300438063c060c021c07b00f30f", "name": "Flame Leviathan", "phash": "738b33f7f3f8d6dc9f59ccb92fc4086611301e031a6a6b0ac43c660c3ca39b79", "mana": 7, "whash": "ff13fb63fbc1f06df2f9ff9c8f5d3b01a32b4b00438063c020c021c07b08ff0f", "card_id": "GVG_007"}, {"dhash": "eaceac1d41f30ac4b7914f61b99e661dd972f2a5fd4b3297f5b682690496c92d", "average_hash": "1710038303830007001dc01e803780278c3c0c3e2c3fe83fc03fd00fc807c105", "name": "Midnight Drake", "phash": "9997fe2cf6c9cf9d763371e26ca446e222b2c24c99e106c66958d6d8bf803989", "mana": 4, "whash": "1f100fb303a300b7803dc03ec47f84678c7f8c3fac3fec3fe03fd00fd90fc107", "card_id": "OG_320"}, {"dhash": "cce08c8111230aceb57c5793ae645df1abaa3015410a9a45a6b94e76b9842139", "average_hash": "f71ffb9ff99b780278083c1d2c1d881dd00f7008700020013001300870006100", "name": "Sunwalker", "phash": "e3d9c331f705fb8039861998661861e018e61666de0c93bc7d73d9cbed81f38c", "mana": 6, "whash": "ff1fffbffbbbf8827c3cfc1d2d5f8c5fd14f704e710830411001301971007108", "card_id": "EX1_032"}, {"dhash": "3adc393042c3cc86b91c77718ac274bf593aa4f4c08b87170d37fb6cb6903f21", "average_hash": "cf7e4f7f0fef05ee24cc63fc037803fc03fc0d3e1d781df80c780db005a00320", "name": "Deathwing, Dragonlord", "phash": "4dfe5ba29f0c8f473fd326e566f1207f247b26f32ea773c107811a411b0087e1", "mana": 10, "whash": "cf5f4f7f2fef05ee24ee65f403f003fc03fc0f7a1f781df80c7805b005a00320", "card_id": "OG_317"}, {"dhash": "161c817124e77908f218e661cc93196735ce669ccf30974938b361e49f00437f", "average_hash": "fbe73bc700c6c4c603834380c380e3806380338833886088e0c801c10300077e", "name": "Medivh's Valet", "phash": "7529d9ae89eef33dff8a36dbffc8b60121ee326301f762448cac227888990e11", "mana": 2, "whash": "fff77be730d6c4dec7cb4780e389e388e38833887398f0c8e4c811c123804f7e", "card_id": "KAR_092"}, {"dhash": "ca1ffd7fe0b003c3dd9a37fe7fb8e540fe11f1a343468e0f04ce38900130fb66", "average_hash": 
"0700e107f103fc03f8007e007e75fc797c7e0c7e247ce07c207e003e00442006", "name": "Frost Shock", "phash": "a1aa92fd7a752dfe8f1d6c9226ea0c66a3e30eb90f5d0e8562cc362259c3ec44", "mana": 1, "whash": "0700e307f103fc03fc007e407e7dfc7f7c7e2c7e247c607d607e007e005ef06f", "card_id": "CS2_037"}, {"dhash": "0af86de292c40d00ff23ec0e0cf5f9c2331d007e09d8f082873a1ef7784ec314", "average_hash": "e71fc37fcb5f807f00fe0afd1b3864bcc29d028801800904188450dc600c0004", "name": "Klaxxi Amber-Weaver", "phash": "6987692bebf2dfce7b48c7389e9010b008d462e08ef1bb81b386567c7c41fe64", "mana": 4, "whash": "e71fcb7fcb5f807f00fe0eff1f7c4efcc2bf028d0380092e38d450dcf01c800c", "card_id": "OG_188"}, {"dhash": "12163d7924cb6894f13987364c7990e327c64d18c3440c5b3bb260a2814d86ff", "average_hash": "c561c1c1808d608d428c63847a907890799173926298c090d081c003c203e47f", "name": "Tunnel Trogg", "phash": "7391ce9e19fab67de7ecb2ab2d86fa9805f36646a9e20e76254c6632146106c1", "mana": 1, "whash": "cf41e3e1e2edf48de28d6b9c7ad079d079d37392ea98c098d081e003e203e4ff", "card_id": "LOE_018"}, {"dhash": "0af038884111c323b64e4dffb91a6325a4cacc350b0bd446a9b10f633987d53c", "average_hash": "f79f6fbf07bf01b10431043001730173207070387128004c000420043104211c", "name": "Sea Giant", "phash": "eddc9dc55b024b4a7b2819e667d662fe10b393efb1d973f8272113609b809360", "mana": 10, "whash": "ffdf6fbf3fbf05b504f1057007f301f323707178797849ec004c34843586233c", "card_id": "EX1_586"}, {"dhash": "4ae68dc802040d2cb8c575d08f0a723740eebddcfb6b0cc70836536c28891130", "average_hash": "f71df77ffbdbf0d860c03c800411003400200028c03ec03ee010781838103900", "name": "Twin Emperor Vek'lor", "phash": "e3e9fbd963697942ded31bb24bb24618761659a26ec44626cf646c849904776c", "mana": 7, "whash": "ff5fff7f7bfbf47a6c413ce41559117019a0012ad13ee87ee830781879103900", "card_id": "OG_131"}, {"dhash": "2cc32c8841428b36b6c44c01b0da6375cfca703d81aa22c75fb43b6836812d26", "average_hash": "fffeffbfffb7edf974f83ff01ff007f0177f03fe03f001f000e00de00d800b20", "name": "Ironfur Grizzly", "phash": 
"adf473855b0759e2df4216422761497868f00e6f0ca3d9f2bfd0f67c6848d2ca", "mana": 3, "whash": "fffeffbffbb7eff174f935701bf001e0177f01fe03f001f0007005a00d800320", "card_id": "CS2_125"}, {"dhash": "28decc3801f70a6cb58c5379a6825c25b10a7365c68a3d955936c96c1499613e", "average_hash": "871f838f010760076003700cf00cb01fb01cc01dd818f817f817f813f802711e", "name": "Ice Rager", "phash": "6b2dded89f441d236d33c3b06e6230c889ec33654bda7676ae698dc999a82632", "mana": 3, "whash": "c71fcb9fc9876007700f700ef004b05fb01cc01ff81af81ff837f817f912791e", "card_id": "AT_092"}, {"dhash": "6af9ccf091e78bcf37975940a69848e5b71a6e65b0ca6114833932730c863928", "average_hash": "971e1f9e1b9f983ff81ffc07f403f001f401ec0f9c07dc061c00980018001900", "name": "Faerie Dragon", "phash": "cb67d964ce415fe77ee806f20ba6316020a21c10bc08ff98bf03ee0c7f8cbe2d", "mana": 2, "whash": "9f1e9fbe1bbe98bffc3ffc07f447fc45fc41fc0fdc07dc071c00980419001900", "card_id": "NEW1_023"}, {"dhash": "8ad6a42c41c18ab435e85b11b6026d9da232d8f599cb36974136996463818725", "average_hash": "f79fedbff9affcadec3ff41f404f406f083104300d330c3304384010c1004105", "name": "Goldshire Footman", "phash": "b9dddbdb73b1afb57ed26be0cf8426a0248046426e8433319b45d9523b86ce89", "mana": 1, "whash": "ff9fefbffbaffcbfec3fec1f554f416f095105700d730c730038401081004105", "card_id": "CS1_042"}, {"dhash": "0a87dd78f060c34b3c8e62796f80bc36c7fc3b33b62a0cdc09e3e292e538f9ff", "average_hash": "077e07e3038f1b261c462c02a60002200262c6f6c5ffc0c3d0001a000220f27f", "name": "Gang Up", "phash": "df66de1cefc939e6bd1826d9113e86a6a0460db161e03e48cb853e075b0aff0c", "mana": 2, "whash": "4fff87f333de1ba61cc62c02a6c2826802e2c7fec5ffc5c3d1001b000b30faff", "card_id": "BRM_007"}, {"dhash": "cace043b013e0af035285670a82c5ed9f9b2f745e94b26970c364c6c128d7538", "average_hash": "f711f981f980f801b80c7c0cfc0058107c387c3c3c3e383e281f601f201e011e", "name": "Abusive Sergeant", "phash": "63dfb3f3fb79bfd43ee39ea44d30249999c1902412c656466c604c1bce10ef64", "mana": 1, "whash": 
"f711fba1fba0f801f81c7c1cfc527c5c7c387c3c3c3e383e281f601f211e211e", "card_id": "CS2_188"}, {"dhash": "ea3ff8ff61c0ef011f5e3c3e7a64e4d889b1f367e7821f0dfe1bf03f0060187e", "average_hash": "03008107e00f303c183808726c7468647824783c783cf819f81ff81ff007f81f", "name": "Ironforge Portal", "phash": "e18edcfcba7173df1e9eb6eca6ed8cf3e6616438691ab2660906190293a502f0", "mana": 5, "whash": "05008107e01f307c187c08766c76686678647c7c7c7cf819f83ff83ffc0ff81f", "card_id": "KAR_091"}, {"dhash": "7ad94cf6d1f30b513700524eacf85d39b2b247453a8a7c17f83780680093112f", "average_hash": "8703c187fb1ff81ff81ff80ebc08fc0fbc09c801d807e007f007f003b0062107", "name": "Pint-Sized Summoner", "phash": "935f3fa5fcb43ec69ec91e6096a002600978d3e0b68d6e30fb009bd9bf09e633", "mana": 2, "whash": "8713e98ffb9ff81ff81ffc0fbc08dc0fbc09d803d807f007f007f00bb00f2107", "card_id": "EX1_076"}, {"dhash": "8af30dc59283053b7a46a4895826b14b48f780ce8735094b52b0b6e67f49b7a2", "average_hash": "771e3f7d1b7f187b98fb5bf907f807fc07f00be033f011f000d000d002002602", "name": "Southsea Squidface", "phash": "eda56b8bebea6e127b1e6de463d031ce107c222c52ae3e4439a592869ba6d2f0", "mana": 4, "whash": "7f1e3f7d1b7f397b9cfb5ffd37fc06f80fec0bb033b011f000f0007002a02602", "card_id": "OG_267"}, {"dhash": "3cc02d20028fc4f8bb9367419c9670bdc542811c822b184770b4816b0f87373c", "average_hash": "870f07740be08458c0fe08fe847f857e803fc03ae03ca039802b000e010e0114", "name": "Onyxia", "phash": "3d363f8ade0ab3c0af1499106b814be633513e303aa8cf9b4f934ddd298953ce", "mana": 9, "whash": "8f570f740bec847cc47f0cff857e857ea93fe13fe13ca17d803f008e018e011c", "card_id": "EX1_562"}, {"dhash": "e8d90c3211f08a6035455b88b4b86867d68ecb3d86e938c323b6076cd6982d3b", "average_hash": "f71ffb87f987f8079c032c09046030603020e03080310030003800300019011b", "name": "Blood Knight", "phash": "b95dd342db85b1b4fbd04cf1199b068cc9681c733ca6de5cc633396b94861ca5", "mana": 3, "whash": "ff9fffaffbaffcafbc873c2d156931687172e17081710171003804b001bb011b", "card_id": "EX1_590"}, {"dhash": 
"b8b76d4e8395047bfbe4a6984d41b006616f841e89b530cb45b609e717484fb0", "average_hash": "c718eb6ddb4fd073f0f9b3fb23fb82f803fc03be03ba00be00f6005c001c0604", "name": "Shady Dealer", "phash": "2da1b34b5ada7cd62e824d8ccf01663986f62e64cee4cf999f99d87149234472", "mana": 3, "whash": "e718cf6ddb6ff073f4f9b3fb22fb82f803bc03be03be00be00fe007c029c0604", "card_id": "AT_032"}, {"dhash": "0a80cd7f30fa43e097c37f0e35b3ec0cdaf3a0c74e6f3a9f4d5e9abcf07b0ee4", "average_hash": "0700030081014007001f081f002e006d0c7c0c7c00780c7b487efa7fe07ff47f", "name": "Soulfire", "phash": "a9de5ef9b3a5b66f4f5acda68c7d29860dd83190c683192d63013333d39059e1", "mana": 1, "whash": "073803c0c3034017003f083f007f046d0c7e8c7c147a4c7fd87ffa7fe0fffc7f", "card_id": "EX1_308"}, {"dhash": "00c0cd3fe086817daf7b1fc3bca4d10832036cf8d9e18115896b12d6a42ce919", "average_hash": "f7bf03e001804018e81dec0ffc3b8c59049f001f060f9c5e49de595c5b58da1f", "name": "Duplicate", "phash": "4989b43c66c3dc2c3d99a7860b1068c10beda7c8d8dd4f6753f25889d3f7456a", "mana": 3, "whash": "f7bf02f0518c4018e81ebc0ffc7bce5b041f021f0e0f1e5e485e595c5b58da1f", "card_id": "FP1_018"}, {"dhash": "929e0839a1fe1cbc39f157c62b8147129b2468fdd082c31d44ba0c633f95f17f", "average_hash": "e7a1f3a3f107f847e807e807f807e007d80f900618023818000c200e1102817f", "name": "Huge Toad", "phash": "d3de33d6ec54ce79bfad279c2dad1a66219bc6f848a206e105920ef025a4cf16", "mana": 2, "whash": "efa1f3a3f6a7fc47ec07e847f847f847f84fd0473846385c201c200e1102c3ff", "card_id": "LOE_046"}, {"dhash": "0008ddfff0ffe36e940c2ad081f03fa1eca313872f48ffd1ff27ff0fc217bcbf", "average_hash": "073003c0030fe00ff007f001f803f803fc67f801fc01f400fc01fc039c07dc0f", "name": "Lava Burst", "phash": "835ffc800c07e66cad0306e8623c2782a9f49a39e38dadc46bfcda0f7be41c37", "mana": 3, "whash": "0f7807c0839fe11ff007f803f803fc03fc67f801fc01f401fc03fe0fbc0ffc0f", "card_id": "EX1_241"}, {"dhash": "03c00d33f0e4d981bd07531fbc3e78f9d072b6c708ce331c66314c6a8190ff27", "average_hash": 
"ffff0bfe03ce003e006e01fe01fc1dfc1dfc1dec00e708c380c3a0e101e0f1ff", "name": "Embrace the Shadow", "phash": "cd5f4d22f3e497ad679737b983b023da034db0f05b485b255b505b905b025eaa", "mana": 2, "whash": "ffff0bf603ce003e006e01fe01fc0dfc1dfc1dfe08e708c380c3e0e101e0f1ff", "card_id": "OG_104"}, {"dhash": "96392162648cf9497390a620dde10bc717077e0ca8305c48f8b1e3e7ef0f0b3f", "average_hash": "c3c7e1cfe0cfc08ac39a639a6398e39be38be38fc387c087c0838083c31ff71f", "name": "Ivory Knight", "phash": "7945656e89fad09aab19d43eef44bf0700698c11849f1252d68c7bcd9e0f63f2", "mana": 6, "whash": "c1c7e1cfe0cfc08ac39a6398639ce39be38be38fc387c087c083c083c30ff71f", "card_id": "KAR_057"}, {"dhash": "bac0ec37c1640ac8b4da53f9a5a242c7bf2e0edd58eae297c932866509820d34", "average_hash": "47944fb47ba6dda64442c043e341f140f1579103110308478c17901781070104", "name": "Bloodsail Corsair", "phash": "dbe677d60f97dfe577b139980ec819e10586c2188b613343fbc05618a7093bac", "mana": 1, "whash": "4fd44fb07ba6dfa644c3c543e3c1f1c0f357914b11470de78c579497858f0326", "card_id": "NEW1_025"}, {"dhash": "c8c30d2662ccc531bbe46441819e5e6173c2c894950b4b175e36986c92993533", "average_hash": "7700fb60c3c4c04cb4f9a4d384433c08383b303c3834783c703e603220132113", "name": "Gruul", "phash": "e73d37e3764ce794ce495360879028ed63591ee21eb238ec7f0d4e633c0b908c", "mana": 8, "whash": "7f10ff70e3e4c47df479a4f38547bd5a393b383c39347c3c783a603a21122113", "card_id": "NEW1_038"}, {"dhash": "6af04c2091d00aa7b7184c7fbf7c71c9e002c10500aba5467f32b46ec89f3138", "average_hash": "d71fd98fd9851827303fc834fc3e783f6c383010100010005012b012901b3119", "name": "Lowly Squire", "phash": "e9f75bd3dbf3dee26b3f39643c300828698c99d153320e337602dbd0251929c3", "mana": 1, "whash": "df1fdfafdbad18a5343fec36fc7efc7f6c783832102010005016b012b11f3919", "card_id": "AT_082"}, {"dhash": "0cc9093742c0c433fbc7a43c194966c6889e356169c7c38a97393861644e9b3f", "average_hash": "77065372c3c7005920f81ab058324264400c4f0f4d8c7c8c7cce68c41010321c", "name": "Lord Jaraxxus", "phash": 
"6f9ebe64e7295b1e3ec0c7268f613c6c41d83cc74bae92c1599e93f13908c524", "mana": 9, "whash": "ff165f72c3e70479b4781ef25c724264460c4f0f6f8c7cad7cce6cc410b0361c", "card_id": "EX1_323"}, {"dhash": "9ccd6d1a823708ce78eea311663387c6270e4cdf1834f648a8390fe37846b731", "average_hash": "a73bef723be379e37cc7bac7b3c723c38101c38163c030c400c400c402000600", "name": "Stablemaster", "phash": "d7889b0c2e6e3e473e11cd986b881137887ce66c1ee79bd1cfd947066c3a1899", "mana": 3, "whash": "af7bef727be77fa37ce7fec7b3c733c381c3c3c173c031c400c404c406a02620", "card_id": "AT_057"}, {"dhash": "0ae2858c423bcd66fecf2cfe5838dd7833e7c6c89c85610bc33e17e6884c1333", "average_hash": "371c1f780970007004b00eb01c300cdc18189bb398b3183f1cffb81f901b8017", "name": "Voidwalker", "phash": "8dad76ba7a7adbf16f9b49ca93d4234948e0991092b4e6044d197d24feca1393", "mana": 1, "whash": "371c1f580b7000f004b01eb01cb00edc1cbdbbb39db718bf1cffb8dfb89b8217", "card_id": "CS2_065"}, {"dhash": "0a80cd3330ef01fef7e47f599c526eb4dd4c86b038016703ce063e0db898e02f", "average_hash": "0700030033002000e006300fb42fd40efc0ff623fe07fe17fc3ff81ffc0ffc0f", "name": "Tinker's Sharpsword Oil", "phash": "e3707c68ef8f9ea01f4be9f09fbc318809a683568ed8e32149b293e4b3436336", "mana": 4, "whash": "0700030033002100f00f700fb42fd42ffc1ffe23fe07fe17fc1ff81ffc1ffc0f", "card_id": "GVG_022"}, {"dhash": "0080cd39e0c6930d275acefc98df63b446228dc192c317c16c86987d7163cf0c", "average_hash": "ffbe03e01186ec0cd8087838f930ff021c331e120e107c107c12c45ec246f20f", "name": "Polymorph: Boar", "phash": "43c6bc0979b28f2c9f21a73464990dc6035979828b6df3d8cd6ddba4dab51923", "mana": 3, "whash": "ffff03e13186ec0cdc187838f930ff329c231f120e507e14fc12cc5ec646f20f", "card_id": "AT_005"}, {"dhash": "ca1ffdfb00c00326d62c3c5979f2f2c4f681ed33d9cba1074b04921c663fccae", "average_hash": "0700c107f11f703f30323831fc70fc3a7c3b7c3fbc3fbc3fdc07cc02cc02ec0f", "name": "Rockbiter Weapon", "phash": "abfcfcb95e76dbeb36ea36338e99268c2e45a41e658327c84b6563044390db04", "mana": 1, "whash": 
"0700c107f11f703f30323831f870fc3a7c3b7c3fbc3fbc3fdc07cc06cc02ec0f", "card_id": "CS2_045"}, {"dhash": "0280a51e60e581df3f677f3cfcdaf985ff8bbf671f8e0c7c201880640780fc1f", "average_hash": "173c01e00182000038043c1cfcfaf4fdf4ffe0fff87ff87fd63300180000380e", "name": "Arcane Blast", "phash": "a1ffde7a5e1d39ece5eba7e14b1f2c9883d1c2865b60427549e1138853ba1b20", "mana": 1, "whash": "77be01e001820000b8243c5efcfef4fdf4ffecfff87ff87fd63b00180000380f", "card_id": "AT_004"}, {"dhash": "e8dcad20434ccc37f8fea1c943bddffa37a74f0980b431491cba80e1f347272e", "average_hash": "171f8b7f0b6404e01ce01ae11bc1dadffbdffbdf039100d800d000c0c087e607", "name": "Darkshire Councilman", "phash": "9f81cf037e567b251b859e619ed193e10c8c0c7cf6f8dbc86ca653e4666284d4", "mana": 3, "whash": "1f1f8f7f6b6404e014e03ee11fe1dedffadffbdf83910098c0d100c0c087e607", "card_id": "OG_113"}, {"dhash": "8087edfbe0e7738f0f5a1a641c987970f6b8c96193c33e077d0d7258c09008a7", "average_hash": "070001c0711ffc3f7e3c5e1e9e191e185e3a5e324e3aec3bf83ff033c023e003", "name": "Effigy", "phash": "2b454f20f098bd0c96211e012e042790cd708e774b7b9bed4bcd7b9ccff37b18", "mana": 3, "whash": "0708c043711ffc3f7e3e7e1e9e191e185e3a5e324e3aec3bf83ff033c023e001", "card_id": "AT_002"}, {"dhash": "c081bdfde0b30128b7d006a3046b3cdefdbc3b70f4e52d03d386f40dc91784bf", "average_hash": "073063c061183c181c18461e420e4206e247fe07fe1ffc07b807f403fe03f807", "name": "Lightning Storm", "phash": "4b206c0ca63d97b27c821e420def07f383274f13eb413f6c69f8399433d794ad", "mana": 3, "whash": "073063c075187d1e9c19461e420e6206e667fe07fe1ffe07b807fc03fe07f80f", "card_id": "EX1_259"}, {"dhash": "fafffdf3e3870f3e1ee4998b3f1e6238c4e159c1108301069f8e2f0d000cf83b", "average_hash": "07004102791e7c78fe71be771e7f1e7f1e7f1e7f1e3e1e1c3e1c1e1c0e180008", "name": "Bolster", "phash": "8f20f17e7adfcef6dcc81c30b2e43790347b0e6969984e4cd3123e04cbc73e09", "mana": 2, "whash": "07004106791e7c78fe71be771e7f1e7f3e7e5e7f1e3e1e1c1e1c1e1c0e080008", "card_id": "AT_068"}, {"dhash": 
"c283fd7fe0fcc3f0cfc39f813c173bbe6d3cf278ccf999e327876d0cd310e62f", "average_hash": "07180340ff87dc1f8e3f063f073f023f823fc01fc01f863ba219e205e001fc07", "name": "Swipe", "phash": "1b119feaf6bef46ffc62ec980d682ab209f002fc4bef33dd4bb2928c5b003340", "mana": 4, "whash": "071c8360ff87dd1f8e3f0e3f063f033f823fc81fc41f873be219e205e001fc07", "card_id": "CS2_012"}, {"dhash": "9af0a52552ca8c373b72667498e86b51d29284a4694bc296bc37676ef49fd934", "average_hash": "e70f616f29c51848585858bc583078097c0c2c242c212c240839b839d81fd90f", "name": "Hogger, Doom of Elwynn", "phash": "6bdff7aa7933fe1c7be62d651aa609412348064196114c92f3191b33f634f329", "mana": 7, "whash": "ef1fe97f2bc5184858795cbd5c387c297c0c2c242c212c2c083bb83fd91ff90f", "card_id": "OG_318"}, {"dhash": "e8f18ce741ee8b87363a4ff0b0184879b3d26505ff0a6c448933c36f668f0d3e", "average_hash": "070c130c811fc01f783c783cfc0d38083c187c08e81fc007c006001f881fc01f", "name": "Abomination", "phash": "49976fd018a65ee2c64986c8a394a78c706019303e7c9b3ded2df6a4d69273de", "mana": 5, "whash": "071e1b8ec19fe03ff83efc3dfc3d38083c197c1cfc1fc00fc017001fc81fc11f", "card_id": "EX1_097"}, {"dhash": "caf88de412890d117aa7e45e197dead211c5220ecc193e033c3e92f4ec49f913", "average_hash": "f71ffb7fbb5d987d88f00af03b3efcfef889f28839b07938609018d018103000", "name": "Wildwalker", "phash": "e721d933fbf0734adb488de48eb414f311d28e6172b09fe18ead4464ee02de58", "mana": 4, "whash": "f71ffb5fbb5f987d88f48cf83f3bfebbfa88fa8039b0583860b8105018183000", "card_id": "AT_040"}, {"dhash": "0883fdfc20f80ff41c42f80cf138ce61800b001789ee31d7ff3e7079c1f0fc27", "average_hash": "073ec3e1e7be247fe07fb07fb877387f10f000c000400070807080610061f02f", "name": "Savage Roar", "phash": "bdfa4b003b5576a5d64bb542da441898e9540cec4372acfc6b534e8d5bc39696", "mana": 3, "whash": "773ec7f3fffff7ffe27ff07fb87f387f107000c0004000709070c0610061f02f", "card_id": "CS2_011"}, {"dhash": "3a7f70bca750ce4888b319af674bc81e80f901b3d3c723eea718cf9f7f3ce060", "average_hash": 
"8100d9139c750e7f8e23ce477e4f1a4e1e402e401e781e600e603e7c3e7efe3f", "name": "Everyfin is Awesome", "phash": "8faa38e7b9b7d63f72e5f883727c7e381c42e9a856c887c23cb21726e0300d1c", "mana": 7, "whash": "8100c9119c778e7f8e63ce675e4f9e6e1e402e401e781e600e603e7c3e7f7e3c", "card_id": "LOE_113"}, {"dhash": "888f1d3bf0d6e378d4e107d2d9e6b7097f177e2ffc5ee865c1ca809703260ed8", "average_hash": "070003004107400384038203e003e003e00fe00fc00fc01fe25fe24fe67ffe7f", "name": "Nourish", "phash": "3908f6d5cf6059a2275bb6208e310b6826d530c6e38469c95ad61b38d9f773fe", "mana": 5, "whash": "071c03634187e103c603e203e003e007e00fe04fe00fc35fe25ff67fe67ffe7f", "card_id": "EX1_164"}, {"dhash": "38fc6d1cd270848539fa73e5879e4927b7424e45308ad14463318f60da83053e", "average_hash": "87139b6f9bc3c8476cc1c0c7f04ff80bfc039c03880000000000000001000100", "name": "Tinkmaster Overspark", "phash": "93ab9b83ac049f31fb3179607ccc221cc64cc2ed99b45bf656c7b6cda6094c38", "mana": 3, "whash": "af539fff9be3de67ec43c5c7f54ff9cbff43fd038d4009400c428c802180032c", "card_id": "EX1_083"}, {"dhash": "0200fd2f70f98ff71fccb3f061e40011487af224fe89c02387076a4c419afebf", "average_hash": "073003c0b300bcf1fcff787fe03fe80ff00ff80bcc07180f180c00040400e01f", "name": "Lava Shock", "phash": "2bdfb5287ef15ed77b740d8c8d8c24b881c31b366328ecd04b672701db017f86", "mana": 2, "whash": "0f7803c0f380bdf1befff8ffea7fe81ff00ff80fdc07180f181c80040414f01f", "card_id": "BRM_011"}, {"dhash": "0ce08dd082233d4f728cc6d898b521e3580eec3cded1074bb332a4e4b1890312", "average_hash": "f70f735e3bdc38d838983b980f3003a1e30803100389019480d401d203060703", "name": "Herald Volazj", "phash": "ef25e32049a079723b5a11161b84616c33d09bdc88be2ba3dfdbda7496b75511", "mana": 6, "whash": "ff1f7fde3bdc38da3eb83fb80fb017a1f3880392038f01f580d401d6031f0707", "card_id": "OG_316"}, {"dhash": "ea9ffdff60869f2c9fdd1e303fc67e3ea448f0810783050683283c78c180feff", "average_hash": "07008100e11fec3ffe7ffe7ffe7fee7f3e7f3e7c0678127c303c000c0000c00e", "name": "Feign Death", "phash": 
"296a7b2c72ffeef19ea07ab1a7600e5881e43cdc411acaa53b480a1b7b837e6c", "mana": 2, "whash": "0700c100f11fec3ffe7ffe7ffe7ffe7f3e7f3e7c0678107c301c000c0000800e", "card_id": "GVG_026"}, {"dhash": "388eed32927fcc607983e35c0f10d0662e8d94de787885807c3bf3f66c4a931c", "average_hash": "07100b600b70e0c1ccc71fc7ff4732d372cbc3ef33ce3d004cc818d4b084361c", "name": "Ironbark Protector", "phash": "d729bf0eaec7be42afd007a75a3011c1287ce2f12b5b66c1edad04a53e069b64", "mana": 8, "whash": "0f100f600b70a0c3c4c79fc7ffc736d3729fe2a7338f3f00dcc858d4f894361c", "card_id": "CS2_232"}, {"dhash": "ca34c9c3828f1c17317e66fc8cf8517120624bc519829f1c3d3972e2c697f97f", "average_hash": "f74b7bcb79c8b8c878887c18fc187c183c003c003c0018009800c8000900f17f", "name": "Brann Bronzebeard", "phash": "c3ab63a839d8962af93c3adcfb981e1519101ed339288eb1f39c2ed1635c6e54", "mana": 3, "whash": "f74ffbcf7e5cfcd87c88fc98ff587f587f003d003c409c40cc00c8000900fbff", "card_id": "LOE_077"}, {"dhash": "fe1ff0e3c1360f5f30fec730bacced319e86586ca0534287842c2959fee33f7f", "average_hash": "07e007800d200cf01c603814600cc86f801f00170117080704278061f067fc7f", "name": "Dart Trap", "phash": "d9be764d375adebcd96e7e5a67b194a8856634cb259680bda436006f245a41d0", "mana": 2, "whash": "07e007802f601df03ce038347c3cc86f913fc91f035f8d678467e063e067ff7f", "card_id": "LOE_021"}, {"dhash": "188c2d106262c888f861a3915f33b366648ec11b82f5034b7fb6e7ec1c4b313c", "average_hash": "873f8f6703e3008004c006c002f803fe013e03bc03b800b800fa18b77a3f7e1f", "name": "Starving Buzzard", "phash": "eda41e4ad39abd65eb8089a52688d3a26068699334e572dadbf2770c8ee3b1ac", "mana": 5, "whash": "cf3f8f7703e307a004c006c003f803fe01fe03fc03fc01f800fa18ff7abf7e3f", "card_id": "CS2_237"}, {"dhash": "2ae78dce12bd895176a3ac0618a5b10946efe39c0f313ec8e83090e3b04e21bd", "average_hash": "070c0318815bc0bb883f08bf0038003800bef199e001e083f887f007f80ff01f", "name": "Siltfin Spiritwalker", "phash": "312d2c37c3cb766a6eda46562e9442b210b4324bd6b4d9c51ef8c725bca99e27", "mana": 4, "whash": 
"071c9959b15bc0bb88bf0cbf0e3a00ba01bef19be18bf083f887f00ff80ff01f", "card_id": "GVG_040"}, {"dhash": "4afe857c8299052339c6727e8c085903b6624895906a38d537b15e6ef18f013f", "average_hash": "f713b77f9bff985b1cc99ccc7c4e381f981bb01380100000200070107118191c", "name": "Skycap'n Kragg", "phash": "ebe99bebffd2bef0eb24665e1e92a2e10809228d2be019261d2376183682d7ce", "mana": 7, "whash": "ff13ff7fdbff9c5f5c498ccc7d5fb95fb91bb11381110150200170107118191c", "card_id": "AT_070"}, {"dhash": "8af104e681cb0b9437184ee1bce6780dd27a64c5c98a9317043719686684cd3a", "average_hash": "771ef99fd99f603eb03fd03c603ea03f001b181b1818081f0018000000001900", "name": "Gadgetzan Jouster", "phash": "69ef69b4d696dfe9f7e875ece7c01268c918d31c66643e6436039b1199009b26", "mana": 1, "whash": "7f1ffbbfdbbff1bff83ff03c607ee07f087f1c1f181f1c3f001c000801001900", "card_id": "AT_133"}, {"dhash": "48e76cc8d18cca7bb5b55369a4c24b07a78e001d1b2a265418b8317342838f2d", "average_hash": "17bc1fbc0bb885f884efe74fe3cbf3cf734763427342f5c1c44045c005800320", "name": "Jungle Panther", "phash": "7f5b4d410e07c7c4ef8163e2cbf0c8b093f172e066f8cef8db304672c58cd8c8", "mana": 3, "whash": "1ffc0fb80fb887f884ebe5cfe3cff3cf734763c373c2f5c244c0c58005a00320", "card_id": "EX1_017"}, {"dhash": "6af6cdfc9229859b7b0ea6d94826bdf913c386267185c70abf3181e63c4d7b9e", "average_hash": "f70fb11ddb1fa83e909f7299f0812c0cbc083c211d2c18a8e886000a50067014", "name": "Goblin Auto-Barber", "phash": "4b33bb3bfbe31e5b33d83069f6d0986c01760c0ef31cf3804d0719c63e61dba1", "mana": 2, "whash": "f71fb91ddb5fa87ed0ff71b9f4892cacbc893ca11cac1cace886104a5007f014", "card_id": "GVG_023"}, {"dhash": "80c7ef3fe0c03b960d2d531cb6b3386d03b2e6e4cc9d016a01962f7c7f1cfe21", "average_hash": "ffffe9f7f99f3c3e3e3e390c390c37382d011c000821002006300e380f383f38", "name": "Power Word: Tentacles", "phash": "4d59d90a396630e5b7ed649087030e180b70c6e843e6739f5b1d7bf05be2d33a", "mana": 5, "whash": "ffffebf7f99f7c3e3e7e3f0c310c37102f011c081c200a2006700e780f783fb8", "card_id": "OG_094"}, {"dhash": 
"ca0ff5ffe0f1836016063d627a4fe19ce379feb1294286260d9cd9388067fcdf", "average_hash": "073041c0f91bfc3f9e3f9e340e3c0e7886702e7d7e5c7e7c7e203c000400f80f", "name": "Evolve", "phash": "0ffcbff5f27ff6fa3ceb368649928eb48b41490869192c8743224e095b433ac4", "mana": 1, "whash": "0730c3c0fd1fbd3f9e3fbe344a7c0e780e702e7d7e5c7e7c7e203c000400f80f", "card_id": "OG_027"}, {"dhash": "c8f38d6404c13983730ee0669ccd2c1b1126264c4c98d08890b127e3bd84430d", "average_hash": "37ceb3febbde18de18fe0bee8bbe8bfc9b8c9b8c0b88018800c801c803000300", "name": "Dark Cultist", "phash": "4d53c931d238565bbb58347cbe8442c208fc6dad0cf572ce5edb6bd269b30845", "mana": 3, "whash": "3fdebfffbbdf18fe18fe0ffe8ffe8bfd9f8c9b8c0b88018800c801c803000300", "card_id": "FP1_023"}, {"dhash": "c08fddff705c83a024814b2a1fd6390876f8e110c921344340263749febf686f", "average_hash": "0730c3c3f397f807f807fc03fc3cac1b0c1fcc1f4419c4130030c020f03ff01f", "name": "Spreading Madness", "phash": "3b55d702f5823ea5cd282c7246a886528a4c23866ed38bd4eb6cf3bc5ba37ccc", "mana": 3, "whash": "0738c3c3f39ff907fc07fc03fc3cec3b0c1fec0f461bc413c0314024f03ff01f", "card_id": "OG_116"}, {"dhash": "fa8ffdff00ff035e1eb09d7118e374c4c31407691e92fc25d88af09be72f0efc", "average_hash": "0700e103f90ffc07f837f833fe10fe00fe00fa01da41da03e601c603e60ffe2f", "name": "Wild Growth", "phash": "b300e3fddcfb9e2b6cf83cf484073c7804d21f0306d94bbdc50f7f80df815340", "mana": 2, "whash": "0700e103f90ffc0ffc27f833fe11fe00fe00fa41fa41fa03e601c603f60ffa2f", "card_id": "CS2_013"}, {"dhash": "c8e4adf9538385067b5fa49e49f981f3128764eecbc5a78b0f331ee6fc4cf399", "average_hash": "b71f3b7f3b7e38783cd81fd01ff01be01fe80ff80ff81df818f838f87a3cf61f", "name": "Ogre Ninja", "phash": "ed09ff086bf89f4e3bc086a31f21266800e126b00cb65fc4df6759e4cc6b1d6d", "mana": 5, "whash": "b71e3b5f3b5e38783cf81ff01eb01ae01ea805a80fb81cb818f818787a3cf41f", "card_id": "GVG_088"}, {"dhash": "0ae60ce811cd8b91362649d4b0ac6b49deb2fc45818b44977932876c1e88f132", "average_hash": 
"f71ffb9ff39da83c983d982d4c292807042f483f5816c8148813001100100110", "name": "Aberrant Berserker", "phash": "e99f731377317bccfb88124923c289981832469819896d704fd2cf653dc7e6cd", "mana": 4, "whash": "ff1ffbbffbbfa8bd9c3fdc3f5c692c67046f583f581ec8168813001501100110", "card_id": "OG_150"}, {"dhash": "88df6c70c1cc0bb8b7a45ee3ba8c7d19fa4234056a2af8c761b4896b3396c53d", "average_hash": "0703c387c18ec00f801fc01fe00fe82f9c1f880fc00f801f800fc00fe00fe107", "name": "Validated Doomsayer", "phash": "19df27337e849ec9a79c998c3ce024a0894966ca96cc6f6ceee4d6f28d8932cc", "mana": 5, "whash": "8713cba7d3afc01fd01fe41fec0fec5fdc1f880fc00f801fc00fc00fe10fe10f", "card_id": "OG_200"}, {"dhash": "02c0c5ff30b0cb0c3f087f18f634ec01f1fb0286011c44334c35d2eafdd9fabf", "average_hash": "ffff01f8e1ff707e78fe20ff20fe20ff80fd807c40000000400000000000f00f", "name": "Noble Sacrifice", "phash": "edffeda876c536f5bd3b5c98c3760dbb09c042444b64b9315b8153a45b0958e0", "mana": 1, "whash": "ffff4bf8f1ff71ff7cff38ff20fe20ff80fff4fe40044000400000001080f0af", "card_id": "EX1_130"}, {"dhash": "5ad90cf0d10c8b3935e04f06be0c712db21220f5d2aa4f4406b8d873a3871d3d", "average_hash": "b796ffb603be0db8983bcc3f8c3f0579051d001e110eb0074001400281089118", "name": "Twisted Worgen", "phash": "5977ef34f726dba06d3193b48b92024a82a9c6026c483d197f34bc41fba1efc3", "mana": 2, "whash": "bf9fffb603be0db89cbfcc3f8d7f057f855d015e114eb1474003c08281889938", "card_id": "OG_247"}, {"dhash": "e001fdff71a4e71b8cf1980e30114826a4cdc9391b5333eccf4ffc9bcb313fe6", "average_hash": "0730238079317c787e780e7bbe6912500260227200620062187fa33f923ffe0f", "name": "Healing Wave", "phash": "ad00272d5bf5fc955e801f031f4a2d2406e3ada1a15e99972b6d9bf163b0569b", "mana": 3, "whash": "073023c07b317d787e788e7fbe6d1270026022722062027e987fa27f9a3ffe4f", "card_id": "AT_048"}, {"dhash": "00c04d41b09c233bcc60988d33db4db29a46678cbf3c7e47f20da89bf027014c", "average_hash": "f7bf03f009c80878822182010240d349994de807e087f66ff26ff24ff04ff04f", "name": "Blessed Champion", "phash": 
"f1432c0fe94c69bcde2bbb503ab40b250bc24bf44c370bcb7acd5b095e10dbb4", "mana": 5, "whash": "f7ff0bf219d80878822182010240d24d994de907e407f66ff66ff24ff24ff04f", "card_id": "EX1_355"}, {"dhash": "8af6adcc129b4900f96cb299f4f4bd7341c782ea35442b0a523ca5e7cf08afb5", "average_hash": "e71fe35db3dd705e3048300938a839bc39a829b03c14bc27a4a1801d00110001", "name": "Kor'kron Elite", "phash": "e3397f4a3bbfff367ec4397386883ce483724b843cf4974169a1b6640903c630", "mana": 4, "whash": "e71feb7df3df70beb04abc493ca839ba39b82db03e94bc27a4e3c0bd82110001", "card_id": "NEW1_011"}, {"dhash": "88f6accfc11b8b3136e64c8cb8387081e602c9f5b5696357d6b7ac6e599d513e", "average_hash": "371d2b9da1bdf8319830183118313830183240362035303c303cb03eb01e311e", "name": "Pit Fighter", "phash": "edb537896b197c9c5e46c7998e88186626a2b3d839849be4fbc166360db299c9", "mana": 5, "whash": "2f1d2fbca3bdfcb99c391c391d713d701d724172217d307c303c303eb11e311e", "card_id": "AT_101"}, {"dhash": "ea8f8d3512ef3cf07be3e78ec819857332c67c8cc151870b38330fe47808c319", "average_hash": "770079603bc0b8c0e8d3cb9b9ba739813b833b9f3bb8f9b8389838dc12080208", "name": "Sorcerer's Apprentice", "phash": "f709b39a26a2e77cdec80cc53f44402c00c866324c333b47afad7e1dff84ee25", "mana": 2, "whash": "770079617bc1b8c0e8d7c99f9ba7398b7b933b9b3bbcf8b8389c38dc2a000219", "card_id": "EX1_608"}, {"dhash": "fe7f58c6a16c6f339f7736fc7cd8f92385c61a896f96dc0c01d07300fc377eff", "average_hash": "03c0e30ef413d009fc79fc59fc79f870bc407b40fac09cc01c400e00e203fc3f", "name": "Curse of Rafaam", "phash": "a3a379775c54de87e71ca44e5dab9e18a4b3b0e806eb90b9067da08d06f890fc", "mana": 2, "whash": "0340e30ff413f80ffe7bfc79fc79fc78bc407f40f8c09ec01e000e00e243fc7e", "card_id": "LOE_007"}, {"dhash": "cae0ccc5119b8b27370f4c1cb0345bc9a6c20905778ad80503b2986167804f24", "average_hash": "f71f7b9f3b9d383f1c3f3c3f7c0b7c183c167c00480510050000000000000100", "name": "Kezan Mystic", "phash": "c31deb097c717be0776cf3645b9608e689a4198e725893714d2676c59b919a9a", "mana": 4, "whash": 
"ff1f7fbfbbbfb8bf3c3f3c3f7d7b7d5a7d567c426d4d18471000000001000100", "card_id": "GVG_074"}, {"dhash": "0cc36d3ec27dc4eef9e0b3f107834326ba4ef4398967788bf93f01e406491f32", "average_hash": "0710036003c08047c0c7e2c7e247c243800183310db18cbfccffc0fb00121a12", "name": "Wilfred Fizzlebang", "phash": "99add6426e662f19b64139644fa4489009d3326469edce31766d5793f3ce616e", "mana": 6, "whash": "1f500f6003e08667e4c7e6c7e343c243a241a335afb98cbfccffc4fb00b71e16", "card_id": "AT_027"}, {"dhash": "08879d7ff0f1e37f19fc36e03f8cfe32f149c49999336c44f28dc893632780dc", "average_hash": "0700030001011000f801f003e007f01ff01ff03ff83ff00ff00fe00fe00fd00f", "name": "Cobra Shot", "phash": "f3609e875e5aeff01825330e8710e7e04ec10d869cdc3a7063982b9bdb4c9bed", "mana": 5, "whash": "0700030001031003f803f807e817f81ff01ff07ffe7ff23ff40fe25ffa1ff01f", "card_id": "GVG_073"}, {"dhash": "03808d7f70c0911f36c74c0c911834e16d42f884f02d8d1b127724cad997fa2f", "average_hash": "ffff0bf0c3cf001f803991bf11be31363137011e001e800f802c00203100f10f", "name": "Convert", "phash": "e9753dad37ca5732d7f4b7560965a41929b3488149465b645b405b625ba9b39a", "mana": 2, "whash": "ffff0bf0c3df011f803db1bf51bf31b631b7019e009e809f80ac01e03186f58f", "card_id": "AT_015"}, {"dhash": "46b0297dc6de3889d312a7a14c5998e33984731ae2a4c44889b193e77b5f773c", "average_hash": "f323f941c8cdd8cfd28cf09c009c108cf21c638c408e008e4086001ec00fe01e", "name": "Silverware Golem", "phash": "694ab78bb276d69eeb199c657f607b8493f68cd2644c1b248ea496d66d092f08", "mana": 3, "whash": "fb63f965d88fdccdd6def4dc02de589ef31ee28e408e008e4086001ee03fe03e", "card_id": "KAR_205"}, {"dhash": "8acd8c67d1988ac3363e4dceb0b463e9be92df65a8c9e0160f363468039fdd3c", "average_hash": "77183fb2e388d8293834bc303c317c606c306c3f183f0c3e003e001b001c011c", "name": "Master Swordsmith", "phash": "af9ff7907c6359cc76a886a18d9160b0468e96c939992f0777017f125f32e664", "mana": 2, "whash": "7f183fb2eba8d8a93c36bc303d717d617d706c7f1d3f1c3e183e801b011e011c", "card_id": "NEW1_037"}, {"dhash": 
"8a8ddd7b30c6814837914e27994c7a99e1008225cecb10145327364c7898f0a7", "average_hash": "073847e6f38ff81fe81fcc3fcc3fcc3ffc3ce430c03918181c1098306030f01f", "name": "Hunter's Mark", "phash": "69775bcefee136bbb7f28e7d997ca4340b2522054b4943c049c172e05363bcd0", "mana": 1, "whash": "071c47e6f38ff91ff81fec3fcc3fdc3ffc3cc030c07918183c1098307030f01f", "card_id": "CS2_084"}, {"dhash": "98e02dc3c2bf3d70f2e5c1ce92d56cb37f66de4db1d3c288703388e3078c8719", "average_hash": "770e636e29d0b8d980810f003f209f289f1f9f3fcb9f49ce00cf81cd03080718", "name": "Vol'jin", "phash": "7f0fa33070a8d986eb6254505f9219b0016173708b396a297cb73edc77f29ce5", "mana": 5, "whash": "771f6bde69d0b85982b10fa03f209fa89f1fdf1f4b9f49ce08db81c503090718", "card_id": "GVG_014"}, {"dhash": "82c3ed7fe08c032c1c5a1ea03c1e71f8e7e01ff3e117cf07de1f0c3e303ce060", "average_hash": "0508c140f10c78007c017e201e381e383e3c700e383c7e7efc3e7c3e7c3c3c3c", "name": "Divine Strength", "phash": "63cc36ef78ffd9323cf32d2d86d8d3cc8b4c4c9559646b244b475b801b603519", "mana": 1, "whash": "070cc160f10c78017c017e601e381e383e7c724e383e7e7efc7e7c3e7e3c3c3c", "card_id": "OG_223"}, {"dhash": "0238194724da7ca0d320a601cf1994f329cd431aa124065870b0bf83c316a6ff", "average_hash": "f76ff3dff1cff0cd72dd829c039e139ef39df3806280208000c0c1040600847f", "name": "Fierce Monkey", "phash": "79b0fb29bb7ef24c89079c372d0c334389e9cad81198b219b3b19e247366ccdc", "mana": 3, "whash": "ff7ff3eff2bff4dd729d829c03de13def39df384628020800080c10c0601c4ff", "card_id": "LOE_022"}, {"dhash": "0a80cd3ef0f941fbb5f64bbb97b427497332ed40de290d533a2ef479c0a68005", "average_hash": "073c07e083800000900ea40ff00ff807e81bd81fd00fd027e033f07de03df00f", "name": "Snake Trap", "phash": "3b77de2c7eba3fe7638da598c9e6216882c44b322bc43b8cdb211b065f233384", "mana": 2, "whash": "67be07e083800304940fb42ff82ff807f81bd81fd00fd027f073f07de07ff01f", "card_id": "EX1_554"}, {"dhash": "b8c92d3743ccc413fbbfa63c41e19387273f08cefc958d0902b318e22540cbaf", "average_hash": 
"471247608364c044b0d87bd87bc0d3f3c7d31fc99ffe99de30da40dcc2060606", "name": "Defias Ringleader", "phash": "7d67b626de5ebf9f9ac4e7c14f4124c939641282c6d91b02798dc641de81fe10", "mana": 2, "whash": "4f124f6083648064b4d87fd87fc0d6f3c7931f899fbeb8de20fa505dc282860f", "card_id": "EX1_131"}, {"dhash": "68d3cce001870a5c35c85f9cae304d49b2f27d05912b22d40c36c769f88b6530", "average_hash": "d71ffbbffbbef81ef81bfc1f3c1d284d480b781f781320000000000000000100", "name": "Crazed Worshipper", "phash": "eb5d4b413e369fa45f820e992ba0894c342cc32067e7dee47c42f339ce7198cd", "mana": 5, "whash": "df1fffbffbbffcbffc1bfc1fbd5d394e696b785f793304402000000001020100", "card_id": "OG_321"}, {"dhash": "08d96cfe11008a203401498bb606480d805a00b5236aded471b9836007810724", "average_hash": "fff7ffffdfff8fe30ce30fe007f807f007e007c007e007e004e305e005a007a0", "name": "Fen Creeper", "phash": "1f5efb05fb857ba06f20672066909620942836689b211bfbfb9399e729a34be6", "mana": 5, "whash": "ffffffffdfff8fe30ce30fe00ff807e007e007c007c007e004e205e007a00760", "card_id": "CS1_069"}, {"dhash": "9a317cc86096d32fb654ace9dc85b95b62fc8cb2e3ed1f1b3f26165c60918166", "average_hash": "078c030c031e003b803dc03cc0390039003c200e2c3cfc39fc3bf838f8107004", "name": "Gladiator's Longbow", "phash": "69dd5ce9d736d71e5e02a3d356f2e1b69d8d84b481ec12648c21523c87793399", "mana": 7, "whash": "07ce479c075e017fa03dc03dc03d003902bd601e7c3cfc39fc3ffc3af934792c", "card_id": "DS1_188"}, {"dhash": "fcf92dc6d2148509389f309687245f39bcc6e119c0f733cb413ec7e41c0039b8", "average_hash": "071a7b7e0bff447e4cd62ec63e420d5f0f1e033d03f800b00cf018f81b801e00", "name": "Iron Juggernaut", "phash": "8d0f7b2a66c3bd605b42870c3650420b88f41cc30bb99e74cdb53e9fdbe3878d", "mana": 6, "whash": "0f5a7f7e0bff4c7e4c7e2ec63e460f5f0f5e033d03b904f20cf81cf80b801e00", "card_id": "GVG_056"}, {"dhash": "0c8f0d116236fd087234a4c9c99789bf147749ac005097c9ef367fe3fc8d731b", "average_hash": "f765f3e361c2e0d100f083f083f003e803d00fb0038003d000e018d8fb1ff71b", "name": "Mysterious Challenger", "phash": 
"7525b50a63e8de5efb602c133fb00ca700f04a4923fa7ea57e4bced68ca15ed0", "mana": 6, "whash": "f775fbfb63c2e0f1a0f083f0a3f083e803b00fb00f8003b008e879d8fb1ff71b", "card_id": "AT_079"}, {"dhash": "8081ddff70e2eb41de8e143d07f230006300c6149c79fde77b47079c0408fc1f", "average_hash": "073843c0f39f7c7e1e7e1e7e7e1efe31fc31e01000000f103e181f180f10fe1f", "name": "Shadow Bolt", "phash": "2f05fdba5d0a369733918f48cc468ca12398666e63743e9d89f11e4973eb4ae4", "mana": 3, "whash": "073843e0f39f7d7e1e7e3e7e7e1efe31fc31f01010000f103f181e180e18fe1f", "card_id": "CS2_057"}, {"dhash": "48c68cf9c1c30b3536ce4d79b0447c89c31ae635e0cb09173336ce6cdc98913b", "average_hash": "b71d9383318f783c1839903ec8224838803d803f0c3d0839003b8030b019311f", "name": "Junkbot", "phash": "a9d55742d3a91c197b3236e60b3220b32dc9678edbe673726d166d4d93269249", "mana": 5, "whash": "ff1d9bb33baf78bc3c3db03edc66487c807d803f0c3d0c39003b9035b119311f", "card_id": "GVG_106"}, {"dhash": "929ab87961a35a46bbc85639ab7256e5ac4a589583a30d5dfaba40600196f57f", "average_hash": "e7a663a76107b40b301d74163456741f701e301e7006f010f007c0030100e17f", "name": "Fossilized Devilsaur", "phash": "fbfcdf1c3f5ff656ed1616e767864e90213106441921266011e4cadcc924ff94", "mana": 8, "whash": "efa763a776aff40b741d745e745e745ff05e305e7046f050f007c0030100e1ff", "card_id": "LOE_073"}, {"dhash": "28cb6cbed1ff8270378075f8bb3866018c121f25306a2094d139ff73fc8f213f", "average_hash": "47100580c387e01ff01fb817f807f807e807c0038000000000003800f80fe107", "name": "Echoing Ooze", "phash": "937f3f657fc53ee7db8486e38cb04220300c73421dc31f11ce017b279b23f939", "mana": 2, "whash": "cf12cdb2cbbfe0bffc1fbc1ffc67f947e947c843c10304000c007804f90ff117", "card_id": "FP1_003"}, {"dhash": "0cf00cc401990b6637cc4c88b90263058c3a32f5048abb54e038806119848733", "average_hash": "f71ffb9ffb9f783bb83f383304330063000f800d800070030003000001000100", "name": "Argent Commander", "phash": "99d96909630173025e9293809385189299d8966d8fcdfbecdbe6766f694a59f2", "mana": 6, "whash": 
"ff9fffbffbbffcbfbc3fbc3f2d73056f014f814d9d40f1432003008001800100", "card_id": "EX1_067"}, {"dhash": "828ffd7f30e06704dc13b8a6620d0dba8354b6ad791af334d3710243128efc1d", "average_hash": "0738c3c3f38f107e067e0ae00be8020c834421623140390c180e080e0c0cf61f", "name": "Fist of Jaraxxus", "phash": "cd0abd646bf7d2313e680e87a5780c1e09e16cf8812e46d3e3d54bbe33c1332d", "mana": 4, "whash": "0f7cc3e7f3bf15ff06fe0efe0fe8034c83c4236331c0390c180e098e0d0cf73f", "card_id": "AT_022"}, {"dhash": "6cf8ccc091130baf36524d86b04c6091985a42b591ca2e94b3386772da81bd26", "average_hash": "f79fbbbfbbbffcbffc3d5c3f1d731d60045004100d100c000c00080001000900", "name": "Reckless Rocketeer", "phash": "875fe9c1fb015e207e20134e298430a03292921c9bcd79e16fb3165ff6e3bbf4", "mana": 6, "whash": "ff9fbfbfbbbffebffcbf7d7d7df31d60075005500d500d401c000c8001800b20", "card_id": "CS2_213"}, {"dhash": "e8d0cc82910d0bdb36274c8cb16867b598d26525d84a27950d36fe6c94913126", "average_hash": "771e7b9eb9ba9830bc343c30182218023c083c180c1a4c001c20583308133106", "name": "Tauren Warrior", "phash": "c74767b3fb846f687bc896862f3c2978329298e4a7c336db2de14263f12c8461", "mana": 3, "whash": "ff9e7fbebbbe9cb4bc3c3c381d621d4235483d5a1d1a4c401c3058333933310f", "card_id": "EX1_390"}, {"dhash": "0a0fdd7ce0e1c39b37b50ddc7d21f344a6f11c81703be0c7ff0c095310bceb7a", "average_hash": "072081038107c40ffc3efc3ff83f787f043fc007c0070477ac67e80f601af01f", "name": "Primal Fusion", "phash": "99743ef77e7b9cea6e8da78c6666225f2db064c40b522cbc1b4d8382dbc0b105", "mana": 1, "whash": "072083078107c40ffc3ff83ff83f7a7f043fe007c0270477ee67e80f601af81f", "card_id": "OG_023"}, {"dhash": "8a1f3dfe6100c742dc1e3cca7880d000e1e18143670e0d175a1ca43e1b70fccf", "average_hash": "0720c187c33fe07ff07ff27ffc7df87fd83c782478503030104000730070e01f", "name": "Forked Lightning", "phash": "a9b23bf75efd6ef67ecf8639668086d48c64a43c410a2fd429a16ec45b520e85", "mana": 1, "whash": "0730c387c13fe17ff07ff27ffc7ff87f5839782c78783860105800730070e01f", "card_id": "EX1_251"}, {"dhash": 
"03808d1ee4ed9159247b4ff09cc1bf0dcb13b60c7024649b8b771a481891f02f", "average_hash": "ffff8bf183f0c418c00de00ff107f19fe9eff967d0130406501260003100f103", "name": "Shadow Word: Horror", "phash": "71573d23ce219da0f19b63cdcb64431a49eb2a642b65573659e4f171139a52ea", "mana": 4, "whash": "ffff8bf181f0c418c00df80ff10ff1bff9eff9e7d2930486509a61003100f183", "card_id": "OG_100"}, {"dhash": "0880fd1be0ebe10702bc8d3917733fce7efcb88071008a01344fa2bb4f6efefa", "average_hash": "071803c07f803c003e003700330e630e7e0fbe073e07be198218026c147efc7f", "name": "Deadly Shot", "phash": "4f00f69435af1b0dd9447c00a7418f902bcd4c3cc3b36ce9fbd636f45b6097a6", "mana": 3, "whash": "071c03e07f803d147e003700330e630e7e0f3e077f07ff19c61c226e36fefc7f", "card_id": "EX1_617"}, {"dhash": "a8cd6c98d1200aedb0325325a442438f9c6ab7dd6fbb9104bd314b63f48cf93d", "average_hash": "5f93cfb70bb30ca0cc46c4404340c14007401168336c394c186d1c8819987b3d", "name": "Micro Machine", "phash": "c7cf9d248f85cfd13eac128e63f00cd2060ae661ac598e85ef24b9c37f227336", "mana": 2, "whash": "5ff3cfb70fb31ff0c4c645c0e7c2c3c0074013e833e839c818e91d885db87b3d", "card_id": "GVG_103"}, {"dhash": "1692007205a01e80bd224819bcf670bd654a969429200f5d3cbae8549388653f", "average_hash": "fba7f9a7fc634c6804b8073803f8c7f8f3b43121710070407400640165000b3e", "name": "Avian Watcher", "phash": "e7fc8157d34ff3b0fb3776b86f981a9839b6364c0998128629a626cd099216d3", "mana": 5, "whash": "ffa7fbe7fde70c6a04e8077807f8c778f3743321770074c07440744165810b3f", "card_id": "KAR_037"}, {"dhash": "caffedff70308fe01e003d4870f0e610dda13343678eef0d3f093c100220802f", "average_hash": "0700c107e31fe033f077f075f070f07c786c780e584ff85ff80ff803f801fc03", "name": "Explosive Trap", "phash": "a192bcf47a7d5eeb66d2b6b88ce006f689703664866566044bd37b835b13db0a", "mana": 2, "whash": "0700c107e11fe037f07ff075f071f87c786c780e784ff81ff80ff803f801fc03", "card_id": "EX1_610"}, {"dhash": "48d8ec31916f0a9b35664e8ebd187d7191c2206547ca88963935776a9c966139", "average_hash": 
"37130b831b861804980f8819d83d383fb80ff80ce80cc80c8809d801680c7108", "name": "Bilefin Tidehunter", "phash": "4b4bdfa4b6b45f1eefa8b1e1330812298870618632c37e08bf01ef49eea2df66", "mana": 2, "whash": "3f171ba71ba658a5982f8c3bfc7d3c7fbc5fbc0eec0e880c8829d801e90c710d", "card_id": "OG_156"}, {"dhash": "0afd4dd08283090e7f593c3cd973b2a7781fc4a201c54808b13f46e0cc0f19bf", "average_hash": "f703f11f195a3818783c683e703ef03e603f04bf0810080c18091801980f901f", "name": "N'Zoth's First Mate", "phash": "e9e5636c9aeb1e1ffb74b3c60a9c165c6190f2a08ce54f8986249d86cca4e7e8", "mana": 1, "whash": "f71bfb1f1bdeb838783c7c3e703ef1bf60bf0cbf0c18180c182f1801980f901f", "card_id": "OG_312"}, {"dhash": "08f00cf0d1c50bc336064f9eba3c74e9e0b2814516894313df37bc6e4c9cf13c", "average_hash": "f70ff11e290e181f182f183e3c3f7c3e7c321c301c301830383cb81cb81cf01c", "name": "Explosive Sheep", "phash": "cb5759ca7a21b6e573f869b896d211e263c899c03b488e613e067e12fb96de12", "mana": 2, "whash": "f70ff91eab1e181f182f183f7c3f7c3e7c3e5c301c303830383cf81cb81cf01c", "card_id": "GVG_076"}, {"dhash": "6cf84ce381868b9d36364c4cb8907189f2f2e585cf4b9e97bc37106c26819d26", "average_hash": "d71f5b9f7b9c783c383c9838f838f8307c383c387c38ec3cf81fc013c0002100", "name": "Squirming Tentacle", "phash": "63d76f815a824bc8b6611b622f5398258c9c3678a9ed36663b6d6373ad234aca", "mana": 3, "whash": "f71f5b9f7b9c783cb83cbc38fc38bc707c387c387c38fc3cf83fc013c0002100", "card_id": "OG_327"}, {"dhash": "dc1f3cfe60fc9d7d26ff2f0ad904baf160c1c762b68d681bd827375738a2416e", "average_hash": "0780c300e11ac01fd837f83ffc3f7c3e783efc3fdc1f980fc80ed807f007c003", "name": "Coghammer", "phash": "e3373f007e0a1eb1e6401736138198f04cc0460a28b8f3e98fe5be25ffbc7e3e", "mana": 3, "whash": "0780c300e11ae00ff837f83ff83e7c3e7c3efc3fdc1f981fc80fc80ff007c003", "card_id": "GVG_059"}, {"dhash": "4ce48d89621b4526bc786bc19d96610fc67a1c85702a83d516b7bc7669899333", "average_hash": "771f337d23d0107114e080a9803f803b8033940b300f001c0018f01ef0139103", "name": "Mekgineer Thermaplugg", "phash": 
"a9793f9a67296f289b51690edad843669c8df3301bcb36a4770892f15b2e9624", "mana": 9, "whash": "7f1f3f7d23f935793461c4f9817f817f8933950b310f015d0438f09ef1929103", "card_id": "GVG_116"}, {"dhash": "caf0accf11a20b06b67c4cc9a7d24d259b4a6495d92af245ecb939675296253d", "average_hash": "f71ff39efb9bf83f7030f011b00cb00fb00fb00370006008600ee00fe00f4107", "name": "Jeeves", "phash": "f3f9bf257f06dfa99bf369f226a491a008900990c8601b99bb0ddec4be01ee4e", "mana": 4, "whash": "f71ffbbffbbff83f7030f011f04cf04fb04fb003f001600c700ee00fe10f610f", "card_id": "GVG_094"}, {"dhash": "7267c9ab0694fd2877d6aa20544da8b3534fa08c47c19f9a7737c7a68d491a3f", "average_hash": "f148f95bf8ddf0d9dab9dbbb48fe58f831f811f801f808bb809b90991218103e", "name": "Swashburglar", "phash": "2d49f3ce9abde63fee723d4f1f56b67130b966c260fa7230639806938c811642", "mana": 1, "whash": "f948b97bf85df0b958b9dfbb46fc58f830f811f801f808bb80bb9099121b103e", "card_id": "KAR_069"}, {"dhash": "c8e10ccf111b0396373c4b70acec52b1a166728d6f2aded405373b6e7898813c", "average_hash": "f71c7b9c3b99383c781cfc0cec14dc16b8003808000cc00c683c383c701c211c", "name": "Raging Worgen", "phash": "639be741ed987ec8df2429a658d2e7d1382619f28e35333a29e34ccf9929b218", "mana": 3, "whash": "ff1c7fbc7bb938bd783cfc1cfc56dc56b8503848000cc40d6c3c783cf11c291c", "card_id": "EX1_412"}, {"dhash": "80c30f672408397cda10770fc82cb2f150b3876413cd66fa8c9e0f617adcf4ef", "average_hash": "fffffbfff1ddf811f252e4983cd038f078f87838783068302ef8348c38b0f8f8", "name": "Shadowform", "phash": "65ff39a05901cd16cd5f4e7899446f48b38482350bf922fd59e1331edbc2f18a", "mana": 3, "whash": "ffffeafff1ddf055f250e4987cd018f078f85838782028302ef0348838b078f8", "card_id": "EX1_625"}, {"dhash": "68e0cd8712bc09687c132eccf13083410ef3180a23c44c08f138def1bc075bbc", "average_hash": "f71f3b7cfbd1f8dbdc61dc303cf03dc00d823f800200180008c010c252025418", "name": "Orgrimmar Aspirant", "phash": "c7c7e30979e17e386f188e122cc2186618dbd36061b93be733c77386f3981c9a", "mana": 3, "whash": 
"f71f3b7cfbf9f9fbdc61def03ef13dc01f803f820f801c8018c418c252825c1c", "card_id": "AT_066"}, {"dhash": "08daad2c12478be07e04bf0cf0f980330f4f06da38b4694896b8adf16f02d7b4", "average_hash": "f73dbf75fbeffdfe7cff3eff3efc5bc093c103c103c002c000c000c003800620", "name": "Armorsmith", "phash": "ffb0eb177b1edea236412da9298009a82294728636487f49ed49df44df26df82", "mana": 2, "whash": "ff3fbf75fbeffdff7cffbeff3ef45bc013c103c103c000c000c004c007800620", "card_id": "EX1_402"}, {"dhash": "0afb0cf6d1ad8bc3370f4f38beb070ede3d2cfc59d0932137d36fb6cec899937", "average_hash": "f703f116c11fe81f383f3c3c1c3c3438bc30fc317c3178307811d811f811f80d", "name": "Gnomish Inventor", "phash": "a3b55d727ba3b6ac76720ce1c7d0938d607830d8398e1626395adcc91dc33b1b", "mana": 4, "whash": "f717f11ec11fe81f383f3c3c3c3c343cfc307c317c317831f811d811f811f80d", "card_id": "CS2_147"}, {"dhash": "f87f7dfbe1108f7f19c283967e00d400483cb3e3cc8653028e08b41582386c1f", "average_hash": "0700c303f13f3c7bfc4bfa2ffc7fdc37482e8e4d1e3f3e003c003c042800f803", "name": "Beneath the Grounds", "phash": "830ab9c87e6d6e713c16d6b01b83164a067484b965b2ee54ead432e57b3bb5c6", "mana": 3, "whash": "0700c30bc13fbc7bfc6bfa0ffc7ffc37482e9e4d3e2f3e097c0038043802f807", "card_id": "AT_035"}, {"dhash": "98cd2cff018c0b30b7485617bf2874f9a0b25165670adf1426b5586a3394e739", "average_hash": "e7116390f199f039a03d001f081f183f7c1a7c0cfc0c78083000200000000118", "name": "Kvaldir Raider", "phash": "63dd39a53e2467961e869c800f8a2448a1c10db27b823365bb6ddddb6dcb6e39", "mana": 5, "whash": "ef13efb1fbbff4bfa43f041f1d5fd97f7d5afc4cfd0c7c487000200021000118", "card_id": "AT_119"}, {"dhash": "eacfc5f2029ec520faff3078078d7c1a7766c3c887171bcb073edee4344be13c", "average_hash": "17019563c15860794079a481e001f83018381a3c78b8603838b07810781c701e", "name": "Cho'gall", "phash": "6787bf695efcafe5cb5296385a508f952c8d698c63a84b31561146027ec65467", "mana": 7, "whash": "1f13db63d3dc607d4479f681e041fa709838bbbcf8b8f03878f07830781e781e", "card_id": "OG_121"}, {"dhash": 
"b65b69bd8462db05b67a4ce5b8027bedf15ac2950d23129406b96954b3806d3f", "average_hash": "4bf649f74cf744f774fcd6f8dd782d78e77831783d780c480400000001004b3d", "name": "Medivh, the Guardian", "phash": "adbeb1a4da86df3bff67566aefc66630690ecc6ca9a01a6aa94c46d109137210", "mana": 8, "whash": "4ff659f74cf744ff74fcdff89f782f78eb783d783d780c480440004001004b3d", "card_id": "KAR_097"}, {"dhash": "389d6d3ed2ecbc917966e208cdd39acb20b6b36c67d1cd88893753e6d6089b19", "average_hash": "07400be009c0d8c408c033c013c003d04ba02fac1fac1fce4cfc08f98319831d", "name": "Flamewaker", "phash": "5d0f96a2a9e9cf56be2024e95f70269d00c6bb2541d6fae5aaf94add372002d3", "mana": 3, "whash": "0f500be049c158c50cc1b7c017c00fd04fe46facbfac9fee5cfe09f98b19971d", "card_id": "BRM_002"}, {"dhash": "4cdc8cf901e44b00b7ac5331a6424887b48a7b15e42a82450dbafe60dd87b33d", "average_hash": "f797fbbfe39fc03eb01e701f704ee04ee00e700420040010001010003105b11d", "name": "Ancient Harbinger", "phash": "7bf07b449b111684fb0196c08b9984e02c6612e679e67bce3ecbec2db629ee34", "mana": 6, "whash": "ff97ffbffbbfd4bfb43e701f714ef14e714ef1442144015000101081318db11d", "card_id": "OG_290"}, {"dhash": "0080cd9fe0310ee0b89f613482c004193b7ef6fc08eb69e097c6eb891737fe0c", "average_hash": "ffff03f0d3f13cf100e20cc200040004010109010f600f00afa17f233f7eff0f", "name": "Mirror Entity", "phash": "1f54f0ab2f003f115a575f944b6a9d0851988d4e23d74b359bd43be5d9e174a1", "mana": 3, "whash": "ffff0ff0dbf13df102e20cc21184010401050d010f620f40efe17fa33ffeff8f", "card_id": "EX1_294"}, {"dhash": "fabfcc6020e794ce279c2f12db24bc59e4a48c7bd8e7739f052f8a583e80316e", "average_hash": "8780c30f730c300e782f783f280f043f003b007b003f003f083f001eb00f300e", "name": "Glaivezooka", "phash": "c9dc5f7276733ce9b6650b9c93d8846c85b408b8466a66392da4a7d4de6a5e21", "mana": 2, "whash": "8781e78f730c790e782f7c3f2c1f4c3f003f807b003f283f283f003eb81ff90f", "card_id": "GVG_043"}, {"dhash": "3ad8e43041e3cad6b4895939b7f27e87f91e93a56c4b97c768b6d26c8990373f", "average_hash": 
"0fb60fa60ba604e604660267e366f3fff37ead6d0d6d81f2c03200b20580033c", "name": "Murloc Raider", "phash": "fdf6dfb69ea58fb76bf2b3e41e090cf0216096480f13de60c9c1d312f9206c48", "mana": 1, "whash": "0ff60fa60ba607e604e6054763eef1eff37eaded0d6d85f2c07205b205a0033c", "card_id": "CS2_168"}, {"dhash": "eaf004e1018ecb1f37714eceb81c70f9e072c7c5e00b5f1736374c6c9080713f", "average_hash": "b70f711e511e001cf03d981d9c3f1c3cdc383c3c3c3c381db01f201960006000", "name": "Grimscale Oracle", "phash": "69ff7de27ac84ff396e693795bda328830b04cc099213ac36e869384a724e762", "mana": 1, "whash": "f71f799f719e201df83dbc3fbc3f1c36fc383c3c3c3c383db01f301970006008", "card_id": "EX1_508"}, {"dhash": "c082ed7f408c831847049f013cfe63a4f9cc9090022309c2f7048c4c0899fb37", "average_hash": "77bf63e1f98f8c1ece0c46380638377cb73f773e172217213e3018300030f63c", "name": "Ice Barrier", "phash": "2f70590bd28fd7717dc0a4f83e0008070b7a9c2f4be52c95d3e3432a5be3363f", "mana": 3, "whash": "f7bfe3e1f98fcc1eca1c463c0638377c277f773e372207313c3018300030763e", "card_id": "EX1_289"}, {"dhash": "c8f40dbaa2654c86f90ce6990cf3d9663acd658ec7380d439a382df35a46a71d", "average_hash": "f71ffb7f537fb05e30cc32d8335832d812c80398e39c3330b0c428c420040607", "name": "Addled Grizzly", "phash": "e530ebe19b1e3b263f89e6386790401802f07ab21ea7aee1bbad10cd8d0dbedc", "mana": 3, "whash": "f71fff7f937fb0fe70fc32d833d832d91298029ce39cb33830c428c4208c260f", "card_id": "OG_313"}, {"dhash": "0c85ed0f02e07c96796ce310cec99b9b3756da2cf1b0c34900b301e63f086710", "average_hash": "970fc363f1c1f0c030c13b845bd8c99acbbe83bf039f639e00de00da43106200", "name": "Faceless Summoner", "phash": "79299353b2d819643f474ce1cb10883920706acd5c3c6386bbffe3f1b7617ac4", "mana": 6, "whash": "e71fcbe3f1c1f0c130c139c45bdcd99fcbbf83bf039f619e00de00db43146300", "card_id": "OG_207"}, {"dhash": "fa9ffd7f63805e4e9d49329f64004438ba704ce101c3f94523880430c867fbdf", "average_hash": "0700e107fb07fc63fe687e693e611e001e101e031e201e301e0006000248605f", "name": "Slam", "phash": 
"87aaf35e787f9ef55c856ea0a5ca346825e504138378ad851ed65b42bb257f00", "mana": 2, "whash": "0700e307ff2ffcf3feeb7e6d7e7b1e441e501e431e601e313e400600024cf47f", "card_id": "EX1_391"}, {"dhash": "8a8ffd7fa0f8d3f4476f3fcd78026138ca4186811f2321e74688f9198370fec7", "average_hash": "070003009b07cc1fee1ffe3ffe739c61bc703e70d67006702e3006680660fc27", "name": "Revenge", "phash": "aff07bdd7eeefeac86d22ee82b800c1803f41cf6438de60073431e075b01ff30", "mana": 2, "whash": "070003409b07ed1ffe1ffe3ffe739e61be703e7096712270267006680660fe6f", "card_id": "BRM_015"}, {"dhash": "18f02de062c44580bf006e4f98127047409a85340aeb24c659b7466ee999b735", "average_hash": "e71fe37f03df807e00fe00be003e003e0038403100308038803800180015c101", "name": "Chromaggus", "phash": "69fc49225b0bb64af35847601b9608f453529634abe75ef65ee5dd91c608d698", "mana": 8, "whash": "ef1fef7f83ff847f847f00fe017e217f013e41394139c178803800ba819fe10f", "card_id": "BRM_031"}, {"dhash": "e23d894496b139dd7306e70cce4d1cbb3c1671ac66c4c919b67340e2ad0cdb7f", "average_hash": "a7e3f1cef8c338da9b9f9b9e0b8c098e038e038e838db08db0cd11c91208c67f", "name": "Animated Armor", "phash": "5d63d72eb3e8f2793e6647db3db3dec581b08e49848d06119769a2d908925f12", "mana": 4, "whash": "95ebf1cef8c338da9b9f9b9e0b8e098e038e038c838db88db8cd1149130ac77f", "card_id": "LOE_119"}, {"dhash": "00007de1e1ffff0b5e1f943934e660dce3e1ffcbe6320d067a9cd333fc3c02e0", "average_hash": "0700010001000016025a3a7076387e383e5cfa7ffe3ffe7ffe7ffe7ffe7ffe7f", "name": "Elemental Destruction", "phash": "2b00fc359ef75bd766913640ad40992b0cac2bd5e1f623932ed5b1c20e4bd9ab", "mana": 3, "whash": "07000000000000120250227022386e383e5cfa7efe3ffe7ffe7ffe1ffe7ffe7f", "card_id": "AT_051"}, {"dhash": "6adb8d2612d90ca2794c93180c295ac28166378d78168d08c231a4e37943b3be", "average_hash": "c706f367f34ff04ff04fbb8f321f781ef802b909b987b88010c0004000000400", "name": "Xaril, Poisoned Mind", "phash": "d33afb9b3ef6f7672e442c648f69c26c04f4408c22e912015fc67761b621de36", "mana": 4, "whash": 
"ef16fb6ffbcff04ff84fff9ff25f7a5efa02b98bf987b88010c0004022000400", "card_id": "OG_080"}, {"dhash": "c8f78ccb11100b4436804d08b8347ac9e502c905836904d708b6236cc7980539", "average_hash": "f71ff19ffb1f78377837b8320438203c003e003e403e403c003c0018001f010e", "name": "Ship's Cannon", "phash": "69f5f360db8979c24e6a249b9eb482a4c89826cc3e4c1b676f027b123f13fe8c", "mana": 2, "whash": "f71ff99ffbbf7837fc37bc376c38347c083e003e403ec03dc03c003c001f011f", "card_id": "GVG_075"}, {"dhash": "a8f94de552cb8d967b682cdc4234f9d22527cfe8dc85000b733ee6e68c4c5939", "average_hash": "771e7b1c7b1d381d383d8831a00d78bff817d813989b08301890181838181018", "name": "Usher of Souls", "phash": "a3817b9e6ee09be67b6416601a61016404e663599bf8be069f679f65ba9861b9", "mana": 5, "whash": "f71e7b1c7b5df87d383d8c31b01d5cbff81fda93d89b1c3118b1183838181818", "card_id": "OG_302"}, {"dhash": "0af7edc692090d13785ce6a40fc9f4f25985e70e8058aa815f3a26f6fc4d5b1f", "average_hash": "f71ff35dfb5db81c38845882db3cd824f8a8f8b8291808180010185080098008", "name": "Mire Keeper", "phash": "63a3f3c378797d3c3bd21ac33e5cd278425282a123e633c9c6ac92a74e83eb31", "mana": 4, "whash": "f71ffb5ffb5df87c3cc4dc92df1df8adfab8fab829980818189818508009c00c", "card_id": "OG_202"}, {"dhash": "0a80fdefe19c8f1d1e723ccd6103c33886e30d3c1e132c66e08d990c2112f32f", "average_hash": "073801c047b84c780cf8d0f990fb80fb20f39033c802c002e402e6226200f00f", "name": "Power Overwhelming", "phash": "a5ecadbf5ed45a57d9993de88963a6fc098e271089c4e6913b443a786314d626", "mana": 1, "whash": "073c07e0d3f85df86cf8d0f9b0fba4fb38f3b07bca16c807e602e6227200f20f", "card_id": "EX1_316"}, {"dhash": "1a802d27d2c88b137eaea1946e7d98d060a6476f9fc4660999327ae5c842033e", "average_hash": "e71e0364e301b8bd1cf01ac01ad87bb879387b984f97dc86d884d8a5f8013600", "name": "Houndmaster", "phash": "6787ff3bfb727b966b4c0be28f60a1f001f4290558c95a4033927318b3a5bcad", "mana": 4, "whash": "e71e0b64e345b8b91cf01ec01ad87bf879387b987f975c96f880d8b1fa01360c", "card_id": "DS1_070"}, {"dhash": 
"26f448cac50c9f413ff25748ac935c07b14c649ceb22f85d80bb015403888f3f", "average_hash": "933f191f983f883f881f841fe01fe01fa01f001fe00fe00f800f00100000013a", "name": "Arcanosmith", "phash": "79398f67a647f3d0fe197b3efb1a96c5c93432368891927829328e5961447686", "mana": 4, "whash": "dbbf99bfd83f9c7fcc7f8c1fe05fe05fa01fa01fe00fe00fa01f80140100013f", "card_id": "KAR_710"}, {"dhash": "aaf94df6924109897e032d2eda8cb45963b2c46f98c5248b593723e4c4480f39", "average_hash": "f70fd97f9b1f583f1cff1e3f1cbc19bd0c3d0dbb0dba08300898081800100608", "name": "King's Elekk", "phash": "0d87dbf2fae0ff2c3fd824a24e6001d820986e006761ba11ff21df01bf837fe3", "mana": 2, "whash": "f71fdb5f9b5f583f9cff1e3f5cbc1dbd5d3d0dbd0fba08b008b8089800100008", "card_id": "AT_058"}, {"dhash": "2803fdfc72c76f3fdd46929c68f137384f6cbcdb683431e3e3c4831f551efa20", "average_hash": "07000300011c203812691b610bd18373882786431601072317261f3e063ef63f", "name": "Backstab", "phash": "9fc0f42f6ff01a74b11f0f24b6c14942b3542c98c2ae82b5837bad547f895a5b", "mana": 0, "whash": "07000300011c21fc36691ef35ad1a673da279de797a347231f2e1f3e173ef63f", "card_id": "CS2_072"}, {"dhash": "0cf00ce001cd4b32b43d5069a6c24d05b30e621d1aaab74464bb0f6d339acd36", "average_hash": "ffbfffbf7fbc3dfc34e8df40efc0e7c02340035003c011c0045205b005b40334", "name": "Ironforge Rifleman", "phash": "7ff77b436995dbc07ba613869b62a1b1c94863d829cd72c3bec2169ee6901224", "mana": 3, "whash": "ffffffbf7fbc3ffcbce0ddc0efc0e7c023c003d003c015c004d205b005b40334", "card_id": "CS2_141"}, {"dhash": "0ae00dc9922085133c8e633996f26805d05284341bcb63969f347e689887313f", "average_hash": "f71f3b7f5bdb08703c603c8c300c342814000020082018203c203800380f390f", "name": "Gormok the Impaler", "phash": "c75b7debfb0d3ec67b786378595412f81236388463805b9637a5b314330c590f", "mana": 4, "whash": "ff1fff7f5bfb1c783c603cec756e756d152805220d203d603c203881390f390f", "card_id": "AT_122"}, {"dhash": "ea87fd7ef088c17b8fc71931776ecf589c27708f307e24e4c98f74bd973c3f20", "average_hash": 
"07000300830c88088c0016220664006f986f887f007f007e027e807f827ffe3f", "name": "Rampage", "phash": "a9e2567f2fd5f5aedb922cd8a664892b0a671c9806c98b07c3d8af0437233fa8", "mana": 2, "whash": "070003009b0f8d088e021e26067e407f987f88ff607f427e867f807fde7ffe3f", "card_id": "CS2_104"}, {"dhash": "cade8c3891f30acf37964ffcbfb073c1fe12ccc5d889e3978f373f6c58986138", "average_hash": "670131033101180678023828b82ff837fc3fb83f183f203e381c381c781ef80e", "name": "C'Thun's Chosen", "phash": "ebb5967076a23dbdb3d865931ec803a629e692840e436e72b681ce89f6383bc3", "mana": 4, "whash": "67017103b1817806f802382ab82ff83ffc3fb83f183f343e383cf81cf81ff81e", "card_id": "OG_283"}, {"dhash": "6af0cdc3128705593b026c0c98786fa1d14280849b0b27479cb0f873638cc53f", "average_hash": "f71f3b7ffb5ef87cf83e1cbc1c3abc3c9c3c3030301020202000600080008117", "name": "Old Murk-Eye", "phash": "e9e7efa37b8af6489f5996d09c94323c3132324636cc1625792313a1db0633cf", "mana": 4, "whash": "ff1f3f7ffbfef87efc7e1cbc1c7abc7ebc3e3832103220206010f0048100011f", "card_id": "EX1_062"}, {"dhash": "9add2c2bd1d00ae1b5bc516baa925627acfe185d612ad944b6b94c731384673e", "average_hash": "6fb16fa7eba73deb1ccec7e7c7d583f60356634633c431cd044121c805804320", "name": "Arcane Nullifier X-21", "phash": "ff9ebb8667ccf7983fa439f10b928ca88c61a64933c91b32692273916e0366e2", "mana": 4, "whash": "6ff56fa7efa73feb1ceec5f7c7c583d6035623c63bc421cd04c1258805804320", "card_id": "GVG_091"}, {"dhash": "eafb0de1928e051b7b602c095c34f55062e7c1988365028b273e1ce47048e33e", "average_hash": "f70ff11f711cf81cf81d901d303c083c083e003e00bc003838bc601c4000c00e", "name": "Flame Imp", "phash": "69c5f3f373eefddc9ed41e3c3a5113c209490644eca09e815c984ec16e46e75c", "mana": 1, "whash": "f71ffb1f7b5cf87cf83dd03dbc3c18be083e10be00be083838be781c5006c01e", "card_id": "EX1_319"}, {"dhash": "0ae84dd392bc0d137f1eecf81824f3704085d36e31996402d338a4f0fb43bf17", "average_hash": "f71f7b5e7b5898393cbc7cb83f393c301cb008b0092000000080004000060007", "name": "Savage Combatant", "phash": 
"a505e3695b68fe42dbd473783954005319d39bd339d93aa036cb0583ef99ce58", "mana": 4, "whash": "f71f7f7e7b7998f9bcfc7ef87ff97efc3eb00eb00ba20b2000c000c000860607", "card_id": "AT_039"}, {"dhash": "e8dd2cf341a0cb4336bc4c55b8ea73c5e01ac3351eca7694cf38cc73bc81793f", "average_hash": "071263bfe1bfa033383a00301030407860388430c4006806780778037803f906", "name": "Madder Bomber", "phash": "e3443d31d34c5e1e2fc1c6c80ee28629c9a216e3acb1beb87b3ab6dbed034623", "mana": 5, "whash": "0f96efbfe3bfe4bf3c3a343031700178617c8570c5446d467c0778837903f90e", "card_id": "GVG_090"}, {"dhash": "6ac2ccfc912b0a9e35445bacb7184fb190627c85f1ca87151d3173726285c53e", "average_hash": "97969fb1bbb9f804fc047c037c433c4a9803180600065800c800c000c100c110", "name": "Sunfury Protector", "phash": "c3e7cfb13f97dce59ee113e64ec4022263a243706b407e617c011e0daf89bb93", "mana": 2, "whash": "9f9fdfb3bbbdfda4fc277c07fd473d4e9d431d4e094e4d44c800e082c180c110", "card_id": "EX1_058"}, {"dhash": "0080dd1ff0e3833e3deecadcbf8f33396362c644c89b30e7b7ce4d1fbb1cfeaf", "average_hash": "773e03e00380201c002b9037881f8c1f981bb80198110b102f7c3e7c3638fc3f", "name": "Innervate", "phash": "bf19fe266e663e811b792f80894403180b426c244b98baa57bf61bd9db4abc6f", "mana": 0, "whash": "777f07e003802134002b903fe83fec1f9c1bbc1598110b502f7d3e7c3e3afcbf", "card_id": "EX1_169"}, {"dhash": "8283ed3be0dc01300f80774a8a1e308c410e9f1026e109f278b4b1687b58e697", "average_hash": "fffff3e1fb80bc09de0f0c9e841f02200020022082208030c4b0e4b0e7b0f6df", "name": "Cone of Cold", "phash": "3756db4b73adb454fde83db409b30e1683494bf1495863b5469c76a4d3c61322", "mana": 4, "whash": "fffffbe1fb80fd09de0f0c9f869f42684020032082210030c2b0e6b8f7f0ffdf", "card_id": "EX1_275"}, {"dhash": "9e3f686ee1dcd3b0367b2df2d2c4b90b6700ae32c02448db11a3aa5566a68d2c", "average_hash": "0780c382c309c03dc03fe83fe01fe83ff03be00bc41f800d8005c004c0002000", "name": "Death's Bite", "phash": "39973fc4de425e60b76d87b44c9ca6fe84b120b240f9345b8db1292d5ae85e33", "mana": 4, "whash": 
"87c0c78ac7cbc17dc83fec3fec1fe83ffabbf00fe41fc00fc40fd405c1002908", "card_id": "FP1_021"}, {"dhash": "48f40cd391a50b8336144f2ebc8c7039e042d0a5174938964130876e7e99f939", "average_hash": "f71ff39f791ff83f383e1c3fdc3e1c1c1c041c20ac200c200c000000f800f919", "name": "Stormpike Commando", "phash": "833f69c073819e3c73c11c2e3312ccf003993116bcf19bb6fee9330bcca4e3f2", "mana": 5, "whash": "f71ffb9ffb9ff83f7c3e9c3ffc3e3c1c3c043c30bc200c330c000800f801f919", "card_id": "CS2_150"}, {"dhash": "0af92ce611c80b92b7204d59baec6c39e8925165a38a061558328c689b811533", "average_hash": "f71ffb9ffb9df83df03c1c3c0c3ec83f083e481e481470000000000000000100", "name": "Twilight Summoner", "phash": "6b377b33772173ec9fc4dee4cec480c800b013a8182066313f9333c3f389ffb9", "mana": 4, "whash": "ff1fffbffbbdfcbdfc3c3c3c3d7ecd7f8d7e495e595478540000000001000100", "card_id": "OG_272"}, {"dhash": "f67f88e001594ea23d065b193c66e39c05617cc66008c310023f363c1cf0fc67", "average_hash": "e1c7fb3fbc3dbc3f9c7f1c3f0e7c46729c98380c380c380c14f810780070f07f", "name": "Cat Trick", "phash": "89fb9b56f99672793b0e3caccbdb94e5136cb49ce5b8065b84a114bcaec904c0", "mana": 2, "whash": "c5c7f93fb83fbc3f9c7f9c3f0e7c46729e9a380c380c380c10f810780070f87f", "card_id": "KAR_004"}, {"dhash": "08c6cc0d011b4a34b46851d1a6a2495787ae2c5d71aac244a6b9ed725b85973c", "average_hash": "ffffbffdbff975f104f147e547c903c903c103c003c201c004c005c025a00f21", "name": "Imp Master", "phash": "ffd93b91af11df617f0939c81b8092a421c8636463c8f3f6db96662cb8885a9a", "mana": 3, "whash": "fffffffdfffd77f164f547f147c943c903c003c203c201c004c005c025a02f69", "card_id": "EX1_597"}, {"dhash": "4cfc8cb401710b8437604f47ba9c7cf1e882f105a7095e133c36726e808c013b", "average_hash": "e30ff11fe117f017f039a03d983c383e7c3c783c783c783df81ef81ff81ff00b", "name": "Saboteur", "phash": "69959d34da616792cf620d633cd8224c866c93d11af61b9efc386c7c678b22cb", "mana": 3, "whash": "e30ff007e117f007f019a03dd83c383e783c783c783c783df81ef81ff01bf00b", "card_id": "AT_086"}, {"dhash": 
"bacf2d3f726c48f0d181a3040719de033827c4ecbc91f10b013782e4244c69b0", "average_hash": "8701c303e1c7e087f00fba8fda07f80ff89dd995b93718bf089e081e001c0000", "name": "Fireguard Destroyer", "phash": "db07b3383eaeafc7ae898cc38ed16cd920f1823076e24d11c7386524dbea3aa6", "mana": 4, "whash": "8713e143e147e087f08fb88ffa87f88ff99ff997b9b71cbf08be085e081c0000", "card_id": "BRM_012"}, {"dhash": "f89fec7920ff95ff23f62dc2de10b9797210e762d2cfbf1f072f1c5f709ec17a", "average_hash": "078083003300f001f807fc079c310c390c3ccc380c38fc38783c783cf81ef01f", "name": "Doomhammer", "phash": "237f9691fae02c5c1c10c791932563b60cc90c2986a9e3704ebc6e0d9fcbff73", "mana": 5, "whash": "0780e3807300f90bfc0ffc079c3b0c399c3ccc3acc39dc3a7c3c783cf81ef91f", "card_id": "EX1_567"}, {"dhash": "2a8e251802227c3ef35ca6fcccb9b4634387d60c30319dc97f3242e6008ec318", "average_hash": "c741e1e3e1c360c030c1fbd8fbf9f3fc7ba073b013a001d1d0c150c003080708", "name": "Vilefin Inquisitor", "phash": "f781b3fe1eea9dfdef5233ab1e61c2d400d06c2103a666245f926b619e2dd250", "mana": 1, "whash": "c753ebe3f3c370d030e1fbf8fbf9fbfc7ba073b233a00191d0c170c03308070c", "card_id": "OG_006"}, {"dhash": "0afd856002d90d073b6266c8981879717ac2f304c74b2a9794360b69be83f13d", "average_hash": "f707f16ff1cff85ff85df899b8199839381c781c6016a8062800080000000109", "name": "Shifter Zerus", "phash": "e3bbfbe9fb3e7b367f4c96343b8446c409110e043a3156317741661b7f026698", "mana": 1, "whash": "ef1ffb7ffbdff85ff83df8bbbc39983b7c1c781c681ee806880408000800c10d", "card_id": "OG_123"}, {"dhash": "0280e53fe0c16338dc6092fb31e57302cc3bdee7af9f3f3478ece41e393900e2", "average_hash": "071801c00100003c82608273e233e0734031007fe07ff07ff03ff43ffe7ff47f", "name": "Arcane Missiles", "phash": "b1fa5e8f47f3d0ff3e9cf3048d2a0cf223caa3184a706bd6321d9b605ae031aa", "mana": 1, "whash": "071c01e00180003c826c8273e26be2734031007fe87ff07ff03ff43ffe7ff67f", "card_id": "EX1_277"}, {"dhash": "fcb1cd4382be84c1394f720496a0603f42f29ee43f0b7396c7362e6d00801121", "average_hash": 
"070e1b6efb40f8431c4b7881182000301c303c209c3760377837b81f80010101", "name": "Cairne Bloodhoof", "phash": "87d75f4a5b29cd943c50c9805e1e032b78ac1827c3b672e6b3a576784e8b8797", "mana": 6, "whash": "0f1e1f6efbe9fc437c4f7ce13d6001703d303d209d3774777837b83f81038101", "card_id": "EX1_110"}, {"dhash": "f8cf2c3f61f6ca04b5797e93bfe67367e44e8a9d35ab734687b87a632d87593c", "average_hash": "07108780c183c00fc019e019603fe03ff03fb03fb0073007f005d0077006710e", "name": "Sludge Belcher", "phash": "f1f03c0c0ec797f1af3c06804e811032ccc849ec6dc877e6bbc9766dadec66d4", "mana": 5, "whash": "07108fa0c3a3c0afe03fe01de07fe07ff07ff03ff02f70077007f007f106791e", "card_id": "FP1_012"}, {"dhash": "08ff0dfe92c0a9017706ae0c1cd1a06d47bf8c4cb091cccb13b66fe4db40b7b3", "average_hash": "f71ffb5fd9dfc8bf18bc1abc0b3880a101a101b203b800bc00b8005800000000", "name": "Thunder Bluff Valiant", "phash": "2d054b8373aafb74b27066696f4086dc20903e32ffa659491db996971dcbaa9c", "mana": 5, "whash": "ff1fff5fdbffd9bf9cbf1ebc0fb886f105b309b203bc02bc00b8007802900404", "card_id": "AT_049"}, {"dhash": "8aff4dff7082e3348c6b19d890207f4fc088a30d5f9becac021bc4f01b61ff4f", "average_hash": "0700414fe1bfc07de47f24296013e01f6008f068c86e80efb0f32043ac60fe6f", "name": "Living Roots", "phash": "b96a7bdd9fb7fc6b6ef59abc074226782924342623d9461b4b843944d9685684", "mana": 1, "whash": "07084377c1bfc07de47fa43f6013e01f6008f068d86ae8efaa53b043ea62fe6f", "card_id": "AT_037"}, {"dhash": "bcf02d6792b84546ba296c969d6c76d9e822b12463cbc49703375f68bc807938", "average_hash": "670f637efb5c105fb057ccbb4c3f6c3e7c3e3c3e0c3c083c083c181818001900", "name": "Justicar Trueheart", "phash": "e98f7b20f6014f527bc41c4c3ed082c049a499648be517c7370f3639f739d3e3", "mana": 6, "whash": "ef1f737efbde907fb037ccbf6c3f2c3e7c3e3c3e0c3e0c3c083c181818001900", "card_id": "AT_132"}, {"dhash": "e67f78d3f3806638cd659acb3c9f7982a364c4f93f92ff04f00f60d1d7273f7e", "average_hash": "07c6031f407e007d8279803880381e1802180070f800f803e00fc007e007fe3f", "name": "Explorer's Hat", "phash": 
"29696c69d99ed6ae8ea5d2b5b6739c9a275bd29aa4de30ae04ee806eb0740865", "mana": 2, "whash": "07e6039f437f227da379a339a039de38c2180270fb03fe03e01fe00fe207fe3f", "card_id": "LOE_105"}, {"dhash": "ca81dd3f10f8c3e4bde17b12fe4ed81f87bdc23904f26ec4bd3972038b4cf1bf", "average_hash": "07180140f183e047c067806fc0fd80f800fe0a7c0e604e44cc49f00ff811fc1f", "name": "Claw", "phash": "39aafcfcd2d52ec6ed656dfa0e9699110dea2c2529ca5b80db7127c74b821229", "mana": 1, "whash": "071c6360f183e147c067c0ffc0ff80f882fe8e7c0f604e64cc49f20ff81bfc1f", "card_id": "CS2_005"}, {"dhash": "3cf96cc241cbcb1eb7004cf9b8c26707ca2a38dd70ea8144833904720b841d2c", "average_hash": "c71fcb9fc3bee03ef03f303cf031d02f800f000e000f10061000000000000100", "name": "Boulderfist Ogre", "phash": "69f369856f0f5b4e9ec693e1c6a000388198d998e6b8a6b72cc3dbf3b2a332b8", "mana": 6, "whash": "cf9fcfbfc3bee4bef4bffc7cf177d16fc36f014f014f91471c04048001800120", "card_id": "CS2_200"}, {"dhash": "28f62dfc42e2c90cfe37a46ac9c4b388261efb6fcec5188961378fe7fc4ce33d", "average_hash": "c70f834f811f001e00f8ea18e498c092c016e99ec999c81b009f101ef01ff21f", "name": "Tundra Rhino", "phash": "2925be22cb989fdb7b428661270164a498c437834d70daa4ffa71f457eeb98e1", "mana": 5, "whash": "c71f8b5f811f003ea0f8ee38ecb8c59ac016e99ecc99c81b80bff01ed01ff01f", "card_id": "DS1_178"}, {"dhash": "02cfcd7f20e9633c9e403f932d26760cdc18f1a06647c8888107024e0480fe1f", "average_hash": "671ec1ffe19ff03ff67ff63fe61fe60fcc0fcc0e840c0c0f080c080000000000", "name": "Eye for an Eye", "phash": "497ef1f8f6cd5cd776ec8e891f38a6664bc036386b744a8d73d05a041b861668", "mana": 1, "whash": "f79fc1fff1bff03ff67ff67fe63fe63fd41fcc1e8c0e0c0f080c080000000800", "card_id": "EX1_132"}, {"dhash": "e21f18f070c3d72623014c0893131f473c7c60e0fd0887217823cec7008cff38", "average_hash": "0df043cf020f241f241f00000000001e031e0e1fbc1fe01ec01fe01ff01ffd3f", "name": "Sacred Trial", "phash": "497db6bef5de5f0bb5a7f4af87f70dbc252ee13d87522c00811788056714c801", "mana": 1, "whash": 
"0ff063cf830fa51f251f01050106011f031f0e1fbe1fe31fc01fe01fe11ffdbf", "card_id": "LOE_027"}, {"dhash": "0280051fe4fef1eca7d94ff29ac3348d493ad3fd3cfc4070882703cf2600fb27", "average_hash": "ffff0df081c2c003c403e511e1b5c5f4c53581f9d9ff1f9f07be03fc0b20d707", "name": "Light of the Naaru", "phash": "3dd7b1fedf21cd23e5f66d9e43c929490be8c902713352955be01fe0518a5298", "mana": 1, "whash": "ffff0bf08182c003c001c515e1b5c5f4c5bd81f897ff0f9f07be03fc0300d787", "card_id": "GVG_012"}, {"dhash": "8a8ffd7fb0e3c1c737265b089e703498e1b0c0c5f7093ff38e8d021e8c31f90f", "average_hash": "0700c101f107fc0f7c0e3c0e3c3e5c1e5c1e3810381c900ef407b6011802fc0f", "name": "Cold Blood", "phash": "cbf67bbeb6c93c6d3c872ef14e7b06984946cc7423a8db844b49d20ca5a48b94", "mana": 1, "whash": "0700c301f107fd0f7c0f7c0f3c3e7c3e5c3e3c143c1eb42efe0fb6113803fc0f", "card_id": "CS2_073"}, {"dhash": "52a6e84ca1c95ebbb936564b2d955279a4b24f25b242e65d89bb5b662394c57f", "average_hash": "87ab83a99109904cd00ef05fa05b98590818c816c016401e401e400e6106c17f", "name": "Wobbling Runts", "phash": "792f9751775a9e84bf3d76cdd92642462dcb4236992122cc1d689a3133a68ced", "mana": 6, "whash": "cfab13a996a9944cd8def45fab5b9b591b588d56c056405e401f401ec107c3ff", "card_id": "LOE_089"}, {"dhash": "00800f1fe4bc31f0e4c7cfe498413b91463a07ceec0c98d130b6e1c8ceb1ec0f", "average_hash": "ffff0bf041c080018007891fd98bc183c1c38183c081c18bc021e1e179e1f997", "name": "Thoughtsteal", "phash": "b1574d82e71035f9cd4eaf544b01813259cd334043f77bcc79e45acc5b6a762a", "mana": 3, "whash": "ffff8ff0c1c0c1038007099f9babc1abc1c38183c381c18bc1a1e1e1fbe1f9df", "card_id": "EX1_339"}, {"dhash": "8aec0dd1028e0b187f3eaeec5831b3e07186030f1a3496c87cb9e1e20345673e", "average_hash": "f713fb737b5838b818f87af83eb83bbcf13ce3926380208120c1808000000600", "name": "Infested Wolf", "phash": "656daf2d9abaf38ebb749d9192c011630991d6d813746a506f8a568abea52da5", "mana": 4, "whash": "ff13ff737bf878b83cf87ef83ef87ffcf9fcf3926380608160c180c002800600", "card_id": "OG_216"}, {"dhash": 
"c8fdcd8093190c3f78e328544298cff01ac715083f044208983910e2be476336", "average_hash": "771bfb7fbb7838c07cc01ee19dc01ec07cc17fc1f3c1788038c000c020800600", "name": "Floating Watcher", "phash": "c7838703ed7073e8ebf01e423b947198424816e41eabeeccbdb14f657a2ef070", "mana": 5, "whash": "7f1bff7fbb703fe1fce11ee19fc05ec07ec17f81ff81fc8038c004c030802604", "card_id": "GVG_100"}, {"dhash": "cae78ccc11990b02341c58a1ab4846d5b8ae6d1d532a8ac576b8e9735986872c", "average_hash": "f79fffbffbbdbcbd3c3c7c304d6641464040600a010600412001200741010100", "name": "Spellbreaker", "phash": "ebf86b61bb197d897fc69eb4e62c62680cc109e2098436cd6e62539acd61f332", "mana": 4, "whash": "ff9fffbffbbffdbf3cbc7d706dd641c64348615a214e01412001348351810320", "card_id": "EX1_048"}, {"dhash": "429b886601dd1aba3977528ead9859f181e27fc5dd823a1cf4396663c196e17f", "average_hash": "f700f102f805f80df801fc01fc003c019c00f8007811b8019001e001f001e17f", "name": "Naga Sea Witch", "phash": "835b63727970f694efa496b96d2c32568c33c68c39a68233358c8ee555987ee2", "mana": 5, "whash": "ffa2f9aaf805fc0ffc0bfc41fc417c41fc41fc017811b803d803f005f103e1ff", "card_id": "LOE_038"}, {"dhash": "b23ce161e40ff9f37294e706def93d077e36dc6cfed1cc0b017600e4110ae37f", "average_hash": "01c101c400ccc0801a87038e038ff19f839fc38fc38fe8bff09ff0804200460f", "name": "Babbling Book", "phash": "5915dc7ba6bab9bfbe4be6dd9b46de27a1e5b2c054382414797263489905ce00", "mana": 1, "whash": "01e101c400cec0c51b8fc38e0b9ff19fc39fc39fc39fe8bff89ff0804308c72f", "card_id": "KAR_009"}, {"dhash": "80815d6e3051e3a71c500baad67ef9d4af353a2947100026bc4f42810c3cfd7f", "average_hash": "073043c0838c20257025082e042a96c9cac6fa6efb387b00737d130c1360fb7f", "name": "Drain Life", "phash": "eb021eddff5177041d220ecda3048b0291a80e75a95c9e276bc1aff15bea33e5", "mana": 3, "whash": "073843e0a18ca1667426082f042bbecbfad6fa7efa387300737d130d1368faff", "card_id": "CS2_061"}, {"dhash": "9ad42c3b617c4b11b64648cfa1b457cbac2adfdd98eb225761b6cc684b83873c", "average_hash": 
"07950fa343a6c5b790739441b54129618150013b013bc031003b803ba10d811d", "name": "Mogu'shan Warden", "phash": "bdf5dfe65b97edc4679bc69889c188c02083c1b4c9d856b26d04cdd867836f21", "mana": 4, "whash": "0fd50fa3c3a7c5b794f39541bdc129c18350017f017b45f1047384bfe58fc33d", "card_id": "EX1_396"}, {"dhash": "f8f18cc5114a0b003675484bb90e6799cc22e17508cb7c17ff36e66d01931d3e", "average_hash": "471fbf9ebbbf983dcc398430843414260832c0180c000837c037f0074006210c", "name": "Garrison Commander", "phash": "89b76fc9db80dfc56f6493f42c68304c88c61ec629919e0dd7087e26bd81cf8d", "mana": 2, "whash": "ef1fffbefbbfd8bfcc398430847d54661876c03a0c220c37c037f0376106310c", "card_id": "AT_080"}, {"dhash": "88e70ccf81080b6236644cd9b9206741dc4aba1d382b00c607b08f680387c73e", "average_hash": "f71ffb99fb9bf839f83db033f0332037803f802f002f00070001000000000104", "name": "Stampeding Kodo", "phash": "a9f563096e2b594a7b4a96c84bd022d20692c6cc336cbe24db265f366e32bee6", "mana": 5, "whash": "ff1fffbdfbbff8b9fc3dfc33fd73f977a17f806f012f0007000300000100010c", "card_id": "NEW1_041"}, {"dhash": "4af98d7693c905877b6ea7905c30a3734ce7f80ee2d5870b1c3620e487489bb9", "average_hash": "f707f90ef94f385d78dc3b9c3cb818c618a718be01be40be609c401800080000", "name": "Bladed Cultist", "phash": "e9a3f3bb33fb79769e74376c2f41409812848c603c997b01db58d70137249dd9", "mana": 1, "whash": "f717fb1ff95f787d78fc3fbc3cb81ca61aa718be01bec8be60fc605800180000", "card_id": "OG_070"}, {"dhash": "8a870d7f30d00180651acef09c0c6f0171f29104c6093c5670ac877a8e95fcbf", "average_hash": "f73fe7f3e38fe10fc00af808e80f380f9817dc0dc400c021c00388210810fc1f", "name": "Mark of Y'Shaarj", "phash": "7b5f7b8a3fa03fb0cfb7ac6827ec0c4689904e5c4b103e814be05f8073cbbc2c", "mana": 2, "whash": "ffffe7f3f38fe30ff00af808f80f380f9817fc0dd41ac061c04388291810fc1f", "card_id": "OG_048"}, {"dhash": "ccf70ccfd1160b6d36024c9cb17046d9b1026f851b0aa6150835c16a3397dd3e", "average_hash": "f71cfb9cfbbd583a783b1c383c207c00fc18fc1078007800000000200014011e", "name": "Disciple of C'Thun", "phash": 
"634d63897d1073787372d6f09ef0a6e133ee528c9c370f877963783a9920d330", "mana": 3, "whash": "ff9cffbcfbbf7cba7c3b1c383d60fd40fd58fd58fd407d4004200020011c811e", "card_id": "OG_162"}, {"dhash": "1c952c786080d4ef23fe2721d0c4ae837f0dda32f0a6c45933af6f50daa7bd6f", "average_hash": "07c30fa3874a05407806f883fc01e004c38fc017801f001f84498501a901f921", "name": "Eaglehorn Bow", "phash": "5b7a77449e191d60371633d9c3c492b44cc10c2a14307b59f3e1ce2cdf3c3e3f", "mana": 3, "whash": "07c38fe387ca07407d86fc83fc83fa84e39fe017801f801f845ff543e901fba3", "card_id": "EX1_536"}, {"dhash": "66e0c88485681f933e0e5d68b8906521cb6284c53aa30d5d7abae1530787cd3f", "average_hash": "dbbe793c383c9c3f9c37fc79fc75bc7598235021101010008003800f010cc13f", "name": "Menagerie Magician", "phash": "a9f7e1176c0776e57b5b373b676699cf99b148333132c6788909963490219b19", "mana": 5, "whash": "d9bf79bc383e9c7f9c77fc7dfc75fc7798735821503890108003800f010ec13f", "card_id": "KAR_702"}, {"dhash": "1af12d405299c963ffc6ac8959d3a63e001e7bff86346e48f8bbe1e60347073c", "average_hash": "c73fcf7e03fc00b910fb86fb82f783ee010283848393c0c4c0cfe0cf820f0606", "name": "Scavenging Hyena", "phash": "7d33af496bcf73b2de52998683a0936c10e892100c9466d2ff2d6d037e9af661", "mana": 2, "whash": "ef1fcf7f03fc00bb14fb96fb86f783ee0186c3848392c084c0cfe0dfc28f0606", "card_id": "EX1_531"}, {"dhash": "3acf392c6272c4fdb88af37587a34d679a0e351d5a3afc44d8b981736387c73f", "average_hash": "cff28fe107e105e044c147c043c0a3c123c18340c3c2c3c7c4c7e5c7e5efc7bf", "name": "Deathwing", "phash": "7fea87882f161e60af9943f41ae740fe99ffc26faae74e6447811b811b0017c8", "mana": 10, "whash": "cff28ff10fe107e0c4e147c0c7c0a3c523c0834083c2c3c7c4c7e5c7e5afc77f", "card_id": "NEW1_030"}, {"dhash": "2cfe8d01d24504fb39b2734487085e19b022c31da6ab044719b493682385c73c", "average_hash": "f71e3f7f1be79944dcc7dcc7dd47cd478c00c020813000300010001801088108", "name": "Malygos", "phash": "19e6cb22ed186cd07b4b196d3bd2664366a366f332b3662636661266938716f6", "mana": 9, "whash": 
"ff5e3fff9bef9d64dc47ddc7dd47dd4f8f58c134c1704170003004980188832c", "card_id": "EX1_563"}, {"dhash": "0883cd3ff0e0c18cb729585220bd757ddbdcb6b30fc63b88f78ef83fa13b066e", "average_hash": "07180340c383781e0c3c04200800042036612e788e70bc03fc27f63fee3ffe7f", "name": "Recycle", "phash": "2b5c6e077be4bf213f089a950e1a2970c2594b2547c0db143be51b5cd9f1d39b", "mana": 6, "whash": "071c03e0e387790a2c3c04240a400c2036610ef8ae72fc07fc67f67ffe7ffe7f", "card_id": "GVG_031"}, {"dhash": "c8e28dc4122d094f7c14b3f0e0ec83db2eb7dd0ab845e389843b37f66e02d9b9", "average_hash": "371fb35d39dc381e786638008881c9c16182f8a73837241e00ce800ef805f801", "name": "Ravaging Ghoul", "phash": "cb23ef686b734d1c37c50c1b8b90d3f658ec9b8609cd6eb1ccbc53c39036198c", "mana": 3, "whash": "371fbb7db9dc38be78663c208ca1c9836996f9b72c9f241e001fa88efa05f801", "card_id": "OG_149"}, {"dhash": "cce08dc10283853c3f7769528eac783173e2c6c49fcb64173b36ca6ca699693b", "average_hash": "f70ff37f7b5a7058f838fcac7c1c3c089c381830dc301c30183108381818191b", "name": "Bolf Ramshield", "phash": "a3cf7b38f9089f309b095b385b94305419c14a1cfa88de34dbf493a72d2fb1a5", "mana": 6, "whash": "ff1ffb5ffb5f7058f8387cad7c1d3c081c395c30fc301c32583148381818591f", "card_id": "AT_124"}, {"dhash": "0a804d1e90b90172b78cfe53b0ec00f31bcee7074f494af00740740ef11cea37", "average_hash": "077c07c083819109b03ba03d6071e0007818780e6d03ed01ff00ff28f638fe1f", "name": "Sabotage", "phash": "e359bc2b3fe0d7de6dc6273c099ee36143611cd2034a21b1cb2d36d69849f68c", "mana": 4, "whash": "0ffe07e08381930bb03ba07d607de0207808fc4e7d03fd01ff00ff28f738fe1f", "card_id": "GVG_047"}, {"dhash": "ca810d2ef0d880fb37a34f569e642ccd739ac424b3096c92c06798cf21987eb0", "average_hash": "077867c0538019009c079c07ec3fec1c641a6412e024e026003e003f001ff01f", "name": "Blade Flurry", "phash": "6b57160bfda1f6f03fe80d3e93978ced036309538e8d9c906b8396f0c960b334", "mana": 4, "whash": "4fffe7e073808b039c07dc3fec3eec1ce43e6412e036e03e203f003f803ff01f", "card_id": "CS2_233"}, {"dhash": 
"88d80c33514c8a9034614fe1b94c721ba7b2cf6592cb28975f37c36cac99b937", "average_hash": "f717fb87e987e006a0018021c033c032c003d813d803d833f839783ab819711f", "name": "Silver Hand Regent", "phash": "b3277b106b99f3a47be0a721b2782289269812763be4deec6ef3333399249ac8", "mana": 3, "whash": "f717fb8ff987e825a0059029c033c072c813d80bd803d833f83b783bf81ff91f", "card_id": "AT_100"}, {"dhash": "c2eb008e85391f62be4c5d1cbe915de6b49a80657f03d8de123ecd6d36984932", "average_hash": "f11af119f031f8333033303c383c101e205f6063c86f803f8039083d583e9934", "name": "Arcane Anomaly", "phash": "e9f9f3693237395f6f266cdbe7dbd60c993189c3a61e2068184236cc6c183781", "mana": 1, "whash": "f9bbf119f031f8333033383a387c101e707f607bc82fa03f803d283d793db936", "card_id": "KAR_036"}, {"dhash": "d23fa94c1690f86b735fe5f8c2811f072e16792cd8f465d89ff120e2010dd77f", "average_hash": "67eff1cff0cbc0c173d07b88fb84038703820381038300c2c0cdc1c18300c77f", "name": "Ethereal Conjurer", "phash": "5529e31929fa987eff1463126f866e060cec0ee415af06449d09aad993f3df64", "mana": 5, "whash": "efeff1eff0cbc0d1fbd07b80fbc653c703820381038300c7e0cbc3c98300c7ff", "card_id": "LOE_003"}, {"dhash": "7cd8ac7741ff42ecb5c85f91bf207f01a002480df72a8e473036406894807d39", "average_hash": "07000380618cf00ff00ff01ff83ffc3ff81b1804e01ef019b019000100006100", "name": "Alarm-o-Bot", "phash": "eb9ddb843e0326338e85999973828ee323f919e6dbf2b63939cd4866a6812469", "mana": 3, "whash": "87110ba463aef0bff01ffc1ffc7ffc7ffc5bf816e01ff039b03f000901006100", "card_id": "EX1_006"}, {"dhash": "caf04dc5d29485737e472c9c5170a7815963c7c61c85378b9b3620e4d448299b", "average_hash": "b70fb11fd918983818b11cb13c207c201c389c301c3838b0b8b8981948190018", "name": "Buccaneer", "phash": "a385db2a78a97b72dbd8c3989ed4199359d0d20c2769bfc11e4973cc3284c74d", "mana": 1, "whash": "b71ffb1ffb58987a183b3cb13ca07ca09cbcdcb81cb87cb4f8bc98196819001a", "card_id": "AT_029"}, {"dhash": "c2b0884101865e0c381a50ffa2995d32b34401cd68a23f5dfeba50618396c57f", "average_hash": 
"3faf7baf79ae74400c400240b841b845f85a280030483050f007c0010100c3ff", "name": "Tomb Spider", "phash": "e3fbc75cd95ccb70fb331bb3bcbc3e7629a20699119a22e405a00e9213870f6d", "mana": 4, "whash": "bfbf7faf7eae74400cc04340fb41fb55fb5a2b4031483050f597c001c521c3ff", "card_id": "LOE_047"}, {"dhash": "78deadf8522229c97615adea16b0ad6573dbc42498c1300b4136b3edcc4b09b7", "average_hash": "c703e347f1cf7896c8a668b6bb0df02fe4bffdbbbd3318b30892880008030007", "name": "Tuskarr Totemic", "phash": "99157b6a9ea6bf2d7b581ec28e44269618c48685e6b677942da45c719bd32ac9", "mana": 3, "whash": "c713e34ff14f7897589468b6be2df0aff5bffdbfbdb318b30892080108030007", "card_id": "AT_046"}, {"dhash": "0afe0cfc01a04b00b7345e41bcdc5931b0e2ecc5d92ba3d64e311b626480193c", "average_hash": "f71ff19ffb9ff01ff01fd01c881c981b18181018101a000a080a180618031100", "name": "Dragon Egg", "phash": "eb697bfddb96df9cbfa30da26ef006c226699b842c1c3a60bb0193616e821e0a", "mana": 1, "whash": "ff1ffbbffbbff0bff03ff43e8c7e985f18583838103b100a0c0e180e99031100", "card_id": "BRM_022"}, {"dhash": "eabffdff01100f601ed03c007800f000c5c193832f737ce4b219640fc838bf67", "average_hash": "0700e10ff33ff07ff87ff87b787838701c608060e061f041fc01bc3180319e21", "name": "Renounce Darkness", "phash": "a132f9ec5b91765a8ed6e6b9306e8cf3c6e8a41027cd9c23cb5433244f406f8f", "mana": 2, "whash": "0700e30ff33ff87ff87ff87ff87838701c60c060e061f041fe41be318031fe31", "card_id": "OG_118"}, {"dhash": "88b02d6752f33d047312a56cce998c3b38f76eecd150a089e1329fe7728c4f13", "average_hash": "f74f63ffe1dff8dff0df9bcd9b8c8b8c0b88038b0380088200c740d463100700", "name": "Tuskarr Jouster", "phash": "5d8dd95aa3ecdd1c3f5218c22f4121ac08c03339999c3ec5bfb39a7938ae26d3", "mana": 5, "whash": "f75f69ffe1dfb8dff8df9bcd9b8c8b8d0b8c0b8b3389008208c740d463104f00", "card_id": "AT_104"}, {"dhash": "cac6859d126609597e862dfede28b1d16033f74282450c8b1a3b31f6660cd9b8", "average_hash": "f701f111f903f816b832383c7c3d3c3cdc3cdc3ec83c881cc818c008c808f008", "name": "Warbot", "phash": 
"2327f3b4f2783b7b6fc92c996bd01c274630590461587e46dcb0db00b7d337c9", "mana": 1, "whash": "f719f911f953f836b8303c3c7c3d7cbcdc3cdcbec83dc83ce81dc019c808f008", "card_id": "GVG_051"}, {"dhash": "c8fcade9c2d24d267e4ee0901161f7c62e7dd0beb4494b82863f59f73b4ce310", "average_hash": "3717337f215f307d70f87ae3736342f302bf02be0db40d3560bc60dc7010e000", "name": "Druid of the Claw", "phash": "ed9cdf8163789f337a60d3c0877141e12091aee3ebc0db811fab34b43c0e3b5c", "mana": 5, "whash": "37173b7f235f307d78f87ae3732342b302bf12bc0dbe0d3f60fc605e6018f000", "card_id": "EX1_165"}, {"dhash": "c8ef0cdf018a0b36366448c8b0944091833a097590ea26957f324773f8861538", "average_hash": "f799ffb9fbb9fcb93c383c389d400d4005400100050004500c300808090c010c", "name": "Fencing Coach", "phash": "273de32163017ea45ee2066b9eb062c83cb406de3be7fae2ee784e4e66a221e4", "mana": 3, "whash": "fff9ffbdfbb9fff97cf8bd789fe00dc0074001c007e005d00c700d980d8c032c", "card_id": "AT_115"}, {"dhash": "fd3f6297897b36a24d009f7c2e1d79d8807050e0ef1b26e6c90f36415c909e7f", "average_hash": "07e0e693f837f80ff82f187cb83d3c7e9c3e1e1c7c3e423a1a3e980df007fc3f", "name": "Anyfin Can Happen", "phash": "2b56b50832b03e2417bd1c227d48469b9d4cb33b8c93b426853f8c96fe97a977", "mana": 10, "whash": "07e0e69bf837f80ff82f1a5cb83f1c7e9c3a1e1c7c3e420a081e980de007bc3f", "card_id": "LOE_026"}, {"dhash": "daf9ed62129d04f279c5e3360f486f82d70cbb1d703ac240a4bb48f2114e471e", "average_hash": "2703616bfb4db849f0c7f8877f0ff80ef227e207c107610e608d60456004000c", "name": "Malorne", "phash": "fbca9f6feef9ff6cbf66474c1e869130085006820799cf04cd614361db02dbc1", "mana": 7, "whash": "6f13e37bfb4df84df88ffe87ff0ffa0ff227e207c187600e608d60476004000c", "card_id": "GVG_035"}, {"dhash": "0ab84d52808d3923734fe61a8c6714cf009e073c3b50c6cb9c337be6ec88b121", "average_hash": "f7fe5bee0bde08d80ad803d827dc23cc43806382638761e444d8f9d8fb107b00", "name": "Shifting Shade", "phash": "770f9d63a3635176ff302933db90336900d0d39118ed22305f17da0cef27de15", "mana": 4, "whash": 
"fffe5dfe09fa00d80af80fd807dc23c843c06380638765e468f8f9d8fb187f00", "card_id": "OG_335"}, {"dhash": "0cd90c360168cad0b50d73f7a66e4dff929ae5259b493816fb3896723487e93c", "average_hash": "f713f3a2e382800700070004c007c00f700b7c3b6c33ac21fc01f8007804790c", "name": "Dancing Swords", "phash": "f3ce1c34f727a36d7bcc1ae38d94c3ec99c91c1eb3e15adabc4c29f90006d240", "mana": 3, "whash": "ff93ffa2e3a6808704070005c147e14f714bfd7bfd3bac21fc03f8807904f90f", "card_id": "FP1_029"}, {"dhash": "80bf8dff30980f007c047d81fb26e0ddc01b33e7048e191ce63bc8f783af3f46", "average_hash": "073c03ffe1bfe07fe07fe07e007f407f806f806c884118412043a043807f8e0f", "name": "Spellbender", "phash": "b93a69405f0096ab0e46c48c0633343a87c899876b7ced7563996a957bf3d9ec", "mana": 3, "whash": "379c82ffe1bfe07fe07fe07ea07f407f807f807d8841b0412043a063807f9e1f", "card_id": "tt_010"}, {"dhash": "98d46d29c21fccfeb9e077018ca6590ddf32f7e4c80b99173a36766dc093393e", "average_hash": "6700036003c02040a0c180878409c009c40d9c0fbc39bc39fc39f83df80ff91f", "name": "Harrison Jones", "phash": "2b5bde88e784a7497ec43195cb604382233ce29822e45e3fe7c59e634f0b7dc2", "mana": 5, "whash": "6f10076003e02040a44584d7c559c159cd1c9c3fbd3bbc39fc39f83df91ff91f", "card_id": "EX1_558"}, {"dhash": "3afc64f0d1860b9135315f66acee59e5b39a1f956d0ade173837706c6080cf3e", "average_hash": "c707c19f491fc81cc00c401c801dc019e001f00df80ff819f819f800c000c11c", "name": "Angry Chicken", "phash": "73ff9d96dfc7d3f83af89c8dcfe232b0273012038fb13614ce041b09db017c48", "mana": 1, "whash": "e717c9bfcbbfd83cc81c401cc01dc44de45df40ff80ffc1dfc39f811c100d11c", "card_id": "EX1_009"}, {"dhash": "cae1ccc311044b48b77f4edabd107e31f862f04560cb00950fb0ff629987113f", "average_hash": "371e199e7b9820300038bc3cbc3fbc1f1c3f1c3e0c1c08040000300030071107", "name": "Gilblin Stalker", "phash": "c97d6f65f6c45ec6cbe35b780c13c0f01cf0cb90d310d31463107703fb867f26", "mana": 2, "whash": "3f1f1fbefbbc20b80038bc3fbd7fbc7f9c7f1c3f0d3c0c1c0000f0033107110f", "card_id": "GVG_081"}, {"dhash": 
"e8f5cc0b11bf037736ca4c06b21c63f9e4c2c9058e0970961f3a2976d085fd3b", "average_hash": "770c7b8e7982381bf833dc379c313c03fc107c30383000306810281800080106", "name": "Demolisher", "phash": "c39dd3c17c8066e05f761c8eb9d623a98ba10c7937fe3cf39344c6c726a22631", "mana": 3, "whash": "771c7bbdfba1f83bfc33fc379c373c73fc31fc383c30083c68142818010c7107", "card_id": "EX1_102"}, {"dhash": "58feacf941e64b04b6484e99a8627097c14a07353cead8953139c7728a83253e", "average_hash": "87978fbfe3bfe0bff03f703b70196030d420f40084018c010c01080301030102", "name": "Frostwolf Warlord", "phash": "bbbd69249d0d16b36ee286891b39836025a6709636639ee1af87be1cf6a8c662", "mana": 5, "whash": "8f978fbfe3bfe4bff4bff43b7159e978f560f540b5418d410c010c830183012a", "card_id": "CS2_226"}, {"dhash": "6af2c4cc91110b23b6fc50d1a7024e87fa7efcfd912b0046deb831736386e53d", "average_hash": "979f1fbd9bbf9cb93430f401c147c1464177213f313600204000c0004100c100", "name": "Tournament Attendee", "phash": "fdf389f16fd3cde159ae527aa6b8a06c843646321b019e4973624932bc29f34c", "mana": 1, "whash": "9fdf1fbd9bbf9fb934f1d541e5c7c1c7437f217f397411e0c040c4804580c320", "card_id": "AT_097"}, {"dhash": "0ad96c6251c50a0ab7104f2da6524ca7b9cef79db03a42479cb5016ac79b0d36", "average_hash": "d717c3ae6b8ef01c601e003e901eb00e103cb03f201fe02ec00fc00000020100", "name": "Infested Tauren", "phash": "69f26f6636063b9cdfa532e28da4206d22c846e23369a4e56e99da29e722cf8d", "mana": 4, "whash": "ff97efaefbafe4bc343e803e915ef14e117fe17f215fe16ec00fc00801060100", "card_id": "OG_249"}, {"dhash": "f838dcc000c11f9e274e2c22d970f699d930e3428e87351fcf261c5568ab416f", "average_hash": "c787730f331e381c383c3c3e1c32cc34c83cd818581218173006f000e001e007", "name": "Tentacles for Arms", "phash": "eb774f38db381bc437069b34894c210b19d2140386a19669b764f3b29fbb5e1f", "mana": 5, "whash": "c787778f3b1e383c383c7c3e1c33cc35cc3dd83cd83a5817b817f006f007f007", "card_id": "OG_033"}, {"dhash": "08f0edc1d225bd49722d805ec1fd8ae355879b0ce5110e0bc83630e443897f12", "average_hash": 
"b75f1bff19de08fa0cf00fe03fc07fc0ffe0ffa07bb070b000f000d003000700", "name": "Warhorse Trainer", "phash": "e725c90b98e8f1d0db5ad4383e10389c23e4c3a5c3fd32e67f6542f2c2b323d4", "mana": 3, "whash": "b77f2bff1bdf18fa0cf02fe03fe07fc0ffa0ffa47fb071b000f000f003100700", "card_id": "AT_075"}, {"dhash": "caf185c0128305027e4d2c9a90b0638946f2b76d0f93d88ae1351ae7e84cbb31", "average_hash": "f70ff15ff95f781e383c3c323c313c318c20e001c001c001080c480c2007f003", "name": "Acidmaw", "phash": "e39deb6c79e27bca3b589c5893d04c3404753209628c1e5757c3ed99262e9673", "mana": 7, "whash": "f71ffb5ff95ff87e383e3cba3c31bc318c21e921e801c801582d580c381ff007", "card_id": "AT_063"}, {"dhash": "08cb2d36c2d904e03bed67f39e844311866eb89c506b27c6c731bf73f88fe13e", "average_hash": "f703e363c14c704b2055e89fec3fc80f0007002f003200021006d803f81ff91f", "name": "Nexus-Champion Saraad", "phash": "d92e379a73339e603b2603856d90cac48e8923cd6be25be65fa30d333cd3cc38", "mana": 5, "whash": "ff13e363e3ed704b3c7ffc9ffc7fc84f0007002f003a00021806d807f91ff91f", "card_id": "AT_127"}, {"dhash": "c23fb8ffc0ce35796b825e2eb9146609ec32d2e18dc33807f7abfc5f01b00c7e", "average_hash": "01c0e107f00ff81ff81fd83ffc37fc274c350c31a8319811b01ff00fc007f00f", "name": "Firelands Portal", "phash": "abbf71077868b4b8965edd7086a57ceee6e3b318cc690c966d1e8c20a7a78064", "mana": 7, "whash": "01e0e187f00ff81ff81fd83ffc3ffc274c354c31a8319811d01ff00fe007f81f", "card_id": "KAR_076"}, {"dhash": "02c0e526e40901f88e745f9b9a6620856bdc8ee0fd01f71380a31d4d7f9fe03e", "average_hash": "ffff81fcd19cfc009e00c6216430fe00f7203f03be07fc0f7c0e7c047d06ff0f", "name": "Holy Smite", "phash": "43f535ab4da3dd29bd943dfe9bd886906348c3a44a1d2b9d31c15b2b5968f890", "mana": 1, "whash": "ffff81f0999cfc009e00c6116434fe00f7203f03be07fc0f7c0e7c047c06ff0f", "card_id": "CS1_130"}, {"dhash": "c287cd3f30ae6178fe80dc03bfdb7820c17d8a9b1467588ee71c9b3860f8e8c3", "average_hash": "471ce1e3f183f013f017e003c82c86700070327092f10c71647cf87e4078207f", "name": "Repentance", "phash": 
"b1f0f3757bed9df2cc730e9e0e9de12528782b53c3c10cbc5360539859c358c8", "mana": 1, "whash": "771ee3e3f183f003f017f003cc2ca671027032f196f18e716c7df87e407c607f", "card_id": "EX1_379"}, {"dhash": "baf0edee9299490277e5accc1913fa2d7637c9cc8091f10b27371ae4344b63bc", "average_hash": "0714014c13dfb0bfb0b98abb831b00b901bb89b71bb018bc189c585070186018", "name": "Whirling Zap-o-matic", "phash": "ed859f3166eaced73e0a1a092611a2844878f2e09ba1d90d3f876d09fba1beb9", "mana": 2, "whash": "071c0b5c13dfb0bfb4bd8ebb87bb82b909bf89b51bb018bc389c785a7018741c", "card_id": "GVG_037"}, {"dhash": "4af48d4912d308367efcb590f921b7c36e37d16ab48541090f3a1ef6fc00b3bf", "average_hash": "371b330f3bc9380d3862f03370b361bfe1b7e8b5981758167890781838003001", "name": "Screwjank Clunker", "phash": "e385ff58ee797fa73bd0a6ec8ab022b208b4c3c124cc52446d9bb649f64c99a4", "mana": 4, "whash": "371f3b4f39cb382f3872f8337cb371bfe1bfe9b7d89738167810789838003000", "card_id": "GVG_055"}, {"dhash": "0ae60dc892914926766dac82081591f926c70f0c103066c8ecb5c9e283411fae", "average_hash": "f71fff7f9bdb38b93cb93e931fd10eb03190e1826382028200c3004300010600", "name": "Totem Golem", "phash": "f709ebc9bbcb7b662b229b4c6334633404540e026fe357c15e891f0d3f26be82", "mana": 2, "whash": "ff1fff5fdbffb9bfbcb97e931fb92ef03dd0f182638222820083004302830600", "card_id": "AT_052"}, {"dhash": "e230c96184c13e23514ca6984cb11f23d2c6208ae1049e1938736082611482fd", "average_hash": "934f110ff807b88b380b38193098b01f300d3984781c781cf09df00dd00d807f", "name": "Desert Camel", "phash": "c999dbc119c67b0efd3239592f8cd65999692263b4313391a9cc93f0c7446ed4", "mana": 3, "whash": "9f4f190ff807b88f3a8b3a993899b01f300d3984789cf89df09df00dd20dc47f", "card_id": "LOE_020"}, {"dhash": "a8f70dfce2834b267f50ae4f58bda0694782ef0ff0f5fd8bfb3306e7bc4b6130", "average_hash": "e70ce31de11f301d7838103c183c1c283c26e033c01e800f901f981fe80fe001", "name": "Metaltooth Leaper", "phash": "69016fc97bca7b6636168d033cc0a2c486f84e0fb9f1d9945cf29b0d8cadd9cc", "mana": 3, "whash": 
"e71dfb5fc11ff03f783c103c183c3ca83c26e9b3c09ea01f981fd81fe80fe003", "card_id": "GVG_048"}, {"dhash": "5af6acec51898a1a35b458c9b1827f0dce7a9ab5b06b27d50e3200726180dd32", "average_hash": "879dafbdfbbdfc0dfc0cf002b04380618424202604362413f011f000c1008100", "name": "Flame Juggler", "phash": "733b7b9b3f031d9d9c92b2e8c66404a6188996c4c7007e44ef04bf91ff60ee66", "mana": 2, "whash": "af9dafbdfbbdfdbdfc29f402f143a165856d216625776552f811b090c180c100", "card_id": "AT_094"}, {"dhash": "68cdec9ec1a50a17316c58fbb170631b8ea27d15fe2a00d56338b66f68889130", "average_hash": "d790dfb0fbb1f8987c483c203d63d9737c41b410911f80151c06b83f91118100", "name": "Antique Healbot", "phash": "a76567443f2719a43e8496605ed846e2268219e5cf243679db307ea637b1cbcd", "mana": 5, "whash": "df90ffb0fbb1fcb87c083c203d63d9727d43b554115f81571c07bcbf91918100", "card_id": "GVG_069"}, {"dhash": "acdd0c3f01edca9b37206cb8b1426685d80a8f3d9aeb304706b61c68f99cf33f", "average_hash": "77107387c18fe00ff83ff83ff027f022c020e022403300112010300030006118", "name": "Shade of Naxxramas", "phash": "79765b064e99becc76c6a6e0a98d24e1693b53f2799c3c790cc3c6c7c3326032", "mana": 3, "whash": "771effbfdbafe0bffc3ffc3ff467f063e168e023c0330031201070007110e11c", "card_id": "FP1_005"}, {"dhash": "bac02488413b4bf4b4864ff5bd027a05e3facedd704ba0965f361a6f809c3738", "average_hash": "071f0f9803b1203180038022c033a039003b98378c3f0426f832f839001c011e", "name": "Hungry Crab", "phash": "adfd96d86f9d9fc66de9a3e45c72838158202d19b384dcf0273959429e4133e4", "mana": 1, "whash": "8f9f4fb203b1a4b584038033c17be178817b99778d7f0576f833fcbb01bc011e", "card_id": "NEW1_017"}, {"dhash": "2aca6cb7d1440a99b4754891bfe65b4d87923a05410aee17b235606f4e8c9939", "average_hash": "f71fbb97dbbff82fe024e821e418645df447fc073800b80c8005800f80010101", "name": "Mechanical Yeti", "phash": "7b1f7fc69fc3dfa1ffac39a8066092b201924cc90b499342fbc833c16921de72", "mana": 4, "whash": "ff1fbfbfdbbff8afe424ec23e45a645df447fc077800b82c880d801f91010109", "card_id": "GVG_078"}, {"dhash": 
"6ce08ded829305873fee6d0098087edb7c32916412cb86079f33fe6fdc8f013a", "average_hash": "870ab348395d781d383c3c3f003e083e003e401e000600021019b81ff81fc11f", "name": "Ysera", "phash": "c9ada719f3893e623d6686515b6423d12cc492e61ae893cdd93ccdc93c26f4e8", "mana": 9, "whash": "8f1fb37c795f787d7c3f3cbf143e083e083ec01f400e000a101df81ff81fc11f", "card_id": "EX1_572"}, {"dhash": "d27829cd223a7cc4d168a7854c039912223f1c7e6cc4d0d801f173a28747eeff", "average_hash": "af7be37d31df60cf73cfe3db83cb03d103810380038900c000c00040062086ff", "name": "Tomb Pillager", "phash": "5d89fb3929fe6e1fbf61b66d9b51d694919c9290919896f00d8d46c445e0eec0", "mana": 4, "whash": "af7ff37d76ff64dff7dfe3df93db03d303c103c10b89008004c002000620ceff", "card_id": "LOE_012"}, {"dhash": "0add8dba12c3093c7e793cc2f81cb1f143c3dfa2ec05910b233e22e0840279b6", "average_hash": "e7017306f3567019f031e839cc383cb07c20fcbdbc3c1c381838180008000000", "name": "Magnataur Alpha", "phash": "a395fb2cdee2ef53ded20c8b2939b29c86e40c090996334973329fa49c875fc2", "mana": 4, "whash": "e717fb07fbd6703bf871ec39ccb93db0fca0fdbdfc3cbc381c3c182408000804", "card_id": "AT_067"}, {"dhash": "d0d82d3152cfac977bffaf2c1c7871e1e0f2d1c48101130b063608e55040abb4", "average_hash": "c70723466146784eb839f83e7c387c3c7c3e7d3c393c38bc389c280c00000000", "name": "Hallazeal the Ascended", "phash": "e317bb265eaacf1caf6409653741129e10540a4979ef58665de64765ac4eb699", "mana": 5, "whash": "e70761476146f84fb839fcbf7e3f7c3e7d3e7d3c3dbc38be389c381c00000000", "card_id": "OG_209"}, {"dhash": "0cb300ce059d9f3c3eeb5cc2bc844389837a60a5cd02bd5d73bbc65c9892353c", "average_hash": "ff3fff0ff81bfc3bec39fc39ec79c41c80032800340010001010301000000108", "name": "Arcane Giant", "phash": "23293301cfd0dee05e826da02714929498e1643ca46dd2efe0e6f6fea4cfb477", "mana": 12, "whash": "ffbfffbff87bfc7bfc79fc79ec79e45e8007a800bc009051903130302100113c", "card_id": "KAR_711"}, {"dhash": "78c62c38e1e0cb1f372e5611ace65839b3e2041d672a184566b8d973e0878d3d", "average_hash": 
"8fb58fa1c7a304befc7ffc596d58dd58bd5d7144e100b04130405800818f812f", "name": "Grim Patron", "phash": "7fdb3f959e17b6901f93862939a043828cb12d62ece8cc71364bd274c786199b", "mana": 5, "whash": "8f958fa1c3a307befcbffd596dd8fdd83f5d7146e140314130404c80818f832f", "card_id": "BRM_019"}, {"dhash": "426bc90594a92ade75b48f6e5f3db44360c6d328e7459c9b293356a40c49faff", "average_hash": "f71ef9deb881380038849b26a83e78b609b3a1b9c09cc89c989f9811d000f47f", "name": "Rumbling Elemental", "phash": "e317c732f3ea7dedffbc89ae3976bee241dc625089381e46119462d011470cd1", "mana": 4, "whash": "fd5e794bb8803884b88c9e27ba3e79bf09b1a11dc29dc89ed89e9813d000f47f", "card_id": "LOE_016"}, {"dhash": "e8f0cc8b01b20b44b77750bfa43e4e6993d220c5888b09573eb6a16c06895d24", "average_hash": "37977fbd7bb96cba047c045b054b2d4d3d43bd41192010700430003009000920", "name": "Azure Drake", "phash": "cfdd4bc86e84f7b1db1a58e00ce30c344834cc0633cd92edd7a646b32de3bcc1", "mana": 5, "whash": "3f9f7fbd7bbb6fba04be055f0dcb7dcdbf43bd5119601570047004b009a00b20", "card_id": "EX1_284"}, {"dhash": "8cda0c3f11cb0aa7355a5b78a6e04c39bac27e85b10af7158033866c2989c134", "average_hash": "f707f387f907f80d780d780efc0efc073c03bc05bc0678023800100000000100", "name": "Priestess of Elune", "phash": "c39773903c009f253fb068e00f8684e1309833663b31fdcc9f33bbb3f9c0ced9", "mana": 6, "whash": "f717fb87fbaff82ff80f7c0efc0efc477c07fc07bc067c1f381c101010140104", "card_id": "EX1_583"}, {"dhash": "849f6df5d0820b65728aef349ccd393066c0c1046628c6fff81e332100c6fc8f", "average_hash": "0730e3c7eb97f8bb789f381edc3c9e3f2e1ff00cf00c40004c64c8054000e007", "name": "Dark Bargain", "phash": "6b87ab85b69a367a1f700dc2e0e2cc688f0d26c127936c3ecb0992b173db9e56", "mana": 6, "whash": "0738e3c7eb97f9bb789f789fdc3c9e3f2e1ff80cf00c6000cc66c80d400cf00f", "card_id": "AT_025"}, {"dhash": "f83f9c6d00d9153323622e90dce0b3c17111fd02e0050019012286552ca05d6e", "average_hash": "e787f30ffb0ff80dfc15fc1bfc19fc1efc0ffc07b80398001800180000000000", "name": "Arcanite Reaper", "phash": 
"43ff7399bc425ce07d440ee5161412061ca3042005c13ecddf69de26efb77e3f", "mana": 5, "whash": "e787f78ffb1ff91ffc1ffc1bfc19fc1efe0ffc0ffc079c011c00180001000900", "card_id": "CS2_112"}, {"dhash": "caf18d47929a0c75789e21584394cc297b53b6e06fc59b0b203fc062324d7d34", "average_hash": "770e711cfb0d78097803fc01bc00bc00bc20bc28dc09fc397cb8000e080e7814", "name": "Mistress of Pain", "phash": "c31bf3b86ce42f7b7e30596c83869c6c03e0184c6e62271333633f93fb4499ce", "mana": 2, "whash": "771e791cfb4d78497807fc01fc01bc81bc28bcb8fca9fc39fcba083a081e781c", "card_id": "GVG_018"}, {"dhash": "3ac7ac0c11584a60b4834871a3f64d0d9b1a2235516aaed77836636cc8909535", "average_hash": "c79fefb3f3a1e4a1442214208444e54de44de405e500e410c431a03101018101", "name": "Mechwarper", "phash": "33dbdf528fc573265b9b0ce1cce48820290852ca8f11b7917f02ff047f26dec9", "mana": 2, "whash": "ef9fefbbfba1e4a1c4a31560c545e54de74de5456540e550e439b4b111a19321", "card_id": "GVG_006"}, {"dhash": "e887fd5f60fc83b20ee459c1b7066b85de08cf71186833826603d944849f293f", "average_hash": "07006300fd03ec11fc21f003e403e407f603e413f4037c03fc03f803f801f00f", "name": "Force of Nature", "phash": "934d3c726e93dfac7ce21a4eccc031810f120eb10a1c6b25d3f6d7ccb94a7f89", "mana": 5, "whash": "0708e341fd83fc11fc29f007e40bf427f633f613fc03fc03fc03f803f813f80f", "card_id": "EX1_571"}, {"dhash": "0cf20cc581c00b11376256ccb0187d39d2428cf539cb911667305c60b8807526", "average_hash": "f71ffb9ffb9fd81f181f981b9c239c3b8c3b04030c0318003000100000000100", "name": "Coliseum Manager", "phash": "895959836f26ffe43f10b3711a8808a86dca5646f2f6b6353b4bf3909ca7d248", "mana": 3, "whash": "ff1fffbffbbff8bfdc1f9c1fbd639d7f8d7b04730d2318203810100021000100", "card_id": "AT_110"}, {"dhash": "caf10c07611ac2e7b5fd58cab1b04f61988267559b2a36176cb2dd614b839734", "average_hash": "779e7fbe23a585a17033fc21bd41bd477c447011791348132003a00681048104", "name": "Twilight Drake", "phash": "f3bcefc97c166fcc6fb38338a3cc60e010c498611e7116e2d386db906d02df72", "mana": 4, "whash": 
"7f9e7fbe03a585a1f4b1fc21bd41fd477d4471517d536d53a401a485a185a124", "card_id": "EX1_043"}, {"dhash": "9ce3adcf4238c482bb6672c8849a497f97822405590a83976e30f069f487493a", "average_hash": "071ce37debfbb44794d8b8c9904d184e7c037c03f900f800f8005800d900d918", "name": "The Black Knight", "phash": "734b7d383f86ff5c4bc24c932c9182c04474c8729a92d3bd4f0c6f6fec8831a7", "mana": 6, "whash": "2f1def7debfbf4679478bcc9914d194afd0b7d01fd02fd6078005880d980d908", "card_id": "EX1_002"}, {"dhash": "12b5686ac19c9f39399356a6afec5301bcc270058702369d483a7967ee9ff978", "average_hash": "e7a3c18de00de81cd808cc025c12fc03f80d701c500060020800c001d11ff17f", "name": "Gorillabot A-3", "phash": "636f3bcf39537e98fb9c96e6dfe41a33931819e565b049cc32248a1411b0cee1", "mana": 4, "whash": "efabf1adf0bdfc1ddc08dc427c5afc43f85d781c780868420800c019d11ff1ff", "card_id": "LOE_039"}, {"dhash": "8aff2dce129805247e48ac8159b3a1464b1b276648c1708a85373ee4f449df91", "average_hash": "f71ff11ff35ff87ff8bff3b3b0b120a1408084010d000ca008b0001800188010", "name": "Patient Assassin", "phash": "a19dfb586bef1e5e3a70a4a413d220ec80d08c183ba57fc0f7059605ff416fb5", "mana": 2, "whash": "f71ff91ffb5ff87ff8fff3fbf2b332a166a18c810d801ca00cf0007802188019", "card_id": "EX1_522"}, {"dhash": "48fb0cf211e50bc0b6806d2db21a6833ece69fc5f30ba44741b7076e3f983530", "average_hash": "f71ff59f9b9ee03f603e0036100818001834182e003f003f003e003c001c0118", "name": "Haunted Creeper", "phash": "c9e7d3667b3d9ba1b69334a899b44892308833096e423d23ae07dfc3f36166f2", "mana": 2, "whash": "ff9ffdbffbbfe8bf743f003e156c1968196c187e113f803f003e003c013c0118", "card_id": "FP1_002"}, {"dhash": "18f00de2029c05bd3b706cc099c07701e2329144aa8b7cd70736fe6fe09d1938", "average_hash": "e70ff35ff15ff01ff01df831f831e03340134000c03080307830f818f01ff01f", "name": "Mimiron's Head", "phash": "29c75b8849299e46db4c27141ed203ca18d81933e6ce1c7d77e54f9b4d13d7cc", "mana": 5, "whash": "e70ff35ff15ff05ff83ff831f831e037c033c000c03080387830f81cf81ff81f", "card_id": "GVG_111"}, {"dhash": 
"6cf0cc3001ec8adbb147531ea6bc7f7984223925c2cab095613b437696950d3c", "average_hash": "970f1b8f1b868006800f380fbc07fc3ffc0ffc0f8c0b0c0b001b0819800d010e", "name": "Windfury Harpy", "phash": "cb4bdb907c902f6de39689895a7819d8396434334a2cc6b6ad8dd96273b992e6", "mana": 6, "whash": "971f9faf1bae8887840f3c0fbc47fc7ffc4ffc0fdc0b0c0f081f0819810d011e", "card_id": "EX1_033"}, {"dhash": "4cfd097b02e00dd47b73ef8018106138fb6084c5089bc3008e3b18f7644ecb1c", "average_hash": "e703f347f14ff81ff81ff83f9d3b8c219c3918311808300c700c6006c004c00c", "name": "Aviana", "phash": "e307bb297ae6fe18be511f644f0101b540983e2e8eadabc1597f50dafe028785", "mana": 9, "whash": "e707f34ff94ff85ff81ffc3fdd3b8c219c3998393918380c700c7046f00cd00c", "card_id": "AT_045"}, {"dhash": "cadd8c3b11e20a84307c53e1a5425a8f9b3a3eb5e54ac39606392d72f8857137", "average_hash": "771f7d9ffb8ff8067c04f001f001e000c009e00c2c0f3c0e2c04300030003902", "name": "Frostwolf Grunt", "phash": "f3d8f3c5c744499cbde536ec0be066320c2605343a823b437f10fe23ff109f93", "mana": 2, "whash": "ff9f7fbffbaffc87fc04f401f141e140e149e94c3d4f3c4f3c04380039023902", "card_id": "CS2_121"}, {"dhash": "0a992d7152f6bcfd7b4ae490c40f9a330146669e0130fa49c0b38fe41b087f36", "average_hash": "f75665e62bce00cf88ffa3fbc3d983c893800381038003d600fe01de03100718", "name": "Fallen Hero", "phash": "5d29b34aabeef636366221e43b81899909e076647dcdb601ee12ee057f85364d", "mana": 2, "whash": "ff57ebf66bee00efcaff8bfbc3db93c883c0038103c003d600fe01fe0312071c", "card_id": "AT_003"}, {"dhash": "c8fffde761980f307ef098c233214642cc4d914b8e937c66d84c21d310cefebe", "average_hash": "0700830fe13ff079f879f873f867f867b063fa619a71c2748224000e0006e007", "name": "Powershot", "phash": "b10237494cfd5ee2d21c5e163685a6e9666224c967123af623cd3e964bcbd935", "mana": 3, "whash": "0700c34ff13ff079f879f873f867f867b863fa61da71d27c826e000e0016f00f", "card_id": "AT_056"}, {"dhash": "0a81fd83e014806f57de983d30f326c67d9c97393df2f2e4e1cbc70ecf353e0c", "average_hash": 
"0700030041000c001c185a00f200f200ee21ee31ef43ff07ff0fbe0fbe0ffe0f", "name": "Shield Slam", "phash": "0340e4f74cff3b9c77b6cf611be809194a44966822f439904bcc798da9b57f64", "mana": 1, "whash": "070003006b101d001e18fe00f620fe20ee29ee31ff43ff07ff0fbe0fff0ffe0f", "card_id": "EX1_410"}, {"dhash": "e8fe0d5192a22dc1732aa7d4ce6d90db29375a4ce01003c8cfb1b0e689810736", "average_hash": "f74ff3fff1df58df18de9bdd0b9fc39fcb9c4b860b86208220c280c203000700", "name": "Cobalt Guardian", "phash": "5d87698bb268d36c3f1631380e9082e300bc9ccd4fe73ecc3ff3eab4289c0e59", "mana": 5, "whash": "e75ff1fff1dfd8dfb8de9bdd0b9fcb9fcb9f0b8f0b86008220c280c003000700", "card_id": "GVG_062"}, {"dhash": "0080cd3fe0efc34d9eb11c42189d6778c3e3bf0f671e18e0b087ed0ff2bd08e0", "average_hash": "271c01e001800c105e320e300f309b60386078f478fc9b098e0be61fe07fe03f", "name": "Holy Wrath", "phash": "ad1486a479edd78bf5131f102dc401fe8641291f0eb59a537ba879f252c41deb", "mana": 5, "whash": "f79f03e001800c105e360e308f309b60b86078fcfcfcdb0f8f0be61fe07ff07f", "card_id": "EX1_365"}, {"dhash": "aac0a533c25d8cef799ee72f0c1cf4f040e181c63718ec00d03be0f1f947131e", "average_hash": "071007600b6038405ccfcef6ff7a3cfc1ee43fa0ff80fb0030c010c200878607", "name": "Forbidden Ancient", "phash": "c7b7bfefb8b8f69b8fd169e03ba49cc409d1c309a6313a0079a249909c05fbd4", "mana": 1, "whash": "0f110f600b6039e05ccfdffefffe3efc1ea43fa0ff80fb8070c410c2008f860f", "card_id": "OG_051"}, {"dhash": "823f21d844b4594bb2946621b9107321e64ac4bda1637bde463c8c481a91753e", "average_hash": "7bfcf9dbe8df44def4fe76fa197805730031003021343077343330323120293c", "name": "Moroes", "phash": "edfc31eb530ebd9a3e9b934c6fb27661992466962c4b8a2e66129359890632d9", "mana": 3, "whash": "ffebf9ffe0dfc4def4de74fa1b780b7103700130213430773423303231302b3c", "card_id": "KAR_044"}, {"dhash": "08f6acecc1d34b61b6964927a64c59f9a3120125380ab7d40cb9f173c687192e", "average_hash": "f79fcbbfa3bf70bf1c375c3e4c4f5d58fc42ec000c0000006000c00001000100", "name": "Argent Horserider", "phash": 
"1b4fc981f991720867037bcacee4b3a6e3a4ccc9679873d8cd27c6edb00c9a64", "mana": 3, "whash": "ff9fffbfebbf74bf1cb75c3f4d5f5d58ff43ed400d4241406400c48001801120", "card_id": "AT_087"}, {"dhash": "4a3b994622ac1cb1b3d6e73c8c5070614ee2b9154222e3dd0e3b3462099cf17f", "average_hash": "f746f1c6f04ad8c9d099d81ef81af83b7806f8061000201c301e10081108c17f", "name": "Arch-Thief Rafaam", "phash": "e36333a23d33f638bb06ef4d3d829ed31d1c365923224a381598e671396cbec4", "mana": 9, "whash": "f74ef3c6fe4afc89d49dd89ff81ff83b7806f80e504a205c301e100d1108c3ff", "card_id": "LOE_092"}, {"dhash": "4af68d4a12143db1784de5f2cac1800724b6483c9fd12b4bc6b20de57f02df34", "average_hash": "f75ffbfffbfb98e38ce16fe06fe093e003b003b003b203f400f401c003000700", "name": "Dalaran Aspirant", "phash": "fd95f34be3eae931ff4071b03b1c0c8c18d11a7345b46e04ba666a6137299e1d", "mana": 4, "whash": "ff5ffbfffbffd8f78ee1efe0e7e093e003f003b007b303f400f401c003000700", "card_id": "AT_006"}, {"dhash": "0080dd37b0cf223d3ffe5d70bac16586893a3e45008a00d60db8a2415300fcbf", "average_hash": "1f7c07e01384d828d879f077e87de07fc077887f800f0040400004000400f00f", "name": "Sacrificial Pact", "phash": "31d2af034ecddea939302794898d0f2821420e5d4b514673cbe09ba56fffb63d", "mana": 0, "whash": "ffff07f01384db28fc79fc7ff87fe277c07fcdffc07f4040404004000c00f00f", "card_id": "NEW1_003"}, {"dhash": "4618893006423c8c70b0a5e04a9d85331847738c9404671a7837c6a4094bc23f", "average_hash": "f347f947f8c7f0c7fa84c384c285d085318cb18631867083908380830200841d", "name": "Ethereal Peddler", "phash": "d133f3ababecd36dbbb2b98b7f644e2765d88244397342d0cd4c16f121011b19", "mana": 5, "whash": "f967f947f867f8c7fa84eb84c287d2cdf98db98eb986f083c08390938205803d", "card_id": "KAR_070"}, {"dhash": "76b4c061851d9fc7387857c6bc2c49993a1241e40602f8dd0f3afb51c687993f", "average_hash": "8bafb9aff82ffc73fc04dc4dec7bfc09fc0c080004000010e000f805d90f993f", "name": "Pantry Spider", "phash": "13dbc958d913fcb4d95b3bcf8f895306e6249239a53c0c87339346464dc1d778", "mana": 3, "whash": 
"8fafb9aff87ffc77fc48dc4dfd7bfd09fd08490005000010e800f805c90f993f", "card_id": "KAR_030a"}, {"dhash": "aacb0df60300cf0cf6f12de34b8cd71a27676ec9d834b14862b9c9e39b47372e", "average_hash": "7712df78cb7c04f404f047f087f38ad38bdb9bdb93df80cf00c700c700870627", "name": "Summoning Portal", "phash": "9da527a7df5efb69af05927c676096cc009cc898cd98491a590e5dc46b067367", "mana": 4, "whash": "7f16df788b7c04f404f047f08ff38ad38bdf1bdf93df81cf00c700c700870607", "card_id": "EX1_315"}, {"dhash": "0cde2c3c41c84ab4b5405229ac1250b7aa6e40dd00aafd44e7b13a62f3811d2e", "average_hash": "f793efa3e3afe4afe06fe05ff35e514053511100110010410000000081810124", "name": "Silithid Swarmer", "phash": "fbf33b113f1e9685b7a533693f6908c989c896b066369be9cc8c98c681837962", "mana": 3, "whash": "fff3efa7e3a7e7efe4efe55ff3dfd1d5135111c011c011c10040058085832b24", "card_id": "OG_034"}, {"dhash": "9cd42c4161984a79b59e5635bbaa64d39d823b35f26ae457ccb5d96f728fa13e", "average_hash": "c717638163800008000a101410140804600ee00ee00e603e601f601ff81ff11f", "name": "Am'gam Rager", "phash": "69c6b681bb99734c3b938db41b6109698ce21c937e726eb0f69276236d26de60", "mana": 3, "whash": "ef976fab63a804a8101a101415741964714ef14ee14f607f703f603ff91ff91f", "card_id": "OG_248"}, {"dhash": "0a80d57fb0f9418e9f0b0f1e9cbc79f9ffe0d000ac010017602e434e36987c2b", "average_hash": "07180140818c001f401e001e380c3c39fc3ffe3fb83f003f003f083f083ef81f", "name": "Wisps of the Old Gods", "phash": "69dd76fa7eeebff5979fac4d7bd00dc409288b45c3414b8259c15280732219d8", "mana": 7, "whash": "071c0360118e911fc01e601f380cfc3ffe3ffe3ff83f003f003f083f483ff81f", "card_id": "OG_195"}, {"dhash": "08f9cdd1128305067a0dac3e10fd5981e702300d47f2df0b3c3560e0814107b2", "average_hash": "f70e735e395e383c283c28bc7b28fc11fc2109084188708cf0ad8003c0038002", "name": "Al'Akir the Windlord", "phash": "e30cefa2d9e4734bb36433d66c4418dc219452cc4eebd75699b453a7ce689053", "mana": 8, "whash": "ff1e7b5e395e387d383c7ebc7e28fc11fd216d08f38878acf0adc043c003c003", "card_id": "NEW1_010"}, {"dhash": 
"ea0ffdfe613903f71eec9d407383e0a6b609c15b8cb331e6400c03f1eca6fb0b", "average_hash": "07208381f331fc31fc31f277f27e36721678027802721673066200700048d00f", "name": "Ancestral Knowledge", "phash": "a520f3755adf9cec6ce99c2e7306248f87f8847c2b166b40b3443e0b6ba11f35", "mana": 2, "whash": "073093c3fb33fc33fe33fa77f27e36fa16790278067216730e7200700078f02f", "card_id": "AT_053"}, {"dhash": "38cc6c3a41e6ca18b57052c9a10e413f83fe1e1d7d2ad0c483b97f63db843d3b", "average_hash": "dffbcfbbefae75ee34c89fc187c103c0034003c083c101c104c035c81d831b33", "name": "Spiteful Smith", "phash": "9fd93b66eb453e869f811cb11b8126310c98493063b29a96ef8196edfb69b319", "mana": 5, "whash": "cffbcfbfefae67ee34f89dd187c103c00bc043c083c105c104c035c81dbb1f3b", "card_id": "CS2_221"}, {"dhash": "fab7fdf91146cba39e5138917122e74ecc9d383893a00347eee89111783dffdf", "average_hash": "0700f3493bbeccfd66fe067a00636067006662060606326060620e022270fa7f", "name": "Tracking", "phash": "adcc2fff67f35ebdda2bb6fa2df81c2c21c21e950b07a4210f40e124c7c0d831", "mana": 1, "whash": "0718f3eb3bbecdfd6efe26fe00676067026662160617326060741e622272fa7f", "card_id": "DS1_184"}, {"dhash": "02830d3eb0fce3e82c23530c943e3a81725aed27f359c2031966762d09f8fecf", "average_hash": "77bec1e1c38bc00fc847c82fc41b741ff01f6c7f683e4b1e001480130008c40f", "name": "Arcane Explosion", "phash": "795fbbc29eb1e70c67688ef58d1e897243c26b0803797a414b13178e5ba9f823", "mana": 2, "whash": "ffffc3e1c38bc00fc84fc82fd41bfc1ff01fec7f683e493e001c801b000ccc0f", "card_id": "CS2_025"}, {"dhash": "78deed3992703ce979c4873a8365641b4976db6db4930d09d33276e7ec8f9b1c", "average_hash": "8703f363fb43f843f8837b837f00cb008f009b001b80198018d018d81b1c1f1e", "name": "Tirion Fordring", "phash": "d787bb58c9e996b73b12040c4ff00c2f00f466c6e6fcee307f2d5bc621cb44c4", "mana": 8, "whash": "cf13f363fbc7f843f883ff837f00cf008f009f001f801fb01cf019f81b1e1f1e", "card_id": "EX1_383"}, {"dhash": "f8ff3dff63a0cf40dc10b07b6c634f961cc4f70cc96322a7d11844b7b228efcf", "average_hash": 
"0700f307f93fe47fe67fe24ee244724ef80ff00fb21bcc038e0400584000e00f", "name": "Brawl", "phash": "39a039743e7f8ed5f24076a23e180c480dc2a56c05bb169fca657a96f98f3f61", "mana": 5, "whash": "0700f307d93fe4ffe67fe24ee25c726ef80ff81ffa1bcc039e0c00506211e00f", "card_id": "EX1_407"}, {"dhash": "08e22d9042ec45f1bb6f66039fcc7f796cc2e104870b3c4678b09073cf871d3c", "average_hash": "870fc347c153c01b800fe007ec3ffc3ffc3e7c3ef031e002c003800380070119", "name": "Fjola Lightbane", "phash": "396f9fab9e216766ef199a506bc0485249d938b3666c5326b5b5d9d86c26068e", "mana": 3, "whash": "871fc357c353c05fc00ff89fec3ffc3ffc3e7c3ef037e026c003c0038007011f", "card_id": "AT_129"}, {"dhash": "6ae4ed4393f504607988a6994ca19142313f56febb35864b0030cba1a44f03b0", "average_hash": "871f1b3fdb4de84e20cb83d923dd22c183b001bbe3bff0bf00c00842001f0604", "name": "Master of Disguise", "phash": "d989fb4bcfe6bbc12e91d6b959580cec1064494c06e63b8373029e91ebc95ee4", "mana": 4, "whash": "871f9b5fdb4ff87e64dba7cb22df22e1c2b801bfe3bfe0bf00c208420a1d0604", "card_id": "NEW1_014"}, {"dhash": "8cef0cf811e20b0c37124e6dbd82490db7aa345d40aac34764b481694f963d38", "average_hash": "f71bfb9ffb9f783f583a1038803c001d000f000f0007803b003f0024001c0118", "name": "Thrallmar Farseer", "phash": "69e96304f314dbc07f29343c26e54ec2245846b8eed66dd9deb65a4e2513dab0", "mana": 3, "whash": "ff1fffbffbbf7cbf5c3e1c3e917c055d054f104f0107803f802f0024011f1118", "card_id": "EX1_021"}, {"dhash": "cccfad3ad2d54477b904721b84ce61d783e26304ce0a1c443fb9b87363851f2c", "average_hash": "1ff00ff00fe605eb84ef07c827681b61dfc1ff50ff580dc834c005c485832728", "name": "Maexxna", "phash": "776a9f089f142727b7034c50276103e182910ce363ec5e6fdf73e1ddd58e1127", "mana": 6, "whash": "0ff00ff00fe607eb84ef07e8a7e81be0dfc0ff58ff580dc834c005c405b3372c", "card_id": "FP1_010"}, {"dhash": "0280cd7ff0bb03c63efc5d1fbe4400a1436efbd8c6a19d063311664cd8b3fd2f", "average_hash": "5f7e07e02383383b207dc17f607ef038f0001e001e003f403e0038200820fc3f", "name": "Demonfuse", "phash": 
"e7f17dac9f9c96e179562dd209aa90b723692f0d29c09b065b601e096f817ed8", "mana": 2, "whash": "ff7f07e0239b3b7b307de07ff07ff038f0009e001f003fe03e203e301c20fc3f", "card_id": "AT_024"}, {"dhash": "88f30c8c01218bd2360f4f7ab8847349e7b2f06581c91a9327364e6c1898f130", "average_hash": "f70ef31db903b8338827383dec398c3e4c3b0c390c3c0c383838383830180100", "name": "Tinkertown Technician", "phash": "a91ddb32f2e16dcc3f0a65a06e92cbe41ec66c303365becccee2c93919139358", "mana": 3, "whash": "f71ffb9fb99bf83398377c3dec3bac3e4c3f8c3d0c3c0c383838383830188110", "card_id": "GVG_102"}, {"dhash": "0af924e341c20afdb52a5305ae7a5985a6229fddf2abe446db33b86d0e87b33c", "average_hash": "f71fed9ee99e682cd00fd00f10095011d000d004d012100c300ec007200f211d", "name": "Argent Squire", "phash": "79f673ce5b1c7366b3ed99e899b113e626c024c91e8836cb330dd3611c03db34", "mana": 1, "whash": "ff1fffbeebbe74bcd41ed00f115d7151d140d044d13a105c183ec01f310f211f", "card_id": "EX1_008"}, {"dhash": "9ad9182d45984a33b76456d9a482591fa40a087534ebe00647398d700b89153e", "average_hash": "ffb7ffb5cfa18de1b4f9b758a3dd83d303720560036019f20c5225c025901b3c", "name": "Faceless Behemoth", "phash": "bf793bd04f1496c67fa4632c6df3b07c89ef907d585a866d9b0016c04f213b90", "mana": 10, "whash": "fff7ffb5dfa185f1b4f9b558a3dc81db037207e003e019f20c52258025b00b3c", "card_id": "OG_141"}, {"dhash": "0ad20c24419bca30b5e35003a34e4e3d87da72bdbfea7555803226646d8c9738", "average_hash": "f7bf9fbd8fad85e984698c450742054083418140f110f1d4183010b801b80318", "name": "Zealous Initiate", "phash": "7df3db9a6d95e3f67fca4ba673e6202892207164160cd3103b0c6c0fff2218ce", "mana": 1, "whash": "ffffbfbf8bad87e984e98dc187c207c08341a3c0e3d0f5d41c5c15b805b80338", "card_id": "OG_158"}, {"dhash": "90802f3e80fb7925f342ecd89ab7256f6ed6d08ca1404e8988309ce37186a31c", "average_hash": "47c023e081c1d0df40d903bd83a5e3a3f9aff9bf7d9c7986788621c403040304", "name": "Cabal Shadow Priest", "phash": "f591b5c90e624326bf5014066e8441c5016c7be989e39b17ff31ba61b33f5a75", "mana": 6, "whash": 
"47c023e081c150df50f903bd83a5e3afffaffbbf7f9c7d8678c631c403040304", "card_id": "EX1_091"}, {"dhash": "01c0ed37600e81d90ca31b7c1c7931f3770cdff03e007918e034c0cf81fc3ef3", "average_hash": "f7bf7bf0fd84cc00ce46122e091bf809f217d607ec07e007f003c007001bf037", "name": "Forbidden Healing", "phash": "1b546786f4be53b1ed41f84989700c4013f01a3d23dc3be54bd35b69db26b7a4", "mana": 0, "whash": "f7ff4bf0fd84cc014e46423ec81ff81bf237d617ec07f007f003c007001bf07f", "card_id": "OG_198"}, {"dhash": "f4dcad7f02bcc543da80ad001edb7c3fe16e84fd3833c148c0b48fe73a4e37bc", "average_hash": "07012365a34fc05fd0f782bf037e823e813d03210384028400c40044000c260c", "name": "The Mistcaller", "phash": "3d92bb2156a8964dae480486169092b721f129295eef5dc5c9d9d747e7e267b2", "mana": 6, "whash": "0f51af67f3efe67ff47f8eff877e827e817d03270785068400c4044c228c265c", "card_id": "AT_054"}, {"dhash": "68eeadd9422785da3a876d2e9ddc63b1c0c2dd24b64b1a9687311872f48c9533", "average_hash": "c70ff77f7bdbf876fc467ca29c309c3618306830e43c282b3c08480840003103", "name": "Feugen", "phash": "a36fdb267a087dd8bf814e4339303282925229376eb6f6c9dd4c4f2f398624b1", "mana": 5, "whash": "cf1fff7f7bdff877fc727caabc70bc763831e834e8382c293c08400821103103", "card_id": "FP1_015"}, {"dhash": "3adac5608203049c79fe77108e6e4cf9b9126335c6cbfc9781b28d713f8ee730", "average_hash": "e707df671bcc384018c018de005e481eec0eec1cc40c843f001c0006011c8100", "name": "Chillmaw", "phash": "49b6c3e2b6d93b7e7b36599c3bd9220cb21166dce638693863194cc19e8336c3", "mana": 7, "whash": "ef57df7f1bec3d601c402cde055e4d5eed5eed1ecd3c8d7f001e0087019c8120", "card_id": "AT_123"}, {"dhash": "72918d23008c7831f166e4d98bb716cf0f1e1d3c7a7024c9c8b091e12383472e", "average_hash": "4ff63fec3bec14c007d103d023c563c6c3c3c3c383c243c2c0c241c243824702", "name": "Spawn of Shadows", "phash": "5f66e59bae6ef166ff4e45b62f8110e100c46ab010d83a846f676a14fe8b3b45", "mana": 4, "whash": "7ffe3ffe7bec14e087f187d127c563c6e3c3c3c383c343c2c0c241c247824722", "card_id": "AT_012"}, {"dhash": 
"9888fdffe3efefbe1e41310c7000c0ec8ff9f845601b93152322665e40c0fc9f", "average_hash": "07000300013efefffeff8effd67f0064c4677c37183d0019900800080000f00f", "name": "Blood Warriors", "phash": "a982af047e79fe955a663e2129888714136a24c9e3563e3dd969de365bc893e1", "mana": 3, "whash": "070003403f3efffffeffbeffde7f0266c4677c7f187f001d981880080000f00f", "card_id": "OG_276"}, {"dhash": "2ad9cc3251e582e8b5d14bafbf107501e012c2a508c97716d0b26074c78d9f3b", "average_hash": "1706d386cb86e007c003c027f83ff83f781c9c398c2458260017801980038101", "name": "Voodoo Doctor", "phash": "b977db665e66ef96afb9aca096d21c2861ac07582d19c3867e48db9133076c60", "mana": 1, "whash": "7717dba6ebafe807e407e027f837fc7ff83bbc3dcc265836a017801981038109", "card_id": "EX1_011"}, {"dhash": "48fa2ca5d1620bcd363a51f1a6825925965a203540ead6973b36a66c9c8d7139", "average_hash": "f71ff3afebaff836781e700fe00f804b800b0408040000201830381038183118", "name": "Shattered Sun Cleric", "phash": "739c7396e71536783b69b3e166d2266489e59a863b9673ce3e9b26cd34021948", "mana": 3, "whash": "ff9fffafebaffcb77c1ef00ef14f814bc14b054805400d601c3038b839983918", "card_id": "EX1_019"}, {"dhash": "ecdbadee02c14d82fb1fe62608f9d21231656dceb398e701c63a86f70c4f9b1c", "average_hash": "6710336cb35f105e00de4ade1b1c388c188c18823986391f388638ce100e100c", "name": "Druid of the Flame", "phash": "cd233d09bada964cb68237846fc44ce428d0ebf1a6ed2fcd738e5964bc20e6d0", "mana": 3, "whash": "6712bb6cbb5f907e04fe4ede1f1c3ede1a8c1a823b86b91f38ce38ce180e180c", "card_id": "BRM_010"}, {"dhash": "80c10f7a9484794bee947c31f3f6ec8ff7900a716476c88e308f601cec309803", "average_hash": "739ec0e2c09f001a50326476646ee67eef7fe767dc4dce7de639d67890701030", "name": "Velen's Chosen", "phash": "2162d864e4411b1bd51c27d8c696b7202b8c6261587c2fbf4bfd73c759c29b9b", "mana": 3, "whash": "f59ec0e2c09e801250326474e47ee27eee7fe667cc4fce7de679c67890701030", "card_id": "GVG_010"}, {"dhash": "c8f00cf811630b83360f4d36b9ee7299ec3a81754aca3c15c7379e6c30987130", "average_hash": 
"f71e7f9f3b9f383e1c370c378c33cc32c43c44284c001c00383e381c00000110", "name": "Youthful Brewmaster", "phash": "edf7d990cb19674b37cd93259682009201b216626c488d81bf25de69df52ffa5", "mana": 2, "whash": "ff1e7fbf3bbf3cbe1c370c37cd73cd73c57c4c784d001c40383c383e09180110", "card_id": "EX1_049"}, {"dhash": "ea1ffdffc0c083390fd21c0c3818f060e1c1e001fb83e707863f3c6e40d8f0a3", "average_hash": "0700e107f91fdc3fde3fde3fbc3e3c3e3c167c18381e380e381c201c00000001", "name": "Windfury", "phash": "cbf51bc77cdff63cf630ac321cc1240c86b60cf225cf8e25db04cf0c6b097f01", "mana": 2, "whash": "0700e307ff1fdc3fde3fde3ffc3f3e3e3c367c183c1e3c0e381c201c0018b005", "card_id": "CS2_039"}, {"dhash": "0080ed7f60f881e13f00cc00bea6fdcd803b01e4d77bace313804748fd90fe3f", "average_hash": "ffff1ffcffdfcc3fd83f01b701fc01fc01e005400460863806080e000600ff0f", "name": "Frost Nova", "phash": "6d5ff98a5380b720f7be2c81a32492de094129744b3936a5d3c45adb49dbd35a", "mana": 3, "whash": "ffff0ffeffdfcf3fd87f01fd01fc01fc01e805400660073807080f000700ff0f", "card_id": "CS2_026"}, {"dhash": "ea8ffd7f609f806f0dfd02f809f037e17f02de04f00982176cb8c0408899f8bf", "average_hash": "0730e1c3fd03fc03fe03fc03fc01fc03fc03fc07f81ff839404000000000f00f", "name": "Mortal Coil", "phash": "f37d37fa7cebfdfe7bf524b0c9061ea425c00e581b60261d4b035ea6c3015681", "mana": 1, "whash": "073cf3e3ff8ffd0bfe03fe03fc03fc03fc03fc07fc1ff83bf05080000000f00f", "card_id": "EX1_302"}, {"dhash": "0180ef1fe0b7c1dc87310bc31486a8ccd319273348e6b498d930b3656cccf88f", "average_hash": "ffff0bf80dc2ee03e601e780e78007c087e08fe00ec00c800c801d801180f18f", "name": "Circle of Healing", "phash": "177e3b00e9078d2ffd616f01335033820bd40ae45b5c5b6d5b077ab95b2a56ba", "mana": 0, "whash": "ffff0ff80fc2ef03ee09ef89ef8807c08fe0cfe14fc00fc00d801d8019c0f18f", "card_id": "EX1_621"}, {"dhash": "eaf90ce511028b1436004d3fb8fc6369cc92c125b7c92c977f36fc69f493893e", "average_hash": "771e799e7b9f283d88310438cc30fc277c307c300c300c303830382018001900", "name": "Recombobulator", "phash": 
"a79779b8dbe1c78c73e83c702ce712849230180ea7857f08f988d3c33f07af27", "mana": 2, "whash": "7f1e7fbe7bbff8bfac310438fc78fc677c727c304c300c303c307820f900b900", "card_id": "GVG_108"}, {"dhash": "c70fb8ff298b1716ad375c2c807000616186201c7868d0d10da7f045ec8f3f3f", "average_hash": "0ff003d0007e817f817ed97e393eb91fe107e001e001000040008100800ffdbf", "name": "Entomb", "phash": "e97fad2ab50f1608df9fc6a7a547b42fa51a8c3aa56c8987a7762a012c146a74", "mana": 6, "whash": "0ff003f8037f817fe1fef9fe7dbfb91fe18fe003e00161007000c100c10ffdbf", "card_id": "LOE_104"}, {"dhash": "0a8f390ce2f84c70b8e270c7879a59ef978a3f7c488a6644bcb0d1732183872e", "average_hash": "ff1fe767c7c3d0cbc0c380c180410041e009e00ff803f004e005e003e1078103", "name": "Y'Shaarj, Rage Unbound", "phash": "33fa3f893b0473c23f5d4e7ccb97416e64f9c6ed29db57b1868538901e80064c", "mana": 10, "whash": "ff5fef67c7e3d5ebc4c384c1854181d1e1c9f10bf943f145e007e083e1838127", "card_id": "OG_042"}, {"dhash": "8cfc0de092490cbb786e289952a3c5641bff20212dc4cc88393963e686419d33", "average_hash": "f71ffb5f9b5ed86c94a1b2a1202022c024083b083c840c0488c1880100018000", "name": "Anima Golem", "phash": "d31beb9099834b0afb709138e39831d818ce46c6662e6e67377736a32c2718f7", "mana": 6, "whash": "ff1fff5fbb7fd8ecdca1b6e1a4e026c036c87f883f849c8c8cc188c188818611", "card_id": "GVG_077"}, {"dhash": "ea89d537b08f037714bc28f81300a6c4fd896723de4e76ad889b017fa73cfe7b", "average_hash": "07300744f384ff20fc03f802fc03f000fe00ec00ec40e064607c0c780e7afe7f", "name": "Crush", "phash": "e3fcf3f5adeb3cab1fe9ee5c932c0e85cb90866d33503c0443039c7143c81ba4", "mana": 7, "whash": "073867e4f385ff20fc03fc02fc03f800fe00fe40ecc1e074e07c0c780f7afe7f", "card_id": "GVG_052"}, {"dhash": "c0cfdd37e06609ee7d10a331c6c44187db7daed11eef659e83217a7680e8fe8f", "average_hash": "573ee3f7f387fc0778cf308c040c8041c2e1d263dc639c601c0048014040f00f", "name": "Mark of Nature", "phash": "33d27b10eb2c9f633c014e9bc950a540431b969c4bd43cf2fb51e6a4f1ea3adc", "mana": 3, "whash": 
"ffffe7f7f387ff87f8ff308c448c8041c2e1d273dc639ce15c0048014050f00f", "card_id": "EX1_155"}, {"dhash": "98f42cc941a68bc6340cce38bdf077c7e04ec595b929634702b6166dfd9b313e", "average_hash": "c71fe39f639f603e202f383a3838703e70323033303f103e103410381006011e", "name": "Nightblade", "phash": "e9f47b0a5b8fb9d0378226a39b1996a0499259d22669d6266fc3dbf44c589ac9", "mana": 5, "whash": "ef1fefbf73bfe0bf303f383a3878707e70763033303f103e103c103c1116011f", "card_id": "EX1_593"}, {"dhash": "e89fdd6c30b84078b1c166b3c96c90b95f66bec4c3997153cf20b43eb875f40f", "average_hash": "0720f7c7f30bd10be003641b6417f800f809f80ff80378027c02bc00e042f80f", "name": "Assassinate", "phash": "d36f3fb87f70c7932f1e0eac58640c28020734264f39ecf0278932ccd9d8b3f3", "mana": 5, "whash": "077cf7e7f389f38be407e41b641ffc03f809f80ff80378037c02bc48f042f80f", "card_id": "CS2_076"}, {"dhash": "400e9df930ff43529fac3d1b7ce4f9b0e761cc83fc0b4107875e2cbc597980e6", "average_hash": "0700030021078037803fe03c203c903bf07bf87fb87fb87ff077f073b877f877", "name": "Hex", "phash": "a9317e4dde149f4a164e66980d92b32039cf8b7166de2b06ab710ba666989fb5", "mana": 3, "whash": "070003000103803fa03fe03c203cf03bf07bf87fbc7fb87ff077f073b877f077", "card_id": "EX1_246"}, {"dhash": "badd6c3351820a80b46b5311a7465e9d803219453e8a114466b9c9732383572e", "average_hash": "cfb36fbffbb7fcfb444d744fe746cd464d600d40194311c10041008205860322", "name": "Mana Addict", "phash": "dfce7b8cfd049fcd7f6659e88ec904c3a4c90363232093c19b81de117e027ea6", "mana": 2, "whash": "cff36fbffba7fdfb44cd7d4fe7c6edc64f401dc01bc111c10041058205860322", "card_id": "EX1_055"}, {"dhash": "fa19fcf3a0ffd3fe26cc2f00dc61a6ff61d9d0c20005c71f0c2c7052f2a0ad63", "average_hash": "078003800300b03ff03ff83f703e603fb83ffc3e3c0e7c2e6000c001e000c901", "name": "Light's Justice", "phash": "e97eddf0368f06e25f046c6e0376137999c1c8a025e40f3cdbc97668273396a5", "mana": 1, "whash": "078207804354f13ff83ff83ff03ee03ffabffc3e3c2e7c3ff400c401e100e901", "card_id": "CS2_091"}, {"dhash": 
"cafc4d6392ef0d877b2ee67c0cd8d2b02361cdc63299ce029d3f69f692483119", "average_hash": "b709fb5c7b4cf81e789c3c981f105c101c90dca0d924680c609c281828080000", "name": "Keeper of the Grove", "phash": "e38bf36378983bd69e612d991ed0853910d41a4298b19e498eb954b4ff21be67", "mana": 4, "whash": "b719fb5efb4cf87e7cdc3c983f187c905eb1deb0dda47c0c60bc287c28182008", "card_id": "EX1_166"}, {"dhash": "9a9f6d3320647870f387e40f8bbd37634f36b0ec0151728be9b697e67d88f312", "average_hash": "47e4e5e663c600c782c303c21be03be30fa70fe30ba049b000f011f87310f710", "name": "Shadowboxer", "phash": "d5b5d99a62aa1ddeef12a4a53e92087918d0e60126e3cac2ef65f2c03d293bd5", "mana": 2, "whash": "4ff4e5e753e7c0c682d303e21bf03fe30fe79fa34fa049e000f031f87310f710", "card_id": "GVG_072"}, {"dhash": "4af085c0129f253a7ae0b0810007621ded72c8ed1353268a5d36ffe1f04f73be", "average_hash": "f70ff17d395c787ef87bd2838301002009301d300bb00cb00cb02840e00fe41f", "name": "Neptulon", "phash": "af1febfb6bfaee577bf812981b44930f00441a02efac5a1057807591362a9b8b", "mana": 7, "whash": "ff1ffb7f3b5c787efc7bc683874102600d301d381fb80cb00cb02846e80ff41f", "card_id": "GVG_042"}, {"dhash": "0218a16064d11ca6b1486501a2835e27b44a4935b262acdc38b9115263809d3f", "average_hash": "fdffc9efb4cfbccff6c777c7834383c383c2039003d040c8044001c005000b20", "name": "The Curator", "phash": "d9fad99bc95b6de7bf27f3e8eb8cc3a48169866b8d88c2c8698c739061107250", "mana": 7, "whash": "ffffebeff7efaccff5c7f7c7834383c3834203d007d045c804c005c005800b78", "card_id": "KAR_061"}, {"dhash": "f6b828e005c45ff8bb975727aada7499c112bfe5f483db1dc73f1a4e3090013a", "average_hash": "c12fe10fb01f801ec01fc01fd00cf834b820c83c183c181f781f781fe00f811e", "name": "Netherspite Historian", "phash": "69f77972d91cc79fb657dbcc1db3386c946c46a6246280f9a5b9194c18c31643", "mana": 2, "whash": "c9aff12ff01fc01ec01fe01fd00cf834f821c83c3c3c181f781f781fe11fc13f", "card_id": "KAR_062"}, {"dhash": "c28eed3f60c31f8e9e11342870d8c472b9207261e6ce8d192730ce2d081bf2be", "average_hash": 
"073c21e7f9a7fc7c7e7c3e7c167c185c085c081c081c380c1c021c0600062006", "name": "Shatter", "phash": "cdb2b5bad2745a8f5c938e682dc92eec23ea2cb203d7e3314b07da237b017f00", "mana": 2, "whash": "77be3be7fda7fc7c7e7e3efc16fc1adc1a5c085c085c3c0c1c021e060006b00f", "card_id": "OG_081"}, {"dhash": "1613294ce4b8d8feb0b56701af6678cde092012507624cdcff3921570080053f", "average_hash": "cfe1c3c1c4c1c4c3e4c7f79e775e633f6d7a2d1005400440e445000005000320", "name": "Barnes", "phash": "7dbe39e3b653b69eed4d69637632b3358912dec849e9a67ca16452b20c30034a", "mana": 4, "whash": "cfe1cbe1c7c1c4c3e4c7f7df775e67ff6f7e2f1027c004c0e4c505c005800338", "card_id": "KAR_114"}, {"dhash": "e70f98ff68bb975e209ccbf99da03bc5d780e7238f493a32dce431096493cc3f", "average_hash": "0bf001802008200ef0067802f003f023f86bfc33fc31f801f204ff00e800fd9f", "name": "Excavated Evil", "phash": "f3dddc284c33b5899d4dd61695dd1fc6cd33a63d2d18804e2f76283607962a31", "mana": 5, "whash": "0ff001802008700ef906f902f983f1b3fd7bfe73fc31f821f204ff04e902fdbf", "card_id": "LOE_111"}, {"dhash": "00829dfef1c14f9ab86171dde231de47fdb9c127131a7ce5f3db0f9f3f34e279", "average_hash": "07200300013c007c007c004d20073047607f4c76e802c217f65ffe3fff18ff1f", "name": "Demonwrath", "phash": "69bafc3b2f41ba25c60ddb89166c25ca0920e71489f43fc169df5760b9d66989", "mana": 3, "whash": "073003c0013c00fc007c004f20473047e07ffc76e052c657fe5ffe3fff1cff3f", "card_id": "BRM_005"}, {"dhash": "eac4ed0d933f44fff8faa3a145138b36146d2d8e5a11bd4842b171e7fb43078d", "average_hash": "17300f600b6015e054c0d3cb83e393eb93c91bea07ea03cb00c880ddc68fe60f", "name": "Anub'ar Ambusher", "phash": "bf49e6e22f9eee259b11dde9334444e340f099198cac1b416e217364bd93d6d8", "mana": 4, "whash": "1f700f703b60176054c0d7cb87e393fb93e80bea07ea03ea00f885ddc2afe62f", "card_id": "FP1_026"}, {"dhash": "361da17e66947c68f0e0210747f49bab37177c4ccdb43819e23380a1314bde3e", "average_hash": "c323c147e04ff0c7c283c3874c82fc82b983e985918b9087408700060206c43e", "name": "Deadly Fork", "phash": 
"d964336ea8e6ff4f2e27de663ec19e87936566c4648c9398e94973644c126780", "mana": 3, "whash": "cb63c167e0cff0c7c283c7c75ec3fec2eb83e987d19b908f509700868206e43f", "card_id": "KAR_094"}, {"dhash": "6acfe4dea1b94372b6246c49b83241a7a14e1f9d342a2144cab9d461398b7132", "average_hash": "0fb08fb197b9b5b9b4bf34bd037d33d8f350b341334231c0304030a031903b10", "name": "Undertaker", "phash": "edf12db59ec7d6c25e8e8cc84d08a190e6c04e806f11e6a1ee847b61fd396d69", "mana": 1, "whash": "0ff08fb193b8b7fdb4ff35fd03fd73d8f350e3c131c231c03040358035b03b30", "card_id": "FP1_028"}, {"dhash": "02b2087661eddbc3bf9f5d7ebf887c31e3e2cce5f183a61f433f8eee0c9d0178", "average_hash": "8304810c800e001e001e1806781e183d183b183f183f183f981fd81ff01f807f", "name": "Anubisath Sentinel", "phash": "c9a776727a72894db799c393c79e986dac884b9b430e1a66a4e14e705526cf4c", "mana": 5, "whash": "fd8e918c801e801e001e182ef81f383d183f183f183f983f983ff81ff11fe17f", "card_id": "LOE_061"}, {"dhash": "f8fffdff01000f581cf038b6f166e385870981337e47f88cc3181c33297ceaf3", "average_hash": "07004300f91ba070c060c071e073f07af076e060cc7dcc6fbc6ff44e7038f07f", "name": "Starfire", "phash": "a16ab605ced15f1a66c45da89486766289322421869cc9cd0bb3af9d5b7adbed", "mana": 6, "whash": "0700c200f91ba070c072e071e073f07ef07ee460ec6dcc6fbc6ff44e707ef07f", "card_id": "EX1_173"}, {"dhash": "8cf30cc081bf0b0c36da4c30b3647689a44249a5b64aed4512b34c6c9580ad32", "average_hash": "f71ffbbffbb9f83bfc39fc337c22ec10f81260002006200a1019103000000900", "name": "Skeram Cultist", "phash": "e36573847fc15b707e9619d05b92008409214c3ee6d0de69ffb4363d360b9eb3", "mana": 6, "whash": "ff1fffbffbb9fcbffc39fc337d60ed52b95269402946b05a1018103901100900", "card_id": "OG_339"}, {"dhash": "f887d9ff20180f787e80fb01708fc11b836c44f908860304072eac7f60f0bcce", "average_hash": "0700e341f31bf071c07bc07d40fc087902790279127c105c0064007e807fc02f", "name": "Tree of Life", "phash": "a98b33ccd2754e811e09cc2e5ce22c60862f8eb667d07cf8c3c9322d7be5d1f2", "mana": 9, "whash": 
"0708f363fbbbf47fc07fc07f40fc0879027d027d127c127c187c007e807fd03f", "card_id": "GVG_033"}, {"dhash": "dc3f3cf720c81580270c2e38d8d1b2e36784cd02c927ba5ac4a3045602a0cd6f", "average_hash": "0783c307e31ff01f703d303ef038b032f03b703b6019201f800f800300000000", "name": "Ogre Warmaul", "phash": "697d7f201e121bc836048e3004982698199026c933e1fb6c3769ce37ffbd7e5f", "mana": 3, "whash": "8783e78ff31ff11ff83f783ef83cf83af23ff03f7039e01fa00f800f00004901", "card_id": "GVG_054"}, {"dhash": "f63f30e00f049c3d98db70b77760c0c489997f7220c4cf0d96330053f0a8fc17", "average_hash": "c1e7f99ffc7f7cf06cc17ce07ee03ec044e06c941e84bece384d004c00000000", "name": "Purify", "phash": "07aea14ef9575933fc75de58de85b4dcb4c906a225f6a47e06fa88148b69a482", "mana": 2, "whash": "c9f7f9bffc7f7cf06cc17ee07ee03ec066e06e951e843ecf380d004c0020d017", "card_id": "KAR_013"}, {"dhash": "5332b8e4040138bf7070cfc09b29009b30664ccccbb0b7596a72dec4b98de37f", "average_hash": "e5efb3cdb8df28c0fbc1dbb80bbf038203800380039010d010d033c01700877f", "name": "Museum Curator", "phash": "f5e5519b297ad479bb5e31bf2993f6d210eec2c81dac00e4312c86c493f049db", "mana": 2, "whash": "edeff3effcdf28c0fbc5dbb80bff43c203c0138023d030d011d033c01700c7ff", "card_id": "LOE_006"}, {"dhash": "28df2db0920009167771a9c61139c2c13c177bfcf710ed89d03526e14842d7b6", "average_hash": "f71fff7ffbd7f89078b00aa81b063ac22180018cab84a084a08c9044a004e600", "name": "Vitality Totem", "phash": "7336bbcdbbfa7be7bb8409c22e5120d9225cc2201ca75c016708ff24fea0eea6", "mana": 2, "whash": "f71fff5ffbf7f8b0fcb08ea81f823e822980818cbf8ea286a88eb044e285e401", "card_id": "GVG_039"}, {"dhash": "c287bd7d60ea83d11fa73f3cbc4870b1e166864d389264e6c80d013a7226eccf", "average_hash": "0730e3c1f381fc1dbc1e3c3f783f183898709961db63c2628e600c404440e807", "name": "Darkbomb", "phash": "0b47ebb17eb8b642a4cb0decc9c4861a09764c86cbd93af0cb0d7e39f3619e06", "mana": 2, "whash": "0738e3c1f781fd1fae1ebc3ffc7f987c98709a619b63c6628e600e404c40f80f", "card_id": "GVG_015"}, {"dhash": 
"7235e9c30434194832d86d219f137007405c0cf51922735ce0b989530382873e", "average_hash": "fbeff9fff8fbfcf24492e0bfc03f203f00270003000300030003000201000128", "name": "Prince Malchezaar", "phash": "d9ec61a7761bd3cbed5279fa3ec9966e9c4c42b629a4b654a51c364d28814e42", "mana": 5, "whash": "fffff9fffcfbfcf74cd3e5ffe33f237f032f010701038043004300060100033c", "card_id": "KAR_096"}, {"dhash": "8aec8dcb92973d2172e684cec99db33b2f77c0cc81510389803202e56b8ab715", "average_hash": "f75b7bfa79ddf8d818d21fd39fb19bb18fb11fb01b90088008c000c003000700", "name": "Murloc Knight", "phash": "c785e3696ae8f35cbb5010389f4c08e41064735221f73b937b92bad3e6a35e71", "mana": 4, "whash": "f75b79fa79ddf8fd1cf21ff39fb19fb18fb11fb03fb008b008d000c403040700", "card_id": "AT_076"}, {"dhash": "eac2cd07931f043c797827e24880c100133f3ef0dfc40809593ae2e5c4410332", "average_hash": "f71c7b24fb60f840fcc07e90fc109cc104018381ec87c801c0c1d000f0014000", "name": "Wrathguard", "phash": "638be7d3ebf259bb5c7029b0691c1818145b931b2d13bf497f043f41ae03365e", "mana": 2, "whash": "f71cff5cfb60f9e0fc80fef0fcb09ec906818785ef87dc81c0e1f0c3f0816200", "card_id": "AT_026"}, {"dhash": "48dc0cf8c1600a1f35065ae1bc026179c68288351d8b7ac6a5b96b689e9f313f", "average_hash": "f797ffa7bbbf382fdc2bf019d47988712032203000210001000100017913391a", "name": "Mukla's Champion", "phash": "bbdd5b61db04de14e7a432e323e12284180a8cc36cf2e3ec3e2737e73931b331", "mana": 5, "whash": "ff97ffb7fbbfbdafdc3bf419d579097921722170016101438001008179b3391a", "card_id": "AT_090"}, {"dhash": "e01ffdfeb00107a41ed83ce0f084e70eed5893c102032707ccfc107933c0fe8f", "average_hash": "0720c38ff31ff877f075e072e871c075e676fe7e7a3c7036002600640000e007", "name": "Totemic Might", "phash": "a996bb9ddec94f201f32b40ee60924a480b486998b59ae65e9266a8d6bdbff81", "mana": 0, "whash": "0730c3c7f13ff977f875f87ae871e875e676fe7e7a7e7036001200640000f00f", "card_id": "EX1_244"}, {"dhash": "faefad9c5238cc78f8e3b2864faddd6217c76c88f935f44b813f10e7a34f5f3c", "average_hash": 
"07108758837b0051c0c1c2c36007badbb00ba387e0bfc0bf00dd805c001e021e", "name": "Succubus", "phash": "3989b72f6e76ef35cf0966a212c410d200c14cf4c698db443f89df017f13ef74", "mana": 2, "whash": "0f10af58cb7b00f3c481c6c36cc7badbf28be38fe3bfc0bf00fd80fc009e021e", "card_id": "EX1_306"}, {"dhash": "5cf10c6b41d70a8c35405a89bd1a52b5af7a58c5b8abd1d66731066e0c901934", "average_hash": "f71ff38e639f783ff83f300f14131412040704030023000a080e181418000100", "name": "Dalaran Mage", "phash": "c9dd731867127fd69ea41e219b3003a623e2a6c59bed996c3c9a364b29b09ba3", "mana": 3, "whash": "ef9fffbf63bf7cbffc3f343f1d7315530557955701530468180f181419000104", "card_id": "EX1_582"}, {"dhash": "3adb2c2861d0ca78b1c5528bac9a4935b3627445efcbdc173837e06041838336", "average_hash": "c796cfa783a780038003840384419049981c181cd81dcc1de43be007f107e107", "name": "Ancient Mage", "phash": "1bdb9e1cfb346f87eea133a48fa4c0e021631262e46c6e643b42b949cf23bb33", "mana": 4, "whash": "cf97cfa783a78483840384038541914999581d5cdd5fcd7fec3fe087f187f107", "card_id": "EX1_584"}, {"dhash": "88f70dde9384056b7adea4244941920624ff7e9e1fb40f48a1b1c8e26b4597ae", "average_hash": "f73dff7ffb7bfd787cf27bf63bf203d003d003cc03c603c300c100c006800600", "name": "Undercity Huckster", "phash": "f7a4e36733da7b9a2e10d390cb9064b024f90e224ec33f01df265fc1bf09de24", "mana": 2, "whash": "ff3dff7ffb7bfd787cf27ff63ff217f007d0038c07c603c300c004c002800620", "card_id": "OG_330"}, {"dhash": "acefc97c82d384017993836e1304664bc4f288a53903124861b17de0fb4707bf", "average_hash": "670fd363bb4ff84fcccecfc6ef662b660f233d222f8300c000c08040c2018603", "name": "Anub'arak", "phash": "9b0eebc23bd9f6c9bb547c266f840e1e4ccc5398ced839d14e8c51b1c68b11cc", "mana": 9, "whash": "4f1fdf6ffbeff86fdccecfcefe666e660f233f222f82008000c00040e2858607", "card_id": "AT_036"}, {"dhash": "6cff2cecc1910b06b7194872befc67698202840d186a20540eb8fb7163863739", "average_hash": "f79fffbffbbf383f3c3e6c3ecc7ff47ff833f821400300004000400001003110", "name": "Dragonhawk Rider", "phash": 
"fb5dc9017e3133e1b3913ee22e666222c6b09978bec78687dbcc78d88e0b30e4", "mana": 3, "whash": "ff1fffbffbbf3cbf3c3e6c3ecd7ffd7ff973f861c10300404000400001003100", "card_id": "AT_083"}, {"dhash": "fa3ff5ffe3032f541fa73f467818f8bcf2f10023a34204870f0d399ae0787b83", "average_hash": "0500c102f91f7c73be793e7c1e7e0e7e8e7f7e7f4e7c4e7c7e7cfe3822707000", "name": "Sinister Strike", "phash": "2b2adbfff2f57ede9ce53e324e902718064884c6c3dc8d464937e7101b43bb80", "mana": 1, "whash": "0500c102f81f7c73fe793e7c1e7e0e7e0e7f3e7f2e7c4e7c3e3cea3802301000", "card_id": "CS2_075"}, {"dhash": "eaf78d48122328ce710eaabc16d8aca17bcbe79ccf411f0b783662edec4bd1b3", "average_hash": "f70ff11f794f3886380e38801800980030b831b8f139f890f8b1d801f8109001", "name": "Dust Devil", "phash": "e313cb4afbe279f93efa16872f583c90123406069e90d6249e68478ddb9669f8", "mana": 1, "whash": "f71ff95f794fb887788e3c80bc20bc0030b8f5b8f5b9fcb0f8b1f801f810b003", "card_id": "EX1_243"}, {"dhash": "faf7fde0a101679adc30326f27de6e3c99e5009bf33067c30e2688cc0090fe2f", "average_hash": "070071483d3c3c781e6c8642866ca67ebe7e7ade721e261e780370030000c003", "name": "Naturalize", "phash": "cf602f7bf2dfeb8479f23e725e20968d09722c70c3cd1e19c9211e364b99c341", "mana": 1, "whash": "0708f37d3dbc3d7c1e7c8e62866ca67efe7e7e5e731e661e780770030001c007", "card_id": "EX1_161"}, {"dhash": "6afaedf412c329845748ac181831d4e16187eb0cf631ccca98b591e2234547ba", "average_hash": "870fc14ffbcff09ff0bd72bc3b1038907898f99cf3bee08ec08ec00600020000", "name": "Flametongue Totem", "phash": "6996cb9999ea7b768e2d16e42ed022cd20d8cb20d6a4d744dd225d89bf23be89", "mana": 2, "whash": "c71ffb5ffb5ff0bff8bd7abc3a9038907998f9bcf3bce08ec08ec04600020000", "card_id": "EX1_565"}, {"dhash": "c8dfcd39920728c97142e3c4073ccc391cf3602c81c03788ec30fbe67e4df3bf", "average_hash": "f713fb63fbc7f883fc807e801f001c803d8c1d980f800c8048806840f800760e", "name": "Earth Elemental", "phash": "c70ff3c3f3e0bc6ce9703c1c0e51632101d096247dcc4745dcac76e7d9326b9a", "mana": 5, "whash": 
"ff13ff43fbe7f8a3fc817e801f881ec03dce1d9a0f801e806c80e840fa80fc1f", "card_id": "EX1_250"}, {"dhash": "fad96c26d15c0af135e672c5a59a4f578da2724ded8bda862f34e469dc87c13f", "average_hash": "0797cfa6dba09981980380039041984108078801900df82ff827f807f90ff90f", "name": "Zombie Chow", "phash": "9bd39ee7ef74ef94ffa499a03969802c0d8a86413e499e44be04d6e599899b84", "mana": 1, "whash": "0f97ffa6dba398a19c0380038141994109458941f94ff86ffc07f897f91ff90f", "card_id": "FP1_001"}, {"dhash": "fc32fc48e0af9d7f267e29f8dc90bbc326006a02d025665a9da07155c2a0c56f", "average_hash": "07800388030038107c33fc2bfc3bf83be01fa00f0006c006e000e001e001e901", "name": "Perdition's Blade", "phash": "3335cf013e1a5c20391933d88752031865e6846119a17e66cffc9e399ffb7e36", "mana": 3, "whash": "07c0079907d079707d37fcaffc3ff83bfb9ff81f3006c006e001e541e901e9a3", "card_id": "EX1_133"}, {"dhash": "c8d90c32519c8a31358f581ea06443c987f23ec5478a89155e3bbc606089c136", "average_hash": "7791fbb6fbacc8298c3be411784078403c403803fc01fc00fc00f800f900f900", "name": "Coldlight Oracle", "phash": "e3279dd06f23c7a1ee90e6a43bf199688cf033d3ce9966b872a6ece4cc489848", "mana": 3, "whash": "ff95ffb6fbacccad8c2bec137d4079403d403d43fd41fd40fc10f800f900f900", "card_id": "EX1_050"}, {"dhash": "08f0cdc512934d26bb1476d9887673c96c02d87403eb8615d8b6256643889d34", "average_hash": "f71f3f7f3bdd30dd30fc30d8304878327c36043c040004100014001801100100", "name": "Nat, the Darkfisher", "phash": "edb9eb89ff3b9bc6db0653604c7008640304720c6a305f253705fb86ef0ddf17", "mana": 2, "whash": "ff5fbf7f3bfd35fd34fd30dc397879f27df60d3c05580d500434049801900110", "card_id": "OG_338"}, {"dhash": "faf7f9c1c26345bebb7076219c027087451a9a94342be2c4c8b7d36f7f99e736", "average_hash": "0f14077f07da01fc20ff80dfc03f403ee022802000000000003c003ec11fc11f", "name": "Yogg-Saron, Hope's End", "phash": "2db40f894f2a9648bb162dcd5a7bc0fe645ee6f726b3d2b45f4486a42f013780", "mana": 10, "whash": "0f500f7f0ffe35fce4ffa0ffc17fc1fee1e281210142014c003c00bee1b7c13f", "card_id": "OG_134"}, {"dhash": 
"ead8ed31d2f384ef39c3710d83104e6d19b203643ccbfc976137d76e0c9c893b", "average_hash": "070707670bc70047e84fbc879c07040f0c0dcc01c003d803d83b983b9819191f", "name": "Millhouse Manastorm", "phash": "9befdfb4ed8436e64e3483645be423c9295826298e64c681dd1076497b036f8f", "mana": 2, "whash": "0f170f671be71047fc4fbcc79d47144b8d0dcc01c503dc03d83bb83fd919191f", "card_id": "NEW1_029"}, {"dhash": "8a03fdff608e831b5e731dff38f0e1e0cf051f0be0430492382ce15906a7f89e", "average_hash": "07204180e100f830f838fe3cfe75fe73fa77fa6fe078c03080200064000ed00f", "name": "Lightning Bolt", "phash": "235637f5dcfdfcbcdbe88efe6fcc892222658c7883384a852d001aa1d3922f45", "mana": 1, "whash": "073043c0e110f938fc78fe3cfe77fe73fa7ffa7fe278e03080300066000ff00f", "card_id": "EX1_238"}, {"dhash": "eab3f5e7201c6739985f34a629f95b3c91c8c6309d4168cfde1b34b72b2cf64c", "average_hash": "0700810cf91c5c7c0e7b7e101e00fe41be007e306e201e162e9c365e3a4efc6f", "name": "Bestial Wrath", "phash": "0728bedff9ff7eb4b25b5e20969c1ce80d369cc003fec623230c6b4cb9611a0b", "mana": 1, "whash": "0700c34df93c5d7c4e7a7e503e40fe40be007e306e30ae163e1c265e3a4efc7f", "card_id": "EX1_549"}, {"dhash": "c8d70da843000d667780ae29581bf37640ff91c8ba05ff48feb900e3794a8736", "average_hash": "f717ff7feb6b34f13cfb02f803f802f613f01bf01ff034d8e0cf00ce00a60617", "name": "Imp Gang Boss", "phash": "edb4b3895b1a73929e8669322f6106e7c1a8c4343ee67e76cc665964cb4c344d", "mana": 3, "whash": "ff17ff7feb7bb6f11cb902f803f802f612f01fb01fb03498e0ce00ce00a60617", "card_id": "BRM_006"}, {"dhash": "0ac60d189234a9c97023e30e0c94d9a941f384641f40208847307fe3f24fc799", "average_hash": "f717fb73fbd3d883cc878e8d0f580cb00da00d800f80008000c00040c00cc61d", "name": "Dunemaul Shaman", "phash": "170fb385fbe0f76d2b787e785b10429600342924c6ec5d4b99a0cdf4ceac2ba7", "mana": 4, "whash": "ff1fff77fbf3dcb7cc878e8f1f983ef01de01f804f83068000c00440c28cc61f", "card_id": "GVG_066"}, {"dhash": "00808d9fe07feff1dc91316662decfe83f1adfe53f0b6116782c72d8f400e017", "average_hash": 
"77bc03e001b0807206670ee2cec0eec0e406fc27fc63fc31f060f8307e10f80c", "name": "Vaporize", "phash": "b3769c28ec85f3555a2926ab9b4889066960233999476eb50bdddff0dae9f220", "mana": 3, "whash": "f7be03f001b0c0f28e620ee38ec0eec0f806fc27fc73fc31f870f870fe10f80e", "card_id": "EX1_594"}, {"dhash": "0cf04de0d2808b0f7e1f2c3ed844a08920f281e70b841308263bdce6a0493336", "average_hash": "f71f1b5e091c083c0cf84e382cbc1dbc0c1a3d803d803c003890781038123216", "name": "Savannah Highmane", "phash": "cd2d298bfba0d34bdb70331093d031659878730421dc1ad63bedd793b3732c8d", "mana": 6, "whash": "f71f1b7f0b5f08bc0cfc4e7c6ebc3dbc0d3e3d807f803c803c9078903a163616", "card_id": "EX1_534"}, {"dhash": "0af30d2453c4c5017e6320ce5a1ce53842f380e03fc57e8a3938e3e086450f3e", "average_hash": "f71ecf7c036f05f60cf80ec19db11ef11ce01fe01de00ca088c188c3c003861f", "name": "Voidcaller", "phash": "9f9f6f9279a067c2cfc013996ec4326424a431e4b1b1da64db43d740ee1ab5b9", "mana": 4, "whash": "f71ecf7c0b6f80fe0cf80ee39cf11ef91ee01fa01fa00ca48cf388c3c08b861f", "card_id": "FP1_022"}, {"dhash": "68ce6cb951d3832437ff4c58b8a27345ec9a903529c946961934b3696492c73c", "average_hash": "979b8bb34bbf7dbd0c3dfc3ff4307571253c042c0d230c60cc20c8248104c110", "name": "Eldritch Horror", "phash": "2f9fdb895a239e4effa0b3260e6d09f22569caf826604bf29bc73cc34c8a9224", "mana": 8, "whash": "9f9b8fb34bbf7dbd2c3dfc373571b579657c056c0d610d60cc20cca48184c118", "card_id": "OG_142"}, {"dhash": "03c04d7ec0c89b91bc27761ff8e883cdb68b419703643718f633c16706cf7cd6", "average_hash": "ffff19f8c3ff887f807f80f831f010da4cde24d23280318038821c830cd64cce", "name": "Shadow Word: Pain", "phash": "c5ff29139b17f60047ecc64f76d0097e499b0e2031380b975bc15bf0db4bdb06", "mana": 2, "whash": "ffff49f8c3fd887f807fc0f871f011da4dde25d22280318038c21ccb0cd65cce", "card_id": "CS2_234"}, {"dhash": "8cfd0d72d2208dc27b85e41c197ff692682dd11e8cf9194323be46f4094e2718", "average_hash": "f717fb6f4b5f18760cfb0efe033600be02be02be01b90139009980dd000c0010", "name": "Mech-Bear-Cat", "phash": 
"cd84d306534afbc87bc663c8266944300274ba30b6b4de921fdb4c6fbd253ecd", "mana": 6, "whash": "f71ffb7fcb5f18770cfb0efe07f606fe02be02be03b9013900fb80dd009c0210", "card_id": "GVG_034"}, {"dhash": "f63fc87120c3158d33122f3cd974a2a57c90b36266849b196427985d42922569", "average_hash": "c783e307f30f300fd80f981e7c1efc06240e6c0d880df80bf813e037e0082008", "name": "Charged Hammer", "phash": "4b1fdf80fe083b481e5cb3a40cc33cd6c7ec02fb24bc67780df3466c23787c32", "mana": 4, "whash": "c787e30ff30f390fd81fd81ffc1ffc0ef40ee80fc80df80bf817e037e0092008", "card_id": "AT_050"}, {"dhash": "a8f90c8641084ab9b5744bc7b2186f25f47aeec598cb611703375e6e7489c132", "average_hash": "c717c79fc39dc008000800250023103f941f9c3f9c371c3f1c38d818f800e114", "name": "Argent Watchman", "phash": "89777cd076068fa5793886c9d2d4226b4c6892c6b69477327cb0cf83bf07be0c", "mana": 2, "whash": "ef17cfbfc3bdc0b88008042d8863107f947f9c3f9c371c3f1c39d819f90ae914", "card_id": "AT_109"}, {"dhash": "78d8ad36e2614881f9e2b1c5674b9e96390e731fcc34b8cbe1b700ee0748ff3f", "average_hash": "070303668107c00780c38247828fc38fc00fc189c399c03f80bfc0bf001ae61f", "name": "King of Beasts", "phash": "19a3d6728fa4390d66028d843689262989f436c949d99e757bedf691b9ed4669", "mana": 5, "whash": "87138b6783c7c087c0c3c2c782cfc3cfc10fc38dc39dc0bf80bfc0bf021fe61f", "card_id": "GVG_046"}, {"dhash": "48f0cce491980b21360f4c96b8046269edb2f6e5c9c911130734a269c49a1920", "average_hash": "f71ffb1ff91f983d98377c312c300c004c242c3d3c3918301820182008001000", "name": "Lil' Exorcist", "phash": "8b976b107be169f87a78d626af4c18969266986929cd3b9e9bc95a69c892938c", "mana": 3, "whash": "f71ffb9ffb9f983f9c3f7c393c339c02cc3d6c3f3c391c311c30183018111900", "card_id": "GVG_097"}, {"dhash": "6cf08ce0c19f8b03368f4cdcbe387171e942d32583cb6497c3370e6ed89f813b", "average_hash": "070b131c011c001b203130303839783c383c383e4816081f183fb81ff01fc01f", "name": "Archmage", "phash": "e9b576405a295f666bd387c99224a13018c433c90dcd79e67ea679962e1b538f", "mana": 6, "whash": 
"871ebb1e011c403f20333830383d783d7c3cfc3fd837981f183fb83ff01fc01f", "card_id": "CS2_155"}, {"dhash": "38b82d40e2017d47f07883fdc49f83333e2774ccca909d0922b30ce63185671c", "average_hash": "c767e3fe03da00d000c083c1c3c3fbc3fb8ffb9fdb9cdbcd20c820cc03040704", "name": "Quartermaster", "phash": "5da19d222c3e6947bb4613903fc2e69092c493e539f938816e6f9ecfa9c37251", "mana": 5, "whash": "c7f7ebfe03da00d000c083c5c3c3fbc3fb8bfb9fdb9cdbdf20cc21cc03040704", "card_id": "GVG_060"}, {"dhash": "9adb2d24d273cd857b6be6fe108d76826d94cf2f9e597e83b83c70f6844d8b1c", "average_hash": "c706e3640341004d08cc8891cb21c835c031e0b3e131e130f888f8d3b019981c", "name": "Fandral Staghelm", "phash": "3387dc1ecfeb336ebe23c3422e41016c48f2923093cc1e311ccdd2f1bb0bdec7", "mana": 4, "whash": "ef16e3670355006f08ac8ab9eb21ea35c231e2b3e1b1f831f8baf85bf819d81c", "card_id": "OG_044"}, {"dhash": "3afc2de06080f904f209e4b1897117876c3ed8ec30c1608bc03281e483892713", "average_hash": "c70fc1dfc1dfe0dfe0bf63bfe19fe187c1a709a709a700b2009200d203030300", "name": "Lightspawn", "phash": "d995796b3ceb9f57db425068ae5484d400d043870cf12ab0f78ddac4bc335b75", "mana": 4, "whash": "c7cfc1dfc1dfe0ffe0bfe3bfe39fe387c1a709a70da700b3009201d303030301", "card_id": "EX1_335"}, {"dhash": "8087cd3f20f801f03fe07f008f003c417882c304e348b633498ee218f7818c1f", "average_hash": "f7bfe3e7f18ff01ff01ff81ff01f701e380e68066007481380318400c803f807", "name": "Seal of Champions", "phash": "5b55f99abf230e044e54ad601b0c045aef144a544bf11b854ff85bbd4bee53e4", "mana": 3, "whash": "f7bfe3f7f18ff01ff01ff81ff81f781e780e680e6807c81380518401c803f80f", "card_id": "AT_074"}, {"dhash": "28c64c8c911f833836d14ca6bbe47ec9f0f2c1cd8f89141641b40069ff801d3f", "average_hash": "f71ffb91fbb1f830fc306c304c186c3c0c3e183cf831d0200020003000000100", "name": "Force-Tank MAX", "phash": "2797330db261f9c14d2b0c6cb3c418b4495a734ae7e79ee759b6d6d20c839c07", "mana": 8, "whash": "ff9fffb1fbb1fdb1fc31ec305d786d7c0d7e397cf971dd60102000b001800108", "card_id": "GVG_079"}, {"dhash": 
"68e68d1d427bbc84713d8349c6f99ceb3b1735ecc830130b9630f0e2a185471a", "average_hash": "c749b3f9a1d120cd68ec33cc13ccbb86ef8bcf8db38c31a830c080ca03030702", "name": "Aldor Peacekeeper", "phash": "5529a79bbc6c3119bb6544e36ed01b5a0296d8a45afb9b5cf7313ab6288ec2c4", "mana": 3, "whash": "e7599bf921d120c768ec77cc338ebbdeef8fcf8db39db0a050cc80ca83030702", "card_id": "EX1_382"}, {"dhash": "03808d0f64f771ee67dccb929f37736dceff10d94d10bf28ca959cc3f996e03f", "average_hash": "ffff6bf063c07000360023062133a37325e773e9fb88ff80720071007101f1b7", "name": "Mindgames", "phash": "f7500dae62e73783bd9e99344b01c3da89f24b8149d552fa5b323b68d9c9528c", "mana": 4, "whash": "ffff6bf0618070003a006306213b23f3e7e7f3e9fb88fb80f2807180f181f1bf", "card_id": "EX1_345"}, {"dhash": "4ade042401084a5cb4f851f3a3864f1d9f0a3c1d792a404490b834616382873d", "average_hash": "f793ffb7f3adf4a0f440e403e543c543c143c043c10700448000800001000100", "name": "Young Priestess", "phash": "33faf3b56eb39de39b899bb8e38c86600c2136c43b191f196f801e439f81f3c0", "mana": 1, "whash": "ffffffbffbadf7e0f4e0e547f7c3e5c3c343c143c16781c48040858005800320", "card_id": "EX1_004"}, {"dhash": "28f04cced19e0361b61e68f5a40a4d3dbb720e65b68acc0418b97876f18be32e", "average_hash": "e71fdf9e4b99c939103e503b100b00058405cc03c801e809e000f010f014f10e", "name": "Nerub'ar Weblord", "phash": "33ff9dcd63063337dbb072c8cb46813409323381b2083b9bef0cdc10df28dee7", "mana": 2, "whash": "ff9fdfbfcbbdc8b9143f543b154b054d9555cd43d913e841e000f010f114710e", "card_id": "FP1_017"}, {"dhash": "fce7accd61934b26b75c4e9dbc306071dd06f81de0abc6470db2186865999f37", "average_hash": "0718039823982038303c703f383f387ff83f003f003f401e401e4000c0001111", "name": "Nerubian Prophet", "phash": "69dd6f0936255c8696031686cdc0646022290c3ccfdc7bc7fecbbc716fc29bb1", "mana": 6, "whash": "0f980fb823b974b8303c703f3d7f397ff97f117f417fc07fe01fc000c1101113", "card_id": "OG_138"}, {"dhash": "0afcccf211248b4136834c24b9787a61e082d3a5cd4993932c37376cdc9b4136", "average_hash": 
"f70371073b1e58138832183a1c3e7c3e7c3e243d3039603cc83ab835f817f016", "name": "Annoy-o-Tron", "phash": "e975df64d2a37fd8d39007e73c6c9294214c93911c63e784e7213b64ee11be24", "mana": 2, "whash": "f703710ffb1ed8170832183a1c3e7c3f7c3e2c3d303b603cc83ef815f817f00e", "card_id": "GVG_085"}, {"dhash": "eafff8e007000e81b90f76f1e6045c3878c2c107416fb1fc811f0f1040a2ff4f", "average_hash": "0300813f807f807f007f407dc07d807c003e207f007f007f807ff0742060f00f", "name": "Protect the King!", "phash": "a9ba5fcdb655fc346e5436d2d6c9e62c0e3716323782a69c67c41cd293b00cf0", "mana": 3, "whash": "0740813fd07f88ff00ff787dc07dc47e003e20ff807f807f807ff07c2064f81f", "card_id": "KAR_026"}, {"dhash": "8a9fedfef0c143029fc51ecc39187266e4d4d3b98c4379049a897c1e04340360", "average_hash": "0700c103c10ffc1fbc1d3e3e3a3b30703870fc788e718e6eb65cfa7ff87ffe3f", "name": "Call Pet", "phash": "ab20daf57ade3cbab4849c20067425dc46b8a1494bd14976bff47a219f46bf0c", "mana": 2, "whash": "0500c103c10ffc1f3c193e1e3a3b30702870dc700e708e6ea65cf27ff87ffe3f", "card_id": "GVG_017"}, {"dhash": "9a990d32e04cf8e1f1c2e71d8f2b385772cec0bc11706e488cb380e093812716", "average_hash": "87c7c1e6c3c680c1c0c4039f03bf03bf83bf63bf638e618fc0c601c003020300", "name": "Shrinkmeister", "phash": "7da595e3167a733eaf61d9e04b98847102c1ce180c399b99ce136e246f8b7bc6", "mana": 2, "whash": "8fd7c9e6c3c680c1c0cf03ff83ff93ff83bfe3bf638ee18fc0c701c203020300", "card_id": "GVG_011"}, {"dhash": "6af04d709305044b79b3a3264f04984b3497722ee4550d8b1bb257e4984143b8", "average_hash": "f71ff92f1b5e986618cacfcecdce2cde23de019e09be80bec0d8185100000600", "name": "Cutpurse", "phash": "492fdb62b2d1fdb16b98336c1b16828934f08e847ae25f128f036c61ef257ecc", "mana": 2, "whash": "f71ffb4f1b5e98661cca8fcece8e2e9e2a9e299e0bbf84bec0fa10594a084400", "card_id": "AT_031"}, {"dhash": "0ac0853f60c3598fce048319367b1de4f200e03b20f7cdec2fca1f802516ca9d", "average_hash": "f79f01fce1cf700eb00e320e370c771ff73f20fc027c02de006e50265e0c9e0c", "name": "Avenge", "phash": 
"6d50f3b892ffbdae3be18ec80c21a3760bad4ef4d01c7b49dbd4590853281c1b", "mana": 1, "whash": "f79f01f0e1cf742eb45e360e370c771ff77f20fc027c02de006e50265a0cde0c", "card_id": "FP1_020"}, {"dhash": "cafb05e592cb05373b486c9a902465d94332afc4c80b33976e34cd7092810527", "average_hash": "f70e715e195d385d383d1cb86c202c38fc33dc3b1c180812c803600240030103", "name": "Dr. Boom", "phash": "abdf6db3daa07f66bb5e76349e6123c3214466301bc33826cc311c4b598af2d8", "mana": 7, "whash": "ff1e7b7f3bdf387d3c3d1cbc7c286c38fc3bdc3b1c390832e8036003c1038103", "card_id": "GVG_110"}, {"dhash": "0dc0cb3fe4ff13d06e00d181a6374c6fc88e131b2736cc4c98306268d8f13ccf", "average_hash": "ffff07f00297ec3fe07f814f814fe3c7e160e3e0e2c0f0c03ce018e180e1e0e3", "name": "Mind Control", "phash": "f15a490085956624df589fdb7b6aa153633e999f4bf5537e43a15aa859c11740", "mana": 10, "whash": "ffff0ff0029fec3fc07f814f81cfe3c7e1e0e3c0e2c0f0c038e018e180e1f0e3", "card_id": "CS1_113"}, {"dhash": "ca3f35e7e18087303ce7191f3626611cc0d891b1272b8f46168f641c8931fea7", "average_hash": "0700070de33c287808710065047d007f087e247c247c3478243824380002f00f", "name": "Sprint", "phash": "add8bf9f52e56ffbdbe6b6f99392866423418c4c63100e0013c21ab63ba93d26", "mana": 7, "whash": "072c87ffe3bc2b7808790c77067d047f0c7e2e7e347cb478b43834380432fc1f", "card_id": "CS2_077"}, {"dhash": "b8f36dce92118dc2781de32200d9c0220345069e0978b2c0ceb999f563429718", "average_hash": "e71fcf7ddb7fdcf7fcff7fcedfc4bec193c183c103c103c200c200c400880600", "name": "Druid of the Fang", "phash": "5d2b2b436dcefec92b9146264e4c50908034ba36b3a4dec1e7ad51e9fd236a56", "mana": 5, "whash": "ef1fdf7fdb7ff9f7fcfe7fceffc4bec193c18381038103c200c200c000880608", "card_id": "GVG_080"}, {"dhash": "6adacd21929608fcf9a0b3c3e61d87a71c5f32aa4c5431887a38e1f5c6032fb7", "average_hash": "f717ff677bed78c1f4c7f2c7e2c3f3c3b3c48f81828300c084c088c10b010601", "name": "Sparring Partner", "phash": "93b3fbc2ef86ee635f6219261830193260f8b64c248c6e837f43df4c7d0c3f05", "mana": 2, "whash": 
"f717ff6f7bed78e1f447f6c7eac3f5c3fb84af81ab810c808cc088c10b810401", "card_id": "AT_069"}, {"dhash": "f27fc8ffc17507080f115c6abac6b40c6f18d2618fcb20d77f0ffc3e00f80008", "average_hash": "0301f903f817f81fe81fec15e434c43084370c34cc311c187e1efc3ff87ff00f", "name": "Moonglade Portal", "phash": "2bbff614d3498e45b41277b8bc0f990a4e69b1e66e789ce839b42690aca59378", "mana": 6, "whash": "0101f907f81ff81fe81fec15e435c43084370c34cc311c187e1efc3ff87ff01f", "card_id": "KAR_075"}, {"dhash": "7cd9ed2ed35b04a37800a1c0448b8f3e1ef320ae4140be88fd31bce3f04583af", "average_hash": "0f33df66fb61bde1bcc193c003c083c11fc01fc02fc06fc07cc0fcc0f681e627", "name": "Kidnapper", "phash": "d70a9f03abf4ce063e007ce01e04036309f03818cce9778d4fbff2c9786ec77d", "mana": 6, "whash": "0f33df76fb63fe63bcc18fc007c003c11fc01fc02fc06fc07cc0fcc0f281f627", "card_id": "NEW1_005"}, {"dhash": "0ab00d6460c07801f100e21d8fe31ca7314e2d1c1af020c8c6b2ade75b8ba31c", "average_hash": "f7eff7efe3cfc0c302c103c063c473cc1388c382c38301c000c021c02314371c", "name": "Hooded Acolyte", "phash": "7f39595aab9a996febc2449cba81906d00e17b8610de1e919e613b24eb3a7e94", "mana": 4, "whash": "ffffffefe3efc4c713c303c073c473d813cac3c2c3c303c000c001d073943f1c", "card_id": "OG_334"}, {"dhash": "82c3fd0f601e03ff0ce2130830006480cb0887192cf3116624cc5099673efe67", "average_hash": "671ee1e1f981fc21fe2ffe0ffe67fe61c621c221826100610260026006785e3d", "name": "Blessing of Wisdom", "phash": "b3d0d3ff6cffbcad1f6fcda0c9083cc027d4263112f60a994b095b01d3649b98", "mana": 1, "whash": "779ee1e3f981fc21fe2ffe4ffe67fe61c661c261826102610260026006787e7d", "card_id": "EX1_363"}, {"dhash": "eae6acd8118f0b38b5604f6fbcd07181e702cd159a2b30578eb46160c3813d33", "average_hash": "379dfbbbbbbbf839f009cc3dfc38f878f832503b003b00300020000001000100", "name": "Armored Warhorse", "phash": "a57d7b4bde199fccffe40e9a0dd0128a42c2226069718c93eda07c21d6a3fb4e", "mana": 4, "whash": "bf9fffbbbbbbfcb9f439dc3dfd78f970f97ad17b617b01700020008001800100", "card_id": "AT_108"}, {"dhash": 
"023ec979c693396e93b12cc243349c793ce27062c38484082b31ffe246440f3e", "average_hash": "f963f94ff8cffcdffebffcb63e9f3c9f3f1e1f9e498818880080c00000000020", "name": "Malchezaar's Imp", "phash": "434fb3bdb2e2decbcf92b6746ee6733621f132dd8dbd82f22136141469401e40", "mana": 1, "whash": "f963f94ff8cffcdffe9ffcbf3edfbc9f3f1e1e9e4c881c880080c08000000020", "card_id": "KAR_089"}, {"dhash": "4cf0ccc0918f0bb337464c9cb0785aa1a47218e5748ad7146c39d373ac85613a", "average_hash": "f71f3b9f3b9ff83ffc3f3c3f3c023c123c001c001c001c0040005008f80a6118", "name": "Sideshow Spelleater", "phash": "c367c93179405e32fe30b0488f98313031a4837336f29739efb9fc8d8f731cc6", "mana": 6, "whash": "ff1fffbfbbbff8bffc3f3c3f3d423c423c403c401c005c004002d80af90a711a", "card_id": "AT_098"}, {"dhash": "3acf6d3ed2eccc79f996e43d09c3d2066d75deeeb1190783ec3c83f50c4ab913", "average_hash": "c710c161c347c047c0c3e2c3f312f09780b39ab539bc793268a718d618101801", "name": "Anodized Robo Cub", "phash": "b3a73b620ec32f9726a1662467c91ce718d067206b923e40dfa25c86ff45ef4c", "mana": 2, "whash": "c710c961c347c067c0e3e2d3f352f2f7c2b3dab77fbd793668e718d618101801", "card_id": "GVG_030"}, {"dhash": "0af76c09d1fa0ad1b5a35606ab3c4679e092ff3596eb2cd7f3b4a0637184cb3d", "average_hash": "47908fa95ba0182c0c1e0c178c478c436c606430c430c031f425f002f109f900", "name": "Dragonling Mechanic", "phash": "3397cf8f77c137f4bf0c9b280da4c16843920ce3c7a4b69cc628b0cc3ce4766e", "mana": 4, "whash": "ef908fa95ba0ddad0c3e0c178d578d476d61e570c570c571f425f482f188f90d", "card_id": "EX1_025"}, {"dhash": "ca8bcd7730cd738acf779aff3146737c84f81831b3e36dc6f888b290f761e79f", "average_hash": "07008100f30cb81c3e0c3e697e735e733e033e425f60cf60ce40e600e601f603", "name": "Demonfire", "phash": "0760ec3378fb9ee6df8c35cc8d584698097328edc9e5b3512c817f886b4673c4", "mana": 2, "whash": "0700c3c2f31cbc1c7e2cbe695e73de737e033e425f60df60ce40e600ee41fe07", "card_id": "EX1_596"}, {"dhash": "0cd72ceec1998bb336494bc6bc8c7979e0e2c18587c94f179f361a695492a92f", "average_hash": 
"e701f380d9809811883b8c198c380c3c1c3c3c387834f834f834782478000901", "name": "Scaled Nightmare", "phash": "a3973f09f200e7e47c6461823a0882c889988ccd2df137e95ccadf3ce7ccfec9", "mana": 6, "whash": "e711fba0fba89839ac3f8c3d8c3d0c7e1c3c7c3c7c3cfc3df835f82479000901", "card_id": "OG_271"}, {"dhash": "226ccdc82693fc0650c9a6e04d8f931b7e64f8c2e485c81b01b303e60f58feff", "average_hash": "d51b191b1019000f6086300390839803183f18be183c08be009e001e001c007f", "name": "Reliquary Seeker", "phash": "c9e3b3e97cf86de793c39a6da7844e96119e329319f18c648363367071169b84", "mana": 1, "whash": "df5fdb7b92ff108f6a87308392cb98c31fbf1ebe18bc08be009e001e021c80ff", "card_id": "LOE_116"}, {"dhash": "f6a1201b05f05de6bb04564ab71462e1eccadf153003a39f183f216e4490993e", "average_hash": "030e710bc003300ff00fb017f80f30267033f433a43ff03ff03da8399818811c", "name": "Zoobot", "phash": "e97dd34c6c0bbdccfe26661b6d339bd1ccc4dc12199396c60e86133361461f13", "mana": 3, "whash": "09af791bd803700ff00fb81fe80f38267837f433943ff03ff83de8399909813c", "card_id": "KAR_095"}, {"dhash": "8cf18cc5119b0b3737664c7eb9ec76d9ecb29b653749e2929f37366dcc9cb139", "average_hash": "730e731c391c381838381830dc364c366c366c367c272c2eb83cb81cb81df01d", "name": "Flesheating Ghoul", "phash": "6317dec46bc95b1e7bc60d269a6824e3790c134bb6e1cc34db69c1d9394a23b3", "mana": 3, "whash": "770e711c391c383938381c38dc366c366c366c32fc2eec2eb83cf83cf81df81d", "card_id": "tt_004"}, {"dhash": "4cff0cfc91814b03b7064ec9bcb27c61d18ae0d5088b011637326e6df69ff93e", "average_hash": "f71ffb1ff91fb03fb03d183c301d101d001d00100000000000000000d80ef80e", "name": "Grand Crusader", "phash": "e9f179615b06160c7b0a4698e66034c0783819d8b6bc5fc7dfc62d873e98dada", "mana": 6, "whash": "f71ffbbffbbff0bfb03fbc3dbc7fb07d105d00381020180018001810f90ef91e", "card_id": "AT_118"}, {"dhash": "88b92d4502c87d1f7b9fe53cde5994e32086030c1a70e5488eb319e47300b726", "average_hash": "777e7bee33ec10dc00fe1bfefbfe3ffe7ff46fc0038003c020c001c003000700", "name": "Twilight Flamecaller", "phash": 
"4d02c9e3be68f61ceb1649c39f4426ac08f05ee369d91695a6f7d2d43d2c7264", "mana": 3, "whash": "7f5e7fee3bec10fc12fe1ffefffe3ffe3fd46f8023c003c020c001c003000700", "card_id": "OG_083"}, {"dhash": "0cf04ce401808b00b73b4e07b87c61d98892706533eaf605b93bc270f88f053e", "average_hash": "f71ffb9fd99fe83f083e043c0c3c0c3e6c0f2c0760066807e8099000c003011c", "name": "Blood of The Ancient One", "phash": "699f6d91fb61d3881f1e97613d8052382b8cd2c6bc313bdcf3da89cdcc826226", "mana": 9, "whash": "ff1ffbbfdbbfe8bf083e043c0c7c0c7e6c4f2c0760076807f81b9002e107811d", "card_id": "OG_173"}, {"dhash": "e00f3d7f70f463c8cf91992733de6ea0d958b22176c3cc07991f523cb461e8c7", "average_hash": "07008301c10fc41fc63ec23ffe2ffe2ffe2ffc2ffc2cd81cd878587878607846", "name": "Far Sight", "phash": "2b423a74f48fa633d610ac806c36ac400ef187660bfd3a8e4a4f679b6b7db792", "mana": 3, "whash": "07008201c10fc41fc63ec23ffe0ffe0ffe2ffc2ffc2ed81cd879587078607844", "card_id": "CS2_053"}, {"dhash": "2afccdf102837d0ef339a662cacd9c1b3137406c7750ca09b0b263e10b82172e", "average_hash": "d74e3bff39df30dee0dcebf8cffcc39c039c03868387818100c180c103000700", "name": "Selfless Hero", "phash": "5db9e9ec92f8dae36f4239d7374430b220f0721386c466857399ebc463335ad1", "mana": 1, "whash": "f75e3bff3bdf30fee0ffeffccfbccbfc039c03864387818100c580c503000700", "card_id": "OG_221"}, {"dhash": "1af12cc6d1b98b02371b506ea49c4c799a02310566ebd5c389b7536ea99cf739", "average_hash": "671fe39ff99d983f7c1f7c0cfc047c0c7c08040c000c002c003c003820183118", "name": "Ancient Brewmaster", "phash": "695fdb1669257cd2d7f856ceda48006cc1c819349372b3c466cb7659ad897c98", "mana": 4, "whash": "f71febbff9bdf83ffc3f7c0efc4efc4d7c4c140c800c003e003c00386118711c", "card_id": "EX1_057"}, {"dhash": "bcc96c33c1fc8b3fb41050c1a7fe4b0db53a48e5318ac7149a3b746bdc963939", "average_hash": "07900fa00ba0083c9001c400004be44be407bc077c06f806f80cf80df90ff91f", "name": "North Sea Kraken", "phash": "5b7f3e04ef81dfa6bb2c76d8cee001a1212149cc8c997e7373e6f269cc88ccc0", "mana": 9, "whash": 
"0f900fa00ba00cbc9429c400014bed4bf547bd477d47fd47f80ff88df90ff91f", "card_id": "AT_103"}, {"dhash": "bc3f5cffa09915e2230c2e6cdcd9f933f360848309079e1a38a7a05c429a8d6d", "average_hash": "4780c30bf31ff01f381f381f9839d839d878783c782cf819e01b8015800d800d", "name": "Cogmaster's Wrench", "phash": "29615fe05e9c374c3e021e25c69884349361021381b13c5b6fa6796d6dffefd9", "mana": 3, "whash": "4780c30bf31ff91ff81f783f983fd83bf879f83c783cfc39e01b8015800d800d", "card_id": "GVG_024"}, {"dhash": "7cdf8d3842400463b9e4739f8d305ec739ea871472eac045a8b2b170ef8f1f3e", "average_hash": "37519f7fdbff0d6f34ee04cf3d43315e853ff103c504004000000000810e311e", "name": "Gazlowe", "phash": "df7a9f893f0136c2eb118601432110706c915626e64c1dcd7cbff9e4e76465e7", "mana": 6, "whash": "3f519ffffbffcf6f346f84cf3d4a315f877ff503c144094000000480818f313e", "card_id": "GVG_117"}, {"dhash": "00fffdff20c36f009c8d32096412c0a4cc089b3338e7874e3cc960341348ee97", "average_hash": "070881cff13ff47f367fb67fa67f9267d027c463c473e478e058001004000010", "name": "Lay on Hands", "phash": "3978c91a668d36d79e97760b0d488684e34224fc21676372cbc77b915b995ccd", "mana": 8, "whash": "071c80fff93ff47f367fbe7fa67fb267d267c463c473e479e058005006100010", "card_id": "EX1_354"}, {"dhash": "0082cd1c70f04bcfbcbf7360f78cbe3c0fba0322dbcdb0196033dc7682ed3cdb", "average_hash": "77bc03e10193807ef06ef866cc73843dc40fe003e00370031803005b00dbd04f", "name": "Flame Lance", "phash": "916f3d12ec8956953d91870c63464b864df229381b07cbc573dcc3c9fbe8183f", "mana": 5, "whash": "f7bf83f181bb807ef06ef867cc73843fc41fe807f8037803188304db00dbf04f", "card_id": "AT_001"}, {"dhash": "f8eff99f61362ee01c0031e4709ac1fc80d1631b0fb209250fc806904f32ff67", "average_hash": "0700e303f923fc637ee13ef09ef80ef80ef82678067c3250126010600c20de2f", "name": "Shield Block", "phash": "a788b3cdda557e200c178c236fb0061a53a80cd7b35286f64a7aee89af8dbf92", "mana": 3, "whash": "0700e303f923fce3fee17ef89ef81ef84ef86678267c3278126010601c70fe2f", "card_id": "EX1_606"}, {"dhash": 
"b8f3ecec41b84b42b6b44fc3be1e79e1e02a875dd28b2417cb36946902991d32", "average_hash": "0716839d819d803f803ec01ec01f243f603ed01f481e003ed03ec01f001f0113", "name": "Darkscale Healer", "phash": "29f53f999611e7c66fc809824ea0226084744f9c33cb3cf3e634d3f3ce09f69c", "mana": 5, "whash": "071e8b9f819f803f803ec03fc03f3c7fe03ed03ff83e803ed03fc83f401f2117", "card_id": "DS1_055"}, {"dhash": "30896f2ea4fd78e2f311e0e38487130f0e3e586cb0b0624825b0c9e13380b725", "average_hash": "0ff00fe087e084c1c7ff43cec3c1c3c303c78be383e303c000c101c227812721", "name": "Shadowfiend", "phash": "5d8c1d63269a8c87bf0444cc2f11c92409f0bbb112f356dafff7aad9692b0ac5", "mana": 3, "whash": "0ff00fe087e0c7e1c7ffc3cec3c3c3c3c3c78be3c3e343c240e345c227c12761", "card_id": "AT_014"}, {"dhash": "0280cd7f10c86330c7078e1f342f401e26f8fdc8f13143c2000119067a9cc6bf", "average_hash": "f7bf03e0338c001c021f023e0e7e0f5a1f023f17bf3fff0f0e0f460ce000f005", "name": "Blessing of Kings", "phash": "4b50eca3723b9790b7e63e0ea9511a740be92193c9b573764b6453a95b6ab350", "mana": 4, "whash": "f7ff0bf0338c101e023f023e0e7e0f5a1f023f17bf3fbf0f2e0f460ce000f00f", "card_id": "CS2_092"}, {"dhash": "80870d3c30d8033035e04ec19f803f017f02fe0cf439d077a8e9f4f78b3f080f", "average_hash": "0718c343c1878001800180018003c003e007e007f007f00ff00ff007f81ff80f", "name": "Moonfire", "phash": "3b713c330f88fbcc0f240e484e04092046cc0d144a7167cc5b7377f95bffdfba", "mana": 0, "whash": "673ec3e3e38fc11fc00fc007c007e007e007f00ff00ff00ff00ff00ffc5ffc1f", "card_id": "CS2_008"}, {"dhash": "ca871d7e30806360dc88b1c377beed389361ec8bdd13fe87fc3f30f800e41ddb", "average_hash": "0700c100c107c0100060024006608e698c79b07bf07ff07ff87fe07f807b803f", "name": "Upgrade!", "phash": "a9aa56f4f8f7ebff1e7b1f9ae694946007cc2c0cc33018294306ce28dbe29d89", "mana": 1, "whash": "0700c301e107e0334062024086698e799e79b07bf87ff87ff87ff07f807fe07f", "card_id": "EX1_409"}, {"dhash": "28cd6cf881a1cb1ab771568ba4365941868a3b1d64aa89441a39a7634c839130", "average_hash": 
"c793cfb38bbf80bee039c41f445d6d59604ea00680048000e000980181018101", "name": "Fel Reaver", "phash": "39392dcd9f27d6c6d78c13c939c161a0262c0d9949935968ee8d3737cfa17ac8", "mana": 5, "whash": "df93cfb39bbf87bee4bdc55f65dd6dd9634ea14681449140e040dc809181b321", "card_id": "GVG_016"}, {"dhash": "02c68d3f70e0c2903921c7c3bede03394ff8b8d0e305de0318077c0ef098808f", "average_hash": "071c03e3c187e007e005e005e001fc03fc07fe07fe07fe07fe1bfc19f808f804", "name": "Hammer of Wrath", "phash": "5357fcb2fc2bcfc496ed272d4e9242b282ab235869b17b524b41539959d079a4", "mana": 4, "whash": "771e01e3c18fe007e005e005e001fc03fc07fe07fe07fe0ffe1bfc19fc09f80d", "card_id": "CS2_094"}, {"dhash": "f89ffde13288cfff1bd01fbd6102c8feef70b8010492e700cf01f803300cf0ff", "average_hash": "0700e30779081c007e414e7ff62fb210fe27fc0fbc43fe07f80078026000f00f", "name": "Bouncing Blade", "phash": "03403f80fe5dda9778503e001d2007fe06b416ade9f73e568d0fe62f63c333f8", "mana": 3, "whash": "0700e30779081d40fec1ce7ff66ff210fe27fc2ffe43fe07fc007803f000f00f", "card_id": "GVG_050"}, {"dhash": "7af82ce77188cb00b6114c33b8c67297e70ac275b0cb301683b5896f6e9c8d30", "average_hash": "071f059f83bfc03fc03fe03ee03ec03fc03f803f003f00370009003e801dc119", "name": "Loot Hoarder", "phash": "39b57d645e1fded3ffa3c668868808e093e029043610b3217f02bf49bf87e7c1", "mana": 2, "whash": "0f1f0bbf83bfc0bfe03fe03fe07ee07fd07f803f003f0037001f003ec11fc11d", "card_id": "EX1_096"}, {"dhash": "03808d09e0dff83fa03e4fec91e627d95e32fb4c80e933917d37074c0490f807", "average_hash": "ffff0bf003c018041c001f0091804de04fa009fc017c253ef9bbfdbd3da6f987", "name": "Mind Blast", "phash": "e75776aac1a9ad8cb7d6336dcbe18b924b0c42b669c34b947bc81b2c5b207622", "mana": 2, "whash": "ffff0bf003c000041e001f0093804dc04fe009fc43fc29bed9fbfdbd3fa6f98f", "card_id": "DS1_233"}, {"dhash": "cad98c6291810a76372d57b6b87c7bf1f64246858d0a13c47433ff6eac981935", "average_hash": "6707f186930a180838140c1c4c1b783af81ff81fb81b70017801f81bb013311c", "name": "Wild Pyromancer", "phash": 
"e36d5f76de70dbd8eb841c8c5e9801b0648896450e0d3ec1ef01be70ff04b747", "mana": 2, "whash": "6717f186b38b9808381c3c1d5c1b783af83ff81fb81bf0017819f81bb813f91f", "card_id": "NEW1_020"}, {"dhash": "0af82cf0d1c28b85343b497ebeb879e1e382c9059f093e12e934926ab08d4137", "average_hash": "f70fe30f1b0f182e08260c203c383c38fc3b7c307830e822c8225002c0138007", "name": "Twilight Guardian", "phash": "cb57cf92d8c0f3e423b9136b369236ac092665c14dc293679e03ae25674bfb49", "mana": 4, "whash": "f71ffb9f1b8f182e0c268c24bc3cbc38fc3b7c38f832f832f822700be013c117", "card_id": "AT_017"}, {"dhash": "caf295c1229ffd307767cdca9a85355963b2ca4c15932a8947323be4728aef14", "average_hash": "f70ff15df15ff00f8cb1af31af31af311f391d300bb0019000d001c003000300", "name": "Prophet Velen", "phash": "af9ffb7978e8cdeeaf9a511c1f92131e10505a1803862a267f4e0ec4ad391e95", "mana": 7, "whash": "f71ff3dff1dff05f8cb1afb7af35af353f3d1f3c0bb0099008d001d003000300", "card_id": "EX1_350"}, {"dhash": "0cff0dfcc3c18dc3771caf705867d8882537df60f604084961b8d063ae411b3f", "average_hash": "f70bf30f831ff81ff83fe83ee039ec98cc144c26889f801100c300028003b003", "name": "Felguard", "phash": "69a56b82da519e06366476c86b304ec666e93639f6d8becc6d2dddc4aec18191", "mana": 3, "whash": "f71bfb0feb5ff87ffc3fe83ff031fc986c145eb68c9f8013008780038003b003", "card_id": "EX1_301"}, {"dhash": "888f7d78e0c1a3eec419093711e676dcbf7926136e26594c80bd1879e1e8fec3", "average_hash": "0718c3c70f8f3c2e3e3f0e030e020e00ce49f6c9c4e5e061007040f000f8e07f", "name": "Call of the Wild", "phash": "3dc85f54edef73fb3fa936240b402c20cb042434c6782ed85bd3ce985bc619a7", "mana": 8, "whash": "071cc3e31f8f3f2e3e2f0e070e024e00ced9f6c9f6eff0670070c0f880f8f07f", "card_id": "OG_211"}, {"dhash": "1ad22ce0d1c48b0937234cc4b83a71458f0a261558ea83c517328c6cf899f137", "average_hash": "f71fffafcbbec83ecc3d9c3d001100200001800100012000380038307811f919", "name": "Boneguard Lieutenant", "phash": "e39d19c6e334de32fbb0a6b0199903212120ac843e607e486f06bf677f27fbcc", "mana": 2, "whash": 
"ff9fffbfcbbfedbfdc3d9c3d017901600141814901493150381078b07991f91d", "card_id": "AT_089"}, {"dhash": "08a6a749608cf8b0f133e56e83991ac7349e012c0cd1fb0a023144e4bc8f0b10", "average_hash": "f7ec83eb81c280c000cc8bc483c463d26398e3803b8039c038e819e01b1b1700", "name": "Upgraded Repair Bot", "phash": "7717393a89c2e3587fc401333b92288840c8db8622722bc7fef76b6c72b64bd9", "mana": 5, "whash": "fffdaffba1e884c002dd8bd487c463da63e8e3a86be03de138e819e01b1b1700", "card_id": "GVG_083"}, {"dhash": "88c40c8cd13283e337cf6d36b02c6019d8e2e4d58b0b3b1736366f6ccc819923", "average_hash": "77197f901bb119b11c3e0c3f8c3fcc630c2830383c303c303c30183198019901", "name": "Spectral Knight", "phash": "879d5d8cf6e166b873425b0e49606ce1361b938db9b0d9584f9ed3692d29984b", "mana": 5, "whash": "7f997fb81bb118b91c3f0c3fcd7fed630d6871783d703c703c3118b199019901", "card_id": "FP1_008"}, {"dhash": "08c70c2ed1188a6334e659ccbc98694d969a2df57fcafb15ff330c7630880133", "average_hash": "77102f8003818803980388018819c01960007402fc08fc0ff81ff81ff803f903", "name": "Unstable Ghoul", "phash": "b35d9cc3f3846ff476ec292306220324c16033639919390cff8bbce4ef27e743", "mana": 2, "whash": "ff102fa003a198a39c038c038d69e1696549f443fd0bfc1ffc1ff81ff903f903", "card_id": "FP1_024"}, {"dhash": "f8caedfcc21d85317ac2b2e68104732fce7e3c399fb6fc49d1bb04f20304cfa9", "average_hash": "0f508f713bf89d7f9cfb8eebcf438f738763074043c7c3c780cb01c807808620", "name": "Grommash Hellscream", "phash": "9f2baf896fb17eb07e541256128606c999d50b8649a96e706cfde46d666693c0", "mana": 8, "whash": "0fd08f703bf89f7f9cf38efbcf438ff38763074047c7c3c780cb04c807a08620", "card_id": "EX1_414"}, {"dhash": "8af80ce011c942b8b52258c7bc4c7f31bcc2d085e18bd31787372c6f589ca130", "average_hash": "f71ff39fb39f900cc00d80040002000a201c001c101c301e383c301cb01d2118", "name": "Crowd Favorite", "phash": "e91b7b42fb19cb9cff8c93cc1ecc01f208ce06e18ec4d6b06e321b13dd20bf2c", "mana": 4, "whash": "ff1ffbbfb3bf91adc00f840c0452985e385e381d383c383e383d303cb91d211d", "card_id": "AT_121"}, {"dhash": 
"cad6cd6992f508b77b0eb014eba99c7339e702ea4dc49308263a98f0fe0cc1b3", "average_hash": "3713bb615bcb384ff84ffa477a86398839983b807e003c0018c048c0fa11c011", "name": "Axe Flinger", "phash": "c783dbf0ba78ee593f31deb4039061bc88b0430c4ce463677e6c33c14c5b79cc", "mana": 4, "whash": "37123b61dbcb78affc7ffe477cc6bdc83f9c7d807e803c0018d048c0fa11c811", "card_id": "BRM_016"}, {"dhash": "28f2ecf111ee8b1137074cccb97870f1e062c5c5938927136d360c6c3898713e", "average_hash": "c704111c3b1cc81e883e1838183e183e183e183e383ff83ff83f783e381ef01e", "name": "Ancient Watcher", "phash": "e9f51e135a9ab3f8fef087d93631088c3102930c73646788f782ff11df00df31", "mana": 2, "whash": "c704111c3b1ee81e883e1838383e183e183e183ef83ff83ff83f783f781ef01e", "card_id": "EX1_045"}, {"dhash": "2cf04c03c14c0ab3b56c4fd1b8e640fd8f023b05fc0a1215ec30f26fc8800136", "average_hash": "f71f439fc38c982090296038601c6004f806f807f80ff003e807f81ff0032100", "name": "Magma Rager", "phash": "73df6f423f41db647f2639882d899361967212b38ea6336469b132de966293d0", "mana": 3, "whash": "ff1fffbfdbacb8a414296439647c6044f846f807f80ff803f807f81ff1032100", "card_id": "CS2_118"}, {"dhash": "2a0ffdfbd0b5236b5f829b607e99f83cf331a0e10108c7533e8f74900130ffff", "average_hash": "07000302cb0bf83bf83ffe3fde7f8e7d0e7f0e3e24002018503cf0010200f83f", "name": "Betrayal", "phash": "2b519f4ff6ec7efc1f9b34b24376868c21d8163d21672e42c3986c82db60b706", "mana": 2, "whash": "07000302cb0bf93bfe3ffe7fde7f9e7d1e7f0e3e2c002018527cf0012200fc3f", "card_id": "EX1_126"}, {"dhash": "bcf72d8c6235c5c8b81177678c5e413d86722865fccb3997e733fc669896012d", "average_hash": "c70cc37de3d3e057e04e409c001c80010c030c008c111c19381ff01ff00ff90f", "name": "Mogor the Ogre", "phash": "197f3ca67b00860b1ec369c16c925236c672b3b0ee25d6fc9f27664fb498858d", "mana": 6, "whash": "cf1cc77fe3f3e077e06ec09e407d84438c030c008c3b1c393c1ff01ff90ff90f", "card_id": "GVG_112"}, {"dhash": "3acf2c0a61bcca60b4c3534dafb25bc7b40a6b5df62a04c548b8b3724380872c", "average_hash": 
"8791cfb3c3a3c4a9c443804fa55fa15fc15fc11ec11fc05fc00ec00781000120", "name": "Polluted Hoarder", "phash": "3967bf0db6177fe5cfb14bc82da1126204c891b10d9336f1c988d664e784afc9", "mana": 4, "whash": "8f91cfbbc3a1c5bbc483854fb5dfa15fe35fc15fc15fc15fc00fc08781800320", "card_id": "OG_323"}, {"dhash": "fcf98dcc12bb05603b86762e8c9679f9e6b2c104a70bde971236856c788cf339", "average_hash": "070bf35fb15bb05b804f009f081c0c3c5c327838683c203c183c081c1018011c", "name": "Emperor Thaurissan", "phash": "29ad9b48fe20279436064c2679d293a4991c2ed9a2a573e99e2c96dbdbb293e6", "mana": 6, "whash": "8f13f37fb3dbb05b805f0cbf1c5c0c7e7c3f7c3c7c3c343c383e083c1118011c", "card_id": "BRM_028"}, {"dhash": "8adfecffd1a08b1837494832b7ec4c19bf227b65e68a98153b31766ee4868139", "average_hash": "27008107f90ff806e8016801ec04fc01fc0fdc0fdc0dd809d80df801f009e009", "name": "Puddlestomper", "phash": "034fdc1cfcc1cfe37bf8868c2c9880e00620238c4ee6ee68b7863b1bff247f0f", "mana": 2, "whash": "2700f98ffb8ff81fe80f6c07ec0efc0bfc0fdc0ffc0dd80bd80df801f009e009", "card_id": "GVG_064"}, {"dhash": "4662898064c1bfb3154d22b04ec017c7000e7b38706585c83fb3ffe7d55e033f", "average_hash": "b97fb3df901f0cfc0cfd6ecc7394611f4104838603ae0088c080d00ff21fe63f", "name": "Kindly Grandmother", "phash": "795bcd7ba969dc63fba0e38b0f48874e2cba49563076726a8afa9934b8d11248", "mana": 2, "whash": "b97fb3ff909f0cfc0ebd6ecd77b4f11d41048386038a0088c080f00ff21ff63f", "card_id": "KAR_005"}, {"dhash": "828bfdff60bb4340df00be077ddbfb36f6ec0cd931f2e227450e8c1d193f3260", "average_hash": "070003008103e013701010101e78167e167f3a7f7c7f7e7ffe7ffe7ffe7ffe7f", "name": "Hellfire", "phash": "8b00fe7f721edf9fcc612e6026ceb9e009f809ee82472bf14bf14b054fc073a1", "mana": 4, "whash": "070001008100e003600010101c78167e127f1a7f7c7f7e3f7e7ffe7ffe7ffe3f", "card_id": "CS2_062"}, {"dhash": "9cdf2d3a42e04486b90b731d84324001854a3ef573aa864459b190720781772f", "average_hash": "ef54efe7ffff05cf24cff6c737413d4013400100354251c0404000c005800320", "name": "The Beast", "phash": 
"dfeadb803d062e23260127c03bc018480c73c639abac1ec71f8777fdf6c9d6d8", "mana": 6, "whash": "efd5effffbff877f64cffde73f413fc01b400342374251c044c2458005a00320", "card_id": "EX1_577"}, {"dhash": "e007fd7f50d60f3138fef0d8e781ff06f0790c8730daf3ee879d09333e60f8cf", "average_hash": "073043c0f901f8619841f043f043e02f607838720060246e3e6f1e7e0e40fc0f", "name": "Bloodlust", "phash": "a18ab2cd4f45ce343e614bb892618ca349980e46c3cc67a42bd39b35cbf736cf", "mana": 5, "whash": "0f3043c1f901f9e1f861f043f043e06fe07038720c642e6e3e7f1e7e4e74fe0f", "card_id": "CS2_046"}, {"dhash": "e803fd0fe074e299810706fe1b3c27700ce072c4f90982130727dbceb310ffbf", "average_hash": "0f7ee7e9ff80df268e1c1e183e3f3e073e063c0118020000000000000000fe0f", "name": "Shadowstep", "phash": "0f550f227c0af4c9c3c02e880d300c30096a8306c3d636e6e3ef3ef979f9ffbb", "mana": 0, "whash": "6fffeffbffcfdfa68f1d1f1c3f3f7e073e063f033d060900010000000200fe0f", "card_id": "EX1_144"}, {"dhash": "ccce8c34916902e7359a5334a6964b39aee2b8c5f60b5b179637116f24980530", "average_hash": "b711b383bb8718035807c807e001ec03b807dc0cbc1fb807e01dd01c401c0104", "name": "Scarlet Crusader", "phash": "d3d397217cb0e3b1ee9931984c0c0dc69e1926cd66e3c6938fa3797cc94683c3", "mana": 3, "whash": "bf11bba3bba71883dc07c807e841fc43b8475c0efc3ffc2ff43fd03e411c0104", "card_id": "EX1_020"}, {"dhash": "7cf2edcc02bbc5003a1f6cec9b7a65b14b429c84312be754c8b9b96916930137", "average_hash": "871f937db3d9a07f08f818b01c38dc3cbc2f382f3007c406e006e02368000100", "name": "Toshley", "phash": "89762d8976095b6aafd33446cee49294582c43b499f8d3e737d1466d0d8fc668", "mana": 6, "whash": "8f1fbf7dbbf9207f0c781cf81d7ddc7cfd2f3c2f3d07c406e406e02369000100", "card_id": "GVG_115"}, {"dhash": "4af08cc791990b43b6845019ad327a65e5ced01dab6bd64638b7656e83822729", "average_hash": "371f119e9b9f983fb03f300f3007303a70307038603ce03cc03d201d00090108", "name": "Dire Wolf Alpha", "phash": "e9f9cf211dcb67cec69cd870d2c6c398b13019cc3ba15b43bf8c7606d7083a86", "mana": 2, "whash": 
"3f1f1fbe9bbf99bfb03f300f314f317f7179703c613ce03ce03d201d010d0109", "card_id": "EX1_162"}, {"dhash": "0203ed3e10ed23f894c24fb09371fec2fc11e3ad8c53d10760f802bf0908fef3", "average_hash": "073083c09303e003c007f037f80f700ff83ef83fb93fbe79902f023c0218e01f", "name": "Stormcrack", "phash": "ab75f2973ef10f336d5eccc8938d2ce443eaae08897f2f44cbc4d2850fc15a02", "mana": 2, "whash": "073083c09303e103c007f037f80ff80ff83efc3fbd7fbe7dd02f027c0238f01f", "card_id": "OG_206"}, {"dhash": "2883ed7ff0bbc3de3f110c643fc07c24fd827267e60b06a66818bb3b4c37fa2f", "average_hash": "07000340830fe01fe41ff81ef00ed00cf01fc81ef81fe0004006a024303ef81f", "name": "Ball of Spiders", "phash": "6b763f219e097e8eeff3362023440914192880714b8463ccbb5b1ee773eb9bb9", "mana": 6, "whash": "07080340939ff11fe43ffa1ef41fd01ff01ff81ef81fe0056207e826303ef81f", "card_id": "AT_062"}, {"dhash": "cab9ad5312a77d15f92ae6ddd9b9a2e33c965b2ea55042899d3176e60c0c3318", "average_hash": "0700236831de10de00c813a8b3b479e27b987f9c3f9c798c58c818d813180210", "name": "Demented Frostcaller", "phash": "e721bd689e6a671ebf3412c31e9009a912ca99a40db36e81aef19e471d877ba5", "mana": 4, "whash": "075039ea31de10fe50eb11e8f3f479a27b987f9c7f9c7d8c58c898d833182318", "card_id": "OG_085"}, {"dhash": "8af10dc2129cbd1d731b8664ccd99bf33447538ce450d808a3330ce239877f1c", "average_hash": "f74ff1ff7bdc48d8e8d85bd81b889b9ffb97bb85db8c888f00cf00c203001700", "name": "Argent Protector", "phash": "470b61cff0dcf9b0fb70986d3792303c99bc1c620689db846726dfd1b3896e90", "mana": 2, "whash": "f71ff1df7bdc48dae8d85f981f889b9ffb97bb85cb8c088f008f00c203007700", "card_id": "EX1_362"}, {"dhash": "2af1cce611dc0a0935374ee4b6945969e2924d25d3ca349449308360cc87393f", "average_hash": "f70ff98cfb8cf80ddc08fc20fc0c2c196c13e4036c1078020800080018003104", "name": "Lance Carrier", "phash": "a3cf73c3ffc379ecb9b81e636362100c982c09c95b90e344e710db49f3283f87", "mana": 2, "whash": "f71ff9bffbbef83dfc39fc3cfc1cbc5bec13ec13ec18fc020800180019003904", "card_id": "AT_084"}, {"dhash": 
"0ad0c43581e30a87370e4c38b07040e989c21d95538a86042f37fc6eb0984139", "average_hash": "771e35853187180e783e3c3e3c3f700e780770077002700070107038f018f118", "name": "Southsea Deckhand", "phash": "e3e5dd927fe5d6f0bfb8258c6f4c29a463c2584e18429652e721b330b5013bcd", "mana": 1, "whash": "7f1f3ba733a7188f7c3e3c3e3c7f785ef847f0077002780070387038f118f918", "card_id": "CS2_146"}, {"dhash": "e2cfe57740c50f881d191b36344c68c0e10848b9974366840c017b9ab620e09f", "average_hash": "0718f1e7f90ffc7efc7efe3e7c7e5e3c2e3006102e320e4c0e084e007e007c00", "name": "Blessing of Might", "phash": "6346e973f2df3c39bef10ecf4f383e840c5a876c48d363111b144ed159e2931c", "mana": 1, "whash": "071ce1e7f91ffc7efe7e7e3e7e7e7e7c3e3006102e720e4c0e004e007e007c00", "card_id": "CS2_087"}, {"dhash": "18ff2d7c028c4500fb76b66d0c1951e472cea79dcf13f2090436b0e540428324", "average_hash": "e707e36fe14fe05fc059d29c921831183018f108f393e01f70be900f00050601", "name": "Dreadscale", "phash": "79297b6a1b9ef34cbe100725368006e304fc9a8d6b724bff47e556cc086336b9", "mana": 3, "whash": "ef17e37ff34ff05fc059d29c925831583118f108f39bf09ff8be909f02050605", "card_id": "AT_063t"}, {"dhash": "68e6cccc019b0b36376653bdac7073c1a4024c05bd8b63144432896c2e887936", "average_hash": "9719bb99bb99b839b839b81dfc1bfc337c103c02380738030000080008001100", "name": "Recruiter", "phash": "e3e93b613c11df8c77641bc249c22418309c970c6ee66ee35dc2de766e263666", "mana": 5, "whash": "9f19ffb9bbb9fcb9bc3ffc1ffd5bfd537d527c433d0738432000081009001900", "card_id": "AT_113"}, {"dhash": "5ccfac3dc1738a9d35b853f1bfc27f0fec3ad8e5a08b4e4639b5636c83912737", "average_hash": "0790afa1eba1b804f8006004e04fc07fc43f043e083ec02dc029c03301036100", "name": "Arcane Golem", "phash": "3997c720b60d0db3bda433c94e2286ac33e3cc604fbe7efa67c68ce1b9212623", "mana": 3, "whash": "0f90efa3fba3bc87fc04e406e14fc17fc57f057f057ec17fc039c0b381836100", "card_id": "EX1_089"}, {"dhash": "f887ddff2000637898ed011b17763ec46618be6038d1e022c345068b3917fb1e", "average_hash": 
"07107341f19f440006003603360f7e1ff607ee07be079e071a0e32045206c20f", "name": "Feral Rage", "phash": "5b5037b8ec839b876b48ce109e0c368026c12ce4287edb76b3395bcf5bf15e23", "mana": 3, "whash": "071ce363fd9f651086003607fe0ffe1ffe07ee07fe07be071e0e12045206f20f", "card_id": "OG_047"}, {"dhash": "88b00d65c2c2bd0573faa4b7d87fb50343979c6c65c19208353068e7058e9f1c", "average_hash": "774c33ec09dc08dc48de43fee3b3fbb1bbb32ba57fb97c9908c180c9031c070c", "name": "Scarlet Purifier", "phash": "4d9feda64cbad71bfb7033c16f4c18a602d023c5d8d89b853bcfeac4822c183d", "mana": 3, "whash": "671c33fc01dd08dc48fe43fee3bbfbb5bba30fa57fb95c8908c100c9031c070c", "card_id": "GVG_101"}, {"dhash": "7af2ece491890bc5b77a4ef6bc147339a6fa2e25d8cba0848137036c4e843d30", "average_hash": "c717cb99bbbc783c183e1c3f8c7b887b041bcc0f8c0f0807001c002400080110", "name": "Cult Master", "phash": "096f6be176d25b6abf2999859b4640e0e4b0932613c3dbe9cc306d167692ee69", "mana": 4, "whash": "cf97cfb9fbbc7cb81c3e1c3f8d7f897f855fdd4f8d0f885f001d003401080110", "card_id": "EX1_595"}, {"dhash": "7cc6ec3d41e1c2ceb53b5223a10c58b1ff6ab0f5c1cb0f969e363f6d769ae13e", "average_hash": "87908fa103a305a6044ecc06cd400550317c3d3c1d3c2c103434f83ce91ce11e", "name": "Drakonid Crusher", "phash": "4f169e707b84861177a136a40f0661e06c78068fd9e0f37cbf8c9ecdebc859d8", "mana": 6, "whash": "8f908fa003a307a7048ecc4fcd420558357c3d7c1d7c0d7c3c34fcbcf9bce13e", "card_id": "BRM_024"}, {"dhash": "0082cf7fb4c00b032e06581ab06e60c5c09a01f337664ecc9c98797187b0fe27", "average_hash": "ffff81f3e19ff03ff87f3c7e2c3a6460046006600640066006600e600e601c20", "name": "Holy Nova", "phash": "05fee905f90156a8165e061e87478683e3c16ba16b595b9073f053e87bee59ae", "mana": 5, "whash": "ffff81f3f19ff83ff87ffc7e2c7a74784660066006e0066006600e600ff01da4", "card_id": "CS1_112"}, {"dhash": "9cd94d39126604ce38386d7183ec47d18bc23ca533cb681695307a6904838934", "average_hash": "6706736739473046f046f8a0fc00fc00fc017c03fc06f8047800d80198000100", "name": "Alexstrasza", "phash": 
"c386f9e4dc809b193fd936c6362613664ea21a26aee88ee1372763ec538a733a", "mana": 9, "whash": "6f177b677bc73146f866fcb0fc507c41fc01fc03fc07fc07fc04f801d901d900", "card_id": "EX1_561"}, {"dhash": "3ad76c2ed15502eb359c536ca7d04d03bb7a40e58f8a5e4431334060c683bd24", "average_hash": "c798cfb4cba1f9a038427c00f846b94790479c11fd01f001f001080001040100", "name": "Faceless Shambler", "phash": "f3f37f633e5633c31c3361e063e00c69b1e0538226313be233839f393c99bb91", "mana": 4, "whash": "cf98cfb4fba5fda4bcc2fd42fdc4b9c79b579d51fd41f141f8510c8001840320", "card_id": "OG_174"}, {"dhash": "6ae7c48891990b3f37024eceb8146631b8b2c14513cb6c96db36b66d6c99dd30", "average_hash": "971df199b99dd81df83f383bbc39bc2c1c1e3c36180688009830c830c010f110", "name": "Leper Gnome", "phash": "a3b74bd933b1def26ff93c737e58298442121e128610cc00cec2c736f3a4bead", "mana": 1, "whash": "971dfb99f99df81ffc3ffc3fbc39bc2c3c1e3c361c069c029830c830e010f110", "card_id": "EX1_029"}, {"dhash": "02f80dfc70c0ee23fc6733077f04ec79f003c2170e8e041888731272e4c4fc8f", "average_hash": "ffff01fe81ff807f806fc67fc0ffe0fff0f8f0f88090c080404800480000c007", "name": "Hand of Protection", "phash": "79fb9d9ade4966ade7b513f612f4a648635a185a69807be11b253846c3d0529a", "mana": 1, "whash": "ffff03fe81ff81ff806fc67fc0ffe0fff0fdf0f8c0d2c08040c800c80000f00f", "card_id": "EX1_371"}, {"dhash": "6ac6ad3d52f089457f703f91fe46899912b3034a0e04f8087439f7f0e00567be", "average_hash": "d713f563fbc3f8e37cf03efd44fc4ddc0d88df80fe819801e0c0f0c042002400", "name": "Cruel Taskmaster", "phash": "338bbbddb36492631cd22dd92bb186e44c645a1279c21f49db317704e707bc3c", "mana": 2, "whash": "f713fb63fbc3f8b37c707e7d4efd4ddc0f88df80ff81dc81e0c0f0c042802000", "card_id": "EX1_603"}, {"dhash": "0180cf3fe0f3d9e6ad895b78b8f36086cd4c1b9a2c3441e886970d43f28efc3d", "average_hash": "ffff0bf0e1c7782f3c6f1ffeb7fce3f0e3f0a7c00384038427c02784018cf9bf", "name": "Lightbomb", "phash": "7d70a10fc9cf7e203527cd78c9410338a10c7b9553715ac753ec53e2537ad9b8", "mana": 6, "whash": 
"ffff0bf0e18778073c7e1ffea7fce3f0c3f083c00384038027c02784018cb9bf", "card_id": "GVG_008"}, {"dhash": "02866d3fe0f113f31ffc9f111e6638c1f086e00edf19a11346368ee91693798e", "average_hash": "771e01e1c183fc7ff07ff07ff03ffc3c703c781c301f301f101700060806d807", "name": "Equality", "phash": "e95537289e87ccfd668fbdf44971a41223780b4663f05a3b7ba45b255f009bc0", "mana": 2, "whash": "771e01e1c183fc7ffa7ff07ff03ffc3ef03c783c301f301f101700070807d807", "card_id": "EX1_619"}, {"dhash": "e8cf8c3901c00a90b766510dacb05bc7f69acc25f34b1297ff33ee67cc87892c", "average_hash": "6717f3837987000d8035100f3009701b603b643a681e080fb81fb81ff80f910f", "name": "Gurubashi Berserker", "phash": "cbd9fe94db13670c7bc087213c9239623c904c81ce395e4eebc2dba48e87b3b1", "mana": 5, "whash": "f717fba37ba7600d8035100f3859705b603b643e681e082fb81fb81ff90fb90f", "card_id": "EX1_399"}, {"dhash": "8af90ce6118c8a5935f34cc4bd887931a6b2e8659789200763b0046049801721", "average_hash": "f71efb9efb9cf820d810d833d83b983998185c3e5c3318101000000000000100", "name": "Maiden of the Lake", "phash": "27677b667e52cf8c6f8cd9c89b8402a614688749e1ccd2ccbb899dc96c82f396", "mana": 4, "whash": "ff9effbefbbcfca8dc30dc33fd7b9d79dd5c5d7e5d3b18721001000001000100", "card_id": "AT_085"}, {"dhash": "30f86d60428acd1cba21640e98bc707172c2ed84910b63968c37116f67989d31", "average_hash": "c70f037e0bde00de84fc04bc0c3c38383c1a3c1b3c36642e403e003c00180100", "name": "The Boogeymonster", "phash": "6d3ddfa27e02db48a7492cd69a9401e963d80bb19b8d57d87791b3ed63035422", "mana": 8, "whash": "cf5f8f7e0bfe047e847e0cfc0d7c3d783dba3d3b3d37647e443e003c01380100", "card_id": "OG_300"}, {"dhash": "1af439c082094d92bb646619998277455c3a8484322a6495c93892710084073e", "average_hash": "e70f877f87dc80dd80dda09fe03fa03fe03ff807f407e4164802000000000100", "name": "C'Thun", "phash": "19be29a93e0b9b4e9b5892ccf27c92db299ff2fc82c73c9d5b319604260067e2", "mana": 10, "whash": "ef1faf7f8ffe857da47da0fff17fa17fe13ff807f507ec164802100001000100", "card_id": "OG_280"}, {"dhash": 
"0a988d7612e3bcc47b3fe748cc14b93b72f6c4acc9d1108b4732dae4760be91f", "average_hash": "f707f166b14670ce38ceb99ebf9f09b919b81b9b09b91899188808c60817c21f", "name": "Snowchugger", "phash": "c90df372f2e2b6dd3bd016170bc1428400c9ba9c7379bb257b56db419b217e31", "mana": 2, "whash": "f707b146b3c670ce38debd9ebf9f09bb19b81b9b09b9189b188808c60a17c21f", "card_id": "GVG_002"}, {"dhash": "4cfc3ac1c192030d7752cc24bec06337df72cc8da829134724b46b6c9399e533", "average_hash": "b71f769f4f9d483fd83fd83fd03e8037803f983f003f00330021003120106112", "name": "Mountain Giant", "phash": "a9c54b43660c9fb1d3603672e3e78393907cecf607c32f8d9e981ed227a16e0a", "mana": 12, "whash": "f71ffabf4fbd49bfd83fd83fd83ec07fa03f983f803f00338031003121106912", "card_id": "EX1_105"}, {"dhash": "08f1ece7d1ca0b97372e4c0cbb10677dc83ad56510cb4c17bd36c06e1e893932", "average_hash": "771e5b9c2b9c183c1c3c1c3c1c3e0427402388311c2058307838003b201a3111", "name": "Wolfrider", "phash": "cfc74f927b2c36c6fa608764a7a329ec72d01c2c271e63a5fa68731c2929e3c2", "mana": 3, "whash": "7f1e5fbe3bbe18bd1c3c1c3d1d7e046f5063c8395d201c30bc3a083b391b3913", "card_id": "CS2_124"}, {"dhash": "fafc6dd2d2d089eb7f933766f288aeff51b7aa6a9515ef8a953f06f4fc0cf9bb", "average_hash": "0707831f83df801f487f583edc2e00aec9ac60843824282c181c18087800f813", "name": "Bloodhoof Brave", "phash": "c987996cfbe9fe18bf540e83029426cc38f8966406b137c596b17d6132d897c7", "mana": 4, "whash": "0717ab7f83df90bfc87fdc7edcae01bf49af498c3884682c187c188c7a087813", "card_id": "OG_218"}, {"dhash": "fafffdf163c38f0d1d363e787020e0c6e2ed0d7910c28b25f6c9c1171e3c7c64", "average_hash": "07000107bf3e7c7c7efc3e7c3c7e0078007042503e003e50a64f0e3f067f306f", "name": "Battle Rage", "phash": "ad2865f75b5f5eadf8e49e5927e21c82348b04fc1b30a4344f237b802f459f62", "mana": 2, "whash": "07000307ff3efcfc7efc3e7c3e7e0078027242503e403e50864f0e3f067fb86f", "card_id": "EX1_392"}, {"dhash": "42c2ed7fe0e003010e869d99b935f321e642c9c5863f010f003efc7ce0f99f21", "average_hash": 
"f7bfa1f5fb9ffc2f96331031207b207b307b903e907b007800788079c03dc01d", "name": "Seal of Light", "phash": "a975d39a5bdbe56b75283cad8ed098f006cc09664b7869c7d3897a0ddb80f380", "mana": 2, "whash": "f7bfa1f7f99ffc2f9e331035b07ba87b307b107e907a007800788079c03dc01d", "card_id": "GVG_057"}, {"dhash": "82c3ed1f44303b431c9e51a0c2a7051f9ffcf8dbe3b7eecd9d9e3f437c8cc139", "average_hash": "fffff9f1f9d3bc731e6059c00180070007c207e646fc66fe7efe7e1eff18fb1a", "name": "Power Word: Glory", "phash": "67e496bfe32d7de25a596d9e64c24fa28d700a1989d073cd139e73c05ba01738", "mana": 1, "whash": "f7ffe9f1f993bc711e4018400080030007c207c646fc66fe7efe7e5c7b18fb18", "card_id": "AT_013"}, {"dhash": "6a9fcdb8126009d37ea62c78d984b1107ee6708f05740189ee3791e5274edf31", "average_hash": "e707f167fb33f822bcf03eb0fcb039f9183e319c138808000080001000188210", "name": "Steamwheedle Sniper", "phash": "a325b3a7bbe65b5a3959346c06b04ce20c4716036b046e05bf85ef09fb9c73e3", "mana": 2, "whash": "e717fb67fbd7f8b2fcf23ef0feb13ff9193e3b9c138c0c80408200b0021c9610", "card_id": "GVG_087"}, {"dhash": "4cf10cc611890bb437694e56bc097837e0ced31dacf950c623b4af77fe843139", "average_hash": "f70ff31ff91e783c783fbc3ff83f103d303ca0340035002500002818b8083900", "name": "Master Jouster", "phash": "e9916b60532f3cf0ef600b4b189c60389cd089c37634933b1def9367799ab8d9", "mana": 6, "whash": "f71ffb9ff99ff83ffc3fbc3ff83f187f303ca03c003d002580016818f8083900", "card_id": "AT_112"}, {"dhash": "867d03c0440039677646ac98d9612693496f338cc4101c4898b021e34306877c", "average_hash": "f3dff1dff0df78b33bb13bb33bb053a4d3acd384e388c080808001840304070c", "name": "Nightbane Templar", "phash": "7515e1ad19aa796b7f02d49ea709de3304d6e69c24bc1268ed4976d2710e1b72", "mana": 3, "whash": "f9dff1dff0ff78f37bb33fb33bb0d3b4c3acf38cf388c080808001840304071c", "card_id": "KAR_010"}, {"dhash": "8ae42cdbd1ac0b3b36e64ccfb9387641cb3a86e5400abf977937596efe9d813b", "average_hash": "f71e799a6b99983880398033b833b8358031880b3800581fc83bc838e01be11b", "name": "Novice Engineer", "phash": 
"a96dffe06f6c4e4afb9889c4d230307269d2672219033ba06602fb911f8fbb6d", "mana": 2, "whash": "f71e3bba7bb9d8388c399433f833b87db079880b3008581fc83bc839e11bf11b", "card_id": "EX1_015"}, {"dhash": "fa83dd7ef07023c34ccc9bb333c3643e91d86e2185723a85c40e383de37b9cf2", "average_hash": "07000100d30388230a232267c667c6618e516e59e6512e730674407dc07ff03f", "name": "Blood To Ichor", "phash": "bbb0de63f8f776bd3febbcd2e3980c888e6239944b440925329179b043489f83", "mana": 1, "whash": "07008300d303c923ba232a67e677c6659e516e5fee516e734674c07dc07ff07f", "card_id": "OG_314"}, {"dhash": "cae704e7118e0b3836404c99ba7e7f4de3828c8d110b3704e43b40711e84bd2f", "average_hash": "770cf99ef9bdf838f83b00332030683d2c333823782070003006000300007100", "name": "Lightwarden", "phash": "e3dd63f57773d7ee7f989cf896c098c86328240b6c203e901b836b0279493b59", "mana": 1, "whash": "7f1effbffbbdf8b8fc3f10332179787ffc77782378217801781600030100f100", "card_id": "EX1_001"}, {"dhash": "289c2d30c238fc7379a0e24fcd9d92330ed6182c7d50d388033012e2c405393b", "average_hash": "876783e7c1c3c0c3b0c383c38bc29bc33fc33fc00f8d0fcf18c019c00b000303", "name": "Spellslinger", "phash": "9709b93888cccbd9bf4446273998868981fc198f66e63e3c98cfe6d1772c4b45", "mana": 3, "whash": "cf57cfe7c3e3c0c3f2c383c3afc2bfc33fc23f800fcd0fcd1cc019c00b800f03", "card_id": "AT_007"}, {"dhash": "0af044cc91b10b67361a4cecbdd87339cf7280e5610bd3970c3738680e94f939", "average_hash": "f70ff11f991f181f783ad8389c3c9c3fbc3f1c0f3c0c7c0be838e0083808b00b", "name": "Captured Jormungar", "phash": "cb5ff9e57ad85b9a7bf3357266e84c8c990c421218634ec93b638c955848e7a1", "mana": 7, "whash": "f70ff91f991f181f583adc389c3c9c3fbc3f3c0f3c0f7c0bf838a00a3808b00b", "card_id": "AT_102"}, {"dhash": "a8f0ec47119d0a32b46658c9b02664bdcf72b3d5679bcd040b392ff6dc8ce931", "average_hash": "f7977fbcbba899bb946184610561056005643164994058483c081818d911f911", "name": "Venture Co. 
Mercenary", "phash": "87533d707b85c7423db40c8c93a19838191693c1fb705b8e7f63e77ccf0a78c2", "mana": 5, "whash": "ff9f5fbcfbaabfbb84a39d6105e105e00764396439603948384838987991fb31", "card_id": "CS2_227"}, {"dhash": "38d38c27614dca9ab5654fc9b0327be7c6ce9c95292b7346c6b6d86d3181e53f", "average_hash": "07940fa4c3a084a0a041b029307131783077303f31273007703270326100e118", "name": "Questing Adventurer", "phash": "f3f41e133e86cf94efc1668c1b83088c386246e629e5527ab97266a75969b6d2", "mana": 3, "whash": "4f940fa4c3a085a0e485b579f5e071f8337f317f39773163707274b27180e339", "card_id": "EX1_044"}, {"dhash": "02800d3f30fce3e2ffcdff333626080c7078e0c08101030206b40c5ef93fe02b", "average_hash": "173c03e08183800f807fe07fe67fe21f0c3f1c3e3c3f3c26300030107c3cfc2f", "name": "Polymorph", "phash": "e95c3ca3f6c3c6352f0ff7b889560cad189323c8caf109f79b0c538a46e55b90", "mana": 4, "whash": "77be03e08183800f807fe07fe67fe21f0c3f1c3e3e3f3c2e300030107e3cfc3f", "card_id": "CS2_022"}, {"dhash": "cafb996702dc85383a252c4a92c6670d467a88e10187230adf3e36f74c0af99e", "average_hash": "f70ff76ffb4df85c8c318cb18420803604330c211c203c201c30181c181c181e", "name": "Varian Wrynn", "phash": "83b3733863a3c768d974b634963c186d39ed7243687a5e4c47855f923b9916e0", "mana": 10, "whash": "ff1fff7fff4df87c8c79acb1a4208536053b1d2b1ca03c201c78989c1a1c181e", "card_id": "AT_072"}, {"dhash": "b8f12cc3c19e827d31b25265a55a5af584023d5d5e2abec4c03983733c877d3c", "average_hash": "079f4fbc63b8cdb9ec4ac046dd427552f1405042d101e04324061806090fc11f", "name": "Injured Blademaster", "phash": "536e27870d1dde98efc1cf2439a0c6c2333c8c718ce1dbc6e6d84c9c9946267b", "mana": 3, "whash": "0f9f4fbc6bb8c7b9ec8ad546ddc27552f7405142f141e1472c061c86098fc33f", "card_id": "CS2_181"}, {"dhash": "88d0ba20412f4aceb114527da5384b7183d216b5a5ea6d551ab82561f3827d3d", "average_hash": "fff3fff03fe03de03ccc3fc01fc01fc01fc00fc00fc01dc004c005e005800f30", "name": "Molten Giant", "phash": "d770e7003b143f943f8d12e18be5d8daa92fb987bf05de41ff007f006f007b20", "mana": 25, "whash": 
"fff3fff07fe03fe03cec3fc01fc01fe01fc11fc00fd09dc024d005e045a00f70", "card_id": "EX1_620"}, {"dhash": "cadd8c811153029835275e4ea8ec40f98f723ec5700af957fd38d273e68f8d37", "average_hash": "f71fff9f7ba8382c1c0c9c181c000c00ec00b801b8009025d807c003c01fe117", "name": "Ravenholdt Assassin", "phash": "c3f7e7f979c4f7f077bc1de7cdb1116858864cd2c3082d90360b1841dc187ac6", "mana": 7, "whash": "ff1fffbf7ba83dbc9c3c9c3c0d544d40ed40b941b9009065d80fc007c11fe117", "card_id": "CS2_161"}, {"dhash": "eab2cdc48083393f72dae7269cdd3ef97c32d36ca681840b183608e695886b17", "average_hash": "070c81cc11d810d8188a5b8c19b69dbeddbeddbcd9bc78bc00bc009c031e030f", "name": "Holy Champion", "phash": "4d87f73bdca865379b32d4185bc608e6893caec40cb66e84fb65ca01b3cc7a45", "mana": 4, "whash": "074c91dc11d818dc188a5b8e1dbebdbeddbeddbcd9bc78bc00bc00dc031e031f", "card_id": "AT_011"}, {"dhash": "3cd6ed6c93d90fa07c4b2b964c3dd3402e831420ab641ac9e0b849e0974d6736", "average_hash": "c71fcb07994f987f082f4a8d101178836c234503e081e00300c3400100100014", "name": "Dread Infernal", "phash": "9b899b53393333196f2436007c940ccc4932e361ccf9a369f3c7d64dba6674ce", "mana": 6, "whash": "c71fdb4fdb6f98ff0cbf4ebd5c95fec37ca34783e585e01300cb40412092001e", "card_id": "CS2_064"}, {"dhash": "eaffa53342c80411b9e076c78d6a5d958c2a30d5452a26c5dcb3786381849b3d", "average_hash": "071ca56fe1efe84dd0d9c0d1d059d15de00e300c310060006008200811000100", "name": "Rend Blackhand", "phash": "7bfa1bab9f7deffedfe3364c8e814e11268026896b885e347704328793809720", "mana": 7, "whash": "1f5fbf7ffbeffd6ff479c4fbd559d15de34e310c314061406008608911801129", "card_id": "BRM_029"}, {"dhash": "0080fd7f60c0037e9e983f39fc407089c7a29f47330e6c17da3e047ff895e82b", "average_hash": "17be03e0f187fc19fc0bfc7e7c7e3c709c63d867f026e036f0347018f01ef00f", "name": "Dragon's Breath", "phash": "e1f71f087e01bca69fc92d3813c806c38a11625c43870b376ef57be0dba85b9e", "mana": 5, "whash": "77be03e0f18ffc01fc0ffc3a7c5c3c709c63d863f066e016f0367018f00ef00f", "card_id": "BRM_003"}, {"dhash": 
"2af1c5e9129309247c4ea68cd958a2e00fc2972f29c41a09f43aa2e704416b36", "average_hash": "f70ff15f791f781f38493830183018b03007e18168817801f883d80740047000", "name": "Timber Wolf", "phash": "e399edf97bee7b9afe760c2736801972c6f064043165ca186b84d6069e0bccc9", "mana": 1, "whash": "f71ffb5f795f783f38ed3e393cb01db23807e98178817c01f883d88750047006", "card_id": "DS1_175"}, {"dhash": "02f648fc81811b3137c25c1cb8396431c264eccd9802215d02bab061c3970d7f", "average_hash": "f30ff11fd81ff81d083f183f383f383730139013901b101200008000800f017f", "name": "Murloc Tinyfin", "phash": "89f14bd37b5bb261db5256168da53699268c12183732ce70cd648be517a7de4c", "mana": 0, "whash": "ffbff99fd83ff83d183f183fb87fb83fb817b833b01b101300108000810f817f", "card_id": "LOEA10_3"}, {"dhash": "88f40cc0c1928b2d37564c4cb8906301c6b29875b1e942d707360f609886c139", "average_hash": "771ff39f099d483fd83f983fbc3c9c370c230032003e003c0014300010080118", "name": "Illuminator", "phash": "e9dd6b097e09de9cd630c3306be312cc19ccc7d8e63066f23e0d3cc764a3a6a4", "mana": 3, "whash": "7f1fffbfabbf48bfdc3fbc3fbc7c9c7f0c670037003e003e203e3000110c0118", "card_id": "GVG_089"}, {"dhash": "88814d2e90b8216036ffcd7c9e8931674e9abff4c1888313662c8f5a1edcf0b1", "average_hash": "673ec3e0d380b01b001a9007d01db0302028441d781c383a307b383e383cf83f", "name": "Healing Touch", "phash": "e9553ea267a537683792a5484bac0c58ac5c4fa159594795d3f21c9e1a8f7cca", "mana": 3, "whash": "f77fc7f0d390b31b001a9007f81d98316028cc1f7c1c783a707f383f383cf83f", "card_id": "CS2_007"}, {"dhash": "2ade2ca0d1200b7cb7f14ecbb8846039a0bacce5398a57042c3bc46018812729", "average_hash": "f79fffa7fbb33830a4398c3d8c3f8c7d80124c33381638099801380001000100", "name": "Stormwind Knight", "phash": "bbbd3bb5f6c4dbc61ff21b3329d8286c2461c660070763c36d613c5b7c087664", "mana": 4, "whash": "ff9fffa7fbb3bcb0e4388c3dcd7f8d7f855e4d733956394d3801380201000100", "card_id": "CS2_131"}, {"dhash": "9c981c38a0d0143223d626accdd396077915f6cad9f4a0db47a68c553aa3f56c", "average_hash": 
"e7c7e7a7f747b841a803d883f002b006a30d000f001f003a00160416010e692c", "name": "Sword of Justice", "phash": "db913341bf9e3d847310b324194d1c0b0d31cc3087e14f433fcd58e6bf6d6f1a", "mana": 3, "whash": "e7c7ffe7ffc7bf47b903f883f003f086a38f800f001f003b04170556190e6bae", "card_id": "EX1_366"}, {"dhash": "46e3c0ce85e11f033e965d6cb29841e19e023945e283ce9c3d3b4a561c89913f", "average_hash": "bbbe993fb83fb83f98369c36f84c3c003c021c0608040808180018080100113e", "name": "Violet Illusionist", "phash": "c973e97369195ecbf77a76cb7dd2d664cf4c461849895334a1149634318c3709", "mana": 3, "whash": "fbbeb9bff83fbc7fbc7ebc36fc5c3c403c461c061c2c1c181c1818183114113f", "card_id": "KAR_712"}, {"dhash": "cad38cf5d1c50ba2375c4eb5b9c47381e60ac73d9f6b0ec71936306b7386f538", "average_hash": "171eb39dfb8eb83e381cd01bec31f431f038e0386033c031c020400ce00cf11c", "name": "Defender of Argus", "phash": "637ddf214f139cb07f3c96a54cc84cc885710c1ce62c7346dbc46c466f92cfc6", "mana": 4, "whash": "171ebbbdfbbfb83a3c3cf43bec71fc71f078e038603bc031c020e00ce11cf11c", "card_id": "EX1_093"}, {"dhash": "8af98cc0118f0b13374e4cbeb9647ae1fc02c3658ecb9b1776366c6cf898d131", "average_hash": "771f719e3b9e383c18396c307c32fc38783c683808383838381820185018c118", "name": "Twilight Geomancer", "phash": "e36dcb64db315fd87bee93cc1ecc089221e092c4330867847f02be01ff11ff98", "mana": 2, "whash": "771ff9be3bbe383c183d7c387c3afc7a7c3c6c38383838383838201c7118e11d", "card_id": "OG_284"}, {"dhash": "4290c86181051a773c8e5812bbf457a5a74a48959062a35d4cb819614396e57f", "average_hash": "bfad1bac19a8186018326c602570b473f013f0136053745a20000000010083ff", "name": "Ancient Shade", "phash": "e7b5c7974c53fb79b9317638731c33f799618631353262ce198c9a3101a13e1c", "mana": 4, "whash": "bfbd1fac1eba1c703cf26f602b60b373f37ff353e153705a258200000520c3ff", "card_id": "LOE_110"}, {"dhash": "f8c82d77429ec43f3af070e08d0852ff87327f0dbc6b60d6a0b007763d88f331", "average_hash": "871b776aebfee45bfcd3fcc3d4739810ec01e801c02244000000101031103100", "name": "Foe Reaper 4000", "phash": 
"b3ef7b2aec581e5e8e030c61b3d003090e4dade518e75a5b5d23e5b323b1339c", "mana": 8, "whash": "ef5b7f6afbfff47ffc73fcc3fd539950ed41e901c16345600400109031903130", "card_id": "GVG_113"}, {"dhash": "0080dd07707e4378b4e2bd9d773dc4739a863c0dc01399ef10d867907f27cf7f", "average_hash": "073003c00380403080010021007f30fd307f117f037f83ff86f78f63ff1fff7f", "name": "Twisting Nether", "phash": "bd80f6fd3734fd23cb089c1a09e90a6421b3638d8946eb91c6507aa7dbd81aed", "mana": 8, "whash": "073803c003a0c13080110029007f30fd307f107f027f83ff8effaf63ff5fff7f", "card_id": "EX1_312"}, {"dhash": "8280ed3780c7011b07ac4cb8b942d78dfc1bf155e4b7836f0fdf1cbe7974f6e0", "average_hash": "f7bf2be03b8478081818080000000006006e003c003c02fc127c367c36fcfeff", "name": "Consecration", "phash": "6df2d207a7286d931d7efc283b8d29720bc70bf183535bb9da115b30d3a11b18", "mana": 4, "whash": "ffff7bf03b84fd0c3c1978006400040600fe007c027c02fc12fc367c37fefeff", "card_id": "CS2_093"}, {"dhash": "2cc8cc25011f4a7eb5c85001abd256459d6a261d592a3244f9b800614383e73e", "average_hash": "77951fb07ba8fda1f443f443f557e55fe15ff14b514101428003000001800120", "name": "Corrupted Seer", "phash": "f3b8e3012e164e834da11ca693a033e831986694e3e84e6e7f9bf3d92d2b394b", "mana": 6, "whash": "7ff51fb07ba8fee1f4c3f543f7d7f5dff35ff149514101c28043058405800320", "card_id": "OG_161"}, {"dhash": "f8bffdff000003f01ef33984f308fe79e8e3e083810f031f0660fc856e5a9a3d", "average_hash": "0700f303fd1b7c019e341c37087f007f087f307f307b307030308004d005f80f", "name": "Unleash the Hounds", "phash": "89d4136e7213f429598a9c468c600e92acdc0cb3c66d43ff4b652bd3794ff34b", "mana": 3, "whash": "0700f343fd1f7d01be76dc3f087f007f087f307f387f387a3030a004d80dfc2f", "card_id": "EX1_538"}, {"dhash": "d8bff9ff20ce6378deb03d4362804420db40ae9d1d333f043f0f781cc000809f", "average_hash": "07000300e30ff01ff207f677fc61f060b020f021f011fc01f019f011f001f00f", "name": "Bash", "phash": "33405d245ec5ae6d861a379a0f20b0704e63129f29bd1bdce9f45ee25bcf530b", "mana": 3, "whash": 
"07000300f31ff11ff23ff677fc67f060f061f023f831fe11f419f031f801f81f", "card_id": "AT_064"}, {"dhash": "9af66dfed2e485057b1c3e3098e06047749e80e1b9c7e70b193e66e6cc0891b8", "average_hash": "e709c17dc95f685e783e78bc703c202320380c203c32183c983d981998181018", "name": "Malkorok", "phash": "29ab7bce3ba39f1fbe588399c6d0a698488cd3841cce6fc1ed4d9620c9a2de60", "mana": 7, "whash": "ef19cb7dcb5fe87e783e7cbc7c3e612b20380d203cb29c3e983d98b998189018", "card_id": "OG_220"}, {"dhash": "caf08ce111cb0b2337c64c04b93c72d9cd929225610b02061cb72e6cdd9e633c", "average_hash": "f71ff19f7b1f783fb83f1c3f1c3b0c306c26680f60020000000000007015611c", "name": "Bluegill Warrior", "phash": "e99dcb657bf17e1a7f1e96638be19130241826989841db847e06e7849e23fb61", "mana": 2, "whash": "f71ffb9ffb9ff83fbc3f9c3f9c3b2c7cec2e682f681f200000008000701d711c", "card_id": "CS2_173"}, {"dhash": "0af80dc092082902764ce99e0030c1213e6348cc3f80708b03343fe0dc4fb39b", "average_hash": "f71ffb5ffbdc389738a13ea13f003c801d801d801f8008b018a01840a01bb013", "name": "Windspeaker", "phash": "c7317be279aa7fc67b6816581b5414d010d49281c7a71d4d36a37cd0ae926eeb", "mana": 4, "whash": "f71fff5ffbfff8bf3ca13ea33f813e801d801d901f800eb01ca01840a09bb413", "card_id": "EX1_587"}, {"dhash": "88dc2d21c25244bcb96073c987824b1db3420f34c4cb98076936466088814532", "average_hash": "070327672bef7947f0c3d0c1b44bc009c019e003e001c833d031900009020100", "name": "Kel'Thuzad", "phash": "bbfbdf88ce862f166e210fe1329192f104c64ecc6ae387c5365b19dbd1098599", "mana": 8, "whash": "2f572f67ebef7647f443f4c1f55bc14bc95be107e101ed73d431f08029070900", "card_id": "FP1_013"}, {"dhash": "8cdc9ab141a28b0d35db5aa6bf6a7cfdf13207c51c8ab004e333d266a2914527", "average_hash": "f7133e833f86581a0c08cc0b441c443cfc38bc1398039803d803c01b40020102", "name": "Clockwork Giant", "phash": "83cd6f98de0093b16fb038e69263a32b642e58cec9e1f7707f381f328f21c718", "mana": 12, "whash": "ff177eb73fbf58ba4c39cc1a457d447cfc78fc1f99039803d807c01349064106", "card_id": "GVG_121"}, {"dhash": 
"ca03ddff30fa03269fcc3c1961f0c7c8bf3370cde613cd65328b48763d29f09e", "average_hash": "07204100f109f00ff07b707f3074e003e001c81cf81efa1ff40d7c0f7e06f80f", "name": "Earth Shock", "phash": "e3a8bc9ddef57e733696a5f8ad638ccc1b19c666cc980f34b1c4238148217639", "mana": 1, "whash": "0720e380f109f10ff07b707f7074e007e005c01cf85efa1ffc1f7c577e06f80f", "card_id": "EX1_245"}, {"dhash": "0af88d20029b48a0f97fb4f4e8e187d30c87114aeb94f04937bae4f404081bbc", "average_hash": "f71fff7ffbedf1c900c0fac0f2c1fbc16bc44b844b81109490d390c1cb006600", "name": "Ancient Shieldbearer", "phash": "f38afbdaab79dbd67b4699252ec41a06c97032c1b3c81f93e38c16434c333ca2", "mana": 7, "whash": "ff1fff7f7bedf1e944c0fec1f6c179c56b8563854b81109590d390c183807400", "card_id": "OG_301"}, {"dhash": "3a7afce4c04113032636396cf318e4b3ecc0f9821b07d25f40ae04514aaebd7c", "average_hash": "8787c30fc11fb83f18369031982438263836583f607f303f103e001e000f600f", "name": "Gorehowl", "phash": "c9f717b35b1b7d8c5e021473b3dc8627c7b4529a00c2b331533cac4866e56eb6", "mana": 7, "whash": "8787c38fe11fb93f18369873dc3d386e3c36583f607f307f103f001e800fe01f", "card_id": "EX1_411"}, {"dhash": "ee3fb8f7416ab79f4f14d968b2d074a1e1c2c3c18b0bcf17802f00def89cfc79", "average_hash": "0140c107e037f07ff83ff83cf83cf83cf83e783c783ef03e701e101c081cf81f", "name": "Raven Idol", "phash": "e9d7bf321ee9deed9e776ef3b74f8e09b13e0cb6849a000ca434889227c88225", "mana": 1, "whash": "0140c107e037f07ff83ff83cf83cf83cf83e783c783ef03e701c381c081cf83f", "card_id": "LOE_115"}, {"dhash": "fadda533126f3cde79ac86d09b2166035886e11d06334c4890b1a3e643831724", "average_hash": "07007162f9c6f846f8c6fb95fb37fb2e732e633cc3a283c080c000c003000700", "name": "Eadric the Pure", "phash": "f736e3ae1efebcefef5815993f9126290040594628ba0e819f254b67618a1cd6", "mana": 7, "whash": "0f107b62fbc6f846fec7ffb5ff7ffb6e7b2e633ec3a283c080c001c003800700", "card_id": "AT_081"}, {"dhash": "02800d7ff0fce1e3771f4f3c9c3830c0412266ccb5b969438b06366dcc92b82f", "average_hash": 
"ffff01f0018e001f001f181ffc1f7e1e98098001a00436261e309c341001f00f", "name": "Redemption", "phash": "c9d75d6dbcfeb6b6b3c9a7e44b321893ebc03b9023615b215902de0059e6338a", "mana": 1, "whash": "ffff0ff0038e011f001fb81ffc1f7e3f9a099807f2047f341e309c3c1803f04f", "card_id": "EX1_136"}, {"dhash": "16a3604ea49e5d59fbb054c7adfc7b33e6444c1c18223a5ce1b98b4f37984d31", "average_hash": "eba6c998c059f058f81aec181d5fb87b98738013800300430003003b0138013c", "name": "Book Wyrm", "phash": "b9ed63276c56da90af179f1e7e9665e4d382492dcd9852da31147a188985c783", "mana": 6, "whash": "ffafc9b8f4d9f458f8daed581f5bbb7fdb77e117810300470043003b0138033c", "card_id": "KAR_033"}, {"dhash": "eaf30dffc2c009027f10a66660cfbc3960fec3ef8e850c4813ba67e74d408b3f", "average_hash": "f71efb7ffb1ff8be7cfc4edcc2cc85bc003809b803bc009c00c0008000000601", "name": "Fiery Bat", "phash": "6de763eef3e8fff6bed21eea2c8084218cb19a115c5018449d281f0d670bf6f0", "mana": 1, "whash": "f71eff7ffbfff8be7cfc4efcc6ec8ffc053c0bbc03bc409c00c4008002800601", "card_id": "OG_179"}, {"dhash": "9c762ce96012932c26403dc0f39960b398648c231e07f3dfceaa115464bae97b", "average_hash": "8785c30de31f603ff037b079907d9067d06038009c036036701c181008000008", "name": "Powermace", "phash": "a9d639985e8d5668564225b28451a6c5d2a49a364db06d2c372dda29d7b67fc7", "mana": 3, "whash": "8787c79fe35fe13ff83ff07dd07ff067fa6078209c236436f01c1c1059101918", "card_id": "GVG_036"}, {"dhash": "0af48dc0123105413b8a64c69f2c715942a290c4358b43040cb3b86023870d3c", "average_hash": "f70ff15ffb5ff85718175c3a1c3f0c3f0c390424700478000000000000000100", "name": "Nat Pagle", "phash": "cb9feba97328f3d03d5c694c52a212f2800dcb062eb2760bb685be07cf09ef89", "mana": 2, "whash": "ff1fff7ffbfff85f5c7f5cfa5c7f1c7f1c3f5c25780678047000000001000100", "card_id": "EX1_557"}, {"dhash": "3cdc2c38e1b4ca60b4e15189bf12783ff0e2e59dc36b0b171c36266b188ee13c", "average_hash": "8793cfa343a3c003c0038003803100783c38303c703c783c3c30300d591ce11e", "name": "Hobgoblin", "phash": 
"69dd9e441b06e7013f8396b1739406f9922664e8b8728f8f9b79fce4c3c8464e", "mana": 3, "whash": "8f93cfa3c3a3c6a3c487c027817101783f78317c717c7d7c3c32108d599ce13f", "card_id": "GVG_104"}, {"dhash": "faf1edeb02170d00f89f3039479acc24377f6ce8001487c8deb97de3e044c33b", "average_hash": "071e1f7f7b7f7cff04c676c317cc16cc07db8b9f0bcb20c060c070c0e0808610", "name": "Fearsome Doomguard", "phash": "dfe9cfee6fd69fcf9fe176f21e44990c14b0420661b23b08c78866802b221c9f", "mana": 7, "whash": "0f1e1f5f7b7f7dff049636c797ce16cd06df8b9f0b8b208074c074c0e0809611", "card_id": "AT_020"}, {"dhash": "42188d3224e778d8d180a359c0bb9be32e8d597a83e404997b72ce813107e6ff", "average_hash": "ff67b9e7b1c2f0c2f2c3b2c033c01bc043826380638050d018d039820602c4ff", "name": "Obsidian Destroyer", "phash": "f3e4f372897ef6ffbd5cc6b939ba3a8421e76246219a2e7489218ec139320270", "mana": 7, "whash": "ff67bbe7b6e6f4c2f6c3f2c1f3c01bc263c26382e29078901cd039024602ccff", "card_id": "LOE_009"}, {"dhash": "ca83cd2f92750c837976e3c807fccbf817e12cc67988e300cf39bcf16843530f", "average_hash": "f710fb60fb41f8417cc0bec1df41fcc3fc83fc837d803d003cc038c018000000", "name": "Ancient of War", "phash": "c72bf3e1fcf2e7bd3bf46cb22e8541a1201c3c0649c19b01e389709c7c86cfc7", "mana": 7, "whash": "f714ff60fb61f8c1fcc0fec1ffc3fec3fe83fe837f813f003cc038c018800200", "card_id": "EX1_178"}, {"dhash": "3cde6d31c286441fb9f475338ac84031bbe25545e78acd151a33346f6c8fd93e", "average_hash": "0700036201c00040d0c0f0c6780098003818781c781c781cfc1cf81ff91ff91f", "name": "Majordomo Executus", "phash": "63a79e88b9046e0bbf0d17746b801a91867936c66bde4daddd31527b66c22587", "mana": 9, "whash": "0f140f6003e3044ad460f4c7dd64b9403d187c1cfd1c7c5cf81cf81ff91ff91f", "card_id": "BRM_027"}, {"dhash": "82803dff70f6e38c9c1900681c1b08b0d36c6fd3380643106661c84f3199fe3f", "average_hash": "7f7e67e06bff6f7f663e7e1ebf081e0038403ec03ec02800300030000000f00f", "name": "Shadowflame", "phash": "674c2d8cf99c96ab96636e510ba80de069d68cf681e9be3c4bf116987bc17e80", "mana": 4, "whash": 
"ffff6ff06fff6fff7e7e7f1e3f085f087e407ec03ec02800300030002000f00f", "card_id": "EX1_303"}, {"dhash": "fcd6ad4f020d457ebeac61fe81f043d18fa21b253e4be094e03100771b80f724", "average_hash": "07012b61ebd0f070f0f1fcb0fc40fc01fc03ec01ec01ec000806000800000100", "name": "Hogger", "phash": "b33ee7a0fc901b0e1fc7cc4033900cf1cc6032790b674e6639e5db3c670b97ce", "mana": 6, "whash": "0f556ff9ebfaf671f471fdf0fd41fd41ff43fd01ed43ed442c06048801800120", "card_id": "NEW1_040"}, {"dhash": "08e40d4153da84e57b97a73c48e1b06b03d706ce4f95190824314ae0b5404fbd", "average_hash": "f71ffb3ffb4f7845ccde7fde3fd060f123803980db80288000c0004000000600", "name": "Undercity Valiant", "phash": "f389fb3afbd8f3593b5019ec4b640946109832c2a6c17e187f023f43ff017761", "mana": 2, "whash": "f71ffb5ffb4ff86dfcfe7ffe7ef366f1be80b9a0dba0388000e0006002000400", "card_id": "AT_030"}, {"dhash": "927409c842b3bda65649ae02540d8d7272e7cd8e1105661b8eb394a6694886fd", "average_hash": "e30fc11fc01fe01fe09fe88f880b180c1839383628a66096701ef01ee00cc07f", "name": "Unearthed Raptor", "phash": "490d990b79faeea69e4b536b97166b118de9561229ad32b49d26564a192736d9", "mana": 3, "whash": "e10ff15fc01fe09fe8bfe8afc88f188d18b938bc28a6609ef01ef01ee00cc07f", "card_id": "LOE_019"}, {"dhash": "9a333046eee059fd93f2af09df630c867f4dde123c2d70d9e02341d6d6a71fff", "average_hash": "efcee7cee3824182f08790076006430cf007b0078007848384038103c903ffff", "name": "Cursed Blade", "phash": "dbdd66669f2f5bb86f425e1fc5f59b556fee9e5d643691216422334124801180", "mana": 1, "whash": "efeee7cee7c27182f087f087e206630df00ff007c0878407c5038943c903ffff", "card_id": "LOE_118"}, {"dhash": "eaf1fdefe094e327137c3e936c2cc203fc9f91610ec2f1ce8e092b80ff38f8df", "average_hash": "07006308fb1c3c3c3e1c2e5f867f8847047e0f7c0e79047e0c7c18000e00e61f", "name": "Multi-Shot", "phash": "0fa86bcdf655bc7eb826dca91b580e90c9b4967729cba9fcd3c1330a4b403d24", "mana": 4, "whash": "07086348ff183c3c3e1e2e5f8e7fc847047e0e7c0f7d2c7e1e7c1e000e40f63f", "card_id": "DS1_183"}, {"dhash": 
"d4b02fc1c080b910726fe4ce9cfd313b6376ceec8491940b3932f3e4e6899b1f", "average_hash": "a74f63cd71dd18d808b09bb909b889b99999899bd999d898c891c9d053003301", "name": "Temple Enforcer", "phash": "350d4d18f2c8714adb58300e8bd09e3400788ee7097d8ae7fff39ab4bca766d1", "mana": 6, "whash": "e74f62df79dd48dc08b01fb90fb88bb999b989bbd9b9d898c891c9d053003309", "card_id": "EX1_623"}, {"dhash": "889a2d2442c9bc207b79e4eec913826f1fd636cee990bf0b6f3776e47c08f11d", "average_hash": "2700236481c000c808c0099083800180238133813b98399bf8bff8dffa1ffa1f", "name": "Soot Spewer", "phash": "e7311e1e2ba2cf50fb109d642f4c068d08ec666133d39b4d0eff2ef79ba11891", "mana": 3, "whash": "2712a3e689c428c808d00dd09781018023a1b3813b98b9bff8fff8fffb1ffb1f", "card_id": "GVG_123"}, {"dhash": "b80d7c7e60f9d4f5a382a799cf099e63780de952f0a5025982a2c854f8affd6f", "average_hash": "07c00fe087c9814fc08fc08fe01fb00ff38fc00fe00ff00734001d408903f92f", "name": "Hammer of Twilight", "phash": "5bed1f60b65b26906f04c6b421800669c1f0c82c27a036316fb35edb3f9fff0d", "mana": 5, "whash": "07e00fe087c787cfc18fd08fe08f928ff38fe00ff80ff80734003d409907fbaf", "card_id": "OG_031"}, {"dhash": "eadf8c3f417ccaf4b4004bc5b3f06701ef02d91db42b78d7a4b7c36702860d38", "average_hash": "0710ef83e3a3f023e003f000f000f007e037e0338017801f403f801f000e010c", "name": "Young Dragonhawk", "phash": "b3b6b3967f4ddce5cee70ce12ee109289188db10a6706e68372273923b03decc", "mana": 1, "whash": "ef13ffa3f3a3f0a3f423f400fd44f847f977e037a137c017c03f801f010f810c", "card_id": "CS2_169"}, {"dhash": "e61fe8ff81f316862f585eb4b804200943328e651c8b93072e2bf8d601b80a7e", "average_hash": "01e00186f00ff80ff81ff83ff83fe839e830f83138313c50f818f00ff007f01f", "name": "Silvermoon Portal", "phash": "a3dfdd175ce0966abc1675e987ed782f666890b3219e87b8a917a611891a2ad0", "mana": 4, "whash": "01e00187f80ff80ff81ff83ff83fe839e831f83938313c50f818f01ff007f81f", "card_id": "KAR_077"}, {"dhash": "0ce18dda92d509037e483ebedc30b1416af3d62281c5070bffbc08f0530eafbc", "average_hash": 
"f70ef31bf91ff81f783f3c3b3c393c3d0c3e043f04357830700c600c4004001c", "name": "Bloodsail Cultist", "phash": "e99dcb617a62fb741e5863580e04583c63c21eb649f93fc65c6ed3b0b4274c38", "mana": 3, "whash": "f71efb1ff95ff83ff83f7c3b3c397cbd0c3f04bf0431f830700c600c4005001c", "card_id": "OG_315"}, {"dhash": "3af8ec80d1318beb34df49f0bfc07c09e31a5e25dc4b6097d037616f438c9535", "average_hash": "870f811f830118023803fc03fc07fc35e431ec19cc1b0c16000f801f801f801f", "name": "Captain's Parrot", "phash": "993fd746dc813cfe3bb9d990c74c02888cc089a55bc0b624fe497699df329f60", "mana": 2, "whash": "871f899f9387d8337823fc03fc27fc35e433ec19cc3b8c3f801f801f801f811f", "card_id": "NEW1_016"}, {"dhash": "da34997d22e25c0fb1b6e3c80199587221c4030dae22585da4b88b636197f57f", "average_hash": "176133d905cf44cef080ba81bb019a113810f800f141804140004000010683ff", "name": "Reno Jackson", "phash": "f3f1cf695d5e3699af31c631b33166640dc418e6190eca5889c1abd553c54e5c", "mana": 6, "whash": "37533fe917ef44eef481bbc1bb439bd1bb50fb41f9418041458145040526cbff", "card_id": "LOE_011"}, {"dhash": "0284ed3b60ff03cc8f3306691d84781f5268e8d8b52399c236854f0fd4989821", "average_hash": "773f51e23b83fc13ee1cf61c8e1ecc2b0e07161216300639043bfc3dd81df80f", "name": "A Light in the Darkness", "phash": "0b51f72ef19f9cd0dee66d5b6e2029b4115a2119c3359ab1cf505a035f23df42", "mana": 2, "whash": "f79f43e23b83fc13ee1cf61c8a0ecc2b1e07161216280639843ff63dd81df80f", "card_id": "OG_311"}, {"dhash": "fafffdff208d1392c939bee73d9ceb0082e1132be8521087a748499f9024f94f", "average_hash": "07000107f31fbc3ff20ce668fe7ffe7ffc5ffc4f4e7f0e72280118290018400e", "name": "Misdirection", "phash": "2b0a7b77fed59e9c3de0a6483e6646998a342d6103da1c726fc17b81db015e8c", "mana": 2, "whash": "07000107f91fbc1ff20ce668de7ffe7ffc5ffc5f4e5fae72380118690008400e", "card_id": "EX1_533"}, {"dhash": "6cf88df342cc051efb61e01e1ffc7cb8e910d385854b0883f63e3ded6a4ad316", "average_hash": "8703335c715e701cb01d00019d34fc3c7c3c3c3c0d3c00300c3cb034b016f016", "name": "Cenarius", "phash": 
"69377fcada602e9fa3639e109e49069929cd79b20d969684733e51ee632249e6", "mana": 9, "whash": "8713335cf15e785cf01dac83bd3cfc3cfc3c3e3c2d3c08300cbcb874b016f81e", "card_id": "EX1_573"}, {"dhash": "8a894d3f90e401c937004f90be017f0ccc3310e66286dd10ba61604ee09ffe3f", "average_hash": "0738c3e2f387f807f007f007e027c13f807f8c0f9808f809f00de0038003f00f", "name": "Bear Trap", "phash": "db77ffa67ee5276b1cbca49803f9089c49a22e410b4c3a995b807b107bc65e2a", "mana": 2, "whash": "073cc7e2f387f90ff00ff007e027c17f887f981fd88af80df80de003e00ff00f", "card_id": "AT_060"}, {"dhash": "88d90ca291450bbb35734be4b6905c6df1e2e28d918907171b36666cd4990933", "average_hash": "f717f317790f180c1828cc2dc40c201c303e383a383838181818181018130113", "name": "Silver Hand Knight", "phash": "c36573d29ac4e724792c1ac2134da42148384c923cd39fe7fe788f1df2e15e36", "mana": 5, "whash": "f717fbbf7bbf383cbc28dc2dd43de85eb03e383a38383c381c38183019130913", "card_id": "CS2_151"}, {"dhash": "3a7730fec1c34f261f9a76ec6d18f931e063c3871d7f6eeef10c2a18c67ebcdf", "average_hash": "0100c105e03f703f783ff85bf80ffc7f9c70bcf1b870dc74de27ca608071f85f", "name": "Kara Kazham!", "phash": "a1bb4add7e9c36b6746f1e47f42bb4a84fc4f098cb592c0aee0e0d4c92968e90", "mana": 5, "whash": "0100c115e03f783d783ff85bb83ffc3b1c70bc713870de70de23ca618071f85f", "card_id": "KAR_025"}, {"dhash": "7abcec616093950622f9acc8d935b16b6215dc628384265bcba6bc5502a2ed7c", "average_hash": "8783038f331f301f781eb83fb037b83b78310816481850021006600601048905", "name": "Stormforged Axe", "phash": "c93ddf395e7a1ee69f1390f2048586bca6a420ed81e1b67127696e0a67c8ee49", "mana": 2, "whash": "87c3078f375f315f781eb83fb83ff83bfab51817681a58021c0665470105e9af", "card_id": "EX1_247"}, {"dhash": "ea9fcdff309d631894e509c117460edc77e4e1191232bee67f8c6119cc329007", "average_hash": "0700e107f10ff00bf001f600c4104218fc1ffa0fee02e663f661fe61fe20fe00", "name": "Corruption", "phash": "3340687fb7b3dfee4eef3cd65e34a79318c82c2ec34491704a86e64119cb36a4", "mana": 1, "whash": 
"0700e107f11ff00ff421f621c6184218fc1ffe0ffe02e763f661fe61fe60fe00", "card_id": "CS2_063"}, {"dhash": "02800538f48200ff24fecf31be87e2053d0af81488eda09b4337566e0c81b81f", "average_hash": "ffff09f801c600000000e13fe1ff81ff853f803f803f001f981f981b99044007", "name": "Flash Heal", "phash": "d9f7749ad6cb9de9753ae167699c03c50246262221445b91dbe05e8279e91eb6", "mana": 1, "whash": "ffff0bf801c600000000e13fe1ff81ff85bf81bf80bf809f989f999b9984f187", "card_id": "AT_055"}, {"dhash": "fa81fdff40c703020f7c1ce0338860916f02df4421b1c063e2cfcd8f1100f83f", "average_hash": "07100140791ffc3f7c38fc31f025d020c8248c1fc00fd202361f361f6e00f00f", "name": "On the Hunt", "phash": "2b75a7ffce6cdcfe3696366232e80d9c6463324109181a964be11e29c98c77ce", "mana": 1, "whash": "07184360f99ffd3f7c38fc31f825d020c824cc17c00fd20f363fbe1ffe01f00f", "card_id": "OG_061"}, {"dhash": "8ae824d9d1b20b433486483cb3384675d88a81b51b6aa6941139676af89ff13f", "average_hash": "f79f7dbe7bbf1dbf1c331c221c442946346404220501cc001c0038007921f91f", "name": "Injured Kvaldir", "phash": "c7fffdcc7b8d3e7a7bb04693396086c0392298108b613e869b011c847722ffb3", "mana": 1, "whash": "ff9f7fbe7bbf5dbf1c371c221d643d46356405620541cd401c003c8079a0f91f", "card_id": "AT_105"}, {"dhash": "ecc88cb101c3432635785e46bc9e599de33296e528abcb56b0b5506f63908722", "average_hash": "779f3fb73bbf31bd3c3884198059c558cc3188259125f04c000c801e81080101", "name": "Acolyte of Pain", "phash": "3d99efa1df34d3c43e8538631a9208cce6c865e1d8d333333bac5c4ee524d296", "mana": 3, "whash": "7f9f7fbf3bbf35bd3c388c3981590559cd71cd65c565f54c802d809e818a0101", "card_id": "EX1_007"}, {"dhash": "0ac7ed1dd2f30804ff39bcebe185951b0b77b3ea66c49d88683980f23e05fbba", "average_hash": "e71e836163c178dd207cc271e2d1e98189a48bacda0cf80be8cb00ca120a301a", "name": "Arathi Weaponsmith", "phash": "330fbf98cf6cdfc31ed2326d29e44c34419c639873d85b82cc92fd14f43c1c83", "mana": 4, "whash": "e71f8b617bc178bd207cc671ecd1e9c18b848bacda9efc9be8cb00ca328a380b", "card_id": "EX1_398"}, {"dhash": 
"808ceddfe0001b623ec5848b3dbe39b87428dbf8a2c33787451e8139932d3ef2", "average_hash": "070003c3e91bfc31bc3118111e191e381e3c0e3e0e3f0c7f0c7f04ff067f027f", "name": "Ice Block", "phash": "8d4c7225f11e5f864ed05c18be010ca4a3560f9907f37965f3433bfd53ea79a0", "mana": 3, "whash": "070800e3e93bfc31bc311c191e191e391e3c0e3e0e3f0c7f2c7f06ff067f027f", "card_id": "EX1_295"}, {"dhash": "00808d1f70e7e1bc34e74f829f1439396e4adc942c090b127764fe9fd03f0400", "average_hash": "ffff07e0038003008001c00380038003b007b007900d8001c003f07fe03ff01f", "name": "Sense Demons", "phash": "99591e806f26eea2e9689b0a0399211866c52d1ccae67964d3db3add59bb3a75", "mana": 3, "whash": "ffff0ff003800380c003e003a01f800bb117b1bfb52f8107c927f3ffeafff01f", "card_id": "EX1_317"}, {"dhash": "8ce18d8b02274516bb3c66c9843259d7922a27954e6a9f5468bbdd739b85513f", "average_hash": "f71e7f7e7bf2787874f8f4f81548114941088008e108e040e4027001310b111f", "name": "Nefarian", "phash": "67e9e7a0fb047b06dfc35f483b3332361894ca980ae3d7e433959465938cd4a9", "mana": 9, "whash": "ff5f7f7e7bf37d797479f4f8155811495348c108e148e540e4027083318b113f", "card_id": "BRM_030"}, {"dhash": "eae3ccc991b30bcd37be4f50bf647ef9fcf2f0a5f1c9e1178637086f739ee53c", "average_hash": "770ef10ff90378027804781c7c1e6c3e7c3f3c3e3c3e1c3e083e001c001c001c", "name": "Violet Teacher", "phash": "c917f3c17c818f717df86cb247e360980cce4cec2c31c3993a081f46bce3fb98", "mana": 4, "whash": "f70ff90ff90b780b781c7c1e7c3e7c3e7c3f3c3e3c3e1c3e283e003c201c001c", "card_id": "NEW1_026"}, {"dhash": "808c2d33e0c08b1fcfd316271bce64b8d93027ecdc9131038f7cda772189f802", "average_hash": "77be03f6418f8c0fcc0bc612ce3c8e7c0e7c1c3c1b3f1a3748007012081ac00f", "name": "Blizzard", "phash": "0b473f3ad0accd016d5037e4834160e8025a8f1542dd0ef773f1728bfbf572cb", "mana": 6, "whash": "f7bf03f6418f8c0fcc5bce5ace3c8e7c0e7c1e3d1a3f1a3768417016181a500f", "card_id": "CS2_028"}, {"dhash": "0283f53fe0f9c3d98e311f6311ce633cc45808b11f66f889c13f045a3bfe0452", "average_hash": 
"070081c0f807fc07fe17fe3bfe24fe23fe603e40fe03ec07380e206ff43f6016", "name": "Flamestrike", "phash": "037e3fd7e8bfd6a9f6647d420ed9ac680b841b94cc5b23f4d3410ba15e0671a2", "mana": 7, "whash": "05008140f807fc07fe17fe1bfe24fe23fe603e40fe03ec07b85f206ff47c7016", "card_id": "CS2_032"}, {"dhash": "ca8f8d3f12e83c387bf0e5e0ca819d030206f71e9e313c4ac0b281e5030b0713", "average_hash": "3700f360f943f8cdf8d3f99ff389e1890381c380c3b3c0b7c097c0d300138211", "name": "Water Elemental", "phash": "f1a5e75ba2e8fed46e581cb42e644c3900b86c310cb15a94bafc3b51d3a1bec5", "mana": 4, "whash": "7700f940f9c3f8cff8dbe9bffbabe1890381c381c1b3c0b7c097c0d7821b8213", "card_id": "CS2_033"}, {"dhash": "6adbccb491290b53362450f8b0c07705e13acf5d5eab1c863130e372e485c53a", "average_hash": "d712ffb3fbb1b835bc38bc00dc23c423c431c031d039c011c801c800c000c100", "name": "Shieldbearer", "phash": "b377c7b1ff3139e13efc1ece4ee0c4600cb0c14086825e6c6e89e7c17d60b339", "mana": 1, "whash": "df92ffb7fbb5bcbdbc3cbc18dd63c563c571d179d179c1518811c880c180c100", "card_id": "EX1_405"}, {"dhash": "0ce60d8902104d63ba8e6d5c9a386cc3130260ecd88a2745cfb8987321862728", "average_hash": "f71ffb7ffbdf00f330f918bc783c781c600900180008100630060006000c010c", "name": "Soggoth the Slitherer", "phash": "e9a9a3891b2a5b686b5a43d22e933cbcd23c6e485ad65ac67316437eb382d394", "mana": 9, "whash": "ff5fff7ffbff1573347d3cfc7d7c795c718d0918015a11463006008e018c010c", "card_id": "OG_340"}, {"dhash": "08f40d7842a089617706ae6c0891c3af7947f6ec80c10f0a7b35fce7784fe19a", "average_hash": "e70ff30fe14fc09f981f989bb000a001809c019c1938f880f880f816f81ff00f", "name": "Mana Tide Totem", "phash": "632119cbdbcc6e1e7bd0b6208f8149d41252de8c3eb967c78cb9d9987272a6e1", "mana": 3, "whash": "e71ff34ff14fc09fd89f989bb201a08180bc019d39bcf880f882f80ef81ff01f", "card_id": "EX1_575"}, {"dhash": "fa9ffd7f60e20f841e68b8d07081f11af37686c92d03cb16079d0c300024fc9f", "average_hash": "07000307fb0f6c76647002790079807988791c383e3c3c7c347c304c0800f80f", "name": "Cleave", "phash": 
"ad9abf8c5ae6cff33803bce82790266021fc06b643660c4f5b529e217b8b7e16", "mana": 2, "whash": "0700c747ff8ffd77667c2279c079807988799c783e3c3c7c3c7c344c1808fc0f", "card_id": "CS2_114"}, {"dhash": "8abf7dff60d0c7609bc132027504cc7a88ccd93913332e6618cc7939d3f064eb", "average_hash": "0700c10fe31fc45f4c5f0e7f046f006f406662726270e238e238623c2023e00b", "name": "Snipe", "phash": "794c19ff73f57604dcd70e72066986bc0cc224fc23908b1d6b43fe19eb633b01", "mana": 2, "whash": "0700c34fe31fc57fce7f0e7f066f006f406762726278e278e278627c203be03f", "card_id": "EX1_609"}, {"dhash": "08f82cf201eccb18b7034c4eb83c73e184822105870a1e04f4b9c8730187952c", "average_hash": "f79fffbfebbee4bf8c3d0c380d70397079587808790078417001600001000100", "name": "Blackwing Corruptor", "phash": "675f69275b32f3c8e7a0968836810160c118d3b47be45ed66f167363338bb3c9", "mana": 5, "whash": "ff9fffbfebbfe5bf8cbd8c780d707d707b78794879407941f001e08001800120", "card_id": "BRM_034"}, {"dhash": "aa01fdcee0350f0e1e71003e879168ea99a437586f20d213bc275acf60befe3f", "average_hash": "072007c01b381cf87cf80a19e80cb064f86cf20873093c01c008602c801ef01f", "name": "Deadly Poison", "phash": "6bdbbfadbdf576d95b321e3a1bd02cce85ba8c30634d0e05a3499c2063b83186", "mana": 1, "whash": "077c07f03bf85ffc7ef83e39ec0cb468fa6c7208fb097d01c009601e801ff81f", "card_id": "CS2_074"}, {"dhash": "ccfa8da39245091f762cacd81b61b1cd6713d32c8651008af234f9e006414bb6", "average_hash": "771ab37e73de78bc38bc3ab03b3944b8e5bce1b4e3b020a00080004200020602", "name": "Thing from Below", "phash": "e534e309dea27f183bc28608a700133604d02ae5e7fb56d43cae74ede9ea9b8e", "mana": 6, "whash": "f71abb5e73ff78be7cbcfeb27fb966b9e5bdf1bce3b022a000a0004200820406", "card_id": "OG_028"}, {"dhash": "76f440c7851c1f21becc58b3b1445ec9a0025914f7e2ac5da33b4657b888613d", "average_hash": "ebbf793ff839d83bcc3b643bcc1fc81fc0078003e006e003f009301c71046138", "name": "Moat Lurker", "phash": "29cf2905ef51da337f1e5bd8ff1c16d1918713363363929069499e198d39174c", "mana": 6, "whash": 
"ebbf79bff83fdc7bcc3b643fcc5fc81fc807c006e00eb003f009301c7104713a", "card_id": "KAR_041"}, {"dhash": "c8f00c8301300ae734ce4b8cb7386a3190e26c95c9ca1094b33966625c8cf931", "average_hash": "771f7b9e7bb238033807bc079c2f184318003008040908001808180818007100", "name": "Doomcaller", "phash": "c349db6467947ee87f8889d31b9840e996d29c0c9bf1b6f13bad73cca520624c", "mana": 8, "whash": "ff9fffbefbb23cb3bc07bc27bd6f3d4f394931480d491c441c08181839007910", "card_id": "OG_255"}, {"dhash": "3ac0ec9f117f4af8b4c151c1a7867d2df9b2e25dc5290bc73fb6326cf9990533", "average_hash": "07900fb0e3a5e027e007d007c40f846f843d003e003c0038003c003001330111", "name": "Mini-Mage", "phash": "39fd5302f6056cb19e9b0ee9eec480a4115831ec63c21b91afb1db433e21cfbd", "mana": 4, "whash": "0f980fb8fbb7e5a7e4a7f547c5cfcd7f877d017e057c0178007c04b001b30331", "card_id": "GVG_109"}, {"dhash": "28dccd3703d345c4fa79a3e34e9f913e02773ece7c9485096233c4e28c4f3bbe", "average_hash": "d7177b26fb47705d60e6c3cd83dd80c98d809d859986b88238cb104390079a04", "name": "Shadowcaster", "phash": "dba3fb8cebe4c7251e133c693b86049c60bc99b10b9b334373c7db98b2304c36", "mana": 5, "whash": "f7177b47fb47707d60f6c3df829d82c98e809d851f86b88a38cb10439a0f9c0e", "card_id": "OG_291"}, {"dhash": "1cf80cf20180cb17b6094c4bb8b07375c4ee018d3d8adf17b337046d669ecd3c", "average_hash": "e70feb9f411e003e303f083104306033303638003800d839981dc81cc01ec91e", "name": "Piloted Sky Golem", "phash": "e9a5f99c7946d34373c4311b9299c690b1d93c3c0e332de5d9cc9399b323cc62", "mana": 6, "whash": "e71ffb9fc3bf003e303f2c3d8c397073703e38003c01fc399c3dc83cc81ec91e", "card_id": "GVG_105"}, {"dhash": "98f90cc46100ca07b57d56dfa9385365a6ca0c154b2af6c49c39136626909d2f", "average_hash": "ffb7ffbe87bf04f804d8055837d033d237707568734861ce4c5e4db809a00320", "name": "Faceless Manipulator", "phash": "6f7cff211b0673c4d721432473a0066cc82496c2198cd9b83de99b2ffdb236d3", "mana": 5, "whash": "fff7ffbe03bf07f804d8055837d031d2375037e073c871ce4c5e4db80da00b20", "card_id": "EX1_564"}, {"dhash": 
"fc87fd3f20ff01fe87000c601ce037506a10f1a4ccc193032e07c80df01cfcbf", "average_hash": "0700f301ff01fc03fe077e003c10dc096804ac002c003809f80120044003f00f", "name": "Thistle Tea", "phash": "635d3360fc603c1b1c868e83cc607c3419c81c7d8e39c69c638e3363cbe3bffc", "mana": 6, "whash": "0700ff81ff03ff07fe1ffe04fe10fc1dec17ac0eec037c09f809600c6017f00f", "card_id": "OG_073"}, {"dhash": "127b29f6a2803921d272a4cc5939b3f364e4d1caa3053f1b1e32ece4195df2ff", "average_hash": "c507c35fe01ff01d72989810f8903830783078b078303890389030103008a07f", "name": "Dark Peddler", "phash": "e3a539ee797ac3b69f569661b775264981dc46dc319b02d219bb4eb01904ee30", "mana": 2, "whash": "cd4fc37fe2fff09df298b8b9faf078f07b307ab078907890389030103208e0ff", "card_id": "LOE_023"}, {"dhash": "cafc84fd41a28b0034274b5ea4b443c98fe2f8c5200b4b17f4b4f961028e9534", "average_hash": "3707f19ff99fe81fc8083c283c007c003c071c071c060c018007c007c003e101", "name": "Core Hound", "phash": "db3fcfed79b1dbe4d7e866d026d398c23698360649309968cb0064c89e239ee3", "mana": 7, "whash": "b717fbbffbbfe83fcc383c283c087c407c47bc079c07ec07e007c007e103e115", "card_id": "CS2_201"}, {"dhash": "ccf80cc291840b3c36f34942b6c4798df19283259ce90092833466611c81f92f", "average_hash": "f71ffb9ff91bf838f831fc2dfc3c4c3c0c386c3004320c301820100010003100", "name": "Kodorider", "phash": "23577325d221ddb479585b92339012b22d8dcce043cb971bb9f0b2d63d8d3ce9", "mana": 6, "whash": "f71ffbbffbbff8b8fc39fc2dfc7cec7ccc7cec380c3a0c301820180019003100", "card_id": "AT_099"}, {"dhash": "e8d2ec6821934a64b6a054cbb17647c9961a8d25626bb4d7c1b63f6ffc9ce13d", "average_hash": "879f8bad33af301f0017800180016c43e440646040200030003cf01cf91ef91f", "name": "Flying Machine", "phash": "7bdd9e1049913e46738e1ca1bea0026688ace3cc9cd967ec6d69c6cee32cdcb0", "mana": 3, "whash": "8f9fafaf33af34bf84178011a5436d43ed416d6041600570043cf0bcf9bdf91f", "card_id": "GVG_084"}, {"dhash": "88eccce211840b2034fc59e9be7261e5c4c2a9859fcb3d129b30f673648ed93c", "average_hash": 
"f719f18efb0ef8018800e400f03c70227c007c20fc30f821f800f80cf81cf11c", "name": "Mad Bomber", "phash": "e377ffc8798397d56df830f83c9c060b92c129c16319f3c8fb08f684d9006e63", "mana": 2, "whash": "f71dfb8efb8ff8338c20e400f43cf4227c027c22fc33fc21f800f81cf81cf11e", "card_id": "EX1_082"}, {"dhash": "48f88d4013c10c13796e24984260d92027f3f9c0cf859b0b373e2ce44041913c", "average_hash": "f70ff90fbb0d180c180438017c083c801c103c183c981c3938bdb01c1004000d", "name": "Darkshire Librarian", "phash": "c3b1f3e2f9706b7d7e6c739c0bc018a40c81424e63989eccb731d7013f23f69c", "mana": 2, "whash": "f71ffb0fbb4f187d18847e917c887c883c903e983cbc3c3d38fdb03d781c000c", "card_id": "OG_109"}, {"dhash": "1a9e2d7c40f878f073e0e7c48f8917132f067c4cf810c009f033c0e58089031c", "average_hash": "e307e1c7c1c7c087c087c187c187c187c187e187f18ff08fe087e087c1038301", "name": "Lightwell", "phash": "d98711eeaefae17ebe4301a46fd010a400f06b0508f4be05ff197b05fe0b7b55", "mana": 2, "whash": "e707e1c7e1c7e0c7e087c387e187e187e187f187f18ff08ff08fe087c3038301", "card_id": "EX1_341"}, {"dhash": "4cf08dc382950bad7e1e2cbcd8d8bc317162c0e60f85bf08773b6ee4cc41032e", "average_hash": "f70ff31ff9187838783c7c387c3ebc3c9c381c381c1018001018900090000000", "name": "Carrion Grub", "phash": "e3c36b8a7aa03f391f72d9a08e0c89b909b9d3446cc472cc376f77637ca4c4b0", "mana": 3, "whash": "f71ffb5ffb18f838787e7e3e7cbefcbc9c3d1db83cb03c109818d001b0020002", "card_id": "OG_325"}, {"dhash": "02c0fd7fe0e75f029c5430bd241c49d89290e7730fe09d2070c78d0f6b16d82c", "average_hash": "671e01e0f1bffc7e3e7c3e783e781e784e78c679e600fc01e208260c1c269826", "name": "Holy Light", "phash": "275aa5abf8763aad8dcf4e89b2612db023b9037639980b77dbc599015b895e42", "mana": 2, "whash": "779e01f0f5bffc7e3e7c3e783e781e784e78ce79e600f6016208260c3c2c1826", "card_id": "CS2_089"}, {"dhash": "8addadf012c749b0d701a6ce0c09d1173037c1ecbe51448a8a3065e2d94727bd", "average_hash": "c717e17ff3dff09fe0bfaa9fb31d12b4099009b00fbc089400800040200b0609", "name": "Eternal Sentinel", "phash": 
"598dfba49bf89f369ec19aa00fd0844c40c6c3d066ba4f056e26fdc1df61fb08", "mana": 2, "whash": "e717eb5ff3fff1bff4bf2e9df79d529519ba0db00fbc0e9600800040008b4409", "card_id": "OG_026"}, {"dhash": "8287ad7f50e0918037034e8e983c315962b2c6c48089f713ce2418cf219ffe3e", "average_hash": "77bea1e7f38ff81ff81fdc3f1c3f3c3d3c391819180c380e700660020000700e", "name": "Unstable Portal", "phash": "c9dd39b9f662f662a69d86e60cc32cad49944e981b614b647b077b805ba17e98", "mana": 2, "whash": "ffffa3e7f39ff81ff81fdc3f3c3f3c3f3c3d1819180c300e700660024000f00e", "card_id": "GVG_003"}, {"dhash": "4affd8b081470b9e36604cc2bb047d7df2f2e0c5ce8b9b1767378c6e989b7130", "average_hash": "f70ff71ffa07f804f8202430043104382c383c381c383c1fb81b381f301ff00f", "name": "Frost Giant", "phash": "833d33b2db80cb693c6c18bc32fba63c739e4cdbe2649e2c7cc4de404b803fc2", "mana": 10, "whash": "f71fff1ffe1ff804f820ac300c3104382c383c383c38bc1ff81f381f301ff01f", "card_id": "AT_120"}, {"dhash": "fc8ff93b04e6019e1ffc1df03fe37b04b40a6048cc3033436006864c009cf83f", "average_hash": "0738f7c3ff87fc01fc01fc07f80ffc1bf6173b18131b861f040710030000f00f", "name": "DOOM!", "phash": "6345a708de521ea03d2a3cf62cd38cbf097e8c5f4bf766bfc394060dc3831ea0", "mana": 10, "whash": "073cf7e3ff87fd01fc01fc07f81ffe1bf61f7f181a1b921f060730030000f00f", "card_id": "OG_239"}, {"dhash": "0acc8c93019e0a2cb1b04cc9b9126205b89a07656bcade160437b96c648a9934", "average_hash": "f713f383f38070096001a021a02100000410ec007c00fc097c187816d8057905", "name": "Piloted Shredder", "phash": "635fbf68fb85434e9cb29d3193d818ae6cc843c2391c29f3cea452067b22ecb5", "mana": 4, "whash": "ff13ffa3fbb074b9e40ba021a171c1400558ec417d08fc697c3cf815f915f90d", "card_id": "GVG_096"}, {"dhash": "e8c1cc3f91f60bbc376049c6b6cc7819e432cbc596493f923436c16df08b6130", "average_hash": "170073007902f80ff82dfc25dc38cc381c301c309c30ec31e831f816e007e003", "name": "Murloc Warleader", "phash": "a317cfc3fa282e258ee436961cd3cdb23919c96626679bc9e672999c698e188c", "mana": 3, "whash": 
"17107b80f98af82ffc3dfc2ddc3ddc381c381c381c30ec31e839f816f817f113", "card_id": "EX1_507"}, {"dhash": "fcc76d0c025e84b4b87ce7d98bb25de783ce7f9dff2b83478c36786de09b0130", "average_hash": "8700e3607940e8406040709030083006f003f00ff01ff03ff03ff03fe01fe10f", "name": "Nozdormu", "phash": "f3b8b60a3e0f3b639e616d90ef391a850cc6c9634e735699c785cec1c630c578", "mana": 9, "whash": "8f10c761fbc0e84070607091305c705ef00ff00ff01ff03ff03ff03ff11ff11f", "card_id": "EX1_560"}, {"dhash": "c8ff2cf041888b39374b4e36bccc7829e15296a5e96b1b976d38c377448cb93f", "average_hash": "f71ff39fe19fc81dc81bf81ffc3cbc388c308039003c0819c803c80e9005210f", "name": "Stranglethorn Tiger", "phash": "295f7b40db20be30bf3043c669c892c396646ce35a928ec36d245bf39b8e6ea6", "mana": 5, "whash": "f71ffb9fe39fc83dc83bfc3f7c3fbc7888308839003c281bc803c80f9017210f", "card_id": "EX1_028"}, {"dhash": "ec998d3702ec4c80f97ce3c107bccc581be17c86c118ba015cbbb0f6434d971e", "average_hash": "3706f367f147f04770c0f0c0bf077c817c83388339002100408a805e800e801e", "name": "Volcanic Lumberer", "phash": "d32af3b27972ff4c9f853c266e3084c906cd2e3edbf09a8126bb44b9990cda50", "mana": 9, "whash": "f716fb67f347f047f0c1fec1bf07fe817e833e833d88610a40cac05e801f801e", "card_id": "BRM_009"}, {"dhash": "12182970c6e6dcd8b1ef0396092c93d906b1f98ed7412e08593670e0e2496b32", "average_hash": "e16749c700c744c28c804f812f831cc15fc07ff07ff8fc98f8b0e8804810fe32", "name": "Enchanted Raven", "phash": "170fd9baf9f2e97e9f9e939d6f6474d601f38839e1b61286699984d410231b20", "mana": 1, "whash": "e967c94740c744c28e804f813fc11ec17fc07ff07ff8fc98fcb0e8a06a11fc32", "card_id": "KAR_300"}, {"dhash": "c09f9dfc30334f4ddcd4b2fb6186c31c0672fce5f90f829b00611f0e9cf7f8ce", "average_hash": "073003cfb39f407fd0e2f07be0cbc0430c031c017c38281808080808386ef84f", "name": "Forbidden Ritual", "phash": "896bbde04d164e9513993608676286461864a5c62b8dcd786b699d73739ddb7a", "mana": 0, "whash": "073883ffb3ff51ffd0fbf0fbf8dbcc430c031c117c38383c08180828386ef84f", "card_id": "OG_114"}, {"dhash": 
"88817d3cf0f383ce3f093b7c7e82f8a4e15b8387204f443c1339fc7561f3fced", "average_hash": "073c07e01387391f7c7e7c7c347c107800f800f000e000e000c080f0c0ecf0ff", "name": "Animal Companion", "phash": "e5fa4b45fb06b6aa2df136a0031a0316cb69c950cbf15cd81be51e8b91e67978", "mana": 3, "whash": "ffff47f1178f3b3ffc7f7c7c367c127800fc00f400e000e000e0c0f0c0ecf0ff", "card_id": "NEW1_031"}, {"dhash": "60c0fd9f60ff03c61f1b1e603e10f040e0b9c1e383860f0d765b48b4b320f07f", "average_hash": "071823c07913fc3bfe7e7e7c9e7f327f007e0c783c58f8507850205000400029", "name": "Avenging Wrath", "phash": "a5280b16f259562dbc525f8007432cccf1d88b6909d7731be3f44f995b3679e1", "mana": 6, "whash": "671c32f0f9b3fc7ffe7e7e7e9e7f367f007e0c7c3c5878507850207000400069", "card_id": "EX1_384"}, {"dhash": "ecf8cc8701870b1834f34e26b90c7239e472c1e50f8b2e16fc34fb6de69f4d3e", "average_hash": "170f339efb11f8184808d8335c361c381c381c387c30fc206c20e836c81fd91f", "name": "Goblin Sapper", "phash": "a307fe315b08a650cda19a94cbcb0cce64dbb3ac63c2bbed93f33c99b6824648", "mana": 3, "whash": "371fbb9ebb91f838d828dc3f5c361c381c381c387c30fc306c20e836e81fd91f", "card_id": "GVG_095"}, {"dhash": "c33a22f7ccc439097332c68c883d11fb40f6c5ec0ec11918aef11cc08187e37f", "average_hash": "fbe0f1c4e0dcd8de1b9c139803992b805ba07bb01ba0088060c071c00700073c", "name": "Onyx Bishop", "phash": "775d412771bad23eff1af2f9af23f7e609c2864239ed0cc3b38b0c10c3207c14", "mana": 5, "whash": "f9e1f1ece0dedcde9bde1b9d13993b885ba07fb03fb01cc060c071c00700877d", "card_id": "KAR_204"}, {"dhash": "0a8c1d3b70e4434f969dce330bc8fe80f90907a4f80bed171a2a2477c8def2bf", "average_hash": "073807c2038684066002f002f806f836f07fe40b300ef00ef006f800b019f01f", "name": "Heroic Strike", "phash": "73665fccde399f7325cb23a6635824d809f30e9313736938e5885e90db463d22", "mana": 2, "whash": "073c07e30386c7066003f003f806fc3efc7ffc0bf80ff91ff807f812b01df01f", "card_id": "CS2_105"}, {"dhash": "b8ce6d3fc2f8c8e979bba34e62258c991c323b6742449b097fb09fe17c47e12f", "average_hash": 
"87104362c341c08f48cedac73acf7dc44d064d820f80088070c07880f800fa02", "name": "Ram Wrangler", "phash": "538e1d8ff3e03e3d0706c69089a044660cf266c9d9d93b073b67372767ece4b8", "mana": 5, "whash": "cf104f63c3e3c0af4ccfdecfbec7ffc4cd86cf822f800c8070e07880fa80fe06", "card_id": "AT_010"}, {"dhash": "028c0d7af0e00b85bf207e03fc33fa43801f807fdbb7a6cf843e197a23e4fec9", "average_hash": "173c01e6039f007fc07fc07f80ff80ffe07f007f407f407f007e007c00fc887c", "name": "Frostbolt", "phash": "e9ceda955673d6c4d77b65f623e72099039c0d228e513a917b803ec1db2c5b18", "mana": 2, "whash": "779e01e6019f007f807fc07f80ffc0ffa07f007f407f407f407e00fe00fc88fe", "card_id": "CS2_024"}, {"dhash": "4cf6ccd811b38b46371e4f78ba987421f962f2c5e489d3170c37146e0898f139", "average_hash": "f30ff30ff91f781b781bf81df81c7836b83fb83f383d383e781e381838181008", "name": "Target Dummy", "phash": "e915db407e4959901eb263926e9a2408180cb6843b63cf3cefb3d686fbb63e4b", "mana": 0, "whash": "f30ff00ff91f781bf81bf81df83c7837b83db83f383d383ef81e381828181008", "card_id": "GVG_093"}, {"dhash": "2aee0dd8d230a94056c2acac1f19f9e372c7c19c8531070b0e3636e66c4cdbb9", "average_hash": "f717cb57c3d3e893c09102901b38389c31bc31bc33bc30bc309c105c10180018", "name": "Flamewreathed Faceless", "phash": "e9c1bb2e9beefb94efe346e32ed022f40170238859b214151ca85c632ee3aee3", "mana": 4, "whash": "f71ffb5fcbdbe8b3c09302901bb83abc39bc31bc73bc78bc30bc105c18181c18", "card_id": "OG_024"}, {"dhash": "828f5d7e60c00f243cd059ee961068c7e7dac8819f6576c2ff9cfc4300b1fc8f", "average_hash": "ffffc3eff18ffc7d4071c03cd81d70687c723e7066306420740074020000f007", "name": "Mirror Image", "phash": "b3d639cb5bfb97bc5b675a6ba4b4a62ca4cc24d0439b1a165bc456405b271ba0", "mana": 1, "whash": "ffffc3eff1cffc7d4071c038f81df068fe733e70767164207c0074032000f007", "card_id": "CS2_027"}, {"dhash": "68d98d2dc34a8ef3788c303b46e0d1863edb6de1992452498fbbe4e539486338", "average_hash": "17171f66836d89e5bce336c27fc03ac005d06fd833c000c430d830de7092fe18", "name": "Void Terror", "phash": 
"c7b89f9ab9d77ce12e04c7600640996c1ce3d39c23eb3ebcd9b0a3cd190e03d9", "mana": 3, "whash": "1f171f76836dcfe58ce336c07fc03ec007d04fd837d400e470f830de70b2fe18", "card_id": "EX1_304"}, {"dhash": "dc9f3c7f00f6158ea3182e70f8c5e113cf6c9c8373278c5f70accf58229ba17e", "average_hash": "0780630073067007f01ce838e030c8719077703ff03ec03f003f603e001e001f", "name": "King's Defender", "phash": "e9d476885e2d9d601f03341c63008cb41cc122a709d83e35cf6c8ef97ffafe65", "mana": 3, "whash": "478067807306710f701ef83cf030e871d277f07ff03fe03f003fe03f381ee91f", "card_id": "AT_065"}, {"dhash": "68d70cec41f08b19370350c6a01c4cf9d1629fe50fc91a160c347f68c091c13f", "average_hash": "c797ffb9ebbdc8bfcc3fdc085c403c401c601c200c202c200400f821f9001100", "name": "Booty Bay Bodyguard", "phash": "335f79817b01b7d4d6a112a00d8409e3cc60dc361ce69ee6bf231c6faf933cb2", "mana": 5, "whash": "cf9fffbdfbbddcbfdc3fdc0add403d401d601d600d603d600c20f8a1f9807100", "card_id": "CS2_187"}, {"dhash": "f8cf8dbb42d0bd057b23c6ee9d5960930126026d7e92dcc9a3b207e468089f31", "average_hash": "2710bb71fbff78ff7ed81fd89f7adb209b000b000fc003d800f009f083908711", "name": "Rhonin", "phash": "3f07e34eb9b8f6d4be7285751e18460f61d07e4e8bcc7aa145fc62b077084326", "mana": 8, "whash": "2f10ff71fbf77c7f7ed81fd8df7adbe097100f000fc003f000f409f083900731", "card_id": "AT_009"}, {"dhash": "aacd6cc7d1980b7036364c8cb8f47381e60210155c0a975464b3c0694d96373c", "average_hash": "f717fb9ffb9bf839f83ffc3ffc31fc31f8207008240140034001000000000118", "name": "Refreshment Vendor", "phash": "f3edf3077c055ee8773492f00ce10ca2a18c8638268933e33b937b9b7d4a9e34", "mana": 4, "whash": "ff1fffbffbbbf8bbfc3ffc3ffc71fc71fc727408a407c00b4001000001000118", "card_id": "AT_111"}, {"dhash": "0280ed3fe0ff11fe19f883e5fe60dc01ff93bc27e30f087e2098dc7081e17ccf", "average_hash": "ffbe01e03380fc03f60fe00ff04ff0f7c07fe07fc0ff80ff00fb44730043004f", "name": "Flamecannon", "phash": "f9ea5711defdad3ef99e37e06942096c830903f64bb0d89d63155a04db203ec8", "mana": 2, "whash": 
"f7ff03e01380fc03f60fe00ff04ff057c07fe0ffc0ff80ff80fb40f20043004f", "card_id": "GVG_001"}, {"dhash": "6ac624bcc1688ad7b4af5377a59c5831b7c269255fcaa804c638e46b9891312d", "average_hash": "97948fb10ba00da40466044e0d469d41bd59fd48fd0ff8463004300331033100", "name": "Worgen Infiltrator", "phash": "d73f1d97bcd473e1a7e99f20793649c8c6921b036c32cbe0b981360baf244ea2", "mana": 1, "whash": "9f948fb10bb00fa40ce60d4e8dc69dd1bf59fd48fd4ffd463c4434a339813320", "card_id": "EX1_010"}, {"dhash": "5ada8c3901e7cadeb57b5127bfc84017ab3e51dda42bcad61f39f46ca191633b", "average_hash": "b794bfb533a7203ee00fcc07cc1ec04dc005d01430250008e018f01071002100", "name": "Sen'jin Shieldmasta", "phash": "73f3df36e63cff91bf8133670f9233a42c094cd8276416ccb3c433039cc04d39", "mana": 4, "whash": "af94bfb53baf64bee4bfdc07cd5fc14dc155f15531650568e818f0b071803120", "card_id": "CS2_179"}, {"dhash": "c201fd7f60fec3491f07b6e0691074b8e95cd6c1accb0de60fcedb143739fe77", "average_hash": "073003c00304840fdc0778181e1f043cb8311c300c3c00344e304e124e32fe3f", "name": "Crackle", "phash": "2fb4d26b1f4cfcf3af906ea4a1730389995208f48bd8e205e3496ea65b22bf91", "mana": 2, "whash": "0f3003c02304850ffe07fe199e1f5c3dfc391e350c3c8c344e307e136e36fe7f", "card_id": "GVG_038"}, {"dhash": "caf695c902b37d73f2c0cc9c98116127075e0ebd3872f3488cb199e33387e71c", "average_hash": "771dbffd3fd9bcfabff31fb9033903a10303830103c003c000c001c007800720", "name": "Confessor Paletress", "phash": "ffb9a9f947fa525aff5a45962f9093b64064762628a66a265f061200f78c5754", "mana": 7, "whash": "7f5dbffdbff9bffabff39ffd0ff903f10343830103c003c000c005c007c00760", "card_id": "AT_018"}, {"dhash": "601efdf9c1ce97016f32be44649be026814424f1d9ca1305268e7f7ecef8fcef", "average_hash": "073003ce3b3ffc7f987eda7e827d127c0260007000780038001800300030f01f", "name": "Ancestral Healing", "phash": "ad0b690252fd96fcd66036912904a4660312ad9d29c8a7da4b66ec35ebb57edd", "mana": 0, "whash": "0f7803df7bfffdffd87ffafee2fd327c0268027000780c3800380c380038f83f", "card_id": "CS2_041"}, {"dhash": 
"e880fd3fa01f00fe04fc09f817e02dc07f98ed40dab1fc031027004c0098fc3f", "average_hash": "f77f7fe0ff80ff00fe00fe00ff00ff01fe01ff00ff00ff03b80100000000f00f", "name": "Starfall", "phash": "7354c7b09cb33dc7798c9c180b202c640b500e3443670efd4bd31ed75bcbfe9a", "mana": 5, "whash": "ffff7ff0ff88ff00fe00fe00ff01ff01ff01ff01ff01ff03f80100000000f00f", "card_id": "NEW1_007"}, {"dhash": "aab4454312be3d63727e80fcc8c99f933c27714ce4908a8b1f362ce4c08c8319", "average_hash": "e70ff14579c0f8d038803b80d980d983d98ec19c039f00bef89ff898701d320d", "name": "Guardian of Kings", "phash": "638bb7eef9f879d61bcb33371c906169193438a533313a12df41630cec60c6cd", "mana": 7, "whash": "e70ff14d79c078d038c0bb80dd80dd83d98ec19c039f00bff8bff0d9f01d330d", "card_id": "CS2_088"}, {"dhash": "e67f09da06c43b86777e8c641008e61b6cf58aee15c17a0aa234dfedac50192f", "average_hash": "f341f15bf83a78be38bd7fbedd620ce00ab428b02920382020a178b038001000", "name": "Menagerie Warden", "phash": "a117690b72e9b63dbe6793c2ff936d910c99964b9836da866d9616cc05619e60", "mana": 6, "whash": "fb61f97bf87a78bf3cbd7fbfde634ce28af628b0292038a028a178b138203028", "card_id": "KAR_065"}, {"dhash": "6adb852f039b4ce6f984b3494e73e7e60487681903743e09ddbb32e2c3448733", "average_hash": "d730bf74ff63f4ebfcefb7c637cc33e63fc223d063c069c040e000c000800621", "name": "Blood Imp", "phash": "f7b8ebf23ff77eee8e644de26e800c2003c432493cc99a0c5b0a7d86ab8993c9", "mana": 1, "whash": "ff30bf74fb61f7ebfced3fce37ec72ee3fc223d063c069c048e004c000a00621", "card_id": "CS2_059"}, {"dhash": "caf18cc7911f0a85343c515cba3877e1e7c2ce2598c9309607347e689c91313f", "average_hash": "f71e7b9c7b8cf8077802fc003c303c30fc31fc31fc3018203800380038013100", "name": "Dread Corsair", "phash": "e39d63327cc83df03ffdc6e278ec133808c690c423b413923e23e662ff88f634", "mana": 4, "whash": "f71ffbbe7bbdf887fc07fc003c707c70fc73fc31fc313c203c20380039013902", "card_id": "NEW1_022"}, {"dhash": "12b6384d61b35a3cbbc2560d28db4d36b36c669dcea299dc00b94162d396e57f", "average_hash": 
"cfadc3a9e5bb645be45bf65fd358925d924db0488141804100000000010083ff", "name": "Eerie Statue", "phash": "39f973dc3d1e76db7f931ed799a6c6f121aa669a4524c2f019e01a3c118426c3", "mana": 4, "whash": "cfbdc7afe6bb747be4dbd75fd35d935db34db348814980410580000005218bff", "card_id": "LOE_107"}, {"dhash": "0af26dcc92032917776fecb41868b4e97093e1644381ae0bdc3638ec1048abb4", "average_hash": "e70423481bd838b038b83c985f3c6c3c6cbc7dbc7d2878b470bc681c00101000", "name": "Draenei Totemcarver", "phash": "6795bf29feb07f5e3b7a298687d1607c40f402864fe654a449281ca5a729bbe2", "mana": 4, "whash": "e7143949195838b838b83cb95ebd6cbc7dbe7dbc7dac78bc58bc787c10141004", "card_id": "AT_047"}, {"dhash": "1adb6c2ed1cc8a1f37324cecb8b879e19b227705c6ca7c14a9395f63cc8f313f", "average_hash": "e702c184cb85c809f838fc38fc18fc19fc0dfc0cdc04dc07e0037801f803f11f", "name": "Mana Wraith", "phash": "63ff3f16dcc45be6ebb0a62c25246328218c0749de48df487f08cf01e703ff38", "mana": 2, "whash": "e712c984cb87c809fc38fc38fc18fc59fc1dfc0dfc05dc07e003f801f80bf11f", "card_id": "EX1_616"}, {"dhash": "0afeace111800b48b7764cbdb0485b91b522dfc5f28be50781360e6c589cb13f", "average_hash": "e717a38ff389301810188019d008f8105808d006183e183e183e38143811511f", "name": "Frigid Snobold", "phash": "ebe53f48fb19db9633891e9719e286b02cecc66066626699634234a5764a6ece", "mana": 4, "whash": "e71fab9ff38f303a1018981dfc0af84af818d806583e983e183eb81e381ff91f", "card_id": "AT_093"}, {"dhash": "02808d3f34e1018627b05e6f9f5c34a173f283263d4df298cc371c6ec190f02f", "average_hash": "ffff09f08187e00fe01fd01fd93fdd3fc13ff9af6887440fe01ff01830008003", "name": "Power Word: Shield", "phash": "795d9da2f6e9179ea7f32df5830e61b14bccc38461315a6559805278796b9c28", "mana": 1, "whash": "ffff0bf08187e00fe01fc01fd93fd93fc93ff9af6887c00fe01ff018b0008003", "card_id": "CS2_004"}, {"dhash": "dad8a46f41c50ab1b50e531da0f07f61fb42e0458b6b16d72d32f3628c85213b", "average_hash": "0716259681adc00c100f38067c00bc2fbc3f1c3e403b401d4408980bb8031901", "name": "Clockwork Gnome", "phash": 
"cbdb5fb2fee53dcd8731d6ac7e84896308909341266e23c19991dc189e07cee4", "mana": 1, "whash": "0f1e2fbebbadd48d140f3c063d40bd6fbd7f9c3e413b407d641c980bf9031901", "card_id": "GVG_082"}, {"dhash": "78ce8d7912f63c007b4b86d48891520b35f638ed639286083931f4e6b1816712", "average_hash": "e701f363fbc9f8df48db3bd99b1103000b0003044bc45bc0f0c061c043006700", "name": "Bolvar Fordragon", "phash": "7789ab0933f4d7963e1234312f840c2c00665926cbf2638b779f765ae62bd6c5", "mana": 5, "whash": "ef11ff63fbe9fcdffedb3fd99f510bc00f00070c4b847bc0f0c0e1c043806700", "card_id": "GVG_063"}, {"dhash": "f88ff9ff00dd6ff79ffc39f07880e5828b0c96394662bcc7f98d791cf23a3c7d", "average_hash": "07000300f10df07dfe7ffe7ffc77f065c005e041e621fe30fe387c3de83ff83f", "name": "Infest", "phash": "2390fea9cc6d3e645e820fd41f600c980fc1b09fe57a0bb74bc94b68e3945be3", "mana": 3, "whash": "07000200f10cf07dfc7ffe7ffc77e025c005e040e620fe30fe3a7c3dc81ff81f", "card_id": "OG_045"}, {"dhash": "01c08f3fe0ce0983bd145c88d130a4651b9a3516c32d98db70f5856102963c0d", "average_hash": "ffff0bf8e1dff83f707c30f031a43085218fe08fe00de03f801f8087010f810f", "name": "Silence", "phash": "f959a7a01d07390cd6589db165c1436269802bd21b673b9d73f8d8695bd4dc72", "mana": 0, "whash": "ffff0bf0e1dffc7f707c30f031a631c5218be18be089e03f809f8087018d818f", "card_id": "EX1_332"}, {"dhash": "08d6aca801534a34b5c3560cbd20721dc7f2df05e60b0c1730b6c1688b910f3f", "average_hash": "f715a3bdabb9b00fe003cc1bbc3fe0398031f83dfc1cc8358033800200030102", "name": "Big Game Hunter", "phash": "b97ddb99de982793ef809c2443261262a49062c26f629fe76e6735ccb3921663", "mana": 5, "whash": "ff15afbfabbbb08fe403ec1bfd7fe07b8075f83ff91ce8358033800201030103", "card_id": "EX1_005"}, {"dhash": "7e3f88fc00fb15f027002c02f004e179cbe0c6c339c7e71f5f2f945e309b4176", "average_hash": "c707e30ff21ff81ff81fe837e0231021183108310037d83ef83ff81ff01fe00e", "name": "Truesilver Champion", "phash": "e9351b187b249ef03e5896761cf6a3bd56f784a426e92aa596b09c788cb87b20", "mana": 4, "whash": 
"c707e30ff01ff81ff81fe83fe823182118310031803fd83ef83ff81ff01ff00e", "card_id": "CS2_097"}, {"dhash": "2cfb2dd042e0c900f681ac031743d8b5374f4e0cb011214b06b48de03341e3be", "average_hash": "c70fc31fe1dfe09fe03fe0a7e21ff01eb08fd183013200b36090600000000000", "name": "Fire Elemental", "phash": "792419232e8a9f634e6366089e5492d208f16e0c73bedd95c6f636f79b29caa8", "mana": 6, "whash": "c71fcb5fe3dfe0bfe0bfe2b7e29ff29fb19fd98783b7a0bb6090604020000000", "card_id": "CS2_042"}, {"dhash": "8283fd7fe0e083805d369eef3e1e086ce0b0f0a3e147830f0e988d6818dff8a7", "average_hash": "f73fc1e1fd8ffc1ffe4c3e7d9e3d1e1c2e0c2c7c247c207c20780820003c201e", "name": "Humility", "phash": "6d5e33ebf0fff5ac1ef7aee12dc08e964b78097313510b9c534c4830538359a0", "mana": 1, "whash": "f79fc3e3fd8ffc3f7e7c3e7dde3d1e1c2e0c2c7c247c227c20780820003c000e", "card_id": "EX1_360"}, {"dhash": "03c08d07f406013b70eac7949d693f9378029384646dc99a3b25fe4a4097da3f", "average_hash": "ffff0ffe03de80184009618f018fc18fc1b48185c184a188b983f9838186e18f", "name": "Shadow Madness", "phash": "7957ad8867202532d9d6d3cd5bd8a332234b32e2db89389473e158f9dbac1c62", "mana": 4, "whash": "ffff0ffe03de83184009418f118fc18fc1b5818fa185e589fd83f9838186f18f", "card_id": "EX1_334"}, {"dhash": "889959f7a0cccf18df673e1d60d1c9b2bf4550cb0006955f7f3bfcb881672ec0", "average_hash": "07004300411e007c00781e783e601248927f987c104800f8007fe07fe0ffe47f", "name": "Kill Command", "phash": "adea7e0579defe25d31086680649390524b9526a875229b1d3f35a0d7d46dbf2", "mana": 3, "whash": "07004306411e407c00783e783e683648b27fda7f10e810f8b0fff0ffe0fff4ff", "card_id": "EX1_539"}, {"dhash": "f8dfccf311a68b4c36994922a2c44f2996726b85fc4a3097c3370f6f788cfd38", "average_hash": "c707fb0ff91ff83ff8266c06ac018c0304009c10841f0437181f081c001cd11c", "name": "Light's Champion", "phash": "4b3ef34469819de136639e9a363426269cc63c3927a1fcec6d8e466ba6a3ccd0", "mana": 3, "whash": "e717fb9ff99ff83ffc36ec06bc058c434402dc11ac1f04371c3f283c201cf11c", "card_id": "AT_106"}, {"dhash": 
"e891cf6f80fd38e07392e5248b111c633f867dcc9090a0894333efe7818f031c", "average_hash": "37c453e0f1c0f0c7d8c7c386038603803380139e038f018f809fe1cf830fc30f", "name": "Darkshire Alchemist", "phash": "dde7e7e21bdaa459fe1804309b04c1b40018eb3131747b347f1f1e31ff8c1ad5", "mana": 5, "whash": "37c553e5f1c0f8cfd0cfdb8603860380339c939f038f819f80dfe1cf830fc30f", "card_id": "OG_234"}, {"dhash": "58f3ace441c8cb90340353bca7604b59a8e2e0659dc918960130f470fb87e72f", "average_hash": "c71feb9fe3bff03ffc1f7c0c7c4e7c433c219c305c301c200c000000c103e107", "name": "Ogre Brute", "phash": "9b5f69a13911a6819bc8c6486f8624e293b4d9e46e62d6d9e6e9b1e6d9c01bca", "mana": 3, "whash": "ef1fefbff3bff4bffc3f7c0c7d4f7d433d611c305d301c200c000000c103e107", "card_id": "GVG_065"}, {"dhash": "e8742cd211244b08b6106cd1b88a7037e36ec69d142a81c51ebb3d62db808721", "average_hash": "d79fefbfebbff5be74383038037d017c03788178013c01580050000001800120", "name": "Nerubian Egg", "phash": "2df569c55b4e5b2b9f934c98469022a024c259929f83fe81ff03ff01bf099ea6", "mana": 2, "whash": "ffffefbffbbff7be74fc35fc07fd03fc03780178017801d80050059005800320", "card_id": "FP1_007"}, {"dhash": "68f3cdcc929105df3b0476029c4c7e9195726284e0cb03173930c26a4c82113d", "average_hash": "c70df35c795f385ff81ffc1f9c36c83e8c1e9c1f001e181898019801d8003108", "name": "Leeroy Jenkins", "phash": "4be3692af6c13ec93e5216c66bb013134c488e939e99d6c9799c13cf2da798f0", "mana": 5, "whash": "c71dfb5ef95f385ff81ffc9f9c3ecc3e8c1e9c1f901e1819d801d801d8007108", "card_id": "EX1_116"}, {"dhash": "02c00d1ff4ef09fefffd7b30c6003c01c042309eeb3cd1db80b20d4f1b9afe35", "average_hash": "ffff0ff003c2f07fe07ff5fff1df61bf01ee018c008c009e008c000c012ce1ff", "name": "Mind Vision", "phash": "fddfb728b6b45ed9e777699653a806daa1706b1553c15b6053525a9459a11428", "mana": 1, "whash": "ffff0bf001c2e0fff07ff9fff1ff61bf01ee018c008c00de008c008c019ce1df", "card_id": "CS2_003"}, {"dhash": "e0009dfef0f9e3a3cf4d9e33353f4af8d0ecf6d9b92317c68f0c93992f3f5c60", "average_hash": 
"0700010001000018003c207e0e7e0e7e1e7f3e7f3e7ffe7ffe7fde761e7e3e7e", "name": "Feral Spirit", "phash": "8d00fc7d7677f3818e201706ed1829812a4d0fe3393b3b57e2864f393ef4ebc0", "mana": 3, "whash": "0700000001000010003c207e0e7e0e7e1e7e3e7f3e7ffe7ffe7fde761e7e3e7e", "card_id": "EX1_248"}, {"dhash": "9ce02dc362e6fdfcf311ef23da43bc87600ec9dea231014a26b69fe43e08f135", "average_hash": "c70fe37c61d0e0d8e0fe43be03fe03be03be03be03be01b000b038d03b007a00", "name": "Coldarra Drake", "phash": "6d84390eb28a9c59cb438cb35bc249a710d043f379f496d1ebfd9ac5d381699c", "mana": 6, "whash": "c75fe3fc63d0e0fce0ff43fe43fe01be03be03be03be01b000f038d07b10fb00", "card_id": "AT_008"}, {"dhash": "d286384f21be5a76bbe4548929915332a7644ccd9882331d6eb8f862e397c57f", "average_hash": "efa0e7a877e0b451b051b651bb51bb539b5099501950385060004080c521c3ff", "name": "Summoning Stone", "phash": "b76db7397d5ee6c73d275658adb11ad3012116b40d8226f01da08ed415e46ece", "mana": 5, "whash": "efa8f7a877f0f471bcd1bf51bbd1bb539b529b501950385065804000c521cbff", "card_id": "LOE_086"}, {"dhash": "6aff855002c98c917b4fe7f80891f16223c542ce2f38fb4084b9d9f2b64f0312", "average_hash": "f71fff7ff96ff87d3cdd7efc3f791af93ef023903388330c00c400c2009b0400", "name": "Ancient of Lore", "phash": "e7b9ebbbbbe6fbf2af65cfe46f90048d0090b3c13e1c6b04c6e244609f023251", "mana": 7, "whash": "f71ffb7ffb4ff87d3cfd3efc3ff91ef93eb02a903388310c20c400c200990000", "card_id": "NEW1_008"}, {"dhash": "4cf90dc003084534f849a2e55c5f81bf3d63418ee614c90b1a3612e4344c67b8", "average_hash": "f71ff31fe15e407dc0c943c143f118d03d9c3980398c10bc10fc0858001c060c", "name": "Blade of C'Thun", "phash": "69a3fb2a7b7ecb73db60b9c47bd0128d01d1b2c89cec4d057e2b56c4acc11998", "mana": 9, "whash": "f71ffb7ff35fc07d60e943c142f15ad03f9c3f883f8c38bc10fc0878021c060c", "card_id": "OG_282"}, {"dhash": "fc3fdcff80bf156627d86ef2dfc8b9337341e482db076f1f982f605400a90170", "average_hash": "0700c30df10ff81ff81ffc1ffc1ff81ff01bf01bf013f03fe01fe01bc0030000", "name": "Rallying Blade", "phash": 
"636d5f005e127ed01e011d978654946825a5822603297e96b7b4ee39cffaf6f5", "mana": 3, "whash": "0700c20ff10ff81ff81ffc1ff81ff81ff81bf01bf01bf83fe81fe00be003c000", "card_id": "OG_222"}, {"dhash": "8ae684c941928b21364e48b9b3d473b99b0266d588ab714487392c77528c8139", "average_hash": "f79fffbf7bb9f83b1c311c302450e472984dc0398031100e3804200001180110", "name": "Elven Archer", "phash": "e3f9b3cd6f9c7bf23f6946b23cf00989cb02342c2c044fc3f610ad046d29d69b", "mana": 1, "whash": "ff9fffbf7bb9fcb93c313c302d58e573994dc95b8571114e3804308429980118", "card_id": "CS2_189"}, {"dhash": "0af0a5f1024485bb7ec72d8c18197132606ef29de4231949363745ec8c4cb131", "average_hash": "e70c215c395e083eb8371c37983b9039903cb03c903d3019a09d182918198011", "name": "Gahz'rilla", "phash": "e9b1cbe65e9e6f7a3e4393cd93f0d2d232722e833912462619847920d37039b7", "mana": 7, "whash": "ef1e335c3b5e087eb8371eb7983bd939903cb13c90bd301df0bf38b918198019", "card_id": "GVG_049"}, {"dhash": "0ac6ed7fb0c96396462899e232006511e8828b7c9ab8a75338a71142e6a6f81d", "average_hash": "073e83f1fb8df83df63dfb6cfb67cc230030c032101bf018c01c88088c0ce00f", "name": "Poison Seeds", "phash": "2b53b7805f7bbc6d9cde170a6b802c3429f00ef75967e59243c01b8a7319de8c", "mana": 4, "whash": "e73f87f1fb9dff3cf63dfb7cff67cc660470c0301019f01c801d80088c04f40f", "card_id": "FP1_019"}, {"dhash": "38e26cccd1970b7f34c05919b3c66a1df772c8c5a08bdf17b73b6e667c8ce13c", "average_hash": "871d0b9c1b98181a8c0700070022841b0c3b9c3c183c183c181df81ff80df91c", "name": "Darkspeaker", "phash": "4b4f1c4063830f937b301898f3d0930c33a63660c7a6ff6c9ccd1bd9f366ce4e", "mana": 5, "whash": "8f1d0fbc1bb819ba8c3b04078062c47b0c7b9c3c1c3c1c3cbc1df81ff91ff91c", "card_id": "OG_102"}, {"dhash": "8ad18c6391df0ace34984f33bf6c6799fc32f965e2c9cc972136836f0e9e1938", "average_hash": "f706f106790c780e7806603f0c3f4c1fcc1f8c3fcc3fcc3f081f081e001c000c", "name": "Hungry Dragon", "phash": "4997f3d0f6b03bb77eeb386cb070c824536209a32c5c38e176436e1a3b196f93", "mana": 4, "whash": 
"f716f986798c782e7826603f4c3fcc1fcc1fcc3fcc3fcc3f083f083f001e001c", "card_id": "BRM_026"}, {"dhash": "e8f48cc1c18b0b5f36b64c7cb3c04685b532cb65068a9d156a338e60588e2139", "average_hash": "f71f3b9f3b9c383cb83efc35f826f006c012c8301c003c081802380238191118", "name": "Salty Dog", "phash": "e3676994cf995f483880934013c399240c61471836e6e7fddea63ce7f9060fe7", "mana": 5, "whash": "ef1f7bbf7bbc38bcbc3ffc37fc76f856c85ac830dc003c081808380239191918", "card_id": "GVG_070"}, {"dhash": "da9ca5796427789af167c01a81950b4b3ab6e44cce719d890332c9e4d9893317", "average_hash": "0ff025f121d804ec83ee9fc33fc02bc00bf80bf983b8c1f860d821d023002700", "name": "Shadowbomber", "phash": "fd3dddfaaef82dcdff98922c3b418cc100425e3246902a44de61ce4c6dac6e71", "mana": 1, "whash": "0ff02ff123fc04ec87ebbfc3bfc02bc00bf90bf983f8c7f860d821d033903710", "card_id": "GVG_009"}, {"dhash": "08e26de492c10906ff7eaee558d3b187621dc55a80f5758a29bf06eea10d4f9a", "average_hash": "f71de31d095e083c203850389038c0bc40bc00be003a003e803f003c001e001f", "name": "Warsong Commander", "phash": "2de5779b5b3b9b1a3b503361866064f8095ace728af3cfc6e7f0d9a1843834c1", "mana": 3, "whash": "f71fcb7d1bdf08bc2078727890b8e1fd43be01be02be083e80ff90fe023e801e", "card_id": "EX1_084"}, {"dhash": "e8cccc1351ec8add37034f0cb8f06065980a027526cadf167637e96cc8990136", "average_hash": "771b7b82f982f80efc1f7c3e3c3ff027f004200018007808b819b813b0039107", "name": "Bomb Lobber", "phash": "c3376fc69f0576c8fee089990cc908ac490a9be32c30636e3ea3fb9cd3c22c67", "mana": 5, "whash": "771b7ba2fba2f80efc3f7c3f7c3ff867f04460001c007c08b839b813b9079117", "card_id": "GVG_099"}, {"dhash": "c087fd6940dc03fd9dfb7fc6ff16fb69c6f3dee9b1f304276a4e181e303068a3", "average_hash": "0700030001008821c417e00ff05fb87ffc3ff83ffe7ffe7afe7f6e7e72327033", "name": "Bane of Doom", "phash": "a9045efb7e400f22d0197f2832a4381c0be7ad47c91316694b7ade9c6ee5f3e1", "mana": 5, "whash": "0700010001008021c017e00ff01fb83ffc3ff83ffc7ffe7afe7f7e7e623a7033", "card_id": "EX1_320"}, {"dhash": 
"98dc6db2d2c909377ffd3c9efa28a75b0cb73142fbe405885a38edf0c20785bd", "average_hash": "07174377c3f638f83cf98cb77ef73be70d8f4f8de686fc02ecc1a8c1aa01041c", "name": "Frothing Berserker", "phash": "9f0eed863e23df66dfc4939829e00619c4e9e6e106b31797597639e190a571a4", "mana": 3, "whash": "07174b7fc3d628bcbc7b8c777cb729e70d8f4d8de686fc02fcc1a8c1aa01001c", "card_id": "EX1_604"}, {"dhash": "48f8cdd112a24d22bbc574db83b07d65b2dac4e4df0bb9d76236c46cc9993327", "average_hash": "f70fb37f79de305f805ba497b40ff03f70183838bc1aa01ba01bb01120032101", "name": "Sneed's Old Shredder", "phash": "cb79cbc2ff21e346f39219301a3624f138c8dbc1461a5eec67a7178fb82d5648", "mana": 8, "whash": "ff1ffb7f7bde305f945ba497b40ff03f7018383abc1aa01ba01bb03331032101", "card_id": "GVG_114"}, {"dhash": "c8839d2f60d081e1271f5e34b049509ea17802c786e08d815b00a68f4914fe3f", "average_hash": "ff7ff7e5f38fec03bc0b5d72d97843181e501cc01e009f009f0002000000fc0f", "name": "Demonheart", "phash": "0747fb0cfb181eb06da2240787678c210b3c3c0c43c727dcfbf1feb14b0b1f27", "mana": 5, "whash": "fffff7e7f38fef07fe0b7c6adb7843589f401cc01f809f009e0002000208fc1f", "card_id": "GVG_019"}, {"dhash": "38c2ec3c91e90a03375268b9b0e675c1e69a39354d2a8f8516326564c68fa539", "average_hash": "87978fb19ba7f9bc7c3c1c200c3065306438601c300e1c1f8c11c019811d011a", "name": "Mad Scientist", "phash": "6dffbfd31b235b293eac96b5cb40694889a449604b02db40df246f0ef6c496e4", "mana": 2, "whash": "8f978fb1dba7fcbd7c3c1c200d706d70657a615c314e1d5f8c31c499c11d411e", "card_id": "FP1_004"}, {"dhash": "fa8fcd1e92f1bc3779eee3ccc1b1930324060b5e5730dc4830b3d1e48b013732", "average_hash": "27707fe0fbe3f8c3fcc1ffc1ffc1ffc193c083c0c380c3c080c001c003000700", "name": "Goblin Blastmage", "phash": "f729c7cbacf87c76ee3447bd2e10048c00d84b980cb13a01abf7cad59f21deb4", "mana": 4, "whash": "2ff07fe0fbe3fcebfec1ffc1ffc1ffc1dfc0c3c0c3c0e3c0c0c081c003800700", "card_id": "GVG_004"}, {"dhash": "68f8ad00527f44f8b8e073d3878e4f1fbeae7f1cd8eab14566bb88763380cf21", "average_hash": 
"8f16277203e08141c0c3c0c7c04fc14fc01fe00fe00fb01b601b601a41000100", "name": "Loatheb", "phash": "f96c9fb2e614db215ed249cc1b3322c68b3132e28ba5a7b1d616da69e702cd2c", "mana": 5, "whash": "cfd66f7a2be0c761c4e3c4efc15fc14fe15fe10fe14fb15b705b649a41800120", "card_id": "FP1_030"}, {"dhash": "02808d1ff4fff1f4efcf43141f003c67418c82f00dcd171a6cb6396f60f4c88d", "average_hash": "ffff09f0c1c3e007e007ff8f7fbe37fc07f807b00cb078906080e08cc1c8e18f", "name": "Inner Fire", "phash": "75dd9fa071efa4fd2d5363fec37c1b52e11029035bc453c45b597860d3031c8a", "mana": 1, "whash": "ffff09f0c1c3e007f0077f8f7fbe3ffc07f007f005b078906080408cc1ccf18f", "card_id": "CS1_129"}, {"dhash": "c8e0ad01425b05b47a248cce13996d339a6265cdca8284890d331be2324c7798", "average_hash": "370f337b3b603875b0e59ba03f6018481f1c1d1c5d9e5c9c4cdc284c20183618", "name": "Edwin VanCleef", "phash": "4f0bbbb0fc3c4fcfee611d0e1be1193901911bc8ef9c5ecc5ec33445c23259a2", "mana": 3, "whash": "3f1fbf7a3be07875b4659fe03e601e683e1c1f1c5f9e5c9c4cdc284822183618", "card_id": "EX1_613"}, {"dhash": "9cc6ec6901de4a3035c25281a71e5e79f8e28dc5238bcf17b737fe6dfc9b1134", "average_hash": "071803807389f00dc807d003c0033c063c3c3826182cdc3c703ff01ff0077106", "name": "Frost Elemental", "phash": "cbdd3e087f106605e62438201ee689a30c327973ecd863fbe6c9d36cb34db949", "mana": 6, "whash": "0f180fa073abb08dcc0fd007c0477c463c7c3c36383cfc3c783ff03ff11f7107", "card_id": "EX1_283"}, {"dhash": "03808d1fe0ff197727f84f139d2c7a21e0430cb5d0ec2f08f8d30166438dfc3b", "average_hash": "ffff21f01380dc007801e01fe91f3d7f35f925d9243afe0ee28f0084010ae83f", "name": "Divine Spirit", "phash": "537da78e56c36de0ad3f599629540ca843c30e5759875be119f1fb08d2ba1c68", "mana": 2, "whash": "ffff09f01180dc007801f00fe93fadff25f921d824bafe8ee28f008401caf8bf", "card_id": "CS2_236"}, {"dhash": "eaf04c6391a3023f370c4fdab0246dc9be32f345e0cb029715368569fc930924", "average_hash": "f70f7b8f7b0e380eb83fbc3d7c2d7c0d6c06d83dcc1d6c103830182018001900", "name": "Blackwater Pirate", "phash": 
"c3dffb61fe617f8e1bce253839b191e0122c1e4aa15173646c6499c19b83db58", "mana": 4, "whash": "f71f799ff98e380eb83ffc3dfc3d7c4dec0fdc3dcc1d6c103c30182418021900", "card_id": "OG_322"}, {"dhash": "fcc9ec3f417fcaf0b5e053b9ad825145aea27c156c2a9314efb180634c87d93c", "average_hash": "07910fb063a0e4a3e44bf40df45771536855700f100f4007f00790069106991c", "name": "Blackwing Technician", "phash": "fbfdb7048e33de8c6f8513a00dc98828238c9b6963727bde8e99f36866223362", "mana": 3, "whash": "0f910fb06ba1f4a3f40bf41ff55ff1536955714f314f414ff007b0869186991c", "card_id": "BRM_033"}, {"dhash": "b2b16dce82383de35284a59e5b79b0e24aef94de652483580eb338a0434ee6ff", "average_hash": "0f6f896cd9dad8d39bf30bf00bfe1bb203bd03ac338c30c020c0c000060084ff", "name": "Pit Snake", "phash": "5d858fa73bfaf25baddeeebccdb4f69091dc82c489a60ef0312186241964cb48", "mana": 1, "whash": "0f6f8b6dde7adcf7aff72bf00bfe1bf203fd038c338c309860c0c200060086ff", "card_id": "LOE_010"}, {"dhash": "b61bf27d6cdaf90072ffc7dc8d0113330c4e40ecbbc1f71bfd71e0c3098ee37f", "average_hash": "01c041c5e0cdb09b0380e38ffb9b9b9303830381cb81f8abf88ff1872303831d", "name": "Priest of the Feast", "phash": "db0d5cbe64ba9487bea09307be47de770527ae6d61796148590b46969c863293", "mana": 4, "whash": "01e041c5e0cfb09f8381e38fffdb9bd783830381cb81f8abf88ff1872303871f", "card_id": "KAR_035"}, {"dhash": "00800d3ef0fce19987815fe7bfdc1e21740eb0f9e0774f4410982f74e680f8af", "average_hash": "ffff03e083808006861b041e0c1f8c1f801f830f266ec67fc401fc007c00f80f", "name": "Counterspell", "phash": "59725c24b685b792c94d07bd4d90224dbb818b444b2e7ab669b23ed9334bb6ec", "mana": 3, "whash": "ffff03f08380810f861f051e0c5f8e1fc01f838f36eec67fc641fc40fc00f88f", "card_id": "EX1_287"}, {"dhash": "caa78d4c121bbc3c70a0e3cccd73bbef40c6fe0ee991424b04b728e619083730", "average_hash": "f701f369f1c9a0c0e0c0038033da31bff9bbfb9f73bf70bc20bc00dc021c020e", "name": "Ethereal Arcanist", "phash": "6d19931b3ebae9c59f646ce45e86262782f4c9a0446c6e4b8e9c69d4ffa14e05", "mana": 4, "whash": 
"f701f169f9c9f8c1a0c00188339e31bfbfbbfbbf73bf70bc20bc00fc021c020e", "card_id": "EX1_274"}, {"dhash": "2ae24484d1098af434315143b69e77fdec5ad185082bc347af377a668c883933", "average_hash": "d79fdf9fcbb7ec01ec058400846004662436283e00372038303c181811103112", "name": "War Golem", "phash": "addf1b9b7b29e9c199f41ccb5a684b29c19092586b483e345ba4cd862d0edbb2", "mana": 7, "whash": "df9fdfbfcbbfedb1ec0584048564056625772d7e217f3178183c189811b03116", "card_id": "CS2_186"}, {"dhash": "ca630cce11bc0bb437484e91b86e7019e06ac89583691ed331374d6cba98153c", "average_hash": "f70ff118f918f839603d003f003f883f003800380038003c003f003fa01f201e", "name": "Lost Tallstrider", "phash": "29fdf3c1732ae661f62699d86cd888d809933336ccec9e61668c9b25ec80fec8", "mana": 4, "whash": "f71ffb99fb99f83df43d403f003fc87f1c3a1438043c043c003f203fa01fe11f", "card_id": "GVG_071"}, {"dhash": "289d8d32024b3cb579f2e3dcc6f986430986123ea7504f897432c8e49005433b", "average_hash": "f773fbe2f9e078c078c09fc03fc473c67bc473847b847bd038d101d103004700", "name": "Servant of Yogg-Saron", "phash": "d782d352386cfbb4fb50092d2e428089a3c872bc1c9c3e47abf3faf577281a15", "mana": 5, "whash": "ff53fbe3fbe078c03ec01fc03fc47fc57bc47b847f847fd038d101d103004700", "card_id": "OG_087"}, {"dhash": "c8b7ad5f12a23c0473ff807ec7bc1af932c3672c8dd01088253148e3f0842b27", "average_hash": "3760f3e8f9ce70c330d03bc0ffc1ffda7fd83f983b9019c138c008c003000700", "name": "Dragon Consort", "phash": "d70de122bcd26dc92f61164bbe912c9000c99aa4dbbc9b4cdfc7ea6568733345", "mana": 5, "whash": "3770fbf8fbce70c638e03fe0ffc3ffda7f98bf98fb901d8138c008c003001700", "card_id": "BRM_018"}, {"dhash": "529e203ce5f85ce5b9ec5619ad035ad7a7341fccf622efdd003b0054e3817d3f", "average_hash": "8da181e1c440444362c3b7c333d313d8cb87cb47f3dff05ffc5901c00503cb2f", "name": "Runic Egg", "phash": "9bef16dbbe5f2d37be17e639dbb196dc2989ccec25cc92cc64b0068264244640", "mana": 1, "whash": "8da183e1c5c0444374c3f74373d31359cb47cb07fb1ff85ffc5d04400583c33f", "card_id": "KAR_029"}, {"dhash": 
"b898ad2142d6c889f9e0a21763af8e383576674fe8c4ce891d3338e6624c8f31", "average_hash": "07378f6703ee008f80cc42c742c3abc1bd08df981fdc5ccc4cfcc0f8c218861d", "name": "Giant Sand Worm", "phash": "5d8bdf982d94ef07bf9198a48f80216382f93e8993ecea64ee34770366b36422", "mana": 8, "whash": "0f178f6f03ec00af84cc46c746c3abc1bdc9df981f9c5cdc4cfcc0f8c298861d", "card_id": "OG_308"}, {"dhash": "fa9f5dbfb37247bd38e010807966c4ccf9f3fa2f07530c84700c9b986678deaf", "average_hash": "0700c307f323f073f07be00fc066c036607e307fd87dcc120c1748068e00fc0b", "name": "Mortal Strike", "phash": "b926f9d1be4dce2e4eb25e260b7926d925f8a4b6cdd01e58634b3a21c3249da4", "mana": 4, "whash": "0700c307f333f173f47bf00fc066e076607e387fd87fce3a2e1f48068e00fc0f", "card_id": "EX1_408"}, {"dhash": "a8d8cca3018d0b1ab73d4e83bcfc7951e3b2c84513c94796fe37fa6eec9f393a", "average_hash": "4703438431003008201864380c38f839fc317c307c203c20783cf83bf01ff91f", "name": "Cult Apothecary", "phash": "e35d7e72d9a096863b810d8c4e523924921399616686dff3b3b86bb9bc291c9e", "mana": 5, "whash": "77177b867184b8182038f43c0c38f879fc317c307c203c207c3ef83ff81ff91f", "card_id": "OG_295"}, {"dhash": "0a80dd1ef03ce2db1df0b3cd6a9b503c9fd380a367061e444108a7184a20fc8f", "average_hash": "773e07e003818103cc61c201f211f851bc493e607e60de610e000e000400f00f", "name": "Savagery", "phash": "977bf9f17ccc87fee7fb3ea129c4073e13633204cb949a585b81668063c8149a", "mana": 1, "whash": "ffff07e003818303ce61e243f2d1fad1bced3ee07ff0fe639e000f400640f80f", "card_id": "EX1_578"}, {"dhash": "68b3cd4c92933da77b6fe71accbd917302a61c6c39006309b2b08de30e025b2c", "average_hash": "e70fb1ee3bdb38df38dd3bbf3ffc7df17b817b803f803d8000c020c433001300", "name": "Cult Sorcerer", "phash": "d709a9cb3872d776bfc4193e2f15528600f08f1d5c0ceb019b9ffec47f205e01", "mana": 2, "whash": "e71fb9ee1bdf38dd38ff3dff3fbe7da17f817b803f803d8000c000c433001300", "card_id": "OG_303"}, {"dhash": "123a10f4a6ac290d53daaeb1de25351a7c60b082e145e48bc11b4a509084857f", "average_hash": 
"c107810fc00fc01fe01fe01fe01ff00ff80ffc0ffc07fc07f80bd801c0010000", "name": "Spirit Claws", "phash": "5b4f7968b4ef8732df13f299369c92cfcd87b2b40de8a064d1c672681369220d", "mana": 1, "whash": "e187c10fc01fc01fe01ff01fe01ff80ff80ffc0ffc07fc07fc0ffc01f801e900", "card_id": "KAR_063"}, {"dhash": "c26019d422a06d4ef20a85e358c1bf0636ad7a9e0c30b34dd8b370e0c107d2ff", "average_hash": "f74e7158101e009e00bf631cc01fc0bf829fc09fe10fe013e08ce001c007e07f", "name": "Jungle Moonkin", "phash": "79a1ed2de67ef37abb5b630ef3ac5a91097c9a66096b8a249e1a09d311a4062d", "mana": 4, "whash": "fd5e715c105e00de809f439dcabfc2bf829fc09fe30fe093e089e005d007e0ff", "card_id": "LOE_051"}, {"dhash": "0ab0fdffd04e23b9d4f01b133f602c25e6db1b25e6facc8489093a97732efedd", "average_hash": "07000708433ec12cc007c02f603f203d3036e814c84cec0c7e0c724c3e4cfe3f", "name": "Journey Below", "phash": "6bd6be7d4ff876afed39aecc13679620825126124b840f8cb9e137836318dc88", "mana": 1, "whash": "072007c0e33fc32fd02fc03f603fe03d303ee854f84cec0c7e4c724c7e4cfe3f", "card_id": "OG_072"}, {"dhash": "0af90cd65188cb12b7024e0fbc386151d2e20c25b0cb6297cf369b6f229e6538", "average_hash": "f71ff99fe39f803f803f143e183c3839181a3802182608360036403e601c611e", "name": "Amani Berserker", "phash": "e975fb797b26d68673c90dc68ee230c880980d9387306e20bb44f71c6f09ff31", "mana": 2, "whash": "ff1ffbbfa3bf80bf943f143e1c7e3879187838021c260c360836c03ee11ce11e", "card_id": "EX1_393"}, {"dhash": "0880991ef0e34115f6eb681eff36926924b449dbf6aecda1274dcc1b387f7afc", "average_hash": "072007c0030000000000080000102c12081212d61ede1f7f7e7f7e7ffe7ffe7f", "name": "Fan of Knives", "phash": "cd2436357bda5fc22f24831c07f31304231d49f0f7c09b171a7e3790f8a1f976", "mana": 3, "whash": "0f7e07e0038003000000080028122c16081a1ade1ffe1f7f7f7f7efffeffffff", "card_id": "EX1_129"}, {"dhash": "8af104cf119c0b0037304e46bebe796df21ac4e59449239306368d6c5a92ed3c", "average_hash": "f71ef91ff91df83ff83c043c003c203c24380438043820382038201020000800", "name": "Stormwind Champion", "phash": 
"697d63c3f3c2fbfc5eda4c920ca61626c148d30038a37b38be84264f678ef698", "mana": 7, "whash": "f71ef9bffbbff83ffc3e8c3c043d2c7d243b84380c3f243f203d203429041910", "card_id": "CS2_222"}, {"dhash": "c8dfcd6812c1048e7990b66a0c9554a82a16f1ec8d4782881cb979e2f146eb3f", "average_hash": "e707f367f94f784ffc4ee68a821d81158006c91ceb8de008e088e000e000f600", "name": "Princess Huhuran", "phash": "732a8f23f3c0f918bfe43c302f040cc646d446160fa35e67d7317761bb63d370", "mana": 5, "whash": "ef17fb6ffbcf784f7c4bee8ea61d891d8916891deb8de088e088e080f200f600", "card_id": "OG_309"}, {"dhash": "f8ef6cdcd1188be0b7534f1bbc3670cdf0fac1c586090dc73836796ce8988121", "average_hash": "07138399c99fc033c03f803f203f203f043e1c3cd83c203c00388019c001a100", "name": "Silent Knight", "phash": "293d992db6016667df24ed0866d29ac9ccccf38c87943be7c9c352ce694c3692", "mana": 3, "whash": "0713cbbbc9bfe83fc43fc43fa43f247f0c3edc3ef83ce03c2038c839e111e100", "card_id": "AT_095"}, {"dhash": "fe3fe8fd87d83d305b29bdd25284e548cb919663adc75e0fc31dfc1bf16400dc", "average_hash": "0100e103f007f81ff81fe835ec25ec210c350c250c35dc3bbc3cfc7ffc0ff00f", "name": "Maelstrom Portal", "phash": "819ef2917a69beff1e7cae57ae854cfa64e915ec44ea05e2e49e06ea818604a8", "mana": 2, "whash": "0100e103f047f81ff81fe837cc276c210c350c250c35dc3bbc3ff87ffc1ff01f", "card_id": "KAR_073"}, {"dhash": "8af08de112c3059e3943728c85704b45c402b91430ab62c7c3369f6c049c0d38", "average_hash": "771f717f3bdf387e9c4f98cf1847f003e023c026000600360016081608180118", "name": "Lorewalker Cho", "phash": "c9aec3da6d997eb4734893491cc9030e203cceb04ef23f84df183d399b26d666", "mana": 2, "whash": "7f1f7f7fbbff397e9c6f9ccf9d47f143e927c0260107007604360816093c0118", "card_id": "EX1_100"}, {"dhash": "eadffdff00ec1f107c449f9024234f0ef01880f119c043861989ff33c343061e", "average_hash": "0700c347fb3ffc7fa07f027f027f027f067e06700e000f600e40ce43e607e207", "name": "Power of the Wild", "phash": "1d08e9fd53f7067f7ad49c8acc813c3c2c7213bc27c1a7c4cb81d215fb097e21", "mana": 2, "whash": 
"0708e367ffbffd7fa07f027f027f027f067e06700f000f600e40ce47e607e20f", "card_id": "EX1_160"}, {"dhash": "4cd80ce0d1800b03378f4c26b07866f9fca2c5c58b4bb59608302968fe9e9933", "average_hash": "f71ffb8ff99e183d183c0c3c0c244c22783e3c3b7c387c182c00082008101111", "name": "Mogor's Champion", "phash": "cb5759027bb17360bb841e320b29a181606836ce9edb6ba97dd23c7d2d0b67a3", "mana": 6, "whash": "f71ffbbffbbf383f1c3c1c3e1c264c66787e3c3bfc387c183c00082089301911", "card_id": "AT_088"}, {"dhash": "48e68cd8d13d8b6236814e70bbbe676dc8d294a53acbc11603372e6f4c9cb939", "average_hash": "f719bb990998003f8c3e443e803fa03fbc3f3c37bc0f3c0c1800181c181c191c", "name": "Corrupted Healbot", "phash": "c95d3d246601c761fb903c803be60320939859d2be846edb7b8bd6ce6d8eb639", "mana": 5, "whash": "f719bb9b099a083f8c3f443ea03fa07fbc3f7c3fbc0f7c1c3c08181c181c191d", "card_id": "OG_147"}, {"dhash": "fa9f55ff70fc8f031dcc7621626c409c6299dc33316340c43199ef33597ef6df", "average_hash": "07008305c11fe47ffc7ffc7f644e7c007c332e370e650660060026012020f01f", "name": "Whirlwind", "phash": "0bfcf96f78739ef8c694666a0fb0831c266d3c1209d02c7de3c572215bda1e70", "mana": 1, "whash": "0700c307d11fed7ffc7ffc7f6e5c7c427c333e370e65066006402603b025f81f", "card_id": "EX1_400"}, {"dhash": "08ff0c3241e64accb72c4f59beb05c65b22aa75d4daaba4421bb4e769885213a", "average_hash": "f71fe19fe38ff00f703f701db01db01d701dd009c0092008000a100200020118", "name": "Beckoner of Evil", "phash": "69da7bb6de643bc32ff34fb926b182c060c81218b6c6d6406f093c43bf236e6c", "mana": 2, "whash": "f71ffbbfe3aff0aff43f7c3fbc7db85df05dd02fc00aa00e100e10060102011a", "card_id": "OG_281"}, {"dhash": "3aef3978c2c24cadb952702883504ca5326a615c83aaba4506bb7c72f987d32f", "average_hash": "ef53ef7befff78dc5cccdcc20d46214c111a10184110205830103008310ca107", "name": "N'Zoth, the Corruptor", "phash": "7f681bcabb183fa99b72967892b666e368fe669ee6b64c65371146006b801782", "mana": 10, "whash": "ef53ff7beffff5fc5cfcdcc22d4621ce11da11184950215830183088b18ca12f", "card_id": "OG_133"}, {"dhash": 
"5ce069c8922c05597a92ac6519497792402ec67ddcf3ba8b013613e6a44cff39", "average_hash": "f71feb79db5cd876c8f282b2c231c33a8130833003b900bd18be98980818661d", "name": "King Krush", "phash": "290773224b4b7f1b3b5919473b98926759d4598683b36ef647c7cd849398848f", "mana": 9, "whash": "ef1feb79fbfcd87ed87286b08271c37f8130833003b904bd18fc989c0a186e1d", "card_id": "EX1_543"}, {"dhash": "e8f3ccef119303b4351852b0b8d873e1ef9284656b8bbf14c13b376f689c813c", "average_hash": "171ffb9ff999f81c780ff819700038302c300c001c01d8031808b81cd00cc10c", "name": "Spider Tank", "phash": "e35bf3907b039fc79eb01ce19ecc4c1888e443f266d833e7af19d6c679224c18", "mana": 3, "whash": "171ffbbffbbff8bdf83f7c19fc5838702c780c221c01dc035c08b81cd10cd10c", "card_id": "GVG_044"}, {"dhash": "8ab9057004ca38967346e7788ec118737fc6c28c3110e6098d331ae7348e631c", "average_hash": "f70ff1cf79ce38cc388c398c399c1998b19e398f7986788e588e488c430c430c", "name": "Northshire Cleric", "phash": "4397d3f93bee91f4bb741d679f5010190449560cc19cea806e271e47b766e374", "mana": 1, "whash": "f70ff1cf79ce38cc388c398c399e9998b99e398f7986788e788e489c430e430c", "card_id": "CS2_235"}, {"dhash": "e29ff57fe0c99b889e991f352df246c8881321a7f24e34884b02ee7c181922bf", "average_hash": "05008141f90ffc3ffe7ffe7ffe7efe7e7c66fc707c784c1e3c133c0320014004", "name": "Ice Lance", "phash": "83fe7d93daddc67ff6da9fa02cec96984a56258c23654a06494336815ba15932", "mana": 1, "whash": "07000101f90ffc3ffe7ffe7ffe7efe767c6e3c627c784c1e1c123c1330010000", "card_id": "CS2_031"}, {"dhash": "8083ed3720cfe2b41d4318ba27c6522cedc8b2331dee6890dc26a3ccc091fb7f", "average_hash": "f7bfe3e4f986fc2dee653e63fe72be179625e469e063fc06e80288000003c007", "name": "Stand Against Darkness", "phash": "3312779afa476d371de18e25bdc83c890b986acc614cca71c3915a9c43737be4", "mana": 5, "whash": "f7bfe3e4f986fc6dee65be63fe73be179e27e468e063f806e800c8000002c007", "card_id": "OG_273"}, {"dhash": "c89f4d3692e4088d793ab2a4664d859818f6301f5334ac48d4b98be38743172e", "average_hash": 
"f730ff62dbe6ddce5ccc5ec94fc80fc4054433ced3c2e2c340c140c002860622", "name": "Forlorn Stalker", "phash": "57c6e3c3bf74f370ee703698238844c828b91c256ef99a193ec7d78c619b08ec", "mana": 3, "whash": "ff31ff63dbe6ffae5cce5ec14fcc0fc405c473ced3c2e1c340c244c402860622", "card_id": "OG_292"}, {"dhash": "6adcc43141c28a3c35fb5206bd5c72f38fe67dcdff0ad344a6b78862358de93a", "average_hash": "97039d87f98e680c6c09cc099c1218103803f017f80ff00f300f700e5802d902", "name": "Cogmaster", "phash": "c3df1f9ffff5dbf19ea59fe913692cc8341846022c924e826f03260736609ac1", "mana": 1, "whash": "9713fba7fbae688cec0bdc0d8c523852f843f81ff80ff00f701f701e590ad902", "card_id": "GVG_013"}, {"dhash": "6480fd1fe0ff01f9178e8e1c996164c2c9a6f11de6b38c4f43ba0c7c80b0fc07", "average_hash": "073803c07f80fc00fe1f7a3f78366064606c507c007c807a107c00740062e00f", "name": "Siphon Soul", "phash": "e9d647059e693ceaf41aad24090119921be9275663e7b6657b929e4c9179da95", "mana": 6, "whash": "1f7c17e07f80ff01fe1ffa3f783e7074706c507cc27e807e147e00740072f00f", "card_id": "EX1_309"}, {"dhash": "4af00ce111834b8237144c2bb94672a9c49a91f53d09734264b6cb6cbd9b8334", "average_hash": "f71ffb9f7b9fb03fb03e903e04360c3204300430043000330031003000100100", "name": "Silvermoon Guardian", "phash": "a9f4492b7b89b7d8963699a44b322693386893663c9cf8e01e932d2fd90cf68e", "mana": 4, "whash": "ff9fffbffbbff4bfbc3f9c3e2d760d7205700d7005702173003100b051b00110", "card_id": "EX1_023"}, {"dhash": "c8fe8cf501c08b8435594b86bc1c6a71f6f2cc6583490c941139e7769c81312e", "average_hash": "b70df31ff91fe81e680fec0f9c3f1c1f381f7c33dc318c008800180338033103", "name": "Gnomish Experimenter", "phash": "cb1f6918f23036b19fa51e63369e98987c7a8f732f27d9607392189badc88603", "mana": 3, "whash": "f71ffb9ff99fe81e682fec0f9c3f9c1f383f7c37fc308c008800180338033107", "card_id": "GVG_092"}, {"dhash": "2cdeec6411c38a8f357b6bd6b08c64f9c9729785280963d79e367f6cf68dd939", "average_hash": "e717fb871989580c78087c081c201c201c209c272c233034183838189819d91d", "name": "Deathlord", "phash": 
"879f5fda79a19db41b0366a47bc0230f64e29e8e3df36e73e6c88cc9c1806648", "mana": 3, "whash": "f717ffbf3bad788c7c0c7c283c601c601c609c373c2334341838383cf91df91f", "card_id": "FP1_009"}, {"dhash": "b8ff7dfec3c08f0b3c777088e0b04a65fdc3fa87d23f0f1c3a78d0f8a1e17ccf", "average_hash": "0700c30fdd3f5c7f987cb87db83934083006207eb07ef078707060620042e00f", "name": "Dark Wispers", "phash": "e97b0f131fc6fca05a38b65c240a440a66e02c6ced1c1a9dabd17a796173fda6", "mana": 6, "whash": "0700c35fdd3f5c7fd87eb87fb839b408305fa07eb07ef078f070e0630043e04f", "card_id": "GVG_041"}, {"dhash": "5a36b97d225b5cb4b17a63a106e34d4e3b1ce4dd9403fe1d1f39f660c097717f", "average_hash": "8741a3c1e043f0c1f081f00bf00fe00de00d9011f838f01cf808f803f107f17f", "name": "Elise Starseeker", "phash": "73db5e9ab73fae2df99a5a5a2d6596d5611848de85620ac851d826644cc6c2dc", "mana": 4, "whash": "a741e3c1f643f481f081f08bf00fe00fe00df013f879f85ef80af803f107f1ff", "card_id": "LOE_079"}, {"dhash": "0086bd3fe0f493e92f0347e41cc83b01f7222c05d248a2930527f54e0a18f436", "average_hash": "173c03e3e183cc07ec0fec1fc01fc01be03fe00ff01f681a3c18bc1c28043804", "name": "Arcane Intellect", "phash": "0b5f198ec610f4e0fce02f314bc60f12230f8ab46bc97ae45bb366ec5b6a78b2", "mana": 3, "whash": "77be83e3e183cc0fec1fec1fc01fe01fe03ff00ff01f781a7c18bc1c2804780e", "card_id": "CS2_023"}, {"dhash": "0a82fdddd0a7af1f1d5a32b87ef1e16cc45508b33ca07342e4c1190f3312ce2f", "average_hash": "070003003b1078787c787c78387c307f387fbc37be073e077e077e067e20fe0b", "name": "Astral Communion", "phash": "e788a52b7e479ab1bc706cd803fb2766094c25d6492f67d38b69d331637053a2", "mana": 4, "whash": "07000340391078787c787c78387c307f387fbe779e173e077e077e267e00fe0f", "card_id": "AT_043"}, {"dhash": "9cf429e1428ac537fb9934278e4c44b88332f66126c7c88899397763ce4e3338", "average_hash": "e71fe37d635c004c305e5e864c020c001c001d241cbc0c0c8ccc000c301c1018", "name": "Mal'Ganis", "phash": "4d9b7b9879381b9bb365194e8b8493411269d8b0b6ce436757a6e584ea9ac3e9", "mana": 9, "whash": 
"ef1fe77f63fc805d347ffec65c5f8e515c081f341dbc0c9c8ccc00cc309e1018", "card_id": "GVG_021"}, {"dhash": "9af92de2428ccde0fbc2a1154e39c0821b1726e969c46e0811ba8ee4934e6739", "average_hash": "071fc75fc35fe0ffc0ff12df101d7ae08c098f890d82008610c020c000080208", "name": "Pit Lord", "phash": "590ad9226d6c5f262e19c6e09bc4916811f246de06bb9991e76cc7c4bb233df3", "mana": 4, "whash": "af1f8f5fe37fe5ffc4bf16ff1add7aec8ac98f8b0f82948610c020c000b80608", "card_id": "EX1_313"}, {"dhash": "388f7c3aa0b41c79a2e6256ddb73a4077799ccf27905b1db4a26b755c8a7017c", "average_hash": "4780c78047400000000000068007c00062130023a003a037c437f81ff90ff91f", "name": "Assassin's Blade", "phash": "9b6436c27fc3b74c674813b6cc1080e9242c42990ca6d9386fa9765abff9db1d", "mana": 5, "whash": "47e04fe0cfc047c0818080869087e281e3bb8423f82fa837e437fd5ff90ffbbf", "card_id": "CS2_080"}, {"dhash": "8af80cb1114e0ac136044940b2366ef5b88ac135130b66469cb56d6ef49d193a", "average_hash": "f71ff39f7b9ff827b82738000000000c601c60183004300620002019101b111e", "name": "Burly Rockjaw Trogg", "phash": "e994db16fb6573ec7de81e78963021c949b218833659b64c6e9a4c866c86dea6", "mana": 4, "whash": "ff1fffbffbbff8a7bc372c258560005c607c703879343026201c7039591b111e", "card_id": "GVG_068"}, {"dhash": "4abc8de7429c7d30703be266c60988a301465abea4911f0b7132c2e41c00bf29", "average_hash": "371a13ecebdcf0dde0cfdbcfdbce3bfaa380039403bcf3b818d300d303002300", "name": "Wee Spellstopper", "phash": "fd01796a8ee8efb5fe5093677bd098b801e06c9c03e53201ae3dca347f805e1e", "mana": 4, "whash": "175a19ede9dcf0ffe0efcbcfdfcebbfaa380039403b4f1b818fb00d303002300", "card_id": "GVG_122"}, {"dhash": "8cf30dc712ca8db07b4fe6180c31d2623cc5608e815806817f3eeff5bc4b211f", "average_hash": "770ef35c795c601e089d388b3b123806308c2080200000007810781f781f781f", "name": "Grove Tender", "phash": "e38d7f326bcc7636cb6097a41e4009991876ba346e74e760c3b893e18f813e67", "mana": 3, "whash": "f71efb5ffb5fe01f18dd389f391a3896388c30802990681078117877781f781f", "card_id": "GVG_032"}, {"dhash": 
"5a1cbd31e2c15cf2f18ce7192de358a6130c7f9dfc2289dd16b913622796d57f", "average_hash": "876385c787c7b0c4b0c702d7e346e200e201d019b15fa04fc00fc003d100e3ff", "name": "Sir Finley Mrrgglton", "phash": "797b9fbc9dde197bb72d9edccda996f51118666981a4227019c86a5419124b1c", "mana": 1, "whash": "874387e7a647f4c1b087d397e35ee3c0e300d159b15fa04fc58fc007d100e3ff", "card_id": "LOE_076"}, {"dhash": "6ae1e4e2c18d4b3bb7624e87bc2c7995e26ad495a96b10972736006cb899e533", "average_hash": "071a8d9e03bc003c803f803f043f00380039203d603d0c396c3100384010c110", "name": "Stonetusk Boar", "phash": "2dfd5f7b7627ffdc7fe20fe9338020e086422c884b909e617f8073928d2133a6", "mana": 1, "whash": "8f9a8fbe8bbe85be843f843f8d7f09790179a17d617d0d797c3100b861b0c910", "card_id": "CS2_171"}, {"dhash": "3ade2ce0e198cb23b64ccc99b83271cfee3ed13d0eab904703b6286cc7801f30", "average_hash": "cfbfcfbf87bd05b910f9323103700171037d817cc17d11f8103800b005800320", "name": "Dark Iron Dwarf", "phash": "add99b995e0679467f9686a483c001c381e918686cd27cf23e9cc661e7637de6", "mana": 4, "whash": "cfffcfbf83bd07f994f9257103f101f9437d81fcc3fe11f8107805b005b00320", "card_id": "EX1_046"}, {"dhash": "03c0cdfd64fa1f353c497002f236e759ce33866f1cf73b8637048789fe930224", "average_hash": "739e01f060981c7c487f006700e300700878087d0c7f8e3fe63ff01ff80ffc07", "name": "Mass Dispel", "phash": "899ff4aa73612d41e677db001bcec9a34fd049f20c795bc95abd53167aa053a0", "mana": 4, "whash": "f59f01f07098147d107f086700f308700878087d0c7fce7fee3ffa1ff80ffc06", "card_id": "EX1_626"}, {"dhash": "0c80d9ffe49173e2ce6c9f5d3e837986f32e01591e27fcc330a00048008ff9bf", "average_hash": "071807c0021e003f027f827fe27fc07fc07fc07fc47fe03fe007e00000000000", "name": "Pyroblast", "phash": "39408d08461533fee52333302d9286eb89dfa6fec17d69df1bf15a854bd05ba0", "mana": 10, "whash": "779e07e0029e007f827fc27fe27fe07ff07fe07fe67fe43fe007e00100007006", "card_id": "EX1_279"}, {"dhash": "2af80de41288c9b4f76caa8f1c01c31314ef7d7c9f1121c8003021e2f24507ba", "average_hash": 
"f71ffb7ffbdfa0bd00bda29993f382c3018053907f93328000c0084000000600", "name": "Master of Evolution", "phash": "7d096b9b6bbaf3e6bbe692602bc40ccc10d41b2027fc70491d0f39612f8bae8f", "mana": 4, "whash": "ff1fff5ffbffa4bd24bda6bb9fbb82c301c0719a5fbf768014800c4002810600", "card_id": "OG_328"}, {"dhash": "0a80fd7fa0ed4183877c1d02f2cfcfffdb6ce5f318f37706b30c661f8c3ffa7c", "average_hash": "07000340010214060014c027c0628007f003fe217e203e219e05be3d1e7efe7f", "name": "Arcane Shot", "phash": "0bd2fcff4fb3e7c9796e1e92a7dc21b10bf1cc6033c813c4130d4ec16481772a", "mana": 1, "whash": "070803400b8e95071414c427c0638007fe43fe397e713e21bf0dbe7dbe7efe7f", "card_id": "DS1_185"}, {"dhash": "08fb2ce041ef0bf2b70c4a01b4925225a9e23645e98a144469b99472f185573f", "average_hash": "f71febbffbaff83ff43f741fb41cb0049801a8016001d0001002000000000100", "name": "Spawn of N'Zoth", "phash": "fbf379105f461e87be93e6602bc8138889e336626e6636666e86cce4cc613663", "mana": 3, "whash": "ff9fffbffbbffcbffc3f743fb57cb1549941d94df141d1401002200001000104", "card_id": "OG_256"}, {"dhash": "fa9fed3c92f308467918a6806ba395162f6e549fe9340e4910b8cfe3dd402326", "average_hash": "0710cb67bbe7f88f7ccf72c302c583c181138399639ea08400c100c060006604", "name": "Core Rager", "phash": "d389db0fee66bcf73ed33cb867a020cd00f649410cd19a31ef2c96418cc3dee4", "mana": 4, "whash": "0f10ff677be7f9af7cdf7ed702c783c381d79399a39be08480c120c172807604", "card_id": "BRM_014"}, {"dhash": "6af8c5e7938e0d7976b32c6c59d0f2027067e488cf05020b4c3ea6e45849733a", "average_hash": "f703f10cf90df818f830dc32f83bf03850382038603a303ed09a301830000018", "name": "Possessed Villager", "phash": "6165b3f3debaedd89ed099259f840e0c30e4f3103b8b8e991b899921ce9833fc", "mana": 1, "whash": "f717f90cf94df878f830fc3afc3bf8ba503830b870be703ef0beb01c300c5018", "card_id": "OG_241"}, {"dhash": "a8cf8d3703ee0ccc7932af895c23fc86700ff7786455d18bd9bbb3e7064f1f3c", "average_hash": "77007300f904784e600e029d003d00bc003c023d00bf003fc09fc01fc01ff01f", "name": "Doomguard", "phash": 
"6982e60c93266e62a64019a58e9019c144cc4b9e33e1be6c2ff37fb3ee9c53fc", "mana": 5, "whash": "77107b00fb4cf86e60ae82bf00bf02bc00be83bf88bf803fc0dfc03fc01ff81f", "card_id": "EX1_310"}, {"dhash": "808f8d7f10c00114374a4c9c98d037816f028904244d40f2a0e4a7e94b1afeaf", "average_hash": "fffffffff3dff81ff81bf81bf803f807e007c00080000000000004000700f48f", "name": "Solemn Vigil", "phash": "f355e9084d983ca613e686c1a4492e04294c0e314b676ed45bf85ef15bbf77b8", "mana": 5, "whash": "fffffffffbdffb3ff81ff81bf983f907e187c18380800000000002000380fd8f", "card_id": "BRM_001"}, {"dhash": "0082dd3f60f383c40f991f361cea788cf7c8eff0c0c381070327064efc14880d", "average_hash": "77bc03e0f1877c0f2c1ecc3e4e1c461a063fc63fee3f1c1e1c181c1cfc0df00f", "name": "Cabalist's Tome", "phash": "0b5cf70cf2d3bd393dc0268323260d508e6923dc4ac71373c9157beccf32db98", "mana": 5, "whash": "77be83e0f1877c0f2c1e4e1e4e1c461a063fc61fee3f1c1f1818181cfc0ff00f", "card_id": "OG_090"}, {"dhash": "28f60de0d3c985037b0ca4e3489cb133463f3cfe40c4d10823b147e3ac4e739d", "average_hash": "f71fdb7fcb7f887d38fc13f807f10bf103e303cf07c010c000c118400a08061c", "name": "Iron Sensei", "phash": "0dcb79936bd39b533b0096606fd0c6e881d98a9919dc7eccee2e49b54603d6e4", "mana": 3, "whash": "ff1fff7fcb7d907d3cfc17f807f11ef103a3078d07811de018c118481aa8061c", "card_id": "GVG_027"}, {"dhash": "0a84fdf9e0a1e3079e0f373262ccc04092b735ef46b289c53f1f637ccca3b14d", "average_hash": "071803403b9c1c7e1e7c0e7c067c007c007800489a48b639fe79f879b82ffa0f", "name": "Bite", "phash": "8d8b5e2cd351b6f5785e37b84f6080cf33f0c87321ee4a79c78573125ec0d306", "mana": 4, "whash": "071c03603b9c1d7e1e7c0e7c06fc007c007800689a68be79fef9fc79b86ffa0f", "card_id": "EX1_570"}, {"dhash": "00c00d1f707083c0bce7790df332e7654fcbbe936031e17202e404c9c19bbc37", "average_hash": "ffffcbf0e1d3e023e023b42334213422742ff60ff02fe02f302390218021e03f", "name": "Divine Favor", "phash": "f97037ae3c0759004fa886e149428b18631c3e73cbf11bd6d3d91a7d7b2358b9", "mana": 3, "whash": 
"ffffcbf0e1d7e123e023f42334213423740ff60ff22fe02fb02790218031f03f", "card_id": "EX1_349"}, {"dhash": "78ee6dcce2197d70fac3c5249fcd399b7336470d9e107c49b0b364e7c10f0b1f", "average_hash": "0700834101598011c013d317cb1fc93df939fb18f380c081808d80df801fc21f", "name": "Anomalus", "phash": "39a3be8c9cb866136f4f5c12d36482a910d1f69c66ba6e85d6eddac57f022c11", "mana": 8, "whash": "07018343015bc057c01fd19feb1fe93ffb39fb98f390c08580cdc0dfc21fc31f", "card_id": "OG_120"}, {"dhash": "caf38cc511830b0e373c4e58bcb07079e1f2c4e58fc91f172136166c2c985930", "average_hash": "771cf31d311c381c781cf81cbc3c3c3c3c3c3c38383878381830181010001000", "name": "Tournament Medic", "phash": "e31d6b38fe60bf9c7e9c5392334884e40cd614588c71cb81fe10dc66ff84b639", "mana": 4, "whash": "771ff99fb99fb81cf83cfc3cfc3c3c3c3c3c3c38fc387c3d183c183018101110", "card_id": "AT_091"}, {"dhash": "4cdccc2991530aa334e6517ca6f06d21fb02d245b2ab6c4318b7616c83990f27", "average_hash": "f707bb8739835801b8003c007c00fc019c2000300034803c8039c03bc017c107", "name": "Stoneskin Gargoyle", "phash": "a3d7d7c899412db49b2c6b396e26386d27c373e636c94ede9b6d32d361088205", "mana": 3, "whash": "ff97bfa7fba3f8a1fc013c007d40fd61dd70cd700134803c803dc03fc13fc10f", "card_id": "FP1_027"}, {"dhash": "0080cf1f14e0719ee72d4ee799dd3361ff82de4a2c2d90b965e601493286fd39", "average_hash": "fffffbf1f1833006b00de410dd009f21ff99cbbfd05d001d080f07060188e109", "name": "Shadow Word: Death", "phash": "73d5e320fc489d8b37070e6e29336918c909c8635bc672bc53f8f3415ba9333b", "mana": 3, "whash": "fffffbf0f1833006b00fe710dd01bf81ffd9dbbfd05d00150a0f07860188e189", "card_id": "EX1_622"}, {"dhash": "eadfad21d30f8cff786a22d44588cb1010f721e9e705dc0b00ba7ce0c1472734", "average_hash": "071067203b607840fcc1fe83fc01fcc11c007b00fcb8fc3c00d0004000010000", "name": "Fel Cannon", "phash": "a78ff786fcf46c709e4013e92395d6e00ce3216386e08c683f4777006b1af7dc", "mana": 4, "whash": "0f106f707b607ce0fce1fee3ffe1fec91ec0ff80ffb8fcbc30f040c000810605", "card_id": "GVG_020"}, {"dhash": 
"889fddff00e823d95fe0bb003f817c0edb18bc63fec3f887319e69b9e17202ec", "average_hash": "07000306c10fe01ff01fe04f805f807f807f807fc07fc07fc07fe07fe03ff02f", "name": "Mulch", "phash": "3902ce2166f41e3fce18368886472398ac7129744ec51bfd734f73324de353fa", "mana": 3, "whash": "07000307c10fe01ff03fe04f807f807f807f807fc07fc07fc07fe07fe03ff02f", "card_id": "AT_044"}, {"dhash": "fabc2c6ae0cd54ff22bf2f3adfe4aee9fc10ef339cefb19b0327365f5ca8717b", "average_hash": "07800308030400046003f839f81eec1e6c2e6437843b1c1b1c38183c381e701b", "name": "Fiery War Axe", "phash": "cb3d7ed2fec39f5d7314469391e9a4b406ce98b109e266681b90ae4c9f627710", "mana": 2, "whash": "0781078f03068107f033f83bfc3ee81fee7e647f843b1c3b1c3c183c381ef91f", "card_id": "CS2_106"}, {"dhash": "a8815d7eb0dd81a4374b5784ec1ac974beed7563e9c6c221076718dc7e38f167", "average_hash": "07000740d304f90f7c0d1c0f0c0936437e007e403e8838143838003c3270f27f", "name": "Shadow Strike", "phash": "c7891eebfd147604bfe00c7c031e258cdbc08c781b65e78719c7d63839f236a6", "mana": 3, "whash": "072cc7c0d305fb0d7c0f1c0f1c4f36437e407e487e4c3e143a3c287c3a7cf27f", "card_id": "OG_176"}, {"dhash": "0a00cdfff130e7473ca5793ef259cc949b2167c7d80de370d00c8c3b1ceef09b", "average_hash": "070003000338087a0c7c0c6ed86ed06cd84ffc3ff83ff81e3e063c463818f81f", "name": "Eviscerate", "phash": "c9e3bccdfe3d5bd172cb5e32033d8d9c6b06a56299c1498559102e3179e09f4c", "mana": 2, "whash": "070003001338097e047c0c7f586fd06cdc5ffc3ffc3ffc1e3e063c4e3c1ef81f", "card_id": "EX1_124"}, {"dhash": "e8fdec83110d4bd8b421574fac9443299952389da16a4bc7a6b6c96c1298a530", "average_hash": "0799cfbffbbdf0b1c43c841c0d580d4f014f000621042031203128380138011c", "name": "Bloodfen Raptor", "phash": "6dbf7b63378ddf944e4a0ccd0ca008a692d0b6a4e7a0a6c97e13df10be02767a", "mana": 2, "whash": "0f99cfbffbbdf5b5e4bc8d5c0dd80dcf034f014f2144217160712cb829bc033c", "card_id": "CS2_172"}, {"dhash": "1a90e520844b38f17166e3c98403190b307631ec6f10fd09da3324e11d82bb1c", "average_hash": 
"e7ef85e791c7b8c19ac193c1b3c103c003800b801b8011c0a0c021c113007700", "name": "Twilight Whelp", "phash": "d7b399fa89de5d3effde59b0ebc008ca0004731823192a60ffc30ec3be0b3a51", "mana": 1, "whash": "efffefe79be7bce39fc1b3c1b7c103c003c00bc03bc03bc0a0c4b1c573807f00", "card_id": "BRM_004"}, {"dhash": "00800f1ff0b8e1653efb5500b81405611adaa464f30d0fdb1a3433e22198fc27", "average_hash": "ffff0ff083d1891b6e57ff97bdc3bd8b31093d081c3e8028c00050004000e107", "name": "Muster for Battle", "phash": "37559d883ec66c32cf1933724906628c695a09c5db693a515be5daac73da1e96", "mana": 3, "whash": "ffff0ff083d183dbfed7ffd7bfe3bfc71188388818bea038a00040004400f90f", "card_id": "GVG_061"}, {"dhash": "a0ddcd7a92802d3073ad84528fad7e7355f622ecc6401d893e3278e6e88c5b18", "average_hash": "4702d343f14bf849a899a3022f009b01ff01ff01ff80fc8078c0f8c05b003700", "name": "Ragnaros, Lightlord", "phash": "e30bb96cfce093cfbec0941c3e34382904984ec3669e7e74ff3132e50c3b415d", "mana": 8, "whash": "4f02d347f94ff849a89deb82af02bf01ff01ff81ff80fc80fcc1f8c07b003f00", "card_id": "OG_229"}, {"dhash": "122ed95392af2c1b72f685084ef38986131d632edec0349d7d32cae29105e6ff", "average_hash": "a7691b73bfd3fec0fec0fbc073c8f2c8e388c3884b0008c058c100c3060884ff", "name": "Mounted Raptor", "phash": "7769e72389b8be2c7d58f6983f3636151186664861b26671b39866f4c4b8cee0", "mana": 3, "whash": "af691f7bfe71fed1fed0fb803bc8f2c8f3c8e7884b80489058c110c30608c4ff", "card_id": "LOE_050"}, {"dhash": "28c74cbe91408a83b55c5339b4f07143ee8e87f56a8b99166e33a46c08893536", "average_hash": "779c9fbbdba74c2d344c700c7064307110300039983c3c19bc1fb81721020100", "name": "Stonesplinter Trogg", "phash": "eff37fc2db358fe5bea4c3306e58282800184692789e7fc87b02bb04bb21c7a3", "mana": 2, "whash": "7f9cbfbbdba7ccaf3c0d600c7964a17111784179997c3d59bc1fb89731920100", "card_id": "GVG_067"}, {"dhash": "0af1acc2c11c4b60b6954f61bc827105ce3ab065c84b10976336c76c1f99f130", "average_hash": "571f839fc39cc03f203fc03fc03fa033802f0c2d0c3900380033003300102110", "name": "Oasis Snapjaw", "phash": 
"a9fd5b627e259ecbd7d29cb04628c26890d4b3ed23cb6c92b33096246c08fbe9", "mana": 4, "whash": "771f8fbfc3bfd0bfa43fc43fe47fa077807fcc3d8c3b08390031003b31107110", "card_id": "CS2_119"}, {"dhash": "08c9ecf7d1e60b0936304834b2cc7839f96260c5478ac7159b37646dcc84313b", "average_hash": "f71fe187fb9bf83cf838fc087c003c381c3c181c18083800b81c981db00be11f", "name": "Murloc Tidehunter", "phash": "635b7346bb31ceb13cec3092726409c38134d2486bc9dfcc9f21e729f70cf334", "mana": 2, "whash": "f71beb87fb8ff83dfc38fc187c203c783c3c3c1c380c3800b83c981db00ff10f", "card_id": "EX1_506"}, {"dhash": "88f82d6502db058239ad61709282650dcef2f6c4ed0b92164f353c6cc0881931", "average_hash": "e707e36ff95ff04db84c6886f001a4018c21b83df83d783e380eb00180010100", "name": "Blingtron 3000", "phash": "fb9b7b8aff21ab483e8152046f4608310c44b3d9e3b01fe65f34db392c2336b3", "mana": 5, "whash": "ff1ff37ffbcff05fb86c6ca6f445a4010c25f83df83d783e381ab01391010100", "card_id": "GVG_119"}, {"dhash": "02e408ce81301b4d3e30598ea6785be5a6e25045ed829a1d67399ee3fc9fe17f", "average_hash": "f589fb11f81178107826cc017809781a78037800700018011802f800f10ff17f", "name": "Jeweled Scarab", "phash": "e3ed27f57175369973785a79391a12168de4c2e92159183b1c926bf244997b4c", "mana": 2, "whash": "fdbbfbb1f839f831783eec21f8497c5efc037800780018017803f801f10ff1ff", "card_id": "LOE_029"}, {"dhash": "c8fcad7152eb3d0c72d2e424d92d86db0e367fccf311008b183226e6d80e3338", "average_hash": "3703336639de78dfd8fadbb20fb001804386cb83f3bd61be089010d8230c020c", "name": "Kirin Tor Mage", "phash": "cd259b2ccc38fb30de5894e53bc513cd40a463999799da446bfb1ad5dcc06316", "mana": 3, "whash": "271733fe39def8ff58faddf20fb001804b86c383fbbd71be88d810d8230c030c", "card_id": "EX1_612"}, {"dhash": "3cdcac3cc1d74a9f377c49c1a4024a7196e227454c8a9144c7b93c73c18d3532", "average_hash": "07918bb38baff82dfc38f005f44138487840f80938091008100c400c01130117", "name": "Coldlight Seer", "phash": "e3d933d01c841ecb9f0c63a033b123c2e4e29e6d7bda9bf9fb98cc6326280c63", "mana": 3, "whash": 
"af91cfb39baffdadfc38fc3df5437d497948f94939491948100c408c019f0117", "card_id": "EX1_103"}, {"dhash": "8a98c56182c33c057b1fe78eccfc99f33fc6770cf29000090030c8e3d9012b37", "average_hash": "e7067146394e18cf18dfd99d3b9efd9ff99ffb9ff98ff887388010c000000202", "name": "Mana Wyrm", "phash": "c38b69fb9efaf3f6bbf426b53b0418a982a11b614c2073016b72361d9d25cec1", "mana": 1, "whash": "e706714e394e18de18df789d3f9efd9ff99ffb9ff98ff887388010c000000202", "card_id": "NEW1_012"}, {"dhash": "08f14d6612cb0d8c7b38e7420e14d8f824f151a625081ac024385df2b640691d", "average_hash": "f71ef97efb5cf85ef8de7ade3f1c1cd80c941c802d800100008068c078007800", "name": "Darnassus Aspirant", "phash": "638e693373c6be36af651c28cbc0519488f40b12c931db11fb981663ff23df43", "mana": 2, "whash": "f71efb7efb5ef87ff8de7ede3f9e1ed81e943e802f80050044c068c078007800", "card_id": "AT_038"}, {"dhash": "48f6edc8421185383a736de49ec86993d33a66f5004b03940438f973b2876d2f", "average_hash": "c70fe36bfbf9fc7dccf1ccb8d839d8398019841d0c0e2c0024000000c0034903", "name": "King Mukla", "phash": "3bdbb9b1d38cde3839c316584b8618b20294cd666eb473fdc725d6e4b3189438", "mana": 3, "whash": "ef1fef7ffbf9fc75dc71fcf9dd79d979813d051d0d0e2c40240080004103490b", "card_id": "EX1_014"}, {"dhash": "8af805c15a0a81877ffea4d2580cac28121acf6d9cc52b8b583691e5424b0736", "average_hash": "f70f755f611f081738b8f81d8ca58c8c041885b189b7e833c893c01780170208", "name": "Webspinner", "phash": "a9bdebdafbecfb667e5a13c2838120e4066099939693f24c4e4a7919a5a4bca4", "mana": 1, "whash": "f71f7d5f635f881738f8fe3fdca58cac051885b189b5e8b3c8b3c097c0170208", "card_id": "FP1_011"}, {"dhash": "0afa0dc492980d30fa0c2cf25c6cf3906e67dae0a4c5098b773fcee7bc4f713f", "average_hash": "f70ff31ffb19f81970107c302c3818b8083e003e00bc0838389b781ff81ff81f", "name": "Dreadsteed", "phash": "eb253733fbe99cb35b46c64c2e30d9e400e04cc6332826637f92d721bb499979", "mana": 4, "whash": "f71ff91ff959f839f0387c302c381cb80c3e043e00bc0838389b781ff81ff81f", "card_id": "AT_019"}, {"dhash": 
"f8d28c8b01b48b4a37a44cc3b2b864f59d0afb25628b9ad72736ec6cc8890932", "average_hash": "a71dfbac7ba7d033fc39b03d1c277004f80ef836483c70383830383018101910", "name": "Earthen Ring Farseer", "phash": "e3f57b58be256fdade82c6382c9b232032c81ccd9c74daec67e1929cccc00e87", "mana": 3, "whash": "af1dfbacfba7d0b3f839bc3d1c67f844f84ef83f781c74383830383019111910", "card_id": "CS2_117"}, {"dhash": "c181cd1f70e0a1ec93f70cdf1b3e3e607ff0ff849f197cc701ac3159e0b0f0e7", "average_hash": "7ffef3e0f9802c002e001e003e013e003e00fc13f833c031e070e0703030f067", "name": "Forbidden Flame", "phash": "a3545e29f40ab187cfe13de009c40e3c0c180a1d433f9ad5c3c3f3e17beef6b8", "mana": 0, "whash": "fffff3e0f9802d002e003e03fe033e00be01fc13f833e573e070fc703070f8ef", "card_id": "OG_086"}, {"dhash": "0af025c6c2988563bf766e2c9c5870b56622fdcc866b0107c2b61f652180452c", "average_hash": "e70c717e895c4879303e90bfd03ed03a503e083f803f3033001e601e00000100", "name": "Troggzor the Earthinator", "phash": "69e76feb769adbb87f6663709b1149c8a60c320838c316cd33b11c33c34a5cd9", "mana": 7, "whash": "ef1c7b7ecbdd4979307edcbfd87ed03ef03e983fa03fa83b003e701e01040100", "card_id": "GVG_118"}, {"dhash": "18dfacee619d8b60371b4ce2bb047e6dcad2d1e5a7c91317cd37326ccc99f937", "average_hash": "47104380a119c019e81ff830cc33043c0c2c04343c377833d838d839f815f91e", "name": "Psych-o-Tron", "phash": "a3973e58f3218ed45eb00687d3a40892696c93b9396796f1366bec98ede0133b", "mana": 5, "whash": "e7104391e199e03be83ffc33ec33043c0c2c24347c377c33f83cd83df815f91e", "card_id": "OG_145"}, {"dhash": "6ad9c53012e70dcf7932e6080c1df2326c21d07e80f94a83853f1ff73c486318", "average_hash": "9716d97e394f785ef8debade3f3e1cfe1cbc02bc01bc0d3c0cbc18dc501c401e", "name": "Knight of the Wild", "phash": "4da7f37afbeebedbbfe08f317e5041c10244e2203c489f849b8b50d1390867c7", "mana": 7, "whash": "b716d95e394f705ef8debcdebf3a1ebe1eb402bc01bc0c3c08bc187c501c401e", "card_id": "AT_041"}, {"dhash": "e2f0c8e5819b1b003e8e5e7eb9f84af1a3221c8d7102835d26bb4c669990e17d", "average_hash": 
"37bd399c381d381d18100c181c187c0ed8139803200c200830183018310c817f", "name": "Djinni of Zephyrs", "phash": "e3d987976dd97b589b966c3c3c0e96c591b8429a91a11298299e4e9a96c1afe5", "mana": 5, "whash": "7fbd3bbf3ebdbc3d1c381c781c7c7c5ef853b843b00e205830183018310cd1ff", "card_id": "LOE_053"}, {"dhash": "fad3ec2b11970a0fb53f5a7ebcfc7bf183e24e459d8a381647341f6c689e2138", "average_hash": "47107da03b80780078007c00fc10fc38fc10fc11fc033c033c20f830b8182918", "name": "Duskboar", "phash": "e71f7ef27c849bb52fa107661a8c20300cc83121b6849f89ff43cf49bb21fecc", "mana": 2, "whash": "4f907fa07ba07da07c007c00fd70fd78fd50fd53fd5b3c633c20f83039383918", "card_id": "OG_326"}, {"dhash": "3af16cc3d1b48b4d379a4d73bce27085c13a90453feb8cc47ebbcf611880f127", "average_hash": "c71fc79ff3b8d83ecc3e643e403ce038c0304820402c0000200b380538001104", "name": "Kobold Geomancer", "phash": "69d639e7dba95ed9bfd359b84c86828618a4c6024dc2f6c07b123704ef01fb66", "mana": 2, "whash": "4f9fefbffbbaddbecc3e643e657ce17ce1704960416c0558201f380739003904", "card_id": "CS2_142"}, {"dhash": "6af0edc1121b05767a04adfc5225a5d912636fceb915408b82360de410487bb4", "average_hash": "371f311e3b59b879b8b71ba5f8a1fce0fc91b893f9bf98bf08b4081000100000", "name": "One-eyed Cheat", "phash": "e399eb496cf8696d3e485cda19d092250256b390b299b6cdef4e9604b329f764", "mana": 2, "whash": "371b391e3b58b87db8f73da5fca1fca0fc91fcb3f9bfd8bf08bc087000100010", "card_id": "GVG_025"}, {"dhash": "4adbac3011cf4a9eb5e65351a402543febdacef539cb07148f393866228d353e", "average_hash": "9793cfa623ae302cd009b00f1047804680312c333c3f7c0c7c0cc00e6106711c", "name": "Crazed Alchemist", "phash": "dbdf9f717f068f15bcb452a2838930d006e31c60e698c6196f09fe08b60efee1", "mana": 2, "whash": "9f97cfae33ae34aed40bbc0f5547815681712d733d3f7c447c0cc80d6107711c", "card_id": "EX1_059"}, {"dhash": "e23ffdff30c083801f003f1c1e6078d0f2b4a56949538386760def1b3c36f8ec", "average_hash": "0700c30ff31ff01ff81f181f3c3e5c384e386a3c3a283c283e041c0edc0efc2f", "name": "Ancestor's Call", "phash": 
"cb5879b5db33f60836ce86f3a61e0cc626f8667bc3942bc48acd2b816b336321", "mana": 4, "whash": "0700c307f11ff81ff81f181f7c3e7c386e386a3c3a3c3c283e241c0e5c1efc3f", "card_id": "GVG_029"}, {"dhash": "0a80dd3ff0fd83ed3782ed04cf09f8b190cfb79ffe3efe651c0e22d9c0b01e3d", "average_hash": "071803c00383801fd01f80070017000a104c806fe07fe01fc07fc03d800df00f", "name": "Flare", "phash": "d9dfde643fd125bae66b3985c9a411d24b640e3c2956da2963d336807b41b730", "mana": 2, "whash": "673e07e08387cb1ff81f881f001f004f104f807fe07fe27fc27fc03dc03df01f", "card_id": "EX1_544"}, {"dhash": "0cf3ecc81181030e37774ef8bcc87039e31286e50ecbf9960836d76d88917120", "average_hash": "f71cfb9c3b9b38389838bc387c3c9c3c3c3d8c188c0dac07cc31f018300c1100", "name": "Gnomeregan Infantry", "phash": "6b736f41fa801b9a6f9221a61a3224399cb0dcf836cccbbce3e18673792cd2c6", "mana": 3, "whash": "f71cfbbc3bbf3838bc38fc387c3ebc7c3c7d8c389c0dbc07cc31f018310c1100", "card_id": "GVG_098"}, {"dhash": "c289edff20cf03341ed29f993f00f900e4094ef33847d28f615ec63919f266e0", "average_hash": "07000100f91efc1dfe1ffc3ffe1ff03bf073e073fc23cc798073107f207e403c", "name": "Fireball", "phash": "a9ac43236cdeb60df47c7da00f5e96e0268d4bd843f329bdc3111a874b695b8c", "mana": 4, "whash": "05000100f81efc1dfe1ffc1ffe1ff01bf073e073fe63cc7b807b007f207e003c", "card_id": "CS2_029"}, {"dhash": "8ce10cc311990a06373e4eecbd584309b4926ca5cb4a975742b60c68bc837136", "average_hash": "771f7b9e799c380e3814bc3a9c3a5c1e0c1f4c1e241a30103000300038003104", "name": "Lord of the Arena", "phash": "c36d6b86f39079cc1b8e836139040802c930da611dd2bf577f9e76d667a73c2c", "mana": 6, "whash": "7f1f7bbf7bbcb8bf3c14fc3bdc7bdc5f8c5f4c1f241b301a3010300039003904", "card_id": "CS2_162"}, {"dhash": "3ad9443241c74afcb5c85301a4824077ba0af6f54d4aba95c5311f6b7c84c13f", "average_hash": "c797cfa6e3ae6026e00fe00f804200410041003800080017000ff80ef100e103", "name": "Grotesque Dragonhawk", "phash": "d9fedff6fbd64fb73fe31be03b8923c8928826082c0827b0d988d9d099006f62", "mana": 7, "whash": 
"ef97cfafe3ae65a7f42fe01f854e81410141017d094e295f680ff88ef981c11b", "card_id": "OG_152"}, {"dhash": "d88ead1962335c64f2d8a525cb1b96672ccf481c9570728b65368fe13f836f1e", "average_hash": "2761a3f321c320c300c303c703c703d73396239703b703b700f700d703074706", "name": "Steward of Darkshire", "phash": "dda4f738235aa51d7f420cc73fc546e800d4dac963fc7b817fa7cac4008932d5", "mana": 3, "whash": "67f1abf3a3f330f700f703d783c703d723962397039703f700f701f703074706", "card_id": "OG_310"}, {"dhash": "acf00c8061fbcbff37ff5f09b2024c1df83ab0fd604a8395023905701a80352e", "average_hash": "ef9d1fbc03b905be8c5ff83fb16f054f016e096e0d0f0c4e0c00080001000100", "name": "Harvest Golem", "phash": "cd5b69813615160efed2d3902b9929a20ca69c9cbcd0b6d35bbb5a49bc58e6f0", "mana": 3, "whash": "ff9d5fbc03b907bfdcbffd7fb5ff05cf036e096e0d4f0d4e0c400c8001800320", "card_id": "EX1_556"}, {"dhash": "ca990d7390ee38997126e158802103031e8e695cf430c1c926334fe63e8ceb11", "average_hash": "f7c7f9e7fbc7f8cef8ce3bce73c263c003800380038001c000c009d06318d300", "name": "Wyrmrest Agent", "phash": "f7a5d3f223ea3c3cbb4c843c3bf00ca710c0630c12586e01df937eccbf077f44", "mana": 2, "whash": "fff7ffe7fbeff8cefadebfce7bc273c203c0038003c001c000c009d06318d710", "card_id": "AT_116"}, {"dhash": "81c16f41e0089b1f3e345d71b072256548c2b1044fec981962f62f68b681ff3f", "average_hash": "fffffbfffdfc1c70cef067f863f071f031ec11e800880080008001800100f10f", "name": "Forbidden Shaping", "phash": "655725a65b27584bfb5039901b480c728bb6068dc9ccce154bbd72ec73ef7a22", "mana": 0, "whash": "fffffbfffffc1df4def0e7f873f073f031ec11e801880880018001800180f18f", "card_id": "OG_101"}, {"dhash": "eafccce191990b3737fe4c9cb91877f1ec62cac5870b0d173c301860e0856136", "average_hash": "670d711f7b1f3819383bfc13fc07fc27fc33f831f831f8007800780010000000", "name": "Acidic Swamp Ooze", "phash": "e3ddc9897cd87ff29e96138e38a48488045206533e8a3f89bf01ff81f702736e", "mana": 2, "whash": "e71f799ffb9fb83b383bfc3ffc17fc37fc33f831f831f8107800780018000100", "card_id": "EX1_066"}, {"dhash": 
"8a9f3dfff098833516c02900bec078c1ff02ff85fc2bfac7fbff06120c34f861", "average_hash": "07008103c30fc83be83be80fc00fc001e003e007e007f07ffc7fbe7f1e7cfc3f", "name": "Freezing Trap", "phash": "a9737e9dcfe0fcbe5eb28de24294076121cc265909652e845b0c7d927b40df9a", "mana": 2, "whash": "07008303c31fc83be83be80fc00fe003e007e007e007f07ff87f9e7f3e7cfc3f", "card_id": "EX1_611"}, {"dhash": "f299ed67804d38f0f0c8e197832f07131e6e788cd1d0af891f337ee6fc8c7118", "average_hash": "070401e6d1c4f0c70087038703838183818f318f219ff09df899f9ddfb1ff31c", "name": "Auchenai Soulpriest", "phash": "d9a59c9667a24d7ffe46148dae8cc98c00d1a30903732bc377432ec5fe685e4d", "mana": 4, "whash": "074401c4d1cef0c7008703870383c183818fb18f719ff89bf89df9ddfb1ffb1c", "card_id": "EX1_591"}, {"dhash": "f838fdf300e747cc9f30334324884919b016c0ee06ddf8b63fe7030f6058f8bd", "average_hash": "07203bcc3f1e3fde0e7e867fc07ec05f00180010002008cfe0313c3c8c1df01f", "name": "Preparation", "phash": "492b4f20cfb4f6177a4139c81c402ea099cc0c36b3740d3b63599bf6cf8e367f", "mana": 0, "whash": "077c3ffc3f9e3fde0efea67f827ec05f0018081088a009eff831183c9c1df01f", "card_id": "EX1_145"}, {"dhash": "fa3d0cff0088178227203f93fc7ef231c0e0a2831dcee71ffc2f305c009c0170", "average_hash": "8503e10ff30ff01ff83e603d003f787f1c743c783806780ef83ff03ff01fe00f", "name": "Argent Lance", "phash": "e97f7e987ba5d6229e4a2ddc1618a4b187a602a53ce93c689fd0692597a95e42", "mana": 2, "whash": "8703e107f10ff01ff03ee03f003e787f1c7038783806781ef83ff03ff01fe00f", "card_id": "AT_077"}, {"dhash": "8a8ffde9e3d3cf7d1cbf1832310462d8c4b9fd61e1808b123627e04c85197836", "average_hash": "0700c3017f383c7cde77fe77ee326c3240004018381c3808b81190030813c007", "name": "Lock and Load", "phash": "a33c2fbefaf17ac97ac69a352f528da445d80cb66198064dcb413e045bc37e23", "mana": 2, "whash": "0708c3437ffc3dfcfe7ffe77fe776e3248004c593c1c3808b81990138813f00f", "card_id": "AT_061"}, {"dhash": "8aefcccbd1c40b2d37f24ee4b9987325e45aca45988b39174337466ef99cd339", "average_hash": 
"371c139fcb1fd83d6818983e9837b037f037fc3798179c1b881b0018a0188118", "name": "Ogre Magi", "phash": "a9757f866ecb9fe5fb308ea98f2666d82148c0700e923373ee88d34877063e26", "mana": 4, "whash": "371e199fd99ff83d783a983f9837f837f037fc379c379c1b981b00188018a118", "card_id": "CS2_197"}, {"dhash": "08cb0cd26184cb26b67b4ceeb998793996420e8578eae114c339a267449b1936", "average_hash": "f71c7396419a0038203c0c30dc11fc19bc19bc030007bc079c06880780078107", "name": "Razorfen Hunter", "phash": "83276dc87c86d36273e183a778922c9a635b38e93ef38fb33c0db0e4c3307c86", "mana": 3, "whash": "ff1c7bbe43ba00b8243ccc39fc71fc79bc5bbc038407bc07dc06c8178937a107", "card_id": "CS2_196"}, {"dhash": "0a00fdff600ca339dc7c9fa3390d797ac684c4012003cb849f0f763eec70f8cd", "average_hash": "07000300c31dc030c020e2196e3f3e3b7e7b3e7b1e2c1c0c1e6cdc38d048f84f", "name": "Sap", "phash": "2b49be247ef3ceb3d38c2ecc4a2224c882d20c7199147bcaab313e1b7ba5de56", "mana": 2, "whash": "07000300c31dc130c068e279ee3f3e7b7e7b7e7b1f2c1e1c1e6cfc38d868f84f", "card_id": "EX1_581"}, {"dhash": "cacf8c9f51358beb36d44d28b092638d863af9459fcb3c977536e96c5299b13a", "average_hash": "7710f191fb91f831fc3b983f80328002c0020830c831c839e03be83bf01ef11e", "name": "Doomsayer", "phash": "a375d7716339368c5e784cb00dc2868042cc3634ee807e267b46fd19ff037f63", "mana": 2, "whash": "7710f991fbb1f833fc3bf83f9032804240021831c839c83be83ff83ff81ef11f", "card_id": "NEW1_021"}, {"dhash": "a8d50d6b02c6c800f727acd80f21c1570c8f1b3c7ed0f00b8cb578e7d24d43ba", "average_hash": "d70f734771cd408c00841a80331800801084e186c18750aff09ce05fe01fe01f", "name": "Unbound Elemental", "phash": "7982eecb3baafbc6bb2086419e641c274cd419486f3ac7c596b374b4698c3aa9", "mana": 3, "whash": "f71ffb47f3cdc0ad009c1e80379802841184e186d38770aff0bfe05fe81fe41f", "card_id": "EX1_258"}, {"dhash": "828bedffd07e03fc5cfb3d467f1cd96066c1ddb3f3632606082dbc72c88c983b", "average_hash": "07008300e305f817f00ff807f83f3c3b6c1364377c3e7c3e3818f80cb00bf00f", "name": "Imp-losion", "phash": 
"e36cbe827e33ef6c9ee9272379c690388ecd61e3e0b01a8463236ed959b6120b", "mana": 4, "whash": "07008340e105f937f00ff807f87f7c3b7c137437fc7e7c3ef838b00cb00bf00f", "card_id": "GVG_045"}, {"dhash": "3cc36d2cd27988e279453b8ee3708b331647209a5af4d5482b3bdef494091bb2", "average_hash": "c717cb71cbe1b8c3bcc31ee11ec34fc333c31382038110c018c278c31b031601", "name": "Shieldmaiden", "phash": "93c93b12bb857ee66f0407c62ef0d16443725b0c0c75cfe1399c7ca7d1e99960", "mana": 6, "whash": "cf17cf71cbe7fce7bce39ee13ec34dc3bba31382038314811cc278c31b831c01", "card_id": "GVG_053"}, {"dhash": "02800d0bf0ec60f3e76e4f3d9ef928617a8204040e0df95ae6ffc0ef3783fe2f", "average_hash": "ffff07f003800000100ff00ff80efc2f7d3f30008001800360f3007f4d1eff0f", "name": "Echo of Medivh", "phash": "db5f7e033ca4b42c074ee9ce4b79a9f083d01ab673655b1d93f052e059821976", "mana": 4, "whash": "ffff0ff003800100100ff00ff91f7d2f7d3ff9018001808360f301ff4dbeff8f", "card_id": "GVG_005"}, {"dhash": "ecf88cc1018b0b1637654c3ca1187837e7cacc35a9e911932736de6c24997930", "average_hash": "f7177b9f3b9a303c383c3c1f9c0790301033243300380c381c38583818180118", "name": "Twilight Elder", "phash": "ad65d3306f25bfd4dae013261e1a28b824b33c631a3e9bbcfcc42665332fce24", "mana": 3, "whash": "ff177fbf3bbe3cbd3c3c3c3f9d4799703173253301380c381c38583819381918", "card_id": "OG_286"}, {"dhash": "acff2ce0413dcb56b6a84ee1b1c25311840690355c6bf855e1b383654b82d739", "average_hash": "d71feb9febbcb03ff037b03bd03fc817c003c00080000010000e00060004810b", "name": "Ironbeak Owl", "phash": "b935b3154f095eca1f8349c2b659c2989bac06332c61ecf9ceac66ce66a253ce", "mana": 3, "whash": "ff9fefbf6bbfb5bff43fb43ff57fc957c143c16081400154001e001e0184810a", "card_id": "CS2_203"}, {"dhash": "aae22dc4928b05d33b266e199ce678d961b2e6c4898b17076eb42c6fd28e2d39", "average_hash": "e70fe15c13581858983c34bc343c0c3c0c3c1c397c3878303030203c801d011d", "name": "Bloodmage Thalnos", "phash": "6db95f227b2ab3cc19ce7130c3c419c252466986b6645bc17f0659c3ef06ff0c", "mana": 2, "whash": 
"ef1ff77c5bfcb878bc7c3cfc347c0c7c0c3c5c397c3878313030a03c811d011d", "card_id": "EX1_012"}, {"dhash": "088eddf830e0ee9fdd7f3ac801fc76f6fbfcf7512f824ec4998ff313a3274e4e", "average_hash": "07000300010200040800fe00fe00fe20fe7dfe7dfe2dfe00fe607e0e4e4e5e0f", "name": "Explosive Shot", "phash": "0742fcbd5c6e6793a9cc362413b0654a24dbd844ad3219a863d9b7e663b55a97", "mana": 5, "whash": "070003000106002c1860fe60fe12fe31fe7dfe7dfe7dfe05fe60fe0e5e4e5e0f", "card_id": "EX1_537"}, {"dhash": "08d34c2e917a02f4b5f04f0cb9f47281e72a8ee57d4bb09601371e6c7883c13c", "average_hash": "f70efb8ff903f807f007dc07d402e432c0139003b807a80b081a70107004910c", "name": "Master of Ceremonies", "phash": "f37773826f929f0d7ff22c64193162e0316af3388cb11a3b3b8d70ccf828e2e0", "mana": 3, "whash": "f71efb8ff987f807f807dc1ff433fc32d03bb80ff80fa82b483a70087004910c", "card_id": "AT_117"}, {"dhash": "28f9ccd201250b8036ac5c5dbd346069e3e2cfc5b60b01175f33fc6efc8ff13f", "average_hash": "f70fd30ef107e01780118011343928003c003812383f381eb81ff01ff81ff81f", "name": "Raid Leader", "phash": "e3659e447b40cfe7f34a3c1819a132a01e1a3990bbf0b33cfb92d6b32d698999", "mana": 3, "whash": "f70ff10ff10ff01780155011b43938203c32f812783f3c1ef81ff01ff81ff81f", "card_id": "CS2_122"}, {"dhash": "f63df0e7a69b297b5546aa14dc2837c15c8caeea41153888e59a0c510286fd7f", "average_hash": "03a801df805fb03bd83f782f7c357d36701ea80fd8016801280734000000f9af", "name": "Fool's Bane", "phash": "c9cdfdc17c3b7e32ee5ed6de9b479a4b6d05263783b03498c5e29806018866a3", "mana": 5, "whash": "03ac03dfc05ff03bf8bf78af7cbd7dbff01eac0fda096a05680724000000f9bf", "card_id": "KAR_028"}, {"dhash": "08fe6d7882dd05a2bb616c1e9b9c7421e0c20664f9cbd3973f37706cc2908539", "average_hash": "f707e367914fb04fe05f0cbfdc3fbc3d3c312001180f381dd839c801c001c119", "name": "Stalagg", "phash": "a99bdb203e48c640b61405865c8c3258c371c3e33be3d6dddfd9ede36c4e4d06", "mana": 5, "whash": "ef07e75fd14ff05fe03f0cbfdc3fbc3d3c3d2001181f3c1dd839c811c001c119", "card_id": "FP1_014"}, {"dhash": 
"9cfe2de02203452dbacbe09487694f3393f23ea56c0adc97293783688081053b", "average_hash": "4709e37f03de007840c348837801980bb80bbc0ffc0fcc1fcc3fc00740030103", "name": "The Skeleton Knight", "phash": "9bcd6c887c242b33de09318486cc92302ced6663e6329edddea7cdcd4c23d6c9", "mana": 6, "whash": "4f1de77f03fe0478407348e37941f849bd0bfc0ffd0fcc3fd83fd007c1030103", "card_id": "AT_128"}, {"dhash": "f8affddea1a8ef53dca53c4a733e45dcc63999f129e37ee6d18c3339e761decf", "average_hash": "07008301c111cc7c3e663e731e771e605e621e600e618e718e60ce60ce60fe46", "name": "Charge", "phash": "9722e8357873bed14648be10cf385642691424de2cb3336f27cd9641cbd9db66", "mana": 3, "whash": "07008303d139ec7cfe66be771e771e605e621e611e618e71ce61ce60ce60fe47", "card_id": "CS2_103"}, {"dhash": "bcce6c1981fc4ab7b54c5b01be6878b5b75221e5480ab314e7b9de6f698dcd38", "average_hash": "87118b93cb83f02ff02ff80f783fd83ffc1fb40a180808033009b01e801ec919", "name": "Gadgetzan Auctioneer", "phash": "cb699f943e21de283f27cd80020861b034912cf3c6f83b3cc7c336dbcf26ced9", "mana": 6, "whash": "87158bbbcba7d0aff02ff80f3c7fd47ffc5fb40e18082c033009b01e811fc919", "card_id": "EX1_095"}, {"dhash": "eadc6d83d3670c9b772c2ef8588cf1f800d307016f049c083f39fee26c41b13d", "average_hash": "e701610afb007846b838b8b87c301cb07c007d00f880f800f8c0f800f800f801", "name": "Tiny Knight of Evil", "phash": "e387bdbcfbe25ece8e9c2ca53911c36188c09807738c5603ff439e113e26f338", "mana": 2, "whash": "e7137b5efb42f86efcbabeb87cb01eb07c807f80fc80fc00fcc0f800f881f801", "card_id": "AT_021"}, {"dhash": "03c00d18e0f0dbc2fc07d11f82fc7cf5a37a0ecd1af747ce99382360fcc978bf", "average_hash": "ffff0bf903cc043e067e03fc05ec03f401f99dffcbe7eee0fcc019801981f9a7", "name": "Confuse", "phash": "9d7f2d88d3cd33abe1cd663209528b6921f0caf44b7673d87a817b227e929308", "mana": 2, "whash": "ffff0bf303dc047e027e03fc07ec03f401f185ff82e7eee0ecc018801980f9a7", "card_id": "AT_016"}, {"dhash": "f83ffdffe17fc3df34a38dcc7b90d7213fc3e6069d0fe03c903f087e11f9eee6", "average_hash": 
"07000300f91ffc37fc17fc17fa17f00ff01ff03fe07fe01fc03fc03f6036e00f", "name": "Vanish", "phash": "e9b417a13c4e7c4ade491ea08724203c187294c4afd10e1ccb7ba0bf6bedff4a", "mana": 6, "whash": "07000300f91ffc37fc17fc17fa17f00ff01ff03fe07fe01fc03fc03e4036a00f", "card_id": "NEW1_004"}, {"dhash": "6cdccc3891d90ab335e6538ea7184eb1986233c5cc8a1914f338c6739985f732", "average_hash": "b7137ba71ba798079c0f9c0f9c0fb80fb80cbc0dbc099c011800100000000100", "name": "Volcanic Drake", "phash": "c373db90bc84e7216f2498243b2d203b02b38c6d0b9bfcb0db339e1db989ceb9", "mana": 6, "whash": "bf977fa73ba79c8f9c0f9c0fbd4fbd4fbd4cfd4dbd49bd411c01108801800100", "card_id": "BRM_025"}, {"dhash": "1adeac2d41facae5b54e5239a7425e17b9eee6dd99a97a477bb6846ce1990733", "average_hash": "07900fa183a100a7004f600370422077907d10391033103b90339032c1114113", "name": "Evil Heckler", "phash": "b9f5df1a3e17aeb16622e364cbcc08e389c11c8ce3e033c9fe2433436c08cbf3", "mana": 4, "whash": "0f900fa183a544a7048fe5437147617f937f11791173917fd073f4bac1b1c33b", "card_id": "AT_114"}, {"dhash": "567629cd84927f35d2cfa4ca5934022120d6c6b809c55b18b33004e1d840733f", "average_hash": "fb7ff9fdd8bbf4fbbe99beb93fb31d000d1007b00fa038803880188012000638", "name": "Cloaked Huntress", "phash": "8719399979dbf3dbfe3ec3333d30bcb4c3eb960e8d548264286e12418b079c94", "mana": 3, "whash": "ff7ff9ffd8bfd4ffbef9bebd2fb31d000d3007f00fa03c803c80188012000638", "card_id": "KAR_006"}, {"dhash": "84dd4d6bd29e0c693bfa76f08864539927220c04f80aa04512b8a5734387873c", "average_hash": "f703fb67fbc9fcd1fcd0fcda7c51ec11dc031803000100008000000001000100", "name": "Mukla, Tyrant of the Vale", "phash": "b38fe38cfc129e409f942614cb218260202526bc92c847ed77e77ef36f39363b", "mana": 6, "whash": "ff53ff6ffbe9fef9fcf0fcdafd5bedd1ddc3dd0399410140800000800180012c", "card_id": "OG_122"}, {"dhash": "48faec6c21914b66b7cc4a89b4126827c64e0c1d48ead04423b80670cb83b72c", "average_hash": "9fbe8fbfb7bfb5ffb4ff337f33ff33f81373036b03e901e0044001c005800320", "name": "Knife Juggler", "phash": 
"dd79c9714f175e87ffa6462209829064242526f19b48db407f127f03ff01bf6c", "mana": 2, "whash": "9ffe0fbfb3bfb7ffb4ff317f33ff11f8137303eb03c901e00440058005800320", "card_id": "NEW1_019"}, {"dhash": "6cdf4d01d24a0894fb48b4b7e63e8bfb1ce5798ae314c3892e3a8cf11c037bae", "average_hash": "f717fb6f7bc068c4205c024a06c839ce3b8e3b8e7a8f200d30c770c23a063806", "name": "Sea Reaver", "phash": "cb8a3f0bb3f45b23cf623c7808e0c930243966981e97769d3cafd6b46c694749", "mana": 6, "whash": "f717fb6f7bc178e5245c064206cc3dce3b8e3b8e7b8f208d28c770c23a863c06", "card_id": "AT_130"}]}
| 6,429.666667
| 345,395
| 0.847095
| 16,870
| 347,202
| 17.267398
| 0.320095
| 0.019011
| 0.007044
| 0.000192
| 0.000275
| 0.000275
| 0.000199
| 0.000199
| 0
| 0
| 0
| 0.470583
| 0.040734
| 347,202
| 53
| 345,396
| 6,550.981132
| 0.40404
| 0.001578
| 0
| 0
| 0
| 0
| 0.845671
| 0.679827
| 0
| 1
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0.043478
| 0.086957
| 0
| 0.304348
| 0.043478
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8431e4302be3cd87914505146a030a24f59e6144
| 9,383
|
py
|
Python
|
consume/views.py
|
majorblackc/LLINS-Hackathon-g5
|
9deb1ee64a05c97adfe7f7796f7e04a4fbd3a4f6
|
[
"Apache-2.0"
] | 4
|
2022-02-05T07:26:55.000Z
|
2022-02-09T05:56:57.000Z
|
consume/views.py
|
majorblackc/LLINS-Hackathon-g5
|
9deb1ee64a05c97adfe7f7796f7e04a4fbd3a4f6
|
[
"Apache-2.0"
] | null | null | null |
consume/views.py
|
majorblackc/LLINS-Hackathon-g5
|
9deb1ee64a05c97adfe7f7796f7e04a4fbd3a4f6
|
[
"Apache-2.0"
] | 2
|
2022-02-05T03:56:36.000Z
|
2022-02-05T07:25:10.000Z
|
import json
import urllib
import urllib.request  # bug fix: bare "import urllib" does not bind the .request submodule in Python 3

from django.shortcuts import render

# Create your views here.

# Upstream hackathon API endpoints (ephemeral ngrok tunnels).
url = 'https://c630-41-89-192-24.ngrok.io/patients'
url2 = 'https://c630-41-89-192-24.ngrok.io/nets'
def dashboard(request):
    """Render the 2019 overview dashboard for all counties.

    Fetches patient records and net-distribution records from the upstream
    API, keeps only rows where Year == 2019, builds the per-row chart
    series, and accumulates three headline totals:

    - pending:  patients still without a net (sum of PatientswithoutNets)
    - issued:   nets actually issued (sum of netsIssued)
    - budgeted: nets provided/budgeted (sum of netsProvided)
    """
    # NOTE(review): both endpoints are unauthenticated ngrok tunnels —
    # presumably a hackathon stand-in for a real backend; confirm before
    # any production use.
    patientdata = json.loads(urllib.request.urlopen(url).read())
    netsdata = json.loads(urllib.request.urlopen(url2).read())

    # Patient-side series (one entry per matching row).
    labels, years, months, counties = [], [], [], []
    PatientsReceived2019, PatientsNets2019, PatientswithoutNets2019 = [], [], []
    # Nets-side series.
    labels2, years2, months2, counties2 = [], [], [], []
    netsIssued, netsProvided = [], []
    budgeted = issued = pending = 0

    for data in patientdata:
        if data['Year'] == 2019:
            # Label format matches the templates: "County(Month)".
            labels.append(data['County'] + '(' + str(data['Month']) + ')')
            years.append(data['Year'])
            counties.append(data['County'])
            months.append(data['Month'])
            PatientsReceived2019.append(data['PatientsReceived'])
            PatientsNets2019.append(data['PatientsNets'])
            PatientswithoutNets2019.append(data['PatientswithoutNets'])
            pending += data['PatientswithoutNets']

    for data in netsdata:
        if data['Year'] == 2019:
            labels2.append(data['County'] + '(' + str(data['Month']) + ')')
            years2.append(data['Year'])
            counties2.append(data['County'])
            months2.append(data['Month'])
            netsIssued.append(data['netsIssued'])
            netsProvided.append(data['netsProvided'])
            issued += data['netsIssued']
            budgeted += data['netsProvided']

    return render(request, 'dashboard.html', {
        'datas': patientdata,
        'labels': labels, 'years': years, 'months': months, 'counties': counties,
        'PatientsReceived2019': PatientsReceived2019,
        'PatientsNets2019': PatientsNets2019,
        'PatientswithoutNets2019': PatientswithoutNets2019,
        'datas2': netsdata,
        'labels2': labels2, 'years2': years2, 'months2': months2, 'counties2': counties2,
        'netsIssued': netsIssued, 'netsProvided': netsProvided,
        'issued': issued, 'budgeted': budgeted, 'pending': pending,
    })
def kisumu(request):
    """Render the 2019 dashboard restricted to Kisumu county.

    Same aggregation as dashboard(), but only rows with Year == 2019 AND
    County == 'kisumu' (lowercase, as stored upstream) contribute to the
    series and to the issued/budgeted/pending totals.
    """
    patientdata = json.loads(urllib.request.urlopen(url).read())
    netsdata = json.loads(urllib.request.urlopen(url2).read())

    labels, years, months, counties = [], [], [], []
    PatientsReceived2019, PatientsNets2019, PatientswithoutNets2019 = [], [], []
    labels2, years2, months2, counties2 = [], [], [], []
    netsIssued, netsProvided = [], []
    budgeted = issued = pending = 0

    for data in patientdata:
        # Compound condition replaces the original nested ifs; behavior
        # is identical.
        if data['Year'] == 2019 and data['County'] == 'kisumu':
            labels.append(data['County'] + '(' + str(data['Month']) + ')')
            years.append(data['Year'])
            counties.append(data['County'])
            months.append(data['Month'])
            PatientsReceived2019.append(data['PatientsReceived'])
            PatientsNets2019.append(data['PatientsNets'])
            PatientswithoutNets2019.append(data['PatientswithoutNets'])
            pending += data['PatientswithoutNets']

    for data in netsdata:
        if data['Year'] == 2019 and data['County'] == 'kisumu':
            labels2.append(data['County'] + '(' + str(data['Month']) + ')')
            years2.append(data['Year'])
            counties2.append(data['County'])
            months2.append(data['Month'])
            netsIssued.append(data['netsIssued'])
            netsProvided.append(data['netsProvided'])
            issued += data['netsIssued']
            budgeted += data['netsProvided']

    return render(request, 'kisumu.html', {
        'datas': patientdata,
        'labels': labels, 'years': years, 'months': months, 'counties': counties,
        'PatientsReceived2019': PatientsReceived2019,
        'PatientsNets2019': PatientsNets2019,
        'PatientswithoutNets2019': PatientswithoutNets2019,
        'datas2': netsdata,
        'labels2': labels2, 'years2': years2, 'months2': months2, 'counties2': counties2,
        'netsIssued': netsIssued, 'netsProvided': netsProvided,
        'issued': issued, 'budgeted': budgeted, 'pending': pending,
    })
def vihiga(request):
    """Render the 2019 dashboard restricted to Vihiga county."""
    patient_response = urllib.request.urlopen(url)
    nets_response = urllib.request.urlopen(url2)
    patientdata = json.loads(patient_response.read())
    netsdata = json.loads(nets_response.read())

    labels = []
    years = []
    months = []
    counties = []
    PatientsReceived2019 = []
    PatientsNets2019 = []
    PatientswithoutNets2019 = []
    labels2 = []
    years2 = []
    months2 = []
    counties2 = []
    netsIssued = []
    netsProvided = []
    budgeted = 0
    issued = 0
    pending = 0

    for row in patientdata:
        # Skip everything except 2019 rows for this county.
        if not (row['Year'] == 2019 and row['County'] == 'vihiga'):
            continue
        labels.append(row['County'] + '(' + str(row['Month']) + ')')
        years.append(row['Year'])
        counties.append(row['County'])
        months.append(row['Month'])
        PatientsReceived2019.append(row['PatientsReceived'])
        PatientsNets2019.append(row['PatientsNets'])
        PatientswithoutNets2019.append(row['PatientswithoutNets'])
        pending += row['PatientswithoutNets']

    for row in netsdata:
        if not (row['Year'] == 2019 and row['County'] == 'vihiga'):
            continue
        labels2.append(row['County'] + '(' + str(row['Month']) + ')')
        years2.append(row['Year'])
        counties2.append(row['County'])
        months2.append(row['Month'])
        netsIssued.append(row['netsIssued'])
        netsProvided.append(row['netsProvided'])
        issued += row['netsIssued']
        budgeted += row['netsProvided']

    context = {
        'datas': patientdata,
        'labels': labels, 'years': years, 'months': months, 'counties': counties,
        'PatientsReceived2019': PatientsReceived2019,
        'PatientsNets2019': PatientsNets2019,
        'PatientswithoutNets2019': PatientswithoutNets2019,
        'datas2': netsdata,
        'labels2': labels2, 'years2': years2, 'months2': months2, 'counties2': counties2,
        'netsIssued': netsIssued, 'netsProvided': netsProvided,
        'issued': issued, 'budgeted': budgeted, 'pending': pending,
    }
    return render(request, 'vihiga.html', context)
def busia(request):
    """Render the 2019 dashboard restricted to Busia county."""
    patientdata = json.loads(urllib.request.urlopen(url).read())
    netsdata = json.loads(urllib.request.urlopen(url2).read())

    # Pre-filter both datasets to the rows that feed the charts.
    patient_rows = [
        r for r in patientdata
        if r['Year'] == 2019 and r['County'] == 'busia'
    ]
    nets_rows = [
        r for r in netsdata
        if r['Year'] == 2019 and r['County'] == 'busia'
    ]

    labels = [r['County'] + '(' + str(r['Month']) + ')' for r in patient_rows]
    years = [r['Year'] for r in patient_rows]
    counties = [r['County'] for r in patient_rows]
    months = [r['Month'] for r in patient_rows]
    PatientsReceived2019 = [r['PatientsReceived'] for r in patient_rows]
    PatientsNets2019 = [r['PatientsNets'] for r in patient_rows]
    PatientswithoutNets2019 = [r['PatientswithoutNets'] for r in patient_rows]
    pending = sum(PatientswithoutNets2019, 0)

    labels2 = [r['County'] + '(' + str(r['Month']) + ')' for r in nets_rows]
    years2 = [r['Year'] for r in nets_rows]
    counties2 = [r['County'] for r in nets_rows]
    months2 = [r['Month'] for r in nets_rows]
    netsIssued = [r['netsIssued'] for r in nets_rows]
    netsProvided = [r['netsProvided'] for r in nets_rows]
    issued = sum(netsIssued, 0)
    budgeted = sum(netsProvided, 0)

    return render(request, 'busia.html', {
        'datas': patientdata,
        'labels': labels, 'years': years, 'months': months, 'counties': counties,
        'PatientsReceived2019': PatientsReceived2019,
        'PatientsNets2019': PatientsNets2019,
        'PatientswithoutNets2019': PatientswithoutNets2019,
        'datas2': netsdata,
        'labels2': labels2, 'years2': years2, 'months2': months2, 'counties2': counties2,
        'netsIssued': netsIssued, 'netsProvided': netsProvided,
        'issued': issued, 'budgeted': budgeted, 'pending': pending,
    })
| 27.925595
| 134
| 0.5932
| 768
| 9,383
| 7.247396
| 0.109375
| 0.079051
| 0.028746
| 0.020122
| 0.911966
| 0.900826
| 0.896515
| 0.896515
| 0.887531
| 0.863996
| 0
| 0.056152
| 0.26548
| 9,383
| 335
| 135
| 28.008955
| 0.751451
| 0.010018
| 0
| 0.902655
| 0
| 0
| 0.168246
| 0.00991
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017699
| false
| 0
| 0.013274
| 0
| 0.048673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
08436ce6ef107134335bf66875a92a6cddee444f
| 3,901
|
py
|
Python
|
forms/migrations/0004_auto_20150309_1358.py
|
opendatadurban/gmmp
|
cc64fdedcf6e04b0377dc8ad7a7d34bae17ec575
|
[
"Apache-2.0"
] | 4
|
2020-01-05T09:14:19.000Z
|
2022-02-17T03:22:09.000Z
|
forms/migrations/0004_auto_20150309_1358.py
|
opendatadurban/gmmp
|
cc64fdedcf6e04b0377dc8ad7a7d34bae17ec575
|
[
"Apache-2.0"
] | 68
|
2019-12-23T02:19:55.000Z
|
2021-04-23T06:13:36.000Z
|
forms/migrations/0004_auto_20150309_1358.py
|
OpenUpSA/gmmp
|
d82a4be0787c3a3a9e27dc590d7974f9f884fbb6
|
[
"Apache-2.0"
] | 2
|
2019-07-25T11:53:10.000Z
|
2020-06-22T02:07:40.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Remove superseded fields from the per-medium person models."""

    dependencies = [
        ('forms', '0003_auto_20150309_1340'),
    ]

    # (model_name, field_name) pairs to drop. Expanded below into
    # individual RemoveField operations in exactly the order of the
    # original hand-written list, so the migration state is unchanged.
    _REMOVALS = (
        ('internetnewsperson', 'age'),
        ('internetnewsperson', 'is_photograph'),
        ('internetnewsperson', 'is_quoted'),
        ('internetnewsperson', 'occupation_other'),
        ('internetnewsperson', 'survivor_comments'),
        ('internetnewsperson', 'victim_comments'),
        ('newspaperperson', 'occupation_other'),
        ('newspaperperson', 'survivor_comments'),
        ('newspaperperson', 'victim_comments'),
        ('radioperson', 'age'),
        ('radioperson', 'is_photograph'),
        ('radioperson', 'is_quoted'),
        ('radioperson', 'occupation_other'),
        ('radioperson', 'survivor_comments'),
        ('radioperson', 'victim_comments'),
        ('televisionperson', 'is_photograph'),
        ('televisionperson', 'is_quoted'),
        ('televisionperson', 'occupation_other'),
        ('televisionperson', 'survivor_comments'),
        ('televisionperson', 'victim_comments'),
        ('twitterperson', 'age'),
        ('twitterperson', 'family_role'),
        ('twitterperson', 'function'),
        ('twitterperson', 'is_quoted'),
        ('twitterperson', 'occupation'),
        ('twitterperson', 'occupation_other'),
        ('twitterperson', 'survivor_comments'),
        ('twitterperson', 'survivor_of'),
        ('twitterperson', 'victim_comments'),
        ('twitterperson', 'victim_of'),
        ('twitterperson', 'victim_or_survivor'),
    )

    operations = [
        migrations.RemoveField(model_name=model, name=field)
        for model, field in _REMOVALS
    ]
| 28.064748
| 45
| 0.533453
| 269
| 3,901
| 7.490706
| 0.159851
| 0.323077
| 0.4
| 0.461538
| 0.909181
| 0.909181
| 0.749876
| 0.114144
| 0.064516
| 0
| 0
| 0.006849
| 0.363753
| 3,901
| 138
| 46
| 28.268116
| 0.804996
| 0.005383
| 0
| 0.893939
| 0
| 0
| 0.222022
| 0.005931
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015152
| 0
| 0.037879
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f24ccc6b7533be4baea2c8145ba35ca5586e0ab6
| 10,724
|
py
|
Python
|
glocaltokens/google/internal/home/foyer/v1_pb2_grpc.py
|
DurgNomis-drol/glocaltokens
|
f2f4c960db243588e01a6be612bc743c748d60a5
|
[
"MIT"
] | 17
|
2021-01-05T20:43:03.000Z
|
2022-02-06T04:17:43.000Z
|
glocaltokens/google/internal/home/foyer/v1_pb2_grpc.py
|
DurgNomis-drol/glocaltokens
|
f2f4c960db243588e01a6be612bc743c748d60a5
|
[
"MIT"
] | 80
|
2021-01-04T15:48:55.000Z
|
2022-03-01T12:10:20.000Z
|
glocaltokens/google/internal/home/foyer/v1_pb2_grpc.py
|
DurgNomis-drol/glocaltokens
|
f2f4c960db243588e01a6be612bc743c748d60a5
|
[
"MIT"
] | 9
|
2021-01-29T16:40:04.000Z
|
2021-12-01T14:25:56.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
from __future__ import annotations
import grpc
from glocaltokens.google.internal.home.foyer import (
v1_pb2 as glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2,
)
# NOTE(review): generated gRPC code — regenerate from the .proto instead of
# hand-editing; comments here are review annotations only.
class HomeControlServiceStub:
    """Home Control Service"""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Server-streaming RPC: one request in, a stream of responses back.
        self.GetAssistantRoutines = channel.unary_stream(
            "/google.internal.home.foyer.v1.HomeControlService/GetAssistantRoutines",
            request_serializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantRoutinesRequest.SerializeToString,
            response_deserializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantRoutinesResponse.FromString,
        )
class HomeControlServiceServicer:
    """Home Control Service"""

    def GetAssistantRoutines(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Generated default: concrete servicers override this method;
        # otherwise the call reports UNIMPLEMENTED and raises locally.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_HomeControlServiceServicer_to_server(servicer, server):
    # Register the servicer's RPC handlers under the full protobuf
    # service name so the server can route incoming calls to it.
    rpc_method_handlers = {
        "GetAssistantRoutines": grpc.unary_stream_rpc_method_handler(
            servicer.GetAssistantRoutines,
            request_deserializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantRoutinesRequest.FromString,
            response_serializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantRoutinesResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.internal.home.foyer.v1.HomeControlService", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class HomeControlService:
    """Home Control Service"""

    @staticmethod
    def GetAssistantRoutines(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # One-shot convenience wrapper from grpc's experimental API:
        # invokes the server-streaming RPC against `target` without a
        # pre-built channel/stub.
        return grpc.experimental.unary_stream(
            request,
            target,
            "/google.internal.home.foyer.v1.HomeControlService/GetAssistantRoutines",
            glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantRoutinesRequest.SerializeToString,
            glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantRoutinesResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
class StructuresServiceStub:
    """Structure Service"""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary RPC: single request, single response.
        self.GetHomeGraph = channel.unary_unary(
            "/google.internal.home.foyer.v1.StructuresService/GetHomeGraph",
            request_serializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetHomeGraphRequest.SerializeToString,
            response_deserializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetHomeGraphResponse.FromString,
        )
class StructuresServiceServicer:
    """Structure Service"""

    def GetHomeGraph(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Generated default: override in a concrete servicer; otherwise
        # the call reports UNIMPLEMENTED and raises locally.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_StructuresServiceServicer_to_server(servicer, server):
    # Register the servicer's RPC handlers under the full protobuf
    # service name so the server can route incoming calls to it.
    rpc_method_handlers = {
        "GetHomeGraph": grpc.unary_unary_rpc_method_handler(
            servicer.GetHomeGraph,
            request_deserializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetHomeGraphRequest.FromString,
            response_serializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetHomeGraphResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.internal.home.foyer.v1.StructuresService", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class StructuresService:
    """Structure Service"""

    @staticmethod
    def GetHomeGraph(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        # One-shot convenience wrapper from grpc's experimental API:
        # invokes the unary RPC against `target` without a pre-built
        # channel/stub.
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.internal.home.foyer.v1.StructuresService/GetHomeGraph",
            glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetHomeGraphRequest.SerializeToString,
            glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetHomeGraphResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
class HomeDevicesServiceStub:
    """Home Devices Service"""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Both RPCs are server-streaming: one request, a stream of
        # responses back.
        self.GetAssistantDeviceSettings = channel.unary_stream(
            "/google.internal.home.foyer.v1.HomeDevicesService/GetAssistantDeviceSettings",
            request_serializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantDeviceSettingsRequest.SerializeToString,
            response_deserializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.GetAssistantDeviceSettingsResponse.FromString,
        )
        self.UpdateAssistantDeviceSettings = channel.unary_stream(
            "/google.internal.home.foyer.v1.HomeDevicesService/UpdateAssistantDeviceSettings",
            request_serializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.UpdateAssistantDeviceSettingsRequest.SerializeToString,
            response_deserializer=glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2.UpdateAssistantDeviceSettingsResponse.FromString,
        )
class HomeDevicesServiceServicer:
    """Home Devices Service"""

    def GetAssistantDeviceSettings(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Generated default: override in a concrete servicer; otherwise
        # the call reports UNIMPLEMENTED and raises locally.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateAssistantDeviceSettings(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Generated default, same contract as above.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_HomeDevicesServiceServicer_to_server(servicer, server):
    """Register *servicer*'s Home Devices Service handlers on *server*."""
    messages = glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2
    # Both RPCs are unary-stream; build one handler per method.
    get_settings_handler = grpc.unary_stream_rpc_method_handler(
        servicer.GetAssistantDeviceSettings,
        request_deserializer=messages.GetAssistantDeviceSettingsRequest.FromString,
        response_serializer=messages.GetAssistantDeviceSettingsResponse.SerializeToString,
    )
    update_settings_handler = grpc.unary_stream_rpc_method_handler(
        servicer.UpdateAssistantDeviceSettings,
        request_deserializer=messages.UpdateAssistantDeviceSettingsRequest.FromString,
        response_serializer=messages.UpdateAssistantDeviceSettingsResponse.SerializeToString,
    )
    generic_handler = grpc.method_handlers_generic_handler(
        "google.internal.home.foyer.v1.HomeDevicesService",
        {
            "GetAssistantDeviceSettings": get_settings_handler,
            "UpdateAssistantDeviceSettings": update_settings_handler,
        },
    )
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class HomeDevicesService:
    """One-shot client helpers for the Home Devices Service.

    Each static method performs a single unary-stream call through
    grpc.experimental without requiring a long-lived stub.
    """

    @staticmethod
    def GetAssistantDeviceSettings(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Invoke the GetAssistantDeviceSettings RPC against *target*."""
        messages = glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2
        return grpc.experimental.unary_stream(
            request,
            target,
            "/google.internal.home.foyer.v1.HomeDevicesService/GetAssistantDeviceSettings",
            messages.GetAssistantDeviceSettingsRequest.SerializeToString,
            messages.GetAssistantDeviceSettingsResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )

    @staticmethod
    def UpdateAssistantDeviceSettings(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Invoke the UpdateAssistantDeviceSettings RPC against *target*."""
        messages = glocaltokens_dot_google_dot_internal_dot_home_dot_foyer_dot_v1__pb2
        return grpc.experimental.unary_stream(
            request,
            target,
            "/google.internal.home.foyer.v1.HomeDevicesService/UpdateAssistantDeviceSettings",
            messages.UpdateAssistantDeviceSettingsRequest.SerializeToString,
            messages.UpdateAssistantDeviceSettingsResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
| 38.714801
| 156
| 0.718482
| 1,011
| 10,724
| 7.183976
| 0.117705
| 0.017899
| 0.072284
| 0.08261
| 0.832576
| 0.832576
| 0.825279
| 0.750792
| 0.744871
| 0.728074
| 0
| 0.007496
| 0.216337
| 10,724
| 276
| 157
| 38.855072
| 0.856735
| 0.076651
| 0
| 0.616505
| 1
| 0
| 0.101253
| 0.079072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067961
| false
| 0
| 0.014563
| 0.019417
| 0.145631
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f24df20b375c4f83335b655f37a58dd7f0a837ab
| 4,486
|
py
|
Python
|
computational_block/one/tests/fixtures.py
|
spectrum-dev/django-block-monolith
|
c17a1ef98ae813a4e94581e2e52a4a03f0e65769
|
[
"MIT"
] | null | null | null |
computational_block/one/tests/fixtures.py
|
spectrum-dev/django-block-monolith
|
c17a1ef98ae813a4e94581e2e52a4a03f0e65769
|
[
"MIT"
] | null | null | null |
computational_block/one/tests/fixtures.py
|
spectrum-dev/django-block-monolith
|
c17a1ef98ae813a4e94581e2e52a4a03f0e65769
|
[
"MIT"
] | null | null | null |
# Daily OHLCV fixture: one bar per day 01/01-01/05/2020; every price field
# of a bar shares the same string value, rising from "10.00" to "14.00".
DATA_BLOCK = [
    {
        "timestamp": "01/%02d/2020" % day,
        "timezone": "UTC/EST",
        "open": price,
        "high": price,
        "low": price,
        "close": price,
        "volume": price,
    }
    for day, price in enumerate(("10.00", "11.00", "12.00", "13.00", "14.00"), start=1)
]
# Intraday fixture: five 10-minute bars on 01/01/2020 (09:30-10:10),
# prices rising "10.00" -> "14.00"; all price fields of a bar are equal.
INTRADAY_DATA_BLOCK = [
    {
        "timestamp": "01/01/2020T%s:00.000000000" % clock,
        "timezone": "UTC/EST",
        "open": price,
        "high": price,
        "low": price,
        "close": price,
        "volume": price,
    }
    for clock, price in zip(
        ("09:30", "09:40", "09:50", "10:00", "10:10"),
        ("10.00", "11.00", "12.00", "13.00", "14.00"),
    )
]
# Two-session intraday fixture: five 10-minute bars per day. Day 01/01
# rises "10.00" -> "14.00"; day 01/02 moves "15.00", "14.00", "13.00",
# "13.00", "14.00". All price fields of a bar share one value.
INTRADAY_TWO_DAYS_DATA_BLOCK = [
    {
        "timestamp": "01/%s/2020T%s:00.000000000" % (day, clock),
        "timezone": "UTC/EST",
        "open": price,
        "high": price,
        "low": price,
        "close": price,
        "volume": price,
    }
    for day, session_prices in (
        ("01", ("10.00", "11.00", "12.00", "13.00", "14.00")),
        ("02", ("15.00", "14.00", "13.00", "13.00", "14.00")),
    )
    for clock, price in zip(("09:30", "09:40", "09:50", "10:00", "10:10"), session_prices)
]
| 23.610526
| 53
| 0.391663
| 495
| 4,486
| 3.535354
| 0.066667
| 0.057143
| 0.16
| 0.205714
| 0.970857
| 0.938286
| 0.938286
| 0.938286
| 0.914286
| 0.914286
| 0
| 0.273424
| 0.360009
| 4,486
| 189
| 54
| 23.73545
| 0.33612
| 0
| 0
| 0.672043
| 0
| 0
| 0.424654
| 0.096968
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f27c45439000e773cfc51c8563b7700733e68359
| 88
|
py
|
Python
|
ips/ip/button_hold/__init__.py
|
zld012739/zldrepository
|
5635b78a168956091676ef4dd99fa564be0e5ba0
|
[
"MIT"
] | null | null | null |
ips/ip/button_hold/__init__.py
|
zld012739/zldrepository
|
5635b78a168956091676ef4dd99fa564be0e5ba0
|
[
"MIT"
] | null | null | null |
ips/ip/button_hold/__init__.py
|
zld012739/zldrepository
|
5635b78a168956091676ef4dd99fa564be0e5ba0
|
[
"MIT"
] | null | null | null |
from button_hold_partial import get_ip_name
from button_hold_partial import BUTTON_HOLD
| 29.333333
| 43
| 0.909091
| 15
| 88
| 4.866667
| 0.533333
| 0.410959
| 0.383562
| 0.575342
| 0.739726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 44
| 44
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4b07f086f9a673b0711c94fa37fa8af8974364f0
| 14,566
|
py
|
Python
|
setup.py
|
JustinSGray/OpenMDAO-CADRE
|
d8378a8a571179990531d8a409efe727cbdf2bb7
|
[
"Apache-2.0"
] | 1
|
2021-07-11T19:15:22.000Z
|
2021-07-11T19:15:22.000Z
|
setup.py
|
JustinSGray/OpenMDAO-CADRE
|
d8378a8a571179990531d8a409efe727cbdf2bb7
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
JustinSGray/OpenMDAO-CADRE
|
d8378a8a571179990531d8a409efe727cbdf2bb7
|
[
"Apache-2.0"
] | 1
|
2015-11-19T18:18:01.000Z
|
2015-11-19T18:18:01.000Z
|
#
# This file is autogenerated during plugin quickstart and overwritten during
# plugin makedist. DO NOT CHANGE IT if you plan to use plugin makedist to update
# the distribution.
#
from setuptools import setup, find_packages
# Keyword arguments for setup(). Autogenerated by plugin quickstart/makedist
# (see file header): regenerate rather than editing by hand.
kwargs = {
    'author': 'Tristan A. Hearn',
    'author_email': 'tristan.a.hearn@nasa.gov',
    'classifiers': ['Intended Audience :: Science/Research',
                    'Topic :: Scientific/Engineering'],
    'description': 'OpenMDAO implementation of the CADRE CubeSat design problem',
    'download_url': '',
    # INI-style entry-point text advertising every CADRE component/container
    # to OpenMDAO's plugin machinery, one "name=module:attr" line each.
    # (Adjacent literals concatenate into a single string.)
    'entry_points': u'[openmdao.component]\n'
                    u'CADRE.solar.Solar_ExposedArea=CADRE.solar:Solar_ExposedArea\n'
                    u'CADRE.comm.Comm_GainPattern=CADRE.comm:Comm_GainPattern\n'
                    u'CADRE.orbit.Orbit_Dynamics=CADRE.orbit:Orbit_Dynamics\n'
                    u'CADRE.attitude.Attitude_Attitude=CADRE.attitude:Attitude_Attitude\n'
                    u'CADRE.comm.Comm_VectorECI=CADRE.comm:Comm_VectorECI\n'
                    u'CADRE.attitude.Attitude_Angular=CADRE.attitude:Attitude_Angular\n'
                    u'CADRE.reactionwheel.ReactionWheel_Power=CADRE.reactionwheel:ReactionWheel_Power\n'
                    u'CADRE.comm.Comm_BitRate=CADRE.comm:Comm_BitRate\n'
                    u'CADRE.orbit.Orbit_Initial=CADRE.orbit:Orbit_Initial\n'
                    u'CADRE.reactionwheel.ReactionWheel_Torque=CADRE.reactionwheel:ReactionWheel_Torque\n'
                    u'CADRE.reactionwheel.ReactionWheel_Dynamics=CADRE.reactionwheel:ReactionWheel_Dynamics\n'
                    u'CADRE.sun.Sun_PositionBody=CADRE.sun:Sun_PositionBody\n'
                    u'CADRE.comm.Comm_EarthsSpinMtx=CADRE.comm:Comm_EarthsSpinMtx\n'
                    u'CADRE.comm.Comm_DataDownloaded=CADRE.comm:Comm_DataDownloaded\n'
                    u'CADRE.CADRE_launch.Uniformity=CADRE.CADRE_launch:Uniformity\n'
                    u'CADRE.comm.Comm_GSposEarth=CADRE.comm:Comm_GSposEarth\n'
                    u'CADRE.comm.Comm_VectorBody=CADRE.comm:Comm_VectorBody\n'
                    u'CADRE.sun.Sun_PositionSpherical=CADRE.sun:Sun_PositionSpherical\n'
                    u'CADRE.attitude.Attitude_RotationMtx=CADRE.attitude:Attitude_RotationMtx\n'
                    u'CADRE.attitude.Attitude_Roll=CADRE.attitude:Attitude_Roll\n'
                    u'CADRE.power.Power_SolarPower=CADRE.power:Power_SolarPower\n'
                    u'CADRE.battery.BatteryConstraints=CADRE.battery:BatteryConstraints\n'
                    u'CADRE.attitude.Attitude_Torque=CADRE.attitude:Attitude_Torque\n'
                    u'CADRE.sun.Sun_PositionECI=CADRE.sun:Sun_PositionECI\n'
                    u'CADRE.power.Power_CellVoltage=CADRE.power:Power_CellVoltage\n'
                    u'CADRE.attitude.Attitude_RotationMtxRates=CADRE.attitude:Attitude_RotationMtxRates\n'
                    u'CADRE.battery.BatteryPower=CADRE.battery:BatteryPower\n'
                    u'CADRE.rk4.RK4=CADRE.rk4:RK4\n'
                    u'CADRE.CADRE_launch.GroundLOC=CADRE.CADRE_launch:GroundLOC\n'
                    u'CADRE.comm.Comm_AntRotationMtx=CADRE.comm:Comm_AntRotationMtx\n'
                    u'CADRE.power.Power_Total=CADRE.power:Power_Total\n'
                    u'CADRE.test.test_rk_deriv.RKTest=CADRE.test.test_rk_deriv:RKTest\n'
                    u'CADRE.CADRE_assembly.CADRE=CADRE.CADRE_assembly:CADRE\n'
                    u'CADRE.comm.Comm_EarthsSpin=CADRE.comm:Comm_EarthsSpin\n'
                    u'CADRE.comm.Comm_AntRotation=CADRE.comm:Comm_AntRotation\n'
                    u'CADRE.comm.Comm_VectorAnt=CADRE.comm:Comm_VectorAnt\n'
                    u'CADRE.comm.Comm_LOS=CADRE.comm:Comm_LOS\n'
                    u'CADRE.KS.KSComp=CADRE.KS:KSComp\n'
                    u'CADRE.reactionwheel.ReactionWheel_Motor=CADRE.reactionwheel:ReactionWheel_Motor\n'
                    u'CADRE.attitude.Attitude_Sideslip=CADRE.attitude:Attitude_Sideslip\n'
                    u'CADRE.parameters.BsplineParameters=CADRE.parameters:BsplineParameters\n'
                    u'CADRE.comm.Comm_VectorSpherical=CADRE.comm:Comm_VectorSpherical\n'
                    u'CADRE.battery.BatterySOC=CADRE.battery:BatterySOC\n'
                    u'CADRE.attitude.Attitude_AngularRates=CADRE.attitude:Attitude_AngularRates\n'
                    u'CADRE.comm.Comm_GSposECI=CADRE.comm:Comm_GSposECI\n'
                    u'CADRE.sun.Sun_LOS=CADRE.sun:Sun_LOS\n'
                    u'CADRE.CADRE_mdp.CADRE_Optimization=CADRE.CADRE_mdp:CADRE_Optimization\n'
                    u'CADRE.thermal_temperature.ThermalTemperature=CADRE.thermal_temperature:ThermalTemperature\n'
                    u'CADRE.CADRE_launch.CADRE_Launch=CADRE.CADRE_launch:CADRE_Launch\n'
                    u'CADRE.comm.Comm_Distance=CADRE.comm:Comm_Distance\n'
                    u'\n'
                    # The container section repeats the component list verbatim.
                    u'[openmdao.container]\n'
                    u'CADRE.solar.Solar_ExposedArea=CADRE.solar:Solar_ExposedArea\n'
                    u'CADRE.comm.Comm_GainPattern=CADRE.comm:Comm_GainPattern\n'
                    u'CADRE.orbit.Orbit_Dynamics=CADRE.orbit:Orbit_Dynamics\n'
                    u'CADRE.attitude.Attitude_Attitude=CADRE.attitude:Attitude_Attitude\n'
                    u'CADRE.comm.Comm_VectorECI=CADRE.comm:Comm_VectorECI\n'
                    u'CADRE.attitude.Attitude_Angular=CADRE.attitude:Attitude_Angular\n'
                    u'CADRE.reactionwheel.ReactionWheel_Power=CADRE.reactionwheel:ReactionWheel_Power\n'
                    u'CADRE.comm.Comm_BitRate=CADRE.comm:Comm_BitRate\n'
                    u'CADRE.orbit.Orbit_Initial=CADRE.orbit:Orbit_Initial\n'
                    u'CADRE.reactionwheel.ReactionWheel_Torque=CADRE.reactionwheel:ReactionWheel_Torque\n'
                    u'CADRE.reactionwheel.ReactionWheel_Dynamics=CADRE.reactionwheel:ReactionWheel_Dynamics\n'
                    u'CADRE.sun.Sun_PositionBody=CADRE.sun:Sun_PositionBody\n'
                    u'CADRE.comm.Comm_EarthsSpinMtx=CADRE.comm:Comm_EarthsSpinMtx\n'
                    u'CADRE.comm.Comm_DataDownloaded=CADRE.comm:Comm_DataDownloaded\n'
                    u'CADRE.CADRE_launch.Uniformity=CADRE.CADRE_launch:Uniformity\n'
                    u'CADRE.comm.Comm_GSposEarth=CADRE.comm:Comm_GSposEarth\n'
                    u'CADRE.comm.Comm_VectorBody=CADRE.comm:Comm_VectorBody\n'
                    u'CADRE.sun.Sun_PositionSpherical=CADRE.sun:Sun_PositionSpherical\n'
                    u'CADRE.attitude.Attitude_RotationMtx=CADRE.attitude:Attitude_RotationMtx\n'
                    u'CADRE.attitude.Attitude_Roll=CADRE.attitude:Attitude_Roll\n'
                    u'CADRE.power.Power_SolarPower=CADRE.power:Power_SolarPower\n'
                    u'CADRE.battery.BatteryConstraints=CADRE.battery:BatteryConstraints\n'
                    u'CADRE.attitude.Attitude_Torque=CADRE.attitude:Attitude_Torque\n'
                    u'CADRE.sun.Sun_PositionECI=CADRE.sun:Sun_PositionECI\n'
                    u'CADRE.power.Power_CellVoltage=CADRE.power:Power_CellVoltage\n'
                    u'CADRE.attitude.Attitude_RotationMtxRates=CADRE.attitude:Attitude_RotationMtxRates\n'
                    u'CADRE.battery.BatteryPower=CADRE.battery:BatteryPower\n'
                    u'CADRE.rk4.RK4=CADRE.rk4:RK4\n'
                    u'CADRE.CADRE_launch.GroundLOC=CADRE.CADRE_launch:GroundLOC\n'
                    u'CADRE.comm.Comm_AntRotationMtx=CADRE.comm:Comm_AntRotationMtx\n'
                    u'CADRE.power.Power_Total=CADRE.power:Power_Total\n'
                    u'CADRE.test.test_rk_deriv.RKTest=CADRE.test.test_rk_deriv:RKTest\n'
                    u'CADRE.CADRE_assembly.CADRE=CADRE.CADRE_assembly:CADRE\n'
                    u'CADRE.comm.Comm_EarthsSpin=CADRE.comm:Comm_EarthsSpin\n'
                    u'CADRE.comm.Comm_AntRotation=CADRE.comm:Comm_AntRotation\n'
                    u'CADRE.comm.Comm_VectorAnt=CADRE.comm:Comm_VectorAnt\n'
                    u'CADRE.comm.Comm_LOS=CADRE.comm:Comm_LOS\n'
                    u'CADRE.KS.KSComp=CADRE.KS:KSComp\n'
                    u'CADRE.reactionwheel.ReactionWheel_Motor=CADRE.reactionwheel:ReactionWheel_Motor\n'
                    u'CADRE.attitude.Attitude_Sideslip=CADRE.attitude:Attitude_Sideslip\n'
                    u'CADRE.parameters.BsplineParameters=CADRE.parameters:BsplineParameters\n'
                    u'CADRE.comm.Comm_VectorSpherical=CADRE.comm:Comm_VectorSpherical\n'
                    u'CADRE.battery.BatterySOC=CADRE.battery:BatterySOC\n'
                    u'CADRE.attitude.Attitude_AngularRates=CADRE.attitude:Attitude_AngularRates\n'
                    u'CADRE.comm.Comm_GSposECI=CADRE.comm:Comm_GSposECI\n'
                    u'CADRE.sun.Sun_LOS=CADRE.sun:Sun_LOS\n'
                    u'CADRE.CADRE_mdp.CADRE_Optimization=CADRE.CADRE_mdp:CADRE_Optimization\n'
                    u'CADRE.thermal_temperature.ThermalTemperature=CADRE.thermal_temperature:ThermalTemperature\n'
                    u'CADRE.CADRE_launch.CADRE_Launch=CADRE.CADRE_launch:CADRE_Launch\n'
                    u'CADRE.comm.Comm_Distance=CADRE.comm:Comm_Distance',
    'include_package_data': True,
    'install_requires': ['openmdao.main', 'MBI'],
    'keywords': ['openmdao'],
    'license': 'Apache 2.0',
    'maintainer': 'Tristan A. Hearn',
    'maintainer_email': 'tristan.a.hearn@nasa.gov',
    'name': 'CADRE',
    # Non-Python files shipped inside the package: prebuilt Sphinx HTML docs
    # plus test fixtures/scripts.
    'package_data': {'CADRE': ['sphinx_build/html/.buildinfo',
                               'sphinx_build/html/.dummy',
                               'sphinx_build/html/full.html',
                               'sphinx_build/html/genindex.html',
                               'sphinx_build/html/glossary.html',
                               'sphinx_build/html/index.html',
                               'sphinx_build/html/launch.html',
                               'sphinx_build/html/objects.inv',
                               'sphinx_build/html/overview.html',
                               'sphinx_build/html/pkgdocs.html',
                               'sphinx_build/html/py-modindex.html',
                               'sphinx_build/html/roll.html',
                               'sphinx_build/html/search.html',
                               'sphinx_build/html/searchindex.js',
                               'sphinx_build/html/srcdocs.html',
                               'sphinx_build/html/_downloads/0_0_data.html',
                               'sphinx_build/html/_downloads/0_1_data.html',
                               'sphinx_build/html/_downloads/0_2_data.html',
                               'sphinx_build/html/_downloads/0_3_data.html',
                               'sphinx_build/html/_downloads/0_4_data.html',
                               'sphinx_build/html/_downloads/0_5_data.html',
                               'sphinx_build/html/_downloads/0_all_data.html',
                               'sphinx_build/html/_downloads/1_0_data.html',
                               'sphinx_build/html/_downloads/1_1_data.html',
                               'sphinx_build/html/_downloads/1_2_data.html',
                               'sphinx_build/html/_downloads/1_3_data.html',
                               'sphinx_build/html/_downloads/1_4_data.html',
                               'sphinx_build/html/_downloads/1_5_data.html',
                               'sphinx_build/html/_downloads/1_all_data.html',
                               'sphinx_build/html/_images/0_0.png',
                               'sphinx_build/html/_images/0_1.png',
                               'sphinx_build/html/_images/0_2.png',
                               'sphinx_build/html/_images/0_3.png',
                               'sphinx_build/html/_images/0_4.png',
                               'sphinx_build/html/_images/0_5.png',
                               'sphinx_build/html/_images/0_all.png',
                               'sphinx_build/html/_images/1_0.png',
                               'sphinx_build/html/_images/1_1.png',
                               'sphinx_build/html/_images/1_2.png',
                               'sphinx_build/html/_images/1_3.png',
                               'sphinx_build/html/_images/1_4.png',
                               'sphinx_build/html/_images/1_5.png',
                               'sphinx_build/html/_images/1_all.png',
                               'sphinx_build/html/_images/cadre3.jpg',
                               'sphinx_build/html/_images/design.png',
                               'sphinx_build/html/_images/launch.png',
                               'sphinx_build/html/_images/opt.png',
                               'sphinx_build/html/_images/roll_results.png',
                               'sphinx_build/html/_images/uniform.png',
                               'sphinx_build/html/_modules/index.html',
                               'sphinx_build/html/_modules/CADRE/attitude.html',
                               'sphinx_build/html/_modules/CADRE/battery.html',
                               'sphinx_build/html/_modules/CADRE/CADRE_assembly.html',
                               'sphinx_build/html/_modules/CADRE/CADRE_launch.html',
                               'sphinx_build/html/_modules/CADRE/CADRE_mdp.html',
                               'sphinx_build/html/_modules/CADRE/comm.html',
                               'sphinx_build/html/_modules/CADRE/kinematics.html',
                               'sphinx_build/html/_modules/CADRE/KS.html',
                               'sphinx_build/html/_modules/CADRE/orbit.html',
                               'sphinx_build/html/_modules/CADRE/parameters.html',
                               'sphinx_build/html/_modules/CADRE/power.html',
                               'sphinx_build/html/_modules/CADRE/reactionwheel.html',
                               'sphinx_build/html/_modules/CADRE/rk4.html',
                               'sphinx_build/html/_modules/CADRE/solar.html',
                               'sphinx_build/html/_modules/CADRE/sun.html',
                               'sphinx_build/html/_modules/CADRE/thermal_temperature.html',
                               'sphinx_build/html/_modules/CADRE/test/test_assembly.html',
                               'sphinx_build/html/_modules/CADRE/test/test_CADRE_derivs.html',
                               'sphinx_build/html/_modules/CADRE/test/test_components.html',
                               'sphinx_build/html/_modules/CADRE/test/test_derivatives.html',
                               'sphinx_build/html/_modules/CADRE/test/test_rk_deriv.html',
                               'sphinx_build/html/_sources/full.txt',
                               'sphinx_build/html/_sources/glossary.txt',
                               'sphinx_build/html/_sources/index.txt',
                               'sphinx_build/html/_sources/launch.txt',
                               'sphinx_build/html/_sources/overview.txt',
                               'sphinx_build/html/_sources/pkgdocs.txt',
                               'sphinx_build/html/_sources/roll.txt',
                               'sphinx_build/html/_sources/srcdocs.txt',
                               'sphinx_build/html/_static/_static',
                               'sphinx_build/html/_static/ajax-loader.gif',
                               'sphinx_build/html/_static/basic.css',
                               'sphinx_build/html/_static/comment-bright.png',
                               'sphinx_build/html/_static/comment-close.png',
                               'sphinx_build/html/_static/comment.png',
                               'sphinx_build/html/_static/default.css',
                               'sphinx_build/html/_static/doctools.js',
                               'sphinx_build/html/_static/down-pressed.png',
                               'sphinx_build/html/_static/down.png',
                               'sphinx_build/html/_static/file.png',
                               'sphinx_build/html/_static/jquery.js',
                               'sphinx_build/html/_static/minus.png',
                               'sphinx_build/html/_static/plus.png',
                               'sphinx_build/html/_static/pygments.css',
                               'sphinx_build/html/_static/searchtools.js',
                               'sphinx_build/html/_static/sidebar.js',
                               'sphinx_build/html/_static/underscore.js',
                               'sphinx_build/html/_static/up-pressed.png',
                               'sphinx_build/html/_static/up.png',
                               'sphinx_build/html/_static/websupport.js',
                               'test/__init__.py',
                               'test/data1346.pkl',
                               'test/speeds.py',
                               'test/test_assembly.py',
                               'test/test_CADRE_derivs.py',
                               'test/test_components.py',
                               'test/test_derivatives.py',
                               'test/test_rk_deriv.py']},
    'package_dir': {'': 'src'},
    'packages': ['CADRE', 'CADRE.data', 'CADRE.test'],
    'url': 'https://github.com/OpenMDAO-Plugins/CADRE',
    'version': '0.5',
    'zip_safe': False}
setup(**kwargs)
| 104.042857
| 6,133
| 0.670877
| 1,619
| 14,566
| 5.757875
| 0.130945
| 0.118
| 0.16091
| 0.095795
| 0.829543
| 0.754988
| 0.647501
| 0.586891
| 0.565973
| 0.565973
| 0
| 0.006183
| 0.222779
| 14,566
| 139
| 6,134
| 104.791367
| 0.817242
| 0.011808
| 0
| 0
| 1
| 0.007752
| 0.74046
| 0.706124
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007752
| 0
| 0.007752
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4b0c17090afbb569c894cfe87b71e0ad864950d8
| 24,759
|
py
|
Python
|
attacks/feature.py
|
Equationliu/GA-Attack
|
b0280674a211f6451774ec6b1d4cee2fc19a4de6
|
[
"MIT"
] | 8
|
2022-01-06T06:55:54.000Z
|
2022-03-23T09:29:54.000Z
|
attacks/feature.py
|
Equationliu/GA-Attack
|
b0280674a211f6451774ec6b1d4cee2fc19a4de6
|
[
"MIT"
] | 1
|
2022-03-20T02:00:31.000Z
|
2022-03-20T04:01:53.000Z
|
attacks/feature.py
|
Equationliu/GA-Attack
|
b0280674a211f6451774ec6b1d4cee2fc19a4de6
|
[
"MIT"
] | 2
|
2022-01-11T14:15:18.000Z
|
2022-01-16T13:02:55.000Z
|
import torch
import math
import numpy as np
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from .utils import Encoder, Decoder
class StyleTransferNet(object):
    """Pairs a pretrained Encoder with its Decoder for feature round-trips.

    ``encode`` maps an image to feature space; ``decode`` maps features
    back to an image, undoing the encoder's preprocessing.
    """

    def __init__(self):
        self.encoder = Encoder()
        self.decoder = Decoder()

    def decode(self, x):
        """Decode features *x* to an image and undo encoder preprocessing."""
        decoded = self.decoder.decode(x)
        # post processing for output of decoder
        return self.encoder.deprocess(decoded)

    def encode(self, img):
        """Preprocess *img* and encode it to feature space."""
        # Note that the pretrained vgg model accepts BGR format, but the
        # function by default takes RGB values.
        preprocessed = self.encoder.preprocess(img)
        return self.encoder.encode(preprocessed)
def moments(content):
    """Return the mean and (biased) variance of *content* over dims 2 and 3.

    Both results keep the reduced dimensions, so for an NCHW tensor the
    outputs are shaped (N, C, 1, 1): per-sample, per-channel statistics.
    """
    mu = content.mean(dim=(2, 3), keepdim=True)
    # Var[X] = E[X^2] - E[X]^2 (biased estimator, matching mean over H*W).
    mean_of_squares = (content * content).mean(dim=(2, 3), keepdim=True)
    return mu, mean_of_squares - mu * mu
# Module-level singleton: the attack functions below all encode/decode
# through this shared encoder/decoder pair instead of building their own.
stn = StyleTransferNet()
def DI_FSA(model, x_nature, y, args, loss_fn, random_init=False):
    """Feature-space attack with diversity inputs (DI-FSA).

    Optimizes adversarial per-channel mean/std statistics of the encoded
    features of ``x_nature`` so that the decoded image fools ``model``,
    while a content loss keeps the decoded image consistent with the
    perturbed features.

    Args:
        model: callable ``model(images, diversity=bool)`` returning logits.
        x_nature: natural images scaled to [0, 1] (NCHW assumed from the
            dim=(2, 3) reductions below).
        y: ground-truth labels for ``x_nature``.
        args: provides ``epsilon``, ``num_steps``, ``intervals`` and
            ``max_epsilon``.
        loss_fn: classification loss applied to ``(logits, y)``.
        random_init: start from random statistics inside the allowed range
            instead of the natural statistics.

    Returns:
        Tuple of (adversarial images resized to 299x299, per-sample
        "budget": the largest mean/std enlargement factor actually used).

    NOTE(review): requires CUDA (``.cuda()`` below) and appears to assume
    ``eps > 1`` so that ``x / eps < eps * x`` — confirm with callers.
    """
    batch_size = x_nature.shape[0]
    eps = args.epsilon
    # Step count scales with log(eps) relative to log(max_epsilon).
    num_steps = int(0.5 * args.num_steps * (1 + (args.intervals * math.log(args.epsilon)) / math.log(args.max_epsilon)))
    alpha = 1.25 * eps / args.num_steps  # per-iteration step size
    # Natural statistics
    img_scaled = 255 * x_nature  # the encoder consumes 0-255 pixel values
    # encode image
    enc_c, _ = stn.encode(img_scaled)
    enc_c.requires_grad_(False)
    generated_img = stn.decode(enc_c)
    generated_img_rescaled = generated_img / 255.0
    # Report how much accuracy the encode/decode round-trip alone costs.
    with torch.no_grad():
        out = model(generated_img_rescaled, diversity=False)
        reconstruct_err = (out.data.max(1)[1] != y.data).float().sum()
        print("Batch error after Image Reconstruction: ", reconstruct_err.item())
    # Per-channel feature statistics of the natural image.
    meanC, varC = moments(enc_c)
    sigmaC = torch.sqrt(varC + 1e-5)
    sign = torch.sign(meanC)
    abs_meanC = torch.abs(meanC) + 1e-6  # guard against division by zero
    limit = 10 / torch.sqrt(torch.tensor(128.0).cuda())  # gradient-clip bound
    if random_init:
        # Uniform start inside the multiplicative budget [x/eps, eps*x].
        meanC_delta_rand = torch.distributions.uniform.Uniform(low=abs_meanC / eps, high=eps * abs_meanC)
        sigmaC_delta_rand = torch.distributions.uniform.Uniform(sigmaC / eps, eps * sigmaC)
        meanS_delta = Variable(meanC_delta_rand.sample(), requires_grad=True)
        sigmaS_delta = Variable(sigmaC_delta_rand.sample(), requires_grad=True)
    else:
        meanS_delta = Variable(abs_meanC.clone(), requires_grad=True)
        sigmaS_delta = Variable(sigmaC.clone(), requires_grad=True)
    for idx in range(num_steps):
        # Re-normalize the natural features to the adversarial mean/std.
        target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
        # decode target features back to image
        generated_adv_img = stn.decode(target_features)
        enc_gen_adv, _ = stn.encode(generated_adv_img)
        generated_adv_img_rescaled = generated_adv_img / 255.0
        with torch.enable_grad():
            model_input = F.interpolate(generated_adv_img_rescaled, size=299, mode="nearest")
            logits = model(model_input, diversity=True)
            adv_loss = loss_fn(logits, y)
            # content loss: keep the re-encoded image near the target features
            content_loss = torch.square(enc_gen_adv - target_features).mean(dim=(2, 3)).sum(dim=1)
            adv_loss_total = adv_loss * batch_size * 128
            loss = content_loss + adv_loss_total
        # Periodic progress report (losses plus a no-grad error evaluation).
        if (idx + 1) % 10 == 0:
            print("adv_loss_total: ", adv_loss_total)
            print("content_loss: ", content_loss)
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            generated_adv_img = stn.decode(target_features.detach()) / 255.0
            with torch.no_grad():
                out = model(generated_adv_img, diversity=False)
                batch_err = (out.data.max(1)[1] != y.data).float().sum()
                print("Batch error ", batch_err.item())
        [meanS_delta_grad, sigmaS_delta_grad] = torch.autograd.grad(loss.sum(), [meanS_delta, sigmaS_delta])
        # gradient clipping
        meanS_delta_grad = meanS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
        sigmaS_delta_grad = sigmaS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
        # Signed gradient descent on both statistics.
        meanS_delta = Variable(meanS_delta.data - alpha * torch.sign(meanS_delta_grad), requires_grad=True)
        sigmaS_delta = Variable(sigmaS_delta.data - alpha * torch.sign(sigmaS_delta_grad), requires_grad=True)
        # clip the statistics back into the [x/eps, eps*x] budget
        meanS_delta = Variable(torch.max(torch.min(meanS_delta.data, eps * abs_meanC), abs_meanC / eps), requires_grad=True)
        sigmaS_delta = Variable(torch.max(torch.min(sigmaS_delta.data, eps * sigmaC), sigmaC / eps), requires_grad=True)
    target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
    generated_adv_img = stn.decode(target_features.detach()) / 255.0
    # budget: largest per-sample enlargement (or shrink) factor actually used
    enlarge_mean = meanS_delta / abs_meanC
    enlarge_sigma = sigmaS_delta / sigmaC
    upper_bound_mean = torch.where(enlarge_mean >= 1.0, enlarge_mean, 1 / enlarge_mean)
    upper_bound_sigma = torch.where(enlarge_sigma >= 1.0, enlarge_sigma, 1 / enlarge_sigma)
    budget = torch.max(torch.amax(upper_bound_mean, dim=(1, 2, 3)), torch.amax(upper_bound_sigma, dim=(1, 2, 3)))
    return F.interpolate(generated_adv_img, size=299, mode="nearest"), budget
def DMI_FSA(model, x_nature, y, args, loss_fn, random_init=False):
    """Momentum variant of DI_FSA (DMI-FSA).

    Identical setup and losses, but the statistic updates accumulate a
    normalized gradient into momentum buffers before taking the sign step.

    Args:
        model: callable ``model(images, diversity=bool)`` returning logits.
        x_nature: natural images scaled to [0, 1] (NCHW assumed).
        y: ground-truth labels for ``x_nature``.
        args: provides ``epsilon``, ``num_steps``, ``intervals``,
            ``max_epsilon`` and additionally ``momentum``.
        loss_fn: classification loss applied to ``(logits, y)``.
        random_init: start from random statistics inside the allowed range.

    Returns:
        Tuple of (adversarial images resized to 299x299, per-sample budget).

    NOTE(review): requires CUDA and appears to assume ``eps > 1`` — confirm
    with callers.
    """
    batch_size = x_nature.shape[0]
    eps = args.epsilon
    # Step count scales with log(eps) relative to log(max_epsilon).
    num_steps = int(0.5 * args.num_steps * (1 + (args.intervals * math.log(args.epsilon)) / math.log(args.max_epsilon)))
    alpha = 1.25 * eps / args.num_steps  # per-iteration step size
    # Natural statistics
    img_scaled = 255 * x_nature  # the encoder consumes 0-255 pixel values
    # encode image
    enc_c, _ = stn.encode(img_scaled)
    enc_c.requires_grad_(False)
    generated_img = stn.decode(enc_c)
    generated_img_rescaled = generated_img / 255.0
    # Report how much accuracy the encode/decode round-trip alone costs.
    with torch.no_grad():
        out = model(generated_img_rescaled, diversity=False)
        reconstruct_err = (out.data.max(1)[1] != y.data).float().sum()
        print("Batch error after Image Reconstruction: ", reconstruct_err.item())
    meanC, varC = moments(enc_c)
    sigmaC = torch.sqrt(varC + 1e-5)
    sign = torch.sign(meanC)
    abs_meanC = torch.abs(meanC) + 1e-6  # guard against division by zero
    limit = 10 / torch.sqrt(torch.tensor(128.0).cuda())  # gradient-clip bound
    if random_init:
        # Uniform start inside the multiplicative budget [x/eps, eps*x].
        meanC_delta_rand = torch.distributions.uniform.Uniform(low=abs_meanC / eps, high=eps * abs_meanC)
        sigmaC_delta_rand = torch.distributions.uniform.Uniform(sigmaC / eps, eps * sigmaC)
        meanS_delta = Variable(meanC_delta_rand.sample(), requires_grad=True)
        sigmaS_delta = Variable(sigmaC_delta_rand.sample(), requires_grad=True)
    else:
        meanS_delta = Variable(abs_meanC.clone(), requires_grad=True)
        sigmaS_delta = Variable(sigmaC.clone(), requires_grad=True)
    # momentum accumulators for the two statistic updates
    g_meanS_delta = torch.zeros_like(meanS_delta).cuda()
    g_sigmaS_delta = torch.zeros_like(sigmaS_delta).cuda()
    for idx in range(num_steps):
        # Re-normalize the natural features to the adversarial mean/std.
        target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
        # decode target features back to image
        generated_adv_img = stn.decode(target_features)
        enc_gen_adv, _ = stn.encode(generated_adv_img)
        generated_adv_img_rescaled = generated_adv_img / 255.0
        with torch.enable_grad():
            model_input = F.interpolate(generated_adv_img_rescaled, size=299, mode="nearest")
            logits = model(model_input, diversity=True)
            adv_loss = loss_fn(logits, y)
            # content loss: keep the re-encoded image near the target features
            content_loss = torch.square(enc_gen_adv - target_features).mean(dim=(2, 3)).sum(dim=1)
            adv_loss_total = adv_loss * batch_size * 128
            loss = content_loss + adv_loss_total
        # Periodic progress report (losses plus a no-grad error evaluation).
        if (idx + 1) % 10 == 0:
            print("adv_loss_total: ", adv_loss_total)
            print("content_loss: ", content_loss)
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            generated_adv_img = stn.decode(target_features.detach()) / 255.0
            with torch.no_grad():
                out = model(generated_adv_img, diversity=False)
                batch_err = (out.data.max(1)[1] != y.data).float().sum()
                print("Batch error ", batch_err.item())
        [meanS_delta_grad, sigmaS_delta_grad] = torch.autograd.grad(loss.sum(), [meanS_delta, sigmaS_delta])
        # gradient clipping
        meanS_delta_grad = meanS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
        sigmaS_delta_grad = sigmaS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
        # MI-FGSM-style update: L1-normalized gradient folded into momentum.
        noise_mean = meanS_delta_grad / torch.abs(meanS_delta_grad).mean(dim=(1, 2, 3), keepdim=True)
        g_meanS_delta = g_meanS_delta * args.momentum + noise_mean
        meanS_delta = Variable(meanS_delta.data - alpha * torch.sign(g_meanS_delta), requires_grad=True)
        noise_sigma = sigmaS_delta_grad / torch.abs(sigmaS_delta_grad).mean(dim=(1, 2, 3), keepdim=True)
        g_sigmaS_delta = g_sigmaS_delta * args.momentum + noise_sigma
        sigmaS_delta = Variable(sigmaS_delta.data - alpha * torch.sign(g_sigmaS_delta), requires_grad=True)
        # clip the statistics back into the [x/eps, eps*x] budget
        meanS_delta = Variable(torch.max(torch.min(meanS_delta.data, eps * abs_meanC), abs_meanC / eps), requires_grad=True)
        sigmaS_delta = Variable(torch.max(torch.min(sigmaS_delta.data, eps * sigmaC), sigmaC / eps), requires_grad=True)
    target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
    generated_adv_img = stn.decode(target_features.detach()) / 255.0
    # budget: largest per-sample enlargement (or shrink) factor actually used
    enlarge_mean = meanS_delta / abs_meanC
    enlarge_sigma = sigmaS_delta / sigmaC
    upper_bound_mean = torch.where(enlarge_mean >= 1.0, enlarge_mean, 1 / enlarge_mean)
    upper_bound_sigma = torch.where(enlarge_sigma >= 1.0, enlarge_sigma, 1 / enlarge_sigma)
    budget = torch.max(torch.amax(upper_bound_mean, dim=(1, 2, 3)), torch.amax(upper_bound_sigma, dim=(1, 2, 3)))
    return F.interpolate(generated_adv_img, size=299, mode="nearest"), budget
def GA_DMI_FSA(model, eval_model, x_nature, y, args, loss_fn):
    """Gradual (multi-interval) momentum-iterative feature-space attack.

    Perturbs the channel-wise mean/std statistics of the encoded image
    (style-transfer-like feature statistics) rather than raw pixels, growing
    the allowed statistic budget `eps` over `args.intervals` geometric steps
    and stopping early once `eval_model`'s confidence in the true label drops
    below `args.thres` for the whole batch.

    Args:
        model: surrogate classifier attacked during optimization
            (called as ``model(img, diversity=...)``).
        eval_model: classifier used only to decide early stopping.
        x_nature: natural input images in [0, 1]  # assumes NCHW float tensor on CUDA — TODO confirm
        y: ground-truth labels for the batch.
        args: namespace providing max_epsilon, intervals, num_steps,
            momentum and thres.
        loss_fn: per-sample adversarial loss (e.g. CE) on logits vs. y.

    Returns:
        (adv_images, budget): adversarial images resized to 299x299 via
        nearest-neighbor interpolation, and the per-sample maximum
        enlargement factor actually used on the feature statistics.

    NOTE(review): relies on module-level `stn` (encoder/decoder) and
    `moments` helpers defined elsewhere in this file; `Variable` is the
    deprecated torch.autograd.Variable API.
    """
    batch_size = x_nature.shape[0]
    # Geometric schedule of statistic budgets: eps grows from
    # max_epsilon^(1/intervals) up to max_epsilon.
    eps_list = [math.exp((idx + 1) * (math.log(args.max_epsilon) / args.intervals))for idx in range(args.intervals)]
    # mask[i] == True while sample i is still "not fooled enough" and
    # should keep accumulating momentum.
    mask = torch.ones((batch_size, )).bool()
    num_steps = args.num_steps
    # stn works on 0..255-scaled images.
    img_scaled = 255 * x_nature
    # encode image
    enc_c, _ = stn.encode(img_scaled)
    enc_c.requires_grad_(False)
    generated_img = stn.decode(enc_c)
    generated_img_rescaled = generated_img / 255.0
    with torch.no_grad():
        out = model(generated_img_rescaled, diversity=False)
        reconstruct_err = (out.data.max(1)[1] != y.data).float().sum()
    # Sanity check: error introduced purely by the encode/decode round trip.
    print("Batch error after Image Reconstruction: ", reconstruct_err.item())
    # Natural per-channel feature statistics (mean/std of enc_c).
    meanC, varC = moments(enc_c)
    sigmaC = torch.sqrt(varC + 1e-5)
    sign = torch.sign(meanC)
    abs_meanC = torch.abs(meanC) + 1e-6
    # Clamp bound for raw gradients before normalization.
    limit = 10 / torch.sqrt(torch.tensor(128.0).cuda())
    # Optimized variables: target mean magnitude and std, initialized at
    # the natural statistics (identity perturbation).
    meanS_delta = Variable(abs_meanC.clone(), requires_grad=True)
    sigmaS_delta = Variable(sigmaC.clone(), requires_grad=True)
    # momentum
    g_meanS_delta = torch.zeros_like(meanS_delta).cuda()
    g_sigmaS_delta = torch.zeros_like(sigmaS_delta).cuda()
    for eps in eps_list:
        # Slightly overshooting step size so the budget can be reached
        # within num_steps sign steps.
        step_size = 1.25 * eps / num_steps
        for idx in range(num_steps):
            # Re-normalize content features to the current target stats
            # (AdaIN-style transfer), keeping the original mean's sign.
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            # decode target features back to image
            generated_adv_img = stn.decode(target_features)
            enc_gen_adv, _ = stn.encode(generated_adv_img)
            generated_adv_img_rescaled = generated_adv_img / 255.0
            with torch.enable_grad():
                model_input = F.interpolate(generated_adv_img_rescaled, size=299, mode="nearest")
                logits = model(model_input, diversity=True)
                adv_loss = loss_fn(logits, y)
                # content loss
                content_loss = torch.square(enc_gen_adv - target_features).mean(dim = (2,3)).sum(dim = 1)
                # Scale so the adversarial term dominates the content term.
                adv_loss_total = adv_loss * batch_size * 128
                loss = content_loss + adv_loss_total
            # Periodic progress report on the (detached) current adversary.
            if (idx + 1) % 10 == 0:
                print("adv_loss_total: ", adv_loss_total)
                print("content_loss: ", content_loss)
                target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
                generated_adv_img = stn.decode(target_features.detach()) / 255.0
                with torch.no_grad():
                    out = model(generated_adv_img, diversity=False)
                    batch_err = (out.data.max(1)[1] != y.data).float().sum()
                print("Budget now: {}, Batch error: {}".format(eps, batch_err.item()))
            [meanS_delta_grad, sigmaS_delta_grad] = torch.autograd.grad(loss.sum(), [meanS_delta, sigmaS_delta])
            # gradient clipping
            meanS_delta_grad = meanS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
            sigmaS_delta_grad = sigmaS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
            # MI-FGSM style update: L1-normalized gradient accumulated with
            # momentum, but only for still-active samples (mask).
            noise_mean = meanS_delta_grad / torch.abs(meanS_delta_grad).mean(dim=(1, 2, 3), keepdim=True)
            g_meanS_delta[mask] = g_meanS_delta[mask] * args.momentum + noise_mean[mask]
            meanS_delta = Variable(meanS_delta.data - step_size * torch.sign(g_meanS_delta), requires_grad=True)
            noise_sigma = sigmaS_delta_grad / torch.abs(sigmaS_delta_grad).mean(dim=(1, 2, 3), keepdim=True)
            g_sigmaS_delta[mask] = g_sigmaS_delta[mask] * args.momentum + noise_sigma[mask]
            sigmaS_delta = Variable(sigmaS_delta.data - step_size * torch.sign(g_sigmaS_delta), requires_grad=True)
            # clip
            # Multiplicative budget: each statistic stays within a factor of
            # eps (up or down) of its natural value.
            meanS_delta = Variable(torch.max(torch.min(meanS_delta.data, eps * abs_meanC), abs_meanC / eps), requires_grad=True)
            sigmaS_delta = Variable(torch.max(torch.min(sigmaS_delta.data, eps * sigmaC), sigmaC / eps), requires_grad=True)
        # Reset momentum before moving to the next (larger) budget.
        g_meanS_delta.zero_()
        g_sigmaS_delta.zero_()
        with torch.no_grad():
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            generated_adv_img = stn.decode(target_features.detach()) / 255.0
            output = eval_model(generated_adv_img, diversity=False).detach()
            prob = F.softmax(output, dim=1)
            # Confidence in the TRUE label; samples above thres stay active.
            conf = prob[np.arange(batch_size), y.long()]
            mask = (conf >= args.thres)
        # early stopping
        if mask.sum() == 0:
            break
    target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
    generated_adv_img = stn.decode(target_features.detach()) / 255.0
    # budget
    # Per-sample worst-case enlargement factor (>= 1) over mean and sigma.
    enlarge_mean = meanS_delta / abs_meanC
    enlarge_sigma = sigmaS_delta / sigmaC
    upper_bound_mean = torch.where(enlarge_mean >= 1.0, enlarge_mean, 1 / enlarge_mean)
    upper_bound_sigma = torch.where(enlarge_sigma >= 1.0, enlarge_sigma, 1 / enlarge_sigma)
    budget = torch.max(torch.amax(upper_bound_mean, dim=(1,2,3)), torch.amax(upper_bound_sigma, dim=(1,2,3)))
    return F.interpolate(generated_adv_img, size=299, mode="nearest"), budget
def GA_DI_FSA(model, eval_model, x_nature, y, args, loss_fn):
    """Gradual iterative feature-space attack WITHOUT momentum accumulation.

    Same schedule and feature-statistic parameterization as ``GA_DMI_FSA``,
    but each step uses the current clipped gradient directly (the `g_*`
    buffers are overwritten, not accumulated), i.e. I-FGSM-style rather than
    MI-FGSM-style updates.

    Args:
        model: surrogate classifier attacked during optimization.
        eval_model: classifier used only for the early-stopping check.
        x_nature: natural input images in [0, 1]  # assumes NCHW float tensor on CUDA — TODO confirm
        y: ground-truth labels.
        args: namespace providing max_epsilon, intervals, num_steps, thres.
        loss_fn: per-sample adversarial loss on logits vs. y.

    Returns:
        (adv_images, budget): 299x299 adversarial images and the per-sample
        maximum statistic-enlargement factor.

    NOTE(review): relies on module-level `stn` and `moments` helpers defined
    elsewhere in this file.
    """
    batch_size = x_nature.shape[0]
    # Geometric budget schedule up to args.max_epsilon.
    eps_list = [math.exp((idx + 1) * (math.log(args.max_epsilon) / args.intervals))for idx in range(args.intervals)]
    # Active-sample mask for the per-sample update gating below.
    mask = torch.ones((batch_size, )).bool()
    num_steps = args.num_steps
    img_scaled = 255 * x_nature
    # encode image
    enc_c, _ = stn.encode(img_scaled)
    enc_c.requires_grad_(False)
    generated_img = stn.decode(enc_c)
    generated_img_rescaled = generated_img / 255.0
    with torch.no_grad():
        out = model(generated_img_rescaled, diversity=False)
        reconstruct_err = (out.data.max(1)[1] != y.data).float().sum()
    print("Batch error after Image Reconstruction: ", reconstruct_err.item())
    # Natural feature statistics.
    meanC, varC = moments(enc_c)
    sigmaC = torch.sqrt(varC + 1e-5)
    sign = torch.sign(meanC)
    abs_meanC = torch.abs(meanC) + 1e-6
    limit = 10 / torch.sqrt(torch.tensor(128.0).cuda())
    meanS_delta = Variable(abs_meanC.clone(), requires_grad=True)
    sigmaS_delta = Variable(sigmaC.clone(), requires_grad=True)
    # momentum
    # (buffers kept for parity with GA_DMI_FSA, but simply overwritten here)
    g_meanS_delta = torch.zeros_like(meanS_delta).cuda()
    g_sigmaS_delta = torch.zeros_like(sigmaS_delta).cuda()
    for eps in eps_list:
        step_size = 1.25 * eps / num_steps
        for idx in range(num_steps):
            # AdaIN-style re-normalization to the target statistics.
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            # decode target features back to image
            generated_adv_img = stn.decode(target_features)
            enc_gen_adv, _ = stn.encode(generated_adv_img)
            generated_adv_img_rescaled = generated_adv_img / 255.0
            with torch.enable_grad():
                model_input = F.interpolate(generated_adv_img_rescaled, size=299, mode="nearest")
                logits = model(model_input, diversity=True)
                adv_loss = loss_fn(logits, y)
                # content loss
                content_loss = torch.square(enc_gen_adv - target_features).mean(dim = (2,3)).sum(dim = 1)
                adv_loss_total = adv_loss * batch_size * 128
                loss = content_loss + adv_loss_total
            # Periodic progress report.
            if (idx + 1) % 10 == 0:
                print("adv_loss_total: ", adv_loss_total)
                print("content_loss: ", content_loss)
                target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
                generated_adv_img = stn.decode(target_features.detach()) / 255.0
                with torch.no_grad():
                    out = model(generated_adv_img, diversity=False)
                    batch_err = (out.data.max(1)[1] != y.data).float().sum()
                print("Budget now: {}, Batch error: {}".format(eps, batch_err.item()))
            [meanS_delta_grad, sigmaS_delta_grad] = torch.autograd.grad(loss.sum(), [meanS_delta, sigmaS_delta])
            # gradient clipping
            meanS_delta_grad = meanS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
            sigmaS_delta_grad = sigmaS_delta_grad.detach().clamp_(-1 / torch.sqrt(limit), 1 / torch.sqrt(limit))
            # No momentum: active samples take the raw clipped gradient.
            g_meanS_delta[mask] = meanS_delta_grad[mask].clone()
            meanS_delta = Variable(meanS_delta.data - step_size * torch.sign(g_meanS_delta), requires_grad=True)
            g_sigmaS_delta[mask] = sigmaS_delta_grad[mask].clone()
            sigmaS_delta = Variable(sigmaS_delta.data - step_size * torch.sign(g_sigmaS_delta), requires_grad=True)
            # clip
            # Multiplicative budget: within a factor of eps of natural stats.
            meanS_delta = Variable(torch.max(torch.min(meanS_delta.data, eps * abs_meanC), abs_meanC / eps), requires_grad=True)
            sigmaS_delta = Variable(torch.max(torch.min(sigmaS_delta.data, eps * sigmaC), sigmaC / eps), requires_grad=True)
        g_meanS_delta.zero_()
        g_sigmaS_delta.zero_()
        with torch.no_grad():
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            generated_adv_img = stn.decode(target_features.detach()) / 255.0
            output = eval_model(generated_adv_img, diversity=False).detach()
            prob = F.softmax(output, dim=1)
            # True-label confidence; drop samples already below threshold.
            conf = prob[np.arange(batch_size), y.long()]
            mask = (conf >= args.thres)
        # early stopping
        if mask.sum() == 0:
            break
    target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
    generated_adv_img = stn.decode(target_features.detach()) / 255.0
    # budget
    # Worst-case per-sample enlargement factor (>= 1).
    enlarge_mean = meanS_delta / abs_meanC
    enlarge_sigma = sigmaS_delta / sigmaC
    upper_bound_mean = torch.where(enlarge_mean >= 1.0, enlarge_mean, 1 / enlarge_mean)
    upper_bound_sigma = torch.where(enlarge_sigma >= 1.0, enlarge_sigma, 1 / enlarge_sigma)
    budget = torch.max(torch.amax(upper_bound_mean, dim=(1,2,3)), torch.amax(upper_bound_sigma, dim=(1,2,3)))
    return F.interpolate(generated_adv_img, size=299, mode="nearest"), budget
def Feature_Adam_Attack(model, x_nature, y, args, loss_fn, diversity=True, random_init=False):
    """Feature-space attack optimized with Adam instead of sign steps.

    Optimizes the same mean/sigma feature statistics as the GA_* attacks
    but with an Adam optimizer (decayed learning rate, gradient-clamping
    hooks) at a fixed budget ``args.epsilon``, and tracks the best
    adversarial image per sample: prefer a misclassified image, and among
    equally (mis)classified candidates prefer the one with lower content
    loss.

    Args:
        model: classifier under attack (``model(img, diversity=...)``).
        x_nature: natural input images in [0, 1]  # assumes NCHW float tensor on CUDA — TODO confirm
        y: ground-truth labels.
        args: namespace providing epsilon and num_steps.
        loss_fn: per-sample adversarial loss on logits vs. y.
        diversity: forwarded to the model during optimization steps.
        random_init: start from uniformly random statistics within the
            budget instead of the natural statistics.

    Returns:
        Best adversarial images found, resized to 299x299 (nearest).

    NOTE(review): relies on module-level `stn` and `moments` helpers defined
    elsewhere in this file.
    """
    batch_size = x_nature.shape[0]
    eps = args.epsilon
    # Natural statistics
    img_scaled = 255 * x_nature
    # encode image
    enc_c, _ = stn.encode(img_scaled)
    enc_c.requires_grad_(False)
    generated_img = stn.decode(enc_c)
    enc_gen_adv, _ = stn.encode(generated_img)
    # Baseline content loss of the plain reconstruction (per sample).
    adv_content_loss = torch.square(enc_gen_adv - enc_c).mean(dim = (2,3)).sum(dim = 1)
    generated_img_rescaled = generated_img / 255.0
    with torch.no_grad():
        out = model(generated_img_rescaled, diversity=False)
        # adv_mask[i] == 1 while sample i is still classified correctly.
        adv_mask = (out.data.max(1)[1] == y.data).float()
        reconstruct_err = (out.data.max(1)[1] != y.data).float().sum()
    print("Batch error after Image Reconstruction: ", reconstruct_err.item())
    meanC, varC = moments(enc_c)
    sigmaC = torch.sqrt(varC + 1e-5)
    sign = torch.sign(meanC)
    abs_meanC = torch.abs(meanC) + 1e-6
    limit = 10 / torch.sqrt(torch.tensor(128.0).cuda())
    if random_init:
        # Random start uniformly inside the multiplicative eps budget.
        meanC_delta_rand = torch.distributions.uniform.Uniform(low = abs_meanC / eps, high = eps * abs_meanC)
        sigmaC_delta_rand = torch.distributions.uniform.Uniform(sigmaC / eps, eps * sigmaC)
        meanS_delta = Variable(meanC_delta_rand.sample(), requires_grad=True)
        sigmaS_delta = Variable(sigmaC_delta_rand.sample(), requires_grad=True)
    else:
        # Identity start: natural statistics.
        meanS_delta = Variable(abs_meanC.clone(), requires_grad=True)
        sigmaS_delta = Variable(sigmaC.clone(), requires_grad=True)
    opt = optim.Adam([meanS_delta, sigmaS_delta], lr=5e-3, betas=(0.5, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
    # gradient clipping
    # Hooks clamp incoming gradients before Adam sees them.
    for p in opt.param_groups[0]['params']:
        p.register_hook(lambda grad: torch.clamp(grad, -1 / torch.sqrt(limit), 1 / torch.sqrt(limit)))
    # best adv img
    rst_img = generated_img_rescaled.clone()
    for idx in range(args.num_steps):
        opt.zero_grad()
        target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
        # decode target features back to image
        generated_adv_img = stn.decode(target_features)
        enc_gen_adv, _ = stn.encode(generated_adv_img)
        generated_adv_img_rescaled = generated_adv_img / 255.0
        with torch.enable_grad():
            model_input = F.interpolate(generated_adv_img_rescaled, size=299, mode="nearest")
            logits = model(model_input, diversity=diversity)
            adv_loss = loss_fn(logits, y)
            # content loss
            content_loss = torch.square(enc_gen_adv - target_features).mean(dim = (2,3)).sum(dim = 1)
            adv_loss_total = adv_loss * batch_size * 128
            loss = content_loss + adv_loss_total
        loss.sum().backward()
        # 1/t learning-rate decay.
        opt.param_groups[0]['lr'] = 5e-3 / (1 + 1e-3 * idx) # 0.0075
        opt.step()
        # clip
        # Project back into the multiplicative eps budget (in-place on .data
        # so Adam's state is preserved).
        meanS_delta.data.copy_(torch.max(torch.min(meanS_delta.data, eps * abs_meanC), abs_meanC / eps))
        sigmaS_delta.data.copy_(torch.max(torch.min(sigmaS_delta.data, eps * sigmaC), sigmaC / eps))
        # Monitor the progress
        with torch.no_grad():
            target_features = (enc_c - meanC) * sigmaS_delta / sigmaC + sign * meanS_delta
            decoded_img = stn.decode(target_features.detach())
            generated_adv_img = decoded_img / 255.0
            out = model(generated_adv_img, diversity=False)
            _adv_mask = (out.data.max(1)[1] == y.data).float()
            prob_truth = F.softmax(out, dim = 1)[np.arange(batch_size), y]
            # content loss
            enc_gen_adv, _ = stn.encode(decoded_img)
            _content_loss = torch.square(enc_gen_adv - target_features).mean(dim = (2,3)).sum(dim = 1)
            # Keep the best candidate per sample: misclassification first,
            # then lower content loss as tie-breaker.
            for j in range(batch_size):
                if _adv_mask[j] < adv_mask[j] or (_adv_mask[j] == adv_mask[j] and _content_loss[j] < adv_content_loss[j]):
                    rst_img[j] = generated_adv_img[j]
                    adv_mask[j] = _adv_mask[j]
                    adv_content_loss[j] = _content_loss[j]
        if (idx + 1) % 10 == 0:
            print("adv_loss_total: ", adv_loss_total)
            print("content_loss: ", content_loss)
            batch_err = adv_mask.sum()
            print("Batch error ", batch_size - batch_err.item())
            print("label softmax: ", prob_truth)
    return F.interpolate(rst_img, size=299, mode="nearest")
| 41.964407
| 128
| 0.645745
| 3,374
| 24,759
| 4.461174
| 0.066094
| 0.056471
| 0.047834
| 0.017938
| 0.910776
| 0.896226
| 0.888055
| 0.876428
| 0.874435
| 0.863141
| 0
| 0.020833
| 0.238095
| 24,759
| 590
| 129
| 41.964407
| 0.777089
| 0.030898
| 0
| 0.814815
| 0
| 0
| 0.022588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.018519
| 0
| 0.066138
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9bbb1cc3018f2ddd89c21484c921c93db1810ec
| 46,157
|
py
|
Python
|
idaes/models/properties/modular_properties/eos/tests/test_enrtl.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | null | null | null |
idaes/models/properties/modular_properties/eos/tests/test_enrtl.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | null | null | null |
idaes/models/properties/modular_properties/eos/tests/test_enrtl.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | 1
|
2022-03-17T11:08:43.000Z
|
2022-03-17T11:08:43.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for eNRTL methods
Author: Andrew Lee
"""
import pytest
from pyomo.environ import (
ConcreteModel,
Expression,
exp,
log,
Set,
units as pyunits,
value,
Var,
)
from pyomo.util.check_units import assert_units_equivalent
from idaes.core import AqueousPhase, Solvent, Solute, Apparent, Anion, Cation
from idaes.core.util.constants import Constants
from idaes.models.properties.modular_properties.eos.enrtl import ENRTL
from idaes.models.properties.modular_properties.base.generic_property import (
GenericParameterBlock,
StateIndex,
)
from idaes.models.properties.modular_properties.state_definitions import FTPx
from idaes.models.properties.modular_properties.pure.electrolyte import (
relative_permittivity_constant,
)
from idaes.core.util.exceptions import ConfigurationError
import idaes.logger as idaeslog
def dummy_method(b, *args, **kwargs):
    """Stand-in property method: always report a molar density of 42 mol/m^3.

    Accepts and ignores the block and any extra arguments so it can be
    plugged in wherever a property-calculation callback is expected.
    """
    fixed_density = 42 * pyunits.mol / pyunits.m**3
    return fixed_density
# Shared property-package configuration used by every test in this module:
# an aqueous phase with two molecular species and three apparent electrolytes
# that dissociate into four ionic species, using the eNRTL equation of state.
configuration = {
    "components": {
        # Solvent with a fixed dummy density and constant relative
        # permittivity (value 101, distinct from C6H12's 102 so tests can
        # tell them apart).
        "H2O": {
            "type": Solvent,
            "dens_mol_liq_comp": dummy_method,
            "relative_permittivity_liq_comp": relative_permittivity_constant,
            "parameter_data": {
                "mw": (18e-3, pyunits.kg / pyunits.mol),
                "relative_permittivity_liq_comp": 101,
            },
        },
        # Molecular (non-dissociating) solute.
        "C6H12": {
            "type": Solute,
            "dens_mol_liq_comp": dummy_method,
            "relative_permittivity_liq_comp": relative_permittivity_constant,
            "parameter_data": {
                "mw": (84e-3, pyunits.kg / pyunits.mol),
                "relative_permittivity_liq_comp": 102,
            },
        },
        # Apparent species and the true ionic species they dissociate into.
        "NaCl": {"type": Apparent, "dissociation_species": {"Na+": 1, "Cl-": 1}},
        "HCl": {"type": Apparent, "dissociation_species": {"H+": 1, "Cl-": 1}},
        "NaOH": {"type": Apparent, "dissociation_species": {"Na+": 1, "OH-": 1}},
        "Na+": {"type": Cation, "charge": +1},
        "H+": {"type": Cation, "charge": +1},
        "Cl-": {"type": Anion, "charge": -1},
        "OH-": {"type": Anion, "charge": -1},
    },
    # Single aqueous phase using the eNRTL equation of state under test.
    "phases": {"Liq": {"type": AqueousPhase, "equation_of_state": ENRTL}},
    "base_units": {
        "time": pyunits.s,
        "length": pyunits.m,
        "mass": pyunits.kg,
        "amount": pyunits.mol,
        "temperature": pyunits.K,
    },
    "state_definition": FTPx,
    # State block works in true (ionic) species, not apparent species.
    "state_components": StateIndex.true,
    "pressure_ref": 1e5,
    "temperature_ref": 300,
}
class TestParameters(object):
    """Unit tests for eNRTL parameter construction (alpha and tau sets)."""

    @pytest.mark.unit
    def test_parameters_no_assignment(self):
        """Default build: all alpha/tau values take their documented defaults."""
        m = ConcreteModel()
        m.params = GenericParameterBlock(default=configuration)
        assert isinstance(m.params.Liq.ion_pair_set, Set)
        assert len(m.params.Liq.ion_pair_set) == 4
        for p in m.params.Liq.ion_pair_set:
            assert p in [("Na+, Cl-"), ("Na+, OH-"), ("H+, Cl-"), ("H+, OH-")]
        assert isinstance(m.params.Liq.component_pair_set, Set)
        assert len(m.params.Liq.component_pair_set) == 32
        assert isinstance(m.params.Liq.component_pair_set_symmetric, Set)
        assert len(m.params.Liq.component_pair_set_symmetric) == 17
        assert isinstance(m.params.Liq.alpha, Var)
        assert len(m.params.Liq.alpha) == 17
        for (i, j) in m.params.Liq.alpha:
            # alpha is symmetric: only one ordering of each pair is stored.
            if i != j:
                assert (j, i) not in m.params.Liq.alpha
            # Molecule-molecule pairs default to 0.3, all others to 0.2.
            if (i, j) in [("C6H12", "C6H12"), ("H2O", "H2O"), ("H2O", "C6H12")]:
                assert m.params.Liq.alpha[(i, j)].value == 0.3
                assert m.params.Liq.alpha[(i, j)].fixed
            else:
                assert m.params.Liq.alpha[(i, j)].value == 0.2
                assert m.params.Liq.alpha[(i, j)].fixed
        assert isinstance(m.params.Liq.tau, Var)
        assert len(m.params.Liq.tau) == 32
        for (i, j) in m.params.Liq.tau:
            assert m.params.Liq.tau[(i, j)].value == 0
            assert m.params.Liq.tau[(i, j)].fixed

    @pytest.mark.unit
    def test_parameters_assignment(self):
        """User-supplied alpha/tau values override defaults for that pair only."""
        test_config = dict(configuration)
        test_config["parameter_data"] = {}
        test_config["parameter_data"]["Liq_alpha"] = {}
        test_config["parameter_data"]["Liq_alpha"][("H2O", "Na+, Cl-")] = 0.6
        test_config["parameter_data"]["Liq_tau"] = {}
        test_config["parameter_data"]["Liq_tau"][("H2O", "Na+, Cl-")] = 0.1
        m = ConcreteModel()
        m.params = GenericParameterBlock(default=test_config)
        assert isinstance(m.params.Liq.alpha, Var)
        assert len(m.params.Liq.alpha) == 17
        for (i, j) in m.params.Liq.alpha:
            if i != j:
                assert (j, i) not in m.params.Liq.alpha
            if (i, j) == ("H2O", "Na+, Cl-"):
                # Overridden value.
                assert m.params.Liq.alpha[(i, j)].value == 0.6
                assert m.params.Liq.alpha[(i, j)].fixed
            elif (i, j) in [("C6H12", "C6H12"), ("H2O", "H2O"), ("H2O", "C6H12")]:
                assert m.params.Liq.alpha[(i, j)].value == 0.3
                assert m.params.Liq.alpha[(i, j)].fixed
            else:
                assert m.params.Liq.alpha[(i, j)].value == 0.2
                assert m.params.Liq.alpha[(i, j)].fixed
        assert isinstance(m.params.Liq.tau, Var)
        assert len(m.params.Liq.tau) == 32
        for (i, j) in m.params.Liq.tau:
            print(i, j)
            if (i, j) == ("H2O", "Na+, Cl-"):
                assert m.params.Liq.tau[(i, j)].value == 0.1
                assert m.params.Liq.tau[(i, j)].fixed
            else:
                assert m.params.Liq.tau[(i, j)].value == 0
                assert m.params.Liq.tau[(i, j)].fixed

    @pytest.mark.unit
    def test_parameters_unsymmetric_alpha(self):
        """Conflicting values for both orderings of an alpha pair must raise."""
        test_config = dict(configuration)
        test_config["parameter_data"] = {}
        test_config["parameter_data"]["Liq_alpha"] = {}
        test_config["parameter_data"]["Liq_alpha"][("H2O", "Na+, Cl-")] = 0.6
        test_config["parameter_data"]["Liq_alpha"][("Na+, Cl-", "H2O")] = 0.8
        m = ConcreteModel()
        # TODO: Having trouble getting regex to match component tuple
        # Using a wildcard for now
        with pytest.raises(
            ConfigurationError,
            match="params.Liq eNRTL alpha parameter assigned "
            "non-symmetric value for pair (.+?). Please assign "
            "only one value for component pair.",
        ):
            m.params = GenericParameterBlock(default=test_config)

    @pytest.mark.unit
    def test_parameters_alpha_symmetry_duplicate(self, caplog):
        """Equal values for both orderings are accepted but logged as redundant."""
        caplog.set_level(
            idaeslog.INFO,
            logger=(
                "idaes.models.properties.modular_properties." "eos.enrtl_parameters"
            ),
        )
        test_config = dict(configuration)
        test_config["parameter_data"] = {}
        test_config["parameter_data"]["Liq_alpha"] = {}
        test_config["parameter_data"]["Liq_alpha"][("H2O", "Na+, Cl-")] = 0.6
        test_config["parameter_data"]["Liq_alpha"][("Na+, Cl-", "H2O")] = 0.6
        m = ConcreteModel()
        m.params = GenericParameterBlock(default=test_config)
        assert (
            "eNRTL alpha value provided for both ('H2O', 'Na+, Cl-') and "
            "('Na+, Cl-', 'H2O'). It is only necessary to provide a "
            "value for one of these due to symmetry." in caplog.text
        )

    @pytest.mark.unit
    def test_parameters_alpha_unused_parameter(self):
        """Alpha for an invalid species pair (bare ion) must raise."""
        test_config = dict(configuration)
        test_config["parameter_data"] = {}
        test_config["parameter_data"]["Liq_alpha"] = {}
        test_config["parameter_data"]["Liq_alpha"][("H2O", "Na+")] = 0.6
        m = ConcreteModel()
        # TODO: Having trouble getting regex to match component tuple
        # Using a wildcard for now
        with pytest.raises(
            ConfigurationError,
            match="params.Liq eNRTL alpha parameter provided "
            "for invalid component pair (.+?). Please check "
            "typing and only provide parameters for valid "
            "species pairs.",
        ):
            m.params = GenericParameterBlock(default=test_config)

    @pytest.mark.unit
    def test_parameters_tau_asymmetric(self):
        """tau is NOT symmetric: both orderings may carry distinct values."""
        test_config = dict(configuration)
        test_config["parameter_data"] = {}
        test_config["parameter_data"]["Liq_tau"] = {}
        test_config["parameter_data"]["Liq_tau"][("H2O", "Na+, Cl-")] = 0.1
        test_config["parameter_data"]["Liq_tau"][("Na+, Cl-", "H2O")] = -0.1
        m = ConcreteModel()
        m.params = GenericParameterBlock(default=test_config)
        assert isinstance(m.params.Liq.tau, Var)
        assert len(m.params.Liq.tau) == 32
        for (i, j) in m.params.Liq.tau:
            print(i, j)
            if (i, j) == ("H2O", "Na+, Cl-"):
                assert m.params.Liq.tau[(i, j)].value == 0.1
                assert m.params.Liq.tau[(i, j)].fixed
            elif (i, j) == ("Na+, Cl-", "H2O"):
                assert m.params.Liq.tau[(i, j)].value == -0.1
                assert m.params.Liq.tau[(i, j)].fixed
            else:
                assert m.params.Liq.tau[(i, j)].value == 0
                assert m.params.Liq.tau[(i, j)].fixed

    @pytest.mark.unit
    def test_parameters_tau_unused_parameter(self):
        """tau for an invalid species pair (bare ion) must raise."""
        test_config = dict(configuration)
        test_config["parameter_data"] = {}
        test_config["parameter_data"]["Liq_tau"] = {}
        test_config["parameter_data"]["Liq_tau"][("H2O", "Na+")] = 0.6
        m = ConcreteModel()
        # TODO: Having trouble getting regex to match component tuple
        # Using a wildcard for now
        with pytest.raises(
            ConfigurationError,
            match="params.Liq eNRTL tau parameter provided for "
            "invalid component pair (.+?). Please check typing "
            "and only provide parameters for valid species "
            "pairs.",
        ):
            m.params = GenericParameterBlock(default=test_config)
class TestStateBlockSymmetric(object):
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel()
m.params = GenericParameterBlock(default=configuration)
m.state = m.params.build_state_block([1])
# Need to set a value of T for checking expressions later
m.state[1].temperature.set_value(300)
return m
@pytest.mark.unit
def test_common(self, model):
# Reference state composition
assert isinstance(model.state[1].Liq_x_ref, Expression)
assert len(model.state[1].Liq_x_ref) == 6
for k in model.state[1].Liq_x_ref:
assert k in ["H2O", "C6H12", "Na+", "H+", "Cl-", "OH-"]
if k in ["H2O", "C6H12"]:
assert str(model.state[1].Liq_x_ref[k].expr) == str(0.0)
else:
assert str(model.state[1].Liq_x_ref[k].expr) == str(
model.state[1].mole_frac_phase_comp_true["Liq", k]
/ (
model.state[1].mole_frac_phase_comp_true["Liq", "Cl-"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "OH-"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "Na+"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "H+"]
)
)
assert isinstance(model.state[1].Liq_X, Expression)
assert len(model.state[1].Liq_X) == 6
for j in model.state[1].Liq_X:
if j in ["H2O", "C6H12"]:
# _X should be mole_frac_phase_comp_true
assert str(model.state[1].Liq_X[j]._expr) == str(
model.state[1].mole_frac_phase_comp_true["Liq", j]
)
else:
# _X should be mutiplied by |charge|
assert str(model.state[1].Liq_X[j]._expr) == str(
model.state[1].mole_frac_phase_comp_true["Liq", j]
* abs(model.params.get_component(j).config.charge)
)
assert isinstance(model.state[1].Liq_X_ref, Expression)
assert len(model.state[1].Liq_X_ref) == 6
for j in model.state[1].Liq_X_ref:
if j in ["H2O", "C6H12"]:
# _X should be mole_frac_phase_comp_true
assert str(model.state[1].Liq_X_ref[j].expr) == str(
model.state[1].Liq_x_ref[j]
)
else:
# _X should be mutiplied by |charge|
assert str(model.state[1].Liq_X_ref[j]._expr) == str(
model.state[1].Liq_x_ref[j]
* abs(model.params.get_component(j).config.charge)
)
assert isinstance(model.state[1].Liq_Y, Expression)
assert len(model.state[1].Liq_Y) == 4
for j in model.state[1].Liq_Y:
if j in ["H+", "Na+"]:
assert str(model.state[1].Liq_Y[j]._expr) == str(
model.state[1].Liq_X[j]
/ (model.state[1].Liq_X["Na+"] + model.state[1].Liq_X["H+"])
)
else:
assert str(model.state[1].Liq_Y[j]._expr) == str(
model.state[1].Liq_X[j]
/ (model.state[1].Liq_X["Cl-"] + model.state[1].Liq_X["OH-"])
)
assert isinstance(model.state[1].Liq_ionic_strength, Expression)
assert len(model.state[1].Liq_ionic_strength) == 1
assert str(model.state[1].Liq_ionic_strength.expr) == str(
0.5
* (
model.params.get_component("Cl-").config.charge ** 2
* model.state[1].mole_frac_phase_comp_true["Liq", "Cl-"]
+ model.params.get_component("OH-").config.charge ** 2
* model.state[1].mole_frac_phase_comp_true["Liq", "OH-"]
+ model.params.get_component("Na+").config.charge ** 2
* model.state[1].mole_frac_phase_comp_true["Liq", "Na+"]
+ model.params.get_component("H+").config.charge ** 2
* model.state[1].mole_frac_phase_comp_true["Liq", "H+"]
)
)
assert isinstance(model.state[1].Liq_ionic_strength_ref, Expression)
assert len(model.state[1].Liq_ionic_strength_ref) == 1
assert str(model.state[1].Liq_ionic_strength_ref.expr) == str(
0.5
* (
model.params.get_component("Cl-").config.charge ** 2
* model.state[1].Liq_x_ref["Cl-"]
+ model.params.get_component("OH-").config.charge ** 2
* model.state[1].Liq_x_ref["OH-"]
+ model.params.get_component("Na+").config.charge ** 2
* model.state[1].Liq_x_ref["Na+"]
+ model.params.get_component("H+").config.charge ** 2
* model.state[1].Liq_x_ref["H+"]
)
)
assert isinstance(model.state[1].Liq_vol_mol_solvent, Expression)
assert len(model.state[1].Liq_vol_mol_solvent) == 1
assert str(model.state[1].Liq_vol_mol_solvent.expr) == "1/(42*mol/m**3)"
assert isinstance(model.state[1].Liq_relative_permittivity_solvent, Expression)
assert len(model.state[1].Liq_relative_permittivity_solvent) == 1
assert str(model.state[1].Liq_relative_permittivity_solvent.expr) == (
str(model.params.get_component("H2O").relative_permittivity_liq_comp)
)
assert isinstance(model.state[1].Liq_A_DH, Expression)
assert len(model.state[1].Liq_A_DH) == 1
assert_units_equivalent(model.state[1].Liq_A_DH, pyunits.dimensionless)
assert str(model.state[1].Liq_A_DH.expr) == str(
(1 / 3)
* (
2
* Constants.pi
* Constants.avogadro_number
/ model.state[1].Liq_vol_mol_solvent
)
** 0.5
* (
Constants.elemental_charge**2
/ (
4
* Constants.pi
* model.state[1].Liq_relative_permittivity_solvent
* Constants.vacuum_electric_permittivity
* Constants.boltzmann_constant
* model.state[1].temperature
)
)
** (3 / 2)
)
assert isinstance(model.state[1].Liq_log_gamma_pdh, Expression)
assert len(model.state[1].Liq_log_gamma_pdh) == 6
for j in model.state[1].Liq_log_gamma_pdh:
assert j in ["H2O", "C6H12", "Na+", "H+", "Cl-", "OH-"]
if j in ["H2O", "C6H12"]:
assert str(model.state[1].Liq_log_gamma_pdh[j].expr) == str(
(
2
* model.state[1].Liq_A_DH
* model.state[1].Liq_ionic_strength ** (3 / 2)
/ (1 + 14.9 * model.state[1].Liq_ionic_strength ** (1 / 2))
)
)
else:
def ndxdn(j, k):
if j == k:
return (1 - model.state[1].Liq_x_ref[k]) / (
model.state[1].mole_frac_phase_comp_true["Liq", "Cl-"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "OH-"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "Na+"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "H+"]
)
else:
return -model.state[1].Liq_x_ref[k] / (
model.state[1].mole_frac_phase_comp_true["Liq", "Cl-"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "OH-"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "Na+"]
+ model.state[1].mole_frac_phase_comp_true["Liq", "H+"]
)
assert str(model.state[1].Liq_log_gamma_pdh[j].expr) == str(
-model.state[1].Liq_A_DH
* (
(2 * model.params.get_component(j).config.charge ** 2 / 14.9)
* log(
(1 + 14.9 * model.state[1].Liq_ionic_strength ** 0.5)
/ (1 + 14.9 * model.state[1].Liq_ionic_strength_ref ** 0.5)
)
+ (
model.params.get_component(j).config.charge ** 2
* model.state[1].Liq_ionic_strength ** 0.5
- 2 * model.state[1].Liq_ionic_strength ** 1.5
)
/ (1 + 14.9 * model.state[1].Liq_ionic_strength ** 0.5)
- (
2
* model.state[1].Liq_ionic_strength
* model.state[1].Liq_ionic_strength_ref ** -0.5
)
/ (1 + 14.9 * model.state[1].Liq_ionic_strength_ref ** 0.5)
* (
0.5
* (
model.params.get_component("Cl-").config.charge ** 2
* ndxdn(j, "Cl-")
+ model.params.get_component("OH-").config.charge ** 2
* ndxdn(j, "OH-")
+ model.params.get_component("Na+").config.charge ** 2
* ndxdn(j, "Na+")
+ model.params.get_component("H+").config.charge ** 2
* ndxdn(j, "H+")
)
)
)
)
assert isinstance(model.state[1].Liq_log_gamma_lc_I, Expression)
assert len(model.state[1].Liq_log_gamma_lc_I) == 6
for k in model.state[1].Liq_log_gamma_lc_I:
assert k in ["H2O", "C6H12", "Na+", "H+", "Cl-", "OH-"]
assert isinstance(model.state[1].Liq_log_gamma_lc_I0, Expression)
assert len(model.state[1].Liq_log_gamma_lc_I0) == 4
for k in model.state[1].Liq_log_gamma_lc_I0:
assert k in ["Na+", "H+", "Cl-", "OH-"]
assert str(model.state[1].Liq_log_gamma_lc_I0[k].expr) != str(
model.state[1].Liq_log_gamma_lc_I[k].expr
)
assert isinstance(model.state[1].Liq_log_gamma_lc, Expression)
assert len(model.state[1].Liq_log_gamma_lc) == 6
for k in model.state[1].Liq_log_gamma_lc:
assert k in ["H2O", "C6H12", "Na+", "H+", "Cl-", "OH-"]
if k in ["H2O", "C6H12"]:
assert str(model.state[1].Liq_log_gamma_lc[k].expr) == str(
model.state[1].Liq_log_gamma_lc_I[k]
)
else:
assert str(model.state[1].Liq_log_gamma_lc[k].expr) == str(
model.state[1].Liq_log_gamma_lc_I[k]
- model.state[1].Liq_log_gamma_lc_I0[k]
)
assert isinstance(model.state[1].Liq_log_gamma, Expression)
assert len(model.state[1].Liq_log_gamma) == 6
for k, v in model.state[1].Liq_log_gamma.items():
assert str(model.state[1].Liq_log_gamma[k].expr) == str(
model.state[1].Liq_log_gamma_pdh[k] + model.state[1].Liq_log_gamma_lc[k]
)
    @pytest.mark.unit
    def test_alpha(self, model):
        """Verify the 28 nonrandomness (alpha) expressions built for every valid species pair.

        Molecule-molecule pairs come straight from the parameter block;
        molecule-ion and ion-ion pairs are charge-fraction (Liq_Y) weighted
        sums over the counter-ions; like-charged ion pairs are never created.
        """
        assert isinstance(model.state[1].Liq_alpha, Expression)
        assert len(model.state[1].Liq_alpha) == 28
        # Molecule-molecule interactions
        assert str(model.state[1].Liq_alpha["H2O", "H2O"].expr) == str(
            model.params.Liq.alpha["H2O", "H2O"]
        )
        assert str(model.state[1].Liq_alpha["H2O", "C6H12"].expr) == str(
            model.params.Liq.alpha["H2O", "C6H12"]
        )
        # NOTE: reverse pair reuses the symmetric ("H2O", "C6H12") parameter.
        assert str(model.state[1].Liq_alpha["C6H12", "H2O"].expr) == str(
            model.params.Liq.alpha["H2O", "C6H12"]
        )
        assert str(model.state[1].Liq_alpha["C6H12", "C6H12"].expr) == str(
            model.params.Liq.alpha["C6H12", "C6H12"]
        )
        # Molecule-ion interactions
        assert str(model.state[1].Liq_alpha["H2O", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * model.params.Liq.alpha["H2O", "Na+, Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["H2O", "Na+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["H2O", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * model.params.Liq.alpha["H2O", "H+, Cl-"]
                + model.state[1].Liq_Y["OH-"] * model.params.Liq.alpha["H2O", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["Na+", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * model.params.Liq.alpha["H2O", "Na+, Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["H2O", "Na+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["H+", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * model.params.Liq.alpha["H2O", "H+, Cl-"]
                + model.state[1].Liq_Y["OH-"] * model.params.Liq.alpha["H2O", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["H2O", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"] * model.params.Liq.alpha["H2O", "Na+, Cl-"]
                + model.state[1].Liq_Y["H+"] * model.params.Liq.alpha["H2O", "H+, Cl-"]
            )
        )
        assert str(model.state[1].Liq_alpha["H2O", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"] * model.params.Liq.alpha["H2O", "Na+, OH-"]
                + model.state[1].Liq_Y["H+"] * model.params.Liq.alpha["H2O", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["Cl-", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"] * model.params.Liq.alpha["H2O", "Na+, Cl-"]
                + model.state[1].Liq_Y["H+"] * model.params.Liq.alpha["H2O", "H+, Cl-"]
            )
        )
        assert str(model.state[1].Liq_alpha["OH-", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"] * model.params.Liq.alpha["H2O", "Na+, OH-"]
                + model.state[1].Liq_Y["H+"] * model.params.Liq.alpha["H2O", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["C6H12", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["C6H12", "Na+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["C6H12", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * model.params.Liq.alpha["C6H12", "H+, Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["C6H12", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["Na+", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["C6H12", "Na+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["H+", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * model.params.Liq.alpha["C6H12", "H+, Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["C6H12", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["C6H12", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                + model.state[1].Liq_Y["H+"]
                * model.params.Liq.alpha["C6H12", "H+, Cl-"]
            )
        )
        assert str(model.state[1].Liq_alpha["C6H12", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * model.params.Liq.alpha["C6H12", "Na+, OH-"]
                + model.state[1].Liq_Y["H+"]
                * model.params.Liq.alpha["C6H12", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["Cl-", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                + model.state[1].Liq_Y["H+"]
                * model.params.Liq.alpha["C6H12", "H+, Cl-"]
            )
        )
        assert str(model.state[1].Liq_alpha["OH-", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * model.params.Liq.alpha["C6H12", "Na+, OH-"]
                + model.state[1].Liq_Y["H+"]
                * model.params.Liq.alpha["C6H12", "H+, OH-"]
            )
        )
        # Ion-ion interactions
        # NOTE(review): the 0.2 terms look like a hard-coded like-pair alpha
        # for identical ion pairs — confirm against the eNRTL implementation.
        assert str(model.state[1].Liq_alpha["Na+", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"] * 0.2
                + model.state[1].Liq_Y["H+"]
                * model.params.Liq.alpha["Na+, Cl-", "H+, Cl-"]
            )
        )
        assert str(model.state[1].Liq_alpha["Na+", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"] * 0.2
                + model.state[1].Liq_Y["H+"]
                * model.params.Liq.alpha["Na+, OH-", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["H+", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * model.params.Liq.alpha["Na+, Cl-", "H+, Cl-"]
                + model.state[1].Liq_Y["H+"] * 0.2
            )
        )
        assert str(model.state[1].Liq_alpha["H+", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * model.params.Liq.alpha["Na+, OH-", "H+, OH-"]
                + model.state[1].Liq_Y["H+"] * 0.2
            )
        )
        assert str(model.state[1].Liq_alpha["Cl-", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * 0.2
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["Na+, Cl-", "Na+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["Cl-", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"] * 0.2
                + model.state[1].Liq_Y["OH-"]
                * model.params.Liq.alpha["H+, Cl-", "H+, OH-"]
            )
        )
        assert str(model.state[1].Liq_alpha["OH-", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * model.params.Liq.alpha["Na+, Cl-", "Na+, OH-"]
                + model.state[1].Liq_Y["OH-"] * 0.2
            )
        )
        assert str(model.state[1].Liq_alpha["OH-", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * model.params.Liq.alpha["H+, Cl-", "H+, OH-"]
                + model.state[1].Liq_Y["OH-"] * 0.2
            )
        )
        # Like species interactions (same-charge pairs) must not be indexed
        assert ("Na+", "Na+") not in model.state[1].Liq_alpha
        assert ("Na+", "H+") not in model.state[1].Liq_alpha
        assert ("H+", "Na+") not in model.state[1].Liq_alpha
        assert ("H+", "H+") not in model.state[1].Liq_alpha
        assert ("Cl-", "Cl-") not in model.state[1].Liq_alpha
        assert ("Cl-", "OH-") not in model.state[1].Liq_alpha
        assert ("OH-", "Cl-") not in model.state[1].Liq_alpha
        assert ("OH-", "OH-") not in model.state[1].Liq_alpha
    @pytest.mark.unit
    def test_G(self, model):
        """Verify the 28 local-interaction G expressions (G = exp(-alpha * tau)).

        Same-species molecule pairs are identically 1.0; molecule-ion and
        ion-ion pairs are charge-fraction (Liq_Y) weighted sums of
        exp(-alpha*tau) over the counter-ions; the Y term for a pair with
        itself contributes exp(0) == 1 and so appears as a bare Liq_Y factor.
        """
        assert isinstance(model.state[1].Liq_G, Expression)
        assert len(model.state[1].Liq_G) == 28
        # Molecule-molecule interactions
        assert str(model.state[1].Liq_G["H2O", "H2O"].expr) == str(1.0)
        assert str(model.state[1].Liq_G["H2O", "C6H12"].expr) == str(
            exp(
                -model.params.Liq.alpha["H2O", "C6H12"]
                * model.params.Liq.tau["H2O", "C6H12"]
            )
        )
        assert str(model.state[1].Liq_G["C6H12", "H2O"].expr) == str(
            exp(
                -model.params.Liq.alpha["H2O", "C6H12"]
                * model.params.Liq.tau["C6H12", "H2O"]
            )
        )
        assert str(model.state[1].Liq_G["C6H12", "C6H12"].expr) == str(1.0)
        # Molecule-ion interactions
        assert str(model.state[1].Liq_G["H2O", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, Cl-"]
                    * model.params.Liq.tau["H2O", "Na+, Cl-"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, OH-"]
                    * model.params.Liq.tau["H2O", "Na+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["H2O", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, Cl-"]
                    * model.params.Liq.tau["H2O", "H+, Cl-"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, OH-"]
                    * model.params.Liq.tau["H2O", "H+, OH-"]
                )
            )
        )
        # NOTE: reversed (ion, molecule) pairs keep the molecule-first alpha
        # but use the ion-first tau.
        assert str(model.state[1].Liq_G["Na+", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, Cl-"]
                    * model.params.Liq.tau["Na+, Cl-", "H2O"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, OH-"]
                    * model.params.Liq.tau["Na+, OH-", "H2O"]
                )
            )
        )
        assert str(model.state[1].Liq_G["H+", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, Cl-"]
                    * model.params.Liq.tau["H+, Cl-", "H2O"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, OH-"]
                    * model.params.Liq.tau["H+, OH-", "H2O"]
                )
            )
        )
        assert str(model.state[1].Liq_G["H2O", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, Cl-"]
                    * model.params.Liq.tau["H2O", "Na+, Cl-"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, Cl-"]
                    * model.params.Liq.tau["H2O", "H+, Cl-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["H2O", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, OH-"]
                    * model.params.Liq.tau["H2O", "Na+, OH-"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, OH-"]
                    * model.params.Liq.tau["H2O", "H+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["Cl-", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, Cl-"]
                    * model.params.Liq.tau["Na+, Cl-", "H2O"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, Cl-"]
                    * model.params.Liq.tau["H+, Cl-", "H2O"]
                )
            )
        )
        assert str(model.state[1].Liq_G["OH-", "H2O"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "Na+, OH-"]
                    * model.params.Liq.tau["Na+, OH-", "H2O"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["H2O", "H+, OH-"]
                    * model.params.Liq.tau["H+, OH-", "H2O"]
                )
            )
        )
        assert str(model.state[1].Liq_G["C6H12", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                    * model.params.Liq.tau["C6H12", "Na+, Cl-"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, OH-"]
                    * model.params.Liq.tau["C6H12", "Na+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["C6H12", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, Cl-"]
                    * model.params.Liq.tau["C6H12", "H+, Cl-"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, OH-"]
                    * model.params.Liq.tau["C6H12", "H+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["Na+", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                    * model.params.Liq.tau["Na+, Cl-", "C6H12"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, OH-"]
                    * model.params.Liq.tau["Na+, OH-", "C6H12"]
                )
            )
        )
        assert str(model.state[1].Liq_G["H+", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, Cl-"]
                    * model.params.Liq.tau["H+, Cl-", "C6H12"]
                )
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, OH-"]
                    * model.params.Liq.tau["H+, OH-", "C6H12"]
                )
            )
        )
        assert str(model.state[1].Liq_G["C6H12", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                    * model.params.Liq.tau["C6H12", "Na+, Cl-"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, Cl-"]
                    * model.params.Liq.tau["C6H12", "H+, Cl-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["C6H12", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, OH-"]
                    * model.params.Liq.tau["C6H12", "Na+, OH-"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, OH-"]
                    * model.params.Liq.tau["C6H12", "H+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["Cl-", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, Cl-"]
                    * model.params.Liq.tau["Na+, Cl-", "C6H12"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, Cl-"]
                    * model.params.Liq.tau["H+, Cl-", "C6H12"]
                )
            )
        )
        assert str(model.state[1].Liq_G["OH-", "C6H12"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "Na+, OH-"]
                    * model.params.Liq.tau["Na+, OH-", "C6H12"]
                )
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["C6H12", "H+, OH-"]
                    * model.params.Liq.tau["H+, OH-", "C6H12"]
                )
            )
        )
        # Ion-ion interactions
        assert str(model.state[1].Liq_G["Na+", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["Na+, Cl-", "H+, Cl-"]
                    * model.params.Liq.tau["Na+, Cl-", "H+, Cl-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["Na+", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                + model.state[1].Liq_Y["H+"]
                * exp(
                    -model.params.Liq.alpha["Na+, OH-", "H+, OH-"]
                    * model.params.Liq.tau["Na+, OH-", "H+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["H+", "Cl-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["Na+, Cl-", "H+, Cl-"]
                    * model.params.Liq.tau["H+, Cl-", "Na+, Cl-"]
                )
                + model.state[1].Liq_Y["H+"]
            )
        )
        assert str(model.state[1].Liq_G["H+", "OH-"].expr) == str(
            (
                model.state[1].Liq_Y["Na+"]
                * exp(
                    -model.params.Liq.alpha["Na+, OH-", "H+, OH-"]
                    * model.params.Liq.tau["H+, OH-", "Na+, OH-"]
                )
                + model.state[1].Liq_Y["H+"]
            )
        )
        assert str(model.state[1].Liq_G["Cl-", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["Na+, Cl-", "Na+, OH-"]
                    * model.params.Liq.tau["Na+, Cl-", "Na+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["Cl-", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                + model.state[1].Liq_Y["OH-"]
                * exp(
                    -model.params.Liq.alpha["H+, Cl-", "H+, OH-"]
                    * model.params.Liq.tau["H+, Cl-", "H+, OH-"]
                )
            )
        )
        assert str(model.state[1].Liq_G["OH-", "Na+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["Na+, Cl-", "Na+, OH-"]
                    * model.params.Liq.tau["Na+, OH-", "Na+, Cl-"]
                )
                + model.state[1].Liq_Y["OH-"]
            )
        )
        assert str(model.state[1].Liq_G["OH-", "H+"].expr) == str(
            (
                model.state[1].Liq_Y["Cl-"]
                * exp(
                    -model.params.Liq.alpha["H+, Cl-", "H+, OH-"]
                    * model.params.Liq.tau["H+, OH-", "H+, Cl-"]
                )
                + model.state[1].Liq_Y["OH-"]
            )
        )
        # Like species interactions (same-charge pairs) must not be indexed
        assert ("Na+", "Na+") not in model.state[1].Liq_G
        assert ("Na+", "H+") not in model.state[1].Liq_G
        assert ("H+", "Na+") not in model.state[1].Liq_G
        assert ("H+", "H+") not in model.state[1].Liq_G
        assert ("Cl-", "Cl-") not in model.state[1].Liq_G
        assert ("Cl-", "OH-") not in model.state[1].Liq_G
        assert ("OH-", "Cl-") not in model.state[1].Liq_G
        assert ("OH-", "OH-") not in model.state[1].Liq_G
@pytest.mark.unit
def test_tau(self, model):
assert isinstance(model.state[1].Liq_tau, Expression)
assert len(model.state[1].Liq_tau) == 28
# Molecule-molecule interactions
assert str(model.state[1].Liq_tau["H2O", "H2O"].expr) == str(
model.params.Liq.tau["H2O", "H2O"]
)
assert str(model.state[1].Liq_tau["H2O", "C6H12"].expr) == str(
model.params.Liq.tau["H2O", "C6H12"]
)
assert str(model.state[1].Liq_tau["C6H12", "H2O"].expr) == str(
model.params.Liq.tau["C6H12", "H2O"]
)
assert str(model.state[1].Liq_tau["C6H12", "C6H12"].expr) == str(
model.params.Liq.tau["C6H12", "C6H12"]
)
for i, j in model.state[1].Liq_tau:
if (i, j) not in [
("H2O", "H2O"),
("H2O", "C6H12"),
("C6H12", "H2O"),
("C6H12", "C6H12"),
]:
assert str(model.state[1].Liq_tau[i, j].expr) == str(
-log(model.state[1].Liq_G[i, j]) / model.state[1].Liq_alpha[i, j]
)
# Like species interactions
assert ("Na+", "Na+") not in model.state[1].Liq_tau
assert ("Na+", "H+") not in model.state[1].Liq_tau
assert ("H+", "Na+") not in model.state[1].Liq_tau
assert ("H+", "H+") not in model.state[1].Liq_tau
assert ("Cl-", "Cl-") not in model.state[1].Liq_tau
assert ("Cl-", "OH-") not in model.state[1].Liq_tau
assert ("OH-", "Cl-") not in model.state[1].Liq_tau
assert ("OH-", "OH-") not in model.state[1].Liq_tau
class TestProperties(object):
    @pytest.fixture(scope="class")
    def model(self):
        """Build a single-index state block with fixed values for property checks.

        Temperature and the liquid-phase solvent log-activity are given
        concrete values so downstream expression tests can be evaluated.
        """
        m = ConcreteModel()
        m.params = GenericParameterBlock(default=configuration)
        m.state = m.params.build_state_block([1])
        # Need to set a value of T for checking expressions later
        m.state[1].temperature.set_value(300)
        # Need to set a value of log_act_phase_solvents for checking osmotic pressure calculation
        m.state[1].log_act_phase_solvents["Liq"].set_value(-1.789578)
        return m
    @pytest.mark.unit
    def test_pressure_osm_phase(self, model):
        """Check osmotic pressure: pi = -R*T*ln(a_solvent)/v_molar, with Pa units."""
        # Supply a molar volume variable the osmotic-pressure expression needs
        model.state[1].vol_mol_phase = Var(
            model.params.phase_list,
            initialize=18e-6,
            units=pyunits.m**3 / pyunits.mol,
        )
        assert_units_equivalent(model.state[1].pressure_osm_phase["Liq"], pyunits.Pa)
        assert len(model.state[1].pressure_osm_phase) == 1
        # 0.1670306 is the solvent activity, i.e. exp(-1.789578) set in the fixture
        assert pytest.approx(
            value(-Constants.gas_constant * 300 * log(0.1670306) / 18e-6), rel=1e-6
        ) == value(model.state[1].pressure_osm_phase["Liq"])
| 40.595427
| 97
| 0.468336
| 5,468
| 46,157
| 3.821507
| 0.056693
| 0.088438
| 0.160557
| 0.188266
| 0.878015
| 0.867152
| 0.841501
| 0.798765
| 0.740333
| 0.63706
| 0
| 0.034125
| 0.358147
| 46,157
| 1,136
| 98
| 40.631162
| 0.671201
| 0.033148
| 0
| 0.431527
| 0
| 0
| 0.102129
| 0.003669
| 0
| 0
| 0
| 0.00088
| 0.187192
| 1
| 0.015764
| false
| 0
| 0.010837
| 0.000985
| 0.034483
| 0.00197
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9d05b489bb73cf5cc117e439236d0f6f94f668e
| 155
|
py
|
Python
|
python/tf02.py
|
lusing/mljs
|
4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8
|
[
"MIT"
] | null | null | null |
python/tf02.py
|
lusing/mljs
|
4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8
|
[
"MIT"
] | null | null | null |
python/tf02.py
|
lusing/mljs
|
4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
def linear_layer(x):
    """Apply the affine transform 3*x + 2 to the input."""
    scaled = 3 * x
    return scaled + 2
@tf.function
def simple_nn(x):
    """ReLU over the affine layer; tf.function traces this into a TF graph."""
    return tf.nn.relu(linear_layer(x))
# Prints the tf.function wrapper object itself, not an evaluated tensor.
print(simple_nn)
| 15.5
| 38
| 0.703226
| 28
| 155
| 3.75
| 0.571429
| 0.209524
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015748
| 0.180645
| 155
| 9
| 39
| 17.222222
| 0.811024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d9d4575e016d9f1d25ba367fd553b818cdec95b8
| 11,577
|
py
|
Python
|
tests/api/v3_1_0/test_device_administration_network_conditions.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36
|
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
tests/api/v3_1_0/test_device_administration_network_conditions.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15
|
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
tests/api/v3_1_0/test_device_administration_network_conditions.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6
|
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI device_administration_network_conditions API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
# Skip every test in this module unless the targeted ISE version is exactly 3.1.0.
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_device_admin_network_conditions(json_schema_validate, obj):
    """Return False for an empty response; otherwise assert the response
    envelope shape and validate the payload against the JSON schema."""
    if not obj:
        return False
    for required_attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, required_attr)
    json_schema_validate('jsd_b4ceac9ee830523ca5ddbfdf3e1b44be_v3_1_0').validate(obj.response)
    return True
def get_device_admin_network_conditions(api):
    """Fetch all device-admin network conditions via the SDK and return the result."""
    return api.device_administration_network_conditions.get_device_admin_network_conditions()
@pytest.mark.device_administration_network_conditions
def test_get_device_admin_network_conditions(api, validator):
    """Call the endpoint; only schema/malformed-request errors count as expected failures."""
    try:
        assert is_valid_get_device_admin_network_conditions(
            validator,
            get_device_admin_network_conditions(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises so anything other than a schema or
        # malformed-request error still fails the test.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def get_device_admin_network_conditions_default(api):
    """Fetch all device-admin network conditions using only default arguments."""
    return api.device_administration_network_conditions.get_device_admin_network_conditions()
@pytest.mark.device_administration_network_conditions
def test_get_device_admin_network_conditions_default(api, validator):
    """Default-argument variant; TypeError is also acceptable (missing required args)."""
    try:
        assert is_valid_get_device_admin_network_conditions(
            validator,
            get_device_admin_network_conditions_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_device_admin_network_condition(json_schema_validate, obj):
    """Return False for an empty response; otherwise assert the response
    envelope shape and validate the payload against the JSON schema."""
    if not obj:
        return False
    for required_attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, required_attr)
    json_schema_validate('jsd_b95cf8c9aed95518b38be1fa4b514b67_v3_1_0').validate(obj.response)
    return True
def create_device_admin_network_condition(api):
    """Create a network condition using 'string' placeholder values for all fields."""
    request_kwargs = dict(
        active_validation=False,
        condition_type='string',
        conditions=[{'cliDnisList': ['string'], 'conditionType': 'string', 'description': 'string', 'id': 'string', 'ipAddrList': ['string'], 'link': {'href': 'string', 'rel': 'string', 'type': 'string'}, 'macAddrList': ['string'], 'name': 'string', 'deviceGroupList': ['string'], 'deviceList': ['string']}],
        description='string',
        id='string',
        link={'href': 'string', 'rel': 'string', 'type': 'string'},
        name='string',
        payload=None,
    )
    return api.device_administration_network_conditions.create_device_admin_network_condition(**request_kwargs)
@pytest.mark.device_administration_network_conditions
def test_create_device_admin_network_condition(api, validator):
    """Create with placeholder values; schema/malformed errors are expected failures."""
    try:
        assert is_valid_create_device_admin_network_condition(
            validator,
            create_device_admin_network_condition(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises so unexpected error types still fail.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def create_device_admin_network_condition_default(api):
    """Create call with every optional field left as None."""
    request_kwargs = dict(
        active_validation=False,
        condition_type=None,
        conditions=None,
        description=None,
        id=None,
        link=None,
        name=None,
        payload=None,
    )
    return api.device_administration_network_conditions.create_device_admin_network_condition(**request_kwargs)
@pytest.mark.device_administration_network_conditions
def test_create_device_admin_network_condition_default(api, validator):
    """Default-argument variant; TypeError is also acceptable (missing required args)."""
    try:
        assert is_valid_create_device_admin_network_condition(
            validator,
            create_device_admin_network_condition_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_device_admin_network_condition_by_id(json_schema_validate, obj):
    """Return False for an empty response; otherwise assert the response
    envelope shape and validate the payload against the JSON schema."""
    if not obj:
        return False
    for required_attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, required_attr)
    json_schema_validate('jsd_33e9cc593c395c48b31b30149467c846_v3_1_0').validate(obj.response)
    return True
def get_device_admin_network_condition_by_id(api):
    """Fetch a single network condition by its placeholder id."""
    return api.device_administration_network_conditions.get_device_admin_network_condition_by_id(id='string')
@pytest.mark.device_administration_network_conditions
def test_get_device_admin_network_condition_by_id(api, validator):
    """Get-by-id call; schema/malformed errors are the expected failure modes."""
    try:
        assert is_valid_get_device_admin_network_condition_by_id(
            validator,
            get_device_admin_network_condition_by_id(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises so unexpected error types still fail.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def get_device_admin_network_condition_by_id_default(api):
    """Fetch a single network condition by id with otherwise default arguments."""
    return api.device_administration_network_conditions.get_device_admin_network_condition_by_id(id='string')
@pytest.mark.device_administration_network_conditions
def test_get_device_admin_network_condition_by_id_default(api, validator):
    """Default-argument variant; TypeError is also acceptable (missing required args)."""
    try:
        assert is_valid_get_device_admin_network_condition_by_id(
            validator,
            get_device_admin_network_condition_by_id_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_update_device_admin_network_condition_by_id(json_schema_validate, obj):
    """Return False for an empty response; otherwise assert the response
    envelope shape and validate the payload against the JSON schema."""
    if not obj:
        return False
    for required_attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, required_attr)
    json_schema_validate('jsd_23f78898b7d655b2b81085dc7c0a964e_v3_1_0').validate(obj.response)
    return True
def update_device_admin_network_condition_by_id(api):
    """Update a network condition using 'string' placeholder values for all fields."""
    request_kwargs = dict(
        active_validation=False,
        condition_type='string',
        conditions=[{'cliDnisList': ['string'], 'conditionType': 'string', 'description': 'string', 'id': 'string', 'ipAddrList': ['string'], 'link': {'href': 'string', 'rel': 'string', 'type': 'string'}, 'macAddrList': ['string'], 'name': 'string', 'deviceGroupList': ['string'], 'deviceList': ['string']}],
        description='string',
        id='string',
        link={'href': 'string', 'rel': 'string', 'type': 'string'},
        name='string',
        payload=None,
    )
    return api.device_administration_network_conditions.update_device_admin_network_condition_by_id(**request_kwargs)
@pytest.mark.device_administration_network_conditions
def test_update_device_admin_network_condition_by_id(api, validator):
    """Update-by-id call; schema/malformed errors are the expected failure modes."""
    try:
        assert is_valid_update_device_admin_network_condition_by_id(
            validator,
            update_device_admin_network_condition_by_id(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises so unexpected error types still fail.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def update_device_admin_network_condition_by_id_default(api):
    """Update call with id set but every optional field left as None."""
    request_kwargs = dict(
        active_validation=False,
        id='string',
        condition_type=None,
        conditions=None,
        description=None,
        link=None,
        name=None,
        payload=None,
    )
    return api.device_administration_network_conditions.update_device_admin_network_condition_by_id(**request_kwargs)
@pytest.mark.device_administration_network_conditions
def test_update_device_admin_network_condition_by_id_default(api, validator):
    """Default-argument variant; TypeError is also acceptable (missing required args)."""
    try:
        assert is_valid_update_device_admin_network_condition_by_id(
            validator,
            update_device_admin_network_condition_by_id_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_device_admin_network_condition_by_id(json_schema_validate, obj):
    """Return False for an empty response; otherwise assert the response
    envelope shape and validate the payload against the JSON schema."""
    if not obj:
        return False
    for required_attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, required_attr)
    json_schema_validate('jsd_7c0b4d1bbda75355912f208521362a41_v3_1_0').validate(obj.response)
    return True
def delete_device_admin_network_condition_by_id(api):
    """Delete a single network condition by its placeholder id."""
    return api.device_administration_network_conditions.delete_device_admin_network_condition_by_id(id='string')
@pytest.mark.device_administration_network_conditions
def test_delete_device_admin_network_condition_by_id(api, validator):
    """Delete-by-id call; schema/malformed errors are the expected failure modes."""
    try:
        assert is_valid_delete_device_admin_network_condition_by_id(
            validator,
            delete_device_admin_network_condition_by_id(api)
        )
    except Exception as original_e:
        # Re-raise inside pytest.raises so unexpected error types still fail.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def delete_device_admin_network_condition_by_id_default(api):
    """Delete a single network condition by id with otherwise default arguments."""
    return api.device_administration_network_conditions.delete_device_admin_network_condition_by_id(id='string')
@pytest.mark.device_administration_network_conditions
def test_delete_device_admin_network_condition_by_id_default(api, validator):
    """Default-argument variant; TypeError is also acceptable (missing required args)."""
    try:
        assert is_valid_delete_device_admin_network_condition_by_id(
            validator,
            delete_device_admin_network_condition_by_id_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
| 37.466019
| 308
| 0.74242
| 1,373
| 11,577
| 5.893664
| 0.13984
| 0.074765
| 0.122343
| 0.146812
| 0.8239
| 0.8239
| 0.818586
| 0.793994
| 0.792511
| 0.774963
| 0
| 0.012961
| 0.180271
| 11,577
| 308
| 309
| 37.587662
| 0.839726
| 0.102445
| 0
| 0.711207
| 0
| 0
| 0.089176
| 0.020705
| 0
| 0
| 0
| 0
| 0.12931
| 1
| 0.107759
| false
| 0
| 0.021552
| 0
| 0.215517
| 0.021552
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9dd2c93585c769774134476877f5116970858af
| 5,363
|
py
|
Python
|
services/resource_manager/src/oci_cli_resource_manager/resourcemanager_cli_extended.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
services/resource_manager/src/oci_cli_resource_manager/resourcemanager_cli_extended.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
services/resource_manager/src/oci_cli_resource_manager/resourcemanager_cli_extended.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import click
import os
import os.path
import sys
from oci_cli_resource_manager.generated import resourcemanager_cli
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
import oci # noqa: F401
import base64
import zipfile
# Remove the auto-generated create/update stack commands; the extended
# versions defined below replace them with --config-source zip handling.
resourcemanager_cli.stack_group.commands.pop(resourcemanager_cli.create_stack.name)
resourcemanager_cli.stack_group.commands.pop(resourcemanager_cli.update_stack.name)
# Disabling nested polymorphic commands:
resourcemanager_cli.stack_group.commands.pop(resourcemanager_cli.create_stack_create_zip_upload_config_source_details.name)
resourcemanager_cli.stack_group.commands.pop(resourcemanager_cli.update_stack_update_zip_upload_config_source_details.name)
def create_base64encoded_zip(config_source):
    """Return the base64-encoded bytes of a valid .zip file as a UTF-8 string.

    Returns None (implicitly, for callers to check) when the path is not an
    existing .zip archive.
    """
    looks_like_zip = (
        config_source.endswith(".zip")
        and os.path.isfile(config_source)
        and zipfile.is_zipfile(config_source)
    )
    if looks_like_zip:
        with open(config_source, mode='rb') as zip_file:
            raw_bytes = zip_file.read()
        return base64.b64encode(raw_bytes).decode('utf-8')
@cli_util.copy_params_from_generated_command(resourcemanager_cli.create_stack, params_to_exclude=['config_source'])
@resourcemanager_cli.stack_group.command(name=cli_util.override('create_stack.command_name', 'create'), help="""Creates a Stack""")
@cli_util.option('--config-source', required=True, help="""A Terraform configuration .zip file.""")
@cli_util.option('--working-directory', help=""" The path of the directory from which to run terraform. If not specified the root will be used.""")
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'variables': {'module': 'resource_manager', 'class': 'dict(str, string)'}, 'freeform-tags': {'module': 'resource_manager', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'resource_manager', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'resource_manager', 'class': 'Stack'})
@cli_util.wrap_exceptions
def create_stack_extended(ctx, config_source, working_directory, **kwargs):
    # Extended 'create' command: validates --config-source is an existing .zip,
    # base64-encodes it, and delegates to the generated create_stack command.
    # Expand ~ and environment variables so shell-style paths work.
    config_source = os.path.expandvars(os.path.expanduser(config_source))
    if not os.path.exists(config_source):
        click.echo('Config source does not exist', file=sys.stderr)
        ctx.abort()
    if not (config_source.endswith(".zip") and os.path.isfile(config_source) and zipfile.is_zipfile(config_source)):
        click.echo('Config source must be a .zip file.', file=sys.stderr)
        ctx.abort()
    send_value = create_base64encoded_zip(config_source)
    if not send_value:
        click.echo('Internal error: Unable to generate encoded zip', file=sys.stderr)
        ctx.abort()
    # Reassemble the config_source parameter excluded above into the
    # ZIP_UPLOAD polymorphic shape expected by the generated command.
    kwargs['config_source'] = {
        'configSourceType': oci.resource_manager.models.ConfigSource.CONFIG_SOURCE_TYPE_ZIP_UPLOAD,
        'zipFileBase64Encoded': send_value}
    if working_directory is not None:
        kwargs['config_source']['workingDirectory'] = working_directory
    json_skeleton_utils.remove_json_skeleton_params_from_dict(kwargs)
    ctx.invoke(resourcemanager_cli.create_stack, **kwargs)
@cli_util.copy_params_from_generated_command(resourcemanager_cli.update_stack, params_to_exclude=['config_source'])
@resourcemanager_cli.stack_group.command(name=cli_util.override('update_stack.command_name', 'update'), help="""Update the Stack object""")
@cli_util.option('--config-source', help="""A Terraform configuration .zip file.""")
@cli_util.option('--working-directory', help=""" The path of the directory from which to run terraform. If not specified the root will be used.""")
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'variables': {'module': 'resource_manager', 'class': 'dict(str, string)'}, 'freeform-tags': {'module': 'resource_manager', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'resource_manager', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'resource_manager', 'class': 'Stack'})
@cli_util.wrap_exceptions
def update_stack_extended(ctx, config_source, working_directory, **kwargs):
    """Update a stack, optionally replacing its Terraform configuration with a
    local .zip (validated and base64-encoded into a ZIP_UPLOAD config source)
    before delegating to the generated update_stack command.

    BUGFIX: output_type module was misspelled 'reseouce_manager'; corrected to
    'resource_manager' to match the create command's decorator above.
    """
    # Only build a config_source payload when the caller supplied either field.
    if working_directory is not None or config_source is not None:
        kwargs['config_source'] = {'configSourceType': oci.resource_manager.models.ConfigSource.CONFIG_SOURCE_TYPE_ZIP_UPLOAD}
        if working_directory is not None:
            kwargs['config_source']['workingDirectory'] = working_directory
        if config_source is not None:
            # Expand ~ and $VARS so user-relative paths work from the shell.
            config_source = os.path.expandvars(os.path.expanduser(config_source))
            if not os.path.exists(config_source):
                click.echo('Config source does not exist', file=sys.stderr)
                ctx.abort()
            if not (config_source.endswith(".zip") and os.path.isfile(config_source) and zipfile.is_zipfile(config_source)):
                click.echo('Config source must be a .zip file.', file=sys.stderr)
                ctx.abort()
            send_value = create_base64encoded_zip(config_source)
            # Defensive: the checks above should guarantee a value.
            if not send_value:
                click.echo('Internal error: Unable to generate encoded zip', file=sys.stderr)
                ctx.abort()
            kwargs['config_source']['zipFileBase64Encoded'] = send_value
    # Strip skeleton-generation params before forwarding to the generated command.
    json_skeleton_utils.remove_json_skeleton_params_from_dict(kwargs)
    ctx.invoke(resourcemanager_cli.update_stack, **kwargs)
| 55.28866
| 390
| 0.747902
| 709
| 5,363
| 5.390691
| 0.198872
| 0.125589
| 0.038462
| 0.047619
| 0.832548
| 0.810832
| 0.776295
| 0.776295
| 0.728676
| 0.699895
| 0
| 0.006227
| 0.131643
| 5,363
| 96
| 391
| 55.864583
| 0.814473
| 0.026105
| 0
| 0.478873
| 1
| 0
| 0.244921
| 0.009582
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0.028169
| 0.140845
| 0
| 0.197183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9e804f45e205395ab7f4e18c73a2315dd967bb5
| 142
|
py
|
Python
|
reid/utils/data/__init__.py
|
gyxoned/open-reid
|
98c6970555825b02e76de2f64a5d0773fe173983
|
[
"MIT"
] | 1
|
2018-12-20T06:16:25.000Z
|
2018-12-20T06:16:25.000Z
|
reid/utils/data/__init__.py
|
gyxoned/open-reid
|
98c6970555825b02e76de2f64a5d0773fe173983
|
[
"MIT"
] | null | null | null |
reid/utils/data/__init__.py
|
gyxoned/open-reid
|
98c6970555825b02e76de2f64a5d0773fe173983
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .dataset import Dataset
from .dataset import Dataset_MSMT
from .preprocessor import Preprocessor
| 23.666667
| 38
| 0.859155
| 18
| 142
| 6.444444
| 0.388889
| 0.189655
| 0.293103
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119718
| 142
| 5
| 39
| 28.4
| 0.928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d9ec0767a4495e73f66084d18af9058050d99226
| 6,693
|
py
|
Python
|
Platforms/Web/Processing/Api/Discord/errors.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 2
|
2017-09-14T08:07:55.000Z
|
2021-05-18T05:05:05.000Z
|
Platforms/Web/Processing/Api/Discord/errors.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 111
|
2018-04-15T14:32:14.000Z
|
2021-03-28T21:06:29.000Z
|
Platforms/Web/Processing/Api/Discord/errors.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 1
|
2018-04-15T13:24:44.000Z
|
2018-04-15T13:24:44.000Z
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.main_web import PhaazebotWeb
import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
async def apiDiscordGuildUnknown(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Build the 400 'discord_guild_unknown' API error response.

    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * guild_id `str` *
    * guild_name `str` *

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Could not find a phaaze known guild
    """
    payload: dict = dict(status=400, error="discord_guild_unknown")

    # Echo any identifying fields the caller supplied back into the payload.
    for field in ("guild_id", "guild_name"):
        value = kwargs.get(field, "")
        if value:
            payload[field] = str(value)

    # build message
    fallback: str = "Could not find a phaaze known guild"
    guild_name: str = kwargs.get("guild_name", "")
    guild_id: str = kwargs.get("guild_id", "")
    if guild_name:
        fallback += f" with name '{guild_name}'"
    if guild_id:
        fallback += f" (Guild ID:{guild_id})"

    message: str = kwargs.get("msg", fallback)
    payload["msg"] = message

    cls.BASE.Logger.debug(f"(API/Discord) 400: {WebRequest.path} | {message}", require="api:400")
    return cls.response(
        text=json.dumps(payload),
        content_type="application/json",
        status=400
    )
async def apiDiscordMissingPermission(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Build the 400 'discord_missing_permission' API error response.

    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * user_id `str` *
    * user_name `str` *
    * guild_id `str`
    * guild_name `str`

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Missing 'administrator' or 'manage_guild' permission
    """
    payload: dict = dict(status=400, error="discord_missing_permission")

    # Echo any identifying fields the caller supplied back into the payload.
    for field in ("user_id", "user_name", "guild_id", "guild_name"):
        value = kwargs.get(field, "")
        if value:
            payload[field] = str(value)

    # build message
    fallback: str = "Missing 'administrator' or 'manage_guild' permission"
    user_name: str = kwargs.get("user_name", "")
    user_id: str = kwargs.get("user_id", "")
    if user_name:
        fallback += f" for user '{user_name}'"
    if user_id:
        fallback += f" (User ID:{user_id})"

    message: str = kwargs.get("msg", fallback)
    payload["msg"] = message

    cls.BASE.Logger.debug(f"(API/Discord) 400 Missing Permission: {WebRequest.path} | {message}", require="api:400")
    return cls.response(
        text=json.dumps(payload),
        content_type="application/json",
        status=400
    )
async def apiDiscordMemberNotFound(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Build the 404 'discord_member_not_found' API error response.

    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * user_id `str` *
    * user_name `str` *
    * guild_id `str`
    * guild_name `str`

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Could not find a valid member
    """
    res:dict = dict(status=404, error="discord_member_not_found")

    user_id:str = kwargs.get("user_id", "")
    if user_id:
        res["user_id"] = str(user_id)

    user_name:str = kwargs.get("user_name", "")
    if user_name:
        res["user_name"] = str(user_name)

    guild_id:str = kwargs.get("guild_id", "")
    if guild_id:
        res["guild_id"] = str(guild_id)

    guild_name:str = kwargs.get("guild_name", "")
    if guild_name:
        res["guild_name"] = str(guild_name)

    # build message
    default_msg:str = "Could not find a valid member"
    if user_name:
        default_msg += f" with name '{user_name}'"
    if user_id:
        default_msg += f" (User ID: {user_id})"

    msg:str = kwargs.get("msg", default_msg)
    res["msg"] = msg

    # BUGFIX: log line previously said "400" although this endpoint responds
    # with status=404 and logs under require="api:404".
    cls.BASE.Logger.debug(f"(API/Discord) 404 Member not Found: {WebRequest.path} | {msg}", require="api:404")
    return cls.response(
        text=json.dumps(res),
        content_type="application/json",
        status=404
    )
async def apiDiscordRoleNotFound(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Build the 404 'discord_role_not_found' API error response.

    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * role_id `str` *
    * role_name `str` *
    * guild_id `str`
    * guild_name `str`

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Could not find a valid role
    """
    res:dict = dict(status=404, error="discord_role_not_found")

    role_id:str = kwargs.get("role_id", "")
    if role_id:
        res["role_id"] = str(role_id)

    role_name:str = kwargs.get("role_name", "")
    if role_name:
        res["role_name"] = str(role_name)

    guild_id:str = kwargs.get("guild_id", "")
    if guild_id:
        res["guild_id"] = str(guild_id)

    guild_name:str = kwargs.get("guild_name", "")
    if guild_name:
        res["guild_name"] = str(guild_name)

    # build message
    default_msg:str = "Could not find a valid role"
    if role_name:
        default_msg += f" with name '{role_name}'"
    if role_id:
        default_msg += f" (Role ID:{role_id})"

    msg:str = kwargs.get("msg", default_msg)
    res["msg"] = msg

    # BUGFIX: log line previously said "400" although this endpoint responds
    # with status=404 and logs under require="api:404".
    cls.BASE.Logger.debug(f"(API/Discord) 404 Role not Found: {WebRequest.path} | {msg}", require="api:404")
    return cls.response(
        text=json.dumps(res),
        content_type="application/json",
        status=404
    )
async def apiDiscordChannelNotFound(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
    """
    Build the 404 'discord_channel_not_found' API error response.

    Optional keywords:
    ------------------
    * msg `str` : (Default: None) * [Overwrites default]
    * channel_id `str` *
    * channel_name `str` *
    * guild_id `str`
    * guild_name `str`

    Default message (*gets altered by optional keywords):
    ----------------------------------------------------
    Could not find a valid channel
    """
    res:dict = dict(status=404, error="discord_channel_not_found")

    channel_id:str = kwargs.get("channel_id", "")
    if channel_id:
        res["channel_id"] = str(channel_id)

    channel_name:str = kwargs.get("channel_name", "")
    if channel_name:
        res["channel_name"] = str(channel_name)

    guild_id:str = kwargs.get("guild_id", "")
    if guild_id:
        res["guild_id"] = str(guild_id)

    guild_name:str = kwargs.get("guild_name", "")
    if guild_name:
        res["guild_name"] = str(guild_name)

    # build message
    default_msg:str = "Could not find a valid channel"
    if channel_name:
        default_msg += f" with name '{channel_name}'"
    if channel_id:
        default_msg += f" (Channel ID:{channel_id})"

    msg:str = kwargs.get("msg", default_msg)
    res["msg"] = msg

    # BUGFIX: log line previously said "400" although this endpoint responds
    # with status=404 and logs under require="api:404".
    cls.BASE.Logger.debug(f"(API/Discord) 404 Channel not Found: {WebRequest.path} | {msg}", require="api:404")
    return cls.response(
        text=json.dumps(res),
        content_type="application/json",
        status=404
    )
| 26.454545
| 109
| 0.657104
| 921
| 6,693
| 4.589577
| 0.087948
| 0.054649
| 0.065295
| 0.035486
| 0.80246
| 0.80246
| 0.745919
| 0.700497
| 0.700497
| 0.700497
| 0
| 0.010471
| 0.143882
| 6,693
| 252
| 110
| 26.559524
| 0.727225
| 0.010309
| 0
| 0.651852
| 0
| 0
| 0.2666
| 0.023529
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.037037
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a1a3587280066544f3a8d15ee2fd39a5e78c679
| 2,588
|
py
|
Python
|
python/pycylon/pycylon/types.py
|
deHasara/cylon
|
f5e31a1191d6a30c0a8c5778a7db4a07c5802da8
|
[
"Apache-2.0"
] | 229
|
2020-07-01T14:05:10.000Z
|
2022-03-25T12:26:58.000Z
|
python/pycylon/pycylon/types.py
|
deHasara/cylon
|
f5e31a1191d6a30c0a8c5778a7db4a07c5802da8
|
[
"Apache-2.0"
] | 261
|
2020-06-30T23:23:15.000Z
|
2022-03-16T09:55:40.000Z
|
python/pycylon/pycylon/types.py
|
deHasara/cylon
|
f5e31a1191d6a30c0a8c5778a7db4a07c5802da8
|
[
"Apache-2.0"
] | 36
|
2020-06-30T23:14:52.000Z
|
2022-03-03T02:37:09.000Z
|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycylon.data.data_type import DataType
from pycylon.data.data_type import Type
from pycylon.data.data_type import Layout
# Factory helpers returning a pycylon DataType for each supported logical type.
# NOTE: float/string/bool/list deliberately shadow Python builtins — they are
# the public pycylon API surface and cannot be renamed without breaking callers.
# NOTE(review): every factory uses Layout.FIXED_WIDTH, including variable-width
# types such as string/binary/list — preserved as-is; confirm against the
# C++ layer before changing.
# BUGFIX: removed a duplicate definition of double() that appeared twice and
# silently shadowed the first (identical) definition.


def int8():
    """8-bit signed integer type."""
    return DataType(Type.INT8, Layout.FIXED_WIDTH)


def int16():
    """16-bit signed integer type."""
    return DataType(Type.INT16, Layout.FIXED_WIDTH)


def int32():
    """32-bit signed integer type."""
    return DataType(Type.INT32, Layout.FIXED_WIDTH)


def int64():
    """64-bit signed integer type."""
    return DataType(Type.INT64, Layout.FIXED_WIDTH)


def uint8():
    """8-bit unsigned integer type."""
    return DataType(Type.UINT8, Layout.FIXED_WIDTH)


def uint16():
    """16-bit unsigned integer type."""
    return DataType(Type.UINT16, Layout.FIXED_WIDTH)


def uint32():
    """32-bit unsigned integer type."""
    return DataType(Type.UINT32, Layout.FIXED_WIDTH)


def uint64():
    """64-bit unsigned integer type."""
    return DataType(Type.UINT64, Layout.FIXED_WIDTH)


def float():
    """Single-precision floating point type."""
    return DataType(Type.FLOAT, Layout.FIXED_WIDTH)


def double():
    """Double-precision floating point type."""
    return DataType(Type.DOUBLE, Layout.FIXED_WIDTH)


def half_float():
    """Half-precision floating point type."""
    return DataType(Type.HALF_FLOAT, Layout.FIXED_WIDTH)


def string():
    """String type."""
    return DataType(Type.STRING, Layout.FIXED_WIDTH)


def binary():
    """Binary (byte blob) type."""
    return DataType(Type.BINARY, Layout.FIXED_WIDTH)


def fixed_sized_binary():
    """Fixed-size binary type."""
    return DataType(Type.FIXED_SIZE_BINARY, Layout.FIXED_WIDTH)


def bool():
    """Boolean type."""
    return DataType(Type.BOOL, Layout.FIXED_WIDTH)


def date32():
    """32-bit date type."""
    return DataType(Type.DATE32, Layout.FIXED_WIDTH)


def date64():
    """64-bit date type."""
    return DataType(Type.DATE64, Layout.FIXED_WIDTH)


def timestamp():
    """Timestamp type."""
    return DataType(Type.TIMESTAMP, Layout.FIXED_WIDTH)


def time32():
    """32-bit time-of-day type."""
    return DataType(Type.TIME32, Layout.FIXED_WIDTH)


def time64():
    """64-bit time-of-day type."""
    return DataType(Type.TIME64, Layout.FIXED_WIDTH)


def interval():
    """Interval type."""
    return DataType(Type.INTERVAL, Layout.FIXED_WIDTH)


def decimal():
    """Decimal type."""
    return DataType(Type.DECIMAL, Layout.FIXED_WIDTH)


def list():
    """List type."""
    return DataType(Type.LIST, Layout.FIXED_WIDTH)


def fixed_sized_list():
    """Fixed-size list type."""
    return DataType(Type.FIXED_SIZED_LIST, Layout.FIXED_WIDTH)


def extension():
    """Extension type."""
    return DataType(Type.EXTENSION, Layout.FIXED_WIDTH)


def duration():
    """Duration type."""
    return DataType(Type.DURATION, Layout.FIXED_WIDTH)
| 20.377953
| 74
| 0.743818
| 360
| 2,588
| 5.236111
| 0.266667
| 0.200531
| 0.257825
| 0.262069
| 0.185146
| 0.149072
| 0.072149
| 0.072149
| 0.072149
| 0.072149
| 0
| 0.021958
| 0.155332
| 2,588
| 126
| 75
| 20.539683
| 0.840348
| 0.199768
| 0
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.473684
| true
| 0
| 0.052632
| 0.473684
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
8a519308912828072b89b482dcc4866971d6eca5
| 60,134
|
py
|
Python
|
sql/engines/tests.py
|
flyingonthebed/Archery
|
a5e840b26731bc7834e8716f38f4025e2887a351
|
[
"Apache-2.0"
] | 3
|
2019-10-10T08:09:09.000Z
|
2021-04-07T02:35:31.000Z
|
sql/engines/tests.py
|
flyingonthebed/Archery
|
a5e840b26731bc7834e8716f38f4025e2887a351
|
[
"Apache-2.0"
] | 2
|
2020-06-05T21:06:12.000Z
|
2021-06-10T21:32:36.000Z
|
sql/engines/tests.py
|
flyingonthebed/Archery
|
a5e840b26731bc7834e8716f38f4025e2887a351
|
[
"Apache-2.0"
] | 1
|
2019-12-05T02:48:14.000Z
|
2019-12-05T02:48:14.000Z
|
import json
from datetime import timedelta, datetime
from unittest.mock import patch, Mock, ANY
from django.contrib.auth import get_user_model
from django.test import TestCase
from common.config import SysConfig
from sql.engines import EngineBase
from sql.engines.goinception import GoInceptionEngine
from sql.engines.models import ResultSet, ReviewSet, ReviewResult
from sql.engines.mssql import MssqlEngine
from sql.engines.mysql import MysqlEngine
from sql.engines.redis import RedisEngine
from sql.engines.pgsql import PgSQLEngine
from sql.engines.oracle import OracleEngine
from sql.engines.inception import InceptionEngine, _repair_json_str
from sql.models import Instance, SqlWorkflow, SqlWorkflowContent
User = get_user_model()
class TestReviewSet(TestCase):
    """Sanity check that ReviewSet serialises its rows into JSON."""

    def test_review_set(self):
        review_set = ReviewSet()
        review_set.rows = [{'id': '1679123'}]
        serialised = review_set.json()
        self.assertIn('1679123', serialised)
class TestEngineBase(TestCase):
    """Tests that EngineBase copies connection attributes off the Instance model."""

    @classmethod
    def setUpClass(cls):
        # Build one user / instance / workflow fixture shared by every test.
        cls.u1 = User(username='some_user', display='用户1')
        cls.u1.save()
        cls.ins1 = Instance(instance_name='some_ins', type='master', db_type='mssql', host='some_host',
                            port=1366, user='ins_user', password='some_pass')
        cls.ins1.save()
        cls.wf1 = SqlWorkflow.objects.create(
            workflow_name='some_name',
            group_id=1,
            group_name='g1',
            engineer=cls.u1.username,
            engineer_display=cls.u1.display,
            audit_auth_groups='some_group',
            create_time=datetime.now() - timedelta(days=1),
            status='workflow_finish',
            is_backup=True,
            instance=cls.ins1,
            db_name='some_db',
            syntax_type=1
        )
        cls.wfc1 = SqlWorkflowContent.objects.create(
            workflow=cls.wf1,
            sql_content='some_sql',
            execute_result=json.dumps([{
                'id': 1,
                'sql': 'some_content'
            }]))

    @classmethod
    def tearDownClass(cls):
        # Delete fixtures in reverse dependency order (content -> workflow -> instance -> user).
        cls.wfc1.delete()
        cls.wf1.delete()
        cls.ins1.delete()
        cls.u1.delete()

    def test_init_with_ins(self):
        # EngineBase(instance=...) should expose the instance's name and user.
        engine = EngineBase(instance=self.ins1)
        self.assertEqual(self.ins1.instance_name, engine.instance_name)
        self.assertEqual(self.ins1.user, engine.user)
class TestMssql(TestCase):
    """Tests for MssqlEngine: connection, queries, metadata, checks and execution.

    All pyodbc interaction is mocked; no real SQL Server is required.
    """

    @classmethod
    def setUpClass(cls):
        cls.ins1 = Instance(instance_name='some_ins', type='slave', db_type='mssql', host='some_host',
                            port=1366, user='ins_user', password='some_pass')
        cls.ins1.save()
        cls.engine = MssqlEngine(instance=cls.ins1)
        cls.wf = SqlWorkflow.objects.create(
            workflow_name='some_name',
            group_id=1,
            group_name='g1',
            engineer_display='',
            audit_auth_groups='some_group',
            create_time=datetime.now() - timedelta(days=1),
            status='workflow_finish',
            is_backup=True,
            instance=cls.ins1,
            db_name='some_db',
            syntax_type=1
        )
        SqlWorkflowContent.objects.create(workflow=cls.wf, sql_content='insert into some_tb values (1)')

    @classmethod
    def tearDownClass(cls):
        cls.ins1.delete()
        cls.wf.delete()
        SqlWorkflowContent.objects.all().delete()

    @patch('sql.engines.mssql.pyodbc.connect')
    def testGetConnection(self, connect):
        # get_connection should open exactly one pyodbc connection.
        new_engine = MssqlEngine(instance=self.ins1)
        new_engine.get_connection()
        connect.assert_called_once()

    @patch('sql.engines.mssql.pyodbc.connect')
    def testQuery(self, connect):
        # Simulate a cursor returning one row and two described columns.
        cur = Mock()
        connect.return_value.cursor = cur
        cur.return_value.execute = Mock()
        cur.return_value.fetchmany.return_value = (('v1', 'v2'),)
        cur.return_value.description = (('k1', 'some_other_des'), ('k2', 'some_other_des'))
        new_engine = MssqlEngine(instance=self.ins1)
        query_result = new_engine.query(sql='some_str', limit_num=100)
        cur.return_value.execute.assert_called()
        # limit_num should be forwarded to fetchmany.
        cur.return_value.fetchmany.assert_called_once_with(100)
        connect.return_value.close.assert_called_once()
        self.assertIsInstance(query_result, ResultSet)

    @patch.object(MssqlEngine, 'query')
    def testAllDb(self, mock_query):
        # Rows of 1-tuples are flattened into a plain list of database names.
        db_result = ResultSet()
        db_result.rows = [('db_1',), ('db_2',)]
        mock_query.return_value = db_result
        new_engine = MssqlEngine(instance=self.ins1)
        dbs = new_engine.get_all_databases()
        self.assertEqual(dbs.rows, ['db_1', 'db_2'])

    @patch.object(MssqlEngine, 'query')
    def testAllTables(self, mock_query):
        # Only the first column (table name) of each row is kept.
        table_result = ResultSet()
        table_result.rows = [('tb_1', 'some_des'), ('tb_2', 'some_des')]
        mock_query.return_value = table_result
        new_engine = MssqlEngine(instance=self.ins1)
        tables = new_engine.get_all_tables('some_db')
        mock_query.assert_called_once_with(db_name='some_db', sql=ANY)
        self.assertEqual(tables.rows, ['tb_1', 'tb_2'])

    @patch.object(MssqlEngine, 'query')
    def testAllColumns(self, mock_query):
        # Only the first column (column name) of each row is kept.
        db_result = ResultSet()
        db_result.rows = [('col_1', 'type'), ('col_2', 'type2')]
        mock_query.return_value = db_result
        new_engine = MssqlEngine(instance=self.ins1)
        dbs = new_engine.get_all_columns_by_tb('some_db', 'some_tb')
        self.assertEqual(dbs.rows, ['col_1', 'col_2'])

    @patch.object(MssqlEngine, 'query')
    def testDescribe(self, mock_query):
        # describe_table should issue exactly one metadata query.
        new_engine = MssqlEngine(instance=self.ins1)
        new_engine.describe_table('some_db', 'some_db')
        mock_query.assert_called_once()

    def testQueryCheck(self):
        new_engine = MssqlEngine(instance=self.ins1)
        # Spot-check just one banned function (concat) in both select list and where clause.
        banned_sql = 'select concat(phone,1) from user_table'
        check_result = new_engine.query_check(db_name='some_db', sql=banned_sql)
        self.assertTrue(check_result.get('bad_query'))
        banned_sql = 'select phone from user_table where phone=concat(phone,1)'
        check_result = new_engine.query_check(db_name='some_db', sql=banned_sql)
        self.assertTrue(check_result.get('bad_query'))

    def test_filter_sql(self):
        new_engine = MssqlEngine(instance=self.ins1)
        # filter_sql should inject a TOP clause to cap the row count.
        banned_sql = 'select user from user_table'
        check_result = new_engine.filter_sql(sql=banned_sql, limit_num=10)
        self.assertEqual(check_result, "select top 10 user from user_table")

    @patch('sql.engines.mssql.MssqlEngine.execute')
    def test_execute_workflow(self, mock_execute):
        mock_execute.return_value.error = None
        new_engine = MssqlEngine(instance=self.ins1)
        new_engine.execute_workflow(self.wf)
        # execute is called once per backup table, plus once for the actual
        # statement; with no backup tables here that totals one call.
        mock_execute.assert_called()
        self.assertEqual(1, mock_execute.call_count)

    @patch('sql.engines.mssql.MssqlEngine.get_connection')
    def test_execute(self, mock_connect):
        mock_cursor = Mock()
        mock_connect.return_value.cursor = mock_cursor
        new_engine = MssqlEngine(instance=self.ins1)
        execute_result = new_engine.execute('some_db', 'some_sql')
        # Verify result: no exception expected on the happy path.
        self.assertIsNone(execute_result.error)
        self.assertEqual('some_sql', execute_result.full_sql)
        self.assertEqual(2, len(execute_result.rows))
        mock_cursor.return_value.execute.assert_called()
        mock_cursor.return_value.commit.assert_called()
        mock_cursor.reset_mock()
        # Verify the exception path: error captured, rollback instead of commit.
        mock_cursor.return_value.execute.side_effect = Exception('Boom! some exception!')
        execute_result = new_engine.execute('some_db', 'some_sql')
        self.assertIn('Boom! some exception!', execute_result.error)
        self.assertEqual('some_sql', execute_result.full_sql)
        self.assertEqual(2, len(execute_result.rows))
        mock_cursor.return_value.commit.assert_not_called()
        mock_cursor.return_value.rollback.assert_called()
class TestMysql(TestCase):
    """Tests for MysqlEngine: queries, checks, masking, inception execution and
    variable management. All MySQLdb / Inception interaction is mocked."""

    def setUp(self):
        self.ins1 = Instance(instance_name='some_ins', type='slave', db_type='mysql', host='some_host',
                             port=1366, user='ins_user', password='some_pass')
        self.ins1.save()
        self.sys_config = SysConfig()
        self.wf = SqlWorkflow.objects.create(
            workflow_name='some_name',
            group_id=1,
            group_name='g1',
            engineer_display='',
            audit_auth_groups='some_group',
            create_time=datetime.now() - timedelta(days=1),
            status='workflow_finish',
            is_backup=True,
            instance=self.ins1,
            db_name='some_db',
            syntax_type=1
        )
        SqlWorkflowContent.objects.create(workflow=self.wf)

    def tearDown(self):
        self.ins1.delete()
        # purge() clears all SysConfig entries set during a test.
        self.sys_config.purge()
        SqlWorkflow.objects.all().delete()
        SqlWorkflowContent.objects.all().delete()

    @patch('MySQLdb.connect')
    def test_engine_base_info(self, _conn):
        new_engine = MysqlEngine(instance=self.ins1)
        self.assertEqual(new_engine.name, 'MySQL')
        self.assertEqual(new_engine.info, 'MySQL engine')

    @patch('MySQLdb.connect')
    def testGetConnection(self, connect):
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.get_connection()
        connect.assert_called_once()

    @patch('MySQLdb.connect')
    def testQuery(self, connect):
        # Simulate a cursor returning one row and two described columns.
        cur = Mock()
        connect.return_value.cursor = cur
        cur.return_value.execute = Mock()
        cur.return_value.fetchmany.return_value = (('v1', 'v2'),)
        cur.return_value.description = (('k1', 'some_other_des'), ('k2', 'some_other_des'))
        new_engine = MysqlEngine(instance=self.ins1)
        query_result = new_engine.query(sql='some_str', limit_num=100)
        cur.return_value.execute.assert_called()
        # Unlike MssqlEngine, limit_num is passed as the keyword "size" here.
        cur.return_value.fetchmany.assert_called_once_with(size=100)
        connect.return_value.close.assert_called_once()
        self.assertIsInstance(query_result, ResultSet)

    @patch.object(MysqlEngine, 'query')
    def testAllDb(self, mock_query):
        db_result = ResultSet()
        db_result.rows = [('db_1',), ('db_2',)]
        mock_query.return_value = db_result
        new_engine = MysqlEngine(instance=self.ins1)
        dbs = new_engine.get_all_databases()
        self.assertEqual(dbs.rows, ['db_1', 'db_2'])

    @patch.object(MysqlEngine, 'query')
    def testAllTables(self, mock_query):
        table_result = ResultSet()
        table_result.rows = [('tb_1', 'some_des'), ('tb_2', 'some_des')]
        mock_query.return_value = table_result
        new_engine = MysqlEngine(instance=self.ins1)
        tables = new_engine.get_all_tables('some_db')
        mock_query.assert_called_once_with(db_name='some_db', sql=ANY)
        self.assertEqual(tables.rows, ['tb_1', 'tb_2'])

    @patch.object(MysqlEngine, 'query')
    def testAllColumns(self, mock_query):
        db_result = ResultSet()
        db_result.rows = [('col_1', 'type'), ('col_2', 'type2')]
        mock_query.return_value = db_result
        new_engine = MysqlEngine(instance=self.ins1)
        dbs = new_engine.get_all_columns_by_tb('some_db', 'some_tb')
        self.assertEqual(dbs.rows, ['col_1', 'col_2'])

    @patch.object(MysqlEngine, 'query')
    def testDescribe(self, mock_query):
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.describe_table('some_db', 'some_db')
        mock_query.assert_called_once()

    def testQueryCheck(self):
        # Leading comments should be stripped from the filtered SQL.
        new_engine = MysqlEngine(instance=self.ins1)
        sql_without_limit = '-- 测试\n select user from usertable'
        check_result = new_engine.query_check(db_name='some_db', sql=sql_without_limit)
        self.assertEqual(check_result['filtered_sql'], 'select user from usertable')

    def test_query_check_wrong_sql(self):
        # A comment-only statement is not a supported query type.
        new_engine = MysqlEngine(instance=self.ins1)
        wrong_sql = '-- 测试'
        check_result = new_engine.query_check(db_name='some_db', sql=wrong_sql)
        self.assertDictEqual(check_result,
                             {'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': '-- 测试', 'has_star': False})

    def test_query_check_update_sql(self):
        # DML (update) must be rejected by the read-only query checker.
        new_engine = MysqlEngine(instance=self.ins1)
        update_sql = 'update user set id=0'
        check_result = new_engine.query_check(db_name='some_db', sql=update_sql)
        self.assertDictEqual(check_result,
                             {'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': 'update user set id=0',
                              'has_star': False})

    def test_filter_sql_with_delimiter(self):
        new_engine = MysqlEngine(instance=self.ins1)
        sql_without_limit = 'select user from usertable;'
        check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
        self.assertEqual(check_result, 'select user from usertable limit 100;')

    def test_filter_sql_without_delimiter(self):
        new_engine = MysqlEngine(instance=self.ins1)
        sql_without_limit = 'select user from usertable'
        check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
        self.assertEqual(check_result, 'select user from usertable limit 100;')

    def test_filter_sql_with_limit(self):
        # An existing LIMIT smaller than limit_num is kept as-is.
        new_engine = MysqlEngine(instance=self.ins1)
        sql_without_limit = 'select user from usertable limit 10'
        check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
        self.assertEqual(check_result, 'select user from usertable limit 10;')

    @patch('sql.engines.mysql.data_masking', return_value=ResultSet())
    def test_query_masking(self, _data_masking):
        query_result = ResultSet()
        new_engine = MysqlEngine(instance=self.ins1)
        masking_result = new_engine.query_masking(db_name='archery', sql='select 1', resultset=query_result)
        self.assertIsInstance(masking_result, ResultSet)

    @patch('sql.engines.mysql.data_masking', return_value=ResultSet())
    def test_query_masking_not_select(self, _data_masking):
        # Non-select statements bypass masking and return the original resultset.
        query_result = ResultSet()
        new_engine = MysqlEngine(instance=self.ins1)
        masking_result = new_engine.query_masking(db_name='archery', sql='explain select 1', resultset=query_result)
        self.assertEqual(masking_result, query_result)

    def test_execute_check_select_sql(self):
        # SELECT is rejected by execute_check: only DML/DDL are executable.
        sql = 'select * from user'
        row = ReviewResult(id=1, errlevel=2,
                           stagestatus='驳回不支持语句',
                           errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
                           sql=sql)
        new_engine = MysqlEngine(instance=self.ins1)
        check_result = new_engine.execute_check(db_name='archery', sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
        self.assertEqual(check_result.rows[0].__dict__, row.__dict__)

    def test_execute_check_critical_sql(self):
        # SQL matching the configured critical_ddl_regex must be rejected.
        self.sys_config.set('critical_ddl_regex', '^|update')
        self.sys_config.get_all_config()
        sql = 'update user set id=1'
        row = ReviewResult(id=1, errlevel=2,
                           stagestatus='驳回高危SQL',
                           errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
                           sql=sql)
        new_engine = MysqlEngine(instance=self.ins1)
        check_result = new_engine.execute_check(db_name='archery', sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
        self.assertEqual(check_result.rows[0].__dict__, row.__dict__)

    @patch('sql.engines.mysql.InceptionEngine')
    def test_execute_check_normal_sql(self, _inception_engine):
        # Normal DML is delegated to Inception for review.
        sql = 'update user set id=1'
        row = ReviewResult(id=1,
                           errlevel=0,
                           stagestatus='Audit completed',
                           errormessage='None',
                           sql=sql,
                           affected_rows=0,
                           execute_time=0, )
        _inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[row])
        new_engine = MysqlEngine(instance=self.ins1)
        check_result = new_engine.execute_check(db_name='archery', sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
        self.assertEqual(check_result.rows[0].__dict__, row.__dict__)

    @patch('sql.engines.mysql.InceptionEngine')
    def test_execute_check_normal_sql_with_Exception(self, _inception_engine):
        # Inception failures must propagate instead of being swallowed.
        sql = 'update user set id=1'
        _inception_engine.return_value.execute_check.side_effect = RuntimeError()
        new_engine = MysqlEngine(instance=self.ins1)
        with self.assertRaises(RuntimeError):
            new_engine.execute_check(db_name=0, sql=sql)

    @patch('sql.engines.mysql.InceptionEngine')
    def test_execute_workflow(self, _inception_engine):
        sql = 'update user set id=1'
        _inception_engine.return_value.execute.return_value = ReviewSet(full_sql=sql)
        new_engine = MysqlEngine(instance=self.ins1)
        execute_result = new_engine.execute_workflow(self.wf)
        self.assertIsInstance(execute_result, ReviewSet)

    # NOTE(review): the nested patch targets below resolve against the
    # MagicMock installed by the innermost @patch('MySQLdb.connect'),
    # since patches start bottom-up — confirm before reordering.
    @patch('MySQLdb.connect.cursor.execute')
    @patch('MySQLdb.connect.cursor')
    @patch('MySQLdb.connect')
    def test_execute(self, _connect, _cursor, _execute):
        new_engine = MysqlEngine(instance=self.ins1)
        execute_result = new_engine.execute(self.wf)
        self.assertIsInstance(execute_result, ResultSet)

    @patch.object(MysqlEngine, 'query')
    def test_server_version(self, _query):
        # Version string '5.7.20' should parse into the tuple (5, 7, 20).
        _query.return_value.rows = (('5.7.20',),)
        new_engine = MysqlEngine(instance=self.ins1)
        server_version = new_engine.server_version
        self.assertTupleEqual(server_version, (5, 7, 20))

    @patch.object(MysqlEngine, 'query')
    def test_get_variables_not_filter(self, _query):
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.get_variables()
        _query.assert_called_once()

    @patch.object(MysqlEngine, 'query')
    def test_get_variables_filter(self, _query):
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.get_variables(variables=['binlog_format'])
        _query.assert_called()

    @patch.object(MysqlEngine, 'query')
    def test_set_variable(self, _query):
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.set_variable('binlog_format', 'ROW')
        _query.assert_called_once_with(sql="set global binlog_format=ROW;")

    @patch('sql.engines.mysql.GoInceptionEngine')
    def test_osc_go_inception(self, _inception_engine):
        # With go_inception enabled, OSC control goes through GoInceptionEngine.
        self.sys_config.set('go_inception', 'true')
        _inception_engine.return_value.osc_control.return_value = ReviewSet()
        command = 'get'
        sqlsha1 = 'xxxxx'
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)

    @patch('sql.engines.mysql.InceptionEngine')
    def test_osc_inception(self, _inception_engine):
        # With go_inception disabled, OSC control goes through InceptionEngine.
        self.sys_config.set('go_inception', 'false')
        _inception_engine.return_value.osc_control.return_value = ReviewSet()
        command = 'get'
        sqlsha1 = 'xxxxx'
        new_engine = MysqlEngine(instance=self.ins1)
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
class TestRedis(TestCase):
    """Tests for RedisEngine: base info, query wrapping, command checks and workflow execution."""
    @classmethod
    def setUpClass(cls):
        # One shared redis Instance record for every test in this class.
        cls.ins = Instance(instance_name='some_ins', type='slave', db_type='redis', host='some_host',
                           port=1366, user='ins_user', password='some_pass')
        cls.ins.save()
    @classmethod
    def tearDownClass(cls):
        # Drop the fixture instance and any workflow rows left behind by tests.
        cls.ins.delete()
        SqlWorkflow.objects.all().delete()
        SqlWorkflowContent.objects.all().delete()
    @patch('redis.Redis')
    def test_engine_base_info(self, _conn):
        """name/info attributes identify the engine."""
        new_engine = RedisEngine(instance=self.ins)
        self.assertEqual(new_engine.name, 'Redis')
        self.assertEqual(new_engine.info, 'Redis engine')
    @patch('redis.Redis')
    def test_get_connection(self, _conn):
        """get_connection constructs exactly one redis.Redis client."""
        new_engine = RedisEngine(instance=self.ins)
        new_engine.get_connection()
        _conn.assert_called_once()
    @patch('redis.Redis.execute_command', return_value=[1, 2, 3])
    def test_query_return_list(self, _execute_command):
        """A list reply is wrapped as one single-element row per item."""
        new_engine = RedisEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
        self.assertIsInstance(query_result, ResultSet)
        self.assertTupleEqual(query_result.rows, ([1], [2], [3]))
    @patch('redis.Redis.execute_command', return_value='text')
    def test_query_return_str(self, _execute_command):
        """A scalar string reply becomes a single one-column row."""
        new_engine = RedisEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
        self.assertIsInstance(query_result, ResultSet)
        self.assertTupleEqual(query_result.rows, (['text'],))
    @patch('redis.Redis.execute_command', return_value='text')
    def test_query_execute(self, _execute_command):
        """NOTE(review): duplicates test_query_return_str exactly — consider removing one."""
        new_engine = RedisEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
        self.assertIsInstance(query_result, ResultSet)
        self.assertTupleEqual(query_result.rows, (['text'],))
    @patch('redis.Redis.config_get', return_value={"databases": 4})
    def test_get_all_databases(self, _config_get):
        """The db list is derived from the 'databases' config value as strings '0'..'n-1'."""
        new_engine = RedisEngine(instance=self.ins)
        dbs = new_engine.get_all_databases()
        self.assertListEqual(dbs.rows, ['0', '1', '2', '3'])
    def test_query_check_safe_cmd(self):
        """query_check on a narrowed KEYS pattern.
        NOTE(review): despite the local name `safe_cmd`, the expectation is
        bad_query=True — confirm 'keys 1*' is intentionally rejected."""
        safe_cmd = "keys 1*"
        new_engine = RedisEngine(instance=self.ins)
        check_result = new_engine.query_check(db_name=0, sql=safe_cmd)
        self.assertDictEqual(check_result,
                             {'msg': '禁止执行该命令!', 'bad_query': True, 'filtered_sql': safe_cmd, 'has_star': False})
    def test_query_check_danger_cmd(self):
        """A bare 'keys *' is rejected as a forbidden command."""
        safe_cmd = "keys *"
        new_engine = RedisEngine(instance=self.ins)
        check_result = new_engine.query_check(db_name=0, sql=safe_cmd)
        self.assertDictEqual(check_result,
                             {'msg': '禁止执行该命令!', 'bad_query': True, 'filtered_sql': safe_cmd, 'has_star': False})
    def test_filter_sql(self):
        """filter_sql passes redis commands through unchanged (limit_num has no effect)."""
        safe_cmd = "keys 1*"
        new_engine = RedisEngine(instance=self.ins)
        check_result = new_engine.filter_sql(sql=safe_cmd, limit_num=100)
        self.assertEqual(check_result, 'keys 1*')
    def test_query_masking(self):
        """query_masking is a no-op for redis: the input resultset is returned as-is."""
        query_result = ResultSet()
        new_engine = RedisEngine(instance=self.ins)
        masking_result = new_engine.query_masking(db_name=0, sql='', resultset=query_result)
        self.assertEqual(masking_result, query_result)
    def test_execute_check(self):
        """execute_check wraps the command into one Audit-completed ReviewResult."""
        sql = 'set 1 1'
        row = ReviewResult(id=1,
                           errlevel=0,
                           stagestatus='Audit completed',
                           errormessage='None',
                           sql=sql,
                           affected_rows=0,
                           execute_time=0,
                           full_sql=sql)
        new_engine = RedisEngine(instance=self.ins)
        check_result = new_engine.execute_check(db_name=0, sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
        self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
    @patch('redis.Redis.execute_command', return_value='text')
    def test_execute_workflow_success(self, _execute_command):
        """execute_workflow runs the workflow SQL and reports success rows.
        Only the row keys are compared — values (e.g. execute_time) vary at runtime."""
        sql = 'set 1 1'
        row = ReviewResult(id=1,
                           errlevel=0,
                           stagestatus='Execute Successfully',
                           errormessage='None',
                           sql=sql,
                           affected_rows=0,
                           execute_time=0,
                           full_sql=sql)
        wf = SqlWorkflow.objects.create(
            workflow_name='some_name',
            group_id=1,
            group_name='g1',
            engineer_display='',
            audit_auth_groups='some_group',
            create_time=datetime.now() - timedelta(days=1),
            status='workflow_finish',
            is_backup=True,
            instance=self.ins,
            db_name='some_db',
            syntax_type=1
        )
        SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
        new_engine = RedisEngine(instance=self.ins)
        execute_result = new_engine.execute_workflow(workflow=wf)
        self.assertIsInstance(execute_result, ReviewSet)
        self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
class TestPgSQL(TestCase):
    """Tests for PgSQLEngine: queries, metadata listing, query checks and SQL filtering."""
    @classmethod
    def setUpClass(cls):
        # One shared pgsql Instance record for every test in this class.
        cls.ins = Instance(instance_name='some_ins', type='slave', db_type='pgsql', host='some_host',
                           port=1366, user='ins_user', password='some_pass')
        cls.ins.save()
    @classmethod
    def tearDownClass(cls):
        cls.ins.delete()
    @patch('psycopg2.connect')
    def test_engine_base_info(self, _conn):
        """name/info attributes identify the engine."""
        new_engine = PgSQLEngine(instance=self.ins)
        self.assertEqual(new_engine.name, 'PgSQL')
        self.assertEqual(new_engine.info, 'PgSQL engine')
    @patch('psycopg2.connect')
    def test_get_connection(self, _conn):
        """get_connection opens exactly one psycopg2 connection."""
        new_engine = PgSQLEngine(instance=self.ins)
        new_engine.get_connection()
        _conn.assert_called_once()
    @patch('psycopg2.connect.cursor.execute')
    @patch('psycopg2.connect.cursor')
    @patch('psycopg2.connect')
    def test_query(self, _conn, _cursor, _execute):
        """With a positive limit_num, rows are taken from cursor.fetchmany()."""
        _conn.return_value.cursor.return_value.fetchmany.return_value = [(1,)]
        new_engine = PgSQLEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
        self.assertIsInstance(query_result, ResultSet)
        self.assertListEqual(query_result.rows, [(1,)])
    @patch('psycopg2.connect.cursor.execute')
    @patch('psycopg2.connect.cursor')
    @patch('psycopg2.connect')
    def test_query_not_limit(self, _conn, _cursor, _execute):
        """With limit_num=0, rows are taken from cursor.fetchall()."""
        _conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
        new_engine = PgSQLEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
        self.assertIsInstance(query_result, ResultSet)
        self.assertListEqual(query_result.rows, [(1,)])
    @patch('sql.engines.pgsql.PgSQLEngine.query',
           return_value=ResultSet(rows=[('postgres',), ('archery',), ('template1',), ('template0',)]))
    def test_get_all_databases(self, _query):
        """System databases (postgres, template*) are filtered from the listing."""
        new_engine = PgSQLEngine(instance=self.ins)
        dbs = new_engine.get_all_databases()
        self.assertListEqual(dbs.rows, ['archery'])
    @patch('sql.engines.pgsql.PgSQLEngine.query',
           return_value=ResultSet(rows=[('information_schema',), ('archery',), ('pg_catalog',)]))
    def test_get_all_schemas(self, _query):
        """System schemas (information_schema, pg_catalog) are filtered out."""
        new_engine = PgSQLEngine(instance=self.ins)
        schemas = new_engine.get_all_schemas(db_name='archery')
        self.assertListEqual(schemas.rows, ['archery'])
    @patch('sql.engines.pgsql.PgSQLEngine.query', return_value=ResultSet(rows=[('test',), ('test2',)]))
    def test_get_all_tables(self, _query):
        """Table listing flattens single-column rows.
        NOTE(review): only 'test2' is expected — confirm 'test' is intentionally excluded."""
        new_engine = PgSQLEngine(instance=self.ins)
        tables = new_engine.get_all_tables(db_name='archery', schema_name='archery')
        self.assertListEqual(tables.rows, ['test2'])
    @patch('sql.engines.pgsql.PgSQLEngine.query',
           return_value=ResultSet(rows=[('id',), ('name',)]))
    def test_get_all_columns_by_tb(self, _query):
        """Column names are flattened from single-column rows."""
        new_engine = PgSQLEngine(instance=self.ins)
        columns = new_engine.get_all_columns_by_tb(db_name='archery', tb_name='test2', schema_name='archery')
        self.assertListEqual(columns.rows, ['id', 'name'])
    @patch('sql.engines.pgsql.PgSQLEngine.query',
           return_value=ResultSet(rows=[('postgres',), ('archery',), ('template1',), ('template0',)]))
    def test_describe_table(self, _query):
        """describe_table returns a ResultSet built from the (mocked) query."""
        new_engine = PgSQLEngine(instance=self.ins)
        describe = new_engine.describe_table(db_name='archery', schema_name='archery', tb_name='text')
        self.assertIsInstance(describe, ResultSet)
    def test_query_check_disable_sql(self):
        """Non-SELECT statements are rejected by query_check."""
        sql = "update xxx set a=1 "
        new_engine = PgSQLEngine(instance=self.ins)
        check_result = new_engine.query_check(db_name='archery', sql=sql)
        self.assertDictEqual(check_result,
                             {'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': sql.strip(), 'has_star': False})
    def test_query_check_star_sql(self):
        """SELECT * passes the check (bad_query=False) but is flagged via has_star."""
        sql = "select * from xx "
        new_engine = PgSQLEngine(instance=self.ins)
        check_result = new_engine.query_check(db_name='archery', sql=sql)
        self.assertDictEqual(check_result,
                             {'msg': 'SQL语句中含有 * ', 'bad_query': False, 'filtered_sql': sql.strip(), 'has_star': True})
    def test_filter_sql_with_delimiter(self):
        """A LIMIT clause is injected before the existing semicolon."""
        sql = "select * from xx;"
        new_engine = PgSQLEngine(instance=self.ins)
        check_result = new_engine.filter_sql(sql=sql, limit_num=100)
        self.assertEqual(check_result, "select * from xx limit 100;")
    def test_filter_sql_without_delimiter(self):
        """Without a trailing semicolon, LIMIT and the terminator are both appended."""
        sql = "select * from xx"
        new_engine = PgSQLEngine(instance=self.ins)
        check_result = new_engine.filter_sql(sql=sql, limit_num=100)
        self.assertEqual(check_result, "select * from xx limit 100;")
    def test_filter_sql_with_limit(self):
        """An existing LIMIT clause is kept and wins over the requested limit_num."""
        sql = "select * from xx limit 10"
        new_engine = PgSQLEngine(instance=self.ins)
        check_result = new_engine.filter_sql(sql=sql, limit_num=1)
        self.assertEqual(check_result, "select * from xx limit 10;")
    def test_query_masking(self):
        """query_masking is a no-op for pgsql and returns the input resultset."""
        query_result = ResultSet()
        new_engine = PgSQLEngine(instance=self.ins)
        masking_result = new_engine.query_masking(db_name=0, sql='', resultset=query_result)
        self.assertEqual(masking_result, query_result)
class TestModel(TestCase):
    """Regression tests for ResultSet/ReviewSet default-value isolation."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_result_set_rows_shadow(self):
        # Guard against the classic mutable-default pitfall: if `rows` defaulted
        # to a shared list, augmented assignment on one instance would leak the
        # accumulated values into every subsequently created instance.
        seeded_result = ResultSet()
        for value in range(10):
            seeded_result.rows += [value]
        fresh_result = ResultSet()
        self.assertEqual(fresh_result.rows, [])
        seeded_review = ReviewSet()
        for value in range(10):
            seeded_review.rows += [value]
        fresh_review = ReviewSet()
        self.assertEqual(fresh_review.rows, [])
class TestInception(TestCase):
    """Tests for InceptionEngine: connections, checks, execution, rollback and variables.

    Fix: removed extraneous `f` prefixes from placeholder-free strings in
    test_get_variables / test_get_variables_filter (ruff F541).
    """
    def setUp(self):
        """Create a target mysql instance, an inception instance and one finished workflow."""
        self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mysql', host='some_host',
                                           port=3306, user='ins_user', password='some_pass')
        self.ins_inc = Instance.objects.create(instance_name='some_ins_inc', type='slave', db_type='inception',
                                               host='some_host', port=6669)
        self.wf = SqlWorkflow.objects.create(
            workflow_name='some_name',
            group_id=1,
            group_name='g1',
            engineer_display='',
            audit_auth_groups='some_group',
            create_time=datetime.now() - timedelta(days=1),
            status='workflow_finish',
            is_backup=True,
            instance=self.ins,
            db_name='some_db',
            syntax_type=1
        )
        SqlWorkflowContent.objects.create(workflow=self.wf)
    def tearDown(self):
        """Remove all fixture rows so other test classes start clean."""
        self.ins.delete()
        self.ins_inc.delete()
        SqlWorkflow.objects.all().delete()
        SqlWorkflowContent.objects.all().delete()
    @patch('MySQLdb.connect')
    def test_get_connection(self, _connect):
        """get_connection opens exactly one MySQLdb connection."""
        new_engine = InceptionEngine()
        new_engine.get_connection()
        _connect.assert_called_once()
    @patch('MySQLdb.connect')
    def test_get_backup_connection(self, _connect):
        """get_backup_connection opens exactly one MySQLdb connection."""
        new_engine = InceptionEngine()
        new_engine.get_backup_connection()
        _connect.assert_called_once()
    def test_execute_check_critical_sql(self):
        """An incomplete ALTER is rejected locally with errlevel=2 before reaching inception."""
        sql = 'alter table user'
        row = ReviewResult(id=1, errlevel=2, stagestatus='SQL语法错误',
                           errormessage='ALTER TABLE 必须带有选项',
                           sql=sql)
        new_engine = InceptionEngine()
        check_result = new_engine.execute_check(db_name=0, sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
        self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_execute_check_normal_sql(self, _query):
        """A valid DML statement produces a ReviewSet from the inception result rows."""
        sql = 'update user set id=100'
        row = [1, 'CHECKED', 0, 'Audit completed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
        _query.return_value = ResultSet(full_sql=sql, rows=[row])
        new_engine = InceptionEngine()
        check_result = new_engine.execute_check(instance=self.ins, db_name=0, sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_execute_exception(self, _query):
        """An errlevel=1 result row still yields a ReviewSet (failure is encoded in the rows)."""
        sql = 'update user set id=100'
        row = [1, 'CHECKED', 1, 'Execute failed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
        column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
                       'backup_dbname', 'execute_time', 'sqlsha1']
        _query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
        new_engine = InceptionEngine()
        execute_result = new_engine.execute(workflow=self.wf)
        self.assertIsInstance(execute_result, ReviewSet)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_execute_finish(self, _query):
        """A successful execution result row yields a ReviewSet."""
        sql = 'update user set id=100'
        row = [1, 'CHECKED', 0, 'Execute Successfully', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
        column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
                       'backup_dbname', 'execute_time', 'sqlsha1']
        _query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
        new_engine = InceptionEngine()
        execute_result = new_engine.execute(workflow=self.wf)
        self.assertIsInstance(execute_result, ReviewSet)
    @patch('MySQLdb.connect.cursor.execute')
    @patch('MySQLdb.connect.cursor')
    @patch('MySQLdb.connect')
    def test_query(self, _conn, _cursor, _execute):
        """query with a limit returns a ResultSet from the mocked cursor."""
        _conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
        new_engine = InceptionEngine()
        query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
        self.assertIsInstance(query_result, ResultSet)
    @patch('MySQLdb.connect.cursor.execute')
    @patch('MySQLdb.connect.cursor')
    @patch('MySQLdb.connect')
    def test_query_not_limit(self, _conn, _cursor, _execute):
        """query with limit_num=0 returns a ResultSet from fetchall."""
        _conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
        new_engine = InceptionEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
        self.assertIsInstance(query_result, ResultSet)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_query_print(self, _query):
        """query_print parses the query_tree column of inception's print result into a dict."""
        sql = 'update user set id=100'
        row = [1,
               'select * from sql_instance limit 100',
               0,
               '{"command":"select","select_list":[{"type":"FIELD_ITEM","field":"*"}],"table_ref":[{"db":"archery","table":"sql_instance"}],"limit":{"limit":[{"type":"INT_ITEM","value":"100"}]}}',
               'None']
        column_list = ['ID', 'statement', 'errlevel', 'query_tree', 'errmsg']
        _query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
        new_engine = InceptionEngine()
        print_result = new_engine.query_print(self.ins, db_name=None, sql=sql)
        self.assertDictEqual(print_result, json.loads(_repair_json_str(row[3])))
    @patch('MySQLdb.connect')
    def test_get_rollback_list(self, _connect):
        """get_rollback reads the workflow's stored execute_result JSON without error."""
        self.wf.sqlworkflowcontent.execute_result = """[{
            "id": 1,
            "stage": "RERUN",
            "errlevel": 0,
            "stagestatus": "Execute Successfully",
            "errormessage": "None",
            "sql": "use archer_test",
            "affected_rows": 0,
            "sequence": "'1554135032_13038_0'",
            "backup_dbname": "None",
            "execute_time": "0.000",
            "sqlsha1": "",
            "actual_affected_rows": 0
        }, {
            "id": 2,
            "stage": "EXECUTED",
            "errlevel": 0,
            "stagestatus": "Execute Successfully Backup successfully",
            "errormessage": "None",
            "sql": "insert into tt1 (user_name)values('A'),('B'),('C')",
            "affected_rows": 3,
            "sequence": "'1554135032_13038_1'",
            "backup_dbname": "mysql_3306_archer_test",
            "execute_time": "0.000",
            "sqlsha1": "",
            "actual_affected_rows": 3
        }]"""
        self.wf.sqlworkflowcontent.save()
        new_engine = InceptionEngine()
        new_engine.get_rollback(self.wf)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_osc_get(self, _query):
        """osc_control 'get' issues the osc_percent statement."""
        new_engine = InceptionEngine()
        command = 'get'
        sqlsha1 = 'xxxxx'
        sql = f"inception get osc_percent '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_osc_kill(self, _query):
        """osc_control 'kill' issues the stop-alter statement."""
        new_engine = InceptionEngine()
        command = 'kill'
        sqlsha1 = 'xxxxx'
        sql = f"inception stop alter '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_osc_not_support(self, _query):
        """pt-osc cannot pause/resume; an unsupported command raises ValueError."""
        new_engine = InceptionEngine()
        command = 'stop'
        sqlsha1 = 'xxxxx'
        sql = f"inception stop alter '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        with self.assertRaisesMessage(ValueError, 'pt-osc不支持暂停和恢复,需要停止执行请使用终止按钮!'):
            new_engine.osc_control(sqlsha1=sqlsha1, command=command)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_get_variables(self, _query):
        """Unfiltered get_variables issues the plain variables statement."""
        new_engine = InceptionEngine(instance=self.ins_inc)
        new_engine.get_variables()
        sql = "inception get variables;"
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_get_variables_filter(self, _query):
        """Filtered get_variables quotes the requested variable name."""
        new_engine = InceptionEngine(instance=self.ins_inc)
        new_engine.get_variables(variables=['inception_osc_on'])
        sql = "inception get variables 'inception_osc_on';"
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.inception.InceptionEngine.query')
    def test_set_variable(self, _query):
        """set_variable builds an inception SET statement.
        NOTE(review): uses self.ins (mysql) while the get_variables tests use
        self.ins_inc — confirm which fixture is intended."""
        new_engine = InceptionEngine(instance=self.ins)
        new_engine.set_variable('inception_osc_on', 'on')
        _query.assert_called_once_with(sql="inception set inception_osc_on=on;")
class TestGoInception(TestCase):
    """Tests for GoInceptionEngine: connection, checks, execution, OSC control and variables.

    Fix: removed extraneous `f` prefixes from placeholder-free strings in
    test_get_variables / test_get_variables_filter (ruff F541).
    """
    def setUp(self):
        """Create a target mysql instance, a goinception instance and one finished workflow."""
        self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mysql',
                                           host='some_host',
                                           port=3306, user='ins_user', password='some_pass')
        self.ins_inc = Instance.objects.create(instance_name='some_ins_inc', type='slave', db_type='goinception',
                                               host='some_host', port=4000)
        self.wf = SqlWorkflow.objects.create(
            workflow_name='some_name',
            group_id=1,
            group_name='g1',
            engineer_display='',
            audit_auth_groups='some_group',
            create_time=datetime.now() - timedelta(days=1),
            status='workflow_finish',
            is_backup=True,
            instance=self.ins,
            db_name='some_db',
            syntax_type=1
        )
        SqlWorkflowContent.objects.create(workflow=self.wf)
    def tearDown(self):
        """Remove all fixture rows so other test classes start clean."""
        self.ins.delete()
        self.ins_inc.delete()
        SqlWorkflow.objects.all().delete()
        SqlWorkflowContent.objects.all().delete()
    @patch('pymysql.connect')
    def test_get_connection(self, _connect):
        """get_connection opens exactly one pymysql connection."""
        new_engine = GoInceptionEngine()
        new_engine.get_connection()
        _connect.assert_called_once()
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_execute_check_normal_sql(self, _query):
        """A valid DML statement produces a ReviewSet from the goinception result rows."""
        sql = 'update user set id=100'
        row = [1, 'CHECKED', 0, 'Audit completed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
        _query.return_value = ResultSet(full_sql=sql, rows=[row])
        new_engine = GoInceptionEngine()
        check_result = new_engine.execute_check(instance=self.ins, db_name=0, sql=sql)
        self.assertIsInstance(check_result, ReviewSet)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_execute_exception(self, _query):
        """An error_level=1 result row still yields a ReviewSet (failure encoded in rows)."""
        sql = 'update user set id=100'
        row = [1, 'CHECKED', 1, 'Execute failed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
        column_list = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
                       'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
        _query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
        new_engine = GoInceptionEngine()
        execute_result = new_engine.execute(workflow=self.wf)
        self.assertIsInstance(execute_result, ReviewSet)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_execute_finish(self, _query):
        """A successful execution result row yields a ReviewSet."""
        sql = 'update user set id=100'
        row = [1, 'CHECKED', 0, 'Execute Successfully', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
        column_list = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
                       'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
        _query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
        new_engine = GoInceptionEngine()
        execute_result = new_engine.execute(workflow=self.wf)
        self.assertIsInstance(execute_result, ReviewSet)
    @patch('pymysql.connect.cursor.execute')
    @patch('pymysql.connect.cursor')
    @patch('pymysql.connect')
    def test_query(self, _conn, _cursor, _execute):
        """query with a limit returns a ResultSet from the mocked cursor."""
        _conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
        new_engine = GoInceptionEngine()
        query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
        self.assertIsInstance(query_result, ResultSet)
    @patch('pymysql.connect.cursor.execute')
    @patch('pymysql.connect.cursor')
    @patch('pymysql.connect')
    def test_query_not_limit(self, _conn, _cursor, _execute):
        """query with limit_num=0 returns a ResultSet from fetchall."""
        _conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
        new_engine = GoInceptionEngine(instance=self.ins)
        query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
        self.assertIsInstance(query_result, ResultSet)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_osc_get(self, _query):
        """osc_control 'get' issues the osc_percent statement."""
        new_engine = GoInceptionEngine()
        command = 'get'
        sqlsha1 = 'xxxxx'
        sql = f"inception get osc_percent '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_osc_pause(self, _query):
        """osc_control 'pause' issues 'inception pause osc'."""
        new_engine = GoInceptionEngine()
        command = 'pause'
        sqlsha1 = 'xxxxx'
        sql = f"inception {command} osc '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_osc_resume(self, _query):
        """osc_control 'resume' issues 'inception resume osc'."""
        new_engine = GoInceptionEngine()
        command = 'resume'
        sqlsha1 = 'xxxxx'
        sql = f"inception {command} osc '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_osc_kill(self, _query):
        """osc_control 'kill' issues 'inception kill osc'."""
        new_engine = GoInceptionEngine()
        command = 'kill'
        sqlsha1 = 'xxxxx'
        sql = f"inception kill osc '{sqlsha1}';"
        _query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
        new_engine.osc_control(sqlsha1=sqlsha1, command=command)
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_get_variables(self, _query):
        """Unfiltered get_variables issues the plain variables statement."""
        new_engine = GoInceptionEngine(instance=self.ins_inc)
        new_engine.get_variables()
        sql = "inception get variables;"
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_get_variables_filter(self, _query):
        """Filtered get_variables uses a LIKE pattern (goinception dialect)."""
        new_engine = GoInceptionEngine(instance=self.ins_inc)
        new_engine.get_variables(variables=['inception_osc_on'])
        sql = "inception get variables like 'inception_osc_on';"
        _query.assert_called_once_with(sql=sql)
    @patch('sql.engines.goinception.GoInceptionEngine.query')
    def test_set_variable(self, _query):
        """set_variable builds an inception SET statement.
        NOTE(review): uses self.ins (mysql) while the get_variables tests use
        self.ins_inc — confirm which fixture is intended."""
        new_engine = GoInceptionEngine(instance=self.ins)
        new_engine.set_variable('inception_osc_on', 'on')
        _query.assert_called_once_with(sql="inception set inception_osc_on=on;")
class TestOracle(TestCase):
"""Oracle 测试"""
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='oracle',
host='some_host', port=3306, user='ins_user', password='some_pass',
sid='some_id')
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
self.sys_config = SysConfig()
def tearDown(self):
self.ins.delete()
self.sys_config.purge()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('cx_Oracle.makedsn')
@patch('cx_Oracle.connect')
def test_get_connection(self, _connect, _makedsn):
# 填写 sid 测试
new_engine = OracleEngine(self.ins)
new_engine.get_connection()
_connect.assert_called_once()
_makedsn.assert_called_once()
# 填写 service_name 测试
_connect.reset_mock()
_makedsn.reset_mock()
self.ins.service_name = 'some_service'
self.ins.sid = ''
self.ins.save()
new_engine = OracleEngine(self.ins)
new_engine.get_connection()
_connect.assert_called_once()
_makedsn.assert_called_once()
# 都不填写, 检测 ValueError
_connect.reset_mock()
_makedsn.reset_mock()
self.ins.service_name = ''
self.ins.sid = ''
self.ins.save()
new_engine = OracleEngine(self.ins)
with self.assertRaises(ValueError):
new_engine.get_connection()
@patch('cx_Oracle.connect')
def test_engine_base_info(self, _conn):
new_engine = OracleEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'Oracle')
self.assertEqual(new_engine.info, 'Oracle engine')
_conn.return_value.version = '12.1.0.2.0'
self.assertTupleEqual(new_engine.server_version, ('12', '1', '0'))
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchmany.return_value = [(1,)]
new_engine = OracleEngine(instance=self.ins)
query_result = new_engine.query(db_name='archery', sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = OracleEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('AUD_SYS',), ('archery',), ('ANONYMOUS',)]))
def test_get_all_databases(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('AUD_SYS',), ('archery',), ('ANONYMOUS',)]))
def test__get_all_databases(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine._get_all_databases()
self.assertListEqual(dbs.rows, ['AUD_SYS', 'archery', 'ANONYMOUS'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('archery',)]))
def test__get_all_instances(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine._get_all_instances()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('ANONYMOUS',), ('archery',), ('SYSTEM',)]))
def test_get_all_schemas(self, _query):
new_engine = OracleEngine(instance=self.ins)
schemas = new_engine._get_all_schemas()
self.assertListEqual(schemas.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query', return_value=ResultSet(rows=[('test',), ('test2',)]))
def test_get_all_tables(self, _query):
new_engine = OracleEngine(instance=self.ins)
tables = new_engine.get_all_tables(db_name='archery')
self.assertListEqual(tables.rows, ['test2'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('id',), ('name',)]))
def test_get_all_columns_by_tb(self, _query):
new_engine = OracleEngine(instance=self.ins)
columns = new_engine.get_all_columns_by_tb(db_name='archery', tb_name='test2')
self.assertListEqual(columns.rows, ['id', 'name'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('archery',), ('template1',), ('template0',)]))
def test_describe_table(self, _query):
new_engine = OracleEngine(instance=self.ins)
describe = new_engine.describe_table(db_name='archery', tb_name='text')
self.assertIsInstance(describe, ResultSet)
def test_query_check_disable_sql(self):
sql = "update xxx set a=1;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '仅支持^select语法!', 'bad_query': True, 'filtered_sql': sql.strip(';'),
'has_star': False})
def test_query_check_star_sql(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '禁止使用 * 关键词\n', 'bad_query': False, 'filtered_sql': sql.strip(';'),
'has_star': True})
def test_query_check_IndexError(self):
sql = ""
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '没有有效的SQL语句', 'bad_query': True, 'filtered_sql': sql.strip(), 'has_star': False})
def test_query_check_plus(self):
sql = "select 100+1 from tb;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '禁止使用 + 关键词\n', 'bad_query': True, 'filtered_sql': sql.strip(';'),
'has_star': False})
def test_filter_sql_with_delimiter(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx WHERE ROWNUM <= 100")
def test_filter_sql_with_delimiter_and_where(self):
sql = "select * from xx where id>1;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx where id>1 AND ROWNUM <= 100")
def test_filter_sql_without_delimiter(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx WHERE ROWNUM <= 100")
def test_filter_sql_with_limit(self):
sql = "select * from xx limit 10;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=1)
self.assertEqual(check_result, "select * from xx limit 10 WHERE ROWNUM <= 1")
def test_query_masking(self):
query_result = ResultSet()
new_engine = OracleEngine(instance=self.ins)
masking_result = new_engine.query_masking(schema_name='', sql='select 1', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check_select_sql(self):
sql = 'select * from user;'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sql)
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_critical_sql(self):
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sql)
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_normal_sql(self):
self.sys_config.purge()
sql = 'alter table tb set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_execute_workflow_success(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0,
full_sql=sql)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
new_engine = OracleEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect', return_value=RuntimeError)
def test_execute_workflow_exception(self, _conn, _cursor, _execute):
    """A broken connection surfaces as AttributeError while executing a workflow."""
    statement = 'update user set id=1'
    # Failure row the engine would build; only the field names matter below.
    expected = ReviewResult(
        id=1,
        errlevel=2,
        stagestatus='Execute Failed',
        errormessage=f'异常信息:{f"Oracle命令执行报错,语句:{statement}"}',
        sql=statement,
        affected_rows=0,
        execute_time=0,
    )
    workflow = SqlWorkflow.objects.create(
        workflow_name='some_name',
        group_id=1,
        group_name='g1',
        engineer_display='',
        audit_auth_groups='some_group',
        create_time=datetime.now() - timedelta(days=1),
        status='workflow_finish',
        is_backup=True,
        instance=self.ins,
        db_name='some_db',
        syntax_type=1,
    )
    SqlWorkflowContent.objects.create(workflow=workflow, sql_content=statement)
    with self.assertRaises(AttributeError):
        engine = OracleEngine(instance=self.ins)
        outcome = engine.execute_workflow(workflow=workflow)
        self.assertIsInstance(outcome, ReviewSet)
        self.assertEqual(outcome.rows[0].__dict__.keys(), expected.__dict__.keys())
| 44.412112
| 196
| 0.643097
| 7,036
| 60,134
| 5.211342
| 0.053724
| 0.060627
| 0.029045
| 0.019091
| 0.884119
| 0.859465
| 0.828265
| 0.811465
| 0.781002
| 0.750593
| 0
| 0.013709
| 0.234559
| 60,134
| 1,353
| 197
| 44.444937
| 0.782898
| 0.002894
| 0
| 0.749367
| 0
| 0.000844
| 0.1632
| 0.048311
| 0.001688
| 0
| 0
| 0
| 0.140928
| 1
| 0.1173
| false
| 0.008439
| 0.013502
| 0
| 0.139241
| 0.002532
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a61e7e5919e4c0513c8489fd5faf6513c4046a7
| 11,454
|
py
|
Python
|
tests/test_exact_solver.py
|
pau557/dimod
|
d3c6d3abf23182b035e1100c46f7c947202edefb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_exact_solver.py
|
pau557/dimod
|
d3c6d3abf23182b035e1100c46f7c947202edefb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_exact_solver.py
|
pau557/dimod
|
d3c6d3abf23182b035e1100c46f7c947202edefb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
import unittest
import numpy as np
import numpy.testing as npt
import dimod
import dimod.testing
from dimod.exceptions import SamplerUnknownArgWarning
@dimod.testing.load_sampler_bqm_tests(dimod.ExactSolver)
class TestExactSolver(unittest.TestCase):
    """Tests for the brute-force ExactSolver: it must enumerate every state once."""

    def test_instantiation(self):
        solver = dimod.ExactSolver()
        dimod.testing.assert_sampler_api(solver)
        # This sampler advertises no properties and accepts no parameters.
        self.assertEqual(solver.properties, {})
        self.assertEqual(solver.parameters, {})

    def test_sample_SPIN_empty(self):
        empty = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
        result = dimod.ExactSolver().sample(empty)
        # No variables -> an empty (0, 0) sample matrix, vartype preserved.
        self.assertEqual(result.record.sample.shape, (0, 0))
        self.assertIs(result.vartype, empty.vartype)

    def test_sample_BINARY_empty(self):
        empty = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.BINARY)
        result = dimod.ExactSolver().sample(empty)
        self.assertEqual(result.record.sample.shape, (0, 0))
        self.assertIs(result.vartype, empty.vartype)

    def test_sample_SPIN(self):
        bqm = dimod.BinaryQuadraticModel(
            {0: 0.0, 1: 0.0, 2: 0.0},
            {(0, 1): -1.0, (1, 2): 1.0, (0, 2): 1.0},
            1.0,
            dimod.SPIN,
        )
        result = dimod.ExactSolver().sample(bqm)
        n = len(bqm)
        # Every possible combination must be present, once each.
        self.assertEqual(len(result), 2 ** n)
        self.assertEqual(result.record.sample.shape, (2 ** n, n))
        # The input model's vartype is preserved.
        self.assertIs(result.vartype, bqm.vartype)
        dimod.testing.assert_response_energies(result, bqm)

    def test_sample_BINARY(self):
        bqm = dimod.BinaryQuadraticModel(
            {0: 0.0, 1: 0.0, 2: 0.0},
            {(0, 1): -1.0, (1, 2): 1.0, (0, 2): 1.0},
            1.0,
            dimod.BINARY,
        )
        result = dimod.ExactSolver().sample(bqm)
        n = len(bqm)
        self.assertEqual(len(result), 2 ** n)
        self.assertEqual(result.record.sample.shape, (2 ** n, n))
        self.assertIs(result.vartype, bqm.vartype)
        dimod.testing.assert_response_energies(result, bqm)

    def test_sample_ising(self):
        h = {0: 0.0, 1: 0.0, 2: 0.0}
        J = {(0, 1): -1.0, (1, 2): 1.0, (0, 2): 1.0}
        result = dimod.ExactSolver().sample_ising(h, J)
        # All 2**3 spin assignments enumerated.
        self.assertEqual(len(result), 2 ** 3)
        self.assertEqual(result.record.sample.shape, (2 ** 3, 3))
        self.assertIs(result.vartype, dimod.SPIN)
        # Reported energies must match the Ising energy of each sample.
        for sample, energy in result.data(['sample', 'energy']):
            self.assertAlmostEqual(energy, dimod.ising_energy(sample, h, J))

    def test_sample_qubo(self):
        Q = {(0, 0): 0.0, (1, 1): 0.0, (2, 2): 0.0,
             (0, 1): -1.0, (1, 2): 1.0, (0, 2): 1.0}
        result = dimod.ExactSolver().sample_qubo(Q)
        self.assertEqual(len(result), 2 ** 3)
        self.assertEqual(result.record.sample.shape, (2 ** 3, 3))
        self.assertIs(result.vartype, dimod.BINARY)
        # Reported energies must match the QUBO energy of each sample.
        for sample, energy in result.data(['sample', 'energy']):
            self.assertAlmostEqual(energy, dimod.qubo_energy(sample, Q))

    def test_sample_mixed_labels(self):
        # Mixed int/str variable labels must be handled transparently.
        h = {'3': 0.6669921875, 4: -2.0, 5: -1.334375, 6: 0.0, 7: -2.0,
             '1': 1.3328125, '2': -1.3330078125, '0': -0.666796875}
        J = {(5, '2'): 1.0, (7, '0'): 0.9998046875, (4, '0'): 0.9998046875,
             ('3', 4): 0.9998046875, (7, '1'): -1.0, (5, '1'): 0.6671875,
             (6, '2'): 1.0, ('3', 6): 0.6671875, (7, '2'): 0.9986328125,
             (5, '0'): -1.0, ('3', 5): -0.6671875, ('3', 7): 0.998828125,
             (4, '1'): -1.0, (6, '0'): -0.3328125, (4, '2'): 1.0, (6, '1'): 0.0}
        result = dimod.ExactSolver().sample_ising(h, J)
        n = len(h)
        self.assertEqual(len(result), 2 ** n)
        self.assertEqual(result.record.sample.shape, (2 ** n, n))
        self.assertIs(result.vartype, dimod.SPIN)
        for sample, energy in result.data(['sample', 'energy']):
            self.assertAlmostEqual(energy, dimod.ising_energy(sample, h, J))

    def test_arbitrary_labels(self):
        bqm = dimod.BQM.from_ising({}, {'ab': -1})
        sampleset = dimod.ExactSolver().sample(bqm)
        # Variable labels of the model carry through to the sample set.
        self.assertEqual(set(sampleset.variables), set(bqm.variables))

    def test_kwargs(self):
        bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
        # Unknown keyword arguments should warn rather than fail.
        with self.assertWarns(SamplerUnknownArgWarning):
            dimod.ExactSolver().sample(bqm, a=1, b="abc")
class TestExactPolySolver(unittest.TestCase):
    """Tests for the brute-force ExactPolySolver (higher-order binary polynomials)."""

    def test_instantiation(self):
        solver = dimod.ExactPolySolver()
        # This sampler advertises no properties and accepts no parameters.
        self.assertEqual(solver.properties, {})
        self.assertEqual(solver.parameters, {})

    def test_sample_SPIN_empty(self):
        empty = dimod.BinaryPolynomial({}, dimod.SPIN)
        result = dimod.ExactPolySolver().sample_poly(empty)
        # No variables -> an empty (0, 0) sample matrix, vartype preserved.
        self.assertEqual(result.record.sample.shape, (0, 0))
        self.assertIs(result.vartype, empty.vartype)

    def test_sample_BINARY_empty(self):
        empty = dimod.BinaryPolynomial({}, dimod.BINARY)
        result = dimod.ExactPolySolver().sample_poly(empty)
        self.assertEqual(result.record.sample.shape, (0, 0))
        self.assertIs(result.vartype, empty.vartype)

    def test_sample_SPIN(self):
        poly = dimod.BinaryPolynomial.from_hising(
            {0: 0.0, 1: 0.0, 2: 0.0},
            {(0, 1): -1.0, (1, 2): 1.0, (0, 1, 2): 1.0},
            1.0,
        )
        result = dimod.ExactPolySolver().sample_poly(poly)
        n = len(poly.variables)
        # Every possible combination must be present, once each.
        self.assertEqual(len(result), 2 ** n)
        self.assertEqual(result.record.sample.shape, (2 ** n, n))
        self.assertIs(result.vartype, poly.vartype)
        dimod.testing.assert_response_energies(result, poly)

    def test_sample_BINARY(self):
        poly = dimod.BinaryPolynomial(
            {(): 1.0, (0,): 0.0, (1,): 0.0, (0, 1): -1.0, (1, 2): 1.0, (0, 1, 2): 1.0},
            dimod.BINARY,
        )
        result = dimod.ExactPolySolver().sample_poly(poly)
        n = len(poly.variables)
        self.assertEqual(len(result), 2 ** n)
        self.assertEqual(result.record.sample.shape, (2 ** n, n))
        self.assertIs(result.vartype, poly.vartype)
        dimod.testing.assert_response_energies(result, poly)

    def test_sample_hising(self):
        h = {0: 0.0, 1: 0.0, 2: 0.0}
        J = {(0, 1): -1.0, (1, 2): 1.0, (0, 1, 2): 1.0}
        result = dimod.ExactPolySolver().sample_hising(h, J)
        self.assertEqual(len(result), 2 ** 3)
        self.assertEqual(result.record.sample.shape, (2 ** 3, 3))
        self.assertIs(result.vartype, dimod.SPIN)

    def test_sample_hubo(self):
        Q = {(0, 0): 0.0, (1, 1): 0.0, (2, 2): 0.0,
             (0, 1): -1.0, (1, 2): 1.0, (0, 1, 2): 1.0}
        result = dimod.ExactPolySolver().sample_hubo(Q)
        self.assertEqual(len(result), 2 ** 3)
        self.assertEqual(result.record.sample.shape, (2 ** 3, 3))
        self.assertIs(result.vartype, dimod.BINARY)

    def test_sample_ising(self):
        # Purely quadratic problems are accepted through the higher-order entry point.
        h = {0: 0.0, 1: 0.0, 2: 0.0}
        J = {(0, 1): -1.0, (1, 2): 1.0, (0, 2): 1.0}
        result = dimod.ExactPolySolver().sample_hising(h, J)
        self.assertEqual(len(result), 2 ** 3)
        self.assertEqual(result.record.sample.shape, (2 ** 3, 3))
        self.assertIs(result.vartype, dimod.SPIN)
        # Reported energies must match the Ising energy of each sample.
        for sample, energy in result.data(['sample', 'energy']):
            self.assertAlmostEqual(energy, dimod.ising_energy(sample, h, J))

    def test_sample_qubo(self):
        Q = {(0, 0): 0.0, (1, 1): 0.0, (2, 2): 0.0,
             (0, 1): -1.0, (1, 2): 1.0, (0, 2): 1.0}
        result = dimod.ExactPolySolver().sample_hubo(Q)
        self.assertEqual(len(result), 2 ** 3)
        self.assertEqual(result.record.sample.shape, (2 ** 3, 3))
        self.assertIs(result.vartype, dimod.BINARY)
        for sample, energy in result.data(['sample', 'energy']):
            self.assertAlmostEqual(energy, dimod.qubo_energy(sample, Q))

    def test_sample_mixed_labels(self):
        # Mixed int/str variable labels must be handled transparently.
        h = {'3': 0.6669921875, 4: -2.0, 5: -1.334375, 6: 0.0, 7: -2.0,
             '1': 1.3328125, '2': -1.3330078125, '0': -0.666796875}
        J = {(5, '2'): 1.0, (7, '0'): 0.9998046875, (4, '0'): 0.9998046875,
             ('3', 4): 0.9998046875, (7, '1'): -1.0, (5, '1'): 0.6671875,
             (6, '2'): 1.0, ('3', 6): 0.6671875, (7, '2'): 0.9986328125,
             (5, '0'): -1.0, ('3', 5): -0.6671875, ('3', 7): 0.998828125,
             (4, '1'): -1.0, (6, '0'): -0.3328125, (4, '2'): 1.0, (6, '1'): 0.0}
        result = dimod.ExactPolySolver().sample_hising(h, J)
        n = len(h)
        self.assertEqual(len(result), 2 ** n)
        self.assertEqual(result.record.sample.shape, (2 ** n, n))
        self.assertIs(result.vartype, dimod.SPIN)

    def test_arbitrary_labels(self):
        poly = dimod.BinaryPolynomial.from_hising({}, {('a', 'b', 'c'): -1})
        sampleset = dimod.ExactPolySolver().sample_poly(poly)
        # Variable labels of the polynomial carry through to the sample set.
        self.assertEqual(set(sampleset.variables), set(poly.variables))

    def test_kwargs(self):
        poly = dimod.BinaryPolynomial({}, dimod.SPIN)
        # Unknown keyword arguments should warn rather than fail.
        with self.assertWarns(SamplerUnknownArgWarning):
            dimod.ExactPolySolver().sample_poly(poly, a=True, b=2)
| 39.496552
| 113
| 0.591496
| 1,489
| 11,454
| 4.495635
| 0.107455
| 0.022707
| 0.011652
| 0.069316
| 0.874664
| 0.854048
| 0.793546
| 0.783089
| 0.728563
| 0.728563
| 0
| 0.079072
| 0.246988
| 11,454
| 289
| 114
| 39.633218
| 0.697043
| 0.143269
| 0
| 0.806061
| 0
| 0
| 0.011059
| 0
| 0
| 0
| 0
| 0
| 0.375758
| 1
| 0.133333
| false
| 0
| 0.036364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8a821d100203c94607643215a956e3ea4e056e51
| 40,609
|
py
|
Python
|
silver/migrations/0032_auto_20170201_1342.py
|
Millennium-Softwares/silver
|
047566804a3373647bb37f1afbf6b18fbda5a5b2
|
[
"Apache-2.0"
] | 1
|
2020-05-10T12:33:09.000Z
|
2020-05-10T12:33:09.000Z
|
silver/migrations/0032_auto_20170201_1342.py
|
Millennium-Softwares/silver
|
047566804a3373647bb37f1afbf6b18fbda5a5b2
|
[
"Apache-2.0"
] | null | null | null |
silver/migrations/0032_auto_20170201_1342.py
|
Millennium-Softwares/silver
|
047566804a3373647bb37f1afbf6b18fbda5a5b2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
import jsonfield.fields
from decimal import Decimal
import silver.utils.models
import django.utils.timezone
import django.core.validators
import uuid
class Migration(migrations.Migration):
dependencies = [
('silver', '0031_auto_20170125_1343'),
]
def customer_name_split_forward(apps, schema_editor):
    """Data migration: split the legacy single ``name`` into first/last name.

    Everything before the last space becomes ``first_name``; a name with
    no space at all is stored entirely in ``last_name``.
    """
    Customer = apps.get_model('silver', 'Customer')
    for customer in Customer.objects.all():
        parts = customer.name.rsplit(" ", 1)
        if len(parts) == 2:
            customer.first_name, customer.last_name = parts
        else:
            # No space to split on: keep the whole name as the last name.
            customer.last_name = customer.name
        customer.save()
def customer_name_split_reverse(apps, schema_editor):
Customer = apps.get_model('silver', 'Customer')
for customer in Customer.objects.all():
if customer.first_name:
customer.name = "%s %s" % (customer.first_name, customer.last_name)
else:
customer.name = customer.last_name
customer.save()
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('kind', models.CharField(max_length=40)),
('series', models.CharField(max_length=20, null=True, blank=True)),
('number', models.IntegerField(null=True, blank=True)),
('due_date', models.DateField(null=True, blank=True)),
('issue_date', models.DateField(null=True, blank=True)),
('paid_date', models.DateField(null=True, blank=True)),
('cancel_date', models.DateField(null=True, blank=True)),
('sales_tax_percent', models.DecimalField(null=True, max_digits=4, decimal_places=2, blank=True)),
('sales_tax_name', models.CharField(max_length=64, null=True, blank=True)),
('currency', models.CharField(max_length=4)),
('state', models.CharField(max_length=10)),
('pdf', models.FileField(upload_to=silver.models.documents.base.documents_pdf_path, null=True, editable=False, blank=True)),
],
options={
'managed': False,
},
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('payment_processor', models.CharField(max_length=256, choices=[(b'manual', b'manual')])),
('added_at', models.DateTimeField(default=django.utils.timezone.now)),
('data', jsonfield.fields.JSONField(default={}, null=True, blank=True)),
('verified', models.BooleanField(default=False)),
('canceled', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('amount', models.DecimalField(max_digits=12, decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('currency', models.CharField(help_text=b'The currency used for billing.', max_length=4, choices=[('AED', 'UAE Dirham'), ('AFN', 'Afghani'), ('ALL', 'Lek'), ('AMD', 'Armenian Dram'), ('ANG', 'Netherlands Antillean Guilder'), ('AOA', 'Kwanza'), ('ARS', 'Argentine Peso'), ('AUD', 'Australian Dollar'), ('AWG', 'Aruban Florin'), ('AZN', 'Azerbaijanian Manat'), ('BAM', 'Convertible Mark'), ('BBD', 'Barbados Dollar'), ('BDT', 'Taka'), ('BGN', 'Bulgarian Lev'), ('BHD', 'Bahraini Dinar'), ('BIF', 'Burundi Franc'), ('BMD', 'Bermudian Dollar'), ('BND', 'Brunei Dollar'), ('BOB', 'Boliviano'), ('BRL', 'Brazilian Real'), ('BSD', 'Bahamian Dollar'), ('BTN', 'Ngultrum'), ('BWP', 'Pula'), ('BYR', 'Belarusian Ruble'), ('BZD', 'Belize Dollar'), ('CAD', 'Canadian Dollar'), ('CDF', 'Congolese Franc'), ('CHF', 'Swiss Franc'), ('CLP', 'Chilean Peso'), ('CNY', 'Yuan Renminbi'), ('COP', 'Colombian Peso'), ('CRC', 'Costa Rican Colon'), ('CUC', 'Peso Convertible'), ('CUP', 'Cuban Peso'), ('CVE', 'Cabo Verde Escudo'), ('CZK', 'Czech Koruna'), ('DJF', 'Djibouti Franc'), ('DKK', 'Danish Krone'), ('DOP', 'Dominican Peso'), ('DZD', 'Algerian Dinar'), ('EGP', 'Egyptian Pound'), ('ERN', 'Nakfa'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('FJD', 'Fiji Dollar'), ('FKP', 'Falkland Islands Pound'), ('GBP', 'Pound Sterling'), ('GEL', 'Lari'), ('GHS', 'Ghana Cedi'), ('GIP', 'Gibraltar Pound'), ('GMD', 'Dalasi'), ('GNF', 'Guinea Franc'), ('GTQ', 'Quetzal'), ('GYD', 'Guyana Dollar'), ('HKD', 'Hong Kong Dollar'), ('HNL', 'Lempira'), ('HRK', 'Kuna'), ('HTG', 'Gourde'), ('HUF', 'Forint'), ('IDR', 'Rupiah'), ('ILS', 'New Israeli Sheqel'), ('INR', 'Indian Rupee'), ('IQD', 'Iraqi Dinar'), ('IRR', 'Iranian Rial'), ('ISK', 'Iceland Krona'), ('JMD', 'Jamaican Dollar'), ('JOD', 'Jordanian Dinar'), ('JPY', 'Yen'), ('KES', 'Kenyan Shilling'), ('KGS', 'Som'), ('KHR', 'Riel'), ('KMF', 'Comoro Franc'), ('KPW', 'North Korean Won'), ('KRW', 'Won'), ('KWD', 'Kuwaiti Dinar'), ('KYD', 'Cayman Islands Dollar'), ('KZT', 
'Tenge'), ('LAK', 'Kip'), ('LBP', 'Lebanese Pound'), ('LKR', 'Sri Lanka Rupee'), ('LRD', 'Liberian Dollar'), ('LSL', 'Loti'), ('LYD', 'Libyan Dinar'), ('MAD', 'Moroccan Dirham'), ('MDL', 'Moldovan Leu'), ('MGA', 'Malagasy Ariary'), ('MKD', 'Denar'), ('MMK', 'Kyat'), ('MNT', 'Tugrik'), ('MOP', 'Pataca'), ('MRO', 'Ouguiya'), ('MUR', 'Mauritius Rupee'), ('MVR', 'Rufiyaa'), ('MWK', 'Malawi Kwacha'), ('MXN', 'Mexican Peso'), ('MYR', 'Malaysian Ringgit'), ('MZN', 'Mozambique Metical'), ('NAD', 'Namibia Dollar'), ('NGN', 'Naira'), ('NIO', 'Cordoba Oro'), ('NOK', 'Norwegian Krone'), ('NPR', 'Nepalese Rupee'), ('NZD', 'New Zealand Dollar'), ('OMR', 'Rial Omani'), ('PAB', 'Balboa'), ('PEN', 'Sol'), ('PGK', 'Kina'), ('PHP', 'Philippine Peso'), ('PKR', 'Pakistan Rupee'), ('PLN', 'Zloty'), ('PYG', 'Guarani'), ('QAR', 'Qatari Rial'), ('RON', 'Romanian Leu'), ('RSD', 'Serbian Dinar'), ('RUB', 'Russian Ruble'), ('RWF', 'Rwanda Franc'), ('SAR', 'Saudi Riyal'), ('SBD', 'Solomon Islands Dollar'), ('SCR', 'Seychelles Rupee'), ('SDG', 'Sudanese Pound'), ('SEK', 'Swedish Krona'), ('SGD', 'Singapore Dollar'), ('SHP', 'Saint Helena Pound'), ('SLL', 'Leone'), ('SOS', 'Somali Shilling'), ('SRD', 'Surinam Dollar'), ('SSP', 'South Sudanese Pound'), ('STD', 'Dobra'), ('SVC', 'El Salvador Colon'), ('SYP', 'Syrian Pound'), ('SZL', 'Lilangeni'), ('THB', 'Baht'), ('TJS', 'Somoni'), ('TMT', 'Turkmenistan New Manat'), ('TND', 'Tunisian Dinar'), ('TOP', 'Pa\u2019anga'), ('TRY', 'Turkish Lira'), ('TTD', 'Trinidad and Tobago Dollar'), ('TWD', 'New Taiwan Dollar'), ('TZS', 'Tanzanian Shilling'), ('UAH', 'Hryvnia'), ('UGX', 'Uganda Shilling'), ('USD', 'US Dollar'), ('UYU', 'Peso Uruguayo'), ('UZS', 'Uzbekistan Sum'), ('VEF', 'Bol\xedvar'), ('VND', 'Dong'), ('VUV', 'Vatu'), ('WST', 'Tala'), ('XAF', 'CFA Franc BEAC'), ('XAG', 'Silver'), ('XAU', 'Gold'), ('XBA', 'Bond Markets Unit European Composite Unit (EURCO)'), ('XBB', 'Bond Markets Unit European Monetary Unit (E.M.U.-6)'), ('XBC', 'Bond Markets Unit 
European Unit of Account 9 (E.U.A.-9)'), ('XBD', 'Bond Markets Unit European Unit of Account 17 (E.U.A.-17)'), ('XCD', 'East Caribbean Dollar'), ('XDR', 'SDR (Special Drawing Right)'), ('XOF', 'CFA Franc BCEAO'), ('XPD', 'Palladium'), ('XPF', 'CFP Franc'), ('XPT', 'Platinum'), ('XSU', 'Sucre'), ('XTS', 'Codes specifically reserved for testing purposes'), ('XUA', 'ADB Unit of Account'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('YER', 'Yemeni Rial'), ('ZAR', 'Rand'), ('ZMW', 'Zambian Kwacha'), ('ZWL', 'Zimbabwe Dollar')])),
('external_reference', models.CharField(max_length=256, null=True, blank=True)),
('data', jsonfield.fields.JSONField(default={}, null=True, blank=True)),
('state', django_fsm.FSMField(default=b'initial', max_length=8, choices=[(b'canceled', 'Canceled'), (b'refunded', 'Refunded'), (b'initial', 'Initial'), (b'failed', 'Failed'), (b'settled', 'Settled'), (b'pending', 'Pending')])),
('uuid', models.UUIDField(default=uuid.uuid4)),
('valid_until', models.DateTimeField(null=True, blank=True)),
('last_access', models.DateTimeField(null=True, blank=True)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('updated_at', silver.utils.models.AutoDateTimeField(default=django.utils.timezone.now)),
('fail_code', models.CharField(blank=True, max_length=32, null=True, choices=[(b'default', b'default'), (b'expired_payment_method', b'expired_payment_method'), (b'insufficient_funds', b'insufficient_funds')])),
('refund_code', models.CharField(blank=True, max_length=32, null=True, choices=[(b'default', b'default')])),
('cancel_code', models.CharField(blank=True, max_length=32, null=True, choices=[(b'default', b'default')])),
],
),
migrations.AlterModelOptions(
name='customer',
options={'ordering': ['first_name', 'last_name', 'company']},
),
migrations.AddField(
model_name='customer',
name='first_name',
field=models.CharField(default='', help_text=b"The customer's first name.", max_length=128),
preserve_default=False,
),
migrations.AddField(
model_name='customer',
name='last_name',
field=models.CharField(default='', help_text=b"The customer's last name.", max_length=128),
preserve_default=False,
),
migrations.RunPython(customer_name_split_forward,
customer_name_split_reverse),
migrations.AddField(
model_name='customer',
name='phone',
field=models.CharField(max_length=15, null=True, blank=True),
),
migrations.AddField(
model_name='provider',
name='phone',
field=models.CharField(max_length=15, null=True, blank=True),
),
migrations.AlterField(
model_name='customer',
name='country',
field=models.CharField(max_length=3, choices=[('AW', 'Aruba'), ('AF', 'Afghanistan'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AX', '\xc5land Islands'), ('AL', 'Albania'), ('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AS', 'American Samoa'), ('AQ', 'Antarctica'), ('TF', 'French Southern Territories'), ('AG', 'Antigua and Barbuda'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BI', 'Burundi'), ('BE', 'Belgium'), ('BJ', 'Benin'), ('BQ', 'Bonaire, Sint Eustatius and Saba'), ('BF', 'Burkina Faso'), ('BD', 'Bangladesh'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BS', 'Bahamas'), ('BA', 'Bosnia and Herzegovina'), ('BL', 'Saint Barth\xe9lemy'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('BM', 'Bermuda'), ('BO', 'Bolivia, Plurinational State of'), ('BR', 'Brazil'), ('BB', 'Barbados'), ('BN', 'Brunei Darussalam'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('CF', 'Central African Republic'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CH', 'Switzerland'), ('CL', 'Chile'), ('CN', 'China'), ('CI', "C\xf4te d'Ivoire"), ('CM', 'Cameroon'), ('CD', 'Congo, The Democratic Republic of the'), ('CG', 'Congo'), ('CK', 'Cook Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CV', 'Cabo Verde'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CW', 'Cura\xe7ao'), ('CX', 'Christmas Island'), ('KY', 'Cayman Islands'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DK', 'Denmark'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('ER', 'Eritrea'), ('EH', 'Western Sahara'), ('ES', 'Spain'), ('EE', 'Estonia'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FR', 'France'), ('FO', 'Faroe Islands'), ('FM', 'Micronesia, Federated States of'), ('GA', 'Gabon'), ('GB', 'United Kingdom'), ('GE', 'Georgia'), ('GG', 'Guernsey'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GN', 
'Guinea'), ('GP', 'Guadeloupe'), ('GM', 'Gambia'), ('GW', 'Guinea-Bissau'), ('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GD', 'Grenada'), ('GL', 'Greenland'), ('GT', 'Guatemala'), ('GF', 'French Guiana'), ('GU', 'Guam'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard Island and McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IM', 'Isle of Man'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IE', 'Ireland'), ('IR', 'Iran, Islamic Republic of'), ('IQ', 'Iraq'), ('IS', 'Iceland'), ('IL', 'Israel'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KN', 'Saint Kitts and Nevis'), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LR', 'Liberia'), ('LY', 'Libya'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('MO', 'Macao'), ('MF', 'Saint Martin (French part)'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('MG', 'Madagascar'), ('MV', 'Maldives'), ('MX', 'Mexico'), ('MH', 'Marshall Islands'), ('MK', 'Macedonia, Republic of'), ('ML', 'Mali'), ('MT', 'Malta'), ('MM', 'Myanmar'), ('ME', 'Montenegro'), ('MN', 'Mongolia'), ('MP', 'Northern Mariana Islands'), ('MZ', 'Mozambique'), ('MR', 'Mauritania'), ('MS', 'Montserrat'), ('MQ', 'Martinique'), ('MU', 'Mauritius'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('YT', 'Mayotte'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NU', 'Niue'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PA', 'Panama'), ('PN', 'Pitcairn'), ('PE', 'Peru'), ('PH', 
'Philippines'), ('PW', 'Palau'), ('PG', 'Papua New Guinea'), ('PL', 'Poland'), ('PR', 'Puerto Rico'), ('KP', "Korea, Democratic People's Republic of"), ('PT', 'Portugal'), ('PY', 'Paraguay'), ('PS', 'Palestine, State of'), ('PF', 'French Polynesia'), ('QA', 'Qatar'), ('RE', 'R\xe9union'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SD', 'Sudan'), ('SN', 'Senegal'), ('SG', 'Singapore'), ('GS', 'South Georgia and the South Sandwich Islands'), ('SH', 'Saint Helena, Ascension and Tristan da Cunha'), ('SJ', 'Svalbard and Jan Mayen'), ('SB', 'Solomon Islands'), ('SL', 'Sierra Leone'), ('SV', 'El Salvador'), ('SM', 'San Marino'), ('SO', 'Somalia'), ('PM', 'Saint Pierre and Miquelon'), ('RS', 'Serbia'), ('SS', 'South Sudan'), ('ST', 'Sao Tome and Principe'), ('SR', 'Suriname'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SE', 'Sweden'), ('SZ', 'Swaziland'), ('SX', 'Sint Maarten (Dutch part)'), ('SC', 'Seychelles'), ('SY', 'Syrian Arab Republic'), ('TC', 'Turks and Caicos Islands'), ('TD', 'Chad'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TM', 'Turkmenistan'), ('TL', 'Timor-Leste'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('US', 'United States'), ('UZ', 'Uzbekistan'), ('VA', 'Holy See (Vatican City State)'), ('VC', 'Saint Vincent and the Grenadines'), ('VE', 'Venezuela, Bolivarian Republic of'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis and Futuna'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')]),
),
migrations.AlterField(
model_name='customer',
name='customer_reference',
field=models.CharField(blank=True, max_length=256, null=True, help_text=b"It's a reference to be passed between silver and clients. It usually points to an account ID.", validators=[django.core.validators.RegexValidator(regex=b'^[^,]*$', message='Reference must not contain commas.')]),
),
migrations.AlterField(
model_name='customer',
name='email',
field=models.CharField(max_length=254, null=True, blank=True),
),
migrations.AlterField(
model_name='customer',
name='meta',
field=jsonfield.fields.JSONField(default={}, null=True, blank=True),
),
migrations.AlterField(
model_name='invoice',
name='currency',
field=models.CharField(default=b'USD', help_text=b'The currency used for billing.', max_length=4, choices=[('AED', 'UAE Dirham'), ('AFN', 'Afghani'), ('ALL', 'Lek'), ('AMD', 'Armenian Dram'), ('ANG', 'Netherlands Antillean Guilder'), ('AOA', 'Kwanza'), ('ARS', 'Argentine Peso'), ('AUD', 'Australian Dollar'), ('AWG', 'Aruban Florin'), ('AZN', 'Azerbaijanian Manat'), ('BAM', 'Convertible Mark'), ('BBD', 'Barbados Dollar'), ('BDT', 'Taka'), ('BGN', 'Bulgarian Lev'), ('BHD', 'Bahraini Dinar'), ('BIF', 'Burundi Franc'), ('BMD', 'Bermudian Dollar'), ('BND', 'Brunei Dollar'), ('BOB', 'Boliviano'), ('BRL', 'Brazilian Real'), ('BSD', 'Bahamian Dollar'), ('BTN', 'Ngultrum'), ('BWP', 'Pula'), ('BYR', 'Belarusian Ruble'), ('BZD', 'Belize Dollar'), ('CAD', 'Canadian Dollar'), ('CDF', 'Congolese Franc'), ('CHF', 'Swiss Franc'), ('CLP', 'Chilean Peso'), ('CNY', 'Yuan Renminbi'), ('COP', 'Colombian Peso'), ('CRC', 'Costa Rican Colon'), ('CUC', 'Peso Convertible'), ('CUP', 'Cuban Peso'), ('CVE', 'Cabo Verde Escudo'), ('CZK', 'Czech Koruna'), ('DJF', 'Djibouti Franc'), ('DKK', 'Danish Krone'), ('DOP', 'Dominican Peso'), ('DZD', 'Algerian Dinar'), ('EGP', 'Egyptian Pound'), ('ERN', 'Nakfa'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('FJD', 'Fiji Dollar'), ('FKP', 'Falkland Islands Pound'), ('GBP', 'Pound Sterling'), ('GEL', 'Lari'), ('GHS', 'Ghana Cedi'), ('GIP', 'Gibraltar Pound'), ('GMD', 'Dalasi'), ('GNF', 'Guinea Franc'), ('GTQ', 'Quetzal'), ('GYD', 'Guyana Dollar'), ('HKD', 'Hong Kong Dollar'), ('HNL', 'Lempira'), ('HRK', 'Kuna'), ('HTG', 'Gourde'), ('HUF', 'Forint'), ('IDR', 'Rupiah'), ('ILS', 'New Israeli Sheqel'), ('INR', 'Indian Rupee'), ('IQD', 'Iraqi Dinar'), ('IRR', 'Iranian Rial'), ('ISK', 'Iceland Krona'), ('JMD', 'Jamaican Dollar'), ('JOD', 'Jordanian Dinar'), ('JPY', 'Yen'), ('KES', 'Kenyan Shilling'), ('KGS', 'Som'), ('KHR', 'Riel'), ('KMF', 'Comoro Franc'), ('KPW', 'North Korean Won'), ('KRW', 'Won'), ('KWD', 'Kuwaiti Dinar'), ('KYD', 'Cayman Islands Dollar'), 
('KZT', 'Tenge'), ('LAK', 'Kip'), ('LBP', 'Lebanese Pound'), ('LKR', 'Sri Lanka Rupee'), ('LRD', 'Liberian Dollar'), ('LSL', 'Loti'), ('LYD', 'Libyan Dinar'), ('MAD', 'Moroccan Dirham'), ('MDL', 'Moldovan Leu'), ('MGA', 'Malagasy Ariary'), ('MKD', 'Denar'), ('MMK', 'Kyat'), ('MNT', 'Tugrik'), ('MOP', 'Pataca'), ('MRO', 'Ouguiya'), ('MUR', 'Mauritius Rupee'), ('MVR', 'Rufiyaa'), ('MWK', 'Malawi Kwacha'), ('MXN', 'Mexican Peso'), ('MYR', 'Malaysian Ringgit'), ('MZN', 'Mozambique Metical'), ('NAD', 'Namibia Dollar'), ('NGN', 'Naira'), ('NIO', 'Cordoba Oro'), ('NOK', 'Norwegian Krone'), ('NPR', 'Nepalese Rupee'), ('NZD', 'New Zealand Dollar'), ('OMR', 'Rial Omani'), ('PAB', 'Balboa'), ('PEN', 'Sol'), ('PGK', 'Kina'), ('PHP', 'Philippine Peso'), ('PKR', 'Pakistan Rupee'), ('PLN', 'Zloty'), ('PYG', 'Guarani'), ('QAR', 'Qatari Rial'), ('RON', 'Romanian Leu'), ('RSD', 'Serbian Dinar'), ('RUB', 'Russian Ruble'), ('RWF', 'Rwanda Franc'), ('SAR', 'Saudi Riyal'), ('SBD', 'Solomon Islands Dollar'), ('SCR', 'Seychelles Rupee'), ('SDG', 'Sudanese Pound'), ('SEK', 'Swedish Krona'), ('SGD', 'Singapore Dollar'), ('SHP', 'Saint Helena Pound'), ('SLL', 'Leone'), ('SOS', 'Somali Shilling'), ('SRD', 'Surinam Dollar'), ('SSP', 'South Sudanese Pound'), ('STD', 'Dobra'), ('SVC', 'El Salvador Colon'), ('SYP', 'Syrian Pound'), ('SZL', 'Lilangeni'), ('THB', 'Baht'), ('TJS', 'Somoni'), ('TMT', 'Turkmenistan New Manat'), ('TND', 'Tunisian Dinar'), ('TOP', 'Pa\u2019anga'), ('TRY', 'Turkish Lira'), ('TTD', 'Trinidad and Tobago Dollar'), ('TWD', 'New Taiwan Dollar'), ('TZS', 'Tanzanian Shilling'), ('UAH', 'Hryvnia'), ('UGX', 'Uganda Shilling'), ('USD', 'US Dollar'), ('UYU', 'Peso Uruguayo'), ('UZS', 'Uzbekistan Sum'), ('VEF', 'Bol\xedvar'), ('VND', 'Dong'), ('VUV', 'Vatu'), ('WST', 'Tala'), ('XAF', 'CFA Franc BEAC'), ('XAG', 'Silver'), ('XAU', 'Gold'), ('XBA', 'Bond Markets Unit European Composite Unit (EURCO)'), ('XBB', 'Bond Markets Unit European Monetary Unit (E.M.U.-6)'), ('XBC', 'Bond Markets 
Unit European Unit of Account 9 (E.U.A.-9)'), ('XBD', 'Bond Markets Unit European Unit of Account 17 (E.U.A.-17)'), ('XCD', 'East Caribbean Dollar'), ('XDR', 'SDR (Special Drawing Right)'), ('XOF', 'CFA Franc BCEAO'), ('XPD', 'Palladium'), ('XPF', 'CFP Franc'), ('XPT', 'Platinum'), ('XSU', 'Sucre'), ('XTS', 'Codes specifically reserved for testing purposes'), ('XUA', 'ADB Unit of Account'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('YER', 'Yemeni Rial'), ('ZAR', 'Rand'), ('ZMW', 'Zambian Kwacha'), ('ZWL', 'Zimbabwe Dollar')]),
),
migrations.AlterField(
model_name='invoice',
name='proforma',
field=models.ForeignKey(related_name='related_invoice', blank=True, to='silver.Proforma', null=True, on_delete=django.db.models.deletion.CASCADE),
),
migrations.AlterField(
model_name='plan',
name='currency',
field=models.CharField(default=b'USD', help_text=b'The currency in which the subscription will be charged.', max_length=4, choices=[('AED', 'UAE Dirham'), ('AFN', 'Afghani'), ('ALL', 'Lek'), ('AMD', 'Armenian Dram'), ('ANG', 'Netherlands Antillean Guilder'), ('AOA', 'Kwanza'), ('ARS', 'Argentine Peso'), ('AUD', 'Australian Dollar'), ('AWG', 'Aruban Florin'), ('AZN', 'Azerbaijanian Manat'), ('BAM', 'Convertible Mark'), ('BBD', 'Barbados Dollar'), ('BDT', 'Taka'), ('BGN', 'Bulgarian Lev'), ('BHD', 'Bahraini Dinar'), ('BIF', 'Burundi Franc'), ('BMD', 'Bermudian Dollar'), ('BND', 'Brunei Dollar'), ('BOB', 'Boliviano'), ('BRL', 'Brazilian Real'), ('BSD', 'Bahamian Dollar'), ('BTN', 'Ngultrum'), ('BWP', 'Pula'), ('BYR', 'Belarusian Ruble'), ('BZD', 'Belize Dollar'), ('CAD', 'Canadian Dollar'), ('CDF', 'Congolese Franc'), ('CHF', 'Swiss Franc'), ('CLP', 'Chilean Peso'), ('CNY', 'Yuan Renminbi'), ('COP', 'Colombian Peso'), ('CRC', 'Costa Rican Colon'), ('CUC', 'Peso Convertible'), ('CUP', 'Cuban Peso'), ('CVE', 'Cabo Verde Escudo'), ('CZK', 'Czech Koruna'), ('DJF', 'Djibouti Franc'), ('DKK', 'Danish Krone'), ('DOP', 'Dominican Peso'), ('DZD', 'Algerian Dinar'), ('EGP', 'Egyptian Pound'), ('ERN', 'Nakfa'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('FJD', 'Fiji Dollar'), ('FKP', 'Falkland Islands Pound'), ('GBP', 'Pound Sterling'), ('GEL', 'Lari'), ('GHS', 'Ghana Cedi'), ('GIP', 'Gibraltar Pound'), ('GMD', 'Dalasi'), ('GNF', 'Guinea Franc'), ('GTQ', 'Quetzal'), ('GYD', 'Guyana Dollar'), ('HKD', 'Hong Kong Dollar'), ('HNL', 'Lempira'), ('HRK', 'Kuna'), ('HTG', 'Gourde'), ('HUF', 'Forint'), ('IDR', 'Rupiah'), ('ILS', 'New Israeli Sheqel'), ('INR', 'Indian Rupee'), ('IQD', 'Iraqi Dinar'), ('IRR', 'Iranian Rial'), ('ISK', 'Iceland Krona'), ('JMD', 'Jamaican Dollar'), ('JOD', 'Jordanian Dinar'), ('JPY', 'Yen'), ('KES', 'Kenyan Shilling'), ('KGS', 'Som'), ('KHR', 'Riel'), ('KMF', 'Comoro Franc'), ('KPW', 'North Korean Won'), ('KRW', 'Won'), ('KWD', 'Kuwaiti Dinar'), ('KYD', 
'Cayman Islands Dollar'), ('KZT', 'Tenge'), ('LAK', 'Kip'), ('LBP', 'Lebanese Pound'), ('LKR', 'Sri Lanka Rupee'), ('LRD', 'Liberian Dollar'), ('LSL', 'Loti'), ('LYD', 'Libyan Dinar'), ('MAD', 'Moroccan Dirham'), ('MDL', 'Moldovan Leu'), ('MGA', 'Malagasy Ariary'), ('MKD', 'Denar'), ('MMK', 'Kyat'), ('MNT', 'Tugrik'), ('MOP', 'Pataca'), ('MRO', 'Ouguiya'), ('MUR', 'Mauritius Rupee'), ('MVR', 'Rufiyaa'), ('MWK', 'Malawi Kwacha'), ('MXN', 'Mexican Peso'), ('MYR', 'Malaysian Ringgit'), ('MZN', 'Mozambique Metical'), ('NAD', 'Namibia Dollar'), ('NGN', 'Naira'), ('NIO', 'Cordoba Oro'), ('NOK', 'Norwegian Krone'), ('NPR', 'Nepalese Rupee'), ('NZD', 'New Zealand Dollar'), ('OMR', 'Rial Omani'), ('PAB', 'Balboa'), ('PEN', 'Sol'), ('PGK', 'Kina'), ('PHP', 'Philippine Peso'), ('PKR', 'Pakistan Rupee'), ('PLN', 'Zloty'), ('PYG', 'Guarani'), ('QAR', 'Qatari Rial'), ('RON', 'Romanian Leu'), ('RSD', 'Serbian Dinar'), ('RUB', 'Russian Ruble'), ('RWF', 'Rwanda Franc'), ('SAR', 'Saudi Riyal'), ('SBD', 'Solomon Islands Dollar'), ('SCR', 'Seychelles Rupee'), ('SDG', 'Sudanese Pound'), ('SEK', 'Swedish Krona'), ('SGD', 'Singapore Dollar'), ('SHP', 'Saint Helena Pound'), ('SLL', 'Leone'), ('SOS', 'Somali Shilling'), ('SRD', 'Surinam Dollar'), ('SSP', 'South Sudanese Pound'), ('STD', 'Dobra'), ('SVC', 'El Salvador Colon'), ('SYP', 'Syrian Pound'), ('SZL', 'Lilangeni'), ('THB', 'Baht'), ('TJS', 'Somoni'), ('TMT', 'Turkmenistan New Manat'), ('TND', 'Tunisian Dinar'), ('TOP', 'Pa\u2019anga'), ('TRY', 'Turkish Lira'), ('TTD', 'Trinidad and Tobago Dollar'), ('TWD', 'New Taiwan Dollar'), ('TZS', 'Tanzanian Shilling'), ('UAH', 'Hryvnia'), ('UGX', 'Uganda Shilling'), ('USD', 'US Dollar'), ('UYU', 'Peso Uruguayo'), ('UZS', 'Uzbekistan Sum'), ('VEF', 'Bol\xedvar'), ('VND', 'Dong'), ('VUV', 'Vatu'), ('WST', 'Tala'), ('XAF', 'CFA Franc BEAC'), ('XAG', 'Silver'), ('XAU', 'Gold'), ('XBA', 'Bond Markets Unit European Composite Unit (EURCO)'), ('XBB', 'Bond Markets Unit European Monetary Unit 
(E.M.U.-6)'), ('XBC', 'Bond Markets Unit European Unit of Account 9 (E.U.A.-9)'), ('XBD', 'Bond Markets Unit European Unit of Account 17 (E.U.A.-17)'), ('XCD', 'East Caribbean Dollar'), ('XDR', 'SDR (Special Drawing Right)'), ('XOF', 'CFA Franc BCEAO'), ('XPD', 'Palladium'), ('XPF', 'CFP Franc'), ('XPT', 'Platinum'), ('XSU', 'Sucre'), ('XTS', 'Codes specifically reserved for testing purposes'), ('XUA', 'ADB Unit of Account'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('YER', 'Yemeni Rial'), ('ZAR', 'Rand'), ('ZMW', 'Zambian Kwacha'), ('ZWL', 'Zimbabwe Dollar')]),
),
migrations.AlterField(
model_name='proforma',
name='currency',
field=models.CharField(default=b'USD', help_text=b'The currency used for billing.', max_length=4, choices=[('AED', 'UAE Dirham'), ('AFN', 'Afghani'), ('ALL', 'Lek'), ('AMD', 'Armenian Dram'), ('ANG', 'Netherlands Antillean Guilder'), ('AOA', 'Kwanza'), ('ARS', 'Argentine Peso'), ('AUD', 'Australian Dollar'), ('AWG', 'Aruban Florin'), ('AZN', 'Azerbaijanian Manat'), ('BAM', 'Convertible Mark'), ('BBD', 'Barbados Dollar'), ('BDT', 'Taka'), ('BGN', 'Bulgarian Lev'), ('BHD', 'Bahraini Dinar'), ('BIF', 'Burundi Franc'), ('BMD', 'Bermudian Dollar'), ('BND', 'Brunei Dollar'), ('BOB', 'Boliviano'), ('BRL', 'Brazilian Real'), ('BSD', 'Bahamian Dollar'), ('BTN', 'Ngultrum'), ('BWP', 'Pula'), ('BYR', 'Belarusian Ruble'), ('BZD', 'Belize Dollar'), ('CAD', 'Canadian Dollar'), ('CDF', 'Congolese Franc'), ('CHF', 'Swiss Franc'), ('CLP', 'Chilean Peso'), ('CNY', 'Yuan Renminbi'), ('COP', 'Colombian Peso'), ('CRC', 'Costa Rican Colon'), ('CUC', 'Peso Convertible'), ('CUP', 'Cuban Peso'), ('CVE', 'Cabo Verde Escudo'), ('CZK', 'Czech Koruna'), ('DJF', 'Djibouti Franc'), ('DKK', 'Danish Krone'), ('DOP', 'Dominican Peso'), ('DZD', 'Algerian Dinar'), ('EGP', 'Egyptian Pound'), ('ERN', 'Nakfa'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('FJD', 'Fiji Dollar'), ('FKP', 'Falkland Islands Pound'), ('GBP', 'Pound Sterling'), ('GEL', 'Lari'), ('GHS', 'Ghana Cedi'), ('GIP', 'Gibraltar Pound'), ('GMD', 'Dalasi'), ('GNF', 'Guinea Franc'), ('GTQ', 'Quetzal'), ('GYD', 'Guyana Dollar'), ('HKD', 'Hong Kong Dollar'), ('HNL', 'Lempira'), ('HRK', 'Kuna'), ('HTG', 'Gourde'), ('HUF', 'Forint'), ('IDR', 'Rupiah'), ('ILS', 'New Israeli Sheqel'), ('INR', 'Indian Rupee'), ('IQD', 'Iraqi Dinar'), ('IRR', 'Iranian Rial'), ('ISK', 'Iceland Krona'), ('JMD', 'Jamaican Dollar'), ('JOD', 'Jordanian Dinar'), ('JPY', 'Yen'), ('KES', 'Kenyan Shilling'), ('KGS', 'Som'), ('KHR', 'Riel'), ('KMF', 'Comoro Franc'), ('KPW', 'North Korean Won'), ('KRW', 'Won'), ('KWD', 'Kuwaiti Dinar'), ('KYD', 'Cayman Islands Dollar'), 
('KZT', 'Tenge'), ('LAK', 'Kip'), ('LBP', 'Lebanese Pound'), ('LKR', 'Sri Lanka Rupee'), ('LRD', 'Liberian Dollar'), ('LSL', 'Loti'), ('LYD', 'Libyan Dinar'), ('MAD', 'Moroccan Dirham'), ('MDL', 'Moldovan Leu'), ('MGA', 'Malagasy Ariary'), ('MKD', 'Denar'), ('MMK', 'Kyat'), ('MNT', 'Tugrik'), ('MOP', 'Pataca'), ('MRO', 'Ouguiya'), ('MUR', 'Mauritius Rupee'), ('MVR', 'Rufiyaa'), ('MWK', 'Malawi Kwacha'), ('MXN', 'Mexican Peso'), ('MYR', 'Malaysian Ringgit'), ('MZN', 'Mozambique Metical'), ('NAD', 'Namibia Dollar'), ('NGN', 'Naira'), ('NIO', 'Cordoba Oro'), ('NOK', 'Norwegian Krone'), ('NPR', 'Nepalese Rupee'), ('NZD', 'New Zealand Dollar'), ('OMR', 'Rial Omani'), ('PAB', 'Balboa'), ('PEN', 'Sol'), ('PGK', 'Kina'), ('PHP', 'Philippine Peso'), ('PKR', 'Pakistan Rupee'), ('PLN', 'Zloty'), ('PYG', 'Guarani'), ('QAR', 'Qatari Rial'), ('RON', 'Romanian Leu'), ('RSD', 'Serbian Dinar'), ('RUB', 'Russian Ruble'), ('RWF', 'Rwanda Franc'), ('SAR', 'Saudi Riyal'), ('SBD', 'Solomon Islands Dollar'), ('SCR', 'Seychelles Rupee'), ('SDG', 'Sudanese Pound'), ('SEK', 'Swedish Krona'), ('SGD', 'Singapore Dollar'), ('SHP', 'Saint Helena Pound'), ('SLL', 'Leone'), ('SOS', 'Somali Shilling'), ('SRD', 'Surinam Dollar'), ('SSP', 'South Sudanese Pound'), ('STD', 'Dobra'), ('SVC', 'El Salvador Colon'), ('SYP', 'Syrian Pound'), ('SZL', 'Lilangeni'), ('THB', 'Baht'), ('TJS', 'Somoni'), ('TMT', 'Turkmenistan New Manat'), ('TND', 'Tunisian Dinar'), ('TOP', 'Pa\u2019anga'), ('TRY', 'Turkish Lira'), ('TTD', 'Trinidad and Tobago Dollar'), ('TWD', 'New Taiwan Dollar'), ('TZS', 'Tanzanian Shilling'), ('UAH', 'Hryvnia'), ('UGX', 'Uganda Shilling'), ('USD', 'US Dollar'), ('UYU', 'Peso Uruguayo'), ('UZS', 'Uzbekistan Sum'), ('VEF', 'Bol\xedvar'), ('VND', 'Dong'), ('VUV', 'Vatu'), ('WST', 'Tala'), ('XAF', 'CFA Franc BEAC'), ('XAG', 'Silver'), ('XAU', 'Gold'), ('XBA', 'Bond Markets Unit European Composite Unit (EURCO)'), ('XBB', 'Bond Markets Unit European Monetary Unit (E.M.U.-6)'), ('XBC', 'Bond Markets 
Unit European Unit of Account 9 (E.U.A.-9)'), ('XBD', 'Bond Markets Unit European Unit of Account 17 (E.U.A.-17)'), ('XCD', 'East Caribbean Dollar'), ('XDR', 'SDR (Special Drawing Right)'), ('XOF', 'CFA Franc BCEAO'), ('XPD', 'Palladium'), ('XPF', 'CFP Franc'), ('XPT', 'Platinum'), ('XSU', 'Sucre'), ('XTS', 'Codes specifically reserved for testing purposes'), ('XUA', 'ADB Unit of Account'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('YER', 'Yemeni Rial'), ('ZAR', 'Rand'), ('ZMW', 'Zambian Kwacha'), ('ZWL', 'Zimbabwe Dollar')]),
),
migrations.AlterField(
model_name='proforma',
name='invoice',
field=models.ForeignKey(related_name='related_proforma', blank=True, to='silver.Invoice', null=True, on_delete=django.db.models.deletion.CASCADE),
),
migrations.AlterField(
model_name='provider',
name='country',
field=models.CharField(max_length=3, choices=[('AW', 'Aruba'), ('AF', 'Afghanistan'), ('AO', 'Angola'), ('AI', 'Anguilla'), ('AX', '\xc5land Islands'), ('AL', 'Albania'), ('AD', 'Andorra'), ('AE', 'United Arab Emirates'), ('AR', 'Argentina'), ('AM', 'Armenia'), ('AS', 'American Samoa'), ('AQ', 'Antarctica'), ('TF', 'French Southern Territories'), ('AG', 'Antigua and Barbuda'), ('AU', 'Australia'), ('AT', 'Austria'), ('AZ', 'Azerbaijan'), ('BI', 'Burundi'), ('BE', 'Belgium'), ('BJ', 'Benin'), ('BQ', 'Bonaire, Sint Eustatius and Saba'), ('BF', 'Burkina Faso'), ('BD', 'Bangladesh'), ('BG', 'Bulgaria'), ('BH', 'Bahrain'), ('BS', 'Bahamas'), ('BA', 'Bosnia and Herzegovina'), ('BL', 'Saint Barth\xe9lemy'), ('BY', 'Belarus'), ('BZ', 'Belize'), ('BM', 'Bermuda'), ('BO', 'Bolivia, Plurinational State of'), ('BR', 'Brazil'), ('BB', 'Barbados'), ('BN', 'Brunei Darussalam'), ('BT', 'Bhutan'), ('BV', 'Bouvet Island'), ('BW', 'Botswana'), ('CF', 'Central African Republic'), ('CA', 'Canada'), ('CC', 'Cocos (Keeling) Islands'), ('CH', 'Switzerland'), ('CL', 'Chile'), ('CN', 'China'), ('CI', "C\xf4te d'Ivoire"), ('CM', 'Cameroon'), ('CD', 'Congo, The Democratic Republic of the'), ('CG', 'Congo'), ('CK', 'Cook Islands'), ('CO', 'Colombia'), ('KM', 'Comoros'), ('CV', 'Cabo Verde'), ('CR', 'Costa Rica'), ('CU', 'Cuba'), ('CW', 'Cura\xe7ao'), ('CX', 'Christmas Island'), ('KY', 'Cayman Islands'), ('CY', 'Cyprus'), ('CZ', 'Czech Republic'), ('DE', 'Germany'), ('DJ', 'Djibouti'), ('DM', 'Dominica'), ('DK', 'Denmark'), ('DO', 'Dominican Republic'), ('DZ', 'Algeria'), ('EC', 'Ecuador'), ('EG', 'Egypt'), ('ER', 'Eritrea'), ('EH', 'Western Sahara'), ('ES', 'Spain'), ('EE', 'Estonia'), ('ET', 'Ethiopia'), ('FI', 'Finland'), ('FJ', 'Fiji'), ('FK', 'Falkland Islands (Malvinas)'), ('FR', 'France'), ('FO', 'Faroe Islands'), ('FM', 'Micronesia, Federated States of'), ('GA', 'Gabon'), ('GB', 'United Kingdom'), ('GE', 'Georgia'), ('GG', 'Guernsey'), ('GH', 'Ghana'), ('GI', 'Gibraltar'), ('GN', 
'Guinea'), ('GP', 'Guadeloupe'), ('GM', 'Gambia'), ('GW', 'Guinea-Bissau'), ('GQ', 'Equatorial Guinea'), ('GR', 'Greece'), ('GD', 'Grenada'), ('GL', 'Greenland'), ('GT', 'Guatemala'), ('GF', 'French Guiana'), ('GU', 'Guam'), ('GY', 'Guyana'), ('HK', 'Hong Kong'), ('HM', 'Heard Island and McDonald Islands'), ('HN', 'Honduras'), ('HR', 'Croatia'), ('HT', 'Haiti'), ('HU', 'Hungary'), ('ID', 'Indonesia'), ('IM', 'Isle of Man'), ('IN', 'India'), ('IO', 'British Indian Ocean Territory'), ('IE', 'Ireland'), ('IR', 'Iran, Islamic Republic of'), ('IQ', 'Iraq'), ('IS', 'Iceland'), ('IL', 'Israel'), ('IT', 'Italy'), ('JM', 'Jamaica'), ('JE', 'Jersey'), ('JO', 'Jordan'), ('JP', 'Japan'), ('KZ', 'Kazakhstan'), ('KE', 'Kenya'), ('KG', 'Kyrgyzstan'), ('KH', 'Cambodia'), ('KI', 'Kiribati'), ('KN', 'Saint Kitts and Nevis'), ('KR', 'Korea, Republic of'), ('KW', 'Kuwait'), ('LA', "Lao People's Democratic Republic"), ('LB', 'Lebanon'), ('LR', 'Liberia'), ('LY', 'Libya'), ('LC', 'Saint Lucia'), ('LI', 'Liechtenstein'), ('LK', 'Sri Lanka'), ('LS', 'Lesotho'), ('LT', 'Lithuania'), ('LU', 'Luxembourg'), ('LV', 'Latvia'), ('MO', 'Macao'), ('MF', 'Saint Martin (French part)'), ('MA', 'Morocco'), ('MC', 'Monaco'), ('MD', 'Moldova, Republic of'), ('MG', 'Madagascar'), ('MV', 'Maldives'), ('MX', 'Mexico'), ('MH', 'Marshall Islands'), ('MK', 'Macedonia, Republic of'), ('ML', 'Mali'), ('MT', 'Malta'), ('MM', 'Myanmar'), ('ME', 'Montenegro'), ('MN', 'Mongolia'), ('MP', 'Northern Mariana Islands'), ('MZ', 'Mozambique'), ('MR', 'Mauritania'), ('MS', 'Montserrat'), ('MQ', 'Martinique'), ('MU', 'Mauritius'), ('MW', 'Malawi'), ('MY', 'Malaysia'), ('YT', 'Mayotte'), ('NA', 'Namibia'), ('NC', 'New Caledonia'), ('NE', 'Niger'), ('NF', 'Norfolk Island'), ('NG', 'Nigeria'), ('NI', 'Nicaragua'), ('NU', 'Niue'), ('NL', 'Netherlands'), ('NO', 'Norway'), ('NP', 'Nepal'), ('NR', 'Nauru'), ('NZ', 'New Zealand'), ('OM', 'Oman'), ('PK', 'Pakistan'), ('PA', 'Panama'), ('PN', 'Pitcairn'), ('PE', 'Peru'), ('PH', 
'Philippines'), ('PW', 'Palau'), ('PG', 'Papua New Guinea'), ('PL', 'Poland'), ('PR', 'Puerto Rico'), ('KP', "Korea, Democratic People's Republic of"), ('PT', 'Portugal'), ('PY', 'Paraguay'), ('PS', 'Palestine, State of'), ('PF', 'French Polynesia'), ('QA', 'Qatar'), ('RE', 'R\xe9union'), ('RO', 'Romania'), ('RU', 'Russian Federation'), ('RW', 'Rwanda'), ('SA', 'Saudi Arabia'), ('SD', 'Sudan'), ('SN', 'Senegal'), ('SG', 'Singapore'), ('GS', 'South Georgia and the South Sandwich Islands'), ('SH', 'Saint Helena, Ascension and Tristan da Cunha'), ('SJ', 'Svalbard and Jan Mayen'), ('SB', 'Solomon Islands'), ('SL', 'Sierra Leone'), ('SV', 'El Salvador'), ('SM', 'San Marino'), ('SO', 'Somalia'), ('PM', 'Saint Pierre and Miquelon'), ('RS', 'Serbia'), ('SS', 'South Sudan'), ('ST', 'Sao Tome and Principe'), ('SR', 'Suriname'), ('SK', 'Slovakia'), ('SI', 'Slovenia'), ('SE', 'Sweden'), ('SZ', 'Swaziland'), ('SX', 'Sint Maarten (Dutch part)'), ('SC', 'Seychelles'), ('SY', 'Syrian Arab Republic'), ('TC', 'Turks and Caicos Islands'), ('TD', 'Chad'), ('TG', 'Togo'), ('TH', 'Thailand'), ('TJ', 'Tajikistan'), ('TK', 'Tokelau'), ('TM', 'Turkmenistan'), ('TL', 'Timor-Leste'), ('TO', 'Tonga'), ('TT', 'Trinidad and Tobago'), ('TN', 'Tunisia'), ('TR', 'Turkey'), ('TV', 'Tuvalu'), ('TW', 'Taiwan, Province of China'), ('TZ', 'Tanzania, United Republic of'), ('UG', 'Uganda'), ('UA', 'Ukraine'), ('UM', 'United States Minor Outlying Islands'), ('UY', 'Uruguay'), ('US', 'United States'), ('UZ', 'Uzbekistan'), ('VA', 'Holy See (Vatican City State)'), ('VC', 'Saint Vincent and the Grenadines'), ('VE', 'Venezuela, Bolivarian Republic of'), ('VG', 'Virgin Islands, British'), ('VI', 'Virgin Islands, U.S.'), ('VN', 'Viet Nam'), ('VU', 'Vanuatu'), ('WF', 'Wallis and Futuna'), ('WS', 'Samoa'), ('YE', 'Yemen'), ('ZA', 'South Africa'), ('ZM', 'Zambia'), ('ZW', 'Zimbabwe')]),
),
migrations.AlterField(
model_name='provider',
name='email',
field=models.CharField(max_length=254, null=True, blank=True),
),
migrations.AlterField(
model_name='provider',
name='flow',
field=models.CharField(default=b'proforma', help_text=b'One of the available workflows for generating proformas and invoices (see the documentation for more details).', max_length=10, choices=[(b'proforma', 'Proforma'), (b'invoice', 'Invoice')]),
),
migrations.AlterField(
model_name='provider',
name='invoice_series',
field=models.CharField(help_text=b'The series that will be used on every invoice generated by this provider.', max_length=20),
),
migrations.AlterField(
model_name='provider',
name='meta',
field=jsonfield.fields.JSONField(default={}, null=True, blank=True),
),
migrations.AlterField(
model_name='provider',
name='proforma_series',
field=models.CharField(help_text=b'The series that will be used on every proforma generated by this provider.', max_length=20, null=True, blank=True),
),
migrations.AlterField(
model_name='subscription',
name='reference',
field=models.CharField(blank=True, max_length=128, null=True, help_text=b"The subscription's reference in an external system.", validators=[django.core.validators.RegexValidator(regex=b'^[^,]*$', message='Reference must not contain commas.')]),
),
migrations.AlterIndexTogether(
name='customer',
index_together=set([('first_name', 'last_name', 'company')]),
),
migrations.AddField(
model_name='transaction',
name='invoice',
field=models.ForeignKey(blank=True, to='silver.Invoice', null=True, on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='transaction',
name='payment_method',
field=models.ForeignKey(to='silver.PaymentMethod', on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='transaction',
name='proforma',
field=models.ForeignKey(blank=True, to='silver.Proforma', null=True, on_delete=django.db.models.deletion.CASCADE),
),
migrations.AddField(
model_name='paymentmethod',
name='customer',
field=models.ForeignKey(to='silver.Customer', on_delete=django.db.models.deletion.CASCADE),
),
migrations.RemoveField(
model_name='customer',
name='name',
),
]
| 178.109649
| 5,869
| 0.57145
| 4,632
| 40,609
| 4.975389
| 0.24525
| 0.010414
| 0.010718
| 0.014015
| 0.90276
| 0.887356
| 0.866832
| 0.854595
| 0.83815
| 0.829211
| 0
| 0.003992
| 0.154892
| 40,609
| 227
| 5,870
| 178.894273
| 0.667531
| 0.000517
| 0
| 0.575342
| 0
| 0.004566
| 0.483418
| 0.001651
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009132
| false
| 0.004566
| 0.045662
| 0
| 0.068493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8a849ab3af31d370b4f99f8846b63ac2a4d7b30e
| 7,227
|
py
|
Python
|
qa/practicing.py
|
ChristopherOloo/KilimoQAPortal
|
c905a42282bbce70b5477862185ad332185307ce
|
[
"MIT"
] | null | null | null |
qa/practicing.py
|
ChristopherOloo/KilimoQAPortal
|
c905a42282bbce70b5477862185ad332185307ce
|
[
"MIT"
] | null | null | null |
qa/practicing.py
|
ChristopherOloo/KilimoQAPortal
|
c905a42282bbce70b5477862185ad332185307ce
|
[
"MIT"
] | null | null | null |
getCreateClose_object = CloseQuestionVotes.objects.filter(question_to_closing=data).exclude(ended=True).first()
if request.method == 'POST':
close_form = CloseForm_Q(data=request.POST)
if close_form.is_valid():
new_post = close_form.save(commit=False)
formData = close_form.cleaned_data['why_closing']
formData_duplicate_of = close_form.cleaned_data['duplicate_of']
if formData != "DUPLICATE":
if getCreateClose_object:
if formData == getCreateClose_object.why_closing:
# print("Same as Before")
print("First Statement is Excecuting")
new_post.user = request.user
new_post.question_to_closing = data
# createInstance,created = ReviewCloseVotes.objects.get_or_create(question_to_closed=data)
# createInstance.reviewed_by.add(request.user)
# print("Instance Created")
new_post.save()
# SAVE THE INSTANCE FIRST
# https://stackoverflow.com/questions/33838433/save-prohibited-to-prevent-data-loss-due-to-unsaved-related-object
# createInstance.review_of = new_post
# createInstance.save()
getCreateClose_object.how_many_votes_on_Close += 1
getCreateClose_object.save()
# Maybe i can remove these two lines
createInstance.review_of = new_post
createInstance.save()
else:
# print("Save the New Request")
print("Second Statement is Excecuting")
new_post.user = request.user
new_post.question_to_closing = data
createInstance,created = ReviewCloseVotes.objects.get_or_create(question_to_closed=data)
# createInstance.reviewed_by.add(request.user)
# createInstance.reviewed_by.add(request.user)
# createInstance.save()
getCreateClose_object.how_many_votes_on_Close += 1
getCreateClose_object.save()
# print("Instance Created")
new_post.save()
# SAVE THE INSTANCE FIRST
# https://stackoverflow.com/questions/33838433/save-prohibited-to-prevent-data-loss-due-to-unsaved-related-object
# createInstance.review_of = new_post
# createInstance.save()
# Maybe i can remove these two lines
createInstance.review_of = new_post
createInstance.save()
else:
new_post.user = request.user
new_post.question_to_closing = data
createInstance,created = ReviewCloseVotes.objects.get_or_create(question_to_closed=data)
createInstance.reviewed_by.add(request.user)
# createInstance.reviewed_by.add(request.user)
# createInstance.save()
new_post.how_many_votes_on_Close += 1
print("Third Statement is Excecuting")
new_post.save()
# SAVE THE INSTANCE FIRST
# https://stackoverflow.com/questions/33838433/save-prohibited-to-prevent-data-loss-due-to-unsaved-related-object
# createInstance.review_of = new_post
# createInstance.save()
createInstance.review_of = new_post
createInstance.save()
elif formData == "DUPLICATE" and formData_duplicate_of == None:
messages.error(request, "Please Write the Another Question's URL")
print("Please Write the Another Question's URL")
else:
if getCreateClose_object:
if formData == getCreateClose_object.why_closing:
print("Fourth Statement is Excecuting")
new_post.user = request.user
new_post.question_to_closing = data
createInstance,created = ReviewCloseVotes.objects.get_or_create(question_to_closed=data)
createInstance.reviewed_by.add(request.user)
# print("Instance Created")
new_post.save()
# SAVE THE INSTANCE FIRST
# https://stackoverflow.com/questions/33838433/save-prohibited-to-prevent-data-loss-due-to-unsaved-related-object
getCreateClose_object.how_many_votes_on_Close += 1
getCreateClose_object.save()
createInstance.review_of = new_post
createInstance.save()
else:
print("Fifth Statement is Excecuting")
new_post.user = request.user
new_post.question_to_closing = data
createInstance,created = ReviewCloseVotes.objects.get_or_create(question_to_closed=data)
createInstance.reviewed_by.add(request.user)
# createInstance.reviewed_by.add(request.user)
# createInstance.save()
getCreateClose_object.how_many_votes_on_Close += 1
getCreateClose_object.save()
# print("Instance Created")
createInstance.review_of = new_post
new_post.save()
# SAVE THE INSTANCE FIRST
# https://stackoverflow.com/questions/33838433/save-prohibited-to-prevent-data-loss-due-to-unsaved-related-object
createInstance.review_of = new_post
createInstance.save()
else:
print("Sixth Statement is Excecuting")
new_post.user = request.user
new_post.question_to_closing = data
createInstance,created = ReviewCloseVotes.objects.get_or_create(question_to_closed=data)
createInstance.reviewed_by.add(request.user)
# createInstance.reviewed_by.add(request.user)
# createInstance.save()
new_post.how_many_votes_on_Close += 1
# print("Instance Created")
new_post.save()
# SAVE THE INSTANCE FIRST
# https://stackoverflow.com/questions/33838433/save-prohibited-to-prevent-data-loss-due-to-unsaved-related-object
createInstance.review_of = new_post
createInstance.save()
return redirect('qa:questionDetailView', pk=data.id,) # slug=slug)
else:
close_form = CloseForm_Q()
| 53.932836
| 137
| 0.539505
| 652
| 7,227
| 5.757669
| 0.150307
| 0.057805
| 0.063932
| 0.071923
| 0.860948
| 0.847629
| 0.847629
| 0.830048
| 0.817528
| 0.798881
| 0
| 0.012217
| 0.388405
| 7,227
| 133
| 138
| 54.338346
| 0.837104
| 0.235367
| 0
| 0.759494
| 0
| 0
| 0.058341
| 0.003829
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.088608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8ab99fbf84be04753d997e30a676ac4ed303f62e
| 20,632
|
py
|
Python
|
tests/test_defaultnetworkxfactory.py
|
ndexbio/nice-cx-python
|
f8cf207e439b3afcf77c45ea80792968a2258bfb
|
[
"BSD-3-Clause"
] | 5
|
2017-12-01T20:19:46.000Z
|
2022-03-06T19:22:36.000Z
|
tests/test_defaultnetworkxfactory.py
|
ndexbio/nice-cx-python
|
f8cf207e439b3afcf77c45ea80792968a2258bfb
|
[
"BSD-3-Clause"
] | 77
|
2017-11-20T17:53:38.000Z
|
2022-01-26T23:43:34.000Z
|
tests/test_defaultnetworkxfactory.py
|
ndexbio/nice-cx-python
|
f8cf207e439b3afcf77c45ea80792968a2258bfb
|
[
"BSD-3-Clause"
] | 5
|
2017-11-20T17:03:34.000Z
|
2021-04-26T20:40:20.000Z
|
# -*- coding: utf-8 -*-
"""Tests for `DefaultNetworkXFactory` class."""
import os
import unittest
import networkx
import ndex2
from ndex2.exceptions import NDExError
from ndex2.nice_cx_network import DefaultNetworkXFactory
from ndex2.nice_cx_network import NetworkXFactory
from ndex2.nice_cx_network import NiceCXNetwork
# NOTE(review): the message mentions NDEX2_TEST_USER but the skipIf decorator
# below checks NDEX2_TEST_SERVER — confirm which env var is intended.
SKIP_REASON = 'NDEX2_TEST_USER environment variable detected, ' \
              'skipping for integration tests'

# Major version of the installed networkx (e.g. 2 from "2.6.3"); tests use
# this to branch on API differences between networkx 1.x and 2.x.
NETWORKX_MAJOR_VERSION = 0
netx_ver_str = str(networkx.__version__)
# Use str.find(), which returns -1 when no '.' is present. The original used
# str.index(), which raises ValueError instead, so a period-less version
# string would have crashed at import time and the `!= -1` guard was dead.
period_pos = netx_ver_str.find('.')
if period_pos != -1:
    try:
        NETWORKX_MAJOR_VERSION = int(netx_ver_str[0:period_pos])
    except ValueError:
        # Non-numeric leading component; keep the 0 default.
        pass
@unittest.skipIf(os.getenv('NDEX2_TEST_SERVER') is not None, SKIP_REASON)
class TestDefaultNetworkXFactory(unittest.TestCase):
    """Unit tests for `DefaultNetworkXFactory`.

    Converts `NiceCXNetwork` objects (built in code or loaded from the CX
    fixture files below) to networkx graphs and asserts graph attributes,
    node/edge data, and node coordinates for both legacy and default modes.
    """

    # Locations of the CX fixture files shipped under tests/data.
    TEST_DIR = os.path.dirname(__file__)
    WNT_SIGNAL_FILE = os.path.join(TEST_DIR, 'data', 'wntsignaling.cx')
    DARKTHEME_FILE = os.path.join(TEST_DIR, 'data', 'darkthemefinal.cx')
    DARKTHEMENODE_FILE = os.path.join(TEST_DIR, 'data',
                                      'darkthemefinalwithnodevis.cx')
    GLYPICAN_FILE = os.path.join(TEST_DIR, 'data', 'glypican2.cx')

    def setUp(self):
        """Set up test fixtures, if any."""
        pass

    def tearDown(self):
        """Tear down test fixtures, if any."""
        pass

    def test_get_networkx_major_version(self):
        """Verify major-version parsing for valid, empty, and junk inputs."""
        # try with no arg
        res = NetworkXFactory.get_networkx_major_version()
        self.assertTrue(res > 0)

        # try passing none
        res = NetworkXFactory.get_networkx_major_version(networkx_version=None)
        self.assertEqual(0, res)

        # try passing empty string
        res = NetworkXFactory.get_networkx_major_version(networkx_version='')
        self.assertEqual(0, res)

        # try passing string with no period
        res = NetworkXFactory.get_networkx_major_version(networkx_version='12')
        self.assertEqual(0, res)

        # try passing with only period
        res = NetworkXFactory.get_networkx_major_version(networkx_version='.')
        self.assertEqual(0, res)

        # try passing with non numeric value
        res = NetworkXFactory.get_networkx_major_version(networkx_version='fo')
        self.assertEqual(0, res)

        # try passing 1.11
        res = NetworkXFactory.get_networkx_major_version(networkx_version='1.11')
        self.assertEqual(1, res)

        # try passing 12.4.1b1
        res = NetworkXFactory.get_networkx_major_version(networkx_version='12.4.1b1')
        self.assertEqual(12, res)

    def test_none_passed_into_get_graph(self):
        """get_graph(None) must raise NDExError with a specific message."""
        fac = DefaultNetworkXFactory()
        try:
            fac.get_graph(None)
            self.fail('Expected NDExError')
        except NDExError as ne:
            self.assertEqual('input network is None', str(ne))

    def test_empty_network_passed_in_with_various_legacy_modes(self):
        """Empty network: legacymode picks Graph vs MultiGraph; bad value raises."""
        net = NiceCXNetwork()
        fac = DefaultNetworkXFactory()
        g = fac.get_graph(net)
        self.assertTrue(isinstance(g, networkx.MultiGraph))
        self.assertEqual(0, len(g))
        self.assertEqual(0, g.number_of_edges())

        # legacymode=True yields a plain (non-multi) Graph.
        fac = DefaultNetworkXFactory(legacymode=True)
        g = fac.get_graph(net)
        self.assertTrue(isinstance(g, networkx.Graph))
        self.assertEqual(0, len(g))
        self.assertEqual(0, g.number_of_edges())

        fac = DefaultNetworkXFactory(legacymode=False)
        g = fac.get_graph(net)
        self.assertTrue(isinstance(g, networkx.MultiGraph))
        self.assertEqual(0, len(g))
        self.assertEqual(0, g.number_of_edges())

        # legacymode=None behaves like the default (MultiGraph).
        fac = DefaultNetworkXFactory(legacymode=None)
        g = fac.get_graph(net)
        self.assertTrue(isinstance(g, networkx.MultiGraph))
        self.assertEqual(0, len(g))
        self.assertEqual(0, g.number_of_edges())

        # Any non-bool/None value is rejected at construction time.
        try:
            DefaultNetworkXFactory(legacymode='blah')
            self.fail('Expected NDExError')
        except NDExError as ne:
            self.assertEqual('blah not a valid value for '
                             'legacymode parameter', str(ne))

    def test_one_node_no_edge_network(self):
        """Single node, no edges: name attribute carried over to the graph."""
        net = NiceCXNetwork()
        net.create_node('first')
        net.set_name('bob')
        fac = DefaultNetworkXFactory()
        g = fac.get_graph(net)
        self.assertEqual('bob', g.graph['name'])
        self.assertEqual(1, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertTrue(0 in g)
        nodelist = g.nodes(data=True)
        # networkx 2.x NodeDataView is indexable by node id; 1.x returns
        # a list of (id, data) tuples.
        if NETWORKX_MAJOR_VERSION >= 2:
            self.assertEqual('first', nodelist[0]['name'])
        else:
            self.assertEqual('first', nodelist[0][1]['name'])

        # TODO Fix Issue #51
        # network name is not properly set.
        # see https://github.com/ndexbio/ndex2-client/issues/51
        # net_two = ndex2.create_nice_cx_from_networkx(g)
        # self.assertEqual('bob', net_two.get_name())

    def test_one_node_no_edge_network_legacytrue(self):
        """Same single-node conversion under legacymode=True."""
        net = NiceCXNetwork()
        net.create_node('first')
        net.set_name('bob')
        fac = DefaultNetworkXFactory(legacymode=True)
        g = fac.get_graph(net)
        self.assertEqual('bob', g.graph['name'])
        self.assertEqual(1, len(g))
        self.assertEqual(0, g.number_of_edges())
        self.assertTrue(0 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
        else:
            nodelist = g.nodes(data=True)
        self.assertEqual('first', nodelist[0][1]['name'])

    def test_two_node_one_edge_network(self):
        """Two nodes and one edge; default edge interaction is None."""
        net = NiceCXNetwork()
        net.create_node('first')
        net.create_node('second')
        net.create_edge(edge_source=0, edge_target=1)
        net.set_name('bob')
        fac = DefaultNetworkXFactory()
        g = fac.get_graph(net)
        self.assertEqual('bob', g.graph['name'])
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())
        self.assertTrue(0 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
            edgelist = list(g.edges(data=True))
        else:
            nodelist = g.nodes(data=True)
            edgelist = g.edges(data=True)
        self.assertEqual('first', nodelist[0][1]['name'])
        self.assertEqual('second', nodelist[1][1]['name'])
        self.assertEqual(0, edgelist[0][0])
        self.assertEqual(1, edgelist[0][1])
        self.assertEqual(None, edgelist[0][2]['interaction'])

    def test_glypican_network_legacyfalse_and_multigraph_passed_in(self):
        """Load glypican2.cx into a caller-supplied MultiGraph and verify it."""
        net = ndex2.create_nice_cx_from_file(TestDefaultNetworkXFactory
                                             .GLYPICAN_FILE)
        fac = DefaultNetworkXFactory()
        g = fac.get_graph(net, networkx_graph=networkx.MultiGraph())
        self.assertEqual('Glypican 2 network', g.graph['name'])
        self.assertEqual('', g.graph['reference'])
        self.assertEqual('Mirko von Elstermann', g.graph['author'])
        self.assertEqual('Jorge Filmus', g.graph['reviewers'])
        self.assertEqual('glypican_2pathway', g.graph['labels'])
        self.assertEqual('APR-2018', g.graph['version'])
        self.assertEqual('human', g.graph['organism'])
        self.assertEqual('<i>Glypican 2 network</i> was derived from '
                         'the latest BioPAX3 version of the Pathway '
                         'Interaction Database (PID) curated by NCI/Nature. '
                         'The BioPAX was first converted to Extended Binary '
                         'SIF (EBS) by the PAXTools v5 utility. It was then '
                         'processed to remove redundant edges, to add a '
                         '\'directed flow\' layout, and to add a graphic '
                         'style using Cytoscape Visual Properties. This '
                         'network can be found in searches using its original '
                         'PID accession id, present in the \'labels\' '
                         'property.', g.graph['description'])
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())
        self.assertTrue(0 in g)
        self.assertTrue(1 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
            edgelist = list(g.edges(data=True))
        else:
            nodelist = g.nodes(data=True)
            edgelist = g.edges(data=True)
        self.assertEqual('MDK', nodelist[0][1]['name'])
        self.assertEqual('Protein', nodelist[0][1]['type'])
        aliaslist = nodelist[0][1]['alias']
        self.assertEqual(2, len(aliaslist))
        self.assertTrue('uniprot knowledgebase:Q2LEK4' in aliaslist)
        self.assertTrue('uniprot knowledgebase:Q9UCC7' in aliaslist)
        self.assertEqual('GPC2', nodelist[1][1]['name'])
        self.assertEqual('Protein', nodelist[1][1]['type'])
        aliaslist = nodelist[1][1]['alias']
        self.assertEqual(1, len(aliaslist))
        self.assertTrue('uniprot knowledgebase:Q8N158' in aliaslist)
        self.assertEqual(0, edgelist[0][0])
        self.assertEqual(1, edgelist[0][1])
        self.assertEqual('in-complex-with', edgelist[0][2]['interaction'])
        self.assertEqual('false', edgelist[0][2]['directed'])
        # check coordinates
        # NOTE(review): these bounds checks lack abs(); a wildly negative
        # delta would still pass. Consider assertAlmostEqual instead.
        self.assertTrue((g.pos[0][0] + 398.3) < 1.0)
        self.assertTrue((g.pos[0][1] - 70.71) < 1.0)
        self.assertTrue((g.pos[1][0] + 353.49) < 1.0)
        self.assertTrue((g.pos[1][1] - 70.71) < 1.0)

    def test_glypican_network_legacyfalse(self):
        """Load glypican2.cx with the default (non-legacy) factory."""
        net = ndex2.create_nice_cx_from_file(TestDefaultNetworkXFactory
                                             .GLYPICAN_FILE)
        fac = DefaultNetworkXFactory()
        g = fac.get_graph(net)
        self.assertEqual('Glypican 2 network', g.graph['name'])
        self.assertEqual('', g.graph['reference'])
        self.assertEqual('Mirko von Elstermann', g.graph['author'])
        self.assertEqual('Jorge Filmus', g.graph['reviewers'])
        self.assertEqual('glypican_2pathway', g.graph['labels'])
        self.assertEqual('APR-2018', g.graph['version'])
        self.assertEqual('human', g.graph['organism'])
        self.assertEqual('<i>Glypican 2 network</i> was derived from '
                         'the latest BioPAX3 version of the Pathway '
                         'Interaction Database (PID) curated by NCI/Nature. '
                         'The BioPAX was first converted to Extended Binary '
                         'SIF (EBS) by the PAXTools v5 utility. It was then '
                         'processed to remove redundant edges, to add a '
                         '\'directed flow\' layout, and to add a graphic '
                         'style using Cytoscape Visual Properties. This '
                         'network can be found in searches using its original '
                         'PID accession id, present in the \'labels\' '
                         'property.', g.graph['description'])
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())
        self.assertTrue(0 in g)
        self.assertTrue(1 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
            edgelist = list(g.edges(data=True))
        else:
            nodelist = g.nodes(data=True)
            edgelist = g.edges(data=True)
        self.assertEqual('MDK', nodelist[0][1]['name'])
        self.assertEqual('Protein', nodelist[0][1]['type'])
        aliaslist = nodelist[0][1]['alias']
        self.assertEqual(2, len(aliaslist))
        self.assertTrue('uniprot knowledgebase:Q2LEK4' in aliaslist)
        self.assertTrue('uniprot knowledgebase:Q9UCC7' in aliaslist)
        self.assertEqual('GPC2', nodelist[1][1]['name'])
        self.assertEqual('Protein', nodelist[1][1]['type'])
        aliaslist = nodelist[1][1]['alias']
        self.assertEqual(1, len(aliaslist))
        self.assertTrue('uniprot knowledgebase:Q8N158' in aliaslist)
        # Endpoint order is reversed relative to the MultiGraph variant above.
        self.assertEqual(1, edgelist[0][0])
        self.assertEqual(0, edgelist[0][1])
        self.assertEqual('in-complex-with', edgelist[0][2]['interaction'])
        self.assertEqual('false', edgelist[0][2]['directed'])
        # check coordinates
        # NOTE(review): bounds checks lack abs(); see note in the
        # multigraph variant of this test.
        self.assertTrue((g.pos[0][0] + 398.3) < 1.0)
        self.assertTrue((g.pos[0][1] - 70.71) < 1.0)
        self.assertTrue((g.pos[1][0] + 353.49) < 1.0)
        self.assertTrue((g.pos[1][1] - 70.71) < 1.0)

    def test_glypican_network_legacymode_true(self):
        """Load glypican2.cx with legacymode=True (plain Graph result)."""
        net = ndex2.create_nice_cx_from_file(TestDefaultNetworkXFactory
                                             .GLYPICAN_FILE)
        fac = DefaultNetworkXFactory(legacymode=True)
        g = fac.get_graph(net)
        self.assertTrue(isinstance(g, networkx.Graph))
        self.assertEqual('Glypican 2 network', g.graph['name'])
        self.assertEqual('', g.graph['reference'])
        self.assertEqual('Mirko von Elstermann', g.graph['author'])
        self.assertEqual('Jorge Filmus', g.graph['reviewers'])
        self.assertEqual('glypican_2pathway', g.graph['labels'])
        self.assertEqual('APR-2018', g.graph['version'])
        self.assertEqual('human', g.graph['organism'])
        self.assertEqual('<i>Glypican 2 network</i> was derived from '
                         'the latest BioPAX3 version of the Pathway '
                         'Interaction Database (PID) curated by NCI/Nature. '
                         'The BioPAX was first converted to Extended Binary '
                         'SIF (EBS) by the PAXTools v5 utility. It was then '
                         'processed to remove redundant edges, to add a '
                         '\'directed flow\' layout, and to add a graphic '
                         'style using Cytoscape Visual Properties. This '
                         'network can be found in searches using its original '
                         'PID accession id, present in the \'labels\' '
                         'property.', g.graph['description'])
        self.assertEqual(2, len(g))
        self.assertEqual(1, g.number_of_edges())
        self.assertTrue(0 in g)
        self.assertTrue(1 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
            edgelist = list(g.edges(data=True))
        else:
            nodelist = g.nodes(data=True)
            edgelist = g.edges(data=True)
        self.assertEqual('MDK', nodelist[0][1]['name'])
        self.assertEqual('Protein', nodelist[0][1]['type'])
        aliaslist = nodelist[0][1]['alias']
        self.assertEqual(2, len(aliaslist))
        self.assertTrue('uniprot knowledgebase:Q2LEK4' in aliaslist)
        self.assertTrue('uniprot knowledgebase:Q9UCC7' in aliaslist)
        self.assertEqual('GPC2', nodelist[1][1]['name'])
        self.assertEqual('Protein', nodelist[1][1]['type'])
        aliaslist = nodelist[1][1]['alias']
        self.assertEqual(1, len(aliaslist))
        self.assertTrue('uniprot knowledgebase:Q8N158' in aliaslist)
        self.assertEqual(0, edgelist[0][0])
        self.assertEqual(1, edgelist[0][1])
        self.assertEqual('in-complex-with', edgelist[0][2]['interaction'])
        self.assertEqual('false', edgelist[0][2]['directed'])
        # check coordinates
        # NOTE(review): bounds checks lack abs(); see note in the
        # multigraph variant of this test.
        self.assertTrue((g.pos[0][0] + 398.3) < 1.0)
        self.assertTrue((g.pos[0][1] - 70.71) < 1.0)
        self.assertTrue((g.pos[1][0] + 353.49) < 1.0)
        self.assertTrue((g.pos[1][1] - 70.71) < 1.0)

    def test_darktheme_network_legacyfalse(self):
        """darkthemefinal.cx, default mode: 116 edges (multi-edges kept)."""
        net = ndex2.create_nice_cx_from_file(TestDefaultNetworkXFactory
                                             .DARKTHEME_FILE)
        fac = DefaultNetworkXFactory()
        g = fac.get_graph(net)
        self.assertEqual('Dark theme final version', g.graph['name'])
        self.assertTrue('Perfetto L.,' in g.graph['reference'])
        self.assertEqual('Theodora Pavlidou', g.graph['author'])
        self.assertEqual('SIGNOR-EGF', g.graph['labels'])
        self.assertEqual('18-Jan-2019', g.graph['version'])
        self.assertEqual('Human, 9606, Homo sapiens', g.graph['organism'])
        # NOTE(review): 'labels' is asserted twice in the original.
        self.assertEqual('SIGNOR-EGF', g.graph['labels'])
        self.assertTrue('epidermal growth factor' in g.graph['description'])
        self.assertEqual(34, len(g))
        self.assertEqual(116, g.number_of_edges())
        self.assertTrue(1655 in g)
        self.assertTrue(1622 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
            edgelist = list(g.edges(data=True))
        else:
            nodelist = g.nodes(data=True)
            edgelist = g.edges(data=True)
        # Locate the STAT3 node since node order is not guaranteed.
        stat_three_index = -1
        for i in range(0, len(nodelist)):
            if nodelist[i][1]['name'] == 'STAT3':
                stat_three_index = i
                break
        self.assertEqual('STAT3', nodelist[stat_three_index][1]['name'])
        self.assertEqual('protein', nodelist[stat_three_index][1]['type'])
        self.assertEqual('uniprot:P40763',
                         nodelist[stat_three_index][1]['represents'])
        # Locate the 1655 -> 1654 edge; endpoint order may be swapped.
        sixteenfiftyfiveedge = -1
        for i in range(len(edgelist)):
            if edgelist[i][0] == 1655 and edgelist[i][1] == 1654:
                sixteenfiftyfiveedge = i
                break
        self.assertTrue((1655 == edgelist[sixteenfiftyfiveedge][0] and
                         1654 == edgelist[sixteenfiftyfiveedge][1]) or
                        (1654 == edgelist[sixteenfiftyfiveedge][0] and
                         1655 == edgelist[sixteenfiftyfiveedge][1]))
        self.assertEqual('form complex',
                         edgelist[sixteenfiftyfiveedge][2]['interaction'])
        self.assertEqual('true', edgelist[sixteenfiftyfiveedge][2]['directed'])
        self.assertEqual('"pubmed:15284024"',
                         edgelist[sixteenfiftyfiveedge][2]['citation'])
        # check coordinates
        # NOTE(review): bounds checks lack abs(); see note in the
        # glypican multigraph test.
        self.assertTrue((g.pos[1655][0] + 90.96) < 1.0)
        self.assertTrue((g.pos[1655][1] - 145.72) < 1.0)

    def test_darktheme_network_legacytrue(self):
        """darkthemefinal.cx, legacymode=True: multi-edges collapse to 50."""
        net = ndex2\
            .create_nice_cx_from_file(TestDefaultNetworkXFactory
                                      .DARKTHEME_FILE)
        fac = DefaultNetworkXFactory(legacymode=True)
        g = fac.get_graph(net)
        self.assertEqual('Dark theme final version', g.graph['name'])
        self.assertTrue('Perfetto L.,' in g.graph['reference'])
        self.assertEqual('Theodora Pavlidou', g.graph['author'])
        self.assertEqual('SIGNOR-EGF', g.graph['labels'])
        self.assertEqual('18-Jan-2019', g.graph['version'])
        self.assertEqual('Human, 9606, Homo sapiens', g.graph['organism'])
        # NOTE(review): 'labels' is asserted twice in the original.
        self.assertEqual('SIGNOR-EGF', g.graph['labels'])
        self.assertTrue('epidermal growth factor' in g.graph['description'])
        self.assertEqual(34, len(g))
        self.assertEqual(50, g.number_of_edges())
        self.assertTrue(1655 in g)
        self.assertTrue(1622 in g)
        if NETWORKX_MAJOR_VERSION >= 2:
            nodelist = list(g.nodes(data=True))
            edgelist = list(g.edges(data=True))
        else:
            nodelist = g.nodes(data=True)
            edgelist = g.edges(data=True)
        # Locate the STAT3 node since node order is not guaranteed.
        stat_three_index = -1
        for i in range(0, len(nodelist)):
            if nodelist[i][1]['name'] == 'STAT3':
                stat_three_index = i
                break
        self.assertEqual('STAT3', nodelist[stat_three_index][1]['name'])
        self.assertEqual('protein', nodelist[stat_three_index][1]['type'])
        # Legacy mode omits the 'represents' attribute.
        self.assertTrue('represents' not in nodelist[stat_three_index][1])
        # Locate the 1655 -> 1654 edge; endpoint order may be swapped.
        sixteenfiftyfiveedge = -1
        for i in range(len(edgelist)):
            if edgelist[i][0] == 1655 and edgelist[i][1] == 1654:
                sixteenfiftyfiveedge = i
                break
        self.assertTrue((1655 == edgelist[sixteenfiftyfiveedge][0] and
                         1654 == edgelist[sixteenfiftyfiveedge][1]) or
                        (1654 == edgelist[sixteenfiftyfiveedge][0] and
                         1655 == edgelist[sixteenfiftyfiveedge][1]))
        self.assertEqual('form complex',
                         edgelist[sixteenfiftyfiveedge][2]['interaction'])
        self.assertEqual('true', edgelist[sixteenfiftyfiveedge][2]['directed'])
        self.assertEqual('"pubmed:15284024"',
                         edgelist[sixteenfiftyfiveedge][2]['citation'])
        # check coordinates
        # NOTE(review): bounds checks lack abs(); see note in the
        # glypican multigraph test.
        self.assertTrue((g.pos[1655][0] + 90.96) < 1.0)
        self.assertTrue((g.pos[1655][1] - 145.72) < 1.0)
| 42.804979
| 85
| 0.598585
| 2,393
| 20,632
| 5.058086
| 0.118262
| 0.15119
| 0.031395
| 0.023794
| 0.877726
| 0.858559
| 0.834435
| 0.816755
| 0.799323
| 0.783708
| 0
| 0.035115
| 0.272586
| 20,632
| 481
| 86
| 42.893971
| 0.771389
| 0.029517
| 0
| 0.819588
| 0
| 0
| 0.164999
| 0.001401
| 0
| 0
| 0
| 0.002079
| 0.443299
| 1
| 0.033505
| false
| 0.015464
| 0.020619
| 0
| 0.069588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8aba11c1a7d298c41a95402fb5a8a8bd41b47520
| 198
|
py
|
Python
|
sktime/classification/dictionary_based/__init__.py
|
Multivin12/sktime
|
3731c550a9ee708c5b485aeffbbc72ca0a9062bf
|
[
"BSD-3-Clause"
] | 1
|
2020-06-15T04:48:14.000Z
|
2020-06-15T04:48:14.000Z
|
sktime/classification/dictionary_based/__init__.py
|
Multivin12/sktime
|
3731c550a9ee708c5b485aeffbbc72ca0a9062bf
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/classification/dictionary_based/__init__.py
|
Multivin12/sktime
|
3731c550a9ee708c5b485aeffbbc72ca0a9062bf
|
[
"BSD-3-Clause"
] | null | null | null |
# Public API of the dictionary-based classification sub-package.
__all__ = [
    "BOSSIndividual",
    "BOSSEnsemble",
]

# Re-export the concrete classifier implementations.
from sktime.classification.dictionary_based._boss import (
    BOSSEnsemble,
    BOSSIndividual,
)
| 24.75
| 71
| 0.813131
| 19
| 198
| 8.052632
| 0.526316
| 0.20915
| 0.287582
| 0.470588
| 0.797386
| 0.797386
| 0.797386
| 0.797386
| 0
| 0
| 0
| 0
| 0.116162
| 198
| 7
| 72
| 28.285714
| 0.874286
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
76fc90e67c57d91adad15fac39d408639ad2305a
| 30,935
|
py
|
Python
|
Fuzzy_clustering/version3/DatasetManager/create_dataset_for_load_LV.py
|
joesider9/forecasting_library
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
[
"Apache-2.0"
] | null | null | null |
Fuzzy_clustering/version3/DatasetManager/create_dataset_for_load_LV.py
|
joesider9/forecasting_library
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
[
"Apache-2.0"
] | null | null | null |
Fuzzy_clustering/version3/DatasetManager/create_dataset_for_load_LV.py
|
joesider9/forecasting_library
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import joblib, os, logging
from joblib import Parallel, delayed
from workalendar.europe import Greece
from scipy.interpolate import interp2d
def rescale(arr, nrows, ncol):
    """Bilinearly resample a 2-D array to shape (nrows, ncol).

    Both axes are mapped onto the unit interval and the array is
    re-evaluated on an evenly spaced ``nrows x ncol`` grid, matching the
    original ``interp2d(kind="linear")`` behaviour.

    :param arr: 2-D array-like to resample (at least 2x2)
    :param nrows: number of rows in the output
    :param ncol: number of columns in the output
    :return: numpy array of shape (nrows, ncol)
    """
    # scipy.interpolate.interp2d is deprecated and removed in SciPy >= 1.14;
    # RegularGridInterpolator with method='linear' is the supported
    # equivalent for bilinear resampling on a regular grid.
    from scipy.interpolate import RegularGridInterpolator

    n_rows_in, n_cols_in = arr.shape
    interp = RegularGridInterpolator(
        (np.linspace(0, 1, n_rows_in), np.linspace(0, 1, n_cols_in)),
        np.asarray(arr, dtype=float), method='linear')
    # Evaluate on the target grid (row-major 'ij' indexing keeps the
    # output oriented like the input).
    out_rows, out_cols = np.meshgrid(np.linspace(0, 1, nrows),
                                     np.linspace(0, 1, ncol), indexing='ij')
    points = np.stack((out_rows.ravel(), out_cols.ravel()), axis=-1)
    return interp(points).reshape(nrows, ncol)
def stack_2d(X, sample, compress):
    """Append *sample* (2-D or 3-D) to the accumulator array *X*.

    The first call (X with a zero-length leading axis) returns the sample
    itself; the second stacks both along a new leading axis; later calls
    append along the existing leading axis. When *compress* is truthy the
    sample is first rescaled to an 8x8 grid. Samples of any other rank
    leave X unchanged.
    """
    if compress:
        sample = rescale(sample, 8, 8)
    rank = len(sample.shape)
    if rank == 3:
        if X.shape[0] == 0:
            return sample
        if len(X.shape) == 3:
            return np.stack((X, sample))
        return np.vstack((X, sample[np.newaxis, :, :, :]))
    if rank == 2:
        if X.shape[0] == 0:
            return sample
        if len(X.shape) == 2:
            return np.stack((X, sample))
        return np.vstack((X, sample[np.newaxis, :, :]))
    return X
def stack_3d(X, sample):
    """Append *sample* to the accumulator *X* along the leading axis.

    An empty X (zero-length leading axis) is replaced by the sample; a
    sample whose rank differs from X's gains a leading axis before the
    vertical stack; otherwise the two are stacked directly.
    """
    if X.shape[0] == 0:
        return sample
    if len(sample.shape) != len(X.shape):
        return np.vstack((X, sample[np.newaxis]))
    return np.vstack((X, sample))
class dataset_creator_LV():
    def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1, test=False):
        """Collect configuration for building a load dataset from NWP files.

        :param projects_group: project-group name (used in the log file name)
        :param projects: list of project dicts (each read elsewhere via
            '_id' and 'static_data' keys)
        :param data: measurements indexed by timestamp (pandas index with
            strftime support — presumably a DatetimeIndex; confirm at caller)
        :param path_nwp: directory containing the pickled NWP files
        :param nwp_model: NWP model name (prefix of the pickle file names)
        :param nwp_resolution: grid resolution; exactly 0.05 enables
            compression of samples to 8x8
        :param data_variables: NWP variable names to extract
        :param njobs: number of parallel jobs
        :param test: True when building a test dataset
        """
        self.projects = projects
        self.isfortest = test
        self.projects_group = projects_group
        self.data = data
        self.path_nwp = path_nwp
        # Order matters: create_logger() reads path_nwp/projects_group set
        # above, and check_dates() logs through the logger it creates.
        self.create_logger()
        self.check_dates()
        self.nwp_model = nwp_model
        self.nwp_resolution = nwp_resolution
        if self.nwp_resolution == 0.05:
            self.compress = True
        else:
            self.compress = False
        self.njobs = njobs
        self.variables = data_variables
    def create_logger(self):
        """Attach an INFO-level file handler to this module's logger.

        The log file 'log_<projects_group>.log' is opened in append mode in
        the parent directory of path_nwp. Sets ``self.logger``.
        """
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        handler = logging.FileHandler(os.path.join(os.path.dirname(self.path_nwp), 'log_' + self.projects_group + '.log'), 'a')
        handler.setLevel(logging.INFO)
        # create a logging format
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(handler)
    def check_dates(self):
        """Store in ``self.dates`` the calendar days covered by self.data.

        Builds the full day range between the first and last timestamp of
        the data index, then keeps only the days that actually appear in
        the index. Logs the resulting count.
        """
        start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
        end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
        dates = pd.date_range(start_date, end_date)
        data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
        # Drop days inside the range that have no samples in the data.
        dates = [d for d in dates if d in data_dates]
        self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
        self.dates = pd.DatetimeIndex(dates)
def check_empty_nwp(self, nwp, variables):
flag = True
for var in variables:
if nwp[var].shape[0] == 0:
flag = False
break
return flag
    def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, compress):
        """Load the NWP pickle for day `t` and build, per project, a frame
        of hourly spatially-averaged weather features for the next-day
        horizon (t+24h .. t+47h).

        Returns (X, timestamp-string) where X maps project id -> DataFrame.
        """
        X = dict()
        fname = os.path.join(path_nwp, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
        if os.path.exists(fname):
            nwps = joblib.load(fname)
            # Hourly timestamps of the 24-hour forecast horizon, encoded as
            # the keys used inside the pickled nwps dict.
            pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H').strftime(
                '%d%m%y%H%M')
            for project in projects:
                X[project['_id']] = pd.DataFrame()
                areas = project['static_data']['areas']  # NOTE(review): assigned but unused here
                x = pd.DataFrame()
                for date in pdates:
                    # NOTE(review): bare except silently skips any hour whose
                    # key is missing or whose extraction fails — best-effort
                    # by design, but it can also hide real errors.
                    try:
                        nwp = nwps[date]
                        date = pd.to_datetime(date, format='%d%m%y%H%M')
                        if self.check_empty_nwp(nwp, variables):
                            inp = self.create_sample_nwp(date, nwp, lats[project['_id']], longs[project['_id']])
                            x = pd.concat([x, inp])
                    except:
                        continue
                if x.shape[0] > 0:
                    X[project['_id']] = x
                    # Daily temperature extremes over the area-mean series.
                    # NOTE(review): `.keys()` assumes lats[...] is a dict, i.e.
                    # the project defines named areas — confirm for list areas.
                    cols = ['Temp' + '_' + area for area in lats[project['_id']].keys()]
                    X[project['_id']]['Temp_max'] = x[cols].mean(axis=1).max()
                    X[project['_id']]['Temp_min'] = x[cols].mean(axis=1).min()
        print(t.strftime('%d%m%y%H%M'), ' extracted')
        return (X, t.strftime('%d%m%y%H%M'))
    def lats_longs(self):
        """Find one valid NWP sample and derive, per project, the grid row
        (lat) and column (long) indices covering each configured area.

        Returns (lats, longs): project id -> index array, or -> dict of
        area name -> index array when areas are given as a dict.
        """
        lats = dict()
        longs = dict()
        flag = False
        # Probe day files until one contains at least one forecast hour.
        for t in self.dates:
            fname = os.path.join(self.path_nwp, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
            pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=48), freq='H').strftime(
                '%d%m%y%H%M')
            if os.path.exists(fname):
                nwps = joblib.load(fname)
                for date in pdates:
                    try:
                        nwp = nwps[date]
                        flag = True
                        break
                    except:
                        continue
            if flag:
                break
        # NOTE(review): if no file/date ever matched, `nwp` is unbound and the
        # next line raises NameError — consider an explicit error message.
        # Normalise lat to a column vector and long to a row vector.
        if len(nwp['lat'].shape) == 1:
            nwp['lat'] = nwp['lat'][:, np.newaxis]
        if len(nwp['long'].shape) == 1:
            nwp['long'] = nwp['long'][np.newaxis, :]
        for project in self.projects:
            areas = project['static_data']['areas']
            if isinstance(areas, list):
                # Single bounding box: [[lat_min, long_min], [lat_max, long_max]].
                lats[project['_id']] = \
                    (np.where((nwp['lat'][:, 0] >= areas[0][0]) & (nwp['lat'][:, 0] <= areas[1][0])))[0]
                longs[project['_id']] = \
                    (np.where((nwp['long'][0, :] >= areas[0][1]) & (nwp['long'][0, :] <= areas[1][1])))[
                        0]
            else:
                # Dict of named areas, each with its own bounding box.
                lats[project['_id']] = dict()
                longs[project['_id']] = dict()
                for area in sorted(areas.keys()):
                    lats[project['_id']][area] = \
                        (np.where((nwp['lat'][:, 0] >= areas[area][0][0]) & (nwp['lat'][:, 0] <= areas[area][1][0])))[0]
                    longs[project['_id']][area] = \
                        (np.where((nwp['long'][0, :] >= areas[area][0][1]) & (nwp['long'][0, :] <= areas[area][1][1])))[
                            0]
        return lats, longs
    def make_dataset_lv(self):
        """End-to-end dataset creation: gather per-day NWP features (cached
        to a weather CSV), then build and persist the tabular, target and
        LSTM datasets for every project.
        """
        X = dict()
        for project in self.projects:
            X[project['_id']] = pd.DataFrame()
        # Cached weather file name depends on train/test mode.
        if self.isfortest:
            file_nwp = 'weather_data_test.csv'
        else:
            file_nwp = 'weather_data.csv'
        if not os.path.exists(os.path.join(self.projects[0]['static_data']['path_data'], file_nwp)):
            lats, longs = self.lats_longs()
            # NOTE(review): this first call's result is discarded — it looks
            # like a warm-up/sanity run before the parallel map; confirm.
            nwp = self.stack_daily_nwps(self.dates[-1], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects, self.variables,
                                        self.compress)
            # One (X, date-string) tuple per day, computed in parallel.
            nwp_daily = Parallel(n_jobs=self.njobs)(
                delayed(self.stack_daily_nwps)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects, self.variables,
                                               self.compress) for t in self.dates)
            for nwp in nwp_daily:
                for project in self.projects:
                    if nwp[0][project['_id']].shape[0] != 0:
                        X[project['_id']] = pd.concat([X[project['_id']], nwp[0][project['_id']]])
                        self.logger.info('All Inputs stacked for date %s', nwp[1])
            for project in self.projects:
                X[project['_id']].to_csv(os.path.join(project['static_data']['path_data'], file_nwp))
        else:
            # Cached weather features already exist: reload them.
            for project in self.projects:
                X[project['_id']] = pd.read_csv(os.path.join(project['static_data']['path_data'], file_nwp), header=0, index_col=0, parse_dates=True, dayfirst=True)
        for project in self.projects:
            data_path = project['static_data']['path_data']
            if self.isfortest:
                dataset_X, dataset_y, X_3d = self.create_dataset(X[project['_id']], data_path, start_index=300, test=self.isfortest)
                # Drop rows whose target is NaN (and the matching 3-D slices).
                if dataset_y.isna().any().values[0]:
                    dataset_X = dataset_X.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
                    if len(X_3d.shape) > 1:
                        X_3d = np.delete(X_3d, np.where(dataset_y.isna())[0], axis=0)
                    dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
                dataset_X.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_X_test.csv'))
                dataset_y.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_y_test.csv'))
                joblib.dump(X_3d, os.path.join(project['static_data']['path_data'], 'dataset_lstm_test.pickle'))
                self.logger.info('Datasets saved for project %s', project['_id'])
            else:
                # Same flow as the test branch, written to the train filenames.
                dataset_X, dataset_y, X_3d = self.create_dataset(X[project['_id']], data_path, start_index=300, test=self.isfortest)
                if dataset_y.isna().any().values[0]:
                    dataset_X = dataset_X.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
                    if len(X_3d.shape) > 1:
                        X_3d = np.delete(X_3d, np.where(dataset_y.isna())[0], axis=0)
                    dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
                dataset_X.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_X.csv'))
                dataset_y.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_y.csv'))
                joblib.dump(X_3d, os.path.join(project['static_data']['path_data'], 'dataset_lstm.pickle'))
                self.logger.info('Datasets saved for project %s', project['_id'])
def sp_index(self, r):
### Is modified
cal = Greece()
# {'New year','Epiphany','Clean Monday','Independence Day','Good Friday','Easter Saturday','Easter Sunday','Easter Monday','Labour Day','Pentecost','Whit Monday','Assumption of Mary to Heaven','Ohi Day','Christmas Eve'
# ,'Christmas Day','Glorifying Mother of God','Last day of year'}
if cal.is_holiday(r):
sp = 100
else:
if r.dayofweek == 6:
sp = 50
else:
sp = 0
return sp
    def create_dataset(self, nwps, data_path, start_index=9001, test=False):
        """Build (dataset, target, dataset_3d) from measurements and weather.

        dataset: one row per timestamp of engineered tabular features.
        target: the LV load ('lv_load') for each kept timestamp.
        dataset_3d: stacked per-timestamp matrices for an LSTM model.

        On training runs (test=False) the columns are reordered by absolute
        correlation with the target and the ordering is persisted; on test
        runs the saved ordering is applied.

        NOTE(review): mutates self.data in place (adds calendar columns).
        """
        self.data['dayweek'] = self.data.index.dayofweek
        self.data['month'] = self.data.index.month
        self.data['hour'] = self.data.index.hour
        self.data['sp_index'] = [self.sp_index(d) for d in self.data.index]
        # Full column layout; rebuilt identically per row inside the loop.
        col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + [
            'Temp_month', 'Temp_sp_days']
        col += ['SCADA_' + str(i) for i in range(14)]
        col += ['RESForecast_' + str(i) for i in range(14)]
        col += ['TOTAL_' + str(i) for i in range(14)]
        col += ['LoadForecast_' + str(i) for i in range(17)]
        col += ['LoadForecast_' + str(i) for i in range(17, 25)]
        col += ['Temp_max_' + str(i) for i in range(8)]
        col += ['Temp_min_' + str(i) for i in range(8)]
        col += ['sp_index_' + str(i) for i in range(8)]
        dataset = pd.DataFrame(columns=col)
        target = pd.DataFrame(columns=['target'])
        dataset_3d = np.array([])
        nwps_lstm = nwps.copy(deep=True)
        # Add one column per weather variable holding the mean over all the
        # per-area columns whose name contains that variable.
        for var in self.variables:
            if var == 'WS':
                var = 'wind'
            elif var == 'WD':
                var = 'direction'
            elif var == 'Temperature':
                var = 'Temp'
            cols = [col for col in nwps.columns if str.lower(var) in str.lower(col)]
            nwps_lstm[str.lower(var)] = nwps_lstm[cols].mean(axis=1).values
        # Lag sets (hours). NOTE(review): np.arange(190, 181) is EMPTY, so
        # lags1 holds 9 + 4 + 1 = 14 lags, which matches the 14 SCADA_*
        # columns — likely leftover from an earlier arange(181, 190); confirm.
        lags1 = np.hstack(
            [np.arange(48, 57), np.arange(72, 76), np.arange(190, 181), 216])
        lags_curr = np.arange(16, 33)
        lags_pred = np.arange(1, 9)
        lags_days = np.arange(0, 8)
        for date in self.data.index[start_index:]:
            date_inp1 = [date - pd.DateOffset(hours=int(l)) for l in lags1]
            date_curr = [date - pd.DateOffset(hours=int(l)) for l in lags_curr]
            date_pred = [date + pd.DateOffset(hours=int(l)) for l in lags_pred]
            date_days = [date - pd.DateOffset(days=int(l)) for l in lags_days]
            # NOTE(review): the bare except silently drops any timestamp with
            # a missing lag/weather value — best-effort, but hides errors.
            try:
                temp_max = nwps[['Temp_max']].loc[date].values
                # Primary features: calendar, weather, and two temperature
                # interaction terms.
                var_imp = np.hstack((temp_max, self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
                                     nwps.drop(columns=['Temp_max']).loc[date].values,
                                     np.power(self.data['month'].loc[date]*temp_max/12, 3), np.power(self.data['sp_index'].loc[date]*temp_max/100, 3)))
                col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + ['Temp_month', 'Temp_sp_days']
                # Lagged features: measured, forecast and daily aggregates.
                var_unimp = np.hstack((
                    self.data.loc[date_inp1, 'SCADA'].values,
                    self.data.loc[date_inp1, 'APE_net'].values,
                    self.data.loc[date_inp1, 'SCADA'].values + self.data.loc[date_inp1, 'APE_net'].values,
                    self.data.loc[date_curr, 'LoadForecast'].values + self.data.loc[date_curr, 'RESForecast'].values,
                    self.data.loc[date_pred, 'LoadForecast'].values + self.data.loc[date_pred, 'RESForecast'].values,
                    nwps.loc[date_days, 'Temp_max'].values,
                    nwps.loc[date_days, 'Temp_min'].values,
                    [self.sp_index(d) for d in date_days]
                ))
                col += ['SCADA_' + str(i) for i in range(14)]
                col += ['RESForecast_' + str(i) for i in range(14)]
                col += ['TOTAL_' + str(i) for i in range(14)]
                col += ['LoadForecast_' + str(i) for i in range(17)]
                col += ['LoadForecast_' + str(i) for i in range(17, 25)]
                col += ['Temp_max_' + str(i) for i in range(8)]
                col += ['Temp_min_' + str(i) for i in range(8)]
                col += ['sp_index_' + str(i) for i in range(8)]
                # One feature row for the current timestamp, then one per
                # predicted / current / lagged timestamp -> matrix for LSTM.
                temp_max = nwps[['Temp_max']].loc[date].values
                var_3d = np.hstack((self.data.loc[date, 'RESForecast']+self.data.loc[date, 'LoadForecast'],
                                    nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min',
                                               'Temp_athens', 'Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa', 'Temp_patra']].loc[date].values,
                                    self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
                                    np.power(self.data['month'].loc[date] * temp_max / 12, 3),
                                    np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
                for d in date_pred:
                    temp_max = nwps[['Temp_max']].loc[d].values
                    v = np.hstack(
                        (self.data.loc[date, 'RESForecast']+self.data.loc[date, 'LoadForecast'],
                         nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_athens',
                                    'Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa', 'Temp_patra']].loc[d].values,
                         self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
                         np.power(self.data['month'].loc[d] * temp_max / 12, 3),
                         np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
                    var_3d = np.vstack((var_3d, v))
                for d in date_curr:
                    temp_max = nwps[['Temp_max']].loc[d].values
                    v = np.hstack(
                        (self.data.loc[date, 'RESForecast']+self.data.loc[date, 'LoadForecast'],
                         nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_athens',
                                    'Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa', 'Temp_patra']].loc[
                             d].values,
                         self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
                         np.power(self.data['month'].loc[d] * temp_max / 12, 3),
                         np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
                    var_3d = np.vstack((var_3d, v))
                for d in date_inp1:
                    temp_max = nwps[['Temp_max']].loc[d].values
                    v = np.hstack(
                        (self.data.loc[d, 'APE_net']+self.data.loc[d, 'SCADA'],
                         nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_athens', 'Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa', 'Temp_patra']].loc[d].values,
                         self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
                         np.power(self.data['month'].loc[d] * temp_max / 12, 3),
                         np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
                    var_3d = np.vstack((var_3d, v))
            except:
                continue
            inp = np.hstack((var_imp, var_unimp))
            inp1 = pd.DataFrame(inp.reshape(-1, 1).T, index=[date], columns=col)
            targ1 = pd.DataFrame(self.data['lv_load'].loc[date], index=[date], columns=['target'])
            # Keep the row only when both features and target are complete.
            if not inp1.isnull().any(axis=1).values and not targ1.isnull().any().values:
                dataset = pd.concat([dataset, inp1])
                target = pd.concat([target, targ1])
                if dataset_3d.shape[0] == 0:
                    dataset_3d = var_3d
                elif len(dataset_3d.shape) == 2:
                    dataset_3d = np.stack((dataset_3d, var_3d))
                else:
                    dataset_3d = np.vstack((dataset_3d, var_3d[np.newaxis, :, :]))
        if not test:
            # Order columns by |correlation| with the target and persist the
            # ordering for later test-time reuse.
            corr = []
            for f in range(dataset.shape[1]):
                corr.append(np.abs(np.corrcoef(dataset.values[:, f], target.values.ravel())[1, 0]))
            ind = np.argsort(np.array(corr))[::-1]
            columns = dataset.columns[ind]
            dataset = dataset[columns]
            joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
        else:
            ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
            columns = dataset.columns[ind]
            dataset = dataset[columns]
        return dataset, target, dataset_3d
def create_sample_nwp(self, date, nwp, lats, longs):
inp = pd.DataFrame()
for var in sorted(self.variables):
if var in {'WS', 'Flux', 'WD', 'Cloud', 'Temperature'}:
if isinstance(lats, dict) and isinstance(longs, dict):
for area in lats.keys():
X0 = nwp[var][np.ix_(lats[area], longs[area])]
X = np.mean(X0)
if var == 'Flux':
var_name = 'flux'
elif var == 'WS':
var_name = 'wind'
elif var == 'Cloud':
var_name = 'cloud'
elif var == 'Temperature':
var_name = 'Temp'
else:
var_name = 'direction'
col = [var_name + '_'+ area]
inp = pd.concat([inp, pd.DataFrame(X.reshape(-1, 1).T, index=[date], columns=col)], axis=1)
else:
X0 = nwp[var][np.ix_(lats, longs)]
X = np.mean(X0)
if var == 'Flux':
var_name = 'flux'
elif var=='WS':
var_name = 'wind'
elif var == 'Cloud':
var_name = 'cloud'
elif var == 'Temperature':
var_name = 'Temp'
else:
var_name = 'direction'
col = [var_name]
inp = pd.concat([inp, pd.DataFrame(X.reshape(-1, 1).T, index=[date], columns=col)], axis=1)
else:
continue
return inp
    def make_dataset_lv_online(self):
        """Online (inference-time) dataset creation for the most recent day.

        NOTE(review): dataset_X / dataset_y / X_3d are computed and cleaned
        but never returned or saved — the method returns None.  Confirm the
        caller's expectations; this looks unfinished.
        """
        X = dict()
        for project in self.projects:
            X[project['_id']] = pd.DataFrame()
        lats, longs = self.lats_longs()
        # Only the latest available day is processed online.
        nwp = self.stack_daily_nwps_online(self.dates[-1], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects, self.variables,
                                           self.compress)
        for project in self.projects:
            if nwp[0][project['_id']].shape[0] != 0:
                X[project['_id']] = pd.concat([X[project['_id']], nwp[0][project['_id']]])
        for project in self.projects:
            data_path = project['static_data']['path_data']
            dataset_X, dataset_y, X_3d = self.create_dataset_online(X[project['_id']], data_path, start_index=300, test=self.isfortest)
            # Drop rows with NaN targets (and matching 3-D slices).
            if dataset_y.isna().any().values[0]:
                dataset_X = dataset_X.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
                if len(X_3d.shape) > 1:
                    X_3d = np.delete(X_3d, np.where(dataset_y.isna())[0], axis=0)
                dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
    def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, compress):
        """Online twin of `stack_daily_nwps`: same extraction for day `t`.

        NOTE(review): this is currently byte-for-byte the same logic as
        stack_daily_nwps — a candidate for consolidation.
        """
        X = dict()
        fname = os.path.join(path_nwp, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
        if os.path.exists(fname):
            nwps = joblib.load(fname)
            # Forecast horizon t+24h .. t+47h encoded as nwps dict keys.
            pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H').strftime(
                '%d%m%y%H%M')
            for project in projects:
                X[project['_id']] = pd.DataFrame()
                areas = project['static_data']['areas']  # NOTE(review): unused here
                x = pd.DataFrame()
                for date in pdates:
                    # Best-effort: any missing key or failed extraction is skipped.
                    try:
                        nwp = nwps[date]
                        date = pd.to_datetime(date, format='%d%m%y%H%M')
                        if self.check_empty_nwp(nwp, variables):
                            inp = self.create_sample_nwp(date, nwp, lats[project['_id']], longs[project['_id']])
                            x = pd.concat([x, inp])
                    except:
                        continue
                if x.shape[0] > 0:
                    X[project['_id']] = x
                    # Daily extremes of the area-mean temperature series.
                    cols = ['Temp' + '_' + area for area in lats[project['_id']].keys()]
                    X[project['_id']]['Temp_max'] = x[cols].mean(axis=1).max()
                    X[project['_id']]['Temp_min'] = x[cols].mean(axis=1).min()
        print(t.strftime('%d%m%y%H%M'), ' extracted')
        return (X, t.strftime('%d%m%y%H%M'))
def create_dataset_online(self, nwps, data_path, start_index=9001, test=False):
self.data['dayweek'] = self.data.index.dayofweek
self.data['month'] = self.data.index.month
self.data['hour'] = self.data.index.hour
self.data['sp_index'] = [self.sp_index(d) for d in self.data.index]
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + [
'Temp_month', 'Temp_sp_days']
col += ['SCADA_' + str(i) for i in range(14)]
col += ['RESForecast_' + str(i) for i in range(14)]
col += ['TOTAL_' + str(i) for i in range(14)]
col += ['LoadForecast_' + str(i) for i in range(17)]
col += ['LoadForecast_' + str(i) for i in range(17, 25)]
col += ['Temp_max_' + str(i) for i in range(8)]
col += ['Temp_min_' + str(i) for i in range(8)]
col += ['sp_index_' + str(i) for i in range(8)]
dataset = pd.DataFrame(columns=col)
dataset_3d = np.array([])
nwps_lstm = nwps.copy(deep=True)
for var in self.variables:
if var=='WS':
var='wind'
elif var=='WD':
var='direction'
elif var=='Temperature':
var='Temp'
cols = [col for col in nwps.columns if str.lower(var) in str.lower(col)]
nwps_lstm[str.lower(var)] = nwps_lstm[cols].mean(axis=1).values
lags1 = np.hstack(
[np.arange(48, 57), np.arange(72, 76), np.arange(190, 181), 216])
lags_curr = np.arange(16, 33)
lags_pred = np.arange(1, 9)
lags_days = np.arange(0, 8)
for date in self.data.index[start_index:]:
date_inp1 = [date - pd.DateOffset(hours=int(l)) for l in lags1]
date_curr = [date - pd.DateOffset(hours=int(l)) for l in lags_curr]
date_pred = [date + pd.DateOffset(hours=int(l)) for l in lags_pred]
date_days = [date - pd.DateOffset(days=int(l)) for l in lags_days]
try:
temp_max = nwps[['Temp_max']].loc[date].values
var_imp = np.hstack((temp_max, self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
nwps.drop(columns=['Temp_max']).loc[date].values,
np.power(self.data['month'].loc[date]*temp_max/12, 3), np.power(self.data['sp_index'].loc[date]*temp_max/100, 3)))
col = ['Temp','hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + ['Temp_month', 'Temp_sp_days']
var_unimp = np.hstack((
self.data.loc[date_inp1, 'SCADA'].values,
self.data.loc[date_inp1,'APE_net'].values,
self.data.loc[date_inp1, 'SCADA'].values + self.data.loc[date_inp1, 'APE_net'].values,
self.data.loc[date_curr, 'LoadForecast'].values,
self.data.loc[date_pred, 'LoadForecast'].values,
nwps.loc[date_days, 'Temp_max'].values,
nwps.loc[date_days, 'Temp_min'].values,
[self.sp_index(d) for d in date_days]
))
col +=['SCADA_'+str(i) for i in range(14)]
col +=['RESForecast_'+str(i) for i in range(14)]
col +=['TOTAL_'+str(i) for i in range(14)]
col += ['LoadForecast_' + str(i) for i in range(17)]
col += ['LoadForecast_' + str(i) for i in range(17, 25)]
col +=['Temp_max_'+str(i) for i in range(8)]
col +=['Temp_min_'+str(i) for i in range(8)]
col +=['sp_index_'+str(i) for i in range(8)]
temp_max = nwps[['Temp_max']].loc[date].values
var_3d = np.hstack((self.data.loc[date, 'LoadForecast'],
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min',
'Temp_athens','Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa','Temp_patra']].loc[date].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
for d in date_pred:
temp_max = nwps[['Temp_max']].loc[d].values
v = np.hstack(
(self.data.loc[date, 'LoadForecast'],
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_athens',
'Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa','Temp_patra']].loc[d].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
np.power(self.data['month'].loc[d] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
var_3d = np.vstack((var_3d, v))
for d in date_curr:
temp_max = nwps[['Temp_max']].loc[d].values
v = np.hstack(
(self.data.loc[date, 'LoadForecast'],
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_athens',
'Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa', 'Temp_patra']].loc[
d].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
np.power(self.data['month'].loc[d] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
var_3d = np.vstack((var_3d, v))
for d in date_inp1:
temp_max = nwps[['Temp_max']].loc[d].values
v = np.hstack(
(self.data.loc[d, 'APE_net']+self.data.loc[d, 'SCADA'],
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_athens','Temp_thessaloniki', 'Temp_ioannina', 'Temp_larissa','Temp_patra']].loc[d].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
np.power(self.data['month'].loc[d] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
var_3d = np.vstack((var_3d, v))
except:
continue
inp = np.hstack((var_imp, var_unimp))
inp1 = pd.DataFrame(inp.reshape(-1, 1).T, index=[date], columns=col)
if not inp1.isnull().any(axis=1).values:
dataset = pd.concat([dataset, inp1])
target = pd.concat([target, targ1])
if dataset_3d.shape[0]==0:
dataset_3d = var_3d
elif len(dataset_3d.shape)==2:
dataset_3d = np.stack((dataset_3d, var_3d))
else:
dataset_3d = np.vstack((dataset_3d, var_3d[np.newaxis,:,:]))
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset.columns[ind]
dataset = dataset[columns]
return dataset, target, dataset_3d
| 49.103175
| 226
| 0.506223
| 3,894
| 30,935
| 3.86492
| 0.076014
| 0.04412
| 0.014884
| 0.01701
| 0.804784
| 0.796213
| 0.777409
| 0.767641
| 0.751827
| 0.736744
| 0
| 0.020414
| 0.338096
| 30,935
| 630
| 227
| 49.103175
| 0.714593
| 0.011217
| 0
| 0.724528
| 0
| 0
| 0.111115
| 0.004218
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030189
| false
| 0
| 0.011321
| 0
| 0.064151
| 0.003774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a232bb39fadd1014079908d00400a550b62f808
| 6,830
|
py
|
Python
|
generator.py
|
oliverquintana/UUr-cGAN
|
769e6e6c72f91f67efe58b6d68a0c302f8db95bb
|
[
"MIT"
] | 1
|
2022-01-12T02:57:15.000Z
|
2022-01-12T02:57:15.000Z
|
generator.py
|
oliverquintana/UUr-cGAN
|
769e6e6c72f91f67efe58b6d68a0c302f8db95bb
|
[
"MIT"
] | null | null | null |
generator.py
|
oliverquintana/UUr-cGAN
|
769e6e6c72f91f67efe58b6d68a0c302f8db95bb
|
[
"MIT"
] | null | null | null |
from tensorflow.keras.layers import Add
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import BatchNormalization
from tensorflow_addons.layers import InstanceNormalization
from tensorflow.keras.initializers import he_normal
from tensorflow.keras.initializers import RandomNormal
def residual_block(r, f):
    """Residual unit: two 3x3 convs with instance norm, skip-added back
    onto the input, each stage followed by LeakyReLU(0.2).

    r: input tensor; f: number of filters (must match r's channels for Add).
    """
    init = RandomNormal(stddev = 0.02)
    shortcut = r
    out = Conv2D(f, (3,3), padding = 'same', kernel_initializer = init)(r)
    out = InstanceNormalization()(out)
    out = LeakyReLU(alpha = 0.2)(out)
    out = Conv2D(f, (3,3), padding = 'same', kernel_initializer = init)(out)
    out = InstanceNormalization()(out)
    out = Add()([shortcut, out])
    return LeakyReLU(alpha = 0.2)(out)
def build_generator(img_shape = [256, 256, 1], drop = 0.1):
    """Build the UUr-cGAN generator: two chained U-Net-style passes.

    Pass 1: plain conv encoder/decoder with additive skips (g1..g8).
    Pass 2: residual-block encoder/decoder that also skip-adds the
    intermediate activations of pass 1.  Output is a 1-channel tanh image.

    NOTE(review): img_shape uses a mutable list default — harmless here
    since it is never mutated, but a tuple would be safer.
    NOTE(review): Dropout is called with training=True, which keeps dropout
    active at inference (Monte-Carlo-dropout style) — confirm intent.
    """
    input_img = Input(shape = img_shape)
    init = RandomNormal(stddev = 0.02)
    #init = he_normal()

    # Encoder (pass 1): doubling filters, stride-2 downsampling.
    g1 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(input_img)
    g1 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g1)
    g1 = InstanceNormalization()(g1)
    g1 = LeakyReLU(alpha = 0.2)(g1)
    g2 = Conv2D(128, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g1)
    g2 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = init)(g2)
    g2 = InstanceNormalization()(g2)
    g2 = LeakyReLU(alpha = 0.2)(g2)
    g3 = Conv2D(256, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g2)
    g3 = Conv2D(256, (3,3), padding = 'same', kernel_initializer = init)(g3)
    g3 = InstanceNormalization()(g3)
    g3 = LeakyReLU(alpha = 0.2)(g3)
    g4 = Conv2D(512, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g3)
    g4 = Conv2D(512, (3,3), padding = 'same', kernel_initializer = init)(g4)
    g4 = InstanceNormalization()(g4)
    g4 = LeakyReLU(alpha = 0.2)(g4)

    # Bottleneck (pass 1).
    g = Conv2D(1024, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g4)
    g = Conv2D(1024, (3,3), padding = 'same', kernel_initializer = init)(g)
    g = Conv2D(1024, (3,3), padding = 'same', kernel_initializer = init)(g)
    g = InstanceNormalization()(g)
    g = Dropout(drop)(g, training = True)
    g = LeakyReLU(alpha = 0.2)(g)

    # Decoder (pass 1): transposed convs with additive skips from encoder.
    g5 = Conv2DTranspose(512, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g)
    g5 = Add()([g4, g5])
    g5 = Conv2D(256, (3,3), padding = 'same', kernel_initializer = init)(g5)
    g5 = Conv2D(256, (3,3), padding = 'same', kernel_initializer = init)(g5)
    g5 = InstanceNormalization()(g5)
    g5 = LeakyReLU(alpha = 0.2)(g5)
    g6 = Conv2DTranspose(256, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g5)
    g6 = Add()([g3, g6])
    g6 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = init)(g6)
    g6 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = init)(g6)
    g6 = InstanceNormalization()(g6)
    g6 = LeakyReLU(alpha = 0.2)(g6)
    g7 = Conv2DTranspose(128, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g6)
    g7 = Add()([g2, g7])
    g7 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g7)
    g7 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g7)
    g7 = InstanceNormalization()(g7)
    g7 = LeakyReLU(alpha = 0.2)(g7)
    g8 = Conv2DTranspose(64, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g7)
    g8 = Add()([g1, g8])
    g8 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g8)
    g8 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g8)
    g8 = InstanceNormalization()(g8)
    g8 = LeakyReLU(alpha = 0.2)(g8)

    # Encoder (pass 2): residual blocks, reusing pass-1 activations as
    # additive skips (names g1..g4 are deliberately rebound here).
    g1 = residual_block(g8, 64)
    g1 = Add()([g8, g1])
    g1 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g1)
    g1 = InstanceNormalization()(g1)
    g1 = LeakyReLU(alpha = 0.2)(g1)
    g2 = Conv2D(64, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g1)
    g2 = Add()([g7, g2])
    g2 = residual_block(g2, 64)
    g2 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = init)(g2)
    g2 = InstanceNormalization()(g2)
    g2 = LeakyReLU(alpha = 0.2)(g2)
    g3 = Conv2D(128, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g2)
    g3 = Add()([g6, g3])
    g3 = residual_block(g3, 128)
    g3 = Conv2D(256, (3,3), padding = 'same', kernel_initializer = init)(g3)
    g3 = InstanceNormalization()(g3)
    g3 = LeakyReLU(alpha = 0.2)(g3)
    g4 = Conv2D(256, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g3)
    g4 = Add()([g5, g4])
    g4 = residual_block(g4, 256)
    g4 = Conv2D(512, (3,3), padding = 'same', kernel_initializer = init)(g4)
    g4 = InstanceNormalization()(g4)
    g4 = LeakyReLU(alpha = 0.2)(g4)

    # Bottleneck (pass 2).
    g = Conv2D(512, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g4)
    g = residual_block(g, 512)
    g = Conv2D(1024, (3,3), padding = 'same', kernel_initializer = init)(g)
    g = InstanceNormalization()(g)
    g = Dropout(drop)(g, training = True)
    g = LeakyReLU(alpha = 0.2)(g)

    # Decoder (pass 2): residual upsampling path back to full resolution.
    g5 = Conv2DTranspose(512, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g)
    g5 = Add()([g4, g5])
    g5 = residual_block(g5, 512)
    g5 = Conv2D(256, (3,3), padding = 'same', kernel_initializer = init)(g5)
    g5 = InstanceNormalization()(g5)
    g5 = LeakyReLU(alpha = 0.2)(g5)
    g6 = Conv2DTranspose(256, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g5)
    g6 = Add()([g3, g6])
    g6 = residual_block(g6, 256)
    g6 = Conv2D(128, (3,3), padding = 'same', kernel_initializer = init)(g6)
    g6 = InstanceNormalization()(g6)
    g6 = LeakyReLU(alpha = 0.2)(g6)
    g7 = Conv2DTranspose(128, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g6)
    g7 = Add()([g2, g7])
    g7 = residual_block(g7, 128)
    g7 = Conv2D(64, (3,3), padding = 'same', kernel_initializer = init)(g7)
    g7 = InstanceNormalization()(g7)
    g7 = LeakyReLU(alpha = 0.2)(g7)
    g8 = Conv2DTranspose(64, (3,3), strides = (2,2), padding = 'same', kernel_initializer = init)(g7)
    g8 = Add()([g1, g8])
    g8 = residual_block(g8, 64)

    # Output head: 1-channel image in [-1, 1].
    g8 = Conv2D(1, (3,3), padding = 'same', kernel_initializer = init)(g8)
    g8 = InstanceNormalization()(g8)
    g8 = Activation('tanh')(g8)
    model = Model(input_img, g8)
    return model
if __name__ == '__main__':
    # Smoke check: build the generator and print its layer summary.
    model = build_generator()
    model.summary()
| 40.654762
| 102
| 0.630015
| 942
| 6,830
| 4.491507
| 0.076433
| 0.019853
| 0.168754
| 0.277948
| 0.844481
| 0.739305
| 0.739305
| 0.731033
| 0.731033
| 0.731033
| 0
| 0.097339
| 0.196779
| 6,830
| 167
| 103
| 40.898204
| 0.673897
| 0.006589
| 0
| 0.621212
| 0
| 0
| 0.026545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0
| 0.106061
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0a4e4517bb1fe9d0cb05920be504e79c51642ab7
| 107
|
py
|
Python
|
saefportal/users/views/__init__.py
|
harry-consulting/SAEF
|
12ef43bbcc3178b8a988e21c1bef035881cf6e6d
|
[
"BSD-2-Clause"
] | 4
|
2020-12-16T13:14:26.000Z
|
2022-03-26T08:54:12.000Z
|
saefportal/users/views/__init__.py
|
harry-consulting/SAEF
|
12ef43bbcc3178b8a988e21c1bef035881cf6e6d
|
[
"BSD-2-Clause"
] | 1
|
2022-03-26T09:09:04.000Z
|
2022-03-26T09:09:04.000Z
|
saefportal/users/views/__init__.py
|
harry-consulting/SAEF
|
12ef43bbcc3178b8a988e21c1bef035881cf6e6d
|
[
"BSD-2-Clause"
] | 1
|
2020-12-16T13:20:17.000Z
|
2020-12-16T13:20:17.000Z
|
from .admin_view import *
from .login_view import *
from .logout_view import *
from .register_view import *
| 26.75
| 28
| 0.785047
| 16
| 107
| 5
| 0.4375
| 0.5
| 0.525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140187
| 107
| 4
| 28
| 26.75
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0a709c96b640be74506ed0ad1673af33351d2746
| 105
|
py
|
Python
|
Codes/drawtriangle.py
|
JojiJohnson/Python-Basics
|
0107cbf3b85cd73c57fa6474a5febb91cdae216c
|
[
"MIT"
] | null | null | null |
Codes/drawtriangle.py
|
JojiJohnson/Python-Basics
|
0107cbf3b85cd73c57fa6474a5febb91cdae216c
|
[
"MIT"
] | null | null | null |
Codes/drawtriangle.py
|
JojiJohnson/Python-Basics
|
0107cbf3b85cd73c57fa6474a5febb91cdae216c
|
[
"MIT"
] | null | null | null |
# Draw a simple ASCII-art triangle on stdout.
print (" /\ ")
print (" / \ ")
print (" / \ ")
print (" / \ ")
print ("/________\ ")
| 17.5
| 22
| 0.314286
| 5
| 105
| 5
| 0.2
| 1.6
| 1.8
| 1.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 105
| 5
| 23
| 21
| 0.396825
| 0
| 0
| 0.8
| 0
| 0
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
6a54dc667888382e0a2d98bf7437ba1c706709d1
| 110,898
|
py
|
Python
|
test/api_test.py
|
kenrp8/cacahuate
|
58998bf1323a689e28c5eec66b8d94b4a5b5b592
|
[
"MIT"
] | null | null | null |
test/api_test.py
|
kenrp8/cacahuate
|
58998bf1323a689e28c5eec66b8d94b4a5b5b592
|
[
"MIT"
] | null | null | null |
test/api_test.py
|
kenrp8/cacahuate
|
58998bf1323a689e28c5eec66b8d94b4a5b5b592
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from flask import json
from random import choice
from string import ascii_letters
import pika
import urllib.parse
from cacahuate.handler import Handler
from cacahuate.models import Pointer, Execution
from cacahuate.xml import Xml
from cacahuate.node import Form
from cacahuate.jsontypes import SortedMap, Map
from .utils import make_auth, make_pointer, make_user, make_date
from .utils import assert_near_date
# Fixed execution id reused by tests that need a stable identifier.
EXECUTION_ID = '15asbs'
def test_continue_process_asks_for_user(client):
    """An unauthenticated POST to /v1/pointer must get a 401 with a basic
    auth challenge and a descriptive error body."""
    response = client.post('/v1/pointer')
    assert response.status_code == 401
    assert 'WWW-Authenticate' in response.headers
    assert response.headers['WWW-Authenticate'] == 'Basic realm="User Visible Realm"'
    expected_error = {
        'detail': 'You must provide basic authorization headers',
        'where': 'request.authorization',
    }
    assert json.loads(response.data) == {'errors': [expected_error]}
def test_continue_process_requires(client):
    """An empty body must report both execution_id and node_id as missing."""
    juan = make_user('juan', 'Juan')
    headers = {'Content-Type': 'application/json', **make_auth(juan)}
    response = client.post('/v1/pointer', headers=headers, data=json.dumps({}))
    assert response.status_code == 400
    expected_errors = [
        {
            'detail': "'execution_id' is required",
            'code': 'validation.required',
            'where': 'request.body.execution_id',
        },
        {
            'detail': "'node_id' is required",
            'code': 'validation.required',
            'where': 'request.body.node_id',
        },
    ]
    assert json.loads(response.data) == {'errors': expected_errors}
def test_continue_process_asks_living_objects(client):
    ''' the app must validate that the ids sent are real objects '''
    user = make_user('juan', 'Juan')

    headers = {'Content-Type': 'application/json'}
    headers.update(make_auth(user))
    response = client.post('/v1/pointer', headers=headers, data=json.dumps({
        'execution_id': 'verde',
        'node_id': 'nada',
    }))

    assert response.status_code == 400
    assert json.loads(response.data) == {
        'errors': [
            {
                'detail': 'execution_id is not valid',
                'code': 'validation.invalid',
                'where': 'request.body.execution_id',
            },
        ],
    }
def test_continue_process_requires_valid_node(client):
    '''node_id must name a node that exists in the execution's process.'''
    user = make_user('juan', 'Juan')
    execution = Execution(
        process_name='simple.2018-02-19.xml',
    ).save()

    headers = {'Content-Type': 'application/json'}
    headers.update(make_auth(user))
    response = client.post('/v1/pointer', headers=headers, data=json.dumps({
        'execution_id': execution.id,
        'node_id': 'notarealnode',
    }))

    assert response.status_code == 400
    assert json.loads(response.data) == {
        'errors': [
            {
                'detail': 'node_id is not a valid node',
                'code': 'validation.invalid_node',
                'where': 'request.body.node_id',
            },
        ],
    }
def test_continue_process_requires_living_pointer(client):
    '''A valid node can only be continued while it has a live pointer.'''
    user = make_user('juan', 'Juan')
    execution = Execution(
        process_name='simple.2018-02-19.xml',
    ).save()

    headers = {'Content-Type': 'application/json'}
    headers.update(make_auth(user))
    response = client.post('/v1/pointer', headers=headers, data=json.dumps({
        'execution_id': execution.id,
        'node_id': 'mid_node',
    }))

    assert response.status_code == 400
    assert json.loads(response.data) == {
        'errors': [
            {
                'detail': 'node_id does not have a live pointer',
                'code': 'validation.no_live_pointer',
                'where': 'request.body.node_id',
            },
        ],
    }
def test_continue_process_requires_user_hierarchy(client):
    ''' a node whose auth has a filter must be completed by a person matching
    the filter '''
    user = make_user('juan', 'Juan')
    pointer = make_pointer('simple.2018-02-19.xml', 'mid_node')

    headers = {'Content-Type': 'application/json'}
    headers.update(make_auth(user))
    response = client.post('/v1/pointer', headers=headers, data=json.dumps({
        'execution_id': pointer.proxy.execution.get().id,
        'node_id': pointer.node_id,
    }))

    assert response.status_code == 403
    assert json.loads(response.data) == {
        'errors': [{
            'detail': 'Provided user does not have this task assigned',
            'where': 'request.authorization',
        }],
    }
def test_continue_process_requires_data(client):
    '''Even an assigned user must send the forms the node expects.'''
    manager = make_user('juan_manager', 'Juanote')
    pointer = make_pointer('simple.2018-02-19.xml', 'mid_node')
    manager.proxy.tasks.set([pointer])

    headers = {'Content-Type': 'application/json'}
    headers.update(make_auth(manager))
    response = client.post('/v1/pointer', headers=headers, data=json.dumps({
        'execution_id': pointer.proxy.execution.get().id,
        'node_id': pointer.node_id,
    }))

    assert response.status_code == 400
    assert json.loads(response.data) == {
        'errors': [{
            'detail': "form count lower than expected for ref mid_form",
            'where': 'request.body.form_array',
        }],
    }
def test_continue_process(client, mocker, config):
    '''Happy path: an assigned user completes mid_node, the API accepts
    with 202 and queues a 'step' command that the handler can recover.'''
    # keep rabbitmq out of the test: capture the publish call instead
    mocker.patch(
        'pika.adapters.blocking_connection.'
        'BlockingChannel.basic_publish'
    )
    manager = make_user('juan_manager', 'Juanote')
    ptr = make_pointer('simple.2018-02-19.xml', 'mid_node')
    manager.proxy.tasks.set([ptr])
    exc = ptr.proxy.execution.get()
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(manager)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': ptr.node_id,
        'form_array': [
            {
                'ref': 'mid_form',
                'data': {
                    'data': 'yes',
                },
            },
        ],
    }))
    # work is accepted asynchronously
    assert res.status_code == 202
    assert json.loads(res.data) == {
        'data': 'accepted',
    }
    # rabbit is called
    pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.assert_called_once()
    args = pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.call_args[1]
    # the exact message the API is expected to publish
    json_message = {
        'command': 'step',
        'pointer_id': ptr.id,
        'user_identifier': 'juan_manager',
        'input': [Form.state_json('mid_form', [
            {
                "name": "data",
                "type": "text",
                "value": "yes",
                'label': 'data',
                'value_caption': 'yes',
                'state': 'valid',
                'hidden': False,
            },
        ])],
    }
    assert args['exchange'] == ''
    assert args['routing_key'] == config['RABBIT_QUEUE']
    body = json.loads(args['body'])
    assert body == json_message
    # makes a useful call for the handler
    handler = Handler(config)
    pointer, user, inputs = handler.recover_step(json_message)
    assert pointer.id == ptr.id
def test_start_process_requirements(client, mongo, config):
    '''Starting an execution requires, in order: authentication, the
    expected start forms, a process_name, and an existing process.  No
    execution or pointer registry may be created when any check fails.'''
    # first requirement is to have authentication
    res = client.post('/v1/execution', headers={
        'Content-Type': 'application/json',
    }, data=json.dumps({
        'process_name': 'simple',
    }))
    assert res.status_code == 401
    assert 'WWW-Authenticate' in res.headers
    assert res.headers['WWW-Authenticate'] == \
        'Basic realm="User Visible Realm"'
    assert json.loads(res.data) == {
        'errors': [{
            'detail': 'You must provide basic authorization headers',
            'where': 'request.authorization',
        }],
    }
    assert Execution.count() == 0
    # next, validate the form data
    juan = make_user('juan', 'Juan')
    res = client.post('/v1/execution', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'process_name': 'simple',
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [{
            'detail': "form count lower than expected for ref start_form",
            'where': 'request.body.form_array',
        }],
    }
    assert Execution.count() == 0
    juan = make_user('juan', 'Juan')
    # process_name itself is required
    res = client.post('/v1/execution', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data='{}')
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'process_name' is required",
                'where': 'request.body.process_name',
                'code': 'validation.required',
            },
        ],
    }
    # we need an existing process to start
    res = client.post('/v1/execution', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'process_name': 'foo',
    }))
    assert res.status_code == 404
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': 'foo process does not exist',
                'where': 'request.body.process_name',
            },
        ],
    }
    # no registry should be created yet
    assert mongo[config["POINTER_COLLECTION"]].count_documents({}) == 0
def test_start_process(client, mocker, config, mongo):
    '''Happy path for starting a process: 201, one execution with a
    pointer at start_node, a 'step' command published to rabbit, and the
    pointer/execution registries written to mongo.'''
    # keep rabbitmq out of the test: capture the publish call instead
    mocker.patch(
        'pika.adapters.blocking_connection.'
        'BlockingChannel.basic_publish'
    )
    juan = make_user('juan', 'Juan')
    res = client.post('/v1/execution', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'process_name': 'simple',
        'form_array': [{
            'ref': 'start_form',
            'data': {
                'data': 'yes',
            },
        }],
    }))
    assert res.status_code == 201
    # exactly one execution exists, pointing at the first node
    exc = Execution.get_all()[0]
    assert exc.process_name == 'simple.2018-02-19.xml'
    ptr = exc.proxy.pointers.get()[0]
    assert ptr.node_id == 'start_node'
    # rabbit is called with the step command
    pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.assert_called_once()
    args = pika.adapters.blocking_connection.\
        BlockingChannel.basic_publish.call_args[1]
    json_message = {
        'command': 'step',
        'pointer_id': ptr.id,
        'user_identifier': 'juan',
        'input': [Form.state_json('start_form', [
            {
                'label': 'Info',
                'type': 'text',
                'value': 'yes',
                'value_caption': 'yes',
                'name': 'data',
                'state': 'valid',
                'hidden': False,
            },
        ])],
    }
    assert args['exchange'] == ''
    assert args['routing_key'] == config['RABBIT_QUEUE']
    assert json.loads(args['body']) == json_message
    # the published message is enough for the handler to recover the step
    handler = Handler(config)
    # renamed from `input` to avoid shadowing the builtin
    pointer, user, inputs = handler.recover_step(json_message)
    assert pointer.id == ptr.id
    # mongo has a registry
    reg = next(mongo[config["POINTER_COLLECTION"]].find())
    assert_near_date(reg['started_at'])
    assert reg['finished_at'] is None
    assert reg['execution']['id'] == exc.id
    assert reg['node']['id'] == ptr.node_id
    reg = next(mongo[config["EXECUTION_COLLECTION"]].find())
    assert_near_date(reg['started_at'])
    # drop non-deterministic fields before the full comparison
    del reg['started_at']
    del reg['_id']
    assert len(reg['values']['_execution'][0]['id']) > 0
    assert len(reg['values']['_execution'][0]['started_at']) > 0
    del reg['values']['_execution'][0]['id']
    del reg['values']['_execution'][0]['started_at']
    # NOTE: the original literal had 'status': 'ongoing' twice; the
    # duplicate key was silently overwritten and has been removed.
    assert reg == {
        '_type': 'execution',
        'id': exc.id,
        'name': exc.name,
        'description': exc.description,
        'status': 'ongoing',
        'finished_at': None,
        'process_name': 'simple.2018-02-19.xml',
        'state': {
            '_type': ':sorted_map',
            'items': {
                'start_node': {
                    '_type': 'node',
                    'type': 'action',
                    'id': 'start_node',
                    'state': 'unfilled',
                    'comment': '',
                    'actors': {
                        '_type': ':map',
                        'items': {},
                    },
                    'milestone': False,
                    'name': 'Primer paso',
                    'description': 'Resolver una tarea',
                },
                'mid_node': {
                    '_type': 'node',
                    'type': 'action',
                    'id': 'mid_node',
                    'state': 'unfilled',
                    'comment': '',
                    'actors': {
                        '_type': ':map',
                        'items': {},
                    },
                    'milestone': False,
                    'name': 'Segundo paso',
                    'description': 'añadir información',
                },
                'final_node': {
                    '_type': 'node',
                    'type': 'action',
                    'id': 'final_node',
                    'state': 'unfilled',
                    'comment': '',
                    'actors': {
                        '_type': ':map',
                        'items': {},
                    },
                    'milestone': False,
                    'name': '',
                    'description': '',
                },
            },
            'item_order': [
                'start_node',
                'mid_node',
                'final_node',
            ],
        },
        'values': {
            '_execution': [{
                'description': 'A simple process that does nothing',
                'name': 'Simplest process ever started with: yes',
                'process_name': 'simple.2018-02-19.xml',
            }],
        },
        'actors': {},
        'actor_list': [],
    }
def test_validation_requirements(client):
    '''The approval node validates its payload field by field: response,
    inputs (list of objects with a valid ref) and comment.'''
    juan = make_user('juan', 'Juan')
    ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
    exc = ptr.proxy.execution.get()
    juan.proxy.tasks.add(ptr)
    # 'response' is required
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'response' is required",
                'code': 'validation.required',
                'where': 'request.body.response',
            },
        ],
    }
    # a random string is not a valid response value
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': ''.join(choice(ascii_letters) for c in range(10)),
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'response' value invalid",
                'code': 'validation.invalid',
                'where': 'request.body.response',
            },
        ],
    }
    # a rejection additionally requires 'inputs'
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'inputs' is required",
                'code': 'validation.required',
                'where': 'request.body.inputs',
            },
        ],
    }
    # 'inputs' must be a list
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': 'de',
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'inputs' must be a list",
                'code': 'validation.required_list',
                'where': 'request.body.inputs',
            },
        ],
    }
    # an empty list is not enough either
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': [],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'inputs' must be a list",
                'code': 'validation.required_list',
                'where': 'request.body.inputs',
            },
        ],
    }
    # each element of 'inputs' must be an object
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': ['de'],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'inputs.0' must be an object",
                'code': 'validation.required_dict',
                'where': 'request.body.inputs.0',
            },
        ],
    }
    # each input object requires a 'ref'
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': [{
        }],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'inputs.0.ref' is required",
                'code': 'validation.required',
                'where': 'request.body.inputs.0.ref',
            },
        ],
    }
    # the ref must point at an actual input of the execution
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': [{
            'ref': 'de',
        }],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'inputs.0.ref' value invalid",
                'code': 'validation.invalid',
                'where': 'request.body.inputs.0.ref',
            },
        ],
    }
    # a rejection also requires a 'comment'
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': [{
            'ref': 'start_node.juan.0:work.task',
        }],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'comment' is required",
                'code': 'validation.required',
                'where': 'request.body.comment',
            },
        ],
    }
    # and the comment must be a string
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'reject',
        'inputs': [{
            'ref': 'start_node.juan.0:work.task',
        }],
        'comment': [],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': "'comment' must be a str",
                'code': 'validation.invalid',
                'where': 'request.body.comment',
            },
        ],
    }
def test_validation_approval(client, mocker, config):
    ''' the api for an approval: accepting queues a step command whose
    input carries the approval form with a null 'inputs' field '''
    # keep rabbitmq out of the test: capture the publish call instead
    mocker.patch(
        'pika.adapters.blocking_connection.'
        'BlockingChannel.basic_publish'
    )
    juan = make_user('juan', 'Juan')
    ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
    exc = ptr.proxy.execution.get()
    juan.proxy.tasks.add(ptr)
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': 'approval_node',
        'response': 'accept',
        'comment': 'I like the previous work',
    }))
    assert res.status_code == 202
    # rabbit is called
    pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.assert_called_once()
    args = pika.adapters.blocking_connection.BlockingChannel.basic_publish \
        .call_args[1]
    assert args['exchange'] == ''
    assert args['routing_key'] == config['RABBIT_QUEUE']
    assert json.loads(args['body']) == {
        'command': 'step',
        'pointer_id': ptr.id,
        'user_identifier': 'juan',
        'input': [Form.state_json('approval_node', [
            {
                'name': 'response',
                'value': 'accept',
                'value_caption': 'accept',
            },
            {
                'name': 'comment',
                'value': 'I like the previous work',
                'value_caption': 'I like the previous work',
            },
            {
                'name': 'inputs',
                'value': None,
                'value_caption': 'null',
            },
        ])],
    }
def test_validation_reject(client, mocker, config):
    ''' the api for a reject: the queued step command carries the
    rejected refs both as a value and as a JSON value_caption '''
    # keep rabbitmq out of the test: capture the publish call instead
    mocker.patch(
        'pika.adapters.blocking_connection.'
        'BlockingChannel.basic_publish'
    )
    juan = make_user('juan', 'Juan')
    ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
    exc = ptr.proxy.execution.get()
    juan.proxy.tasks.add(ptr)
    res = client.post('/v1/pointer', headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'execution_id': exc.id,
        'node_id': ptr.node_id,
        'response': 'reject',
        'comment': 'I dont like it',
        'inputs': [{
            'ref': 'start_node.juan.0:work.task',
        }],
    }))
    assert res.status_code == 202
    # rabbit is called
    pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.assert_called_once()
    args = pika.adapters.blocking_connection.BlockingChannel.basic_publish \
        .call_args[1]
    assert args['exchange'] == ''
    assert args['routing_key'] == config['RABBIT_QUEUE']
    assert json.loads(args['body']) == {
        'command': 'step',
        'pointer_id': ptr.id,
        'user_identifier': 'juan',
        'input': [Form.state_json('approval_node', [
            {
                'name': 'response',
                'value': 'reject',
                'value_caption': 'reject',
            },
            {
                'name': 'comment',
                'value': 'I dont like it',
                'value_caption': 'I dont like it',
            },
            {
                'name': 'inputs',
                'value': [{
                    'ref': 'start_node.juan.0:work.task',
                }],
                'value_caption': '[{"ref": "start_node.juan.0:work.task"}]',
            },
        ])],
    }
def test_patch_requirements(client, mongo, config):
    '''PATCHing an execution requires an 'inputs' list whose refs exist
    and whose values pass the input's own validation.'''
    juan = make_user('juan', 'Juan')
    ptr = make_pointer('exit_request.2018-03-20.xml', 'requester')
    exc = ptr.proxy.execution.get()
    # seed mongo with the execution state the patch endpoint reads
    mongo[config["EXECUTION_COLLECTION"]].insert_one({
        '_type': 'execution',
        'id': exc.id,
        'state': Xml.load(config, exc.process_name, direct=True).get_state(),
    })
    # mark the requester node as already filled by __system__
    mongo[config['EXECUTION_COLLECTION']].update_one({
        'id': exc.id,
    }, {
        '$set': {
            'state.items.requester.actors': Map([{
                "_type": "actor",
                "forms": [{
                    '_type': 'form',
                    'state': 'valid',
                    'ref': 'exit_form',
                    'inputs': SortedMap([{
                        '_type': 'field',
                        'state': 'valid',
                        'value': 'yes',
                        'name': 'reason',
                    }], key='name').to_json(),
                }],
                "state": "valid",
                "user": {
                    "_type": "user",
                    "identifier": "__system__",
                    "fullname": "System"
                },
            }], key=lambda a: a['user']['identifier']).to_json(),
        },
    })
    # 'inputs' key is required
    res = client.patch('/v1/execution/{}'.format(exc.id), headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'comment': 'I dont like it',
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [{
            'code': 'validation.required',
            'detail': '\'inputs\' is required',
            'where': 'request.body.inputs',
        }],
    }
    # all refs must exist
    res = client.patch('/v1/execution/{}'.format(exc.id), headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'comment': 'I dont like it',
        'inputs': [{
            'ref': 'node_id.form_ref.input_name',
        }],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': 'node node_id not found',
                'code': 'validation.invalid',
                'where': 'request.body.inputs.0.ref',
            },
        ],
    }
    # ref must pass validation if value present
    res = client.patch('/v1/execution/{}'.format(exc.id), headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'comment': 'I dont like it',
        'inputs': [{
            'ref': 'requester.exit_form.reason',
            'value': '',
        }],
    }))
    assert res.status_code == 400
    assert json.loads(res.data) == {
        'errors': [
            {
                'detail': 'value invalid: \'reason\' is required',
                'where': 'request.body.inputs.0.value',
                'code': 'validation.invalid',
            },
        ],
    }
def test_patch_just_invalidate(client, mongo, config, mocker):
    '''A patch without values only invalidates the referenced input: the
    queued 'patch' command carries the fully-qualified ref and no value.'''
    # keep rabbitmq out of the test: capture the publish call instead
    mocker.patch(
        'pika.adapters.blocking_connection.'
        'BlockingChannel.basic_publish'
    )
    juan = make_user('juan', 'Juan')
    ptr = make_pointer('exit_request.2018-03-20.xml', 'requester')
    exc = ptr.proxy.execution.get()
    # seed mongo with the execution state the patch endpoint reads
    mongo[config["EXECUTION_COLLECTION"]].insert_one({
        '_type': 'execution',
        'id': exc.id,
        'state': Xml.load(config, exc.process_name, direct=True).get_state(),
    })
    # mark the requester node as already filled by juan
    mongo[config['EXECUTION_COLLECTION']].update_one({
        'id': exc.id,
    }, {
        '$set': {
            'state.items.requester.actors': Map([{
                "_type": "actor",
                "forms": [{
                    '_type': 'form',
                    'state': 'valid',
                    'ref': 'exit_form',
                    'inputs': SortedMap([{
                        '_type': 'field',
                        'state': 'valid',
                        'value': 'yes',
                        'name': 'reason',
                    }], key='name').to_json(),
                }],
                "state": "valid",
                "user": {
                    "_type": "user",
                    "identifier": "juan",
                    "fullname": "System"
                },
            }], key=lambda a: a['user']['identifier']).to_json(),
        },
    })
    res = client.patch('/v1/execution/{}'.format(exc.id), headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'comment': 'a comment',
        'inputs': [{
            'ref': 'requester.exit_form.reason',
        }],
    }))
    assert res.status_code == 202
    # message is queued
    pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.assert_called_once()
    args = pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.call_args[1]
    # the short ref is expanded to node.user.index:form.input
    json_message = {
        'command': 'patch',
        'execution_id': exc.id,
        'comment': 'a comment',
        'inputs': [{
            'ref': 'requester.juan.0:exit_form.reason',
        }],
        'user_identifier': 'juan',
    }
    assert args['exchange'] == ''
    assert args['routing_key'] == config['RABBIT_QUEUE']
    body = json.loads(args['body'])
    assert body == json_message
def test_patch_set_value(client, mongo, config, mocker):
    '''A patch with values queues a 'patch' command that carries each
    expanded ref along with its new value and value_caption.'''
    # keep rabbitmq out of the test: capture the publish call instead
    mocker.patch(
        'pika.adapters.blocking_connection.'
        'BlockingChannel.basic_publish'
    )
    juan = make_user('juan', 'Juan')
    ptr = make_pointer('exit_request.2018-03-20.xml', 'requester')
    exc = ptr.proxy.execution.get()
    # seed mongo with the execution state the patch endpoint reads
    mongo[config["EXECUTION_COLLECTION"]].insert_one({
        '_type': 'execution',
        'id': exc.id,
        'state': Xml.load(config, exc.process_name, direct=True).get_state(),
    })
    # mark the requester node as filled by juan with two forms
    mongo[config['EXECUTION_COLLECTION']].update_one({
        'id': exc.id,
    }, {
        '$set': {
            'state.items.requester.actors': Map([{
                "_type": "actor",
                "forms": [
                    {
                        '_type': 'form',
                        'state': 'valid',
                        'ref': 'exit_form',
                        'inputs': SortedMap([{
                            '_type': 'field',
                            'state': 'valid',
                            'value': 'want to pee',
                            'name': 'reason',
                        }], key='name').to_json(),
                    },
                    {
                        '_type': 'form',
                        'state': 'valid',
                        'ref': 'code_form',
                        'inputs': SortedMap([{
                            '_type': 'field',
                            'state': 'valid',
                            'value': 'kadabra',
                            'name': 'code',
                        }], key='name').to_json(),
                    },
                ],
                "state": "valid",
                "user": {
                    "_type": "user",
                    "identifier": "juan",
                    "fullname": "System"
                },
            }], key=lambda a: a['user']['identifier']).to_json(),
        },
    })
    res = client.patch('/v1/execution/{}'.format(exc.id), headers={**{
        'Content-Type': 'application/json',
    }, **make_auth(juan)}, data=json.dumps({
        'comment': 'pee is not a valid reason',
        'inputs': [
            {
                'ref': 'requester.exit_form.reason',
                'value': 'am hungry',
            },
            {
                'ref': 'requester.code_form.code',
                'value': 'alakazam',
            },
        ],
    }))
    assert res.status_code == 202
    # message is queued
    pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.assert_called_once()
    args = pika.adapters.blocking_connection.BlockingChannel.\
        basic_publish.call_args[1]
    # refs are expanded and each form keeps its own index (0 and 1)
    json_message = {
        'command': 'patch',
        'execution_id': exc.id,
        'comment': 'pee is not a valid reason',
        'inputs': [
            {
                'ref': 'requester.juan.0:exit_form.reason',
                'value': 'am hungry',
                'value_caption': 'am hungry',
            },
            {
                'ref': 'requester.juan.1:code_form.code',
                'value': 'alakazam',
                'value_caption': 'alakazam',
            },
        ],
        'user_identifier': 'juan',
    }
    assert args['exchange'] == ''
    assert args['routing_key'] == config['RABBIT_QUEUE']
    body = json.loads(args['body'])
    assert body == json_message
def test_list_processes(client):
    '''The process listing includes the simple process along with its
    start form metadata.'''
    response = client.get('/v1/process')
    body = json.loads(response.data)

    # pick the 'simple' process out of the listing
    simple = [xml for xml in body['data'] if xml['id'] == 'simple'][0]

    assert response.status_code == 200
    assert simple == {
        'id': 'simple',
        'version': '2018-02-19',
        'author': 'categulario',
        'date': '2018-02-19',
        'name': 'Simplest process ever',
        'description': 'A simple process that does nothing',
        'versions': ['2018-02-19'],
        'form_array': [
            {
                'ref': 'start_form',
                'inputs': [
                    {
                        'type': 'text',
                        'name': 'data',
                        'required': True,
                        'label': 'Info',
                    },
                ],
            },
        ],
    }
def test_list_processes_multiple(client):
    '''A process with a multiple-copy form exposes the 'multiple' range
    in its form_array.'''
    res = client.get('/v1/process')
    body = json.loads(res.data)
    # pick the 'form-multiple' process out of the listing
    exit_req = list(filter(
        lambda xml: xml['id'] == 'form-multiple', body['data']
    ))[0]
    assert res.status_code == 200
    assert exit_req == {
        'id': 'form-multiple',
        'version': '2018-04-08',
        'author': 'categulario',
        'date': '2018-04-08',
        'name': 'Con un formulario múltiple',
        'description':
            'Este proceso tiene un formulario que puede enviar muchas copias',
        'versions': ['2018-04-08'],
        'form_array': [
            {
                'ref': 'single_form',
                'inputs': [
                    {
                        'type': 'text',
                        'name': 'name',
                        'required': True,
                        'label': 'Single Form',
                    },
                ],
            },
            {
                'ref': 'multiple_form',
                'multiple': '1-10',
                'inputs': [
                    {
                        'type': 'text',
                        'name': 'phone',
                        'required': True,
                        'label': 'Multi Form',
                    },
                ],
            },
        ],
    }
def test_read_process(client):
    '''Reading a process: an explicit version, the implicit latest
    version, and a 404 for an unknown process.'''
    # explicit version
    response = client.get('/v1/process/oldest?version=2018-02-14')
    payload = json.loads(response.data)

    assert response.status_code == 200
    assert payload['data']['name'] == 'Oldest process'
    assert payload['data']['version'] == '2018-02-14'

    # no version given: latest wins
    response = client.get('/v1/process/oldest')
    payload = json.loads(response.data)

    assert response.status_code == 200
    assert payload['data']['name'] == 'Oldest process v2'
    assert payload['data']['version'] == '2018-02-17'

    # unknown process
    response = client.get('/v1/process/prueba')
    payload = json.loads(response.data)

    assert response.status_code == 404
    assert payload['errors'][0]['detail'] == 'prueba process does not exist'
def test_list_activities_requires(client):
    '''The activity listing rejects unauthenticated requests.'''
    response = client.get('/v1/activity')

    assert response.status_code == 401
def test_list_activities(client):
    '''Given 4 activities, two for the current user and two for
    another, list only the two belonging to him or her'''
    user = make_user('juan', 'Juan')

    # an execution the user has nothing to do with
    Execution(
        process_name='simple.2018-02-19.xml',
    ).save()

    # an execution the user acts in
    owned = Execution(
        process_name='simple.2018-02-19.xml',
    ).save()
    owned.proxy.actors.add(user)
    owned.save()

    response = client.get('/v1/activity', headers=make_auth(user))

    assert response.status_code == 200
    assert json.loads(response.data) == {
        'data': [
            owned.to_json(include=['*', 'execution']),
        ],
    }
def test_data_mix(mongo, client, config):
    '''/v1/inbox joins executions with their current pointer and returns
    them newest first (by started_at).'''
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to ptrs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    res = client.get('/v1/inbox')
    ans = json.loads(res.data)
    assert res.status_code == 200
    # newest execution first
    assert ans == {
        "data": [
            exec_04_json,
            exec_03_json,
            exec_02_json,
            exec_01_json,
        ],
    }
def test_data_mix_pagination(mongo, client, config):
    '''/v1/inbox honors offset and limit query params over the
    newest-first ordering.'''
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to ptrs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    # skip the newest, take one: the second-newest execution
    res = client.get('/v1/inbox?offset=1&limit=1')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_03_json,
        ],
    }
def test_data_mix_pointer_unique(mongo, client, config):
    '''When an execution has several pointer registries, /v1/inbox
    attaches only the most recent one.'''
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Pointer collection: an older, already-passed pointer plus the live one
    mongo[config["POINTER_COLLECTION"]].insert_many([
        {
            # NOTE(review): this timestamp looks malformed
            # ('2010-08-21-01T...') — presumably intentional test data for
            # the older pointer; confirm.
            'started_at': '2010-08-21-01T21:45:00+00:00',
            'execution': {
                'id': ptr_01_json['execution']['id'],
            },
            'node': {
                'id': 'final_node',
            },
            'process_id': 'simple.2018-02-19.xml',
        },
        ptr_01_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    # set started_at to ptrs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
    ])
    # clean pointers
    ptr_01_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    res = client.get('/v1/inbox')
    ans = json.loads(res.data)
    assert res.status_code == 200
    # only the newest pointer is attached
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
def test_data_mix_pointerless(mongo, client, config):
    '''An execution with no pointer registry still appears in /v1/inbox,
    just without a 'pointer' key.'''
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # No pointers in collection
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    # set started_at to ptrs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
    ])
    # clean pointers
    ptr_01_json.pop('execution')
    res = client.get('/v1/inbox')
    ans = json.loads(res.data)
    assert res.status_code == 200
    # execution is listed as-is, with no pointer attached
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
def test_data_mix_filter_sort(mongo, client, config):
    """Exercise the ``sort`` query parameter of /v1/inbox.

    Covers: bare key, explicit ASCENDING/DESCENDING, nested
    ``pointer.started_at`` keys, an unknown sort order token (falls back
    to ascending), and an empty ``sort`` parameter (default ordering).
    """
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs (deliberately out of order to make the
    # pointer.started_at sort observable)
    ptr_01_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:48:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # set test_key values used by the simple-sort assertions below
    exec_01_json['test_key'] = 'D'
    exec_02_json['test_key'] = 'A'
    exec_03_json['test_key'] = 'C'
    exec_04_json['test_key'] = 'B'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers (drop circular execution references)
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    # simple sort (no order token defaults to ascending)
    res = client.get('/v1/inbox?sort=test_key')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
            exec_04_json,
            exec_03_json,
            exec_01_json,
        ],
    }
    # ascending
    res = client.get('/v1/inbox?sort=test_key,ASCENDING')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
            exec_04_json,
            exec_03_json,
            exec_01_json,
        ],
    }
    # descending
    res = client.get('/v1/inbox?sort=test_key,DESCENDING')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_01_json,
            exec_03_json,
            exec_04_json,
            exec_02_json,
        ],
    }
    # nested key: pointer.started_at
    res = client.get('/v1/inbox?sort=pointer.started_at')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
            exec_04_json,
            exec_01_json,
            exec_03_json,
        ],
    }
    res = client.get('/v1/inbox?sort=pointer.started_at,DESCENDING')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_03_json,
            exec_01_json,
            exec_04_json,
            exec_02_json,
        ],
    }
    # invalid order token is ignored and ascending is used
    res = client.get('/v1/inbox?sort=test_key,RANDOM')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
            exec_04_json,
            exec_03_json,
            exec_01_json,
        ],
    }
    # empty sort parameter falls back to the default ordering
    res = client.get('/v1/inbox?sort')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_04_json,
            exec_03_json,
            exec_02_json,
            exec_01_json,
        ],
    }
def test_data_mix_filter_pointer_key(mongo, client, config):
    """Inbox can be filtered by a nested pointer key
    (``pointer.node_id``); only the matching execution is returned."""
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
    ])
    # clean pointers (drop circular execution references)
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    res = client.get('/v1/inbox?pointer.node_id=mid_node')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
def test_data_mix_filter_exclude_pointer_key(mongo, client, config):
    """``exclude=pointer.node_id`` removes that nested key from every
    returned execution's pointer while keeping the rest of the data."""
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
    ])
    # clean pointers: also drop node_id to mirror the exclusion
    ptr_01_json.pop('execution')
    ptr_01_json.pop('node_id')
    ptr_02_json.pop('execution')
    ptr_02_json.pop('node_id')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    res = client.get('/v1/inbox?exclude=pointer.node_id')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
            exec_01_json,
        ],
    }
def test_data_mix_filter_include_pointer_key(mongo, client, config):
    """``include=pointer.node_id`` projects the response down to only
    that nested key for each execution."""
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
    ])
    # rebuild expectations: only the included key survives
    ptr_01_json = {'node_id': ptr_01_json.get('node_id')}
    ptr_02_json = {'node_id': ptr_02_json.get('node_id')}
    # set pointers in executions
    exec_01_json = {'pointer': ptr_01_json}
    exec_02_json = {'pointer': ptr_02_json}
    res = client.get('/v1/inbox?include=pointer.node_id')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
            exec_01_json,
        ],
    }
def test_data_mix_filter_user_identifier(mongo, client, config):
    """``user_identifier`` filters the inbox to executions linked to the
    user via either an assigned pointer (task) or an activity; an
    unknown user yields an empty result, and the filter combines with
    other query parameters."""
    juan = make_user('user', 'User')
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    # set some tasks to user
    juan.proxy.tasks.set([ptr_02])
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    # set some activities to user
    juan.proxy.activities.set([exec_03])
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers (drop circular execution references)
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    res = client.get(f'/v1/inbox?user_identifier={juan.identifier}')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_03_json,
            exec_02_json,
        ],
    }
    # user not found
    res = client.get('/v1/inbox?user_identifier=not_an_user')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
        ],
    }
    # additional query
    res = client.get('/v1/inbox?user_identifier={}&process_name={}'.format(
        juan.identifier, 'simple.2018-02-19.xml',
    ))
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_02_json,
        ],
    }
    # additional query fails
    res = client.get('/v1/inbox?user_identifier={}&process_name={}'.format(
        juan.identifier, 'not_a_process',
    ))
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
        ],
    }
def test_data_mix_filter_user_identifier_executionless(mongo, client, config):
    """A valid user with no assigned tasks or activities gets an empty
    inbox, even though other users' data exists in both collections."""
    juan = make_user('juan', 'Juan')
    jorge = make_user('jorge', 'Jorge')
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    # set some tasks to user
    juan.proxy.tasks.set([ptr_02])
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    # set some activities to user
    juan.proxy.activities.set([exec_03])
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers (drop circular execution references)
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    # a valid user without assignments must get an empty inbox
    res = client.get(f'/v1/inbox?user_identifier={jorge.identifier}')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [],
    }
def test_data_mix_filter_actor_identifier(mongo, client, config):
    """``actor_identifier`` filters the inbox by the actors recorded on
    each execution document; unknown actors yield an empty result and
    the filter combines with other query parameters."""
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # add actors to executions (exec_04 deliberately has none)
    exec_01_json['actors'] = {
        'last_node': 'foo',
        'mid_node': 'bar',
        'first_node': 'zas',
    }
    exec_01_json['state'] = {'item_order': [
        'first_node',
        'mid_node',
        'last_node',
    ]}
    exec_02_json['actors'] = {
        'first_node': 'mine',
    }
    exec_02_json['state'] = {'item_order': [
        'first_node',
        'mid_node',
        'last_node',
    ]}
    exec_03_json['actors'] = {
        'mid_node': 'foo',
    }
    exec_03_json['state'] = {'item_order': [
        'first_node',
        'mid_node',
        'last_node',
    ]}
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers (drop circular execution references)
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    res = client.get('/v1/inbox?actor_identifier=foo')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_03_json,
            exec_01_json,
        ],
    }
    # user not found
    res = client.get('/v1/inbox?actor_identifier=not_an_user')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
        ],
    }
    # additional query
    res = client.get(
        '/v1/inbox?actor_identifier=foo&process_name=simple.2018-02-19.xml')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
    # additional query fails
    res = client.get(
        '/v1/inbox?actor_identifier=zas&process_name=not_a_process')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
        ],
    }
def test_data_mix_filter_json(mongo, client, config):
    """A filter value may be a URL-encoded JSON Mongo operator document
    (here ``{"$in": [...]}`` on ``process_name``)."""
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    exec_02 = ptr_02.proxy.execution.get()
    exec_03 = ptr_03.proxy.execution.get()
    exec_04 = ptr_04.proxy.execution.get()
    exec_01_json = exec_01.to_json()
    exec_02_json = exec_02.to_json()
    exec_03_json = exec_03.to_json()
    exec_04_json = exec_04.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    exec_03_json['started_at'] = '2018-04-01T21:47:00+00:00'
    exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
        exec_02_json.copy(),
        exec_03_json.copy(),
        exec_04_json.copy(),
    ])
    # clean pointers (drop circular execution references)
    ptr_01_json.pop('execution')
    ptr_02_json.pop('execution')
    ptr_03_json.pop('execution')
    ptr_04_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    exec_02_json['pointer'] = ptr_02_json
    exec_03_json['pointer'] = ptr_03_json
    exec_04_json['pointer'] = ptr_04_json
    # build a $in query and URL-encode it for the query string
    json_string = json.dumps({
        '$in': [
            'simple.2018-02-19.xml',
            'validation.2018-05-09.xml',
        ],
    })
    json_url_format = urllib.parse.quote(json_string)
    res = client.get(f'/v1/inbox?process_name={json_url_format}')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_04_json,
            exec_02_json,
            exec_01_json,
        ],
    }
def test_data_mix_filter_include(mongo, client, config):
    """``include`` with top-level keys projects each inbox item down to
    exactly those keys."""
    juan = make_user('user', 'User')
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    juan.proxy.tasks.set([ptr_01])
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    juan.proxy.activities.set([exec_01])
    exec_01_json = exec_01.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
    ])
    # clean pointers (drop circular execution reference)
    ptr_01_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    # expectation keeps only the included keys
    exec_01_json = {
        item: exec_01_json[item] for item in ['name', 'process_name']
    }
    res = client.get('/v1/inbox?include=name,process_name')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
def test_data_mix_filter_exclude(mongo, client, config):
    """``exclude`` with top-level keys drops exactly those keys from
    each inbox item."""
    juan = make_user('user', 'User')
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    juan.proxy.tasks.set([ptr_01])
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    juan.proxy.activities.set([exec_01])
    exec_01_json = exec_01.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
    ])
    # clean pointers (drop circular execution reference)
    ptr_01_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    # expectation drops the excluded keys
    exec_01_json.pop('name')
    exec_01_json.pop('process_name')
    res = client.get('/v1/inbox?exclude=name,process_name')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
def test_data_mix_filter_include_exlcude(mongo, client, config):
    """When both ``include`` and ``exclude`` are given, ``include`` wins
    (the expectation keeps both included keys even though one is also
    excluded).

    NOTE(review): the function name misspells "exclude" as "exlcude";
    left unchanged to preserve the test's public identifier.
    """
    juan = make_user('user', 'User')
    # Create pointers
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    juan.proxy.tasks.set([ptr_01])
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    # set started_at to ptrs
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Pointer collection
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
    ])
    # Create executions
    exec_01 = ptr_01.proxy.execution.get()
    juan.proxy.activities.set([exec_01])
    exec_01_json = exec_01.to_json()
    # set started_at to execs
    exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    # Execution collection
    mongo[config["EXECUTION_COLLECTION"]].insert_many([
        exec_01_json.copy(),
    ])
    # clean pointers (drop circular execution reference)
    ptr_01_json.pop('execution')
    # set pointers in executions
    exec_01_json['pointer'] = ptr_01_json
    # expectation keeps only the included keys
    exec_01_json = {
        item: exec_01_json[item] for item in ['name', 'process_name']
    }
    res = client.get('/v1/inbox?include=name,process_name&exclude=name')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            exec_01_json,
        ],
    }
def test_logs_all(mongo, client, config):
    """/v1/log returns one entry per execution — the most recent pointer
    of each — with datetimes serialized as ISO-8601 strings, sorted by
    started_at descending (per the asserted output below)."""
    mongo[config["POINTER_COLLECTION"]].insert_many([
        # EXECUTION_ID gets three pointers; only the latest (21:46,
        # mid_node) must appear in the response
        {
            'started_at': datetime(2018, 4, 1, 21, 45),
            'finished_at': None,
            'execution': {
                'id': EXECUTION_ID,
            },
            'node': {
                'id': 'first_node',
            },
        },
        {
            'started_at': datetime(2018, 4, 1, 21, 46),
            'finished_at': None,
            'execution': {
                'id': EXECUTION_ID,
            },
            'node': {
                'id': 'mid_node',
            },
        },
        # a second execution with a single pointer
        {
            'started_at': datetime(2018, 4, 1, 21, 45),
            'finished_at': None,
            'execution': {
                'id': 'xxxxffff',
            },
            'node': {
                'id': 'mid_node',
            },
        },
        {
            'started_at': datetime(2018, 4, 1, 21, 44),
            'finished_at': None,
            'execution': {
                'id': EXECUTION_ID,
            },
            'node': {
                'id': 'another_node',
            },
        },
    ])
    res = client.get('/v1/log')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            {
                'started_at': '2018-04-01T21:46:00+00:00',
                'finished_at': None,
                'execution': {
                    'id': EXECUTION_ID,
                },
                'node': {
                    'id': 'mid_node',
                },
            },
            {
                'started_at': '2018-04-01T21:45:00+00:00',
                'finished_at': None,
                'execution': {
                    'id': 'xxxxffff',
                },
                'node': {
                    'id': 'mid_node',
                },
            },
        ],
    }
def test_logs_filter_user(mongo, client, config):
    """/v1/log filtered by ``user_identifier`` returns only the pointers
    assigned to that user, newest first (ptr_03 is unassigned and must
    not appear)."""
    juan = make_user('user', 'User')
    ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
    ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
    ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
    juan.proxy.tasks.set([ptr_01, ptr_02, ptr_04])
    ptr_01_json = ptr_01.to_json(include=['*', 'execution'])
    ptr_02_json = ptr_02.to_json(include=['*', 'execution'])
    ptr_03_json = ptr_03.to_json(include=['*', 'execution'])
    ptr_04_json = ptr_04.to_json(include=['*', 'execution'])
    # set started_at to ptrs (ptr_03 keeps its default; it is filtered
    # out anyway)
    ptr_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
    ptr_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
    ptr_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
    mongo[config["POINTER_COLLECTION"]].insert_many([
        ptr_01_json.copy(),
        ptr_02_json.copy(),
        ptr_03_json.copy(),
        ptr_04_json.copy(),
    ])
    res = client.get('/v1/log?user_identifier={}'.format(juan.identifier))
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            ptr_04_json,
            ptr_02_json,
            ptr_01_json,
        ],
    }
def test_logs_filter_user_invalid(mongo, client, config):
    """Filtering the log by an unknown user identifier yields no rows,
    even though pointer documents exist."""
    pointers = [
        make_pointer('simple.2018-02-19.xml', 'mid_node'),
        make_pointer('simple.2018-02-19.xml', 'mid_node'),
        make_pointer('exit_request.2018-03-20.xml', 'requester'),
        make_pointer('validation.2018-05-09.xml', 'approval_node'),
    ]
    mongo[config["POINTER_COLLECTION"]].insert_many([
        pointer.to_json(include=['*', 'execution']) for pointer in pointers
    ])
    response = client.get('/v1/log?user_identifier=foo')
    payload = json.loads(response.data)
    assert response.status_code == 200
    assert payload == {
        "data": [],
    }
def test_logs_filter_key_valid(mongo, client, config):
    """An arbitrary document key (``one_key``) can be used as a log
    filter; only the matching document is returned."""
    mongo[config["POINTER_COLLECTION"]].insert_one({
        'started_at': datetime(2018, 4, 1, 21, 45),
        'finished_at': None,
        'execution': {
            'id': EXECUTION_ID,
        },
        'node': {
            'id': 'mid_node',
        },
        'one_key': 'foo',
    })
    # second document with a different one_key value must be filtered out
    mongo[config["POINTER_COLLECTION"]].insert_one({
        'started_at': datetime(2018, 4, 1, 21, 50),
        'finished_at': None,
        'execution': {
            'id': EXECUTION_ID,
        },
        'node': {
            'id': '4g9lOdPKmRUf2',
        },
        'one_key': 'bar',
    })
    res = client.get('/v1/log?one_key=foo')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            {
                'started_at': '2018-04-01T21:45:00+00:00',
                'finished_at': None,
                'execution': {
                    'id': EXECUTION_ID,
                },
                'node': {
                    'id': 'mid_node',
                },
                'one_key': 'foo',
            },
        ],
    }
def test_logs_filter_key_invalid(mongo, client, config):
    """A reserved query parameter with an unusable value
    (``limit=foo``, not an integer) is ignored rather than rejected:
    the full result set comes back with status 200."""
    mongo[config["POINTER_COLLECTION"]].insert_one({
        'started_at': datetime(2018, 4, 1, 21, 45),
        'finished_at': None,
        'execution': {
            'id': EXECUTION_ID,
        },
        'node': {
            'id': 'mid_node',
        },
    })
    res = client.get('/v1/log?limit=foo')
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [
            {
                'started_at': '2018-04-01T21:45:00+00:00',
                'finished_at': None,
                'execution': {
                    'id': EXECUTION_ID,
                },
                'node': {
                    'id': 'mid_node',
                },
            },
        ],
    }
def test_logs_filter_value_invalid(mongo, client, config):
    """A valid filter key whose value matches no stored document yields
    an empty result set with status 200."""
    document = {
        'started_at': datetime(2018, 4, 1, 21, 45),
        'finished_at': None,
        'execution': {'id': EXECUTION_ID},
        'node': {'id': 'mid_node'},
        'one_key': 'bar',
    }
    mongo[config["POINTER_COLLECTION"]].insert_one(document)
    response = client.get('/v1/log?one_key=foo')
    payload = json.loads(response.data)
    assert response.status_code == 200
    assert payload == {
        "data": [],
    }
def test_logs_activity(mongo, client, config):
    """/v1/log/<execution_id> supports filtering by ``node_id``: only
    the pointer on the requested node is returned."""
    mongo[config["POINTER_COLLECTION"]].insert_one({
        'started_at': datetime(2018, 4, 1, 21, 45),
        'finished_at': None,
        'execution': {
            'id': EXECUTION_ID,
        },
        'node': {
            'id': 'mid_node',
        },
    })
    # a second pointer on another node of the same execution; must be
    # excluded by the node_id filter
    mongo[config["POINTER_COLLECTION"]].insert_one({
        'started_at': datetime(2018, 4, 1, 21, 50),
        'finished_at': None,
        'execution': {
            'id': EXECUTION_ID,
        },
        'node': {
            'id': '4g9lOdPKmRUf2',
        },
    })
    res = client.get('/v1/log/{}?node_id=mid_node'.format(EXECUTION_ID))
    ans = json.loads(res.data)
    assert res.status_code == 200
    assert ans == {
        "data": [{
            'started_at': '2018-04-01T21:45:00+00:00',
            'finished_at': None,
            'execution': {
                'id': EXECUTION_ID,
            },
            'node': {
                'id': 'mid_node',
            },
        }],
    }
def test_task_list_requires_auth(client):
    """Requesting /v1/task without credentials must return 401 plus a
    descriptive error payload."""
    response = client.get('/v1/task')
    assert response.status_code == 401
    body = json.loads(response.data)
    assert body == {
        'errors': [{
            'detail': 'You must provide basic authorization headers',
            'where': 'request.authorization',
        }],
    }
def test_task_list(client):
    """An authenticated user's task list contains exactly the pointers
    assigned to them."""
    user = make_user('user', 'User')
    pointer = make_pointer('simple.2018-02-19.xml', 'mid_node')
    user.proxy.tasks.set([pointer])
    response = client.get('/v1/task', headers=make_auth(user))
    assert response.status_code == 200
    expected = {
        'data': [pointer.to_json(include=['*', 'execution'])],
    }
    assert json.loads(response.data) == expected
def test_task_read_requires(client):
    """Reading a task enforces, in order: authentication (401), pointer
    existence (404), and assignment to the caller (403)."""
    # no credentials at all
    assert client.get('/v1/task/foo').status_code == 401
    # authenticated, but the pointer does not exist
    juan = make_user('juan', 'Juan')
    response = client.get('/v1/task/foo', headers=make_auth(juan))
    assert response.status_code == 404
    # pointer exists but is not assigned to this user
    ptr = make_pointer('simple.2018-02-19.xml', 'mid_node')
    juan = make_user('juan', 'Juan')
    response = client.get(f'/v1/task/{ptr.id}', headers=make_auth(juan))
    assert response.status_code == 403
def test_task_read(client, config, mongo):
    """Reading an assigned action task returns the pointer, its
    execution metadata, and the form_array derived from the process
    XML."""
    ptr = make_pointer('simple.2018-02-19.xml', 'mid_node')
    juan = make_user('juan', 'Juan')
    juan.proxy.tasks.set([ptr])
    execution = ptr.proxy.execution.get()
    execution.started_at = make_date(2020, 8, 21, 4, 5, 6)
    execution.status = 'ongoing'
    execution.save()
    # seed the execution state document the endpoint reads
    state = Xml.load(config, execution.process_name).get_state()
    mongo[config["EXECUTION_COLLECTION"]].insert_one({
        '_type': 'execution',
        'id': execution.id,
        'state': state,
    })
    res = client.get('/v1/task/{}'.format(ptr.id), headers=make_auth(juan))
    assert res.status_code == 200
    assert json.loads(res.data) == {
        'data': {
            '_type': 'pointer',
            'id': ptr.id,
            'node_id': ptr.node_id,
            'node_type': 'action',
            'name': None,
            'description': None,
            'execution': {
                '_type': 'execution',
                'id': execution.id,
                'process_name': execution.process_name,
                'name': None,
                'name_template': '',
                'description': None,
                'description_template': '',
                'started_at': '2020-08-21T04:05:06Z',
                'finished_at': None,
                'status': 'ongoing',
            },
            'form_array': [
                {
                    'ref': 'mid_form',
                    'inputs': [
                        {
                            'name': 'data',
                            'required': True,
                            'type': 'text',
                            'label': 'data',
                        },
                    ],
                },
            ],
        },
    }
def test_task_validation(client, mongo, config):
    """Reading a validation-node task returns the fields filled in at
    the previous node (flattened into ``fields``) and an empty
    form_array."""
    ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
    juan = make_user('juan', 'Juan')
    juan.proxy.tasks.add(ptr)
    execution = ptr.proxy.execution.get()
    state = Xml.load(config, execution.process_name).get_state()
    # mark the previous node as completed by juan with one valid form
    node = state['items']['start_node']
    node['state'] = 'valid'
    node['actors']['items']['juan'] = {
        '_type': 'actor',
        'state': 'valid',
        'user': {
            '_type': 'user',
            'identifier': 'juan',
            'fullname': None,
        },
        'forms': [Form.state_json('work', [
            {
                '_type': 'field',
                'state': 'valid',
                'label': 'task',
                'name': 'task',
                'value': 'Get some milk and eggs',
            },
        ])],
    }
    mongo[config["EXECUTION_COLLECTION"]].insert_one({
        '_type': 'execution',
        'id': execution.id,
        'state': state,
    })
    res = client.get('/v1/task/{}'.format(ptr.id), headers=make_auth(juan))
    body = json.loads(res.data)['data']
    assert res.status_code == 200
    assert body == {
        '_type': 'pointer',
        'description': None,
        'execution': {
            '_type': 'execution',
            'description': None,
            'description_template': '',
            'id': execution.id,
            'name': None,
            'name_template': '',
            'process_name': execution.process_name,
            'started_at': None,
            'finished_at': None,
            'status': None,
        },
        'fields': [
            {
                '_type': 'field',
                'ref': 'start_node.juan.0:work.task',
                'label': 'task',
                'name': 'task',
                'value': 'Get some milk and eggs',
            }
        ],
        'form_array': [],
        'id': ptr.id,
        'name': None,
        'node_id': ptr.node_id,
        'node_type': 'validation'
    }
def test_task_with_prev_work(client, config, mongo):
    """Re-reading a multi-form task after previous submissions returns
    the earlier form data verbatim under ``prev_work`` alongside the
    form_array definition."""
    ptr = make_pointer('validation-multiform.2018-05-22.xml', 'start_node')
    juan = make_user('juan', 'Juan')
    juan.proxy.tasks.add(ptr)
    execution = ptr.proxy.execution.get()
    state = Xml.load(config, execution.process_name).get_state()
    node = state['items']['start_node']
    # two prior submissions of the 'set' form; one field is invalid
    prev_work = [Form.state_json('set', [
        {'_type': 'field', 'name': 'A', 'value': 'a1', 'state': 'valid'},
        {'_type': 'field', 'name': 'B', 'value': 'b1', 'state': 'valid'},
        {'_type': 'field', 'name': 'C', 'value': 'c1', 'state': 'invalid'},
        {'_type': 'field', 'name': 'D', 'value': 'd1', 'state': 'valid'},
    ]), Form.state_json('set', [
        {'_type': 'field', 'name': 'A', 'value': 'a2', 'state': 'valid'},
        {'_type': 'field', 'name': 'B', 'value': 'b2', 'state': 'valid'},
        {'_type': 'field', 'name': 'C', 'value': 'c2', 'state': 'valid'},
        {'_type': 'field', 'name': 'D', 'value': 'd2', 'state': 'valid'},
    ])]
    node['state'] = 'valid'
    node['actors']['items']['juan'] = {
        '_type': 'actor',
        'state': 'valid',
        'user': {
            '_type': 'user',
            'identifier': 'juan',
            'fullname': None,
        },
        'forms': prev_work,
    }
    mongo[config["EXECUTION_COLLECTION"]].insert_one({
        '_type': 'execution',
        'id': execution.id,
        'state': state,
    })
    res = client.get('/v1/task/{}'.format(ptr.id), headers=make_auth(juan))
    body = json.loads(res.data)['data']
    assert res.status_code == 200
    assert body == {
        '_type': 'pointer',
        'description': None,
        'execution': {
            '_type': 'execution',
            'description': None,
            'description_template': '',
            'id': execution.id,
            'name': None,
            'name_template': '',
            'process_name': execution.process_name,
            'started_at': None,
            'finished_at': None,
            'status': None,
        },
        'form_array': [{
            'inputs': [
                {'label': 'Value A', 'name': 'A', 'type': 'text'},
                {'label': 'Value B', 'name': 'B', 'type': 'text'},
                {'label': 'Value C', 'name': 'C', 'type': 'text'},
                {'label': 'Value D', 'name': 'D', 'type': 'text'}
            ],
            'multiple': '1-5',
            'ref': 'set'
        }],
        'id': ptr.id,
        'name': None,
        'node_id': ptr.node_id,
        'node_type': 'action',
        'prev_work': node['actors']['items']['juan']['forms'],
    }
def test_execution_has_node_info(client):
juan = make_user('juan', 'Juan')
res = client.post('/v1/execution', headers={**{
'Content-Type': 'application/json',
}, **make_auth(juan)}, data=json.dumps({
'process_name': 'simple',
'form_array': [
{
'ref': 'start_form',
'data': {
'data': 'yes',
},
},
],
}))
assert res.status_code == 201
exe = Execution.get_all()[0]
ptr = Pointer.get_all()[0]
assert exe.name == 'Simplest process ever started with: yes'
assert exe.description == 'A simple process that does nothing'
assert ptr.name == 'Primer paso'
assert ptr.description == 'Resolver una tarea'
def test_log_has_node_info(client):
juan = make_user('juan', 'Juan')
res = client.post('/v1/execution', headers={**{
'Content-Type': 'application/json',
}, **make_auth(juan)}, data=json.dumps({
'process_name': 'simple',
'form_array': [
{
'ref': 'start_form',
'data': {
'data': 'yes',
},
},
],
}))
assert res.status_code == 201
body = json.loads(res.data)
execution_id = body['data']['id']
res = client.get('/v1/log/{}'.format(execution_id))
body = json.loads(res.data)
data = body['data'][0]
assert data['node']['id'] == 'start_node'
assert data['node']['name'] == 'Primer paso'
assert data['node']['description'] == 'Resolver una tarea'
assert data['execution']['id'] == execution_id
assert data['execution']['name'] == \
'Simplest process ever started with: yes'
assert data['execution']['description'] == \
'A simple process that does nothing'
def test_delete_process(config, client, mongo, mocker):
mocker.patch(
'pika.adapters.blocking_connection.'
'BlockingChannel.basic_publish'
)
p_0 = make_pointer('simple.2018-02-19.xml', 'mid_node')
execution = p_0.proxy.execution.get()
juan = make_user('juan', 'Juan')
res = client.delete(
'/v1/execution/{}'.format(execution.id),
headers=make_auth(juan)
)
assert res.status_code == 202
pika.adapters.blocking_connection.BlockingChannel.\
basic_publish.assert_called_once()
args = pika.adapters.blocking_connection.BlockingChannel.\
basic_publish.call_args[1]
assert args['exchange'] == ''
assert args['routing_key'] == config['RABBIT_QUEUE']
assert json.loads(args['body']) == {
'execution_id': execution.id,
'command': 'cancel',
}
def test_status_notfound(client):
res = client.get('/v1/execution/doo')
assert res.status_code == 404
def test_status(config, client, mongo):
ptr = make_pointer('simple.2018-02-19.xml', 'mid_node')
execution = ptr.proxy.execution.get()
mongo[config['EXECUTION_COLLECTION']].insert_one({
'id': execution.id,
})
res = client.get('/v1/execution/{}'.format(execution.id))
assert res.status_code == 200
assert json.loads(res.data) == {
'data': {
'id': execution.id,
},
}
def test_execution_list(client, mongo, config):
mongo[config["EXECUTION_COLLECTION"]].insert_one({
'status': 'ongoing',
})
res = client.get('/v1/execution')
data = json.loads(res.data)
assert res.status_code == 200
assert data == {
'total_count': 1,
'data': [{
'status': 'ongoing',
}],
}
def test_execution_filter_key_valid(client, mongo, config):
mongo[config["EXECUTION_COLLECTION"]].insert_many([
{
'id': 1,
'one_key': 'foo',
},
{
'id': 2,
'another_key': 'var',
},
{
'id': 3,
'one_key': 'foo',
},
{
'id': 4,
'one_key': 'zas',
},
])
res = client.get('/v1/execution?one_key=foo')
data = json.loads(res.data)
assert res.status_code == 200
assert data == {
'total_count': 2,
'data': [
{
'id': 1,
'one_key': 'foo',
},
{
'id': 3,
'one_key': 'foo',
}
],
}
def test_execution_filter_key_invalid(client, mongo, config):
mongo[config["EXECUTION_COLLECTION"]].insert_many([
{
'id': 1,
'limit': 'bar',
},
])
res = client.get('/v1/execution?limit=foo')
data = json.loads(res.data)
assert res.status_code == 200
assert data == {
'total_count': 1,
'data': [
{
'id': 1,
'limit': 'bar',
},
],
}
def test_execution_filter_user(mongo, client, config):
juan = make_user('user', 'User')
ptr_01 = make_pointer('simple.2018-02-19.xml', 'mid_node')
ptr_02 = make_pointer('simple.2018-02-19.xml', 'mid_node')
ptr_03 = make_pointer('exit_request.2018-03-20.xml', 'requester')
ptr_04 = make_pointer('validation.2018-05-09.xml', 'approval_node')
exec_01 = ptr_01.proxy.execution.get()
exec_02 = ptr_02.proxy.execution.get()
exec_03 = ptr_03.proxy.execution.get()
exec_04 = ptr_04.proxy.execution.get()
juan.proxy.activities.set([exec_01, exec_02, exec_04])
exec_01_json = exec_01.to_json()
exec_02_json = exec_02.to_json()
exec_03_json = exec_03.to_json()
exec_04_json = exec_04.to_json()
# set started_at to ptrs
exec_01_json['started_at'] = '2018-04-01T21:45:00+00:00'
exec_02_json['started_at'] = '2018-04-01T21:46:00+00:00'
exec_04_json['started_at'] = '2018-04-01T21:48:00+00:00'
mongo[config["EXECUTION_COLLECTION"]].insert_many([
exec_01_json.copy(),
exec_02_json.copy(),
exec_03_json.copy(),
exec_04_json.copy(),
])
res = client.get(f'/v1/execution?user_identifier={juan.identifier}')
ans = json.loads(res.data)
assert res.status_code == 200
assert ans == {
'total_count': 3,
"data": [
exec_04_json,
exec_02_json,
exec_01_json,
],
}
def test_execution_filter_value_invalid(client, mongo, config):
res = client.get('/v1/execution?one_key=foo')
data = json.loads(res.data)
assert res.status_code == 200
assert data == {
'total_count': 0,
'data': [],
}
def test_add_user(client, mocker, config, mongo):
# variables: users
juan = make_user('juan', 'Juan')
luis = make_user('luis', 'Luis')
# variables: pointer and execution
ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
exc = ptr.proxy.execution.get()
mongo[config["EXECUTION_COLLECTION"]].insert_one({
'_type': 'execution',
'id': exc.id,
'state': Xml.load(config, exc.process_name, direct=True).get_state(),
})
mongo[config["POINTER_COLLECTION"]].insert_one({
'id': ptr.id,
'node': {
'id': ptr.node_id,
},
'notified_users': [],
'execution': exc.to_json(),
'state': 'ongoing',
})
# user has no task assigned
assert luis.proxy.tasks.count() == 0
# add the user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'luis',
'node_id': 'approval_node',
})
)
# successful post
assert res.status_code == 200
# user has one task assigned
assert luis.proxy.tasks.count() == 1
# test notified_users (log)
res = client.get(
'/v1/log/{}'.format(exc.id),
)
notified_users = json.loads(res.data)['data'][0]['notified_users']
assert res.status_code == 200
assert notified_users == [luis.to_json()]
def test_add_user_new(client, mocker, config, mongo):
# variables: users
juan = make_user('juan', 'Juan')
luis = make_user('luis', 'Luis')
beto = make_user('beto', 'Beto')
# variables: pointer and execution
ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
exc = ptr.proxy.execution.get()
mongo[config["EXECUTION_COLLECTION"]].insert_one({
'_type': 'execution',
'id': exc.id,
'state': Xml.load(config, exc.process_name, direct=True).get_state(),
})
mongo[config["POINTER_COLLECTION"]].insert_one({
'id': ptr.id,
'node': {
'id': ptr.node_id,
},
'notified_users': [],
'execution': exc.to_json(),
'state': 'ongoing',
})
# user has no task assigned
assert luis.proxy.tasks.count() == 0
assert beto.proxy.tasks.count() == 0
# add the user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'luis',
'node_id': 'approval_node',
})
)
# successful post
assert res.status_code == 200
# user has one task assigned
assert luis.proxy.tasks.count() == 1
# test notified_users (log)
res = client.get(
'/v1/log/{}'.format(exc.id),
)
notified_users = json.loads(res.data)['data'][0]['notified_users']
assert res.status_code == 200
assert notified_users == [luis.to_json()]
# add the second user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'beto',
'node_id': 'approval_node',
})
)
# successful post
assert res.status_code == 200
# user has one task assigned
assert beto.proxy.tasks.count() == 1
# test notified_users (log)
res = client.get(
'/v1/log/{}'.format(exc.id),
)
notified_users = json.loads(res.data)['data'][0]['notified_users']
assert res.status_code == 200
assert notified_users == [luis.to_json(), beto.to_json()]
def test_add_user_duplicate(client, mocker, config, mongo):
# variables: users
juan = make_user('juan', 'Juan')
luis = make_user('luis', 'Luis')
# variables: pointer and execution
ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
exc = ptr.proxy.execution.get()
mongo[config["EXECUTION_COLLECTION"]].insert_one({
'_type': 'execution',
'id': exc.id,
'state': Xml.load(config, exc.process_name, direct=True).get_state(),
})
mongo[config["POINTER_COLLECTION"]].insert_one({
'id': ptr.id,
'node': {
'id': ptr.node_id,
},
'notified_users': [],
'execution': exc.to_json(),
'state': 'ongoing',
})
# user has no task assigned
assert luis.proxy.tasks.count() == 0
# add the user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'luis',
'node_id': 'approval_node',
})
)
# successful post
assert res.status_code == 200
# user has one task assigned
assert luis.proxy.tasks.count() == 1
# test notified_users (log)
res = client.get(
'/v1/log/{}'.format(exc.id),
)
notified_users = json.loads(res.data)['data'][0]['notified_users']
assert res.status_code == 200
assert notified_users == [luis.to_json()]
# add the second user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'luis',
'node_id': 'approval_node',
})
)
# successful post
assert res.status_code == 200
# user has one task assigned
assert luis.proxy.tasks.count() == 1
# test notified_users (log)
res = client.get(
'/v1/log/{}'.format(exc.id),
)
notified_users = json.loads(res.data)['data'][0]['notified_users']
assert res.status_code == 200
assert notified_users == [luis.to_json()]
def test_add_user_requirements_id(client, mocker, config, mongo):
juan = make_user('juan', 'Juan')
ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
exc = ptr.proxy.execution.get()
mongo[config["EXECUTION_COLLECTION"]].insert_one({
'_type': 'execution',
'id': exc.id,
'state': Xml.load(config, exc.process_name, direct=True).get_state(),
})
# try add the user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'luis',
'node_id': 'approval_node',
})
)
# post requires valid user id
assert res.status_code == 400
def test_add_user_requirements_node(client, mocker, config, mongo):
juan = make_user('juan', 'Juan')
make_user('luis', 'Luis')
ptr = make_pointer('validation.2018-05-09.xml', 'approval_node')
exc = ptr.proxy.execution.get()
mongo[config["EXECUTION_COLLECTION"]].insert_one({
'_type': 'execution',
'id': exc.id,
'state': Xml.load(config, exc.process_name, direct=True).get_state(),
})
# try add the user
res = client.post(
'/v1/execution/{}/user'.format(exc.id),
headers={
**{'Content-Type': 'application/json'},
**make_auth(juan)},
data=json.dumps({
'identifier': 'luis',
'node_id': 'final_node',
})
)
# post requires valid living node
assert res.status_code == 400
def test_start_process_error_405(client, mongo, config):
juan = make_user('juan', 'Juan')
res = client.put('/v1/execution', headers={**{
'Content-Type': 'application/json',
}, **make_auth(juan)}, data='{}')
data = json.loads(res.data)
assert res.status_code == 405
assert data['errors'][0]['detail'] == \
"The method is not allowed for the requested URL."
def test_node_statistics(client, mongo, config):
def make_node_reg(process_id, node_id, started_at, finished_at):
return {
'started_at': started_at,
'finished_at': finished_at,
'execution': {
'id': EXECUTION_ID,
},
'node': {
'id': node_id,
},
'process_id': process_id
}
mongo[config["POINTER_COLLECTION"]].insert_many([
make_node_reg(
'simple.2018-02-19', 'test1',
make_date(),
make_date(2018, 5, 10, 4, 5, 6)
),
make_node_reg(
'simple.2018-02-19', 'test2',
make_date(),
make_date(2018, 5, 10, 6, 3, 3)
),
make_node_reg(
'simple.2018-02-19', 'test1',
make_date(),
make_date(2018, 5, 10, 8, 2, 9)
),
make_node_reg(
'simple.2018-02-19', 'test2',
make_date(),
make_date(2018, 5, 10, 3, 4, 5)
),
make_node_reg(
'simple.2018-02-19',
'test2',
make_date(),
None
),
])
res = client.get('/v1/process/{}/statistics'.format(
'simple.2018-02-19'
))
assert res.status_code == 200
assert json.loads(res.data) == {
'data': [
{
'average': 540217.5,
'max': 547329.0,
'min': 533106.0,
'node': 'test1',
'process_id': 'simple.2018-02-19'
},
{
'average': 534814.0,
'max': 540183.0,
'min': 529445.0,
'node': 'test2',
'process_id': 'simple.2018-02-19'
},
],
}
def test_process_statistics(client, mongo, config):
def make_exec_reg(process_id, started_at, finished_at):
return {
'started_at': started_at,
'finished_at': finished_at,
'status': 'finished',
'process': {
'id': process_id,
'version': 'v1',
},
}
mongo[config["EXECUTION_COLLECTION"]].insert_many([
make_exec_reg('p1', make_date(), make_date(2018, 5, 10, 4, 5, 6)),
make_exec_reg('p2', make_date(), make_date(2018, 5, 10, 10, 34, 32)),
make_exec_reg('p1', make_date(), make_date(2018, 5, 11, 22, 41, 10)),
make_exec_reg('p2', make_date(), make_date(2018, 6, 23, 8, 15, 1)),
])
res = client.get('/v1/process/statistics')
assert res.status_code == 200
assert json.loads(res.data) == {
'data': [
{
'average': 609788.0,
'max': 686470.0,
'min': 533106.0,
'process': 'p1',
},
{
'average': 2453086.5,
'max': 4349701.0,
'min': 556472.0,
'process': 'p2',
},
],
}
def test_pagination_execution_log(client, mongo, config):
def make_exec_reg(process_id, started_at, finished_at):
return {
'started_at': started_at,
'finished_at': finished_at,
'status': 'finished',
'process': {
'id': process_id,
'version': 'v1',
},
}
mongo[config["EXECUTION_COLLECTION"]].insert_many([
make_exec_reg('p1', make_date(), make_date(2018, 5, 10, 4, 5, 6)),
make_exec_reg('p2', make_date(), make_date(2018, 5, 10, 10, 34, 32)),
make_exec_reg('p3', make_date(), make_date(2018, 5, 11, 22, 41, 10)),
make_exec_reg('p4', make_date(), make_date(2018, 6, 23, 8, 15, 1)),
make_exec_reg('p5', make_date(), make_date(2018, 6, 11, 4, 5, 6)),
make_exec_reg('p6', make_date(), make_date(2018, 6, 12, 5, 6, 32)),
make_exec_reg('p7', make_date(), make_date(2018, 6, 13, 6, 7, 10)),
make_exec_reg('p8', make_date(), make_date(2018, 6, 14, 7, 8, 1)),
])
res = client.get('/v1/process/statistics?offset=2&limit=2')
assert res.status_code == 200
assert json.loads(res.data)['data'][0]["process"] == 'p3'
assert json.loads(res.data)['data'][1]["process"] == 'p4'
assert len(json.loads(res.data)['data']) == 2
def test_pagination_v1_log(client, mongo, config):
def make_node_reg(process_id, node_id, started_at, finished_at):
return {
'started_at': started_at,
'finished_at': finished_at,
'execution': {
'id': EXECUTION_ID,
},
'node': {
'id': node_id,
},
'process_id': process_id
}
mongo[config["POINTER_COLLECTION"]].insert_many([
make_node_reg(
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 20, 5, 5, 5),
make_date(2018, 5, 20, 5, 5, 5)
),
make_node_reg(
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 21, 6, 6, 6),
make_date(2018, 5, 21, 6, 6, 6)
),
make_node_reg(
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 22, 7, 7, 7),
make_date(2018, 5, 22, 7, 7, 7)
),
make_node_reg(
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 23, 8, 8, 8),
make_date(2018, 5, 23, 8, 8, 8)
),
make_node_reg(
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 24, 9, 9, 9),
make_date(2018, 5, 24, 9, 9, 9)
),
])
res = client.get(
'/v1/log/{}?node_id=mid_node&offset=2&limit=2'.format(EXECUTION_ID)
)
assert json.loads(res.data)['data'][0]["finished_at"] == \
'2018-05-22T07:07:07+00:00'
assert json.loads(res.data)['data'][1]["finished_at"] == \
'2018-05-21T06:06:06+00:00'
assert len(json.loads(res.data)['data']) == 2
def test_pagination_v1_log_all(client, mongo, config):
def make_node_reg(exec_id, process_id, node_id, started_at, finished_at):
return {
'started_at': started_at,
'finished_at': finished_at,
'execution': {
'id': exec_id,
},
'node': {
'id': node_id,
},
'process_id': process_id
}
mongo[config["POINTER_COLLECTION"]].insert_many([
make_node_reg(
'aaaaaaaa',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 20, 5, 5, 5),
make_date(2018, 5, 20, 5, 5, 5)
),
make_node_reg(
'bbbbbbbb',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 21, 6, 6, 6),
make_date(2018, 5, 21, 6, 6, 6)
),
make_node_reg(
'cccccccc',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 22, 7, 7, 7),
make_date(2018, 5, 22, 7, 7, 7)
),
make_node_reg(
'dddddddd',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 23, 8, 8, 8),
make_date(2018, 5, 23, 8, 8, 8)
),
make_node_reg(
'eeeeeeee',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 24, 9, 9, 9),
make_date(2018, 5, 24, 9, 9, 9)
),
])
res = client.get(
'/v1/log?offset=2&limit=2'
)
assert json.loads(res.data)['data'][0]["started_at"] == \
'2018-05-22T07:07:07+00:00'
assert json.loads(res.data)['data'][1]["started_at"] == \
'2018-05-21T06:06:06+00:00'
assert len(json.loads(res.data)['data']) == 2
def test_name_with_if(client, mongo, config):
xml = Xml.load(config, 'pollo')
assert xml.name == 'pollo.2018-05-20.xml'
def test_get_xml(client):
res = client.get('/v1/process/validation-multiform.xml')
assert res.status_code == 200
assert res.headers['Content-Type'] == 'text/xml; charset=utf-8'
assert res.data.startswith(b'<?xml version="1.0" encoding="UTF-8"?>')
def test_fetch_pointers(client, mongo, config):
def make_node_reg(exec_id, process_id, node_id, started_at, finished_at):
return {
'started_at': started_at,
'finished_at': finished_at,
'execution': {
'id': exec_id,
},
'node': {
'id': node_id,
},
'process_id': process_id
}
ptr_01 = make_node_reg(
'aaaaaaaa',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 20, 5, 5, 5),
make_date(2018, 5, 20, 5, 5, 5)
)
ptr_02 = make_node_reg(
'aaaaaaaa',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 20, 5, 5, 5),
make_date(2018, 5, 20, 5, 5, 5)
)
ptr_03 = make_node_reg(
'cccccccc',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 22, 7, 7, 7),
make_date(2018, 5, 22, 7, 7, 7)
)
ptr_04 = make_node_reg(
'dddddddd',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 23, 8, 8, 8),
make_date(2018, 5, 23, 8, 8, 8)
)
ptr_05 = make_node_reg(
'eeeeeeee',
'simple.2018-02-19', 'mid_node',
make_date(2018, 5, 24, 9, 9, 9),
make_date(2018, 5, 24, 9, 9, 9)
)
mongo[config["POINTER_COLLECTION"]].insert_many([
ptr_01.copy(),
ptr_02.copy(),
ptr_03.copy(),
ptr_04.copy(),
ptr_05.copy(),
])
# test simple request
res_simple = client.get('/v1/pointer')
data_simple = json.loads(res_simple.data)
expected_pointers_simple = [
ptr_05.copy(),
ptr_04.copy(),
ptr_03.copy(),
ptr_02.copy(),
ptr_01.copy(),
]
for item in expected_pointers_simple:
item['started_at'] = item['started_at'].isoformat() + '+00:00'
item['finished_at'] = item['finished_at'].isoformat() + '+00:00'
assert data_simple == {
'total_count': 5,
'pointers': expected_pointers_simple,
}
# test sorted
res_sorted = client.get('/v1/pointer?sort=execution.id,ASCENDING')
data_sorted = json.loads(res_sorted.data)
expected_pointers_sorted = [
ptr_01.copy(),
ptr_02.copy(),
ptr_03.copy(),
ptr_04.copy(),
ptr_05.copy(),
]
for item in expected_pointers_sorted:
item['started_at'] = item['started_at'].isoformat() + '+00:00'
item['finished_at'] = item['finished_at'].isoformat() + '+00:00'
assert data_sorted == {
'total_count': 5,
'pointers': expected_pointers_sorted,
}
# test include
res_include = client.get('/v1/pointer?include=execution.id')
data_include = json.loads(res_include.data)
temp_list = [
ptr_05.copy(),
ptr_04.copy(),
ptr_03.copy(),
ptr_02.copy(),
ptr_01.copy(),
]
expected_pointers_include = [
{'execution': {'id': p['execution']['id']}} for p in temp_list
]
assert data_include == {
'total_count': 5,
'pointers': expected_pointers_include,
}
# test limit
res_limit = client.get('/v1/pointer?limit=1')
data_limit = json.loads(res_limit.data)
expected_pointers_limit = [
ptr_05.copy(),
]
for item in expected_pointers_limit:
item['started_at'] = item['started_at'].isoformat() + '+00:00'
item['finished_at'] = item['finished_at'].isoformat() + '+00:00'
assert data_limit == {
'total_count': 5,
'pointers': expected_pointers_limit,
}
# test skip
res_offset = client.get('/v1/pointer?offset=2')
data_offset = json.loads(res_offset.data)
expected_pointers_offset = [
ptr_03.copy(),
ptr_02.copy(),
ptr_01.copy(),
]
for item in expected_pointers_offset:
item['started_at'] = item['started_at'].isoformat() + '+00:00'
item['finished_at'] = item['finished_at'].isoformat() + '+00:00'
assert data_offset == {
'total_count': 5,
'pointers': expected_pointers_offset,
}
def test_fetch_user_info(client):
make_user('juan', 'Juan', 'juan@mail.com')
res = client.get('/v1/user/_identifier/juan/info')
assert res.json == {
'identifier': 'juan',
'fullname': 'Juan',
'email': 'juan@mail.com',
}
def test_fetch_user_info_not_found(client):
res = client.get('/v1/user/_identifier/juan/info')
assert res.status_code == 404
| 28.139558
| 78
| 0.54148
| 13,147
| 110,898
| 4.335894
| 0.037043
| 0.013473
| 0.028156
| 0.035664
| 0.875395
| 0.848818
| 0.819381
| 0.802137
| 0.77798
| 0.761578
| 0
| 0.069291
| 0.299212
| 110,898
| 3,940
| 79
| 28.146701
| 0.6642
| 0.039613
| 0
| 0.723031
| 0
| 0
| 0.239371
| 0.068759
| 0
| 0
| 0
| 0
| 0.091992
| 1
| 0.026803
| false
| 0
| 0.004302
| 0.001985
| 0.033091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a7a1fff9c3e3f79cd9d3aa029fcc981ade24b17
| 97
|
py
|
Python
|
src/utils/date_utils.py
|
jan-gerling/Empirical-Analysis
|
fbc9d430bd230d81d98476302072fa0e82082b70
|
[
"MIT"
] | 17
|
2020-03-13T17:30:13.000Z
|
2022-03-18T06:00:23.000Z
|
src/utils/date_utils.py
|
jan-gerling/Empirical-Analysis
|
fbc9d430bd230d81d98476302072fa0e82082b70
|
[
"MIT"
] | 86
|
2020-03-03T12:31:46.000Z
|
2020-08-04T08:06:50.000Z
|
src/utils/date_utils.py
|
jan-gerling/Empirical-Analysis
|
fbc9d430bd230d81d98476302072fa0e82082b70
|
[
"MIT"
] | 6
|
2020-03-09T10:10:26.000Z
|
2021-12-07T01:17:20.000Z
|
from time import strftime, gmtime
def now():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
| 19.4
| 50
| 0.618557
| 16
| 97
| 3.75
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164948
| 97
| 5
| 50
| 19.4
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0.173469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
6ae7295ce71d88814fba8e998c1927b4e549efa0
| 124
|
py
|
Python
|
larky/src/test/compat.py
|
yuriyshafranyuk1/starlarky
|
42ac4ee87e693814e63aa3f348a7ba318c66052d
|
[
"Apache-2.0"
] | 12
|
2020-11-20T00:01:24.000Z
|
2022-02-03T12:48:11.000Z
|
larky/src/test/compat.py
|
yuriyshafranyuk1/starlarky
|
42ac4ee87e693814e63aa3f348a7ba318c66052d
|
[
"Apache-2.0"
] | 92
|
2020-10-15T15:59:07.000Z
|
2022-03-31T16:37:24.000Z
|
larky/src/test/compat.py
|
yuriyshafranyuk1/starlarky
|
42ac4ee87e693814e63aa3f348a7ba318c66052d
|
[
"Apache-2.0"
] | 23
|
2020-10-27T07:51:32.000Z
|
2022-02-03T12:48:13.000Z
|
from collections import namedtuple
def struct(**kwargs):
return namedtuple('struct', ' '.join(kwargs.keys()))(**kwargs)
| 31
| 66
| 0.709677
| 14
| 124
| 6.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112903
| 124
| 4
| 66
| 31
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
0abbe30351420dd9e6816c83052dfa419497f0eb
| 66,995
|
py
|
Python
|
muskit/layers/rnn/attentions.py
|
A-Quarter-Mile/Muskits
|
60d80727d2ec6b8ec405502d67796e8df319ea82
|
[
"Apache-2.0"
] | 74
|
2021-04-15T15:39:32.000Z
|
2022-03-23T03:34:25.000Z
|
muskit/layers/rnn/attentions.py
|
A-Quarter-Mile/Muskits
|
60d80727d2ec6b8ec405502d67796e8df319ea82
|
[
"Apache-2.0"
] | 33
|
2021-04-30T18:24:47.000Z
|
2022-03-29T13:27:40.000Z
|
muskit/layers/rnn/attentions.py
|
A-Quarter-Mile/Muskits
|
60d80727d2ec6b8ec405502d67796e8df319ea82
|
[
"Apache-2.0"
] | 24
|
2021-04-15T15:20:32.000Z
|
2022-03-19T04:03:25.000Z
|
"""Attention modules for RNN."""
import math
import six
import torch
import torch.nn.functional as F
from muskit.torch_utils.nets_utils import make_pad_mask
from muskit.torch_utils.nets_utils import to_device
def _apply_attention_constraint(
e, last_attended_idx, backward_window=1, forward_window=3
):
"""Apply monotonic attention constraint.
This function apply the monotonic attention constraint
introduced in `Deep Voice 3: Scaling
Text-to-Speech with Convolutional Sequence Learning`_.
Args:
e (Tensor): Attention energy before applying softmax (1, T).
last_attended_idx (int): The index of the inputs of the last attended [0, T].
backward_window (int, optional): Backward window size in attention constraint.
forward_window (int, optional): Forward window size in attetion constraint.
Returns:
Tensor: Monotonic constrained attention energy (1, T).
.. _`Deep Voice 3: Scaling Text-to-Speech with Convolutional Sequence Learning`:
https://arxiv.org/abs/1710.07654
"""
if e.size(0) != 1:
raise NotImplementedError("Batch attention constraining is not yet supported.")
backward_idx = last_attended_idx - backward_window
forward_idx = last_attended_idx + forward_window
if backward_idx > 0:
e[:, :backward_idx] = -float("inf")
if forward_idx < e.size(1):
e[:, forward_idx:] = -float("inf")
return e
class NoAtt(torch.nn.Module):
"""No attention"""
def __init__(self):
super(NoAtt, self).__init__()
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.c = None
def reset(self):
"""reset states"""
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.c = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
"""NoAtt forward
:param torch.Tensor enc_hs_pad: padded encoder hidden state (B, T_max, D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param torch.Tensor dec_z: dummy (does not use)
:param torch.Tensor att_prev: dummy (does not use)
:return: attention weighted encoder state (B, D_enc)
:rtype: torch.Tensor
:return: previous attention weights
:rtype: torch.Tensor
"""
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# initialize attention weight with uniform dist.
if att_prev is None:
# if no bias, 0 0-pad goes 0
mask = 1.0 - make_pad_mask(enc_hs_len).float()
att_prev = mask / mask.new(enc_hs_len).unsqueeze(-1)
att_prev = att_prev.to(self.enc_h)
self.c = torch.sum(
self.enc_h * att_prev.view(batch, self.h_length, 1), dim=1
)
return self.c, att_prev
class AttDot(torch.nn.Module):
"""Dot product attention
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
:param bool han_mode: flag to swith on mode of hierarchical attention
and not store pre_compute_enc_h
"""
def __init__(self, eprojs, dunits, att_dim, han_mode=False):
super(AttDot, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.mask = None
self.han_mode = han_mode
def reset(self):
"""reset states"""
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.mask = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
"""AttDot forward
:param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param torch.Tensor dec_z: dummy (does not use)
:param torch.Tensor att_prev: dummy (does not use)
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: torch.Tensor
:return: previous attention weight (B x T_max)
:rtype: torch.Tensor
"""
batch = enc_hs_pad.size(0)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None or self.han_mode:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = torch.tanh(self.mlp_enc(self.enc_h))
if dec_z is None:
dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
else:
dec_z = dec_z.view(batch, self.dunits)
e = torch.sum(
self.pre_compute_enc_h
* torch.tanh(self.mlp_dec(dec_z)).view(batch, 1, self.att_dim),
dim=2,
) # utt x frame
# NOTE consider zero padding when compute w.
if self.mask is None:
self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
e.masked_fill_(self.mask, -float("inf"))
w = F.softmax(scaling * e, dim=1)
# weighted sum over flames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, w
class AttAdd(torch.nn.Module):
"""Additive attention
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int att_dim: attention dimension
:param bool han_mode: flag to swith on mode of hierarchical attention
and not store pre_compute_enc_h
"""
def __init__(self, eprojs, dunits, att_dim, han_mode=False):
super(AttAdd, self).__init__()
self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
self.gvec = torch.nn.Linear(att_dim, 1)
self.dunits = dunits
self.eprojs = eprojs
self.att_dim = att_dim
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.mask = None
self.han_mode = han_mode
def reset(self):
"""reset states"""
self.h_length = None
self.enc_h = None
self.pre_compute_enc_h = None
self.mask = None
def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
"""AttAdd forward
:param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
:param list enc_hs_len: padded encoder hidden state length (B)
:param torch.Tensor dec_z: decoder hidden state (B x D_dec)
:param torch.Tensor att_prev: dummy (does not use)
:param float scaling: scaling parameter before applying softmax
:return: attention weighted encoder state (B, D_enc)
:rtype: torch.Tensor
:return: previous attention weights (B x T_max)
:rtype: torch.Tensor
"""
batch = len(enc_hs_pad)
# pre-compute all h outside the decoder loop
if self.pre_compute_enc_h is None or self.han_mode:
self.enc_h = enc_hs_pad # utt x frame x hdim
self.h_length = self.enc_h.size(1)
# utt x frame x att_dim
self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
if dec_z is None:
dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
else:
dec_z = dec_z.view(batch, self.dunits)
# dec_z_tiled: utt x frame x att_dim
dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
# dot with gvec
# utt x frame x att_dim -> utt x frame
e = self.gvec(torch.tanh(self.pre_compute_enc_h + dec_z_tiled)).squeeze(2)
# NOTE consider zero padding when compute w.
if self.mask is None:
self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
e.masked_fill_(self.mask, -float("inf"))
w = F.softmax(scaling * e, dim=1)
# weighted sum over flames
# utt x hdim
# NOTE use bmm instead of sum(*)
c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
return c, w
class AttLoc(torch.nn.Module):
    """location-aware attention module.

    Reference: Attention-Based Models for Speech Recognition
    (https://arxiv.org/pdf/1506.07503.pdf)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_enc_h
    """

    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttLoc, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # 1-D convolution over the previous attention weights,
        # implemented as Conv2d with a 1 x (2*aconv_filts+1) kernel
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        dec_z,
        att_prev,
        scaling=2.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate AttLoc forward propagation.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: previous attention weight (B x T_max)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        # initialize attention weight with uniform dist.
        if att_prev is None:
            # if no bias, 0 0-pad goes 0
            att_prev = 1.0 - make_pad_mask(enc_hs_len).to(
                device=dec_z.device, dtype=dec_z.dtype
            )
            # normalize so each row sums to 1 over the non-padded frames
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)

        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)

        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)

        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)

        # NOTE: consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))

        # apply monotonic attention constraint (mainly for SVS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(
                e, last_attended_idx, backward_window, forward_window
            )

        w = F.softmax(scaling * e, dim=1)

        # weighted sum over frames
        # utt x hdim
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, w
class AttCov(torch.nn.Module):
    """Coverage mechanism attention

    Reference: Get To The Point: Summarization with Pointer-Generator Network
    (https://arxiv.org/abs/1704.04368)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_enc_h
    """

    def __init__(self, eprojs, dunits, att_dim, han_mode=False):
        super(AttCov, self).__init__()
        # projections for encoder states, decoder state, coverage vector,
        # and the final scalar scoring layer
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.wvec = torch.nn.Linear(1, att_dim)
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        self.han_mode = han_mode
        # per-utterance caches, cleared by reset()
        self.enc_h = None
        self.h_length = None
        self.pre_compute_enc_h = None
        self.mask = None

    def reset(self):
        """Clear the cached encoder projection and padding mask."""
        self.enc_h = None
        self.h_length = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_list, scaling=2.0):
        """Compute coverage attention.

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param list att_prev_list: list of previous attention weight
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weights
        :rtype: list
        """
        n_utt = len(enc_hs_pad)

        # Project encoder states once per utterance (recomputed in han_mode).
        if self.han_mode or self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(n_utt, self.dunits)
        else:
            dec_z = dec_z.view(n_utt, self.dunits)

        if att_prev_list is None:
            # First step: start the history with a uniform distribution over
            # the non-padded frames (zero weight on padding).
            init_w = to_device(enc_hs_pad, (1.0 - make_pad_mask(enc_hs_len).float()))
            att_prev_list = [init_w / init_w.new(enc_hs_len).unsqueeze(-1)]

        # Coverage vector = accumulated attention history:
        # L' * [B x T] -> B x T -> B x T x 1 -> B x T x att_dim.
        cov_vec = self.wvec(sum(att_prev_list).unsqueeze(-1))

        # Decoder query broadcast over frames: utt x 1 x att_dim.
        dec_z_tiled = self.mlp_dec(dec_z).view(n_utt, 1, self.att_dim)

        # Additive score: utt x frame x att_dim -> utt x frame.
        e = self.gvec(
            torch.tanh(cov_vec + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)

        # Exclude padded frames from the softmax (mask cached until reset()).
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        att_prev_list.append(w)

        # Weighted sum over frames -> utt x hdim.
        c = torch.sum(self.enc_h * w.view(n_utt, self.h_length, 1), dim=1)
        return c, att_prev_list
class AttLoc2D(torch.nn.Module):
    """2D location-aware attention

    This attention is an extended version of location aware attention.
    It takes not only the attention weights of one frame before,
    but also those of earlier frames into account.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param int att_win: attention window size (default=5)
    :param bool han_mode:
        flag to switch on mode of hierarchical attention and not store pre_compute_enc_h
    """

    def __init__(
        self, eprojs, dunits, att_dim, att_win, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttLoc2D, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # 2D convolution over the last att_win attention weight vectors
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (att_win, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.aconv_chans = aconv_chans
        self.att_win = att_win
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
        """AttLoc2D forward

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: previous attention weight (B x att_win x T_max)
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x att_win x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        # initialize attention weight with uniform dist.
        if att_prev is None:
            # B * [Li x att_win]
            # if no bias, 0 0-pad goes 0
            att_prev = to_device(enc_hs_pad, (1.0 - make_pad_mask(enc_hs_len).float()))
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)
            # replicate the uniform weights over the whole window
            att_prev = att_prev.unsqueeze(1).expand(-1, self.att_win, -1)

        # att_prev: B x att_win x Tmax -> B x 1 x att_win x Tmax -> B x C x 1 x Tmax
        att_conv = self.loc_conv(att_prev.unsqueeze(1))
        # att_conv: B x C x 1 x Tmax -> B x Tmax x C
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)

        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)

        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)

        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)

        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)

        # update att_prev: B x att_win x Tmax -> B x att_win+1 x Tmax
        # -> B x att_win x Tmax (drop the oldest weight vector)
        att_prev = torch.cat([att_prev, w.unsqueeze(1)], dim=1)
        att_prev = att_prev[:, 1:]
        return c, att_prev
class AttLocRec(torch.nn.Module):
    """location-aware recurrent attention

    This attention is an extended version of location aware attention.
    With the use of an RNN,
    it takes the effect of the history of attention weights into account.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode:
        flag to switch on mode of hierarchical attention and not store pre_compute_enc_h
    """

    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttLocRec, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        # LSTM cell summarizing the history of attention weights
        self.att_lstm = torch.nn.LSTMCell(aconv_chans, att_dim, bias=False)
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_states, scaling=2.0):
        """AttLocRec forward

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param tuple att_prev_states: previous attention weight and lstm states
            ((B, T_max), ((B, att_dim), (B, att_dim)))
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights and lstm states (w, (hx, cx))
            ((B, T_max), ((B, att_dim), (B, att_dim)))
        :rtype: tuple
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        if att_prev_states is None:
            # initialize attention weight with uniform dist.
            # if no bias, 0 0-pad goes 0
            att_prev = to_device(enc_hs_pad, (1.0 - make_pad_mask(enc_hs_len).float()))
            att_prev = att_prev / att_prev.new(enc_hs_len).unsqueeze(-1)
            # initialize lstm states
            att_h = enc_hs_pad.new_zeros(batch, self.att_dim)
            att_c = enc_hs_pad.new_zeros(batch, self.att_dim)
            att_states = (att_h, att_c)
        else:
            att_prev = att_prev_states[0]
            att_states = att_prev_states[1]

        # B x 1 x 1 x T -> B x C x 1 x T
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # apply non-linear
        att_conv = F.relu(att_conv)
        # B x C x 1 x T -> B x C x 1 x 1 -> B x C
        # (global max-pooling over time before feeding the LSTM cell)
        att_conv = F.max_pool2d(att_conv, (1, att_conv.size(3))).view(batch, -1)

        att_h, att_c = self.att_lstm(att_conv, att_states)

        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)

        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_h.unsqueeze(1) + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)

        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)

        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)

        return c, (w, (att_h, att_c))
class AttCovLoc(torch.nn.Module):
    """Coverage mechanism location aware attention

    This attention is a combination of coverage and location-aware attentions.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode:
        flag to switch on mode of hierarchical attention and not store pre_compute_enc_h
    """

    def __init__(
        self, eprojs, dunits, att_dim, aconv_chans, aconv_filts, han_mode=False
    ):
        super(AttCovLoc, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # convolution over the coverage vector (accumulated attention weights)
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.aconv_chans = aconv_chans
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached encoder projection and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev_list, scaling=2.0):
        """AttCovLoc forward

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param list att_prev_list: list of previous attention weight
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weights
        :rtype: list
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        # initialize attention weight with uniform dist.
        if att_prev_list is None:
            # if no bias, 0 0-pad goes 0
            mask = 1.0 - make_pad_mask(enc_hs_len).float()
            att_prev_list = [
                to_device(enc_hs_pad, mask / mask.new(enc_hs_len).unsqueeze(-1))
            ]

        # att_prev_list: L' * [B x T] => cov_vec B x T
        cov_vec = sum(att_prev_list)

        # cov_vec: B x T -> B x 1 x 1 x T -> B x C x 1 x T
        att_conv = self.loc_conv(cov_vec.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)

        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)

        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)

        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        e.masked_fill_(self.mask, -float("inf"))
        w = F.softmax(scaling * e, dim=1)
        att_prev_list += [w]

        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        return c, att_prev_list
class AttMultiHeadDot(torch.nn.Module):
    """Multi head dot product attention

    Reference: Attention is all you need
    (https://arxiv.org/abs/1706.03762)

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v, han_mode=False):
        super(AttMultiHeadDot, self).__init__()
        # one query/key/value projection per head
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        for _ in six.moves.range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
        # output projection applied to the concatenation of all heads
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        # scaled dot-product factor 1/sqrt(d_k)
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached key/value projections and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """AttMultiHeadDot forward

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: dummy (does not use)
        :return: attention weighted encoder state (B x D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [
                torch.tanh(self.mlp_k[h](self.enc_h))
                for h in six.moves.range(self.aheads)
            ]

        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [
                self.mlp_v[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        c = []
        w = []
        for h in six.moves.range(self.aheads):
            # dot product between key and (tanh-ed) query: utt x frame
            e = torch.sum(
                self.pre_compute_k[h]
                * torch.tanh(self.mlp_q[h](dec_z)).view(batch, 1, self.att_dim_k),
                dim=2,
            )  # utt x frame

            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(self.scaling * e, dim=1)]

            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]

        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttMultiHeadAdd(torch.nn.Module):
    """Multi head additive attention

    Reference: Attention is all you need
    (https://arxiv.org/abs/1706.03762)

    This attention is multi head attention using additive attention for each head.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(self, eprojs, dunits, aheads, att_dim_k, att_dim_v, han_mode=False):
        super(AttMultiHeadAdd, self).__init__()
        # one query/key/value projection and one scoring vector per head
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        self.gvec = torch.nn.ModuleList()
        for _ in six.moves.range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
            self.gvec += [torch.nn.Linear(att_dim_k, 1)]
        # output projection applied to the concatenation of all heads
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        # softmax temperature 1/sqrt(d_k)
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached key/value projections and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """AttMultiHeadAdd forward

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: dummy (does not use)
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [
                self.mlp_k[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]

        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [
                self.mlp_v[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        c = []
        w = []
        for h in six.moves.range(self.aheads):
            # additive score per head: utt x frame
            e = self.gvec[h](
                torch.tanh(
                    self.pre_compute_k[h]
                    + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k)
                )
            ).squeeze(2)

            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(self.scaling * e, dim=1)]

            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]

        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttMultiHeadLoc(torch.nn.Module):
    """Multi head location based attention

    Reference: Attention is all you need
    (https://arxiv.org/abs/1706.03762)

    This attention is multi head attention using location-aware attention for each head.

    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int att_dim_k: dimension k in multi head attention
    :param int att_dim_v: dimension v in multi head attention
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
        and not store pre_compute_k and pre_compute_v
    """

    def __init__(
        self,
        eprojs,
        dunits,
        aheads,
        att_dim_k,
        att_dim_v,
        aconv_chans,
        aconv_filts,
        han_mode=False,
    ):
        super(AttMultiHeadLoc, self).__init__()
        # per-head projections, scoring vectors, and location convolutions
        self.mlp_q = torch.nn.ModuleList()
        self.mlp_k = torch.nn.ModuleList()
        self.mlp_v = torch.nn.ModuleList()
        self.gvec = torch.nn.ModuleList()
        self.loc_conv = torch.nn.ModuleList()
        self.mlp_att = torch.nn.ModuleList()
        for _ in six.moves.range(aheads):
            self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
            self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
            self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
            self.gvec += [torch.nn.Linear(att_dim_k, 1)]
            self.loc_conv += [
                torch.nn.Conv2d(
                    1,
                    aconv_chans,
                    (1, 2 * aconv_filts + 1),
                    padding=(0, aconv_filts),
                    bias=False,
                )
            ]
            self.mlp_att += [torch.nn.Linear(aconv_chans, att_dim_k, bias=False)]
        # output projection applied to the concatenation of all heads
        self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
        self.dunits = dunits
        self.eprojs = eprojs
        self.aheads = aheads
        self.att_dim_k = att_dim_k
        self.att_dim_v = att_dim_v
        self.scaling = 1.0 / math.sqrt(att_dim_k)
        # per-utterance caches; cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None
        self.han_mode = han_mode

    def reset(self):
        """reset states (cached key/value projections and padding mask)"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_k = None
        self.pre_compute_v = None
        self.mask = None

    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev, scaling=2.0):
        """AttMultiHeadLoc forward

        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev:
            list of previous attention weight (B x T_max) * aheads
        :param float scaling: scaling parameter before applying softmax
        :return: attention weighted encoder state (B x D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [
                self.mlp_k[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]

        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [
                self.mlp_v[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]

        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)

        if att_prev is None:
            # initialize each head with a uniform dist. over non-padded frames
            att_prev = []
            for _ in six.moves.range(self.aheads):
                # if no bias, 0 0-pad goes 0
                mask = 1.0 - make_pad_mask(enc_hs_len).float()
                att_prev += [
                    to_device(enc_hs_pad, mask / mask.new(enc_hs_len).unsqueeze(-1))
                ]

        c = []
        w = []
        for h in six.moves.range(self.aheads):
            # location features from the head's previous attention weights
            att_conv = self.loc_conv[h](att_prev[h].view(batch, 1, 1, self.h_length))
            att_conv = att_conv.squeeze(2).transpose(1, 2)
            att_conv = self.mlp_att[h](att_conv)

            # additive score per head: utt x frame
            e = self.gvec[h](
                torch.tanh(
                    self.pre_compute_k[h]
                    + att_conv
                    + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k)
                )
            ).squeeze(2)

            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            e.masked_fill_(self.mask, -float("inf"))
            # NOTE(review): uses the `scaling` argument here, not self.scaling
            # (unlike AttMultiHeadDot/Add) — presumably intentional; confirm.
            w += [F.softmax(scaling * e, dim=1)]

            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]

        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttMultiHeadMultiResLoc(torch.nn.Module):
"""Multi head multi resolution location based attention
Reference: Attention is all you need
(https://arxiv.org/abs/1706.03762)
This attention is multi head attention using location-aware attention for each head.
Furthermore, it uses different filter size for each head.
:param int eprojs: # projection-units of encoder
:param int dunits: # units of decoder
:param int aheads: # heads of multi head attention
:param int att_dim_k: dimension k in multi head attention
:param int att_dim_v: dimension v in multi head attention
:param int aconv_chans: maximum # channels of attention convolution
each head use #ch = aconv_chans * (head + 1) / aheads
e.g. aheads=4, aconv_chans=100 => filter size = 25, 50, 75, 100
:param int aconv_filts: filter size of attention convolution
:param bool han_mode: flag to swith on mode of hierarchical attention
and not store pre_compute_k and pre_compute_v
"""
def __init__(
    self,
    eprojs,
    dunits,
    aheads,
    att_dim_k,
    att_dim_v,
    aconv_chans,
    aconv_filts,
    han_mode=False,
):
    """Initialize per-head projections; each head gets a different conv filter size."""
    super(AttMultiHeadMultiResLoc, self).__init__()
    # per-head query/key/value projections, scoring vectors,
    # location convolutions, and conv-output projections
    self.mlp_q = torch.nn.ModuleList()
    self.mlp_k = torch.nn.ModuleList()
    self.mlp_v = torch.nn.ModuleList()
    self.gvec = torch.nn.ModuleList()
    self.loc_conv = torch.nn.ModuleList()
    self.mlp_att = torch.nn.ModuleList()
    for h in six.moves.range(aheads):
        self.mlp_q += [torch.nn.Linear(dunits, att_dim_k)]
        self.mlp_k += [torch.nn.Linear(eprojs, att_dim_k, bias=False)]
        self.mlp_v += [torch.nn.Linear(eprojs, att_dim_v, bias=False)]
        self.gvec += [torch.nn.Linear(att_dim_k, 1)]
        # multi-resolution: filter size grows linearly with the head index
        afilts = aconv_filts * (h + 1) // aheads
        self.loc_conv += [
            torch.nn.Conv2d(
                1, aconv_chans, (1, 2 * afilts + 1), padding=(0, afilts), bias=False
            )
        ]
        self.mlp_att += [torch.nn.Linear(aconv_chans, att_dim_k, bias=False)]
    # output projection applied to the concatenation of all heads
    self.mlp_o = torch.nn.Linear(aheads * att_dim_v, eprojs, bias=False)
    self.dunits = dunits
    self.eprojs = eprojs
    self.aheads = aheads
    self.att_dim_k = att_dim_k
    self.att_dim_v = att_dim_v
    # softmax temperature 1/sqrt(d_k)
    self.scaling = 1.0 / math.sqrt(att_dim_k)
    # per-utterance caches; cleared by reset()
    self.h_length = None
    self.enc_h = None
    self.pre_compute_k = None
    self.pre_compute_v = None
    self.mask = None
    self.han_mode = han_mode
def reset(self):
    """reset states (cached key/value projections and padding mask)"""
    self.h_length = None
    self.enc_h = None
    self.pre_compute_k = None
    self.pre_compute_v = None
    self.mask = None
    def forward(self, enc_hs_pad, enc_hs_len, dec_z, att_prev):
        """AttMultiHeadMultiResLoc forward
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: list of previous attention weight
            (B x T_max) * aheads
        :return: attention weighted encoder state (B x D_enc)
        :rtype: torch.Tensor
        :return: list of previous attention weight (B x T_max) * aheads
        :rtype: list
        """
        batch = enc_hs_pad.size(0)
        # pre-compute all k and v outside the decoder loop; in han_mode the
        # encoder states differ on every call, so the cache is refreshed each time
        if self.pre_compute_k is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_k = [
                self.mlp_k[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]
        if self.pre_compute_v is None or self.han_mode:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_v = [
                self.mlp_v[h](self.enc_h) for h in six.moves.range(self.aheads)
            ]
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initialize each head's weights uniformly over the unpadded frames
            att_prev = []
            for _ in six.moves.range(self.aheads):
                # if no bias, 0 0-pad goes 0
                mask = 1.0 - make_pad_mask(enc_hs_len).float()
                att_prev += [
                    to_device(enc_hs_pad, mask / mask.new(enc_hs_len).unsqueeze(-1))
                ]
        c = []
        w = []
        for h in six.moves.range(self.aheads):
            # location-aware term: convolve this head's previous attention weights
            att_conv = self.loc_conv[h](att_prev[h].view(batch, 1, 1, self.h_length))
            att_conv = att_conv.squeeze(2).transpose(1, 2)
            att_conv = self.mlp_att[h](att_conv)
            # additive attention energies: (B x T_max)
            e = self.gvec[h](
                torch.tanh(
                    self.pre_compute_k[h]
                    + att_conv
                    + self.mlp_q[h](dec_z).view(batch, 1, self.att_dim_k)
                )
            ).squeeze(2)
            # NOTE consider zero padding when compute w.
            if self.mask is None:
                self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
            # in-place fill: padded frames get -inf so softmax assigns them zero
            e.masked_fill_(self.mask, -float("inf"))
            w += [F.softmax(self.scaling * e, dim=1)]
            # weighted sum over frames
            # utt x hdim
            # NOTE use bmm instead of sum(*)
            c += [
                torch.sum(
                    self.pre_compute_v[h] * w[h].view(batch, self.h_length, 1), dim=1
                )
            ]
        # concat all of c
        c = self.mlp_o(torch.cat(c, dim=1))
        return c, w
class AttForward(torch.nn.Module):
    """Forward attention module.
    Reference:
        Forward attention in sequence-to-sequence acoustic modeling for speech synthesis
        (https://arxiv.org/pdf/1807.06736.pdf)
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    """
    def __init__(self, eprojs, dunits, att_dim, aconv_chans, aconv_filts):
        super(AttForward, self).__init__()
        self.mlp_enc = torch.nn.Linear(eprojs, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # location-aware convolution over the previous attention weights
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eprojs = eprojs
        self.att_dim = att_dim
        # lazy per-utterance caches; populated in forward(), cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def reset(self):
        """reset states"""
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        dec_z,
        att_prev,
        scaling=1.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate AttForward forward propagation.
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B x T_max x D_enc)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B x D_dec)
        :param torch.Tensor att_prev: attention weights of previous step
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, D_enc)
        :rtype: torch.Tensor
        :return: previous attention weights (B x T_max)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initial attention will be [1, 0, 0, ...]
            att_prev = enc_hs_pad.new_zeros(*enc_hs_pad.size()[:2])
            att_prev[:, 0] = 1.0
        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).unsqueeze(1)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(self.pre_compute_enc_h + dec_z_tiled + att_conv)
        ).squeeze(2)
        # NOTE: consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        # in-place fill: padded frames get -inf so softmax assigns them zero
        e.masked_fill_(self.mask, -float("inf"))
        # apply monotonic attention constraint (mainly for TTS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(
                e, last_attended_idx, backward_window, forward_window
            )
        w = F.softmax(scaling * e, dim=1)
        # forward attention: mix current weights with the previous step's
        # weights and their one-frame right shift (stay vs. move forward)
        att_prev_shift = F.pad(att_prev, (1, 0))[:, :-1]
        w = (att_prev + att_prev_shift) * w
        # NOTE: clamp is needed to avoid nan gradient
        w = F.normalize(torch.clamp(w, 1e-6), p=1, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.unsqueeze(-1), dim=1)
        return c, w
class AttForwardTA(torch.nn.Module):
    """Forward attention with transition agent module.
    Reference:
        Forward attention in sequence-to-sequence acoustic modeling for speech synthesis
        (https://arxiv.org/pdf/1807.06736.pdf)
    :param int eunits: # units of encoder
    :param int dunits: # units of decoder
    :param int att_dim: attention dimension
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param int odim: output dimension
    """
    def __init__(self, eunits, dunits, att_dim, aconv_chans, aconv_filts, odim):
        super(AttForwardTA, self).__init__()
        self.mlp_enc = torch.nn.Linear(eunits, att_dim)
        self.mlp_dec = torch.nn.Linear(dunits, att_dim, bias=False)
        # transition agent: predicts stay-vs-move probability from
        # [context; previous output; decoder state]
        self.mlp_ta = torch.nn.Linear(eunits + dunits + odim, 1)
        self.mlp_att = torch.nn.Linear(aconv_chans, att_dim, bias=False)
        # location-aware convolution over the previous attention weights
        self.loc_conv = torch.nn.Conv2d(
            1,
            aconv_chans,
            (1, 2 * aconv_filts + 1),
            padding=(0, aconv_filts),
            bias=False,
        )
        self.gvec = torch.nn.Linear(att_dim, 1)
        self.dunits = dunits
        self.eunits = eunits
        self.att_dim = att_dim
        # lazy per-utterance caches; populated in forward(), cleared by reset()
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        # starts as a scalar 0.5 prior; becomes a (B, 1) tensor after the
        # first forward() step updates it via the transition agent
        self.trans_agent_prob = 0.5
    def reset(self):
        # clear caches and restore the transition-agent prior
        self.h_length = None
        self.enc_h = None
        self.pre_compute_enc_h = None
        self.mask = None
        self.trans_agent_prob = 0.5
    def forward(
        self,
        enc_hs_pad,
        enc_hs_len,
        dec_z,
        att_prev,
        out_prev,
        scaling=1.0,
        last_attended_idx=None,
        backward_window=1,
        forward_window=3,
    ):
        """Calculate AttForwardTA forward propagation.
        :param torch.Tensor enc_hs_pad: padded encoder hidden state (B, Tmax, eunits)
        :param list enc_hs_len: padded encoder hidden state length (B)
        :param torch.Tensor dec_z: decoder hidden state (B, dunits)
        :param torch.Tensor att_prev: attention weights of previous step
        :param torch.Tensor out_prev: decoder outputs of previous step (B, odim)
        :param float scaling: scaling parameter before applying softmax
        :param int last_attended_idx: index of the inputs of the last attended
        :param int backward_window: backward window size in attention constraint
        :param int forward_window: forward window size in attention constraint
        :return: attention weighted encoder state (B, dunits)
        :rtype: torch.Tensor
        :return: previous attention weights (B, Tmax)
        :rtype: torch.Tensor
        """
        batch = len(enc_hs_pad)
        # pre-compute all h outside the decoder loop
        if self.pre_compute_enc_h is None:
            self.enc_h = enc_hs_pad  # utt x frame x hdim
            self.h_length = self.enc_h.size(1)
            # utt x frame x att_dim
            self.pre_compute_enc_h = self.mlp_enc(self.enc_h)
        if dec_z is None:
            dec_z = enc_hs_pad.new_zeros(batch, self.dunits)
        else:
            dec_z = dec_z.view(batch, self.dunits)
        if att_prev is None:
            # initial attention will be [1, 0, 0, ...]
            att_prev = enc_hs_pad.new_zeros(*enc_hs_pad.size()[:2])
            att_prev[:, 0] = 1.0
        # att_prev: utt x frame -> utt x 1 x 1 x frame
        # -> utt x att_conv_chans x 1 x frame
        att_conv = self.loc_conv(att_prev.view(batch, 1, 1, self.h_length))
        # att_conv: utt x att_conv_chans x 1 x frame -> utt x frame x att_conv_chans
        att_conv = att_conv.squeeze(2).transpose(1, 2)
        # att_conv: utt x frame x att_conv_chans -> utt x frame x att_dim
        att_conv = self.mlp_att(att_conv)
        # dec_z_tiled: utt x frame x att_dim
        dec_z_tiled = self.mlp_dec(dec_z).view(batch, 1, self.att_dim)
        # dot with gvec
        # utt x frame x att_dim -> utt x frame
        e = self.gvec(
            torch.tanh(att_conv + self.pre_compute_enc_h + dec_z_tiled)
        ).squeeze(2)
        # NOTE consider zero padding when compute w.
        if self.mask is None:
            self.mask = to_device(enc_hs_pad, make_pad_mask(enc_hs_len))
        # in-place fill: padded frames get -inf so softmax assigns them zero
        e.masked_fill_(self.mask, -float("inf"))
        # apply monotonic attention constraint (mainly for SVS)
        if last_attended_idx is not None:
            e = _apply_attention_constraint(
                e, last_attended_idx, backward_window, forward_window
            )
        w = F.softmax(scaling * e, dim=1)
        # forward attention: the transition agent probability interpolates
        # between staying (att_prev) and moving one frame (att_prev_shift)
        att_prev_shift = F.pad(att_prev, (1, 0))[:, :-1]
        w = (
            self.trans_agent_prob * att_prev
            + (1 - self.trans_agent_prob) * att_prev_shift
        ) * w
        # NOTE: clamp is needed to avoid nan gradient
        w = F.normalize(torch.clamp(w, 1e-6), p=1, dim=1)
        # weighted sum over frames
        # utt x hdim
        # NOTE use bmm instead of sum(*)
        c = torch.sum(self.enc_h * w.view(batch, self.h_length, 1), dim=1)
        # update transition agent prob for the next decoder step
        self.trans_agent_prob = torch.sigmoid(
            self.mlp_ta(torch.cat([c, out_prev, dec_z], dim=1))
        )
        return c, w
def att_for(args, num_att=1, han_mode=False):
    """Instantiates an attention module given the program arguments

    :param Namespace args: The arguments
    :param int num_att: number of attention modules
        (in multi-speaker case, it can be 2 or more)
    :param bool han_mode: switch on/off mode of hierarchical attention network (HAN)
    :rtype torch.nn.Module
    :return: The attention module (a single module in han_mode,
        otherwise a torch.nn.ModuleList of attention modules)
    :raises ValueError: if args.num_encs is less than one
    """
    att_list = torch.nn.ModuleList()
    num_encs = getattr(args, "num_encs", 1)  # use getattr to keep compatibility
    aheads = getattr(args, "aheads", None)
    awin = getattr(args, "awin", None)
    aconv_chans = getattr(args, "aconv_chans", None)
    aconv_filts = getattr(args, "aconv_filts", None)
    if num_encs == 1:
        # single-encoder case: options are scalars shared by all attentions
        for i in range(num_att):
            att = initial_att(
                args.atype,
                args.eprojs,
                args.dunits,
                aheads,
                args.adim,
                awin,
                aconv_chans,
                aconv_filts,
            )
            att_list.append(att)
    elif num_encs > 1:  # multi-encoder case (not multi-speaker mode)
        if han_mode:
            # hierarchical attention network: one attention over the
            # encoder-level contexts; return the module directly
            att = initial_att(
                args.han_type,
                args.eprojs,
                args.dunits,
                args.han_heads,
                args.han_dim,
                args.han_win,
                args.han_conv_chans,
                args.han_conv_filts,
                han_mode=True,
            )
            return att
        else:
            # one attention per encoder; per-encoder options are lists
            # (att_list was already created above; no need to re-create it)
            for idx in range(num_encs):
                att = initial_att(
                    args.atype[idx],
                    args.eprojs,
                    args.dunits,
                    aheads[idx],
                    args.adim[idx],
                    awin[idx],
                    aconv_chans[idx],
                    aconv_filts[idx],
                )
                att_list.append(att)
    else:
        # num_encs < 1 is invalid; note the original message wrongly said
        # "more than one" although a single encoder is perfectly valid
        raise ValueError(
            "Number of encoders needs to be at least one. {}".format(num_encs)
        )
    return att_list
def initial_att(
    atype, eprojs, dunits, aheads, adim, awin, aconv_chans, aconv_filts, han_mode=False
):
    """Instantiates a single attention module

    :param str atype: attention type
    :param int eprojs: # projection-units of encoder
    :param int dunits: # units of decoder
    :param int aheads: # heads of multi head attention
    :param int adim: attention dimension
    :param int awin: attention window size
    :param int aconv_chans: # channels of attention convolution
    :param int aconv_filts: filter size of attention convolution
    :param bool han_mode: flag to switch on mode of hierarchical attention
    :return: The attention module
    :raises ValueError: if atype is not a known attention type
    """
    if atype == "noatt":
        att = NoAtt()
    elif atype == "dot":
        att = AttDot(eprojs, dunits, adim, han_mode)
    elif atype == "add":
        att = AttAdd(eprojs, dunits, adim, han_mode)
    elif atype == "location":
        att = AttLoc(eprojs, dunits, adim, aconv_chans, aconv_filts, han_mode)
    elif atype == "location2d":
        att = AttLoc2D(eprojs, dunits, adim, awin, aconv_chans, aconv_filts, han_mode)
    elif atype == "location_recurrent":
        att = AttLocRec(eprojs, dunits, adim, aconv_chans, aconv_filts, han_mode)
    elif atype == "coverage":
        att = AttCov(eprojs, dunits, adim, han_mode)
    elif atype == "coverage_location":
        att = AttCovLoc(eprojs, dunits, adim, aconv_chans, aconv_filts, han_mode)
    elif atype == "multi_head_dot":
        att = AttMultiHeadDot(eprojs, dunits, aheads, adim, adim, han_mode)
    elif atype == "multi_head_add":
        att = AttMultiHeadAdd(eprojs, dunits, aheads, adim, adim, han_mode)
    elif atype == "multi_head_loc":
        att = AttMultiHeadLoc(
            eprojs, dunits, aheads, adim, adim, aconv_chans, aconv_filts, han_mode
        )
    elif atype == "multi_head_multi_res_loc":
        att = AttMultiHeadMultiResLoc(
            eprojs, dunits, aheads, adim, adim, aconv_chans, aconv_filts, han_mode
        )
    else:
        # previously an unknown atype fell through to `return att` and crashed
        # with UnboundLocalError; fail fast with a clear message instead
        raise ValueError("Unknown attention type: {}".format(atype))
    return att
def att_to_numpy(att_ws, att):
    """Converts attention weights to a numpy array given the attention

    :param list att_ws: The attention weights
    :param torch.nn.Module att: The attention
    :rtype: np.ndarray
    :return: The numpy array of the attention weights
    """
    # Build a (B, Lmax, Tmax) tensor; how each decoder-step entry is
    # interpreted depends on the attention type that produced it.
    if isinstance(att, AttLoc2D):
        # each entry concatenates recent attentions; keep only the newest one
        stacked = torch.stack([weights[:, -1] for weights in att_ws], dim=1)
    elif isinstance(att, (AttCov, AttCovLoc)):
        # each entry is the list of all attentions so far; take the latest
        stacked = torch.stack(
            [weights[step] for step, weights in enumerate(att_ws)], dim=1
        )
    elif isinstance(att, AttLocRec):
        # each entry is a (attention, hidden state) tuple; keep the attention
        stacked = torch.stack([weights[0] for weights in att_ws], dim=1)
    elif isinstance(
        att,
        (AttMultiHeadDot, AttMultiHeadAdd, AttMultiHeadLoc, AttMultiHeadMultiResLoc),
    ):
        # each entry is a per-head list; stack per head, then stack the heads
        n_heads = len(att_ws[0])
        per_head = [
            torch.stack([weights[h] for weights in att_ws], dim=1)
            for h in range(n_heads)
        ]
        stacked = torch.stack(per_head, dim=1)
    else:
        # plain case: one attention tensor per decoder step
        stacked = torch.stack(att_ws, dim=1)
    return stacked.cpu().numpy()
| 37.034273
| 88
| 0.602418
| 9,843
| 66,995
| 3.884182
| 0.039114
| 0.028719
| 0.020506
| 0.017001
| 0.880048
| 0.863936
| 0.850806
| 0.841494
| 0.833726
| 0.828076
| 0
| 0.010244
| 0.306426
| 66,995
| 1,808
| 89
| 37.054757
| 0.812551
| 0.332055
| 0
| 0.769231
| 0
| 0
| 0.007568
| 0.000566
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045365
| false
| 0
| 0.005917
| 0
| 0.083826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ac58e94f9288ff934ad1dd08405c74834fc0bdc
| 23,407
|
py
|
Python
|
tests/test_client_ws_functional.py
|
Qix-/aiohttp
|
aee067dccad3dc0e79778a1b213105f20bf39baf
|
[
"Apache-2.0"
] | 1
|
2021-04-08T19:17:09.000Z
|
2021-04-08T19:17:09.000Z
|
tests/test_client_ws_functional.py
|
Qix-/aiohttp
|
aee067dccad3dc0e79778a1b213105f20bf39baf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client_ws_functional.py
|
Qix-/aiohttp
|
aee067dccad3dc0e79778a1b213105f20bf39baf
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import async_timeout
import pytest
import aiohttp
from aiohttp import hdrs, web
from aiohttp.client_ws import ClientWSTimeout
@pytest.fixture
def ceil(mocker):
    """Patch aiohttp.helpers.ceil to an identity function for deterministic timing."""
    def _identity(val):
        return val

    mocker.patch('aiohttp.helpers.ceil').side_effect = _identity
async def test_send_recv_text(aiohttp_client) -> None:
    """Echo a text frame through the server and check socket teardown on close."""
    async def echo(request):
        server_ws = web.WebSocketResponse()
        await server_ws.prepare(request)
        question = await server_ws.receive_str()
        await server_ws.send_str(question + '/answer')
        await server_ws.close()
        return server_ws

    app = web.Application()
    app.router.add_route('GET', '/', echo)
    client = await aiohttp_client(app)

    ws = await client.ws_connect('/')
    await ws.send_str('ask')
    assert ws.get_extra_info('socket') is not None
    reply = await ws.receive_str()
    assert reply == 'ask/answer'
    await ws.close()
    assert ws.get_extra_info('socket') is None
async def test_send_recv_bytes_bad_type(aiohttp_client) -> None:
    """Server sends a text frame; client receive_bytes() must raise TypeError."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_str()
        await ws.send_str(msg+'/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_str('ask')
    with pytest.raises(TypeError):
        await resp.receive_bytes()
    await resp.close()
async def test_send_recv_bytes(aiohttp_client) -> None:
    """Round-trip a binary frame through the echo server."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_bytes()
        await ws.send_bytes(msg+b'/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_bytes(b'ask')
    data = await resp.receive_bytes()
    assert data == b'ask/answer'
    await resp.close()
async def test_send_recv_text_bad_type(aiohttp_client) -> None:
    """Server sends a binary frame; client receive_str() must raise TypeError."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_bytes()
        await ws.send_bytes(msg+b'/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_bytes(b'ask')
    with pytest.raises(TypeError):
        await resp.receive_str()
    await resp.close()
async def test_send_recv_json(aiohttp_client) -> None:
    """Round-trip a JSON payload via send_json()/receive_json()."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        data = await ws.receive_json()
        await ws.send_json({'response': data['request']})
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    payload = {'request': 'test'}
    await resp.send_json(payload)
    data = await resp.receive_json()
    assert data['response'] == payload['request']
    await resp.close()
async def test_ping_pong(aiohttp_client) -> None:
    """With autoping enabled, a server PING is answered transparently by the client."""
    loop = asyncio.get_event_loop()
    closed = loop.create_future()
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_bytes()
        await ws.ping()
        await ws.send_bytes(msg+b'/answer')
        try:
            await ws.close()
        finally:
            # signal the test body that the server side has finished closing
            closed.set_result(1)
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.ping()
    await resp.send_bytes(b'ask')
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.BINARY
    assert msg.data == b'ask/answer'
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSE
    await resp.close()
    await closed
async def test_ping_pong_manual(aiohttp_client) -> None:
    """With autoping=False the client sees PONG/PING frames and must pong manually."""
    loop = asyncio.get_event_loop()
    closed = loop.create_future()
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_bytes()
        await ws.ping()
        await ws.send_bytes(msg+b'/answer')
        try:
            await ws.close()
        finally:
            closed.set_result(1)
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', autoping=False)
    await resp.ping()
    await resp.send_bytes(b'ask')
    # PONG answers the client's ping; PING is the server's own ping
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.PONG
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.PING
    await resp.pong()
    msg = await resp.receive()
    assert msg.data == b'ask/answer'
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSE
    await closed
async def test_close(aiohttp_client) -> None:
    """Client-initiated close completes with code 1000 and subsequent CLOSED state."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_bytes()
        await ws.send_str('test')
        await ws.receive()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_bytes(b'ask')
    closed = await resp.close()
    assert closed
    assert resp.closed
    assert resp.close_code == 1000
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSED
async def test_concurrent_close(aiohttp_client) -> None:
    """Closing the client ws from inside the server handler yields CLOSING then CLOSED."""
    client_ws = None
    async def handler(request):
        nonlocal client_ws
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_bytes()
        await ws.send_str('test')
        # close the *client* side while the handler is still running
        await client_ws.close()
        msg = await ws.receive()
        assert msg.type == aiohttp.WSMsgType.CLOSE
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    ws = client_ws = await client.ws_connect('/')
    await ws.send_bytes(b'ask')
    msg = await ws.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSING
    await asyncio.sleep(0.01)
    msg = await ws.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSED
async def test_close_from_server(aiohttp_client) -> None:
    """A server-initiated close surfaces as CLOSE then CLOSED on the client."""
    loop = asyncio.get_event_loop()
    closed = loop.create_future()
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        try:
            await ws.receive_bytes()
            await ws.close()
        finally:
            closed.set_result(1)
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_bytes(b'ask')
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSE
    assert resp.closed
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSED
    await closed
async def test_close_manual(aiohttp_client) -> None:
    """With autoclose=False, the CLOSE frame is delivered and close() is explicit."""
    loop = asyncio.get_event_loop()
    closed = loop.create_future()
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_bytes()
        await ws.send_str('test')
        try:
            await ws.close()
        finally:
            closed.set_result(1)
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', autoclose=False)
    await resp.send_bytes(b'ask')
    msg = await resp.receive()
    assert msg.data == 'test'
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.CLOSE
    assert msg.data == 1000
    assert msg.extra == ''
    # autoclose is off: the connection stays open until closed explicitly
    assert not resp.closed
    await resp.close()
    await closed
    assert resp.closed
async def test_close_timeout_sock_close_read(aiohttp_client) -> None:
    """close() times out (ClientWSTimeout.ws_close) when the server never closes."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_bytes()
        await ws.send_str('test')
        # never close: force the client-side close timeout to trigger
        await asyncio.sleep(1)
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    timeout = ClientWSTimeout(ws_close=0.2)
    resp = await client.ws_connect('/', timeout=timeout, autoclose=False)
    await resp.send_bytes(b'ask')
    msg = await resp.receive()
    assert msg.data == 'test'
    assert msg.type == aiohttp.WSMsgType.TEXT
    msg = await resp.close()
    assert resp.closed
    assert isinstance(resp.exception(), asyncio.TimeoutError)
async def test_close_timeout_deprecated(aiohttp_client) -> None:
    """Passing a float `timeout` warns of deprecation but still enforces close timeout."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_bytes()
        await ws.send_str('test')
        await asyncio.sleep(1)
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    with pytest.warns(DeprecationWarning,
                      match="parameter 'timeout' of type 'float' "
                            "is deprecated, please use "
                            r"'timeout=ClientWSTimeout\(ws_close=...\)'"
                      ):
        resp = await client.ws_connect('/', timeout=0.2, autoclose=False)
    await resp.send_bytes(b'ask')
    msg = await resp.receive()
    assert msg.data == 'test'
    assert msg.type == aiohttp.WSMsgType.TEXT
    msg = await resp.close()
    assert resp.closed
    assert isinstance(resp.exception(), asyncio.TimeoutError)
async def test_close_cancel(aiohttp_client) -> None:
    """Cancelling an in-flight close() still leaves the connection closed, no exception."""
    loop = asyncio.get_event_loop()
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_bytes()
        await ws.send_str('test')
        # hang so the client's close() is still pending when it gets cancelled
        await asyncio.sleep(10)
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', autoclose=False)
    await resp.send_bytes(b'ask')
    text = await resp.receive()
    assert text.data == 'test'
    t = loop.create_task(resp.close())
    await asyncio.sleep(0.1)
    t.cancel()
    await asyncio.sleep(0.1)
    assert resp.closed
    assert resp.exception() is None
async def test_override_default_headers(aiohttp_client) -> None:
    """Caller-supplied headers override the default handshake headers."""
    async def handler(request):
        # the overridden version must reach the server intact
        assert request.headers[hdrs.SEC_WEBSOCKET_VERSION] == '8'
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.send_str('answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    headers = {hdrs.SEC_WEBSOCKET_VERSION: '8'}
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', headers=headers)
    msg = await resp.receive()
    assert msg.data == 'answer'
    await resp.close()
async def test_additional_headers(aiohttp_client) -> None:
    """Extra caller headers are sent alongside the websocket handshake."""
    async def handler(request):
        assert request.headers['x-hdr'] == 'xtra'
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.send_str('answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', headers={'x-hdr': 'xtra'})
    msg = await resp.receive()
    assert msg.data == 'answer'
    await resp.close()
async def test_recv_protocol_error(aiohttp_client) -> None:
    """Garbage bytes on the wire surface as an ERROR message with WebSocketError data."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_str()
        # write raw non-websocket bytes to provoke a protocol error on the client
        ws._writer.transport.write(b'01234' * 100)
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_str('ask')
    msg = await resp.receive()
    assert msg.type == aiohttp.WSMsgType.ERROR
    assert type(msg.data) is aiohttp.WebSocketError
    assert msg.data.code == aiohttp.WSCloseCode.PROTOCOL_ERROR
    assert str(msg.data) == 'Received frame with non-zero reserved bits'
    assert msg.extra is None
    await resp.close()
async def test_recv_timeout(aiohttp_client) -> None:
    """An external async_timeout around receive() raises asyncio.TimeoutError."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive_str()
        # delay longer than the client-side timeout below
        await asyncio.sleep(0.1)
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    await resp.send_str('ask')
    with pytest.raises(asyncio.TimeoutError):
        with async_timeout.timeout(0.01):
            await resp.receive()
    await resp.close()
async def test_receive_timeout_sock_read(aiohttp_client) -> None:
    """ClientWSTimeout(ws_receive=...) makes receive() time out when idle."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive()
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    receive_timeout = ClientWSTimeout(ws_receive=0.1)
    resp = await client.ws_connect('/', timeout=receive_timeout)
    with pytest.raises(asyncio.TimeoutError):
        await resp.receive(timeout=0.05)
    await resp.close()
async def test_receive_timeout_deprecation(aiohttp_client) -> None:
    """Float `receive_timeout` warns of deprecation but still applies the timeout."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive()
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    with pytest.warns(
            DeprecationWarning,
            match="float parameter 'receive_timeout' "
                  "is deprecated, please use parameter "
                  r"'timeout=ClientWSTimeout\(ws_receive=...\)'"
    ):
        resp = await client.ws_connect('/', receive_timeout=0.1)
    with pytest.raises(asyncio.TimeoutError):
        await resp.receive(timeout=0.05)
    await resp.close()
async def test_custom_receive_timeout(aiohttp_client) -> None:
    """A per-call receive(timeout) raises asyncio.TimeoutError when nothing arrives."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        await ws.receive()
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    with pytest.raises(asyncio.TimeoutError):
        await resp.receive(0.05)
    await resp.close()
async def test_heartbeat(aiohttp_client, ceil) -> None:
    """The client heartbeat sends PING frames the server can observe."""
    ping_received = False
    async def handler(request):
        nonlocal ping_received
        # autoping off so the PING frame is visible to the handler
        ws = web.WebSocketResponse(autoping=False)
        await ws.prepare(request)
        msg = await ws.receive()
        if msg.type == aiohttp.WSMsgType.PING:
            ping_received = True
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', heartbeat=0.01)
    await asyncio.sleep(0.1)
    await resp.receive()
    await resp.close()
    assert ping_received
async def test_heartbeat_no_pong(aiohttp_client, ceil) -> None:
    """When the server never pongs, the heartbeat PING is still sent."""
    ping_received = False
    async def handler(request):
        nonlocal ping_received
        # autoping off: deliberately never answer the client's PING
        ws = web.WebSocketResponse(autoping=False)
        await ws.prepare(request)
        msg = await ws.receive()
        if msg.type == aiohttp.WSMsgType.PING:
            ping_received = True
        await ws.receive()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', heartbeat=0.05)
    await resp.receive()
    await resp.receive()
    assert ping_received
async def test_send_recv_compress(aiohttp_client) -> None:
    """Round-trip text with permessage-deflate (compress=15) negotiated."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_str()
        await ws.send_str(msg+'/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', compress=15)
    await resp.send_str('ask')
    assert resp.compress == 15
    data = await resp.receive_str()
    assert data == 'ask/answer'
    await resp.close()
    assert resp.get_extra_info('socket') is None
async def test_send_recv_compress_wbits(aiohttp_client) -> None:
    """Requesting compress=9 still negotiates wbits 15 with this server."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_str()
        await ws.send_str(msg+'/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/', compress=9)
    await resp.send_str('ask')
    # Client indicates supports wbits 15
    # Server supports wbit 15 for decode
    assert resp.compress == 15
    data = await resp.receive_str()
    assert data == 'ask/answer'
    await resp.close()
    assert resp.get_extra_info('socket') is None
async def test_send_recv_compress_wbit_error(aiohttp_client) -> None:
    """An out-of-range compress value (wbits=1) raises ValueError on connect."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive_bytes()
        await ws.send_bytes(msg+b'/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    with pytest.raises(ValueError):
        await client.ws_connect('/', compress=1)
async def test_ws_client_async_for(aiohttp_client) -> None:
    """`async for` over the client ws yields each message and stops at close."""
    items = ['q1', 'q2', 'q3']
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        for i in items:
            await ws.send_str(i)
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)
    resp = await client.ws_connect('/')
    it = iter(items)
    async for msg in resp:
        assert msg.data == next(it)
    # every expected item must have been consumed
    with pytest.raises(StopIteration):
        next(it)
    assert resp.closed
async def test_ws_async_with(aiohttp_server) -> None:
    """`async with client.ws_connect(...)` closes the websocket on exit."""
    async def handler(request):
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        msg = await ws.receive()
        await ws.send_str(msg.data + '/answer')
        await ws.close()
        return ws
    app = web.Application()
    app.router.add_route('GET', '/', handler)
    server = await aiohttp_server(app)
    async with aiohttp.ClientSession() as client:
        async with client.ws_connect(server.make_url('/')) as ws:
            await ws.send_str('request')
            msg = await ws.receive()
            assert msg.data == 'request/answer'
        assert ws.closed
async def test_ws_async_with_send(aiohttp_server) -> None:
    """The send_xxx methods must return awaitable objects usable in-line."""

    async def handler(request):
        server_ws = web.WebSocketResponse()
        await server_ws.prepare(request)
        incoming = await server_ws.receive()
        await server_ws.send_str(incoming.data + '/answer')
        await server_ws.close()
        return server_ws

    app = web.Application()
    app.router.add_route('GET', '/', handler)
    server = await aiohttp_server(app)

    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(server.make_url('/')) as ws:
            await ws.send_str('request')
            reply = await ws.receive()
            assert reply.data == 'request/answer'
        assert ws.closed
async def test_ws_async_with_shortcut(aiohttp_server) -> None:
    """Same round-trip as test_ws_async_with, via a prebuilt URL value."""

    async def handler(request):
        server_ws = web.WebSocketResponse()
        await server_ws.prepare(request)
        incoming = await server_ws.receive()
        await server_ws.send_str(incoming.data + '/answer')
        await server_ws.close()
        return server_ws

    app = web.Application()
    app.router.add_route('GET', '/', handler)
    server = await aiohttp_server(app)
    url = server.make_url('/')

    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            await ws.send_str('request')
            reply = await ws.receive()
            assert reply.data == 'request/answer'
        assert ws.closed
async def test_closed_async_for(aiohttp_client) -> None:
    """Client-side ``async for`` terminates once the connection is closed."""
    loop = asyncio.get_event_loop()
    closed = loop.create_future()

    async def handler(request):
        server_ws = web.WebSocketResponse()
        await server_ws.prepare(request)
        try:
            await server_ws.send_bytes(b'started')
            await server_ws.receive_bytes()
        finally:
            # Signal the test body that the handler has finished.
            closed.set_result(1)
        return server_ws

    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)

    ws = await client.ws_connect('/')
    messages = []
    async for msg in ws:
        messages.append(msg)
        if msg.data == b'started':
            await ws.send_bytes(b'ask')
            await ws.close()

    # Only the initial b'started' frame should have been iterated.
    assert len(messages) == 1
    assert messages[0].type == aiohttp.WSMsgType.BINARY
    assert messages[0].data == b'started'
    assert ws.closed
    await closed
async def test_peer_connection_lost(aiohttp_client) -> None:
    """A dropped transport surfaces as a CLOSED message on the client."""

    async def handler(request):
        server_ws = web.WebSocketResponse()
        await server_ws.prepare(request)
        assert await server_ws.receive_str() == 'ask'
        await server_ws.send_str('answer')
        # Drop the connection from the server side without a close frame.
        request.transport.close()
        await asyncio.sleep(10)
        return server_ws

    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)

    ws = await client.ws_connect('/')
    await ws.send_str('ask')
    assert 'answer' == await ws.receive_str()
    closing = await ws.receive()
    assert closing.type == aiohttp.WSMsgType.CLOSED
    await ws.close()
async def test_peer_connection_lost_iter(aiohttp_client) -> None:
    """A dropped transport simply terminates ``async for`` iteration."""

    async def handler(request):
        server_ws = web.WebSocketResponse()
        await server_ws.prepare(request)
        assert await server_ws.receive_str() == 'ask'
        await server_ws.send_str('answer')
        # Drop the connection from the server side without a close frame.
        request.transport.close()
        await asyncio.sleep(100)
        return server_ws

    app = web.Application()
    app.router.add_route('GET', '/', handler)
    client = await aiohttp_client(app)

    ws = await client.ws_connect('/')
    await ws.send_str('ask')
    async for received in ws:
        assert 'answer' == received.data
    await ws.close()
| 26.182327
| 73
| 0.632247
| 2,926
| 23,407
| 4.927888
| 0.061859
| 0.061169
| 0.036896
| 0.05035
| 0.871142
| 0.839379
| 0.793398
| 0.783758
| 0.760039
| 0.749636
| 0
| 0.004965
| 0.251378
| 23,407
| 893
| 74
| 26.211646
| 0.817896
| 0.005084
| 0
| 0.8
| 0
| 0
| 0.039555
| 0.003608
| 0
| 0
| 0
| 0
| 0.113178
| 1
| 0.003101
| false
| 0
| 0.009302
| 0.00155
| 0.063566
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0accd470a20893224c207d47f04ecbce13a8da79
| 4,756
|
py
|
Python
|
bert/graph_js.py
|
iitmnlp/BERT-Analysis-RCQA
|
8cf405e1192038f4a22ccc92330564aa1ad2341d
|
[
"MIT"
] | 9
|
2020-11-17T12:43:13.000Z
|
2022-02-10T08:58:01.000Z
|
bert/graph_js.py
|
iitmnlp/BERT-Analysis-RCQA
|
8cf405e1192038f4a22ccc92330564aa1ad2341d
|
[
"MIT"
] | null | null | null |
bert/graph_js.py
|
iitmnlp/BERT-Analysis-RCQA
|
8cf405e1192038f4a22ccc92330564aa1ad2341d
|
[
"MIT"
] | 1
|
2021-04-06T12:14:55.000Z
|
2021-04-06T12:14:55.000Z
|
'''
-------------------------------
HEATMAPS ETC FOR JENSON SHANNON
-------------------------------
'''
import numpy as np
import os
import matplotlib.pyplot as plt
# --- configuration ------------------------------------------------------
num=12 # number of BERT layers (heatmaps are num x num)
ig_dir_name='ig_scores' # directory holding the Integrated-Gradients .npy files
dataset_name='squad' # dataset tag used in output filenames: 'squad' or 'duorc'
num_to_remove=2 # how many top IG scores were removed for the "_rem" variant
num_to_keep=2 # how many top IG scores were retained for the "_keep" variant
# normal JSD graph
# Load the per-example Jenson-Shannon divergence matrices and average them
# into a single layer-vs-layer matrix before plotting.
q1=np.load(ig_dir_name+'/jenson_shannon_matrix.npy')
print(len(q1))
print(q1[0].shape)
qq=[np.average(q1,axis=0)]  # single averaged matrix; loop kept for optional per-example plots
for i in range(len(qq)) :
    mat=qq[i]
    if not os.path.exists('js_heatmaps') :
        os.makedirs('js_heatmaps')
    path_to_save=os.path.join('js_heatmaps',dataset_name+'_normal_avg_1000')#,str(i))
    plt.figure(figsize=(6,6))
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)
    font = {'family' : 'normal',
    'weight' : 'normal',
    'size' : 12}
    plt.rc('font', **font)
    plt.rcParams["axes.grid"] = False
    ticks = [str(j) for j in range(num)]#sent + ['<eos>']
    #sns.heatmap(attns, annot=True, fmt="f", annot_kws={"size": 8}, center=0.5)
    #extent = (0, mat.shape[1], mat.shape[0], 0)
    plt.imshow(mat, interpolation='none', cmap='Blues',vmin=0,vmax=1)#,extent=extent)
    plt.xticks(range(len(ticks)), ticks, rotation=90,fontsize=12);
    plt.yticks(range(len(ticks)), ticks,fontsize=12);
    #plt.title('BERT - SQuAD Integrated Gradients JSD',fontsize=18)
    plt.title('BERT - '+dataset_name+' Integrated Gradients JSD',fontsize=18)
    # Annotate every heatmap cell with its rounded JSD value.
    for j in range(num) :
        for k in range(num) :
            text=plt.text(k,j,round(mat[j,k],2),ha="center",va="center",color="black",fontsize=11.5)
    #plt.colorbar(label='attention weights')
    #plt.grid(which='both',color='k',linestyle='-',linewidth='1')
    plt.tight_layout()
    plt.savefig(path_to_save)
    plt.close()
# Same heatmap for the "top-k removed" variant: JSD computed after removing
# the top `num_to_remove` IG scores from each example.
q1=np.load(ig_dir_name+'/jenson_shannon_matrix_rem'+str(num_to_remove)+'.npy')
print(len(q1))
print(q1[0].shape)
qq=[np.average(q1,axis=0)]
for i in range(len(qq)) :
    mat=qq[i]
    path_to_save=os.path.join('js_heatmaps',dataset_name+'_rem_'+str(num_to_remove)+'_avg_1000')#,str(i))
    plt.figure(figsize=(6,6))
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)
    font = {'family' : 'normal',
    'weight' : 'normal',
    'size' : 12}
    plt.rc('font', **font)
    plt.rcParams["axes.grid"] = False
    ticks = [str(j) for j in range(num)]#sent + ['<eos>']
    #sns.heatmap(attns, annot=True, fmt="f", annot_kws={"size": 8}, center=0.5)
    #extent = (0, mat.shape[1], mat.shape[0], 0)
    plt.imshow(mat, interpolation='none', cmap='Blues',vmin=0,vmax=1)#,extent=extent)
    plt.xticks(range(len(ticks)), ticks, rotation=90,fontsize=12);
    plt.yticks(range(len(ticks)), ticks,fontsize=12);
    #plt.title('BERT - SQuAD Integrated Gradients JSD \n Top 5 Retained',fontsize=18)
    plt.title('BERT - '+dataset_name+' Integrated Gradients JSD \n Top '+str(num_to_remove)+' Removed',fontsize=18)
    #plt.title('BERT - DuoRC Integrated Gradients JSD',fontsize=18)
    # Annotate every heatmap cell with its rounded JSD value.
    for j in range(num) :
        for k in range(num) :
            text=plt.text(k,j,round(mat[j,k],2),ha="center",va="center",color="black",fontsize=11.5)
    #plt.colorbar(label='attention weights')
    #plt.grid(which='both',color='k',linestyle='-',linewidth='1')
    plt.tight_layout()
    plt.savefig(path_to_save)
    plt.close()
# Heatmap for the "top-k retained" variant: JSD computed after keeping only
# the top `num_to_keep` IG scores per example.
q1=np.load(ig_dir_name+'/jenson_shannon_matrix_keep'+str(num_to_keep)+'.npy')
print(len(q1))
print(q1[0].shape)
qq=[np.average(q1,axis=0)]
for i in range(len(qq)) :
    mat=qq[i]
    # BUG FIX: this previously saved to ..._rem_<num_to_keep>_avg_1000, which
    # collides with (and silently overwrites) the "removed" heatmap whenever
    # num_to_keep == num_to_remove (the default). Use a distinct '_keep_' name.
    path_to_save=os.path.join('js_heatmaps',dataset_name+'_keep_'+str(num_to_keep)+'_avg_1000')#,str(i))
    plt.figure(figsize=(6,6))
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)
    font = {'family' : 'normal',
    'weight' : 'normal',
    'size' : 12}
    plt.rc('font', **font)
    plt.rcParams["axes.grid"] = False
    ticks = [str(j) for j in range(num)]
    plt.imshow(mat, interpolation='none', cmap='Blues',vmin=0,vmax=1)
    plt.xticks(range(len(ticks)), ticks, rotation=90,fontsize=12)
    plt.yticks(range(len(ticks)), ticks,fontsize=12)
    # BUG FIX: the title said 'Removed' even though this is the keep variant.
    plt.title('BERT - '+dataset_name+' Integrated Gradients JSD \n Top '+str(num_to_keep)+' Retained',fontsize=18)
    # Annotate every heatmap cell with its rounded JSD value.
    for j in range(num) :
        for k in range(num) :
            plt.text(k,j,round(mat[j,k],2),ha="center",va="center",color="black",fontsize=11.5)
    plt.tight_layout()
    plt.savefig(path_to_save)
    plt.close()
| 33.492958
| 112
| 0.675568
| 789
| 4,756
| 3.978454
| 0.183777
| 0.02676
| 0.028672
| 0.021026
| 0.882765
| 0.875438
| 0.863332
| 0.863332
| 0.863332
| 0.851864
| 0
| 0.032159
| 0.110807
| 4,756
| 141
| 113
| 33.730496
| 0.710097
| 0.303616
| 0
| 0.784091
| 0
| 0
| 0.171841
| 0.024285
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034091
| 0
| 0.034091
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ae53d098be77e6e213dafbb75e0f65564c029a3
| 2,039
|
py
|
Python
|
majora2/migrations/0054_auto_20200411_1013.py
|
CLIMB-COVID/majora2
|
46ea1809a61e4a768f8cbacaf54cba5c4d82e1f2
|
[
"MIT"
] | 29
|
2019-04-04T18:03:43.000Z
|
2022-02-09T12:47:30.000Z
|
majora2/migrations/0054_auto_20200411_1013.py
|
CLIMB-COVID/majora2
|
46ea1809a61e4a768f8cbacaf54cba5c4d82e1f2
|
[
"MIT"
] | 66
|
2019-04-02T16:18:40.000Z
|
2022-01-25T16:15:42.000Z
|
majora2/migrations/0054_auto_20200411_1013.py
|
CLIMB-COVID/majora2
|
46ea1809a61e4a768f8cbacaf54cba5c4d82e1f2
|
[
"MIT"
] | 6
|
2020-04-10T14:15:32.000Z
|
2022-01-18T13:08:35.000Z
|
# Generated by Django 2.2.10 on 2020-04-11 10:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint six FK fields on MajoraArtifactProcessRecord with explicit
    related_names (bridge/before/after process) while keeping them optional."""

    dependencies = [
        ('majora2', '0053_auto_20200411_0952'),
    ]

    # All six alterations share the same shape; build them from a table of
    # (field name, related_name, target model) instead of repeating the call.
    operations = [
        migrations.AlterField(
            model_name='majoraartifactprocessrecord',
            name=field_name,
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name=related_name,
                to=target,
            ),
        )
        for field_name, related_name, target in (
            ('bridge_artifact', 'bridge_process', 'majora2.MajoraArtifact'),
            ('bridge_group', 'bridge_process', 'majora2.MajoraArtifactGroup'),
            ('in_artifact', 'before_process', 'majora2.MajoraArtifact'),
            ('in_group', 'before_process', 'majora2.MajoraArtifactGroup'),
            ('out_artifact', 'after_process', 'majora2.MajoraArtifact'),
            ('out_group', 'after_process', 'majora2.MajoraArtifactGroup'),
        )
    ]
| 45.311111
| 169
| 0.675821
| 208
| 2,039
| 6.466346
| 0.230769
| 0.047584
| 0.072862
| 0.114498
| 0.852788
| 0.852788
| 0.852788
| 0.791822
| 0.791822
| 0.791822
| 0
| 0.024119
| 0.206964
| 2,039
| 44
| 170
| 46.340909
| 0.807669
| 0.02256
| 0
| 0.473684
| 1
| 0
| 0.245103
| 0.16675
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c151a5d65924d9c7230cb47d3bc5af7af840cf1
| 3,562
|
py
|
Python
|
src/blockcipher/modules/sbox.py
|
agent-whisper/3way_cipher
|
d02e4caae92e3e704f994a2b9d3115d3a83a1969
|
[
"MIT"
] | null | null | null |
src/blockcipher/modules/sbox.py
|
agent-whisper/3way_cipher
|
d02e4caae92e3e704f994a2b9d3115d3a83a1969
|
[
"MIT"
] | null | null | null |
src/blockcipher/modules/sbox.py
|
agent-whisper/3way_cipher
|
d02e4caae92e3e704f994a2b9d3115d3a83a1969
|
[
"MIT"
] | null | null | null |
from src.utilities import bytes as byte
SBOX = [['63', '7c', '77', '7b', 'f2', '6b', '6f', 'c5', '30', '01', '67', '2b', 'fe', 'd7', 'ab', '76'], ['ca', '82', 'c9', '7d', 'fa', '59', '47', 'f0', 'ad', 'd4', 'a2', 'af', '9c', 'a4', '72', 'c0'], ['b7', 'fd', '93', '26', '36', '3f', 'f7', 'cc', '34', 'a5', 'e5', 'f1', '71', 'd8', '31', '15'], ['04', 'c7', '23', 'c3', '18', '96', '05', '9a', '07', '12', '80', 'e2', 'eb', '27', 'b2', '75'], ['09', '83', '2c', '1a', '1b', '6e', '5a', 'a0', '52', '3b', 'd6', 'b3', '29', 'e3', '2f', '84'], ['53', 'd1', '00', 'ed', '20', 'fc', 'b1', '5b', '6a', 'cb', 'be', '39', '4a', '4c', '58', 'cf'], ['d0', 'ef', 'aa', 'fb', '43', '4d', '33', '85', '45', 'f9', '02', '7f', '50', '3c', '9f', 'a8'], ['51', 'a3', '40', '8f', '92', '9d', '38', 'f5', 'bc', 'b6', 'da', '21', '10', 'ff', 'f3', 'd2'], ['cd', '0c', '13', 'ec', '5f', '97', '44', '17', 'c4', 'a7', '7e', '3d', '64', '5d', '19', '73'], ['60', '81', '4f', 'dc', '22', '2a', '90', '88', '46', 'ee', 'b8', '14', 'de', '5e', '0b', 'db'], ['e0', '32', '3a', '0a', '49', '06', '24', '5c', 'c2', 'd3', 'ac', '62', '91', '95', 'e4', '79'], ['e7', 'c8', '37', '6d', '8d', 'd5', '4e', 'a9', '6c', '56', 'f4', 'ea', '65', '7a', 'ae', '08'], ['ba', '78', '25', '2e', '1c', 'a6', 'b4', 'c6', 'e8', 'dd', '74', '1f', '4b', 'bd', '8b', '8a'], ['70', '3e', 'b5', '66', '48', '03', 'f6', '0e', '61', '35', '57', 'b9', '86', 'c1', '1d', '9e'], ['e1', 'f8', '98', '11', '69', 'd9', '8e', '94', '9b', '1e', '87', 'e9', 'ce', '55', '28', 'df'], ['8c', 'a1', '89', '0d', 'bf', 'e6', '42', '68', '41', '99', '2d', '0f', 'b0', '54', 'bb', '16']]
def substitute(data):
    """Apply the AES S-box to every byte of *data*.

    :param data: iterable of ints in ``[0, 255]`` (e.g. a ``bytes`` object).
    :return: the substituted bytes, merged via ``byte.merge_bytes``.
    """
    new_data = []
    # Iterate the byte values directly instead of indexing with range(len(...)).
    for value in data:
        # High nibble selects the S-box row, low nibble the column.
        row, column = divmod(value, 16)
        new_data.append(bytes([int(SBOX[row][column], 16)]))
        # debug
        # print(hex(value), SBOX[row][column])
    return byte.merge_bytes(new_data)
# def inv_substitute(data):
# pass
# [['63', '7c', '77', '7b', 'f2', '6b', '6f', 'c5', '30', '01', '67', '2b', 'fe', 'd7', 'ab', '76'],
# ['ca', '82', 'c9', '7d', 'fa', '59', '47', 'f0', 'ad', 'd4', 'a2', 'af', '9c', 'a4', '72', 'c0'],
# ['b7', 'fd', '93', '26', '36', '3f', 'f7', 'cc', '34', 'a5', 'e5', 'f1', '71', 'd8', '31', '15'],
# ['04', 'c7', '23', 'c3', '18', '96', '05', '9a', '07', '12', '80', 'e2', 'eb', '27', 'b2', '75'],
# ['09', '83', '2c', '1a', '1b', '6e', '5a', 'a0', '52', '3b', 'd6', 'b3', '29', 'e3', '2f', '84'],
# ['53', 'd1', '00', 'ed', '20', 'fc', 'b1', '5b', '6a', 'cb', 'be', '39', '4a', '4c', '58', 'cf'],
# ['d0', 'ef', 'aa', 'fb', '43', '4d', '33', '85', '45', 'f9', '02', '7f', '50', '3c', '9f', 'a8'],
# ['51', 'a3', '40', '8f', '92', '9d', '38', 'f5', 'bc', 'b6', 'da', '21', '10', 'ff', 'f3', 'd2'],
# ['cd', '0c', '13', 'ec', '5f', '97', '44', '17', 'c4', 'a7', '7e', '3d', '64', '5d', '19', '73'],
# ['60', '81', '4f', 'dc', '22', '2a', '90', '88', '46', 'ee', 'b8', '14', 'de', '5e', '0b', 'db'],
# ['e0', '32', '3a', '0a', '49', '06', '24', '5c', 'c2', 'd3', 'ac', '62', '91', '95', 'e4', '79'],
# ['e7', 'c8', '37', '6d', '8d', 'd5', '4e', 'a9', '6c', '56', 'f4', 'ea', '65', '7a', 'ae', '08'],
# ['ba', '78', '25', '2e', '1c', 'a6', 'b4', 'c6', 'e8', 'dd', '74', '1f', '4b', 'bd', '8b', '8a'],
# ['70', '3e', 'b5', '66', '48', '03', 'f6', '0e', '61', '35', '57', 'b9', '86', 'c1', '1d', '9e'],
# ['e1', 'f8', '98', '11', '69', 'd9', '8e', '94', '9b', '1e', '87', 'e9', 'ce', '55', '28', 'df'],
# ['8c', 'a1', '89', '0d', 'bf', 'e6', '42', '68', '41', '99', '2d', '0f', 'b0', '54', 'bb', '16']]
| 107.939394
| 1,575
| 0.358787
| 572
| 3,562
| 2.218531
| 0.498252
| 0.022065
| 0.026005
| 0.012608
| 0.806935
| 0.806935
| 0.806935
| 0.806935
| 0.806935
| 0.806935
| 0
| 0.218877
| 0.170129
| 3,562
| 33
| 1,576
| 107.939394
| 0.210419
| 0.468838
| 0
| 0
| 0
| 0
| 0.273504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
7c1a5dd3b4a297a19e779429c94483bf2240c3f0
| 89,980
|
py
|
Python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_01/operations/_diagnostics_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_01/operations/_diagnostics_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_01/operations/_diagnostics_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DiagnosticsOperations(object):
"""DiagnosticsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    # NOTE: generated by AutoRest (see file header) — pagination is implemented
    # via the three local closures below; keep edits in the generator if possible.
    def list_hosting_environment_detector_responses(
        self,
        resource_group_name,  # type: str
        name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.DetectorResponseCollection"]
        """List Hosting Environment Detector Responses.

        Description for List Hosting Environment Detector Responses.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Site Name.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DetectorResponseCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DetectorResponseCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponseCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the GET request; first page from the metadata URL template,
            # subsequent pages from the service-provided next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_hosting_environment_detector_responses.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'name': self._serialize.url("name", name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, iterator of items).
            deserialized = self._deserialize('DetectorResponseCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    # URL template consumed by prepare_request above.
    list_hosting_environment_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors'}  # type: ignore
    # NOTE: generated by AutoRest (see file header); single-resource GET, no paging.
    def get_hosting_environment_detector_response(
        self,
        resource_group_name,  # type: str
        name,  # type: str
        detector_name,  # type: str
        start_time=None,  # type: Optional[datetime.datetime]
        end_time=None,  # type: Optional[datetime.datetime]
        time_grain=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DetectorResponse"
        """Get Hosting Environment Detector Response.

        Description for Get Hosting Environment Detector Response.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: App Service Environment Name.
        :type name: str
        :param detector_name: Detector Resource Name.
        :type detector_name: str
        :param start_time: Start Time.
        :type start_time: ~datetime.datetime
        :param end_time: End Time.
        :type end_time: ~datetime.datetime
        :param time_grain: Time Grain.
        :type time_grain: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DetectorResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.DetectorResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"

        # Construct URL
        url = self.get_hosting_environment_detector_response.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'name': self._serialize.url("name", name, 'str'),
            'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (the three time filters are optional)
        query_parameters = {}  # type: Dict[str, Any]
        if start_time is not None:
            query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
        if end_time is not None:
            query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
        if time_grain is not None:
            query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DetectorResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    # URL template consumed above.
    get_hosting_environment_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/detectors/{detectorName}'}  # type: ignore
    # NOTE: generated by AutoRest (see file header) — same paging pattern as
    # list_hosting_environment_detector_responses, but scoped to a site.
    def list_site_detector_responses(
        self,
        resource_group_name,  # type: str
        site_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.DetectorResponseCollection"]
        """List Site Detector Responses.

        Description for List Site Detector Responses.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param site_name: Site Name.
        :type site_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DetectorResponseCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DetectorResponseCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponseCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page from the metadata URL template, later pages from next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_site_detector_responses.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'siteName': self._serialize.url("site_name", site_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, iterator of items).
            deserialized = self._deserialize('DetectorResponseCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    # URL template consumed by prepare_request above.
    list_site_detector_responses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors'}  # type: ignore
    # NOTE: generated by AutoRest (see file header); single-resource GET, no paging.
    def get_site_detector_response(
        self,
        resource_group_name,  # type: str
        site_name,  # type: str
        detector_name,  # type: str
        start_time=None,  # type: Optional[datetime.datetime]
        end_time=None,  # type: Optional[datetime.datetime]
        time_grain=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DetectorResponse"
        """Get site detector response.

        Description for Get site detector response.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param site_name: Site Name.
        :type site_name: str
        :param detector_name: Detector Resource Name.
        :type detector_name: str
        :param start_time: Start Time.
        :type start_time: ~datetime.datetime
        :param end_time: End Time.
        :type end_time: ~datetime.datetime
        :param time_grain: Time Grain.
        :type time_grain: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DetectorResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.DetectorResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01"
        accept = "application/json"

        # Construct URL
        url = self.get_site_detector_response.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'siteName': self._serialize.url("site_name", site_name, 'str'),
            'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (the three time filters are optional)
        query_parameters = {}  # type: Dict[str, Any]
        if start_time is not None:
            query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
        if end_time is not None:
            query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
        if time_grain is not None:
            query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DetectorResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    # URL template consumed above.
    get_site_detector_response.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/detectors/{detectorName}'}  # type: ignore
def list_site_diagnostic_categories(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DiagnosticCategoryCollection"]
    """Get Diagnostics Categories.

    Description for Get Diagnostics Categories.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticCategoryCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DiagnosticCategoryCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategoryCollection"]
    # Default mapping of HTTP status codes to azure-core exception types;
    # callers may extend or override it via the ``error_map`` keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the HTTP request for a single page. The first page is built
        # from this operation's URL template; continuation pages reuse the
        # server-supplied ``next_link`` verbatim (no extra query parameters).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_site_diagnostic_categories.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'siteName': self._serialize.url("site_name", site_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, iterator of items).
        deserialized = self._deserialize('DiagnosticCategoryCollection', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            # NOTE: for paged operations ``cls`` receives only the element list.
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising a typed error for any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_site_diagnostic_categories.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics'}  # type: ignore
def get_site_diagnostic_category(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DiagnosticCategory"
    """Get Diagnostics Category.

    Description for Get Diagnostics Category.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticCategory, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DiagnosticCategory
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategory"]
    # Map well-known failure codes onto azure-core exceptions; callers may
    # supply additional mappings through ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Substitute validated path parameters into the metadata URL template.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.get_site_diagnostic_category.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticCategory', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_site_diagnostic_category.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}'}  # type: ignore
def list_site_analyses(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DiagnosticAnalysisCollection"]
    """Get Site Analyses.

    Description for Get Site Analyses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticAnalysisCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DiagnosticAnalysisCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysisCollection"]
    # Default HTTP-status -> exception mapping, extensible via ``error_map``.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: built from the metadata URL template; later pages use
        # the server-supplied ``next_link`` unchanged.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_site_analyses.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'siteName': self._serialize.url("site_name", site_name, 'str'),
                'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, iterator of items).
        deserialized = self._deserialize('DiagnosticAnalysisCollection', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            # NOTE: for paged operations ``cls`` receives only the element list.
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising a typed error for any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_site_analyses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses'}  # type: ignore
def get_site_analysis(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    analysis_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.AnalysisDefinition"
    """Get Site Analysis.

    Description for Get Site Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param analysis_name: Analysis Name.
    :type analysis_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AnalysisDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.AnalysisDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AnalysisDefinition"]
    # Status-code -> exception mapping; extensible through ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Fill in the metadata URL template with validated path parameters.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.get_site_analysis.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('AnalysisDefinition', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}  # type: ignore
def execute_site_analysis(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    analysis_name,  # type: str
    start_time=None,  # type: Optional[datetime.datetime]
    end_time=None,  # type: Optional[datetime.datetime]
    time_grain=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DiagnosticAnalysis"
    """Execute Analysis.

    Description for Execute Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param analysis_name: Analysis Resource Name.
    :type analysis_name: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticAnalysis, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DiagnosticAnalysis
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysis"]
    # Status-code -> exception mapping; extensible through ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Fill in the metadata URL template with validated path parameters.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.execute_site_analysis.metadata['url'], **path_format_arguments)  # type: ignore

    # Optional time-window parameters are sent only when supplied.
    query_parameters = {}  # type: Dict[str, Any]
    if start_time is not None:
        query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
    if end_time is not None:
        query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
    if time_grain is not None:
        query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Executing an analysis is a POST operation.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticAnalysis', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
execute_site_analysis.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}  # type: ignore
def list_site_detectors(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DiagnosticDetectorCollection"]
    """Get Detectors.

    Description for Get Detectors.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticDetectorCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DiagnosticDetectorCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorCollection"]
    # Default HTTP-status -> exception mapping, extensible via ``error_map``.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: built from the metadata URL template; later pages use
        # the server-supplied ``next_link`` unchanged.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_site_detectors.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'siteName': self._serialize.url("site_name", site_name, 'str'),
                'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, iterator of items).
        deserialized = self._deserialize('DiagnosticDetectorCollection', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            # NOTE: for paged operations ``cls`` receives only the element list.
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising a typed error for any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_site_detectors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors'}  # type: ignore
def get_site_detector(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    detector_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DetectorDefinition"
    """Get Detector.

    Description for Get Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param detector_name: Detector Name.
    :type detector_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DetectorDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorDefinition"]
    # Status-code -> exception mapping; extensible through ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Fill in the metadata URL template with validated path parameters.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.get_site_detector.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DetectorDefinition', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}  # type: ignore
def execute_site_detector(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    detector_name,  # type: str
    diagnostic_category,  # type: str
    start_time=None,  # type: Optional[datetime.datetime]
    end_time=None,  # type: Optional[datetime.datetime]
    time_grain=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DiagnosticDetectorResponse"
    """Execute Detector.

    Description for Execute Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticDetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DiagnosticDetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorResponse"]
    # Status-code -> exception mapping; extensible through ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Fill in the metadata URL template with validated path parameters.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.execute_site_detector.metadata['url'], **path_format_arguments)  # type: ignore

    # Optional time-window parameters are sent only when supplied.
    query_parameters = {}  # type: Dict[str, Any]
    if start_time is not None:
        query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
    if end_time is not None:
        query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
    if time_grain is not None:
        query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Executing a detector is a POST operation.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticDetectorResponse', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
execute_site_detector.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}  # type: ignore
def list_site_detector_responses_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DetectorResponseCollection"]
    """List Site Detector Responses.

    Description for List Site Detector Responses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DetectorResponseCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DetectorResponseCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponseCollection"]
    # Default HTTP-status -> exception mapping, extensible via ``error_map``.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: built from the metadata URL template; later pages use
        # the server-supplied ``next_link`` unchanged.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_site_detector_responses_slot.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                'siteName': self._serialize.url("site_name", site_name, 'str'),
                'slot': self._serialize.url("slot", slot, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, iterator of items).
        deserialized = self._deserialize('DetectorResponseCollection', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            # NOTE: for paged operations ``cls`` receives only the element list.
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising a typed error for any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_site_detector_responses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors'}  # type: ignore
def get_site_detector_response_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    detector_name,  # type: str
    slot,  # type: str
    start_time=None,  # type: Optional[datetime.datetime]
    end_time=None,  # type: Optional[datetime.datetime]
    time_grain=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DetectorResponse"
    """Get site detector response.

    Description for Get site detector response.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param slot: Slot Name.
    :type slot: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorResponse"]
    # Status-code -> exception mapping; extensible through ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Fill in the metadata URL template with validated path parameters.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.get_site_detector_response_slot.metadata['url'], **path_format_arguments)  # type: ignore

    # Optional time-window parameters are sent only when supplied.
    query_parameters = {}  # type: Dict[str, Any]
    if start_time is not None:
        query_parameters['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
    if end_time is not None:
        query_parameters['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
    if time_grain is not None:
        query_parameters['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DetectorResponse', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_site_detector_response_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/detectors/{detectorName}'}  # type: ignore
def list_site_diagnostic_categories_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DiagnosticCategoryCollection"]
    """Get Diagnostics Categories.

    Description for Get Diagnostics Categories.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticCategoryCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DiagnosticCategoryCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategoryCollection"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Every page request carries the same Accept header.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the service-provided link already embeds all parameters.
            return self._client.get(next_link, {}, headers)
        # First page: expand the templated URL and attach the api-version.
        path_args = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'siteName': self._serialize.url("site_name", site_name, 'str'),
            'slot': self._serialize.url("slot", slot, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        target = self._client.format_url(self.list_site_diagnostic_categories_slot.metadata['url'], **path_args)  # type: ignore
        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(target, params, headers)

    def extract_data(pipeline_response):
        # Unpack one page: (continuation token or None, iterator over the page's elements).
        page = self._deserialize('DiagnosticCategoryCollection', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, iter(elements)

    def get_next(next_link=None):
        pipeline_response = self._client._pipeline.run(prepare_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_site_diagnostic_categories_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics'}  # type: ignore
def get_site_diagnostic_category_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DiagnosticCategory"
    """Get Diagnostics Category.

    Description for Get Diagnostics Category.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticCategory, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DiagnosticCategory
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticCategory"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Expand the templated URL with the serialized (and validated) path segments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target = self._client.format_url(self.get_site_diagnostic_category_slot.metadata['url'], **path_args)  # type: ignore
    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    pipeline_response = self._client._pipeline.run(
        self._client.get(target, params, headers), stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Raise a mapped exception for known status codes, otherwise a generic ARM error.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticCategory', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_site_diagnostic_category_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}'}  # type: ignore
def list_site_analyses_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DiagnosticAnalysisCollection"]
    """Get Site Analyses.

    Description for Get Site Analyses.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticAnalysisCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DiagnosticAnalysisCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysisCollection"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Every page request carries the same Accept header.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the service-provided link already embeds all parameters.
            return self._client.get(next_link, {}, headers)
        # First page: expand the templated URL and attach the api-version.
        path_args = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'siteName': self._serialize.url("site_name", site_name, 'str'),
            'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
            'slot': self._serialize.url("slot", slot, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        target = self._client.format_url(self.list_site_analyses_slot.metadata['url'], **path_args)  # type: ignore
        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(target, params, headers)

    def extract_data(pipeline_response):
        # Unpack one page: (continuation token or None, iterator over the page's elements).
        page = self._deserialize('DiagnosticAnalysisCollection', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, iter(elements)

    def get_next(next_link=None):
        pipeline_response = self._client._pipeline.run(prepare_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_site_analyses_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses'}  # type: ignore
def get_site_analysis_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    analysis_name,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.AnalysisDefinition"
    """Get Site Analysis.

    Description for Get Site Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param analysis_name: Analysis Name.
    :type analysis_name: str
    :param slot: Slot - optional.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: AnalysisDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.AnalysisDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.AnalysisDefinition"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Expand the templated URL with the serialized (and validated) path segments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target = self._client.format_url(self.get_site_analysis_slot.metadata['url'], **path_args)  # type: ignore
    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    pipeline_response = self._client._pipeline.run(
        self._client.get(target, params, headers), stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Raise a mapped exception for known status codes, otherwise a generic ARM error.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('AnalysisDefinition', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}'}  # type: ignore
def execute_site_analysis_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    analysis_name,  # type: str
    slot,  # type: str
    start_time=None,  # type: Optional[datetime.datetime]
    end_time=None,  # type: Optional[datetime.datetime]
    time_grain=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DiagnosticAnalysis"
    """Execute Analysis.

    Description for Execute Analysis.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param analysis_name: Analysis Resource Name.
    :type analysis_name: str
    :param slot: Slot Name.
    :type slot: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticAnalysis, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DiagnosticAnalysis
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticAnalysis"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Expand the templated URL with the serialized (and validated) path segments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'analysisName': self._serialize.url("analysis_name", analysis_name, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target = self._client.format_url(self.execute_site_analysis_slot.metadata['url'], **path_args)  # type: ignore

    # Optional time-window parameters are only sent when supplied by the caller.
    params = {}  # type: Dict[str, Any]
    if start_time is not None:
        params['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
    if end_time is not None:
        params['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
    if time_grain is not None:
        params['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
    params['api-version'] = self._serialize.query("api_version", api_version, 'str')
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Analyses are executed with a POST against the /execute sub-resource.
    pipeline_response = self._client._pipeline.run(
        self._client.post(target, params, headers), stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticAnalysis', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
execute_site_analysis_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/analyses/{analysisName}/execute'}  # type: ignore
def list_site_detectors_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.DiagnosticDetectorCollection"]
    """Get Detectors.

    Description for Get Detectors.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DiagnosticDetectorCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DiagnosticDetectorCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorCollection"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Every page request carries the same Accept header.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the service-provided link already embeds all parameters.
            return self._client.get(next_link, {}, headers)
        # First page: expand the templated URL and attach the api-version.
        path_args = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'siteName': self._serialize.url("site_name", site_name, 'str'),
            'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
            'slot': self._serialize.url("slot", slot, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        target = self._client.format_url(self.list_site_detectors_slot.metadata['url'], **path_args)  # type: ignore
        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(target, params, headers)

    def extract_data(pipeline_response):
        # Unpack one page: (continuation token or None, iterator over the page's elements).
        page = self._deserialize('DiagnosticDetectorCollection', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, iter(elements)

    def get_next(next_link=None):
        pipeline_response = self._client._pipeline.run(prepare_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list_site_detectors_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors'}  # type: ignore
def get_site_detector_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    diagnostic_category,  # type: str
    detector_name,  # type: str
    slot,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DetectorDefinition"
    """Get Detector.

    Description for Get Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param diagnostic_category: Diagnostic Category.
    :type diagnostic_category: str
    :param detector_name: Detector Name.
    :type detector_name: str
    :param slot: Slot Name.
    :type slot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DetectorDefinition, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DetectorDefinition
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DetectorDefinition"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Expand the templated URL with the serialized (and validated) path segments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target = self._client.format_url(self.get_site_detector_slot.metadata['url'], **path_args)  # type: ignore
    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    pipeline_response = self._client._pipeline.run(
        self._client.get(target, params, headers), stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Raise a mapped exception for known status codes, otherwise a generic ARM error.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DetectorDefinition', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}'}  # type: ignore
def execute_site_detector_slot(
    self,
    resource_group_name,  # type: str
    site_name,  # type: str
    detector_name,  # type: str
    diagnostic_category,  # type: str
    slot,  # type: str
    start_time=None,  # type: Optional[datetime.datetime]
    end_time=None,  # type: Optional[datetime.datetime]
    time_grain=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.DiagnosticDetectorResponse"
    """Execute Detector.

    Description for Execute Detector.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param site_name: Site Name.
    :type site_name: str
    :param detector_name: Detector Resource Name.
    :type detector_name: str
    :param diagnostic_category: Category Name.
    :type diagnostic_category: str
    :param slot: Slot Name.
    :type slot: str
    :param start_time: Start Time.
    :type start_time: ~datetime.datetime
    :param end_time: End Time.
    :type end_time: ~datetime.datetime
    :param time_grain: Time Grain.
    :type time_grain: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DiagnosticDetectorResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2021_01_01.models.DiagnosticDetectorResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiagnosticDetectorResponse"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-01-01"
    accept = "application/json"

    # Expand the templated URL with the serialized (and validated) path segments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'siteName': self._serialize.url("site_name", site_name, 'str'),
        'detectorName': self._serialize.url("detector_name", detector_name, 'str'),
        'diagnosticCategory': self._serialize.url("diagnostic_category", diagnostic_category, 'str'),
        'slot': self._serialize.url("slot", slot, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target = self._client.format_url(self.execute_site_detector_slot.metadata['url'], **path_args)  # type: ignore

    # Optional time-window parameters are only sent when supplied by the caller.
    params = {}  # type: Dict[str, Any]
    if start_time is not None:
        params['startTime'] = self._serialize.query("start_time", start_time, 'iso-8601')
    if end_time is not None:
        params['endTime'] = self._serialize.query("end_time", end_time, 'iso-8601')
    if time_grain is not None:
        params['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str', pattern=r'PT[1-9][0-9]+[SMH]')
    params['api-version'] = self._serialize.query("api_version", api_version, 'str')
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Detectors are executed with a POST against the /execute sub-resource.
    pipeline_response = self._client._pipeline.run(
        self._client.post(target, params, headers), stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DiagnosticDetectorResponse', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
execute_site_detector_slot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slot}/diagnostics/{diagnosticCategory}/detectors/{detectorName}/execute'}  # type: ignore
| 49.961133
| 255
| 0.65399
| 9,621
| 89,980
| 5.882341
| 0.026193
| 0.038361
| 0.033042
| 0.019666
| 0.974785
| 0.971322
| 0.971322
| 0.967894
| 0.963141
| 0.959148
| 0
| 0.011309
| 0.238375
| 89,980
| 1,800
| 256
| 49.988889
| 0.814507
| 0.253801
| 0
| 0.871296
| 0
| 0.017593
| 0.15659
| 0.066822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046296
| false
| 0
| 0.009259
| 0
| 0.114815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7c3c34b0417671c6964ce79f32d05d39e8a8556f
| 101
|
py
|
Python
|
nlptoolkit/classification/models/BERT/__init__.py
|
jackashore/NLP_Toolkit
|
e5bd8bcfad87f4906c45e66351adf93bd5c2727f
|
[
"Apache-2.0"
] | 2
|
2020-09-26T11:20:13.000Z
|
2021-09-17T13:15:48.000Z
|
nlptoolkit/classification/models/BERT/__init__.py
|
jackashore/NLP_Toolkit
|
e5bd8bcfad87f4906c45e66351adf93bd5c2727f
|
[
"Apache-2.0"
] | null | null | null |
nlptoolkit/classification/models/BERT/__init__.py
|
jackashore/NLP_Toolkit
|
e5bd8bcfad87f4906c45e66351adf93bd5c2727f
|
[
"Apache-2.0"
] | 1
|
2020-09-09T14:12:19.000Z
|
2020-09-09T14:12:19.000Z
|
from . import preprocessing_funcs
from . import trainer
from . import train_funcs
from . import BERT
| 20.2
| 33
| 0.80198
| 14
| 101
| 5.642857
| 0.5
| 0.506329
| 0.379747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158416
| 101
| 4
| 34
| 25.25
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7c4e788e5a6957383f9f84ec8f6381fe745e8ae0
| 109
|
py
|
Python
|
Python/Tests/TestData/Grammar/GenComp.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 695
|
2019-05-06T23:49:37.000Z
|
2022-03-30T01:56:00.000Z
|
Python/Tests/TestData/Grammar/GenComp.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/Grammar/GenComp.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
(fob for fob in oar)
(fob for fob in oar if baz)
(fob for fob in oar for baz in quox)
baz(fob for fob in oar)
| 27.25
| 36
| 0.697248
| 27
| 109
| 2.814815
| 0.259259
| 0.315789
| 0.473684
| 0.578947
| 0.815789
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229358
| 109
| 4
| 37
| 27.25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7caa742435de9672777e974ee9e1be3b78706896
| 8,174
|
py
|
Python
|
models.py
|
philipperemy/very-deep-convnets-raw-waveforms
|
c8bb551e196740b6067fe4736f6360b1278d82d0
|
[
"Apache-2.0"
] | 69
|
2017-05-14T09:59:21.000Z
|
2021-08-30T03:27:08.000Z
|
models.py
|
philipperemy/very-deep-convnets-raw-waveforms
|
c8bb551e196740b6067fe4736f6360b1278d82d0
|
[
"Apache-2.0"
] | 3
|
2017-06-02T08:37:08.000Z
|
2019-02-26T16:33:23.000Z
|
models.py
|
philipperemy/very-deep-convnets-raw-waveforms
|
c8bb551e196740b6067fe4736f6360b1278d82d0
|
[
"Apache-2.0"
] | 15
|
2018-02-20T17:40:42.000Z
|
2021-04-27T13:43:40.000Z
|
import keras.backend as K
from keras import regularizers
from keras.layers import Lambda
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.layers.core import Activation, Dense
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from constants import *
def m3(num_classes=10):
print('Using Model M3')
m = Sequential()
m.add(Conv1D(256,
input_shape=[AUDIO_LENGTH, 1],
kernel_size=80,
strides=4,
padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=regularizers.l2(l=0.0001)))
m.add(BatchNormalization())
m.add(Activation('relu'))
m.add(MaxPooling1D(pool_size=4, strides=None))
m.add(Conv1D(256,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=regularizers.l2(l=0.0001)))
m.add(BatchNormalization())
m.add(Activation('relu'))
m.add(MaxPooling1D(pool_size=4, strides=None))
m.add(Lambda(lambda x: K.mean(x, axis=1))) # Same as GAP for 1D Conv Layer
m.add(Dense(num_classes, activation='softmax'))
return m
def m5(num_classes=10):
print('Using Model M5')
m = Sequential()
m.add(Conv1D(128,
input_shape=[AUDIO_LENGTH, 1],
kernel_size=80,
strides=4,
padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=regularizers.l2(l=0.0001)))
m.add(BatchNormalization())
m.add(Activation('relu'))
m.add(MaxPooling1D(pool_size=4, strides=None))
m.add(Conv1D(128,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=regularizers.l2(l=0.0001)))
m.add(BatchNormalization())
m.add(Activation('relu'))
m.add(MaxPooling1D(pool_size=4, strides=None))
m.add(Conv1D(256,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=regularizers.l2(l=0.0001)))
m.add(BatchNormalization())
m.add(Activation('relu'))
m.add(MaxPooling1D(pool_size=4, strides=None))
m.add(Conv1D(512,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
kernel_regularizer=regularizers.l2(l=0.0001)))
m.add(BatchNormalization())
m.add(Activation('relu'))
m.add(MaxPooling1D(pool_size=4, strides=None))
m.add(Lambda(lambda x: K.mean(x, axis=1))) # Same as GAP for 1D Conv Layer
m.add(Dense(num_classes, activation='softmax'))
return m
def m11(num_classes=10):
    """Build the M11 model: an 11-weight-layer 1-D CNN — wide-kernel stem,
    grouped 3-wide conv blocks (2x64, 2x128, 3x256 with pooling, then 2x512
    without pooling), global average pooling, and a softmax classifier.

    Args:
        num_classes: size of the softmax output layer.

    Returns:
        An uncompiled Keras ``Sequential`` model expecting input of shape
        ``[AUDIO_LENGTH, 1]``.
    """
    print('Using Model M11')
    model = Sequential()
    # Stem: long kernel with stride 4 acts as a learned front-end filter bank.
    model.add(Conv1D(64,
                     input_shape=[AUDIO_LENGTH, 1],
                     kernel_size=80,
                     strides=4,
                     padding='same',
                     kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=4, strides=None))

    def conv_unit(filters):
        # One conv/BN/ReLU unit shared by every stage.
        model.add(Conv1D(filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer='glorot_uniform',
                         kernel_regularizer=regularizers.l2(l=0.0001)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Pooled stages: (repeats, filters) per stage.
    for repeats, filters in ((2, 64), (2, 128), (3, 256)):
        for _ in range(repeats):
            conv_unit(filters)
        model.add(MaxPooling1D(pool_size=4, strides=None))
    # Final stage has no pooling before global averaging.
    for _ in range(2):
        conv_unit(512)
    # Mean over the time axis == global average pooling for a 1-D conv stack.
    model.add(Lambda(lambda t: K.mean(t, axis=1)))
    model.add(Dense(num_classes, activation='softmax'))
    return model
def m_rec(num_classes=10):
    """Build the recurrent model: a wide-kernel Conv1D front-end followed by
    two stacked 32-unit LSTMs and a small dense head with softmax output.

    Args:
        num_classes: size of the softmax output layer.

    Returns:
        An uncompiled Keras ``Sequential`` model expecting input of shape
        ``[AUDIO_LENGTH, 1]``.
    """
    from keras.layers.recurrent import LSTM
    print('Using Model LSTM 1')
    model = Sequential()
    # Conv front-end downsamples the raw waveform before the recurrent layers.
    model.add(Conv1D(64,
                     input_shape=[AUDIO_LENGTH, 1],
                     kernel_size=80,
                     strides=4,
                     padding='same',
                     kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=4, strides=None))
    # Two stacked LSTMs; only the first returns the full sequence.
    for is_last in (False, True):
        model.add(LSTM(32,
                       kernel_regularizer=regularizers.l2(l=0.0001),
                       return_sequences=not is_last,
                       dropout=0.2))
    model.add(Dense(32))
    model.add(Dense(num_classes, activation='softmax'))
    return model
def m18(num_classes=10):
    """Build the M18 model: an 18-weight-layer 1-D CNN — wide-kernel stem,
    four groups of four 3-wide conv blocks (64, 128, 256 with pooling, then
    512 without pooling), global average pooling, and a softmax classifier.

    Args:
        num_classes: size of the softmax output layer.

    Returns:
        An uncompiled Keras ``Sequential`` model expecting input of shape
        ``[AUDIO_LENGTH, 1]``.
    """
    print('Using Model M18')
    model = Sequential()
    # Stem: long kernel with stride 4 acts as a learned front-end filter bank.
    model.add(Conv1D(64,
                     input_shape=[AUDIO_LENGTH, 1],
                     kernel_size=80,
                     strides=4,
                     padding='same',
                     kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=4, strides=None))

    def conv_unit(filters):
        # One conv/BN/ReLU unit shared by every stage.
        model.add(Conv1D(filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer='glorot_uniform',
                         kernel_regularizer=regularizers.l2(l=0.0001)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Pooled stages: four conv units each at 64, 128 and 256 filters.
    for filters in (64, 128, 256):
        for _ in range(4):
            conv_unit(filters)
        model.add(MaxPooling1D(pool_size=4, strides=None))
    # Final stage (4x512) has no pooling before global averaging.
    for _ in range(4):
        conv_unit(512)
    # Mean over the time axis == global average pooling for a 1-D conv stack.
    model.add(Lambda(lambda t: K.mean(t, axis=1)))
    model.add(Dense(num_classes, activation='softmax'))
    return model
| 34.931624
| 78
| 0.553218
| 934
| 8,174
| 4.72591
| 0.088865
| 0.070684
| 0.12483
| 0.133439
| 0.912778
| 0.898278
| 0.873811
| 0.873811
| 0.873811
| 0.873811
| 0
| 0.054869
| 0.326645
| 8,174
| 233
| 79
| 35.081545
| 0.747093
| 0.014558
| 0
| 0.890995
| 0
| 0
| 0.060248
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023697
| false
| 0
| 0.042654
| 0
| 0.090047
| 0.023697
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7ce26e31ca321ab4fa819b3209680850181819e4
| 8,718
|
py
|
Python
|
tests/test_tf_layers.py
|
xbodx/DeepPavlov
|
4b60bf162df4294b8b0db3b72786cdd699c674fa
|
[
"Apache-2.0"
] | 5,893
|
2018-02-01T18:13:20.000Z
|
2022-03-31T19:22:21.000Z
|
tests/test_tf_layers.py
|
xbodx/DeepPavlov
|
4b60bf162df4294b8b0db3b72786cdd699c674fa
|
[
"Apache-2.0"
] | 749
|
2018-01-31T11:36:02.000Z
|
2022-03-30T07:24:22.000Z
|
tests/test_tf_layers.py
|
xbodx/DeepPavlov
|
4b60bf162df4294b8b0db3b72786cdd699c674fa
|
[
"Apache-2.0"
] | 1,155
|
2018-02-01T10:52:15.000Z
|
2022-03-29T02:12:15.000Z
|
import shutil
from functools import reduce
from pathlib import Path
import numpy as np
import pytest
import tensorflow as tf
from deeppavlov.core.layers.tf_layers import cudnn_lstm, cudnn_compatible_lstm, cudnn_gru, cudnn_compatible_gru
# Directory containing this test module; scratch checkpoints written by the
# save/load tests below live in a "tf_layers_data" subdirectory of it.
tests_dir = Path(__file__).parent
tf_layers_data_path = tests_dir / "tf_layers_data"
def setup_module():
    """Start this module's tests with a fresh, empty ``tf_layers_data`` directory."""
    scratch = str(tf_layers_data_path)
    # Remove any leftovers from a previous run before recreating the tree.
    shutil.rmtree(scratch, ignore_errors=True)
    tf_layers_data_path.mkdir(parents=True)
def teardown_module():
    """Delete everything this module's tests saved under ``tf_layers_data``."""
    scratch = str(tf_layers_data_path)
    shutil.rmtree(scratch, ignore_errors=True)
class DPCudnnLSTMModel:
    """Minimal TF-v1 harness around ``cudnn_lstm``: owns a session and a graph
    with a single (batch, time, 50) float placeholder, exposes the sequence
    output and last hidden state, and supports checkpoint save/restore."""

    def __init__(self, num_layers, num_units):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.x = tf.placeholder(shape=(None, None, 50), dtype=tf.float32)
        # Fixed scope name so cudnn/compatible variants restore each other's variables.
        with tf.variable_scope('cudnn_model'):
            h, (h_last, c_last) = cudnn_lstm(self.x, num_units, num_layers,
                                             trainable_initial_states=True)
        self.h = h
        self.h_last = h_last
        self.sess.run(tf.global_variables_initializer())

    def __call__(self, x):
        """Run the network on *x*; returns ``[hidden_sequence, last_hidden]``."""
        return self.sess.run([self.h, self.h_last], feed_dict={self.x: x})

    def save(self, path='model'):
        """Write a checkpoint of all graph variables to *path*."""
        print('[saving model to {}]'.format(path))
        tf.train.Saver().save(self.sess, path)

    def load(self, path):
        """Restore all graph variables from the checkpoint at *path*."""
        tf.train.Saver().restore(self.sess, path)
class DPLSTMModel:
    """Same harness as ``DPCudnnLSTMModel`` but built on
    ``cudnn_compatible_lstm`` — the CPU-side implementation that can restore
    checkpoints written by the cuDNN variant (shared 'cudnn_model' scope)."""

    def __init__(self, num_layers, num_units):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.x = tf.placeholder(shape=(None, None, 50), dtype=tf.float32)
        with tf.variable_scope('cudnn_model'):
            h, (h_last, c_last) = cudnn_compatible_lstm(self.x, num_units, num_layers,
                                                        trainable_initial_states=True)
        self.h = h
        self.h_last = h_last
        self.sess.run(tf.global_variables_initializer())

    def __call__(self, x):
        """Run the network on *x*; returns ``[hidden_sequence, last_hidden]``."""
        return self.sess.run([self.h, self.h_last], feed_dict={self.x: x})

    def save(self, path='model'):
        """Write a checkpoint of all graph variables to *path*."""
        print('[saving model to {}]'.format(path))
        tf.train.Saver().save(self.sess, path)

    def load(self, path):
        """Restore all graph variables from the checkpoint at *path*."""
        tf.train.Saver().restore(self.sess, path)
class DPCudnnGRUModel:
    """Minimal TF-v1 harness around ``cudnn_gru``; mirrors ``DPCudnnLSTMModel``
    but a GRU has no cell state, so the layer returns only (h, h_last)."""

    def __init__(self, num_layers, num_units):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.x = tf.placeholder(shape=(None, None, 50), dtype=tf.float32)
        with tf.variable_scope('cudnn_model'):
            h, h_last = cudnn_gru(self.x, num_units, num_layers,
                                  trainable_initial_states=True)
        self.h = h
        self.h_last = h_last
        self.sess.run(tf.global_variables_initializer())

    def __call__(self, x):
        """Run the network on *x*; returns ``[hidden_sequence, last_hidden]``."""
        return self.sess.run([self.h, self.h_last], feed_dict={self.x: x})

    def save(self, path='model'):
        """Write a checkpoint of all graph variables to *path*."""
        print('[saving model to {}]'.format(path))
        tf.train.Saver().save(self.sess, path)

    def load(self, path):
        """Restore all graph variables from the checkpoint at *path*."""
        tf.train.Saver().restore(self.sess, path)
class DPGRUModel:
    """Same harness as ``DPCudnnGRUModel`` but built on
    ``cudnn_compatible_gru`` — the implementation that can restore checkpoints
    written by the cuDNN variant (shared 'cudnn_model' scope)."""

    def __init__(self, num_layers, num_units):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.x = tf.placeholder(shape=(None, None, 50), dtype=tf.float32)
        with tf.variable_scope('cudnn_model'):
            h, h_last = cudnn_compatible_gru(self.x, num_units, num_layers,
                                             trainable_initial_states=True)
        self.h = h
        self.h_last = h_last
        self.sess.run(tf.global_variables_initializer())

    def __call__(self, x):
        """Run the network on *x*; returns ``[hidden_sequence, last_hidden]``."""
        return self.sess.run([self.h, self.h_last], feed_dict={self.x: x})

    def save(self, path='model'):
        """Write a checkpoint of all graph variables to *path*."""
        print('[saving model to {}]'.format(path))
        tf.train.Saver().save(self.sess, path)

    def load(self, path):
        """Restore all graph variables from the checkpoint at *path*."""
        tf.train.Saver().restore(self.sess, path)
class TestTFLayers:
    """Save/restore round-trip tests for the cuDNN and cuDNN-compatible
    LSTM/GRU layer implementations: outputs computed before saving must match
    the outputs computed after restoring (up to a small mismatch budget)."""

    # Tolerated fraction of element mismatches after rounding.
    allowed_error_lvl = 0.01 * 2 ** 0.5

    @staticmethod
    def equal_values(a, b, round=5):
        """Return the fraction of elements of *a* and *b* that are equal
        after rounding both to *round* decimal places."""
        a, b = np.round(a, round), np.round(b, round)
        return np.sum(a == b) / reduce(lambda x, y: x * y, a.shape)

    def _check_save_load(self, save_cls, load_cls, num_layers, dirname):
        """Build *save_cls*, record its outputs, checkpoint it, restore the
        checkpoint into a fresh *load_cls*, and assert outputs survived.

        Fix over the original tests: each measurement previously called the
        model twice (``model(x)[0], model(x)[1]``), running the TF session
        twice per comparison; a single call returns both tensors at once.
        """
        x = np.random.normal(size=(10, 10, 50))
        path = str(tf_layers_data_path / dirname / 'model')
        tf.reset_default_graph()
        model = save_cls(num_layers=num_layers, num_units=100)
        before_hidden, before_state = model(x)
        model.save(path)
        tf.reset_default_graph()
        model = load_cls(num_layers=num_layers, num_units=100)
        model.load(path)
        after_hidden, after_state = model(x)
        assert self.equal_values(after_hidden, before_hidden) > 1 - self.allowed_error_lvl
        assert self.equal_values(after_state, before_state) > 1 - self.allowed_error_lvl

    @pytest.mark.parametrize("num_layers", [1, 3])
    def test_cudnn_lstm_save_load(self, num_layers):
        self._check_save_load(DPCudnnLSTMModel, DPCudnnLSTMModel,
                              num_layers, 'dpcudnnlstmmodel')

    @pytest.mark.parametrize("num_layers", [1, 3])
    def test_cudnn_lstm_save_and_cudnn_compatible_load(self, num_layers):
        self._check_save_load(DPCudnnLSTMModel, DPLSTMModel,
                              num_layers, 'dpcudnnlstmmodel')

    @pytest.mark.parametrize("num_layers", [1, 3])
    def test_cudnn_gru_save_load(self, num_layers):
        self._check_save_load(DPCudnnGRUModel, DPCudnnGRUModel,
                              num_layers, 'cdnngrumodel')

    @pytest.mark.parametrize("num_layers", [1, 3])
    def test_cudnn_gru_save_and_cudnn_compatible_load(self, num_layers):
        self._check_save_load(DPCudnnGRUModel, DPGRUModel,
                              num_layers, 'cdnngrumodel')
| 37.74026
| 117
| 0.674811
| 1,189
| 8,718
| 4.638352
| 0.106812
| 0.052221
| 0.043518
| 0.034814
| 0.903173
| 0.903173
| 0.903173
| 0.903173
| 0.903173
| 0.903173
| 0
| 0.015004
| 0.212549
| 8,718
| 230
| 118
| 37.904348
| 0.788347
| 0
| 0
| 0.790698
| 0
| 0
| 0.040147
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 1
| 0.133721
| false
| 0
| 0.040698
| 0
| 0.238372
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b0b06a946dbbcdd778a1f630ba5b50ebc3f66a7
| 157,575
|
py
|
Python
|
pirates/battle/CombatAnimations.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/battle/CombatAnimations.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/battle/CombatAnimations.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
import WeaponGlobals
from direct.interval.IntervalGlobal import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from pandac.PandaModules import *
from direct.actor import Actor
from pirates.uberdog.UberDogGlobals import InventoryType
from pirates.effects.DaggerProjectile import DaggerProjectile
from pirates.effects.CameraShaker import CameraShaker
from pirates.effects.SmokeCloud import SmokeCloud
from pirates.effects.ThrowDirt import ThrowDirt
from pirates.effects.ShockwaveRing import ShockwaveRing
from pirates.effects.DustRing import DustRing
from pirates.effects.WindBlurCone import WindBlurCone
from pirates.effects.VenomSpitProjectile import VenomSpitProjectile
from pirates.effects.HomingMissile import HomingMissile
from pirates.effects.DaggerProjectile import DaggerProjectile
from pirates.effects.WispSpiral import WispSpiral
from pirates.effects.AttuneSmoke import AttuneSmoke
from pirates.effects.MusketSmoke import MusketSmoke
from pirates.effects.MusketFlame import MusketFlame
from pirates.effects.PistolSmoke import PistolSmoke
from pirates.effects.PistolFlame import PistolFlame
from pirates.effects.BeamEffect import BeamEffect
from pirates.effects.SoulHarvest import SoulHarvest
from pirates.effects.DomeExplosion import DomeExplosion
from pirates.effects.DarkPortal import DarkPortal
from pirates.effects.UnholyFlare import UnholyFlare
from pirates.effects.NovaStar import NovaStar
from pirates.effects.HomingMissile import HomingMissile
from pirates.effects.DarkStar import DarkStar
from pirates.effects.Pestilence import Pestilence
from pirates.effects.VoodooProjectile import VoodooProjectile
from pirates.effects.FlamingSkull import FlamingSkull
from pirates.effects.JollySoulDrain import JollySoulDrain
from pirates.effects.VoodooFire import VoodooFire
from pirates.effects.WitherCharge import WitherCharge
from pirates.effects.EvilRingEffect import EvilRingEffect
from pirates.effects.VoodooPestilence import VoodooPestilence
from pirates.effects.SoulFlay import SoulFlay
from pirates.effects.VoodooSouls import VoodooSouls
from pirates.effects.VoodooGlow import VoodooGlow
from pirates.effects.SoulSpiral import SoulSpiral
from pirates.effects.EnergySpiral import EnergySpiral
from pirates.effects.VoodooAura import VoodooAura
from pirates.effects.VoodooAura2 import VoodooAura2
from pirates.effects.VoodooPower import VoodooPower
from pirates.effects.WindWave import WindWave
from pirates.effects.DesolationSmoke import DesolationSmoke
from pirates.effects.WindCharge import WindCharge
from pirates.effects.DesolationChargeSmoke import DesolationChargeSmoke
from pirates.effects.SoulHarvest2 import SoulHarvest2
from pirates.effects.VoodooStaffFire import VoodooStaffFire
from pirates.effects.MuzzleFlash import MuzzleFlash
from pirates.effects.SpectralTrail import SpectralTrail
from pirates.effects.SpectralSmoke import SpectralSmoke
from pirates.effects.HealSparks import HealSparks
from pirates.effects.FadingCard import FadingCard
from pirates.effects.JRSoulHarvest import JRSoulHarvest
from pirates.effects.JRSoulHarvest2 import JRSoulHarvest2
from pirates.effects.JRGraveSmoke import JRGraveSmoke
from pirates.effects.GrapeshotEffect import GrapeshotEffect
from pirates.effects.PistolShot import PistolShot
from pirates.effects.ScatterShot import ScatterShot
from pirates.effects.MusketShot import MusketShot
from pirates.effects.VoodooAuraBurst import VoodooAuraBurst
from pirates.effects.FlashEffect import FlashEffect
from pirates.effects.HealBlast import HealBlast
from pirates.effects.PulsingGlow import PulsingGlow
from pirates.effects.ConeRays import ConeRays
from pirates.effects.VoodooAuraHeal import VoodooAuraHeal
from pirates.effects.VoodooAuraDisc import VoodooAuraDisc
from pirates.effects.VoodooGroundAura import VoodooGroundAura
from pirates.effects.HitPulse import HitPulse
from pirates.effects.SimpleSmokeCloud import SimpleSmokeCloud
from pirates.effects.MonkeyPanicHit import MonkeyPanicHit
from GrenadeProjectile import GrenadeProjectile
from pirates.piratesbase import PLocalizer
from pirates.inventory import ItemGlobals
from pirates.battle.EnemySkills import *
import random
import copy
from direct.showbase.InputStateGlobal import inputState
# Skill results for mistimed attacks; the attack intervals below suppress the
# weapon trail effect (wantTrail = 0) when the result is one of these.
MISTIMEDLIST = [
    WeaponGlobals.RESULT_MISTIMED_MISS, WeaponGlobals.RESULT_MISTIMED_HIT]
class CombatAnimations():
notify = directNotify.newCategory('CombatAnimations')
BASE_GRENADE_POWER = 0.8
def getHack(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
delay = 0.625
if av == localAvatar and av.guiMgr.combatTray.onLastAttack:
delay = 0.85
wantTrail = 1
if skillResult in MISTIMEDLIST:
wantTrail = 0
ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.startSpecialEffect, skillId), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=4, endFrame=30, blendInT=0.2, blendOutT=0.3), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
return ival
def getSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
delay = 0.75
if av == localAvatar and av.guiMgr.combatTray.onLastAttack:
delay = 1.3
wantTrail = 1
if skillResult in MISTIMEDLIST:
wantTrail = 0
ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.3), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=31, endFrame=62, blendInT=0.5, blendOutT=0.3), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
return ival
def getCleave(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
delay = 1.25
if av == localAvatar and av.guiMgr.combatTray.onLastAttack:
delay = 1.5
wantTrail = 1
if skillResult in MISTIMEDLIST:
wantTrail = 0
ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=63, endFrame=101, blendInT=0.5, blendOutT=0.3), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
return ival
def getFlourish(self, av, skillId, ammoSkillId, charge, target, skillResult):
if not av.currentWeapon:
return None
delay = 1.58
if av == localAvatar and av.guiMgr.combatTray.onLastAttack:
delay = 2.08
wantTrail = 1
if skillResult in MISTIMEDLIST:
wantTrail = 0
ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=102, endFrame=150, blendInT=0.5, blendOutT=0.3), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
return ival
    def getThrust(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the cutlass Thrust attack (combo frames 151-210),
        including a spin-blur segment and a wind-blur cone effect mid-spin.
        Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        delay = 1.8
        # Longer input lockout on the final attack of the local player's combo tray.
        if av.isLocal() and av.guiMgr.combatTray.onLastAttack:
            delay = 2.05
        wantTrail = 1
        # Mistimed results play without the weapon trail.
        if skillResult in MISTIMEDLIST:
            wantTrail = 0
        def startVFX():
            # Wind-blur cone on the weapon; only shown at the High effects setting.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                effect = WindBlurCone.getEffect(unlimited)
                if effect and not av.currentWeapon.isEmpty():
                    # Dark motion-blur weapons render without additive blending.
                    colorId = ItemGlobals.getVfxType1(av.currentWeapon.itemId)
                    if colorId == ItemGlobals.MotionBlurDark:
                        effect.setBlendModeOff()
                    else:
                        effect.setBlendModeOn()
                    effect.fadeColor = av.currentWeapon.getBlurColor()
                    effect.reparentTo(av.currentWeapon)
                    effect.fadeTime = 0.4
                    effect.setPos(0, 0, 0)
                    effect.setScale(1)
                    effect.setH(0)
                    effect.play()
        # Segments: 151-164 windup, 165-170 spin blur shown, 171-175 blur + wind cone,
        # 176-210 recovery with blur hidden; input unlocks after `delay` in parallel.
        ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.5), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=151, endFrame=164, blendInT=0.5, blendOutT=0), Func(av.currentWeapon.showSpinBlur), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=165, endFrame=170, blendInT=0, blendOutT=0), Func(startVFX), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=171, endFrame=175, blendInT=0, blendOutT=0), Func(av.currentWeapon.hideSpinBlur), av.actorInterval('cutlass_combo', playRate=1.0, startFrame=176, endFrame=210, blendInT=0, blendOutT=0.5), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
        return ival
    def getSweep(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the cutlass Sweep attack ('cutlass_sweep'
        frames 1-35) with two shockwave-ring bursts and a dust ring.
        Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        def startVFX():
            # First shockwave ring at ground level; High effects setting only.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 0)
                    shockwaveRingEffect.play()
        def startVFX2():
            # Second, raised shockwave ring plus a dust ring; High setting only.
            # NOTE(review): nesting reconstructed from a whitespace-stripped
            # source — dust ring assumed to sit under the High check; confirm.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 3)
                    shockwaveRingEffect.play()
                dustRingEffect = DustRing.getEffect(unlimited)
                if dustRingEffect:
                    dustRingEffect.reparentTo(av)
                    dustRingEffect.setPos(0, 0, 0)
                    dustRingEffect.play()
        # Movement is disabled for the whole sweep; frames 1-10 windup, VFX at
        # frame 10 and 15, 16-35 follow-through, then movement/input restored.
        ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.7), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=1, endFrame=10, blendOutT=0), Func(startVFX), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=11, endFrame=15, blendInT=0, blendOutT=0), Func(startVFX2), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=16, endFrame=35, blendInT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        return ival
    def getBlowbackSweep(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the Blowback Sweep attack: the basic sweep
        plus a camera shake and a dark (0.15 grey) voodoo aura burst.
        Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        def startVFX():
            # Camera shake always plays; shockwave ring only at High setting.
            effect = CameraShaker()
            effect.reparentTo(av)
            effect.setPos(0, 0, 0)
            effect.shakeSpeed = 0.06
            effect.shakePower = 2.5
            effect.numShakes = 3
            effect.scalePower = 1
            effect.play(300)
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 0)
                    shockwaveRingEffect.play()
        def startVFX2():
            # Aura burst at Medium+; dust ring additionally at High.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
                effect = VoodooAuraBurst.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av)
                    effect.setPos(0, 0, 0.25)
                    effect.setEffectColor(Vec4(0.15, 0.15, 0.15, 1.0))
                    effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                dustRingEffect = DustRing.getEffect(unlimited)
                if dustRingEffect:
                    dustRingEffect.reparentTo(av)
                    dustRingEffect.setPos(0, 0, 0)
                    dustRingEffect.play()
        # Same frame layout as getSweep; note the skill sfx fires after the
        # first VFX here rather than before the windup.
        ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.7), Func(av.currentWeapon.beginAttack, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=1, endFrame=10, blendOutT=0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=11, endFrame=15, blendInT=0, blendOutT=0), Func(startVFX2), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=16, endFrame=35, blendInT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        return ival
    def getFireSweep(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the Fire Sweep attack: the blowback sweep with
        the weapon's special effect started and a warm orange (1, 0.8, 0.4)
        aura burst. Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        def startVFX():
            # Camera shake always plays; shockwave ring only at High setting.
            effect = CameraShaker()
            effect.reparentTo(av)
            effect.setPos(0, 0, 0)
            effect.shakeSpeed = 0.06
            effect.shakePower = 2.5
            effect.numShakes = 3
            effect.scalePower = 1
            effect.play(300)
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 0)
                    shockwaveRingEffect.play()
        def startVFX2():
            # Aura burst at Medium+; dust ring additionally at High.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
                effect = VoodooAuraBurst.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av)
                    effect.setPos(0, 0, 0.25)
                    effect.setEffectColor(Vec4(1, 0.8, 0.4, 0.4))
                    effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                dustRingEffect = DustRing.getEffect(unlimited)
                if dustRingEffect:
                    dustRingEffect.reparentTo(av)
                    dustRingEffect.setPos(0, 0, 0)
                    dustRingEffect.play()
        # Same as getBlowbackSweep plus startSpecialEffect on the weapon.
        ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.startSpecialEffect, skillId), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.7), Func(av.currentWeapon.beginAttack, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=1, endFrame=10, blendOutT=0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=11, endFrame=15, blendInT=0, blendOutT=0), Func(startVFX2), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=16, endFrame=35, blendInT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        return ival
    def getIceSweep(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the Ice Sweep attack: identical to the fire
        sweep but with a cool blue (0.4, 0.6, 1) aura burst.
        Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        def startVFX():
            # Camera shake always plays; shockwave ring only at High setting.
            effect = CameraShaker()
            effect.reparentTo(av)
            effect.setPos(0, 0, 0)
            effect.shakeSpeed = 0.06
            effect.shakePower = 2.5
            effect.numShakes = 3
            effect.scalePower = 1
            effect.play(300)
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 0)
                    shockwaveRingEffect.play()
        def startVFX2():
            # Aura burst at Medium+; dust ring additionally at High.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
                effect = VoodooAuraBurst.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av)
                    effect.setPos(0, 0, 0.25)
                    effect.setEffectColor(Vec4(0.4, 0.6, 1, 0.6))
                    effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                dustRingEffect = DustRing.getEffect(unlimited)
                if dustRingEffect:
                    dustRingEffect.reparentTo(av)
                    dustRingEffect.setPos(0, 0, 0)
                    dustRingEffect.play()
        # Same as getBlowbackSweep plus startSpecialEffect on the weapon.
        ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.startSpecialEffect, skillId), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.7), Func(av.currentWeapon.beginAttack, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=1, endFrame=10, blendOutT=0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=11, endFrame=15, blendInT=0, blendOutT=0), Func(startVFX2), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=16, endFrame=35, blendInT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        return ival
    def getThunderSweep(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the Thunder Sweep attack: identical to the
        fire sweep but with a green-yellow (0.7, 0.9, 0.5) aura burst.
        Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        def startVFX():
            # Camera shake always plays; shockwave ring only at High setting.
            effect = CameraShaker()
            effect.reparentTo(av)
            effect.setPos(0, 0, 0)
            effect.shakeSpeed = 0.06
            effect.shakePower = 2.5
            effect.numShakes = 3
            effect.scalePower = 1
            effect.play(300)
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 0)
                    shockwaveRingEffect.play()
        def startVFX2():
            # Aura burst at Medium+; dust ring additionally at High.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
                effect = VoodooAuraBurst.getEffect(unlimited)
                if effect:
                    effect.reparentTo(av)
                    effect.setPos(0, 0, 0.25)
                    effect.setEffectColor(Vec4(0.7, 0.9, 0.5, 0.5))
                    effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                dustRingEffect = DustRing.getEffect(unlimited)
                if dustRingEffect:
                    dustRingEffect.reparentTo(av)
                    dustRingEffect.setPos(0, 0, 0)
                    dustRingEffect.play()
        # Same as getBlowbackSweep plus startSpecialEffect on the weapon.
        ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.startSpecialEffect, skillId), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.7), Func(av.currentWeapon.beginAttack, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=1, endFrame=10, blendOutT=0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=11, endFrame=15, blendInT=0, blendOutT=0), Func(startVFX2), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=16, endFrame=35, blendInT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        return ival
    def getFurySweep(self, av, skillId, ammoSkillId, charge, target, skillResult):
        """Build the interval for the Fury Sweep attack: four successive
        shockwave rings at rising heights (z = 0, 1, 2, 3) through the sweep.
        Returns None when the avatar has no weapon equipped.
        """
        if not av.currentWeapon:
            return None
        def startVFX():
            # Ground-level ring; gated on the High effects setting.
            unlimited = av.isLocal()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
                if shockwaveRingEffect:
                    shockwaveRingEffect.reparentTo(av)
                    shockwaveRingEffect.size = 40
                    shockwaveRingEffect.setPos(0, 0, 0)
                    shockwaveRingEffect.play()
        def startVFX2():
            # Ring at z=1 (not gated on settings); dust ring only at High.
            unlimited = av.isLocal()
            shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
            if shockwaveRingEffect:
                shockwaveRingEffect.reparentTo(av)
                shockwaveRingEffect.size = 40
                shockwaveRingEffect.setPos(0, 0, 1)
                shockwaveRingEffect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                dustRingEffect = DustRing.getEffect(unlimited)
                if dustRingEffect:
                    dustRingEffect.reparentTo(av)
                    dustRingEffect.setPos(0, 0, 0)
                    dustRingEffect.play()
        def startVFX3():
            # Ring at z=2.
            unlimited = av.isLocal()
            shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
            if shockwaveRingEffect:
                shockwaveRingEffect.reparentTo(av)
                shockwaveRingEffect.size = 40
                shockwaveRingEffect.setPos(0, 0, 2)
                shockwaveRingEffect.play()
        def startVFX4():
            # Ring at z=3.
            unlimited = av.isLocal()
            shockwaveRingEffect = ShockwaveRing.getEffect(unlimited)
            if shockwaveRingEffect:
                shockwaveRingEffect.reparentTo(av)
                shockwaveRingEffect.size = 40
                shockwaveRingEffect.setPos(0, 0, 3)
                shockwaveRingEffect.play()
        # Sweep split into four frame segments with one ring fired after each.
        ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.7), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=1, endFrame=10, blendOutT=0), Func(startVFX), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=11, endFrame=18, blendInT=0, blendOutT=0), Func(startVFX2), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=18, endFrame=26, blendInT=0, blendOutT=0), Func(startVFX3), av.actorInterval('cutlass_sweep', playRate=1.0, startFrame=26, endFrame=35, blendInT=0.0), Func(startVFX4), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        return ival
def getBladestorm(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the cutlass Bladestorm attack interval, or None when the
    avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Single locked swing: lock input, play the animation with SFX and a
    # long blade trail, then restore movement and input.
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(av.motionFSM.off),
        Func(weapon.hideMouse, av),
        Func(weapon.setTrailLength, 0.55),
        Func(weapon.beginAttack, av),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('cutlass_bladestorm', playRate=1.0, blendInT=0.5, blendOutT=0.5),
        Func(weapon.endAttack, av),
        Func(av.considerEnableMovement),
        Func(self.unlockInput, av))
def getBrawl(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the Brawl (headbutt) attack interval, or None when the
    avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Build the setup portion first, then append the animation and teardown
    # steps — equivalent to one flat Sequence.
    ival = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(av.motionFSM.off),
        Func(weapon.hideMouse, av),
        Func(weapon.setTrailLength, 0.3),
        Func(weapon.beginAttack, av),
        Func(weapon.playSkillSfx, skillId, av))
    for step in (
            av.actorInterval('cutlass_headbutt', playRate=1.0, blendInT=0.5, blendOutT=0.5),
            Func(weapon.endAttack, av),
            Func(av.considerEnableMovement),
            Func(self.unlockInput, av)):
        ival.append(step)
    return ival
def getTaunt(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the Taunt interval (chat bubble + taunt animation), or None
    when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # The taunt phrase is chosen now, at interval-build time.
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(av.motionFSM.off),
        Func(weapon.hideMouse, av),
        Func(av.setChatAbsolute, PLocalizer.getTauntPhrase(), CFSpeech | CFTimeout),
        Func(weapon.setTrailLength, 0.15),
        Func(weapon.beginAttack, av),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('cutlass_taunt', playRate=1.0, blendInT=0.5, blendOutT=0.5),
        Func(weapon.endAttack, av),
        Func(av.considerEnableMovement),
        Func(self.unlockInput, av))
def getCower(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the Cower interval (chat bubble + cower-in-place animation),
    or None when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # No attackTire here — cowering is not an attack swing.
    return Sequence(
        Func(self.lockInput, av),
        Func(av.motionFSM.off),
        Func(weapon.hideMouse, av),
        Func(av.setChatAbsolute, PLocalizer.getNotInFacePhrase(), CFSpeech | CFTimeout),
        Func(weapon.beginAttack, av),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('cower_in_place', playRate=1.0, blendInT=0.5, blendOutT=0.5),
        Func(weapon.endAttack, av),
        Func(av.considerEnableMovement),
        Func(self.unlockInput, av))
def getMonkeyPanic(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Monkey Panic interval: a red screen flash plus (at High
    effects) a panic-hit effect on the head, over a slice of the
    'emote_thriller' animation.  Returns None when no weapon is equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Red flash is always shown, regardless of the effects setting.
        flashEffect = FlashEffect()
        flashEffect.reparentTo(av.headNode)
        flashEffect.setScale(15.0)
        flashEffect.fadeTime = 1.0
        flashEffect.setEffectColor(Vec4(1, 0.2, 0.2, 1))
        flashEffect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = MonkeyPanicHit.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.headNode)
                effect.setPos(-1, 0, 0)
                effect.play()
    # Reuses frames 265-320 of the thriller emote as the panic animation.
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.15), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), Func(startVFX), av.actorInterval('emote_thriller', playRate=1.0, startFrame=265, endFrame=320, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival
def getBroadswordHack(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the broadsword Hack combo interval, or None when the avatar
    has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('broadsword_combo', playRate=1.0, startFrame=1, endFrame=28, blendInT=0.1, blendOutT=0.1),
        Func(weapon.endAttack, av))
    # Input unlocks on a fixed timer, in parallel with the swing.
    unlockSeq = Sequence(Wait(0.85), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getBroadswordSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the broadsword Slash combo interval, or None when the avatar
    has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.3),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('broadsword_combo', playRate=1.0, startFrame=29, endFrame=60, blendInT=0.5, blendOutT=0.3),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(0.95), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getBroadswordCleave(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the broadsword Cleave combo interval, or None when the avatar
    has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.4),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        # Wind-up uses the motion variant, then the strike frames proper.
        av.actorInterval('broadsword_combo_motion', playRate=1.0, startFrame=61, endFrame=76, blendInT=0.1, blendOutT=0),
        av.actorInterval('broadsword_combo', playRate=1.0, startFrame=77, endFrame=96, blendInT=0, blendOutT=0.3),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(1.05), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getBroadswordFlourish(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the broadsword Flourish combo interval, or None when the
    avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.4),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('broadsword_combo_motion', playRate=1.0, startFrame=97, endFrame=112, blendInT=0.1, blendOutT=0),
        av.actorInterval('broadsword_combo', playRate=1.0, startFrame=113, endFrame=128, blendInT=0, blendOutT=0),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(0.95), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getBroadswordThrust(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the broadsword Thrust (combo finisher) attack interval.

    Returns a Parallel of the attack animation sequence and a timed input
    unlock, or None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None
    wantTrail = 1
    delay = 1.0
    # Keep input locked longer when the local player fired this as the
    # final attack in the combo tray.
    if av.isLocal() and av.guiMgr.combatTray.onLastAttack:
        delay = 1.45
    # A mistimed combo press suppresses the blade trail.
    if skillResult in MISTIMEDLIST:
        wantTrail = 0

    def startVFX():
        # Motion-blur cone on the weapon; High effects setting only.
        # Dark-blur weapons disable blending so the cone reads as a shadow.
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = WindBlurCone.getEffect(unlimited)
            if effect and not av.currentWeapon.isEmpty():
                colorId = ItemGlobals.getVfxType1(av.currentWeapon.itemId)
                if colorId == ItemGlobals.MotionBlurDark:
                    effect.setBlendModeOff()
                else:
                    effect.setBlendModeOn()
                effect.fadeColor = av.currentWeapon.getBlurColor()
                effect.reparentTo(av.currentWeapon)
                effect.fadeTime = 0.4
                effect.setPos(0, 0, 0)
                effect.setScale(1)
                effect.setH(0)
                effect.play()
    # NOTE(review): startVFX is defined but never referenced by the interval
    # below — the blur effect never plays here.  Possibly dead code or a
    # dropped Func(startVFX); confirm before removing.
    ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.5), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('broadsword_combo_motion', playRate=1.0, startFrame=129, endFrame=148, blendInT=0.5, blendOutT=0), av.actorInterval('broadsword_combo', playRate=1.0, startFrame=149, endFrame=170, blendInT=0, blendOutT=0), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
    return ival
def getSabreHack(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the sabre Hack combo interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('sabre_combo', playRate=1.0, startFrame=1, endFrame=19, blendInT=0.2, blendOutT=0.3),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(0.425), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getSabreSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the sabre Slash combo interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.3),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('sabre_combo', playRate=1.0, startFrame=20, endFrame=37, blendInT=0.1, blendOutT=0.1),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(0.4), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getSabreCleave(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the sabre Cleave combo interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.4),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('sabre_combo', playRate=1.0, startFrame=38, endFrame=81, blendInT=0.5, blendOutT=0.3),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getSabreFlourish(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the sabre Flourish combo interval, or None when the avatar
    has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.4),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        # Wind-up on the motion variant, then the strike frames proper.
        av.actorInterval('sabre_combo_motion', playRate=1.0, startFrame=82, endFrame=94, blendInT=0.1, blendOutT=0),
        av.actorInterval('sabre_combo', playRate=1.0, startFrame=95, endFrame=121, blendInT=0, blendOutT=0.1),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getSabreThrust(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the sabre Thrust (combo finisher) attack interval.

    Returns a Parallel of the attack animation sequence and a timed input
    unlock, or None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None
    delay = 1.3
    # Keep input locked longer when the local player fired this as the
    # final attack in the combo tray.
    if av.isLocal() and av.guiMgr.combatTray.onLastAttack:
        delay = 1.55
    wantTrail = 1
    # A mistimed combo press suppresses the blade trail.
    if skillResult in MISTIMEDLIST:
        wantTrail = 0

    def startVFX():
        # Motion-blur cone on the weapon; High effects setting only.
        # Dark-blur weapons disable blending so the cone reads as a shadow.
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = WindBlurCone.getEffect(unlimited)
            if effect and not av.currentWeapon.isEmpty():
                colorId = ItemGlobals.getVfxType1(av.currentWeapon.itemId)
                if colorId == ItemGlobals.MotionBlurDark:
                    effect.setBlendModeOff()
                else:
                    effect.setBlendModeOn()
                effect.fadeColor = av.currentWeapon.getBlurColor()
                effect.reparentTo(av.currentWeapon)
                effect.fadeTime = 0.4
                effect.setPos(0, 0, 0)
                effect.setScale(1)
                effect.setH(0)
                effect.play()
    # NOTE(review): startVFX is defined but never referenced by the interval
    # below (same situation as getBroadswordThrust) — possibly dead code or
    # a dropped Func(startVFX); confirm before removing.
    ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.5), Func(av.currentWeapon.beginAttack, av, wantTrail), Func(av.currentWeapon.playSkillSfx, skillId, av, 0, wantTrail), av.actorInterval('sabre_combo_motion', playRate=1.0, startFrame=122, endFrame=147, blendInT=0.5, blendOutT=0), av.actorInterval('sabre_combo', playRate=1.0, startFrame=148, endFrame=175, blendInT=0, blendOutT=0), Func(av.currentWeapon.endAttack, av)), Sequence(Wait(delay), Func(self.unlockInput, av)))
    return ival
def getCut(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Cut combo interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Lock input longer when the local player fired the last combo attack.
    onLast = av == localAvatar and av.guiMgr.combatTray.onLastAttack
    delay = 1.25 if onLast else 0.75
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('dagger_combo', playRate=1.0, startFrame=1, endFrame=28, blendInT=0.2, blendOutT=0.5),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(delay), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getSwipe(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Swipe combo interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Lock input longer when the local player fired the last combo attack.
    onLast = av == localAvatar and av.guiMgr.combatTray.onLastAttack
    delay = 1.083 if onLast else 0.583
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.3),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('dagger_combo', playRate=1.0, startFrame=29, endFrame=53, blendInT=0.5, blendOutT=0.5),
        Func(weapon.hideSpinBlur),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(delay), Func(self.unlockInput, av))
    # The spin blur is re-shown shortly after the swing starts.
    spinBlurSeq = Sequence(Wait(0.1), Func(weapon.showSpinBlur))
    return Parallel(attackSeq, unlockSeq, spinBlurSeq)
def getGouge(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Gouge combo interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Lock input longer when the local player fired the last combo attack.
    onLast = av == localAvatar and av.guiMgr.combatTray.onLastAttack
    delay = 1.458 if onLast else 0.958
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.5),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('dagger_combo', playRate=1.0, startFrame=54, endFrame=87, blendInT=0.5, blendOutT=0.5),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(delay), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def getEviscerate(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Eviscerate combo interval, or None when the avatar
    has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Lock input longer when the local player fired the last combo attack.
    onLast = av == localAvatar and av.guiMgr.combatTray.onLastAttack
    delay = 2.4 if onLast else 1.9
    # A mistimed combo press suppresses the blade trail.
    wantTrail = 0 if skillResult in MISTIMEDLIST else 1
    attackSeq = Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.6),
        Func(weapon.beginAttack, av, wantTrail),
        Func(weapon.playSkillSfx, skillId, av, 0, wantTrail),
        av.actorInterval('dagger_combo', playRate=1.0, startFrame=88, endFrame=142, blendInT=0.5, blendOutT=0.5),
        Func(weapon.endAttack, av))
    unlockSeq = Sequence(Wait(delay), Func(self.unlockInput, av))
    return Parallel(attackSeq, unlockSeq)
def throwDagger(self, av, time, targetPos, motion_color=None, startOffset=Vec3(0, 0, 0), roll=0, leftHand=False, skillId=None, target=None):
    """Launch a dagger projectile effect from `av`'s hand toward targetPos.

    When skillId is given, the target position and flight time are derived
    from the avatar's projectile info for that skill.  The dagger's roll
    gets a small random perturbation for visual variety.
    """
    if not av:
        return
    if skillId:
        targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
        time *= speed
    roll += random.uniform(-15.0, 15.0)
    effect = DaggerProjectile.getEffect()
    if not effect:
        return
    effect.reparentTo(render)
    # Spawn from whichever hand is throwing.
    handNode = av.leftHandNode if leftHand else av.rightHandNode
    effect.setPos(handNode, startOffset)
    effect.setHpr(av.getH(render) + roll, 90 + roll, roll)
    effect.play(time, targetPos, motion_color)
def throwDaggerRain(self, av, time, targetPos, motion_color=None, startOffset=Vec3(0, 0, 0), roll=0, leftHand=False, skillId=None, target=None):
    """Launch one dagger of a 'dagger rain' volley, scattered around targetPos.

    When skillId is given, the target position and flight time are derived
    from the avatar's projectile info for that skill.

    Bug fix: the scattered position was previously computed only inside the
    `if skillId:` branch, so calling without a skillId raised NameError on
    the undefined name when the effect played; the scatter is now applied
    unconditionally to whichever targetPos is in effect.
    """
    if not av:
        return
    if skillId:
        targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
        time *= speed
    # Scatter each dagger of the volley around the aim point.
    newTargetPos = Point3(targetPos.getX() + random.uniform(-2.0, 2.0), targetPos.getY() + random.uniform(-2.0, 2.0), targetPos.getZ())
    roll += random.uniform(-15.0, 15.0)
    effect = DaggerProjectile.getEffect()
    if effect:
        effect.reparentTo(render)
        # Spawn from whichever hand is throwing.
        if leftHand:
            effect.setPos(av.leftHandNode, startOffset)
        else:
            effect.setPos(av.rightHandNode, startOffset)
        effect.setHpr(av.getH(render) + roll, 90 + roll, roll)
        effect.play(time, newTargetPos, motion_color)
def getDaggerThrowDirtInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the dagger Throw Dirt interval: hides the weapon, plays the
    sand-throw animation, and fires a dirt-spray effect at the avatar's
    position.  Returns None when no weapon is equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        effect = ThrowDirt.getEffect(unlimited)
        if effect:
            # Effect is placed in world space at the avatar's current pose;
            # the particle dummy is positioned to match.
            effect.reparentTo(render)
            effect.setPos(av.getPos(render))
            effect.setHpr(av.getHpr(render))
            effect.particleDummy.setPos(av.getPos(render))
            effect.particleDummy.setHpr(av.getHpr(render))
            effect.play()
    # The weapon is hidden for the throw and restored at the end.
    ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(base.disableMouse), Func(av.currentWeapon.endAttack, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.hideWeapon), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_throw_sand', playRate=1.0, startFrame=1, endFrame=10, blendInT=0.2, blendOutT=0), Func(av.currentWeapon.playSkillSfx, skillId, av), Func(startVFX), av.actorInterval('dagger_throw_sand', playRate=1.0, startFrame=11, endFrame=38, blendInT=0, blendOutT=0.3), Func(av.currentWeapon.showWeapon), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival
def getDaggerAspInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Asp throw interval, or None when the avatar has
    no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    # Wind up, then release: the projectile launch, hand-weapon hide, and
    # follow-through animation run in parallel.
    release = Parallel(
        av.actorInterval('knife_throw', startFrame=18, blendInT=0, blendOutT=0.4),
        Func(self.throwDagger, av, speed, targetPos, skillId=skillId, target=target),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('knife_throw', endFrame=17, blendInT=0.2, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerAdderInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Adder throw interval (green trail), or None when
    the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Green motion trail for the adder projectile.
    motion_color = [
        Vec4(0.1, 1.0, 0.4, 1.0), Vec4(0.5, 1.0, 0.4, 1.0)]
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('knife_throw', startFrame=18, blendInT=0, blendOutT=0.4),
        Func(self.throwDagger, av, 1.0, targetPos, motion_color, skillId=skillId, target=target),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('knife_throw', endFrame=17, blendInT=0.2, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerSidewinderInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the dagger Sidewinder throw interval (red trail, sideways
    spin), or None when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Red motion trail for the sidewinder projectile.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('dagger_asp', startFrame=8, blendInT=0, blendOutT=0.4),
        # roll=90 throws the dagger flat for the sidewinder look.
        Func(self.throwDagger, av, 1.0, targetPos, motion_color, roll=90, skillId=skillId, target=target),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('dagger_asp', endFrame=7, blendInT=0.1, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerViperNestInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the dagger Viper's Nest interval: throws a spread of daggers
    at randomized points in front of the avatar (camera-relative for the
    local player).  Returns None when no weapon is equipped.

    Bug fix: numDaggers was the float 12.0, and range() requires an
    integer (TypeError on Python 3); it is now an int.
    """
    if not av.currentWeapon:
        return None
    numDaggers = 12
    time = 0.7
    # Temporary node used to compute each dagger's world-space target.
    placeHolder = av.attachNewNode('daggerPlaceHolder')
    daggerTossIval = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.25), av.actorInterval('dagger_vipers_nest', startFrame=21, endFrame=35, blendInT=0, blendOutT=0.4), Func(av.currentWeapon.showWeapon), Func(av.considerEnableMovement), Func(self.unlockInput, av)))
    for i in range(numDaggers):
        # Scatter targets in a forward cone; the local player aims
        # camera-relative, remote avatars avatar-relative.
        if av.isLocal():
            placeHolder.setPos(camera, random.uniform(-12, 12), random.uniform(100, 120), random.uniform(8, 18))
        else:
            placeHolder.setPos(av, random.uniform(-12, 12), random.uniform(100, 120), random.uniform(2, 12))
        targetPos = placeHolder.getPos(render)
        daggerTossIval.append(Func(self.throwDagger, av, time + random.uniform(-0.5, 1.0), targetPos, startOffset=Vec3(-3, 0, 0), roll=90))
    placeHolder.removeNode()
    track = Sequence(Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.hideWeapon), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('dagger_vipers_nest', endFrame=20, blendOutT=0), daggerTossIval)
    return track
def getDaggerBarrageInterval(self, av, skillId, ammoSkillId, charge, target, skillResult, areaList=None):
    """Build the dagger Barrage interval: one dagger at the primary target,
    one at each enemy in areaList, plus an evenly-spaced radial fan of
    daggers.  Returns None when no weapon is equipped.

    Bug fixes: numDaggers was the float 6.0, and range() requires an
    integer (TypeError on Python 3); the mutable default `areaList=[]`
    is replaced with a None sentinel (behaviorally equivalent for callers).
    """
    if not av.currentWeapon:
        return None
    if areaList is None:
        areaList = []
    # Red motion trail for barrage projectiles.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    numDaggers = 6
    time = 0.7
    # Placeholder nodes used to compute world-space fan targets.
    placeHolder = av.attachNewNode('daggerPlaceHolder')
    placeHolder2 = placeHolder.attachNewNode('daggerPlaceHolder2')
    daggerTossIval = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.25), av.actorInterval('dagger_vipers_nest', startFrame=21, endFrame=35, blendInT=0, blendOutT=0.4), Func(av.currentWeapon.showWeapon), Func(av.considerEnableMovement), Func(self.unlockInput, av)))
    if target:
        targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
        daggerTossIval.append(Func(self.throwDagger, av, speed, targetPos, motion_color, startOffset=Vec3(-3, 0, 0), roll=90))
    for enemy in areaList:
        targetPos, speed, impactT = av.getProjectileInfo(skillId, enemy)
        daggerTossIval.append(Func(self.throwDagger, av, speed, targetPos, motion_color, startOffset=Vec3(-3, 0, 0), roll=90))
    # Fan origin: camera-relative for the local player, avatar-relative
    # for remote avatars.
    if av.isLocal():
        placeHolder.setPos(camera, 0, 0, 0)
    else:
        placeHolder.setPos(av, 0, 0, 0)
    for i in range(numDaggers):
        # Evenly distribute fan daggers around the full circle.
        placeHolder.setH(i * 360.0 / numDaggers)
        placeHolder2.setPos(0, random.uniform(100, 120), random.uniform(2, 12))
        targetPos = placeHolder2.getPos(render)
        daggerTossIval.append(Func(self.throwDagger, av, time + random.uniform(-0.5, 1.0), targetPos, motion_color, startOffset=Vec3(-3, 0, 0), roll=90))
    placeHolder.removeNode()
    track = Sequence(Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.hideWeapon), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('dagger_vipers_nest', endFrame=20, blendOutT=0), daggerTossIval)
    return track
def getDaggerThrowCombo1Interval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the first dagger-throw combo interval (right hand), or None
    when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Red motion trail shared by the throw combo.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('dagger_throw_combo', startFrame=7, endFrame=23, playRate=1.5, blendInT=0, blendOutT=0.4),
        Func(self.throwDagger, av, 0.5, targetPos, motion_color, skillId=skillId, target=target),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('dagger_throw_combo', endFrame=6, playRate=1.5, blendInT=0.1, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerThrowCombo2Interval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the second dagger-throw combo interval (left-hand throw), or
    None when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Red motion trail shared by the throw combo.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('dagger_throw_combo', startFrame=31, endFrame=46, playRate=1.5, blendInT=0, blendOutT=0.4),
        Func(self.throwDagger, av, 0.5, targetPos, motion_color, leftHand=True, skillId=skillId, target=target),
        Func(weapon.hideWeapon))
    # The weapon is moved to the left hand for this throw, then restored
    # to the right hand afterwards.
    return Sequence(
        Func(weapon.reparentTo, av.leftHandNode),
        Func(av.attackTire),
        Func(self.lockInput, av),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('dagger_throw_combo', startFrame=24, endFrame=30, playRate=1.5, blendInT=0.1, blendOutT=0),
        release,
        Func(weapon.reparentTo, av.rightHandNode),
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerThrowCombo3Interval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the third dagger-throw combo interval (dual throw), or None
    when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return
    # Red motion trail shared by the throw combo.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    # A copy of the weapon goes in the left hand so both hands throw.
    dagger2 = copy.copy(weapon)
    av.setSecondWeapon(dagger2)
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('dagger_throw_combo', startFrame=57, endFrame=78, playRate=1.5, blendInT=0, blendOutT=0.1),
        Func(self.throwDagger, av, 0.5, targetPos, motion_color, skillId=skillId, target=target),
        # The left-hand dagger leaves a hair later than the right.
        Sequence(Wait(0.011), Func(self.throwDagger, av, 0.5, targetPos, motion_color, leftHand=True, skillId=skillId, target=target)),
        Func(av.setSecondWeapon, None),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(dagger2.reparentTo, av.leftHandNode),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('dagger_throw_combo', startFrame=47, endFrame=56, playRate=1.5, blendInT=0.1, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerThrowCombo4Interval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the fourth dagger-throw combo interval (dual sideways throw),
    or None when the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return
    # Red motion trail shared by the throw combo.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    # A copy of the weapon goes in the left hand so both hands throw.
    dagger2 = copy.copy(weapon)
    av.setSecondWeapon(dagger2)
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('dagger_throw_combo', startFrame=94, endFrame=130, playRate=1.5, blendInT=0, blendOutT=0.4),
        # roll=90 throws both daggers flat.
        Func(self.throwDagger, av, 0.5, targetPos, motion_color, roll=90, skillId=skillId, target=target),
        # The left-hand dagger leaves a hair later than the right.
        Sequence(Wait(0.011), Func(self.throwDagger, av, 0.5, targetPos, motion_color, roll=90, leftHand=True, skillId=skillId, target=target)),
        Func(av.setSecondWeapon, None),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(dagger2.reparentTo, av.leftHandNode),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('dagger_throw_combo', startFrame=79, endFrame=93, playRate=1.5, blendInT=0.1, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getDaggerRainInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the dagger Rain interval: lobs several daggers that land
    scattered around the target (via throwDaggerRain).  Returns None when
    no weapon is equipped.

    Bug fix: numDaggers was the float 3.0, and range() requires an
    integer (TypeError on Python 3); it is now an int.
    """
    if not av.currentWeapon:
        return None
    # Red motion trail for the rain projectiles.
    motion_color = [
        Vec4(1.0, 0.0, 0.0, 1.0), Vec4(1.0, 0.2, 0.0, 1.0)]
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    numDaggers = 3
    daggerTossIval = Parallel(Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.setTrailLength, 0.25), av.actorInterval('dagger_asp', startFrame=7, endFrame=20, blendInT=0, blendOutT=0.4), Func(av.currentWeapon.showWeapon), Func(av.considerEnableMovement), Func(self.unlockInput, av)))
    for i in range(numDaggers):
        # Stagger each launch slightly; throwDaggerRain scatters the
        # landing point around targetPos.
        daggerTossIval.append(Wait(0.011 * i))
        daggerTossIval.append(Func(self.throwDaggerRain, av, random.uniform(0.95, 1.05), targetPos, motion_color, roll=90, skillId=skillId, target=target))
    track = Sequence(Func(av.motionFSM.off), Func(av.attackTire), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.hideWeapon), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('dagger_asp', endFrame=6, blendOutT=0), daggerTossIval)
    return track
def getDaggerAcidDaggerInterval(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the Acid Dagger throw interval (yellow trail), or None when
    the avatar has no weapon equipped."""
    weapon = av.currentWeapon
    if not weapon:
        return None
    # Yellow motion trail for the acid dagger.
    motion_color = [
        Vec4(1.0, 1.0, 0.0, 1.0), Vec4(1.0, 1.0, 0.2, 1.0)]
    targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
    release = Parallel(
        av.actorInterval('dagger_asp', startFrame=8, blendInT=0, blendOutT=0.4),
        Func(self.throwDagger, av, 1.0, targetPos, motion_color, roll=90, skillId=skillId, target=target),
        Func(weapon.hideWeapon))
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(weapon.setTrailLength, 0.25),
        Func(weapon.playSkillSfx, skillId, av),
        av.actorInterval('dagger_asp', endFrame=7, blendInT=0.1, blendOutT=0),
        release,
        Func(weapon.showWeapon),
        Func(self.unlockInput, av))
def getPistolChargingAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the looping aim/charge interval for the equipped gun.

    Weapons that must stop to aim also lock movement; weapons without a
    charging animation fall back to 'gun_aim_idle'.
    """
    subtype = ItemGlobals.getSubtype(av.currentWeaponId)
    anim = ItemGlobals.getChargingAnim(subtype)
    steps = []
    if ItemGlobals.shouldStopToAim(subtype):
        # Stop-to-aim weapons lock movement while charging; their anim is
        # used as-is (matching prior behavior).
        steps.append(Func(av.motionFSM.moveLock))
    elif not anim:
        anim = 'gun_aim_idle'
    steps.append(Func(base.cr.targetMgr.setWantAimAssist, 1))
    steps.append(Func(av.setAimMod, -0.5))
    steps.append(av.actorInterval(anim, loop=1, duration=9999, blendInT=0.3, blendOutT=0.3))
    return Sequence(*steps)
def getPistolReloadAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the reload interval for the equipped firearm.

    Picks the subtype-specific reload animation (with ramrod choreography
    for blunderbusses), plays the weapon's reload SFX for guns, and sends
    'reloadFinished' for the local avatar when done.  Returns None when no
    weapon is equipped.
    """
    if not av.currentWeapon:
        return

    def finishReload():
        # Only the local client drives the reload-complete GUI event.
        if av.isLocal():
            messenger.send('reloadFinished')
    if ItemGlobals.getType(av.currentWeaponId) == ItemGlobals.GUN:
        sfx = av.currentWeapon.reloadSfx
    else:
        sfx = None
    anim = ItemGlobals.getReloadAnim(ItemGlobals.getSubtype(av.currentWeaponId))
    if anim:
        if ItemGlobals.getSubtype(av.currentWeaponId) == ItemGlobals.BLUNDERBUSS:
            # Blunderbuss reload: the ramrod prop is attached to the left
            # hand for the middle frames of the animation, then detached.
            ramRod = av.currentWeapon.getRamRod()
            track = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(base.playSfx, sfx, node=av, cutoff=60), av.actorInterval(anim, startFrame=1, endFrame=13, blendInT=0, blendOutT=0), Func(ramRod.reparentTo, av.leftHandNode), av.actorInterval(anim, startFrame=14, endFrame=68, blendInT=0, blendOutT=0), Func(ramRod.detachNode), av.actorInterval(anim, startFrame=69, endFrame=110, blendInT=0, blendOutT=0), Func(finishReload), Func(self.unlockInput, av))
        else:
            track = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(base.playSfx, sfx, node=av, cutoff=60), av.actorInterval(anim, blendInT=0, blendOutT=0), Func(finishReload), Func(self.unlockInput, av))
    else:
        # No subtype-specific animation: generic gun reload.
        track = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(base.playSfx, sfx, node=av, cutoff=60), av.actorInterval('gun_reload', blendInT=0, blendOutT=0), Func(finishReload), Func(self.unlockInput, av))
    # Decompiler artifact: explicit delete of the closure name; the Func
    # above still holds its own reference, so this is harmless.
    del finishReload
    return track
def getPistolFireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the firing interval for the equipped pistol-class gun.

    Muskets/bayonets and blunderbusses delegate to their own builders.
    Returns a Sequence, or None when no weapon is equipped.
    """
    if not av.currentWeapon:
        return None
    subtype = ItemGlobals.getSubtype(av.currentWeaponId)
    if subtype in [ItemGlobals.MUSKET, ItemGlobals.BAYONET]:
        return self.getBayonetFireAnim(av, skillId, ammoSkillId, charge, target, skillResult)
    elif subtype in [ItemGlobals.BLUNDERBUSS]:
        return self.getPistolScattershotAnim(av, skillId, ammoSkillId, charge, target, skillResult)

    def startVFX():
        # Muzzle-flash effect parented to the weapon model itself.
        unlimited = av.isLocal()
        pistolFlame = PistolShot.getEffect(unlimited)
        if pistolFlame:
            pistolFlame.reparentTo(av.currentWeapon)
            pistolFlame.setPos(1.25, 0.3, 0)
            pistolFlame.setHpr(0, 0, 90)
            pistolFlame.setScale(1)
            pistolFlame.play()

    anim = ItemGlobals.getFireAnim(ItemGlobals.getSubtype(av.currentWeaponId))
    if anim:
        # Fire split at frame 3 (flash + sfx) and frame 12 (input unlock);
        # the recovery tail (frame 13+) blends back out over 0.3s.
        ival = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), av.actorInterval(anim, startFrame=1, endFrame=3, blendInT=0.0, blendOutT=0.0, playRate=1.0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval(anim, startFrame=4, endFrame=12, blendInT=0.0, blendOutT=0.0, playRate=1.0), Func(self.unlockInput, av), av.actorInterval(anim, startFrame=13, blendInT=0, blendOutT=0.3, playRate=1.0))
    else:
        ival = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('gun_fire', endFrame=12, blendInT=0, blendOutT=0), Func(self.unlockInput, av), av.actorInterval('gun_fire', startFrame=13, blendInT=0, blendOutT=0.3))
    del startVFX
    return ival
def getPistolTakeAimAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the aimed-shot release interval for the equipped gun.

    Returns a Sequence, or None when no weapon is equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        # Muzzle smoke + flash; only spawned at the High effects setting.
        unlimited = av.isLocal()
        if base.cr.wantSpecialEffects and base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            pistolSmokeEffect = PistolSmoke.getEffect(unlimited)
            if pistolSmokeEffect:
                pistolSmokeEffect.reparentTo(av)
                pistolSmokeEffect.setPos(av, 1.2, 2.5, 5)
                pistolSmokeEffect.play()
            pistolFlameEffect = PistolFlame.getEffect(unlimited)
            if pistolFlameEffect:
                pistolFlameEffect.reparentTo(av)
                pistolFlameEffect.particleDummy.reparentTo(av)
                pistolFlameEffect.flash.setScale(30)
                pistolFlameEffect.setPos(av, 1.2, 2.5, 5)
                pistolFlameEffect.setColorScale(1, 1, 1, 1)
                pistolFlameEffect.play()

    subtype = ItemGlobals.getSubtype(av.currentWeaponId)
    anim = ItemGlobals.getTakeAimAnim(subtype)
    if anim:
        if subtype in (ItemGlobals.BAYONET, ItemGlobals.MUSKET, ItemGlobals.BLUNDERBUSS):
            # Long guns freeze avatar motion for the shot, then re-enable it.
            ival = Sequence(Func(av.motionFSM.off), Func(base.cr.targetMgr.setWantAimAssist, 0), Func(av.setAimMod, 0), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval(anim, playRate=1, startFrame=26, blendInT=0, blendOutT=0.3), Func(av.considerEnableMovement), Func(self.unlockInput, av))
        else:
            ival = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(av.setAimMod, 0), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval(anim, playRate=1, startFrame=1, blendInT=0, blendOutT=0.3), Func(self.unlockInput, av))
    else:
        ival = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(av.setAimMod, 0), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('gun_fire', playRate=1, blendInT=0, blendOutT=0.3), Func(self.unlockInput, av))
    del startVFX
    return ival
def getPistolScattershotAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the blunderbuss scattershot firing interval.

    Returns a Sequence, or None when no weapon is equipped.
    NOTE(review): nesting below was reconstructed from a whitespace-mangled
    source; the effect-quality gates are assumed to sit at closure level.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Always-on muzzle flash attached to the weapon.
        pistolFlame = ScatterShot.getEffect(unlimited)
        if pistolFlame:
            pistolFlame.reparentTo(av.currentWeapon)
            pistolFlame.setPos(1.75, 0.25, 0)
            pistolFlame.setHpr(0, 0, 90)
            pistolFlame.setScale(0.4)
            pistolFlame.play()
        # Pellet spray, Medium quality and up.
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            effect = GrapeshotEffect.getEffect(unlimited)
            if effect:
                effect.reparentTo(render)
                effect.setHpr(av, 0, 0, 0)
                effect.setPos(av, 1.2, 2.0, 4)
                effect.setScale(0.2)
                effect.time = 0.5
                effect.play()
        # Lingering smoke cloud, High quality only.
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = SimpleSmokeCloud.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.currentWeapon)
                effect.setPos(1.75, 0.25, 0)
                effect.setEffectScale(0.1)
                effect.play()

    anim = ItemGlobals.getFireAnim(ItemGlobals.getSubtype(av.currentWeaponId))
    ival = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), av.actorInterval(anim, startFrame=9, endFrame=11, blendInT=0.1, blendOutT=0), Func(av.currentWeapon.playSkillSfx, skillId, av), Func(startVFX), av.actorInterval(anim, startFrame=12, endFrame=39, blendInT=0.0, blendOutT=0.3), Func(self.unlockInput, av))
    del startVFX
    return ival
def getGrenadeReloadAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the grenade draw/reload interval.

    The weapon model is detached while the draw starts and re-attached
    mid-animation so the bomb appears in hand at the right frame. Siege
    grenades use the larger 'bigbomb' animation set.
    """
    if not av.currentWeapon:
        return None

    def finishReload():
        # Only the local avatar drives the reload-UI/skill state machine.
        if av.isLocal():
            messenger.send('reloadFinished')

    if av.currentWeapon.ammoSkillId == InventoryType.GrenadeSiege:
        track = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.detachFrom, av), av.actorInterval('bigbomb_draw', endFrame=11, blendInT=0, blendOutT=0), Func(av.currentWeapon.attachTo, av), av.actorInterval('bigbomb_draw', startFrame=12, blendInT=0, blendOutT=0.3), Func(finishReload), Func(self.unlockInput, av))
    else:
        track = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(av.currentWeapon.detachFrom, av), av.actorInterval('bomb_draw', endFrame=5, blendInT=0, blendOutT=0), Func(av.currentWeapon.attachTo, av), av.actorInterval('bomb_draw', startFrame=5, blendInT=0, blendOutT=0.3), Func(finishReload), Func(self.unlockInput, av))
    # Break the closure reference cycle once it is captured by the track.
    del finishReload
    return track
def getGrenadeChargingAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the looping wind-up interval played while a grenade throw charges.

    Siege grenades lock avatar movement during the charge; regular grenades
    only bias the aim. The charge loop runs 'forever' (duration=9999) until
    the owning track is interrupted, with charging audio in parallel.
    """
    if not av.currentWeapon:
        return None
    if av.currentWeapon.ammoSkillId == InventoryType.GrenadeSiege:
        track = Parallel(Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), av.actorInterval('bigbomb_charge', blendInT=0.3, blendOutT=0), av.actorInterval('bigbomb_charge_loop', loop=1, duration=9999, blendInT=0, blendOutT=0.3)), SoundInterval(av.currentWeapon.chargingSfx, loop=1, node=av, cutOff=60))
    else:
        track = Parallel(Sequence(Func(av.setAimMod, -0.5), Func(av.currentWeapon.hideMouse, av), av.actorInterval('bomb_charge', blendInT=0.3, blendOutT=0), av.actorInterval('bomb_charge_loop', loop=1, duration=9999, blendInT=0, blendOutT=0.3)), SoundInterval(av.currentWeapon.chargingSfx, loop=1, node=av, cutOff=60))
    return track
def getGrenadeThrow(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the grenade throw interval.

    The weapon prop is swapped for a live projectile (spawnGrenade) at the
    animation frame where the hand releases — frame 40 of 'bigbomb_throw'
    for siege grenades, frame 15 of 'bomb_throw' otherwise.
    """
    if not av.currentWeapon:
        return None
    if ammoSkillId == InventoryType.GrenadeSiege:
        # Convert the release frame to seconds so the projectile spawn can
        # be scheduled with a Wait in a parallel track.
        attachTime = av.getFrameTime('bigbomb_throw', 40)
        track = Parallel(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.playSkillSfx, av.currentWeapon.ammoSkillId, av), Sequence(av.actorInterval('bigbomb_throw', blendInT=0.3, blendOutT=0.3), Func(av.considerEnableMovement), Func(self.unlockInput, av)), Sequence(Wait(attachTime), Func(av.currentWeapon.detachFrom, av), Func(self.spawnGrenade, av, skillId, ammoSkillId, charge, target, skillResult)))
    else:
        attachTime = av.getFrameTime('bomb_throw', 15)
        track = Parallel(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(av.currentWeapon.playSkillSfx, skillId, av), Sequence(av.actorInterval('bomb_throw', blendInT=0.3, blendOutT=0.3), Func(self.unlockInput, av)), Sequence(Wait(attachTime), Func(av.currentWeapon.detachFrom, av), Func(self.spawnGrenade, av, skillId, ammoSkillId, charge, target, skillResult)))
    return track
def getGrenadeLongVolley(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the charged ("long volley") grenade throw interval.

    Same structure as getGrenadeThrow but uses the *_charge_throw
    animations, with the projectile released at frame 9 (siege) or
    frame 4 (regular).
    """
    if not av.currentWeapon:
        return None
    if ammoSkillId == InventoryType.GrenadeSiege:
        attachTime = av.getFrameTime('bigbomb_charge_throw', 9)
        track = Parallel(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.playSkillSfx, av.currentWeapon.ammoSkillId, av), Sequence(av.actorInterval('bigbomb_charge_throw', blendInT=0.3, blendOutT=0.3), Func(av.considerEnableMovement), Func(self.unlockInput, av)), Sequence(Wait(attachTime), Func(av.currentWeapon.detachFrom, av), Func(self.spawnGrenade, av, skillId, ammoSkillId, charge, target, skillResult)))
    else:
        attachTime = av.getFrameTime('bomb_charge_throw', 4)
        # startTime=2.0 skips the sfx into its throw portion — TODO confirm
        # against the actual audio asset.
        track = Parallel(Func(self.lockInput, av), Func(av.attackTire), Func(av.setAimMod, 0), Func(av.currentWeapon.playSkillSfx, skillId, av, startTime=2.0), Sequence(av.actorInterval('bomb_charge_throw', blendInT=0.3, blendOutT=0.3), Func(self.unlockInput, av)), Sequence(Wait(attachTime), Func(av.currentWeapon.detachFrom, av), Func(self.spawnGrenade, av, skillId, ammoSkillId, charge, target, skillResult)))
    return track
def spawnGrenade(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Create the grenade projectile, tag it for hit attribution, place it
    in the thrower's hand, and launch it via throwGrenade.
    """
    grenade = GrenadeProjectile(av.cr, ammoSkillId, av.projectileHitEvent)
    grenade.detachNode()
    grenade.setBillboardPointEye()
    # Strip the model's baked-in collision geometry; the live collision
    # node (below) is managed separately.
    grenadeModelCol = grenade.find('**/collide')
    if grenadeModelCol and not grenadeModelCol.isEmpty():
        grenadeModelCol.removeNode()
    # Only the local client owns hit detection for its own projectiles.
    if av.isLocal():
        collNode = grenade.getCollNode()
        collNode.reparentTo(render)
    else:
        collNode = None
    # 8-bit wrap-around sequence counter: parses as (ammoSequence + 1) & 255.
    av.ammoSequence = av.ammoSequence + 1 & 255
    # Tags identify the shot on collision callbacks (attribution metadata).
    grenade.setTag('ammoSequence', str(av.ammoSequence))
    grenade.setTag('skillId', str(int(skillId)))
    grenade.setTag('ammoSkillId', str(int(ammoSkillId)))
    grenade.setTag('attackerId', str(av.getDoId()))
    self.putGrenadeInHand(av, grenade)
    self.addCollider(av, grenade, collNode)
    self.throwGrenade(av, skillId, ammoSkillId, grenade, collNode, charge, target)
    return
def addCollider(self, av, grenade, collNode):
    """Register the grenade's collision node with the global traverser.

    Only the local avatar performs client-side hit detection; for remote
    avatars this is a no-op (their collNode is None anyway).
    """
    if not av.isLocal():
        return
    base.cTrav.addCollider(collNode, grenade.collHandler)
def removeCollider(self, av, collNode):
    """Unregister a grenade collision node from the global traverser.

    Mirrors addCollider: only the local avatar ever registered one.
    """
    if not av.isLocal():
        return
    base.cTrav.removeCollider(collNode)
def throwGrenade(self, av, skillId, ammoSkillId, grenade, collNode, powerMod=0, target=None):
    """Launch a spawned grenade on a ballistic ProjectileInterval.

    Two modes:
      * target is None (player throw): compute a start velocity from skill
        power, charge (powerMod), camera pitch and the avatar's own motion.
      * target given (AI throw): fly through the target's position as a
        waypoint over an air time derived from distance.
    The grenade self-destructs when the interval completes.
    """
    if not av:
        return
    startPos = av.rightHandNode.getPos(render)
    endPos = None
    duration = None
    wayPoint = None
    timeToWayPoint = None
    if target == None:
        power = WeaponGlobals.getAttackProjectilePower(skillId, ammoSkillId)
        # Charge scales the base throw power.
        power *= powerMod + self.BASE_GRENADE_POWER
        # Remote avatars have no camera; assume level pitch.
        if av.isLocal():
            pitch = camera.getP(render)
        else:
            pitch = 0.0
        # Rotate the launch vector into world space via the avatar's matrix.
        m = av.getMat(render)
        startVel = m.xformVec(Vec3(0, power, 30 + pitch))
        if av.isLocal():
            # Inherit the thrower's current run speed so the grenade does
            # not lag behind a moving avatar.
            forwardVel = av.controlManager.currentControls.getSpeeds()[0]
            sideVel = av.controlManager.currentControls.getSpeeds()[2]
            avVel = m.xformVec(Vec3(sideVel / 3.0, forwardVel, 0))
            startVel += avVel
        # Terminate the arc 100 units below the launch height.
        endPlaneZ = startPos[2] - 100
    else:
        startVel = None
        endPos = target.getPos(render)
        # Target position becomes a mid-flight waypoint, not the endpoint.
        wayPoint = endPos
        endPos = None
        tgtDist = av.getDistance(target)
        duration = WeaponGlobals.getAIProjectileAirTime(tgtDist)
        timeToWayPoint = duration
        duration = None
        endPlaneZ = wayPoint.getZ() - 100
    try:
        projInterval = ProjectileInterval(grenade, startPos=startPos, endPos=endPos, duration=duration, startVel=startVel, endZ=endPlaneZ, collNode=collNode, wayPoint=wayPoint, timeToWayPoint=timeToWayPoint)
    except StandardError, e:
        # Re-raise with the full parameter set for diagnosis (Python 2 syntax).
        raise StandardError('(localAv %s) Invalid projectile parameters(%s,%s,%s,%s,%s,%s,%s)' % (av.isLocal(), startPos, endPos, duration, startVel, endPlaneZ, wayPoint, timeToWayPoint))
    # Clean up collision registration and destroy the projectile at impact.
    ival = Sequence(projInterval, Func(self.removeCollider, av, collNode), Func(grenade.destroy), name='Grenade-%s-%s' % (av.doId, grenade.get_key()))
    grenade.setIval(ival, start=True)
    return
def putGrenadeInHand(self, av, grenade):
    """Position the grenade in world space at the avatar's right hand."""
    handPos = av.rightHandNode.getPos(render)
    grenade.reparentTo(render)
    grenade.setPos(render, handPos)
def playCastingAnim(self, av):
    """Kick off the voodoo casting visual on the current weapon.

    Starts the attunement cast effect when one is active and flashes a
    fading card over the weapon model. Always returns None; does nothing
    without an equipped weapon.
    """
    weapon = av.currentWeapon
    if not weapon:
        return None
    if av.attuneEffect:
        av.attuneEffect.castEffect.start()
    card = FadingCard(weapon.effectCard, weapon.effectColor)
    card.reparentTo(weapon)
    card.play()
    return None
def getPoke(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the voodoo-doll Poke attack interval.

    Locks input, applies attack fatigue, plays the casting visual plus the
    first 50 frames of the poke animation, then unlocks input.

    Fix: the original queued Func(av.attackTire) twice back-to-back — a
    copy/paste duplicate; every sibling skill builder queues it once.
    """
    ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(self.playCastingAnim, av), av.actorInterval('voodoo_doll_poke', endFrame=50, playRate=1.0, blendInT=0.3, blendOutT=0.3), Func(self.unlockInput, av))
    return ival
def getEvilEye(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Evil Eye attack interval: an aura effect that lerps from
    the caster's head toward the target (or a point ahead of the caster).

    Returns a Sequence, or None when no weapon is equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        attuneEffect = VoodooAura.getEffect(unlimited)
        if attuneEffect:
            attuneEffect.reparentTo(render)
            attuneEffect.setPos(av.headNode, 0, 0, 0)
            attuneEffect.setEffectColor(Vec4(0.2, 0.1, 0.5, 1))
            attuneEffect.particleDummy.reparentTo(render)
            attuneEffect.play()
            if target:
                if hasattr(target, 'creature'):
                    if target.creature:
                        headNode = target.creature.headNode
                        # Fix: the original computed
                        # `headNode.isEmpty() or headNode.getPos(render)`,
                        # which yields the boolean True (not a position)
                        # when the head node is empty. Fall back to the
                        # target's own position in that case.
                        if headNode.isEmpty():
                            targetPos = target.getPos(render)
                        else:
                            targetPos = headNode.getPos(render)
                    else:
                        targetPos = target.getPos(render)
                elif hasattr(target, 'headNode'):
                    targetPos = target.headNode.getPos(render)
                else:
                    targetPos = target.getPos(render)
            else:
                # No target: aim at a point 30 units in front of the caster.
                dummy = av.attachNewNode('dummy')
                dummy.setPos(0, 30, 4)
                targetPos = dummy.getPos(render)
                dummy.removeNode()
            moveIval = LerpPosInterval(attuneEffect, 0.4, targetPos)
            moveIval.start()

    ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), av.actorInterval('emote_anger', playRate=2.0, blendInT=0.2, blendOutT=0.3), Func(self.unlockInput, av))
    return ival
def getReflect(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Reflect skill interval with its pulse/flash/aura visuals.

    Returns a Sequence, or None when no weapon is equipped.
    NOTE(review): nesting was reconstructed from a whitespace-mangled
    source; the moveIval lerp is assumed to be guarded by `if target:`
    since targetPos is only bound on that path.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Pulse + hand flash, Medium quality and up.
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            auraPulse = HitPulse.getEffect(unlimited)
            if auraPulse:
                auraPulse.reparentTo(av)
                auraPulse.setEffectColor(Vec4(1, 0.9, 0.6, 0.75))
                auraPulse.effectModel.setPos(0, 3, 4.0)
                auraPulse.setScale(1.0)
                # Orient the pulse toward the target, or the camera if none.
                if target:
                    auraPulse.lookAt(target)
                else:
                    auraPulse.lookAt(base.camera)
                auraPulse.play()
            flashEffect = FlashEffect()
            flashEffect.reparentTo(av.rightHandNode)
            flashEffect.setScale(10.0)
            flashEffect.fadeTime = 1.0
            flashEffect.setEffectColor(Vec4(1, 0.9, 0.6, 1))
            flashEffect.play()
        auraEffect = VoodooAura2.getEffect(unlimited)
        if auraEffect:
            auraEffect.reparentTo(render)
            auraEffect.setPos(av, 0, 2, 4)
            auraEffect.setEffectColor(Vec4(1, 0.9, 0.6, 0.15))
            auraEffect.particleDummy.reparentTo(render)
            auraEffect.play()
            if target:
                if hasattr(target, 'creature'):
                    if target.creature:
                        # NOTE(review): when the head node is empty this
                        # `or` yields True, not a position — same latent
                        # bug as getEvilEye; confirm before relying on it.
                        targetPos = target.creature.headNode.isEmpty() or target.creature.headNode.getPos(render)
                    else:
                        targetPos = target.getPos(render)
                else:
                    targetPos = target.headNode.getPos(render)
                moveIval = LerpPosInterval(auraEffect, 0.4, targetPos)
                moveIval.start()

    ival = Sequence(Func(self.lockInput, av), av.actorInterval('voodoo_tune', playRate=1.5, startFrame=0, endFrame=20, blendInT=0.1, blendOutT=0.0), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('voodoo_tune', playRate=1.5, startFrame=20, endFrame=35, blendInT=0.0, blendOutT=0.3), Func(self.unlockInput, av))
    return ival
def getAttune(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the attune interval: a double-speed 'voodoo_tune' clip
    bracketed by input lock/unlock and attack fatigue."""
    tune = av.actorInterval('voodoo_tune', playRate=2.0, endFrame=35, blendInT=0.2, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        tune,
        Func(self.unlockInput, av))
def getUnattune(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the un-attune interval: plays the weapon's unattune sfx over
    the 'voodoo_swarm' clip. Returns None without an equipped weapon."""
    if not av.currentWeapon:
        return None
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.currentWeapon.playUnattuneSfx, av.currentWeapon),
        swarm,
        Func(self.unlockInput, av))
def getHeal(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Heal cast interval (standard voodoo_swarm cast pattern)."""
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(self.playCastingAnim, av),
        swarm,
        Func(self.unlockInput, av))
def getShackles(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Shackles cast interval (standard voodoo_swarm cast pattern)."""
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(self.playCastingAnim, av),
        swarm,
        Func(self.unlockInput, av))
def getSwarm(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Swarm cast interval (standard voodoo_swarm cast pattern)."""
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(self.playCastingAnim, av),
        swarm,
        Func(self.unlockInput, av))
def getBurn(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Burn cast interval (standard voodoo_swarm cast pattern)."""
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(self.playCastingAnim, av),
        swarm,
        Func(self.unlockInput, av))
def getCure(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Cure cast interval (standard voodoo_swarm cast pattern)."""
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(self.playCastingAnim, av),
        swarm,
        Func(self.unlockInput, av))
def getHexWard(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Hex Ward cast interval: the standard voodoo_swarm cast
    plus a blue pulse/flash visual at Medium effects quality and up.
    """

    def startVFX():
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            auraPulse = HitPulse.getEffect(unlimited)
            if auraPulse:
                auraPulse.reparentTo(av)
                auraPulse.setEffectColor(Vec4(0.4, 0.3, 1, 0.75))
                auraPulse.effectModel.setPos(0, 3, 4.0)
                auraPulse.setScale(1.0)
                # Orient the pulse toward the target, or the camera if none.
                if target:
                    auraPulse.lookAt(target)
                else:
                    auraPulse.lookAt(base.camera)
                auraPulse.play()
            flashEffect = FlashEffect()
            flashEffect.reparentTo(av.rightHandNode)
            flashEffect.setScale(10.0)
            flashEffect.fadeTime = 1.0
            flashEffect.setEffectColor(Vec4(0.2, 0.6, 1, 1))
            flashEffect.play()

    ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(self.playCastingAnim, av), Func(startVFX), av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3), Func(self.unlockInput, av))
    return ival
def getCaptainsResolve(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Captain's Resolve cast interval.

    Two VFX stages: a weapon glow / cone-ray burst at cast start, then a
    ground disc and heal aura at frame 12 of the voodoo_swarm animation.
    """

    def startVFX():
        # Stage 1: weapon glow and cone rays; heal burst at High quality.
        unlimited = av.isLocal()
        pulseFlash = PulsingGlow.getEffect(unlimited)
        if pulseFlash:
            pulseFlash.reparentTo(av.currentWeapon)
            pulseFlash.setEffectColor(Vec4(0.3, 1, 1, 0.8))
            pulseFlash.setScale(1.0)
            pulseFlash.play()
        coneRays = ConeRays.getEffect(unlimited)
        if coneRays:
            coneRays.reparentTo(av)
            coneRays.setPos(0, 0, 1.0)
            coneRays.setEffectColor(Vec4(0.3, 1, 1, 0.2))
            coneRays.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            healBlast = HealBlast.getEffect(unlimited)
            if healBlast:
                healBlast.reparentTo(av)
                healBlast.setPos(0, 0, 6.0)
                healBlast.play()

    def startVFX2():
        # Stage 2: ground aura (Medium+) and heal sparkle aura (High).
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            healDisc = VoodooGroundAura.getEffect(unlimited)
            if healDisc:
                healDisc.setEffectColor(Vec4(0.3, 1, 1, 0.35))
                healDisc.reparentTo(av)
                healDisc.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            healAura = VoodooAuraHeal.getEffect(unlimited)
            if healAura:
                healAura.setEffectColor(Vec4(0.3, 1, 1, 0.5))
                healAura.reparentTo(av)
                healAura.play()

    ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(self.playCastingAnim, av), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('voodoo_swarm', playRate=1.0, startFrame=0, endFrame=12, blendInT=0.3, blendOutT=0.0), Func(startVFX2), av.actorInterval('voodoo_swarm', playRate=1.0, startFrame=12, endFrame=64, blendInT=0, blendOutT=0.3), Func(self.unlockInput, av))
    return ival
def getCurse(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Curse cast interval (standard voodoo_swarm cast pattern)."""
    swarm = av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3)
    return Sequence(
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(self.playCastingAnim, av),
        swarm,
        Func(self.unlockInput, av))
def getLifeDrain(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Life Drain cast interval: voodoo_swarm cast with spectral
    smoke on the caster and homing soul missiles flying from the target to
    the caster (one at Medium quality, two extra randomized ones at High).

    NOTE(review): nesting below was reconstructed from a whitespace-mangled
    source; quality gates are assumed to sit at closure level.
    """

    def startVFX():
        unlimited = av.isLocal()
        # Spectral smoke column scaled to the caster's height.
        effect = SpectralSmoke.getEffect(unlimited)
        if effect:
            effect.reparentTo(av)
            effect.setPos(av, 0, 0, av.getHeight() / 2.0)
            effect.setScale(1, 1, av.getHeight() / 2.0)
            effect.play(duration=2.0, delay=1.5)
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = HealSparks.getEffect(unlimited)
            if effect:
                effect.reparentTo(av)
                effect.setPos(av, 0, 0, av.getHeight() / 1.5)
                effect.setScale(1, 1, av.getHeight() / 2.0)
                effect.setEffectColor(Vec4(0.2, 0.2, 1.0, 1))
                effect.play(delay=1.5)
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            # Primary soul missile: target -> caster.
            effect = HomingMissile.getEffect(unlimited)
            if effect and target:
                effect.reparentTo(render)
                effect.setPos(target, 0, 0, target.getHeight() - 1.5)
                effect.target = av
                effect.initialVelocity = Vec3(0, 0, 1.5)
                effect.targetOffset = Vec3(0, 0, 3.0)
                effect.duration = 1.75
                effect.wantTrail = 0
                effect.particleEffect = SpectralTrail.getEffect()
                effect.play()
            if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
                # Two extra missiles with randomized sideways launch and
                # duration for variety.
                effect = HomingMissile.getEffect(unlimited)
                if effect and target:
                    effect.reparentTo(render)
                    effect.setPos(target, 0, 0, target.getHeight() - 1.5)
                    effect.target = av
                    randomness = random.random() * 1.25
                    effect.initialVelocity = Vec3(-1.0 - randomness, 0, 1.5)
                    effect.targetOffset = Vec3(0, 0, 3.0)
                    effect.duration = 1.5 + randomness
                    effect.wantTrail = 0
                    effect.particleEffect = SpectralTrail.getEffect()
                    effect.play()
                effect = HomingMissile.getEffect(unlimited)
                if effect and target:
                    effect.reparentTo(render)
                    effect.setPos(target, 0, 0, target.getHeight() - 1.5)
                    effect.target = av
                    randomness = random.random() * 1.25
                    effect.initialVelocity = Vec3(1.0 + randomness, 0, 1.5)
                    effect.targetOffset = Vec3(0, 0, 3.0)
                    effect.duration = 1.5 + randomness
                    effect.wantTrail = 0
                    effect.particleEffect = SpectralTrail.getEffect()
                    effect.play()

    # VFX fires 0.5s into the cast, in parallel with the animation.
    ival = Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(self.playCastingAnim, av), Parallel(av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3), Sequence(Wait(0.5), Func(startVFX))), Func(self.unlockInput, av))
    del startVFX
    return ival
def startChargeSound(self, av, skillId):
    """Begin the weapon-charge audio: a one-shot attack sound followed by
    an indefinitely looping charge loop, stored on the weapon so
    stopChargeSound can tear it down. Returns None.

    NOTE(review): the HIT/MISS sfx slots of the skill-anim info appear to
    be repurposed here as charge-start / charge-loop sounds — confirm
    against WeaponGlobals.
    """
    if not av.currentWeapon:
        return None
    skillInfo = WeaponGlobals.getSkillAnimInfo(skillId)
    getChargeSfxFunc = skillInfo[WeaponGlobals.HIT_SFX_INDEX]
    getChargeLoopSfxFunc = skillInfo[WeaponGlobals.MISS_SFX_INDEX]
    av.currentWeapon.chargeSound = getChargeSfxFunc()
    av.currentWeapon.chargeLoopSound = getChargeLoopSfxFunc()
    # One-shot intro, then a very long (1000s) loop until explicitly stopped.
    av.currentWeapon.chargeSoundSequence = Sequence(SoundInterval(av.currentWeapon.chargeSound, loop=0, node=av.currentWeapon, cutOff=60), SoundInterval(av.currentWeapon.chargeLoopSound, loop=1, duration=1000, node=av.currentWeapon, cutOff=60))
    av.currentWeapon.chargeSoundSequence.start()
    # Some weapons also have a visual charge effect hook.
    if hasattr(av.currentWeapon, 'startChargeEffect'):
        av.currentWeapon.startChargeEffect()
    return None
def stopChargeSound(self, av):
    """Tear down any charge audio started by startChargeSound, clearing the
    weapon's references so repeated calls are safe. No-op without a weapon.
    """
    if not av.currentWeapon:
        return
    if av.currentWeapon.chargeSoundSequence:
        av.currentWeapon.chargeSoundSequence.finish()
        av.currentWeapon.chargeSoundSequence = None
    # Stop both individual sounds too; the sequence alone may leave the
    # underlying AudioSound objects playing.
    if av.currentWeapon.chargeSound:
        av.currentWeapon.chargeSound.stop()
        av.currentWeapon.chargeSound = None
    if av.currentWeapon.chargeLoopSound:
        av.currentWeapon.chargeLoopSound.stop()
        av.currentWeapon.chargeLoopSound = None
    # Mirror of the optional startChargeEffect hook.
    if hasattr(av.currentWeapon, 'stopChargeEffect'):
        av.currentWeapon.stopChargeEffect()
    return
def playCastSound(self, av, skillId):
    """Play the one-shot cast sfx for skillId, positioned on the avatar."""
    animInfo = WeaponGlobals.getSkillAnimInfo(skillId)
    castSfx = animInfo[WeaponGlobals.HIT_SFX_INDEX]()
    if castSfx:
        base.playSfx(castSfx, node=av, cutoff=60)
def getChargeWitherAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Wither charge-up interval: lock movement, start the charge
    audio and staff VFX, play the cast wind-up, then loop the cast idle.

    Returns a Sequence, or None when no weapon is equipped.

    Fix: the original called setPos twice in a row on the effect — a
    hard-coded (-0.1, 1.5, 0) immediately overwritten by the per-item
    offset. The dead first call is removed; only the item-specific
    placement is kept.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            av.currentWeapon.effect = WitherCharge.getEffect(unlimited)
            if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect.reparentTo(av.currentWeapon)
                av.currentWeapon.effect.setPos(av.currentWeapon, av.currentWeapon.getOffset(av.currentWeapon.itemId))
                av.currentWeapon.effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                av.currentWeapon.effect.startLoop()

    seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT=0), Func(av.loop, 'wand_cast_idle', blendT=0))
    del startVFX
    return seq
def getCastWitherAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Wither cast-release interval: stop the charge loop/audio
    and fire the area-of-effect harvest visuals, scaled to the skill's
    modified attack radius.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Shut down the looping charge effect from getChargeWitherAnim.
        if av.currentWeapon.effect:
            av.currentWeapon.effect.stopLoop()
            av.currentWeapon.effect = None
        effect = SoulHarvest.getEffect(unlimited)
        if effect:
            effect.reparentTo(av.getParent())
            effect.setPos(av, 0, 5, 0)
            # Visual radius matches the gameplay AoE radius.
            effect.radius = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
            effect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            effect = DomeExplosion.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setPos(av, 0, 5, 0)
                effect.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 2.0
                effect.play()
            effect = DarkPortal.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setPos(av, 0, 5, 0)
                effect.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 4.0
                effect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = EvilRingEffect.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setPos(av, 0, 5, 0)
                effect.effectScale = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
                effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                effect.duration = 2.5
                effect.play()
        return

    seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
    del startVFX
    return seq
def getChargeSoulflayAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Soul Flay charge-up interval: lock movement, start charge
    audio and a looping soul-spiral effect on the staff, play the cast
    wind-up, then settle into the cast idle loop.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            av.currentWeapon.effect = SoulSpiral.getEffect(unlimited)
            if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect.reparentTo(av.currentWeapon)
                av.currentWeapon.effect.setPos(av.currentWeapon, -0.1, 1.5, 0)
                av.currentWeapon.effect.setHpr(av.currentWeapon, 0.0, -90.0, 0.0)
                av.currentWeapon.effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                av.currentWeapon.effect.setScale(0.9, 0.9, 0.9)
                av.currentWeapon.effect.startLoop()

    seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT=0), Func(av.loop, 'wand_cast_idle', blendT=0))
    del startVFX
    return seq
def getCastSoulFlayAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Soul Flay cast-release interval: stop the charge loop and
    fire the flay beam plus (quality-gated) streaming souls and staff glow.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Shut down the looping charge effect from getChargeSoulflayAnim.
        if av.currentWeapon.effect:
            av.currentWeapon.effect.stopLoop()
            av.currentWeapon.effect = None
        effect = SoulFlay.getEffect(unlimited)
        if effect:
            effect.reparentTo(av.getParent())
            effect.setPos(av, 0.0, 4.0, 3.0)
            effect.setHpr(av, 0.0, -90.0, 0.0)
            effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
            effect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            # Souls stream forward from a throwaway anchor node in front of
            # the caster, lerped 50 units out over one second.
            dummy = NodePath('effect')
            dummy.reparentTo(av.getParent())
            dummy.setPos(av, 0.0, 4.0, 4.0)
            dummy.setHpr(av, 0.0, 0.0, 0.0)
            effect = VoodooSouls.getEffect(unlimited)
            if effect:
                effect.reparentTo(dummy)
                effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                effect.play()
                posIval = LerpPosInterval(effect, 1.0, Vec3(0.0, 50.0, 0.0), startPos=Vec3(0.0, 0.0, 0.0))
                posIval.start()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            effect = VoodooGlow.getEffect(unlimited)
            if effect and not av.currentWeapon.isEmpty():
                effect.reparentTo(av.currentWeapon)
                effect.setPos(av.currentWeapon, av.currentWeapon.getOffset(av.currentWeapon.itemId))
                effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                effect.play()
        return

    seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
    del startVFX
    return seq
def getChargePestilenceAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Pestilence charge-up interval: a pestilence particle
    effect orbits the staff by riding a joint of a looping mopath actor.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            # The spiral motion path is an Actor; its animated joint is used
            # as a moving attachment point for the particle effect.
            av.currentWeapon.effectActor = Actor.Actor('models/effects/mopath_none', {'spin': 'models/effects/mopath_spiral'})
            joint = av.currentWeapon.effectActor.find('**/joint1')
            av.currentWeapon.effectActor.setScale(1.0, 0.75, 1.0)
            av.currentWeapon.effectActor.setHpr(av.currentWeapon.getHpr())
            av.currentWeapon.effectActor.reparentTo(av.currentWeapon)
            av.currentWeapon.effectActor.setPos(av.currentWeapon, 0.0, 1.8, 0.0)
            av.currentWeapon.effectActor.setPlayRate(1.2, 'spin')
            av.currentWeapon.effectActor.loop('spin')
            av.currentWeapon.effect = VoodooPestilence.getEffect(unlimited)
            if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect.particleDummy.reparentTo(av.currentWeapon)
                av.currentWeapon.effect.reparentTo(joint)
                av.currentWeapon.effect.effectScale = 1.0
                av.currentWeapon.effect.startLoop()

    seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT=0), Func(av.loop, 'wand_cast_idle', blendT=0))
    del startVFX
    return seq
def getCastPestilenceAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the Pestilence cast-release interval: stop the charge loop,
    then relocate/grow the spiral mopath actor in front of the caster and
    release the pestilence cloud along its animated joint.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Shut down the looping charge effect from getChargePestilenceAnim.
        if av.currentWeapon.effect:
            av.currentWeapon.effect.stopLoop()
            av.currentWeapon.effect = None
        effect = Pestilence.getEffect(unlimited)
        if effect:
            effect.reparentTo(av.getParent())
            effect.setPos(av, 0, 4.0, 3.0)
            effect.setHpr(av, 0, -90, 0)
            effect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            # Reuse the charge-phase mopath actor if it exists; otherwise
            # build a fresh one (e.g. the charge was skipped).
            if not av.currentWeapon.effectActor:
                av.currentWeapon.effectActor = Actor.Actor('models/effects/mopath_none', {'spin': 'models/effects/mopath_spiral'})
            joint = av.currentWeapon.effectActor.find('**/joint1')
            av.currentWeapon.effectActor.reparentTo(av.getParent())
            av.currentWeapon.effectActor.setPos(av, 0.0, 13.0, 4.0)
            av.currentWeapon.effectActor.setHpr(av.getHpr())
            av.currentWeapon.effectActor.setPlayRate(1.8, 'spin')
            av.currentWeapon.effectActor.play('spin')
            # Rapidly expand the spiral as the cloud releases.
            scaleIval = LerpScaleInterval(av.currentWeapon.effectActor, 0.25, Vec3(10.0, 25.0, 10.0), startScale=Vec3(2.0, 15.0, 2.0))
            scaleIval.start()
            effect = VoodooPestilence.getEffect(unlimited)
            if effect:
                effect.particleDummy.reparentTo(av.getParent())
                effect.reparentTo(joint)
                effect.effectScale = 4.0
                effect.play()
        return

    seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
    del startVFX
    return seq
def getChargeHellfireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the charge-up interval for the Hellfire staff skill.

    Attaches a looping FlamingSkull to the weapon tip (plus a VoodooStaffFire
    loop at medium+ effect settings), locks movement, and parks the avatar in
    the cast-idle loop until the skill is released or fizzles.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Per-item tip offset, nudged slightly forward for the skull.
        offset = av.currentWeapon.getOffset(av.currentWeapon.itemId) + Vec3(0, 0.2, 0)
        av.currentWeapon.effect = FlamingSkull.getEffect(unlimited)
        if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
            av.currentWeapon.effect.reparentTo(av.currentWeapon)
            av.currentWeapon.effect.setPos(av.currentWeapon, offset + Vec3(0.2, 1, 0.3))
            av.currentWeapon.effect.setHpr(av.currentWeapon, 0, -90, 40)
            av.currentWeapon.effect.startLoop()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            av.currentWeapon.effect2 = VoodooStaffFire.getEffect(unlimited)
            if av.currentWeapon.effect2 and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect2.reparentTo(av.currentWeapon)
                av.currentWeapon.effect2.setPos(av.currentWeapon, offset)
                av.currentWeapon.effect2.setHpr(av.currentWeapon, 0, -90, 0)
                av.currentWeapon.effect2.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                av.currentWeapon.effect2.startLoop()

    seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT=0), Func(av.loop, 'wand_cast_idle', blendT=0))
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del startVFX
    return seq
def getCastHellfireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the release interval for Hellfire: launch the charged skull.

    The charge phase (getChargeHellfireAnim) parks a FlamingSkull on the
    weapon; here it is detached into world space and launched at the target,
    and any secondary loop effect is shut down.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def launchSkullVFX():
        weapon = av.currentWeapon
        # Only launch if the charge phase actually attached a FlamingSkull.
        if isinstance(weapon.effect, FlamingSkull):
            weapon.effect.wrtReparentTo(render)
            targetPos, speed, impactT = av.getProjectileInfo(skillId, None)
            weapon.effect.playLaunch(speed, targetPos)
        if weapon.effect2:
            weapon.effect2.stopLoop()
            weapon.effect2 = None
        return

    steps = [
        Func(av.considerEnableMovement),
        Func(self.lockInput, av),
        Func(av.attackTire),
        Func(launchSkullVFX),
        Func(self.stopChargeSound, av),
        Func(self.playCastSound, av, skillId),
        av.actorInterval('wand_cast_fire'),
        Func(self.unlockInput, av),
    ]
    seq = Sequence(*steps)
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del launchSkullVFX
    return seq
def getChargeBanishAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the charge-up interval for the Banish staff skill.

    Attaches a looping VoodooPower glow at the weapon tip (plus an
    EnergySpiral loop at medium+ effect settings), both tinted with the
    item's effect color, then holds the cast-idle animation.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        offset = av.currentWeapon.getOffset(av.currentWeapon.itemId)
        av.currentWeapon.effect = VoodooPower.getEffect(unlimited)
        if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
            av.currentWeapon.effect.reparentTo(av.currentWeapon)
            av.currentWeapon.effect.setPos(av.currentWeapon, offset + Vec3(0, 1.45, -0.1))
            av.currentWeapon.effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
            av.currentWeapon.effect.startLoop()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            av.currentWeapon.effect2 = EnergySpiral.getEffect(unlimited)
            if av.currentWeapon.effect2 and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect2.reparentTo(av.currentWeapon)
                av.currentWeapon.effect2.setPos(av.currentWeapon, offset + Vec3(0, 0, -0.1))
                av.currentWeapon.effect2.setHpr(av.currentWeapon, 0.0, -90.0, 0.0)
                av.currentWeapon.effect2.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                av.currentWeapon.effect2.startLoop()

    seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT=0), Func(av.loop, 'wand_cast_idle', blendT=0))
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del startVFX
    return seq
def getCastBanishAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the release interval for the Banish staff skill.

    Clears both looping charge effects from the weapon and, at medium+
    effect settings, plays a one-shot VoodooGlow flash at the weapon tip.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Tear down whatever the charge phase left looping.
        if av.currentWeapon.effect:
            av.currentWeapon.effect.stopLoop()
            av.currentWeapon.effect = None
        if av.currentWeapon.effect2:
            av.currentWeapon.effect2.stopLoop()
            av.currentWeapon.effect2 = None
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            effect = VoodooGlow.getEffect(unlimited)
            if effect and not av.currentWeapon.isEmpty():
                effect.reparentTo(av.currentWeapon)
                effect.setPos(av.currentWeapon, av.currentWeapon.getOffset(av.currentWeapon.itemId))
                effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                effect.play()
        return

    seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del startVFX
    return seq
def getChargeDesolationAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the charge-up interval for the Desolation staff skill.

    At medium+ effect settings, spins a DesolationChargeSmoke loop around a
    mopath joint attached to the weapon; at high settings, adds a WindCharge
    loop as well. Holds the cast-idle animation until release.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            av.currentWeapon.effectActor = Actor.Actor('models/effects/mopath_none', {'spin': 'models/effects/mopath_spiral'})
            joint = av.currentWeapon.effectActor.find('**/joint1')
            av.currentWeapon.effectActor.setScale(1.0, 0.75, 1.0)
            av.currentWeapon.effectActor.setP(0.0)
            av.currentWeapon.effectActor.reparentTo(av.currentWeapon)
            av.currentWeapon.effectActor.setPos(av.currentWeapon, 0.0, 1.7, 0.0)
            av.currentWeapon.effectActor.setPlayRate(1.5, 'spin')
            av.currentWeapon.effectActor.loop('spin')
            av.currentWeapon.effect = DesolationChargeSmoke.getEffect(unlimited)
            if av.currentWeapon.effect and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect.particleDummy.reparentTo(av.currentWeapon)
                av.currentWeapon.effect.reparentTo(joint)
                av.currentWeapon.effect.effectScale = 1.0
                av.currentWeapon.effect.startLoop()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            av.currentWeapon.effect2 = WindCharge.getEffect(unlimited)
            if av.currentWeapon.effect2 and not av.currentWeapon.isEmpty():
                av.currentWeapon.effect2.reparentTo(av.currentWeapon)
                av.currentWeapon.effect2.setPos(av.currentWeapon, 0.0, 1.25, 0.0)
                # NOTE(review): siblings pass the weapon node as the first
                # setHpr arg; this bare-component call rotates relative to the
                # parent instead — confirm intentional.
                av.currentWeapon.effect2.setHpr(0, -90, 0)
                av.currentWeapon.effect2.startLoop()

    seq = Sequence(Func(av.motionFSM.moveLock), Func(av.currentWeapon.hideMouse, av), Func(base.cr.targetMgr.setWantAimAssist, 1), Func(self.startChargeSound, av, skillId), Func(startVFX), av.actorInterval('wand_cast_start', blendOutT=0), Func(av.loop, 'wand_cast_idle', blendT=0))
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del startVFX
    return seq
def getCastDesolationAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the release interval for the Desolation AoE staff skill.

    Clears the charge-phase loops, then fires a layered area burst centered
    on the avatar: WindWave + SoulHarvest2 always; DesolationSmoke,
    DomeExplosion and DarkPortal at medium+ settings; a camera shake at high
    settings. Area effects are sized from the battle manager's modified
    attack-area radius. Returns None when no weapon is equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        # Tear down whatever the charge phase left looping.
        if av.currentWeapon.effect:
            av.currentWeapon.effect.stopLoop()
            av.currentWeapon.effect = None
        if av.currentWeapon.effect2:
            av.currentWeapon.effect2.stopLoop()
            av.currentWeapon.effect2 = None
        effect = WindWave.getEffect(unlimited)
        if effect:
            effect.reparentTo(av.getParent())
            effect.setEffectColor(Vec4(1, 1, 1, 0.75))
            effect.setPos(av, 0.0, 0.0, 0.0)
            effect.setScale(1.0, 1.0, 1.0)
            effect.setHpr(0.0, 0.0, 0.0)
            effect.play()
        effect = SoulHarvest2.getEffect(unlimited)
        if effect:
            effect.reparentTo(av.getParent())
            effect.setPos(av, 0, 0, 2)
            # Radius tracks the skill's actual AoE so visuals match gameplay.
            effect.radius = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            effect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            effect = DesolationSmoke.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setEffectColor(Vec4(1, 1, 1, 1))
                effect.setPos(av, 0.0, 0.0, 0.0)
                effect.play()
            effect = DomeExplosion.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setPos(av, 0, 0, 0)
                effect.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
                effect.play()
            effect = DarkPortal.getEffect(unlimited)
            if effect:
                effect.reparentTo(av.getParent())
                effect.setPos(av, 0, 0, 0)
                # Portal visual overshoots the gameplay radius for drama.
                effect.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 3.0
                effect.play()
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            cameraShakerEffect = CameraShaker()
            cameraShakerEffect.wrtReparentTo(av.getParent())
            cameraShakerEffect.setPos(av, 0.0, 0.0, 0.0)
            cameraShakerEffect.shakeSpeed = 0.075
            cameraShakerEffect.shakePower = 1.0
            cameraShakerEffect.numShakes = 30
            cameraShakerEffect.scalePower = 1
            cameraShakerEffect.play(100.0)
        return

    seq = Sequence(Func(av.considerEnableMovement), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.stopChargeSound, av), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del startVFX
    return seq
def getFizzleAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Return the interval played when a charged staff skill fizzles.

    Stops any looping charge VFX still attached to the weapon, cancels aim
    assist and the charge sound, and plays the cast-fire animation.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None
    # Identical teardown to the cast handlers: reuse the shared helper
    # instead of duplicating the effect/effect2 stop-and-clear logic inline.
    self.cleanWeaponEffects(av)
    return Sequence(Func(av.considerEnableMovement), Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(self.stopChargeSound, av), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
def getCastFireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the interval for a basic staff projectile cast.

    Launches a VoodooProjectile toward the target (speed/arc from
    av.getProjectileInfo) tinted with the item's effect color, plus a
    one-shot VoodooGlow at the weapon at medium+ effect settings.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        self.cleanWeaponEffects(av)
        # (Removed an unused local color list left over from an older
        # motion-trail implementation.)
        targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
        effect = VoodooProjectile.getEffect(unlimited)
        if effect:
            effect.reparentTo(render)
            effect.setPos(av, 0, 2, 2)
            effect.setH(av.getH(render))
            effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
            effect.play(targetPos, speed, target)
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            # NOTE(review): unlike every other call site, getEffect() is
            # called here without the `unlimited` flag — confirm whether the
            # pool default is intended for this glow.
            effect = VoodooGlow.getEffect()
            if effect and not av.currentWeapon.isEmpty():
                effect.reparentTo(av.currentWeapon)
                effect.setPos(av.currentWeapon, 0.0, 2.0, 0.0)
                effect.setEffectColor(av.currentWeapon.getEffectColor(av.currentWeapon.itemId))
                effect.play()

    seq = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(av.attackTire), Func(startVFX), Func(self.playCastSound, av, skillId), av.actorInterval('wand_cast_fire'), Func(self.unlockInput, av))
    # Drop the local ref; the Func inside seq keeps the closure alive.
    del startVFX
    return seq
def getToggleAuraOnAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the interval that switches on a staff aura (warding/nature/dark).

    The aura intervals themselves come from the weapon; an unsupported
    weapon (no aura getters) simply skips the VFX step.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    # Maps the toggle skill to the weapon method that builds its aura ival.
    _AURA_STARTERS = {
        EnemySkills.STAFF_TOGGLE_AURA_WARDING: 'getStartWardingAura',
        EnemySkills.STAFF_TOGGLE_AURA_NATURE: 'getStartNatureAura',
        EnemySkills.STAFF_TOGGLE_AURA_DARK: 'getStartDarkAura',
    }

    def igniteAura():
        weapon = av.currentWeapon
        if not hasattr(weapon, 'getStartWardingAura'):
            return
        starterName = _AURA_STARTERS.get(skillId)
        if starterName is not None:
            getattr(weapon, starterName)(av).start()

    seq = Sequence(
        Func(base.cr.targetMgr.setWantAimAssist, 0),
        Func(self.lockInput, av),
        Func(igniteAura),
        Func(self.startChargeSound, av, skillId),
        av.actorInterval('wand_cast_fire'),
        Func(self.unlockInput, av))
    del igniteAura
    return seq
def getToggleAuraOffAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the interval that turns the active staff aura off.

    Purely bookkeeping: cancel aim assist, stop the charge sound, and cycle
    the input lock. No animation or VFX is played here.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None
    return Sequence(
        Func(base.cr.targetMgr.setWantAimAssist, 0),
        Func(self.stopChargeSound, av),
        Func(self.lockInput, av),
        Func(self.unlockInput, av))
def cleanWeaponEffects(self, av):
    """Stop and release any looping charge VFX still held by av's weapon."""
    weapon = av.currentWeapon
    if weapon:
        # The weapon carries at most two looping effect slots.
        for slot in ('effect', 'effect2'):
            fx = getattr(weapon, slot)
            if fx:
                fx.stopLoop()
                setattr(weapon, slot, None)
    return
def getDrink(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the interval for drinking a consumable (tonic/potion).

    Temporarily hides the held weapon, attaches the consumable model keyed by
    ammoSkillId, plays the drink animation, then restores the weapon. A
    separate drink lock (with a 0.6s tail) throttles repeat drinking beyond
    the input lock. Returns None when no consumable model is available.
    """
    if not av.consumable:
        return None

    def hideCurrentWeapon():
        if av.currentWeapon:
            if not av.currentWeapon.isEmpty():
                av.currentWeapon.hide()

    def showCurrentWeapon():
        if av.currentWeapon:
            if not av.currentWeapon.isEmpty():
                av.currentWeapon.show()

    return Sequence(Func(self.lockInput, av), Func(av.attackTire), Func(self.lockDrink, av), Func(hideCurrentWeapon), Func(av.consumable.updateItemId, ammoSkillId), Func(av.consumable.attachTo, av), av.actorInterval('drink_potion', playRate=1.5, startFrame=8, endFrame=45, blendInT=0.2, blendOutT=0.2), Func(showCurrentWeapon), Func(av.consumable.detachFrom, av), Func(self.unlockInput, av), Wait(0.6), Func(self.unlockDrink, av))
# Basic cutlass attacks: each locks input and motion, runs one sword
# animation bracketed by beginAttack/endAttack (which drives the weapon
# trail and hit detection), then restores movement and input.

def getChop(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Cleave swing ('sword_cleave', frames 9-45)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('sword_cleave', playRate=1.0, startFrame=9, endFrame=45, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getDoubleSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fast slash ('sword_slash' at 1.5x)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('sword_slash', playRate=1.5, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getLunge(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Lunge ('sword_lunge' at 1.5x) with the weapon's skill sound."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('sword_lunge', playRate=1.5, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getStab(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Thrust ('sword_thrust') with the weapon's skill sound."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval('sword_thrust', playRate=1.0, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival
def getRollThrust(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the rolling-thrust sword attack interval.

    Turns the avatar toward the target (via a throwaway placeholder node used
    only to measure the heading delta), then — for the local avatar — slides
    it forward across self.rollDistance during the animation with fluid
    (collision-aware) position updates. Remote avatars just play the
    animation; their position comes from the network.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None
    placeHolder = av.attachNewNode('rollThrustPlaceHolder')
    if target:
        # lookAt on the child node yields the heading offset to the target.
        placeHolder.lookAt(target)
        newH = av.getH() + placeHolder.getH()
        self.rollDistance = av.getDistance(target)
    else:
        newH = av.getH()
        self.rollDistance = WeaponGlobals.getAttackRange(skillId, ammoSkillId)
    # Stop slightly short of the target so we don't roll into it.
    self.rollDistance = max(0.0, self.rollDistance - 0.5)
    self.currAmount = 0

    def setRollPosition(v):
        # Incremental lerp: move only the delta since the last callback so
        # the total traveled equals rollDistance regardless of step timing.
        distance = self.rollDistance * v - self.currAmount
        self.currAmount += distance
        rotMat = Mat3.rotateMatNormaxis(av.getH(), Vec3.up())
        # Project "forward" along the ground contact normal so the roll
        # follows slopes instead of tunneling into them.
        contact = av.physControls.lifter.getContactNormal()
        forward = contact.cross(Vec3.right())
        forward.normalize()
        vel = Vec3(forward * distance)
        vel = Vec3(rotMat.xform(vel))
        av.setFluidPos(Point3(av.getPos() + vel))

    if av.isLocal():
        ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), Func(av.controlManager.currentControls.setCollisionsActive, 1), Parallel(av.actorInterval('sword_roll_thrust', playRate=1.5, startFrame=1, blendInT=0, blendOutT=0), LerpHprInterval(av, 0.05, Vec3(newH, av.getP(), av.getR())), Sequence(Wait(0.3), LerpFunctionInterval(setRollPosition, duration=0.6, fromData=0.0, toData=1.0, name='setRollPosition')), Sequence(Wait(0.6), Func(av.controlManager.currentControls.setCollisionsActive, 0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))))
    else:
        ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), Parallel(av.actorInterval('sword_roll_thrust', playRate=1.5, startFrame=1, blendInT=0, blendOutT=0), Sequence(Wait(0.6), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))))
    placeHolder.removeNode()
    return ival
def getComboA(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Sword combo A: lock input/motion, swing with a 0.4 trail, release.

    Returns None when the avatar has no weapon equipped.
    """
    weapon = av.currentWeapon
    if not weapon:
        return None
    return Sequence(
        Func(self.lockInput, av),
        Func(av.motionFSM.off),
        Func(weapon.hideMouse, av),
        Func(weapon.setTrailLength, 0.4),
        Func(weapon.beginAttack, av),
        av.actorInterval('sword_comboA', playRate=1.5, blendInT=0.5, blendOutT=0.5),
        Func(weapon.endAttack, av),
        Func(av.considerEnableMovement),
        Func(self.unlockInput, av))
# Dagger attacks. The combo moves all slice different frame ranges out of
# the shared 'dagger_combo' animation; the Parallel variants unlock input on
# a fixed timer rather than at animation end so chaining feels responsive.

def getWildSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Wild slash: 'dagger_combo' frames 54-87; input unlocks after 0.958s."""
    if not av.currentWeapon:
        return None
    ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.currentWeapon.endAttack, av), Func(av.currentWeapon.setTrailLength, 0.5), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_combo', playRate=1.0, startFrame=54, endFrame=87, blendInT=0.5, blendOutT=0.5)), Sequence(Wait(0.958), Func(self.unlockInput, av)))
    return ival

def getFlurry(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Flurry: 'dagger_combo' frames 88-142; input unlocks after 1.5s."""
    if not av.currentWeapon:
        return None
    ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.currentWeapon.endAttack, av), Func(av.currentWeapon.setTrailLength, 0.6), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_combo', playRate=1.0, startFrame=88, endFrame=142, blendInT=0.5, blendOutT=0.5)), Sequence(Wait(1.5), Func(self.unlockInput, av)))
    return ival

def getRiposte(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Riposte: 'dagger_combo' frames 1-28; input unlocks after 0.75s."""
    if not av.currentWeapon:
        return None
    ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.currentWeapon.endAttack, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_combo', playRate=1.0, startFrame=1, endFrame=28, blendInT=0.2, blendOutT=0.5)), Sequence(Wait(0.75), Func(self.unlockInput, av)))
    return ival

def getCoup(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Coup: full 'dagger_coup' with motion locked for the duration."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_coup', playRate=1.25, blendInT=0, blendOutT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getBackstab(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Backstab: full 'dagger_backstab' with motion locked for the duration."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dagger_backstab', playRate=1.5, blendInT=0, blendOutT=0), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival
# Dual-cutlass attacks: frame slices of the shared 'dualcutlass_comboA'/'B'
# animations, all with the standard lock/trail/attack bracketing.

def getDualCutlassCombination(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Combination: 'dualcutlass_comboB' frames 0-75 at 1.2x."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dualcutlass_comboB', playRate=1.2, startFrame=0, endFrame=75, blendInT=0.5, blendOutT=0.25), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getDualCutlassSpin(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Spin: 'dualcutlass_comboB' frames 70-101."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dualcutlass_comboB', playRate=1, startFrame=70, endFrame=101, blendInT=0.25, blendOutT=0.25), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getDualCutlassBarrage(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Barrage: 'dualcutlass_comboB' frames 101-131, slowed to 0.575x."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.22), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dualcutlass_comboB', playRate=0.575, startFrame=101, endFrame=131, blendInT=0.25, blendOutT=0.25), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getDualCutlassXSlash(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """X-slash: 'dualcutlass_comboA' frames 50-100."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dualcutlass_comboA', playRate=1, startFrame=50, endFrame=100, blendInT=0.25, blendOutT=0.25), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getDualCutlassGore(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Gore: comboA frames 100-120 chained into comboB frames 140-200."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('dualcutlass_comboA', playRate=1, startFrame=100, endFrame=120, blendInT=0.25, blendOutT=0.25), av.actorInterval('dualcutlass_comboB', playRate=1, startFrame=140, endFrame=200, blendInT=0.25, blendOutT=0.25), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival
# Foil attacks: each plays one foil animation (or a frame slice of it) with
# the standard lock/trail/attack bracketing. Fleche and Impale share the
# same 'foil_thrust' animation; the skillId distinguishes their gameplay.

def getFoilFleche(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fleche: full 'foil_thrust'."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_thrust', playRate=1, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getFoilReprise(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Reprise: 'foil_hack' frames 45-89."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_hack', playRate=1, startFrame=45, endFrame=89, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getFoilSwipe(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Swipe: 'foil_coup' frames 75-97."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_coup', playRate=1, startFrame=75, endFrame=97, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getFoilImpale(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Impale: full 'foil_thrust' (same animation as Fleche)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_thrust', playRate=1, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getFoilRemise(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Remise: 'foil_slash' frames 10-82."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_slash', playRate=1, startFrame=10, endFrame=82, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getFoilBalestraKick(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Balestra kick: full 'foil_kick'."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_kick', playRate=1, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival

def getFoilCadence(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Cadence: 'foil_coup' frames 75-172 (the long finisher)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.motionFSM.off), Func(av.currentWeapon.hideMouse, av), Func(av.currentWeapon.setTrailLength, 0.4), Func(av.currentWeapon.beginAttack, av), av.actorInterval('foil_coup', playRate=1, startFrame=75, endFrame=172, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(av.considerEnableMovement), Func(self.unlockInput, av))
    return ival
# Unarmed (boxing) attacks: no weapon checks needed — these use only the
# avatar's own animations, bracketed by the input lock.

def getKrazyPunch(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Kick-punch-kick flurry, all at double speed."""
    return Sequence(Func(self.lockInput, av), av.actorInterval('boxing_kick', playRate=2), av.actorInterval('boxing_punch', playRate=2), av.actorInterval('boxing_kick', playRate=2), Func(self.unlockInput, av))

def getBoxingPunch(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Single punch at normal speed."""
    return Sequence(Func(self.lockInput, av), av.actorInterval('boxing_punch', playRate=1.0, blendInT=0.1, blendOutT=0.2), Func(self.unlockInput, av))

def getKick(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Single kick at normal speed."""
    return Sequence(Func(self.lockInput, av), av.actorInterval('boxing_kick', playRate=1.0), Func(self.unlockInput, av))
def getBayonetFireAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the bayonet/gun firing interval: muzzle flash plus recoil.

    The fire animation is looked up from the weapon's item subtype, so one
    handler serves every bayonet variant. Input unlocks between the recoil
    (frames 9-14) and the settle (frame 15 onward) so the player can act
    while the gun is lowered.
    Returns None when the avatar has no weapon equipped.
    """
    # Bug fix: every sibling handler guards against a missing weapon; this
    # one dereferenced av.currentWeapon unconditionally and could raise.
    if not av.currentWeapon:
        return None

    def startVFX():
        unlimited = av.isLocal()
        pistolFlame = MusketShot.getEffect(unlimited)
        if pistolFlame:
            pistolFlame.reparentTo(av.currentWeapon)
            pistolFlame.setPos(2.8, 0.1, 0)
            pistolFlame.setHpr(0, 0, 90)
            pistolFlame.setScale(1)
            pistolFlame.play()

    anim = ItemGlobals.getFireAnim(ItemGlobals.getSubtype(av.currentWeaponId))
    ival = Sequence(Func(base.cr.targetMgr.setWantAimAssist, 0), Func(self.lockInput, av), Func(startVFX), Func(av.currentWeapon.playSkillSfx, skillId, av), av.actorInterval(anim, startFrame=9, endFrame=14, blendInT=0.0, blendOutT=0), Func(self.unlockInput, av), av.actorInterval(anim, startFrame=15, blendInT=0, blendOutT=0.3))
    return ival
def getBayonetReloadAnim(self, av, skillId, ammoSkillId, charge, target, skillResult=None):
    """Build the bayonet/gun reload interval with randomized foley.

    A random cock sound and a random reload sound are chosen from the
    weapon's pools so repeated reloads don't sound canned, keyed to the
    'gun_reload' animation's frame boundaries.

    `skillResult` is accepted (defaulting to None) for signature consistency
    with every other animation getter in this class; it is unused here.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None
    reloadSfx = av.currentWeapon.reloadSfxs
    reloadFx = random.choice(reloadSfx)
    gunCockSfx = av.currentWeapon.gunCockSfxs
    gunCockFx = random.choice(gunCockSfx)
    track = Sequence(Func(self.lockInput, av), av.actorInterval('gun_reload', endFrame=6, blendInT=0, blendOutT=0), Func(base.playSfx, gunCockFx, node=av), av.actorInterval('gun_reload', startFrame=7, endFrame=18, blendInT=0, blendOutT=0), Func(base.playSfx, reloadFx, node=av), av.actorInterval('gun_reload', startFrame=19, blendInT=0, blendOutT=0.3), Func(self.unlockInput, av))
    return track
# Bayonet melee attacks: three NPC variants (A/B/C animations) plus two
# player variants. Unlike the sword attacks, these do not lock the motion
# FSM — the avatar can keep moving.

def getBayonetStab(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Stab: full 'bayonet_attackA'."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av), av.actorInterval('bayonet_attackA', playRate=1.0, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(self.unlockInput, av))
    return ival

def getBayonetBash(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Bash: full 'bayonet_attackB'."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av), av.actorInterval('bayonet_attackB', playRate=1.0, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(self.unlockInput, av))
    return ival

def getBayonetRush(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Rush: full 'bayonet_attackC'."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av), av.actorInterval('bayonet_attackC', playRate=1.0, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(self.unlockInput, av))
    return ival

def getPlayerBayonetStab(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Player stab: 'bayonet_attackA' frames 6-35 with an early unlock.

    NOTE(review): unlockInput is invoked both at the end of the animation
    branch and again after the 0.75s timer branch — confirm the double call
    is intentional (it may be a harmless belt-and-braces unlock).
    """
    if not av.currentWeapon:
        return None
    ival = Parallel(Sequence(Func(self.lockInput, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av), av.actorInterval('bayonet_attackA', playRate=1.0, startFrame=6, endFrame=35, blendInT=0, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(self.unlockInput, av)), Sequence(Wait(0.75), Func(self.unlockInput, av)))
    return ival

def getPlayerBayonetRush(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Player rush: full 'bayonet_attackC' (same shape as the NPC rush)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(self.lockInput, av), Func(av.currentWeapon.setTrailLength, 0.25), Func(av.currentWeapon.beginAttack, av), av.actorInterval('bayonet_attackC', playRate=1.0, blendInT=0.5, blendOutT=0.5), Func(av.currentWeapon.endAttack, av), Func(self.unlockInput, av))
    return ival
# Crab creature attacks: raw animation intervals with no locking or VFX.

def getCrabAttackLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Left-claw swipe."""
    return av.actorInterval('attack_left', playRate=1.0)

def getCrabAttackRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Right-claw swipe."""
    return av.actorInterval('attack_right', playRate=1.0)

def getCrabAttackBoth(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Double-claw swipe."""
    return av.actorInterval('attack_both', playRate=1.0)
# Stump (tree creature) attacks: each sets the weapon trail length (0 for
# kicks, 0.5 for limb swings) and plays one creature animation. No input
# locking — these are NPC-driven.

def getStumpKick(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Kick (no weapon trail)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(av.currentWeapon.setTrailLength, 0.0), av.actorInterval('kick', playRate=1.0))
    return ival

def getStumpKickRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Right kick (no weapon trail)."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(av.currentWeapon.setTrailLength, 0.0), av.actorInterval('kick_right', playRate=1.0))
    return ival

def getStumpSlapLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Left slap with a 0.5 weapon trail."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(av.currentWeapon.setTrailLength, 0.5), av.actorInterval('slap_left', playRate=1.0))
    return ival

def getStumpSlapRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Right slap with a 0.5 weapon trail."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(av.currentWeapon.setTrailLength, 0.5), av.actorInterval('slap_right', playRate=1.0))
    return ival

def getStumpSwatLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Left swat with a 0.5 weapon trail."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(av.currentWeapon.setTrailLength, 0.5), av.actorInterval('swat_left', playRate=1.0))
    return ival

def getStumpSwatRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Right swat with a 0.5 weapon trail."""
    if not av.currentWeapon:
        return None
    ival = Sequence(Func(av.currentWeapon.setTrailLength, 0.5), av.actorInterval('swat_right', playRate=1.0))
    return ival
def getStumpStomp(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Build the stump's jump-stomp attack interval.

    Plays the 'jump_attack' animation with hit detection active, and 1.33s
    in (the landing) fires the impact VFX: three expanding shockwave rings,
    a dust ring, and a camera shake.
    Returns None when the avatar has no weapon equipped.
    """
    if not av.currentWeapon:
        return None

    def startVFX():
        # Three concentric shockwave rings; the original spelled this block
        # out three times with only `size` differing.
        for ringSize in (40, 80, 120):
            shockwaveRingEffect = ShockwaveRing.getEffect()
            if shockwaveRingEffect:
                shockwaveRingEffect.reparentTo(av)
                shockwaveRingEffect.size = ringSize
                shockwaveRingEffect.setPos(0, 0, 3)
                shockwaveRingEffect.play()
        dustRingEffect = DustRing.getEffect()
        if dustRingEffect:
            dustRingEffect.reparentTo(av)
            dustRingEffect.setPos(0, 0, 0)
            dustRingEffect.play()
        cameraShakerEffect = CameraShaker()
        cameraShakerEffect.reparentTo(av)
        cameraShakerEffect.setPos(0, 0, 0)
        cameraShakerEffect.shakeSpeed = 0.06
        cameraShakerEffect.shakePower = 4.0
        cameraShakerEffect.numShakes = 2
        cameraShakerEffect.scalePower = 1
        cameraShakerEffect.play(50.0)

    ival = Parallel(Sequence(Wait(1.33), Func(startVFX)), Sequence(Func(av.currentWeapon.beginAttack, av), av.actorInterval('jump_attack', playRate=1.0), Func(av.currentWeapon.endAttack, av)))
    return ival
def getFlyTrapAttackA(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fly trap primary attack animation interval."""
    attackIval = av.actorInterval('attack_a', playRate=1.0)
    return attackIval
def getFlyTrapAttackJab(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fly trap jab attack animation interval."""
    attackIval = av.actorInterval('attack_jab', playRate=1.0)
    return attackIval
def getFlyTrapLeftFake(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fly trap left feint animation interval."""
    feintIval = av.actorInterval('attack_left_fake', playRate=1.0)
    return feintIval
def getFlyTrapRightFake(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fly trap right feint animation interval."""
    feintIval = av.actorInterval('attack_right_fake', playRate=1.0)
    return feintIval
def getFlyTrapSpit(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Fly trap spit: play 'shoot' up to frame 23, launch the venom
    projectile toward the target, then finish the animation from frame 24.

    Fix: removed the unused `motion_color` local (dead code in the
    original; it was built and never referenced).
    """
    def startVFX():
        targetPos, speed, impactT = av.getProjectileInfo(skillId, target)
        effect = VenomSpitProjectile.getEffect()
        if effect:
            effect.reparentTo(render)
            # Spawn at roughly mouth height, facing the avatar's heading.
            effect.setPos(av, 0, 0, av.height * 0.7)
            effect.setH(av.getH(render))
            effect.play(targetPos, speed, target)
    return Sequence(
        av.actorInterval('shoot', endFrame=23),
        Func(startVFX),
        av.actorInterval('shoot', startFrame=24))
def getTentacleSlap(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Tentacle deck-slap: align with the victim, pound the deck at 2x
    speed, and fire a shockwave 0.6s in before looping idle."""
    pound = Sequence(
        ActorInterval(av, 'pound_deck', playRate=2.0),
        Func(av.loop, 'idle', playRate=random.uniform(1.0, 1.2)))
    shock = Sequence(Wait(0.6), Func(self.playShockwave, av))
    return Parallel(Func(av.alignWithVictim, 0.4), pound, shock)
def getTentaclePound(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Tentacle pound: align with the victim and pound the deck at 2x
    speed, then snap (blendT=0) into a randomly-rated idle loop."""
    pound = Sequence(
        ActorInterval(av, 'pound_deck', playRate=2.0),
        Func(av.loop, 'idle', blendT=0, playRate=random.uniform(1.0, 1.2)))
    return Parallel(Func(av.alignWithVictim, 0.4), pound)
def getTentacleEnsnare(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Tentacle ensnare: grab animation with the ensnare set up at 0.66s
    and the target picked up at 1.95s, then loop the grab idle."""
    timing = Sequence(
        Wait(0.66),
        Func(av.setupEnsnare),
        Wait(1.29),
        Func(av.pickupTarget))
    grab = Parallel(
        ActorInterval(av, 'grab', playRate=1.0),
        Func(av.alignWithVictim, 0.66),
        timing)
    return Sequence(
        grab,
        Func(av.loop, 'grab_idle', playRate=random.uniform(1.0, 1.2)))
def getTentaclePiledriver(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Tentacle piledriver: grab-slam animation with the piledrive
    callback fired at 2.93s, then loop idle at a random rate."""
    slam = Parallel(
        ActorInterval(av, 'grab_slam', playRate=1.0),
        Sequence(Wait(2.93), Func(av.piledriveTarget)))
    return Sequence(
        slam,
        Func(av.loop, 'idle', playRate=random.uniform(1.0, 1.2)))
def getTentacleRelease(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Tentacle release animation.

    NOTE(review): this body is byte-identical to getTentaclePiledriver
    (same 'grab_slam' anim and piledriveTarget callback) — looks like a
    copy-paste; confirm whether a dedicated release anim was intended.
    """
    slam = Parallel(
        ActorInterval(av, 'grab_slam', playRate=1.0),
        Sequence(Wait(2.93), Func(av.piledriveTarget)))
    return Sequence(
        slam,
        Func(av.loop, 'idle', playRate=random.uniform(1.0, 1.2)))
def getTentacleConstrict(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Tentacle constrict animation followed by a grab-idle loop."""
    constrict = ActorInterval(av, 'grab_constrict', playRate=1.0)
    return Sequence(
        constrict,
        Func(av.loop, 'grab_idle', playRate=random.uniform(1.0, 1.2)))
def getKrakenVomit(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Kraken vomit ('shoot') animation followed by an idle loop."""
    vomit = ActorInterval(av, 'shoot', playRate=1.0)
    return Sequence(
        vomit,
        Func(av.loop, 'idle', playRate=random.uniform(1.0, 1.2)))
def getScorpionAttackLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Scorpion left-claw strike animation interval."""
    attackIval = av.actorInterval('attack_left', playRate=1.0)
    return attackIval
def getScorpionAttackRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Scorpion right-claw strike animation interval."""
    attackIval = av.actorInterval('attack_right', playRate=1.0)
    return attackIval
def getScorpionAttackBoth(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Scorpion double-claw strike animation interval."""
    attackIval = av.actorInterval('attack_both', playRate=1.0)
    return attackIval
def getScorpionAttackTailSting(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Scorpion tail-sting animation interval."""
    stingIval = av.actorInterval('attack_tail_sting', playRate=1.0)
    return stingIval
def getScorpionPickUpHuman(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Scorpion pick-up-human animation interval."""
    pickupIval = av.actorInterval('pick_up_human', playRate=1.0)
    return pickupIval
def getScorpionRearUp(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Scorpion rear-up animation interval."""
    rearIval = av.actorInterval('rear_up', playRate=1.0)
    return rearIval
def getAlligatorAttackLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Alligator left bite animation interval."""
    biteIval = av.actorInterval('attack_left', playRate=1.0)
    return biteIval
def getAlligatorAttackRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Alligator right bite animation interval."""
    biteIval = av.actorInterval('attack_right', playRate=1.0)
    return biteIval
def getAlligatorAttackStraight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Alligator straight-ahead bite animation interval."""
    biteIval = av.actorInterval('attack_straight', playRate=1.0)
    return biteIval
def getAlligatorAttackTailLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Alligator left tail-swipe animation interval."""
    tailIval = av.actorInterval('attack_tail_left', playRate=1.0)
    return tailIval
def getAlligatorAttackTailRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Alligator right tail-swipe animation interval."""
    tailIval = av.actorInterval('attack_tail_right', playRate=1.0)
    return tailIval
def getBatAttackLeft(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Bat 'left' attack animation interval.

    NOTE(review): despite the method name this plays 'attack_forward'
    (there is no attack_left usage here) -- confirm that is intentional.
    """
    attackIval = av.actorInterval('attack_forward', playRate=1.0)
    return attackIval
def getBatAttackRight(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Bat right attack animation interval."""
    attackIval = av.actorInterval('attack_right', playRate=1.0)
    return attackIval
def getBatShriek(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Bat shriek: a shockwave ring at the bat, then a fast forward attack."""
    def playFX():
        ring = ShockwaveRing.getEffect()
        if ring:
            ring.reparentTo(av)
            ring.size = 40
            ring.setPos(0, 0, 0)
            ring.play()
    return Sequence(
        Func(playFX),
        av.actorInterval('attack_forward', playRate=2.0))
def getBatFlurry(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Bat flurry: two right/forward attack pairs at double speed."""
    swings = []
    for anim in ('attack_right', 'attack_forward', 'attack_right', 'attack_forward'):
        swings.append(av.actorInterval(anim, playRate=2.0))
    return Sequence(*swings)
def getWaspAttack(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Wasp basic sting animation interval."""
    stingIval = av.actorInterval('sting', playRate=1.0)
    return stingIval
def getWaspAttackLeap(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Wasp leaping sting animation interval."""
    leapIval = av.actorInterval('leap_sting', playRate=1.0)
    return leapIval
def getWaspAttackSting(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Wasp sting animation interval (same anim as the basic attack)."""
    stingIval = av.actorInterval('sting', playRate=1.0)
    return stingIval
def playShockwave(self, av):
    """Ground shockwave at the avatar's world position: smoke cloud,
    expanding ring, and a camera shake."""
    pos = av.getPos(render)
    smoke = SmokeCloud.getEffect()
    if smoke:
        smoke.reparentTo(render)
        smoke.setPos(pos)
        smoke.setScale(1.0)
        smoke.spriteScale = 1.0
        smoke.radius = 7.0
        smoke.play()
    ring = ShockwaveRing.getEffect()
    if ring:
        ring.reparentTo(render)
        ring.size = 40
        ring.setPos(pos)
        ring.play()
    shaker = CameraShaker()
    shaker.reparentTo(render)
    shaker.setPos(pos)
    shaker.shakeSpeed = 0.04
    shaker.shakePower = 6.0
    shaker.numShakes = 2
    shaker.scalePower = 1
    shaker.play(80.0)
def getCastDarkThunderAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Dark-thunder cast: play the cast sound and 'shoot_up' (from frame 6
    at 1.5x) with input locked for the duration."""
    if not av.currentWeapon:
        return None
    return Sequence(
        Func(av.considerEnableMovement),
        Func(self.lockInput, av),
        Func(self.playCastSound, av, skillId),
        av.actorInterval('shoot_up', playRate=1.5, startFrame=6),
        Func(self.unlockInput, av))
def getGraveBlindAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Grave-blind cast: greenish grave-smoke burst sized to the skill's
    area radius, timed with a sped-up 'roar_idle' animation."""
    if not av.currentWeapon:
        return None

    def startVFX():
        smoke = JRGraveSmoke.getEffect(unlimited=True)
        if smoke:
            smoke.reparentTo(av.getParent())
            smoke.setEffectColor(Vec4(0.4, 0.6, 0.1, 1))
            smoke.radius = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            smoke.setPos(av, 0.0, 0.0, 0.0)
            smoke.play()

    # The Func below keeps startVFX alive; no need to del the local name.
    return Sequence(
        Func(av.considerEnableMovement),
        Func(self.lockInput, av),
        Func(self.playCastSound, av, skillId),
        Func(startVFX),
        av.actorInterval('roar_idle', playRate=1.75, startFrame=16),
        Func(self.unlockInput, av))
def getCorruptionAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Corruption cast: layered soul-harvest, dome-explosion, dark-portal
    and evil-ring effects 5 units ahead of the caster, all sized from the
    skill's area radius, timed with a sped-up 'roar_idle' animation."""
    if not av.currentWeapon:
        return None

    def startVFX():
        # Core soul-harvest burst.
        harvest = JRSoulHarvest.getEffect(unlimited=True)
        if harvest:
            harvest.reparentTo(av.getParent())
            harvest.setPos(av, 0, 5, 0)
            harvest.radius = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            harvest.setEffectColor(Vec4(0.8, 1.0, 0.2, 1.0))
            harvest.play()
        # Expanding dome at twice the attack radius.
        dome = DomeExplosion.getEffect(unlimited=True)
        if dome:
            dome.reparentTo(av.getParent())
            dome.setPos(av, 0, 5, 0)
            dome.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 2.0
            dome.play()
        # Dark portal at four times the attack radius.
        portal = DarkPortal.getEffect(unlimited=True)
        if portal:
            portal.reparentTo(av.getParent())
            portal.setPos(av, 0, 5, 0)
            portal.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 4.0
            portal.play()
        # Lingering evil ring.
        evilRing = EvilRingEffect.getEffect(unlimited=True)
        if evilRing:
            evilRing.reparentTo(av.getParent())
            evilRing.setPos(av, 0, 5, 0)
            evilRing.effectScale = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            evilRing.changeEffectColor(Vec4(0.8, 1.0, 0.1, 1.0))
            evilRing.duration = 2.5
            evilRing.play()

    return Sequence(
        Func(av.considerEnableMovement),
        Func(self.lockInput, av),
        Func(self.playCastSound, av, skillId),
        Func(startVFX),
        av.actorInterval('roar_idle', playRate=1.75, startFrame=16),
        Func(self.unlockInput, av))
def getSoulHarvestAnim(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Soul-harvest cast: wind wave, harvest swirl, desolation smoke,
    dome explosion, dark portal and a long camera shake centered on the
    caster, timed with a sped-up 'roar_idle' animation."""
    if not av.currentWeapon:
        return None

    def startVFX():
        wave = WindWave.getEffect(unlimited=True)
        if wave:
            wave.reparentTo(av.getParent())
            wave.setEffectColor(Vec4(0.8, 1, 0.2, 0.7))
            wave.setPos(av, 0.0, 0.0, 0.0)
            wave.setScale(1.0, 1.0, 1.0)
            wave.setHpr(0.0, 0.0, 0.0)
            wave.play()
        harvest = JRSoulHarvest2.getEffect(unlimited=True)
        if harvest:
            harvest.reparentTo(av.getParent())
            harvest.setPos(av, 0, 0, 2)
            harvest.radius = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            harvest.play()
        smoke = DesolationSmoke.getEffect(unlimited=True)
        if smoke:
            smoke.reparentTo(av.getParent())
            smoke.setEffectColor(Vec4(0.8, 1, 0.2, 1))
            smoke.setPos(av, 0.0, 0.0, 0.0)
            smoke.play()
        dome = DomeExplosion.getEffect(unlimited=True)
        if dome:
            dome.reparentTo(av.getParent())
            dome.setPos(av, 0, 0, 0)
            dome.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId)
            dome.play()
        portal = DarkPortal.getEffect(unlimited=True)
        if portal:
            portal.reparentTo(av.getParent())
            portal.setPos(av, 0, 0, 0)
            portal.size = av.cr.battleMgr.getModifiedAttackAreaRadius(av, skillId, ammoSkillId) * 3.0
            portal.play()
        shaker = CameraShaker()
        shaker.wrtReparentTo(av.getParent())
        shaker.setPos(av, 0.0, 0.0, 0.0)
        shaker.shakeSpeed = 0.075
        shaker.shakePower = 1.0
        shaker.numShakes = 30
        shaker.scalePower = 1
        shaker.play(100.0)

    return Sequence(
        Func(av.considerEnableMovement),
        Func(self.lockInput, av),
        Func(self.playCastSound, av, skillId),
        Func(startVFX),
        av.actorInterval('roar_idle', playRate=1.75, startFrame=16),
        Func(self.unlockInput, av))
def getBroadsideLeft(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Shout the left-broadside phrase and lock input for one second.

    The phrase getter is intentionally called twice (chat bubble vs talk
    log), matching the original construction order.
    """
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getLeftBroadsidePhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getLeftBroadsidePhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getBroadsideRight(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Shout the right-broadside phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getRightBroadsidePhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getRightBroadsidePhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getFullSail(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Shout the full-sail phrase, play the ship's full-sail sound, and
    lock input for one second."""
    def playSfx():
        ship = av.ship
        if not ship:
            return
        base.playSfx(ship.fullsailSfx, node=ship, cutoff=1500)
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getFullSailPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getFullSailPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, Func(playSfx), relock)
def getComeAbout(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Shout the come-about phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getComeAboutPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getComeAboutPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getRammingSpeed(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Shout the ramming-speed phrase, play the ship sound, and lock
    input for one second.

    NOTE(review): plays the full-sail sfx (fullsailSfx), same as
    getFullSail -- confirm a dedicated ramming sfx was not intended.
    """
    def playSfx():
        ship = av.ship
        if not ship:
            return
        base.playSfx(ship.fullsailSfx, node=ship, cutoff=1500)
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.RammingSpeed, CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.RammingSpeed))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, Func(playSfx), relock)
def getOpenFire(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the open-fire phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getOpenFirePhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getOpenFirePhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getTakeCover(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the take-cover phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getTakeCoverPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getTakeCoverPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getPowerRecharge(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Shout the power-recharge phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getPowerRechargePhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getPowerRechargePhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getWreckHull(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the wreck-hull phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getWreckHullPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getWreckHullPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getWreckMasts(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the wreck-masts phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getWreckMastsPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getWreckMastsPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getSinkHer(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the sink-her phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getSinkHerPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getSinkHerPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getIncoming(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the incoming warning phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getIncomingPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getIncomingPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getFixItNow(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Shout the fix-it-now phrase and lock input for one second."""
    shout = Sequence(
        Func(self.lockInput, av),
        Func(av.setChatAbsolute, PLocalizer.getFixItNowPhrase(), CFSpeech | CFTimeout),
        Func(base.talkAssistant.receiveOpenTalk, av.doId, av.getName(),
             None, None, PLocalizer.getFixItNowPhrase()))
    relock = Sequence(Wait(1.0), Func(self.unlockInput, av))
    return Parallel(shout, relock)
def getShipRepair(self, av, skillId, ammoSkillId, charge=0, target=None, skillResult=None):
    """Ship repair reuses the come-about voice command interval.

    Fix: forward target and skillResult to the delegate as well;
    previously they were dropped and getComeAbout silently fell back to
    its None defaults.
    """
    return self.getComeAbout(av, skillId, ammoSkillId, charge, target, skillResult)
def getSummonHelp(self, av, skillId, ammoSkillId, charge, target, skillResult):
    """Summon-help cast: stop motion, play the casting pose, run the
    'voodoo_swarm' animation with blending, then restore movement and
    input."""
    return Sequence(
        Func(self.lockInput, av),
        Func(av.motionFSM.off),
        Func(self.playCastingAnim, av),
        av.actorInterval('voodoo_swarm', playRate=1.0, blendInT=0.3, blendOutT=0.3),
        Func(av.considerEnableMovement),
        Func(self.unlockInput, av))
def lockInput(self, av):
    """Broadcast 'skillStarted' so the local client disables input."""
    if not av.isLocal():
        return
    messenger.send('skillStarted')
def unlockInput(self, av):
    """Broadcast 'skillFinished' so the local client re-enables input."""
    if not av.isLocal():
        return
    messenger.send('skillFinished')
def lockDrink(self, av):
    """Broadcast 'drinkingStarted' for the local avatar only."""
    if not av.isLocal():
        return
    messenger.send('drinkingStarted')
def unlockDrink(self, av):
    """Broadcast 'drinkingFinished' for the local avatar only."""
    if not av.isLocal():
        return
    messenger.send('drinkingFinished')
| 62.904192
| 838
| 0.662897
| 17,342
| 157,575
| 6.004209
| 0.051378
| 0.104298
| 0.062771
| 0.045445
| 0.828773
| 0.808855
| 0.79613
| 0.775491
| 0.762459
| 0.741445
| 0
| 0.026522
| 0.222091
| 157,575
| 2,505
| 839
| 62.904192
| 0.822931
| 0
| 0
| 0.630553
| 0
| 0.000453
| 0.022319
| 0.001802
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.037171
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
865af1e9a91e299bf06828cb0c8640a13c10f3b7
| 241
|
py
|
Python
|
python/anyascii/_data/_114.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_114.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_114.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
b=' 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9'
| 241
| 241
| 0.087137
| 21
| 241
| 1
| 0.52381
| 0.190476
| 0.285714
| 0.380952
| 0.952381
| 0.952381
| 0.952381
| 0.952381
| 0.952381
| 0.952381
| 0
| 0.833333
| 0.900415
| 241
| 1
| 241
| 241
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0.979339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 16
|
865fb7ba03df4e01a46af13483c0a6d69fee5146
| 7,580
|
py
|
Python
|
clcd/text_generation/comparative.py
|
felipessalvatore/CLCD
|
422f9e93d49e4fcfd8048ad5b36898f8713d0370
|
[
"MIT"
] | 4
|
2020-02-06T19:35:13.000Z
|
2021-09-04T10:29:11.000Z
|
clcd/text_generation/comparative.py
|
felipessalvatore/CLCD
|
422f9e93d49e4fcfd8048ad5b36898f8713d0370
|
[
"MIT"
] | null | null | null |
clcd/text_generation/comparative.py
|
felipessalvatore/CLCD
|
422f9e93d49e4fcfd8048ad5b36898f8713d0370
|
[
"MIT"
] | null | null | null |
import numpy as np
from util import get_n_different_items, get_new_item
from util import Rt, Rt_eq, list2coordination, create_csv
from util import Rt_pt, Rt_eq_pt
from vocab import male_names, female_names, cities_and_states, countries
from vocab_pt import male_names_pt, female_names_pt, cities_pt, countries_pt
def contradiction_instance_1(person_list,
                             place_list,
                             n,
                             Rt_function=Rt,
                             Rt_eq_function=Rt_eq,
                             and_str="and"):
    """
    T = {x1 > x2, x2 > x3, ... , xn-1 > xn}
    new = xj > xi (i<j) ----------- 1

    Build a premise chaining n strict comparisons over n+1 people and a
    hypothesis that contradicts the chain's transitive order. Returns
    (premise, hypothesis, 1), where 1 labels a contradiction.

    Fix: replaced the manual append loop with a list comprehension
    (idiom; behavior unchanged).
    """
    people = get_n_different_items(person_list, n + 1)
    # Chain x_i > x_{i+1} for i = 0..n-1.
    chain = [Rt_function(people[i], people[i + 1]) for i in range(n)]
    sentence1 = " , ".join(chain)
    # Pick indices i < j and assert x_j > x_i, contradicting transitivity.
    id_base = np.random.choice(range(n))
    id_bigger = np.random.choice(range(id_base + 1, n + 1))
    sentence2 = Rt_function(people[id_bigger], people[id_base])
    return sentence1, sentence2, 1
def contradiction_instance_2(person_list,
                             place_list,
                             n,
                             Rt_function=Rt,
                             Rt_eq_function=Rt_eq,
                             and_str="and"):
    """
    T = {x1 >= x2, x2 >= x3, ... , xn-1 >= xn,
         xn > y}
    new = y > xi ----------- 1

    Premise: a weak-order chain over n+1 people followed by x_n > y for a
    fresh person y. Hypothesis: y > x_i, which contradicts the chain.
    Returns (premise, hypothesis, 1).

    Fix: manual append loop -> list comprehension (idiom; behavior
    unchanged).
    """
    people = get_n_different_items(person_list, n + 1)
    new_person = get_new_item(people, person_list)
    chain = [Rt_eq_function(people[i], people[i + 1]) for i in range(n)]
    sentence1 = " , ".join(chain)
    sentence1 += " , " + Rt_function(people[-1], new_person)
    id_base = np.random.choice(range(n + 1))
    sentence2 = Rt_function(new_person, people[id_base])
    return sentence1, sentence2, 1
def contradiction_instance_3(person_list,
                             place_list,
                             n,
                             Rt_function=Rt,
                             Rt_eq_function=Rt_eq,
                             and_str="and"):
    """
    T = {x > [x1, ....., xn], y >= x}
    new = xi > y ----------- 1

    Premise: x dominates a coordinated list of n people, plus an
    equality-style relation between x and a fresh person y (random
    direction). Hypothesis: some x_i > y, labeled contradictory.
    Returns (premise, hypothesis, 1).

    Fix: removed the dead `chain` list -- the original built n
    Rt_function sentences that were never used.

    NOTE(review): if Rt_eq_function is directional (x >= y vs y >= x),
    only the y >= x choice makes the label logically sound -- confirm
    Rt_eq's semantics in util.
    """
    people = get_n_different_items(person_list, n + 1)
    new_person = get_new_item(people, person_list)
    sentence1 = Rt_function(people[0], list2coordination(people[1:], and_str))
    eq = [Rt_eq_function(people[0], new_person), Rt_eq_function(new_person, people[0])]  # noqa
    eq = np.random.choice(eq)
    sentence1 += " , " + eq
    selected = np.random.choice(people[1:])
    sentence2 = Rt_function(selected, new_person)
    return sentence1, sentence2, 1
def non_contradiction_instance_1(person_list,
                                 place_list,
                                 n,
                                 Rt_function=Rt,
                                 Rt_eq_function=Rt_eq,
                                 and_str="and"):
    """
    T = {x1 > x2, x2 > x3, ... , xn-1 > xn}
    new = xi > xj (i<j) ----------- 0

    Same chain as contradiction_instance_1, but the hypothesis x_i > x_j
    (i < j) is entailed by transitivity, so the label is 0.

    Fix: manual append loop -> list comprehension (idiom; behavior
    unchanged).
    """
    people = get_n_different_items(person_list, n + 1)
    chain = [Rt_function(people[i], people[i + 1]) for i in range(n)]
    sentence1 = " , ".join(chain)
    id_base = np.random.choice(range(n))
    id_bigger = np.random.choice(range(id_base + 1, n + 1))
    sentence2 = Rt_function(people[id_base], people[id_bigger])
    return sentence1, sentence2, 0
def non_contradiction_instance_2(person_list,
                                 place_list,
                                 n,
                                 Rt_function=Rt,
                                 Rt_eq_function=Rt_eq,
                                 and_str="and"):
    """
    T = {x1 >= x2, x2 >= x3, ... , xn-1 >= xn,
         xn > y}
    new = xi > y ----------- 0

    Same premise as contradiction_instance_2, but the hypothesis
    x_i > y is consistent with it, so the label is 0.

    Fix: manual append loop -> list comprehension (idiom; behavior
    unchanged).
    """
    people = get_n_different_items(person_list, n + 1)
    new_person = get_new_item(people, person_list)
    chain = [Rt_eq_function(people[i], people[i + 1]) for i in range(n)]
    sentence1 = " , ".join(chain)
    sentence1 += " , " + Rt_function(people[-1], new_person)
    id_base = np.random.choice(range(n + 1))
    sentence2 = Rt_function(people[id_base], new_person)
    return sentence1, sentence2, 0
def non_contradiction_instance_3(person_list,
                                 place_list,
                                 n,
                                 Rt_function=Rt,
                                 Rt_eq_function=Rt_eq,
                                 and_str="and"):
    """
    T = {x > [x1, ....., xn], y >= x}
    new = y > xi ----------- 0

    Same premise as contradiction_instance_3, but the hypothesis
    y > x_i is consistent with it, so the label is 0.

    Fix: removed the dead `chain` list -- the original built n
    Rt_function sentences that were never used.
    """
    people = get_n_different_items(person_list, n + 1)
    new_person = get_new_item(people, person_list)
    sentence1 = Rt_function(people[0], list2coordination(people[1:], and_str))
    eq = [Rt_eq_function(people[0], new_person), Rt_eq_function(new_person, people[0])]  # noqa
    eq = np.random.choice(eq)
    sentence1 += " , " + eq
    selected = np.random.choice(people[1:])
    sentence2 = Rt_function(new_person, selected)
    return sentence1, sentence2, 0
def eng2pt(f):
    """Wrap an English instance generator so it produces Portuguese text,
    using the Portuguese relation templates and conjunction "e"."""
    def wrapped(person_list, place_list, n):
        return f(person_list, place_list, n,
                 Rt_function=Rt_pt, Rt_eq_function=Rt_eq_pt, and_str="e")  # noqa
    return wrapped
# Generators of contradictory (label 1) and consistent (label 0)
# premise/hypothesis pairs, in English.
positive_instances_list_en = [
    contradiction_instance_1,
    contradiction_instance_2,
    contradiction_instance_3,
]
negative_instances_list_en = [
    non_contradiction_instance_1,
    non_contradiction_instance_2,
    non_contradiction_instance_3,
]
# Portuguese variants of the same generators.
positive_instances_list_pt = [eng2pt(f) for f in positive_instances_list_en]
negative_instances_list_pt = [eng2pt(f) for f in negative_instances_list_en]
if __name__ == '__main__':
    # Call this script from the repository root:
    #   python clcd/text_generation/comparative.py
    # (out_path, size, person_list, place_list, positives, negatives),
    # generated in the same order as before: en train/test, pt train/test.
    datasets = [
        ("text_gen_output/comparative_train.csv", 10000, male_names,
         countries, positive_instances_list_en, negative_instances_list_en),
        ("text_gen_output/comparative_test.csv", 1000, female_names,
         cities_and_states, positive_instances_list_en, negative_instances_list_en),
        ("text_gen_output/comparative_pt_train.csv", 10000, male_names_pt,
         countries_pt, positive_instances_list_pt, negative_instances_list_pt),
        ("text_gen_output/comparative_pt_test.csv", 1000, female_names_pt,
         cities_pt, positive_instances_list_pt, negative_instances_list_pt),
    ]
    for out_path, size, persons, places, positives, negatives in datasets:
        create_csv(out_path=out_path,  # noqa
                   size=size,
                   person_list=persons,
                   place_list=places,
                   min_n=4,
                   n=10,
                   positive_instances_list=positives,
                   negative_instances_list=negatives)
| 36.267943
| 102
| 0.554222
| 929
| 7,580
| 4.19591
| 0.113025
| 0.022576
| 0.040021
| 0.032324
| 0.804772
| 0.781683
| 0.777065
| 0.769112
| 0.724474
| 0.651616
| 0
| 0.027778
| 0.335092
| 7,580
| 208
| 103
| 36.442308
| 0.745635
| 0.080079
| 0
| 0.721088
| 0
| 0
| 0.029696
| 0.022235
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.040816
| 0.006803
| 0.136054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
866f680e1c6f23887b9fc0598ee8ae9b6e0aa307
| 340
|
py
|
Python
|
StructuralAnalysis/FrameElements/__init__.py
|
Hazem-Kassab/StructuralAnalysis
|
d7e99a42c94fed787a2bc4931d33cd1ed65f8707
|
[
"MIT"
] | 6
|
2021-02-08T13:00:40.000Z
|
2022-02-18T07:11:14.000Z
|
StructuralAnalysis/FrameElements/__init__.py
|
Hazem-Kassab/StructuralAnalysis
|
d7e99a42c94fed787a2bc4931d33cd1ed65f8707
|
[
"MIT"
] | null | null | null |
StructuralAnalysis/FrameElements/__init__.py
|
Hazem-Kassab/StructuralAnalysis
|
d7e99a42c94fed787a2bc4931d33cd1ed65f8707
|
[
"MIT"
] | 1
|
2022-03-12T03:14:39.000Z
|
2022-03-12T03:14:39.000Z
|
from StructuralAnalysis.FrameElements.TwoDimensionalTrussElement import TwoDimensionalTrussElement
from StructuralAnalysis.FrameElements.TwoDimensionalFrameElement import TwoDimensionalFrameElement
from StructuralAnalysis.FrameElements.TrussElement import TrussElement
from StructuralAnalysis.FrameElements.FrameElement import FrameElement
| 68
| 98
| 0.929412
| 24
| 340
| 13.166667
| 0.333333
| 0.278481
| 0.443038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047059
| 340
| 4
| 99
| 85
| 0.975309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
868e48df41596e5cc38441a5c86ddf9d21d7d1d3
| 101
|
py
|
Python
|
youtrackutils/redmine/__init__.py
|
ekho/youtrack-python-scripts
|
1282dacfa0b5a7c47514a55031bb98c72340409a
|
[
"Apache-2.0"
] | null | null | null |
youtrackutils/redmine/__init__.py
|
ekho/youtrack-python-scripts
|
1282dacfa0b5a7c47514a55031bb98c72340409a
|
[
"Apache-2.0"
] | null | null | null |
youtrackutils/redmine/__init__.py
|
ekho/youtrack-python-scripts
|
1282dacfa0b5a7c47514a55031bb98c72340409a
|
[
"Apache-2.0"
] | null | null | null |
from .client import RedmineClient
from .client import RedmineException
from .mapping import Mapping
| 20.2
| 36
| 0.841584
| 12
| 101
| 7.083333
| 0.5
| 0.235294
| 0.376471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 4
| 37
| 25.25
| 0.965909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
86907f1f970c84c2e27e63c39e6564152d8f8aa1
| 13,810
|
py
|
Python
|
impacts_world/pages/migrations/0002_formpage_content.py
|
bruecksen/impacts-world
|
430a4e7b0618d49edd9aa424b63f4a3efb35ab1b
|
[
"MIT"
] | 5
|
2017-05-05T12:52:58.000Z
|
2021-03-04T20:39:30.000Z
|
impacts_world/pages/migrations/0002_formpage_content.py
|
bruecksen/impacts-world
|
430a4e7b0618d49edd9aa424b63f4a3efb35ab1b
|
[
"MIT"
] | null | null | null |
impacts_world/pages/migrations/0002_formpage_content.py
|
bruecksen/impacts-world
|
430a4e7b0618d49edd9aa424b63f4a3efb35ab1b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-21 10:01
from __future__ import unicode_literals
from django.db import migrations
import impacts_world.contrib.blocks
import impacts_world.pages.blocks
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('pages', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='formpage',
name='content',
field=wagtail.wagtailcore.fields.StreamField((('heading', impacts_world.contrib.blocks.HeadingBlock()), ('sub_heading', impacts_world.contrib.blocks.SubHeadingBlock()), ('rich_text_container', impacts_world.contrib.blocks.RichTextContainerBlock()), ('image_container', impacts_world.contrib.blocks.ImageContainerBlock()), ('teaser', wagtail.wagtailcore.blocks.StructBlock((('text', impacts_world.contrib.blocks.RichTextBlock(required=True)), ('page', wagtail.wagtailcore.blocks.PageChooserBlock(required=True))))), ('video_teaser', wagtail.wagtailcore.blocks.StructBlock((('video', wagtail.wagtailembeds.blocks.EmbedBlock(required=True)), ('text', impacts_world.contrib.blocks.RichTextBlock(required=True))))), ('testimonials', wagtail.wagtailcore.blocks.StructBlock((('testimonials', wagtail.wagtailcore.blocks.ListBlock(impacts_world.pages.blocks.Testimonial)),))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True))))), ('columns_1_to_1', wagtail.wagtailcore.blocks.StructBlock((('left_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), 
('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('right_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True))))))))))), ('columns_1_to_2', wagtail.wagtailcore.blocks.StructBlock((('left_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('right_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', 
wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True))))))))))), ('columns_1_to_3', wagtail.wagtailcore.blocks.StructBlock((('left_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('right_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', 
wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True))))))))))), ('columns_2_to_1', wagtail.wagtailcore.blocks.StructBlock((('left_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('right_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True))))))))))), ('columns_1_to_1_to_1', wagtail.wagtailcore.blocks.StructBlock((('left_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', 
wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('center_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('right_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), 
('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True))))))))))), ('columns_1_to_1_to_1_to_1', wagtail.wagtailcore.blocks.StructBlock((('first_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('second_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('third_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', 
wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))), ('fourth_column', wagtail.wagtailcore.blocks.StreamBlock((('rich_text', impacts_world.contrib.blocks.RichTextBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('is_circled_image', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))))), ('google_map', wagtail.wagtailcore.blocks.StructBlock((('map_lat', wagtail.wagtailcore.blocks.CharBlock(default='52.520645', label='Latitude', max_length=255, required=True)), ('map_long', wagtail.wagtailcore.blocks.CharBlock(default='13.409779', label='Longitude', max_length=255, required=True)), ('map_zoom_level', wagtail.wagtailcore.blocks.CharBlock(default=14, label='Map zoom level', max_length=3, required=True)))))))))))), blank=True, null=True),
),
]
| 511.481481
| 13,219
| 0.780594
| 1,650
| 13,810
| 6.38303
| 0.063636
| 0.210216
| 0.275731
| 0.150399
| 0.922427
| 0.902108
| 0.902108
| 0.902108
| 0.891853
| 0.888245
| 0
| 0.03296
| 0.042143
| 13,810
| 26
| 13,220
| 531.153846
| 0.76323
| 0.004924
| 0
| 0
| 1
| 0
| 0.171264
| 0.001747
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.421053
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
8691d43648c9a53d8c9baf49801b02bceb354714
| 8,346
|
py
|
Python
|
tests/integration/test_restore.py
|
untergeek/curator_api
|
b53bf99b729488c0b2b05fd6b3d159113d0edc62
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_restore.py
|
untergeek/curator_api
|
b53bf99b729488c0b2b05fd6b3d159113d0edc62
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_restore.py
|
untergeek/curator_api
|
b53bf99b729488c0b2b05fd6b3d159113d0edc62
|
[
"Apache-2.0"
] | 3
|
2018-12-11T04:42:25.000Z
|
2020-11-19T18:14:29.000Z
|
# import elasticsearch
# import curator
# import os
# import time
# import json
# import string, random, tempfile
# from click import testing as clicktest
# from mock import patch, Mock
# from . import CuratorTestCase
# from . import testvars as testvars
# import logging
# logger = logging.getLogger(__name__)
# host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
# port = int(port) if port else 9200
# # ' repository: {0}\n'
# # ' name: {1}\n'
# # ' indices: {2}\n'
# # ' include_aliases: {3}\n'
# # ' ignore_unavailable: {4}\n'
# # ' include_global_state: {5}\n'
# # ' partial: {6}\n'
# # ' rename_pattern: {7}\n'
# # ' rename_replacement: {8}\n'
# # ' extra_settings: {9}\n'
# # ' wait_for_completion: {10}\n'
# # ' skip_repo_fs_check: {11}\n'
# # ' timeout_override: {12}\n'
# # ' wait_interval: {13}\n'
# # ' max_wait: {14}\n'
# class TestActionFileRestore(CuratorTestCase):
# def test_restore(self):
# indices = []
# for i in range(1,4):
# self.add_docs('my_index{0}'.format(i))
# indices.append('my_index{0}'.format(i))
# snap_name = 'snapshot1'
# self.create_snapshot(snap_name, ','.join(indices))
# snapshot = curator.get_snapshot(
# self.client, self.args['repository'], '_all'
# )
# self.assertEqual(1, len(snapshot['snapshots']))
# self.client.indices.delete(','.join(indices))
# self.assertEqual([], curator.get_indices(self.client))
# self.write_config(
# self.args['configfile'], testvars.client_config.format(host, port))
# self.write_config(self.args['actionfile'],
# testvars.restore_snapshot_proto.format(
# self.args['repository'],
# snap_name,
# indices,
# False,
# False,
# True,
# False,
# ' ',
# ' ',
# ' ',
# True,
# False,
# 301,
# 1,
# 3
# )
# )
# test = clicktest.CliRunner()
# _ = test.invoke(
# curator.cli,
# [
# '--config', self.args['configfile'],
# self.args['actionfile']
# ],
# )
# restored_indices = sorted(curator.get_indices(self.client))
# self.assertEqual(indices, restored_indices)
# # The test runs so fast that it tries to execute the cleanup step
# # and delete the repository before Elasticsearch is actually ready
# time.sleep(0.5)
# def test_restore_with_rename(self):
# indices = []
# for i in range(1,4):
# self.add_docs('my_index{0}'.format(i))
# indices.append('my_index{0}'.format(i))
# snap_name = 'snapshot1'
# self.create_snapshot(snap_name, ','.join(indices))
# snapshot = curator.get_snapshot(
# self.client, self.args['repository'], '_all'
# )
# time.sleep(1)
# self.assertEqual(1, len(snapshot['snapshots']))
# self.client.indices.delete(','.join(indices))
# self.assertEqual([], curator.get_indices(self.client))
# self.write_config(
# self.args['configfile'], testvars.client_config.format(host, port))
# self.write_config(self.args['actionfile'],
# testvars.restore_snapshot_proto.format(
# self.args['repository'],
# snap_name,
# indices,
# False,
# False,
# True,
# False,
# 'my_index(.+)',
# 'new_index$1',
# ' ',
# True,
# False,
# 301,
# 1,
# 3,
# )
# )
# test = clicktest.CliRunner()
# _ = test.invoke(
# curator.cli,
# [
# '--config', self.args['configfile'],
# self.args['actionfile']
# ],
# )
# time.sleep(1)
# restored_indices = sorted(curator.get_indices(self.client))
# self.assertEqual(
# ['new_index1', 'new_index2', 'new_index3'],
# restored_indices
# )
# # The test runs so fast that it tries to execute the cleanup step
# # and delete the repository before Elasticsearch is actually ready
# time.sleep(1)
# def test_restore_wildcard(self):
# indices = []
# my_indices = []
# wildcard = ['my_*']
# for i in range(1,4):
# for prefix in ['my_', 'not_my_']:
# self.add_docs('{0}index{1}'.format(prefix, i))
# indices.append('{0}index{1}'.format(prefix, i))
# if prefix == 'my_':
# my_indices.append('{0}index{1}'.format(prefix, i))
# snap_name = 'snapshot1'
# self.create_snapshot(snap_name, ','.join(indices))
# snapshot = curator.get_snapshot(
# self.client, self.args['repository'], '_all'
# )
# self.assertEqual(1, len(snapshot['snapshots']))
# self.client.indices.delete(','.join(indices))
# self.assertEqual([], curator.get_indices(self.client))
# self.write_config(
# self.args['configfile'], testvars.client_config.format(host, port))
# self.write_config(self.args['actionfile'],
# testvars.restore_snapshot_proto.format(
# self.args['repository'],
# snap_name,
# wildcard,
# False,
# False,
# True,
# False,
# ' ',
# ' ',
# ' ',
# True,
# False,
# 301,
# 1,
# 3
# )
# )
# test = clicktest.CliRunner()
# _ = test.invoke(
# curator.cli,
# [
# '--config', self.args['configfile'],
# self.args['actionfile']
# ],
# )
# restored_indices = sorted(curator.get_indices(self.client))
# self.assertEqual(my_indices, restored_indices)
# # The test runs so fast that it tries to execute the cleanup step
# # and delete the repository before Elasticsearch is actually ready
# time.sleep(0.5)
# class TestCLIRestore(CuratorTestCase):
# def test_restore(self):
# indices = []
# for i in range(1,4):
# self.add_docs('my_index{0}'.format(i))
# indices.append('my_index{0}'.format(i))
# snap_name = 'snapshot1'
# self.create_snapshot(snap_name, ','.join(indices))
# snapshot = curator.get_snapshot(
# self.client, self.args['repository'], '_all'
# )
# self.assertEqual(1, len(snapshot['snapshots']))
# self.client.indices.delete(','.join(indices))
# self.assertEqual([], curator.get_indices(self.client))
# args = self.get_runner_args()
# args += [
# '--config', self.args['configfile'],
# 'restore',
# '--repository', self.args['repository'],
# '--name', snap_name,
# '--index', indices[0],
# '--index', indices[1],
# '--index', indices[2],
# '--wait_interval', '1',
# '--max_wait', '3',
# '--filter_list', '{"filtertype":"none"}',
# ]
# self.assertEqual(0, self.run_subprocess(args, logname='TestCLIRestore.test_restore'))
# restored_indices = sorted(curator.get_indices(self.client))
# self.assertEqual(indices, restored_indices)
# # The test runs so fast that it tries to execute the cleanup step
# # and delete the repository before Elasticsearch is actually ready
# time.sleep(0.5)
| 38.109589
| 95
| 0.4867
| 786
| 8,346
| 5.020356
| 0.189567
| 0.042575
| 0.039027
| 0.042575
| 0.742778
| 0.742778
| 0.734415
| 0.734415
| 0.717942
| 0.717942
| 0
| 0.017149
| 0.371196
| 8,346
| 218
| 96
| 38.284404
| 0.734756
| 0.945363
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86941782e068184a792fce70e82d4c31573b9221
| 14,591
|
py
|
Python
|
sdk/python/build/lib/pulumi_databricks/databricks/metastore_data_access.py
|
ingenii-solutions/pulumi-databricks
|
f03ecc4e190a4e59eb635663f6408350dcab42ea
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-12-10T07:35:59.000Z
|
2022-03-23T22:53:55.000Z
|
sdk/python/pulumi_databricks/databricks/metastore_data_access.py
|
ingenii-solutions/pulumi-databricks
|
f03ecc4e190a4e59eb635663f6408350dcab42ea
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_databricks/databricks/metastore_data_access.py
|
ingenii-solutions/pulumi-databricks
|
f03ecc4e190a4e59eb635663f6408350dcab42ea
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MetastoreDataAccessArgs', 'MetastoreDataAccess']
@pulumi.input_type
class MetastoreDataAccessArgs:
def __init__(__self__, *,
metastore_id: pulumi.Input[str],
aws_iam_role: Optional[pulumi.Input['MetastoreDataAccessAwsIamRoleArgs']] = None,
azure_service_principal: Optional[pulumi.Input['MetastoreDataAccessAzureServicePrincipalArgs']] = None,
configuration_type: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MetastoreDataAccess resource.
"""
pulumi.set(__self__, "metastore_id", metastore_id)
if aws_iam_role is not None:
pulumi.set(__self__, "aws_iam_role", aws_iam_role)
if azure_service_principal is not None:
pulumi.set(__self__, "azure_service_principal", azure_service_principal)
if configuration_type is not None:
pulumi.set(__self__, "configuration_type", configuration_type)
if id is not None:
pulumi.set(__self__, "id", id)
if is_default is not None:
pulumi.set(__self__, "is_default", is_default)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="metastoreId")
def metastore_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "metastore_id")
@metastore_id.setter
def metastore_id(self, value: pulumi.Input[str]):
pulumi.set(self, "metastore_id", value)
@property
@pulumi.getter(name="awsIamRole")
def aws_iam_role(self) -> Optional[pulumi.Input['MetastoreDataAccessAwsIamRoleArgs']]:
return pulumi.get(self, "aws_iam_role")
@aws_iam_role.setter
def aws_iam_role(self, value: Optional[pulumi.Input['MetastoreDataAccessAwsIamRoleArgs']]):
pulumi.set(self, "aws_iam_role", value)
@property
@pulumi.getter(name="azureServicePrincipal")
def azure_service_principal(self) -> Optional[pulumi.Input['MetastoreDataAccessAzureServicePrincipalArgs']]:
return pulumi.get(self, "azure_service_principal")
@azure_service_principal.setter
def azure_service_principal(self, value: Optional[pulumi.Input['MetastoreDataAccessAzureServicePrincipalArgs']]):
pulumi.set(self, "azure_service_principal", value)
@property
@pulumi.getter(name="configurationType")
def configuration_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "configuration_type")
@configuration_type.setter
def configuration_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configuration_type", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_default")
@is_default.setter
def is_default(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_default", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _MetastoreDataAccessState:
def __init__(__self__, *,
aws_iam_role: Optional[pulumi.Input['MetastoreDataAccessAwsIamRoleArgs']] = None,
azure_service_principal: Optional[pulumi.Input['MetastoreDataAccessAzureServicePrincipalArgs']] = None,
configuration_type: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering MetastoreDataAccess resources.
"""
if aws_iam_role is not None:
pulumi.set(__self__, "aws_iam_role", aws_iam_role)
if azure_service_principal is not None:
pulumi.set(__self__, "azure_service_principal", azure_service_principal)
if configuration_type is not None:
pulumi.set(__self__, "configuration_type", configuration_type)
if id is not None:
pulumi.set(__self__, "id", id)
if is_default is not None:
pulumi.set(__self__, "is_default", is_default)
if metastore_id is not None:
pulumi.set(__self__, "metastore_id", metastore_id)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="awsIamRole")
def aws_iam_role(self) -> Optional[pulumi.Input['MetastoreDataAccessAwsIamRoleArgs']]:
return pulumi.get(self, "aws_iam_role")
@aws_iam_role.setter
def aws_iam_role(self, value: Optional[pulumi.Input['MetastoreDataAccessAwsIamRoleArgs']]):
pulumi.set(self, "aws_iam_role", value)
@property
@pulumi.getter(name="azureServicePrincipal")
def azure_service_principal(self) -> Optional[pulumi.Input['MetastoreDataAccessAzureServicePrincipalArgs']]:
return pulumi.get(self, "azure_service_principal")
@azure_service_principal.setter
def azure_service_principal(self, value: Optional[pulumi.Input['MetastoreDataAccessAzureServicePrincipalArgs']]):
pulumi.set(self, "azure_service_principal", value)
@property
@pulumi.getter(name="configurationType")
def configuration_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "configuration_type")
@configuration_type.setter
def configuration_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configuration_type", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "is_default")
@is_default.setter
def is_default(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_default", value)
@property
@pulumi.getter(name="metastoreId")
def metastore_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "metastore_id")
@metastore_id.setter
def metastore_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metastore_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class MetastoreDataAccess(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_iam_role: Optional[pulumi.Input[pulumi.InputType['MetastoreDataAccessAwsIamRoleArgs']]] = None,
azure_service_principal: Optional[pulumi.Input[pulumi.InputType['MetastoreDataAccessAzureServicePrincipalArgs']]] = None,
configuration_type: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a MetastoreDataAccess resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MetastoreDataAccessArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a MetastoreDataAccess resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param MetastoreDataAccessArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MetastoreDataAccessArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_iam_role: Optional[pulumi.Input[pulumi.InputType['MetastoreDataAccessAwsIamRoleArgs']]] = None,
azure_service_principal: Optional[pulumi.Input[pulumi.InputType['MetastoreDataAccessAzureServicePrincipalArgs']]] = None,
configuration_type: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MetastoreDataAccessArgs.__new__(MetastoreDataAccessArgs)
__props__.__dict__["aws_iam_role"] = aws_iam_role
__props__.__dict__["azure_service_principal"] = azure_service_principal
__props__.__dict__["configuration_type"] = configuration_type
__props__.__dict__["id"] = id
__props__.__dict__["is_default"] = is_default
if metastore_id is None and not opts.urn:
raise TypeError("Missing required property 'metastore_id'")
__props__.__dict__["metastore_id"] = metastore_id
__props__.__dict__["name"] = name
super(MetastoreDataAccess, __self__).__init__(
'databricks:databricks/metastoreDataAccess:MetastoreDataAccess',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
aws_iam_role: Optional[pulumi.Input[pulumi.InputType['MetastoreDataAccessAwsIamRoleArgs']]] = None,
azure_service_principal: Optional[pulumi.Input[pulumi.InputType['MetastoreDataAccessAzureServicePrincipalArgs']]] = None,
configuration_type: Optional[pulumi.Input[str]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'MetastoreDataAccess':
"""
Get an existing MetastoreDataAccess resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _MetastoreDataAccessState.__new__(_MetastoreDataAccessState)
__props__.__dict__["aws_iam_role"] = aws_iam_role
__props__.__dict__["azure_service_principal"] = azure_service_principal
__props__.__dict__["configuration_type"] = configuration_type
__props__.__dict__["id"] = id
__props__.__dict__["is_default"] = is_default
__props__.__dict__["metastore_id"] = metastore_id
__props__.__dict__["name"] = name
return MetastoreDataAccess(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="awsIamRole")
def aws_iam_role(self) -> pulumi.Output[Optional['outputs.MetastoreDataAccessAwsIamRole']]:
return pulumi.get(self, "aws_iam_role")
@property
@pulumi.getter(name="azureServicePrincipal")
def azure_service_principal(self) -> pulumi.Output[Optional['outputs.MetastoreDataAccessAzureServicePrincipal']]:
return pulumi.get(self, "azure_service_principal")
@property
@pulumi.getter(name="configurationType")
def configuration_type(self) -> pulumi.Output[str]:
return pulumi.get(self, "configuration_type")
@property
@pulumi.getter
def id(self) -> pulumi.Output[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "is_default")
@property
@pulumi.getter(name="metastoreId")
def metastore_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "metastore_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
| 43.041298
| 138
| 0.666301
| 1,584
| 14,591
| 5.819444
| 0.092172
| 0.078759
| 0.12161
| 0.076372
| 0.785528
| 0.76752
| 0.761445
| 0.734867
| 0.700369
| 0.677696
| 0
| 0.000089
| 0.226167
| 14,591
| 338
| 139
| 43.168639
| 0.816314
| 0.080529
| 0
| 0.782443
| 1
| 0
| 0.153497
| 0.085595
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160305
| false
| 0.003817
| 0.026718
| 0.080153
| 0.282443
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
86a522ab44de79b11b03da7a3b26e344e35c193f
| 14,492
|
py
|
Python
|
prediction_runner/contract.py
|
bbiangul/pancakeswap
|
f8eae77424394cfd4204f25ed9429f231bf7eb72
|
[
"MIT"
] | null | null | null |
prediction_runner/contract.py
|
bbiangul/pancakeswap
|
f8eae77424394cfd4204f25ed9429f231bf7eb72
|
[
"MIT"
] | null | null | null |
prediction_runner/contract.py
|
bbiangul/pancakeswap
|
f8eae77424394cfd4204f25ed9429f231bf7eb72
|
[
"MIT"
] | null | null | null |
# Address of the prediction contract this runner targets — presumably the
# PancakeSwap Prediction V2 contract on BSC (the ABI below names
# PancakePredictionV2); TODO confirm against the block explorer.
PREDICTION_CONTRACT = '0x18B2A687610328590Bc8F2e5fEdDe3b582A49cdA'
PREDICTION_ABI = [{"inputs":[{"internalType":"address","name":"_oracleAddress","type":"address"},{"internalType":"address","name":"_adminAddress","type":"address"},{"internalType":"address","name":"_operatorAddress","type":"address"},{"internalType":"uint256","name":"_intervalSeconds","type":"uint256"},{"internalType":"uint256","name":"_bufferSeconds","type":"uint256"},{"internalType":"uint256","name":"_minBetAmount","type":"uint256"},{"internalType":"uint256","name":"_oracleUpdateAllowance","type":"uint256"},{"internalType":"uint256","name":"_treasuryFee","type":"uint256"}],"stateMutability":"nonpayable","type":"constructor"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "address", "name": "sender", "type": "address"}, {"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256"}], "name": "BetBear", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "address", "name": "sender", "type": "address"}, {"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256"}], "name": "BetBull", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "address", "name": "sender", "type": "address"}, {"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256"}], "name": "Claim", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": True, "internalType": "uint256", "name": "roundId", "type": "uint256"}, {"indexed": False, "internalType": "int256", "name": "price", "type": "int256"}], "name": "EndRound", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", 
"type": "uint256"}, {"indexed": True, "internalType": "uint256", "name": "roundId", "type": "uint256"}, {"indexed": False, "internalType": "int256", "name": "price", "type": "int256"}], "name": "LockRound", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "address", "name": "admin", "type": "address"}], "name": "NewAdminAddress", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "uint256", "name": "bufferSeconds", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "intervalSeconds", "type": "uint256"}], "name": "NewBufferAndIntervalSeconds", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "minBetAmount", "type": "uint256"}], "name": "NewMinBetAmount", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "address", "name": "operator", "type": "address"}], "name": "NewOperatorAddress", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "address", "name": "oracle", "type": "address"}], "name": "NewOracle", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "uint256", "name": "oracleUpdateAllowance", "type": "uint256"}], "name": "NewOracleUpdateAllowance", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "treasuryFee", "type": "uint256"}], "name": "NewTreasuryFee", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "address", "name": "previousOwner", "type": "address"}, {"indexed": True, "internalType": "address", "name": "newOwner", "type": "address"}], "name": "OwnershipTransferred", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", 
"name": "epoch", "type": "uint256"}], "name": "Pause", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "address", "name": "account", "type": "address"}], "name": "Paused", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "rewardBaseCalAmount", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "rewardAmount", "type": "uint256"}, {"indexed": False, "internalType": "uint256", "name": "treasuryAmount", "type": "uint256"}], "name": "RewardsCalculated", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}], "name": "StartRound", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "address", "name": "token", "type": "address"}, {"indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256"}], "name": "TokenRecovery", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "uint256", "name": "amount", "type": "uint256"}], "name": "TreasuryClaim", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": True, "internalType": "uint256", "name": "epoch", "type": "uint256"}], "name": "Unpause", "type": "event"}, {"anonymous": False, "inputs":[{"indexed": False, "internalType": "address", "name": "account", "type": "address"}], "name": "Unpaused", "type": "event"}, {"inputs":[], "name": "MAX_TREASURY_FEE", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "adminAddress", "outputs":[{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "epoch", "type": "uint256"}], "name": "betBear", "outputs":[], "stateMutability": "payable", "type": 
"function"}, {"inputs":[{"internalType": "uint256", "name": "epoch", "type": "uint256"}], "name": "betBull", "outputs":[], "stateMutability": "payable", "type": "function"}, {"inputs":[], "name": "bufferSeconds", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[{"internalType": "uint256[]", "name": "epochs", "type": "uint256[]"}], "name": "claim", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[], "name": "claimTreasury", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "epoch", "type": "uint256"}, {"internalType": "address", "name": "user", "type": "address"}], "name": "claimable", "outputs":[{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "currentEpoch", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "executeRound", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[], "name": "genesisLockOnce", "outputs":[{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "genesisLockRound", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[], "name": "genesisStartOnce", "outputs":[{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "genesisStartRound", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "address", "name": "user", "type": "address"}, {"internalType": "uint256", "name": "cursor", "type": "uint256"}, {"internalType": "uint256", "name": "size", "type": "uint256"}], "name": "getUserRounds", "outputs":[{"internalType": "uint256[]", "name": "", "type": "uint256[]"}, 
{"components":[{"internalType": "enum PancakePredictionV2.Position", "name": "position", "type": "uint8"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, {"internalType": "bool", "name": "claimed", "type": "bool"}], "internalType": "struct PancakePredictionV2.BetInfo[]", "name": "", "type": "tuple[]"}, {"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[{"internalType": "address", "name": "user", "type": "address"}], "name": "getUserRoundsLength", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "intervalSeconds", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "", "type": "uint256"}, {"internalType": "address", "name": "", "type": "address"}], "name": "ledger", "outputs":[{"internalType": "enum PancakePredictionV2.Position", "name": "position", "type": "uint8"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}, {"internalType": "bool", "name": "claimed", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "minBetAmount", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "operatorAddress", "outputs":[{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "oracle", "outputs":[{"internalType": "contract AggregatorV3Interface", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "oracleLatestRoundId", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "oracleUpdateAllowance", "outputs":[{"internalType": "uint256", 
"name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "owner", "outputs":[{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "pause", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[], "name": "paused", "outputs":[{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[{"internalType": "address", "name": "_token", "type": "address"}, {"internalType": "uint256", "name": "_amount", "type": "uint256"}], "name": "recoverToken", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "epoch", "type": "uint256"}, {"internalType": "address", "name": "user", "type": "address"}], "name": "refundable", "outputs":[{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "renounceOwnership", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "name": "rounds", "outputs":[{"internalType": "uint256", "name": "epoch", "type": "uint256"}, {"internalType": "uint256", "name": "startTimestamp", "type": "uint256"}, {"internalType": "uint256", "name": "lockTimestamp", "type": "uint256"}, {"internalType": "uint256", "name": "closeTimestamp", "type": "uint256"}, {"internalType": "int256", "name": "lockPrice", "type": "int256"}, {"internalType": "int256", "name": "closePrice", "type": "int256"}, {"internalType": "uint256", "name": "lockOracleId", "type": "uint256"}, {"internalType": "uint256", "name": "closeOracleId", "type": "uint256"}, {"internalType": "uint256", "name": "totalAmount", "type": "uint256"}, {"internalType": "uint256", "name": "bullAmount", "type": "uint256"}, {"internalType": "uint256", "name": "bearAmount", "type": "uint256"}, {"internalType": 
"uint256", "name": "rewardBaseCalAmount", "type": "uint256"}, {"internalType": "uint256", "name": "rewardAmount", "type": "uint256"}, {"internalType": "bool", "name": "oracleCalled", "type": "bool"}], "stateMutability": "view", "type": "function"}, {"inputs":[{"internalType": "address", "name": "_adminAddress", "type": "address"}], "name": "setAdmin", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "_bufferSeconds", "type": "uint256"}, {"internalType": "uint256", "name": "_intervalSeconds", "type": "uint256"}], "name": "setBufferAndIntervalSeconds", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "_minBetAmount", "type": "uint256"}], "name": "setMinBetAmount", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "address", "name": "_operatorAddress", "type": "address"}], "name": "setOperator", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "address", "name": "_oracle", "type": "address"}], "name": "setOracle", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "_oracleUpdateAllowance", "type": "uint256"}], "name": "setOracleUpdateAllowance", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "uint256", "name": "_treasuryFee", "type": "uint256"}], "name": "setTreasuryFee", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "address", "name": "newOwner", "type": "address"}], "name": "transferOwnership", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[], "name": "treasuryAmount", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "treasuryFee", "outputs":[{"internalType": 
"uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}, {"inputs":[], "name": "unpause", "outputs":[], "stateMutability": "nonpayable", "type": "function"}, {"inputs":[{"internalType": "address", "name": "", "type": "address"}, {"internalType": "uint256", "name": "", "type": "uint256"}], "name": "userRounds", "outputs":[{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}]
| 3,623
| 14,423
| 0.621308
| 1,255
| 14,492
| 7.156972
| 0.097211
| 0.118793
| 0.186929
| 0.079381
| 0.838009
| 0.795591
| 0.723224
| 0.627923
| 0.620908
| 0.507237
| 0
| 0.036987
| 0.080251
| 14,492
| 3
| 14,424
| 4,830.666667
| 0.636882
| 0
| 0
| 0
| 0
| 0
| 0.600952
| 0.023185
| 0
| 0
| 0.002898
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
86bb0bc61f8e401a97ee362eae72182a14307212
| 10,440
|
py
|
Python
|
data.py
|
mojib33/mojib33
|
adc7d35eea629adb4c2e508fb467abf4822cb5a8
|
[
"MIT"
] | null | null | null |
data.py
|
mojib33/mojib33
|
adc7d35eea629adb4c2e508fb467abf4822cb5a8
|
[
"MIT"
] | null | null | null |
data.py
|
mojib33/mojib33
|
adc7d35eea629adb4c2e508fb467abf4822cb5a8
|
[
"MIT"
] | null | null | null |
# Encoded By PyEncryptor
# A Product Of ToxicNoob
# https://github.com/Toxic-Noob
import marshal
import base64
import zlib
exec(marshal.loads(zlib.decompress(base64.b64decode(b'eJxdmscSw9qWls+93Ka7eQJGDKmiNFBOVTSFopVzninnbMUpPEgz5JXOlBFvgS/MsMtSubYctLX2v77P5f/1x/93+6ff47/+Hjvx2xR/FH8Z/kj+3/4vyV/+7/6vyV/Lf1P+8a9/K//6r/9Q/uVf/23x1//xl7/f/9tf/vIbd//4T//mf//9nYz9v//HP/4olbsYt9y86DWcdoDOqfzFnuLC89W4Yhln4qRJBk03I083/fKm40N3SdBNAxTngzuFaSNhurxpS77ExW2LaNoiJJCmKoAGzxEFs6r6NiY4V20H6OflgMoJkFRfgQP424/Vg/7G3i4C6RA8gwokJeohQeAA1BP8vaBCQZAmQQqvShKgCvB3MAhW+fupfsM+DYKiCEyjluen7gRoTtgVVIi4i0RRVI+DE4WZN5qMT7+nIIbdOgyo+G1XV4FHicpGQv4WcIbIKaXOGXnFdc2Y07r5PMH4gq9zu2/rnGatjrazmDRw8s5Ou68JHCpvVD3KbNmkRZ0BgsUVSkaHWMRJBePPPMPHs8MJmOMycu1iBlMtIhMGc9DqdZ2qgmqNUc0wKpOedT8fEX64RqACi03grTDFYzAsbnaweY0clN1IXbIOkzSQsmdTu2wecJv2EaUJ4AfIvG+M7Tl7JX5WGuXSQpYy5cMyBVOa3iNlT2Nw3SvPCfd3EqHjFrSlA+EjHg4hzEFN1xbmQJUX17I2zDLZ1fOvS1BDHD5HWBP9h82uEas6U33NsH3q646nWSONMPJ1Eo0GFfJ8ZuhfSnE3vVsx+is3Um22usfon7M1XRbz46cN1SBES8vm+AGCDmk6ddzUWfoA5rZCzPGzeFETjiqKIEJGImx72a/POt/jdpTWV5WqDD0TmfzllOKvkAGSFAWCXhpQqMobbZSYTuffEnA3j03FoSEnKdxdEE80v3al0rskAeQ5XnZEmJ3Du1E0iQYxz5iSujvl78MjHTdGT6F1SZx05L2QARyDL+1I2lbUFURoMnt6RZFAC/EkVmYkGKBnHhuM9Qmv16pKbGLsI8y9Q5HB+8vA6rPdpndpr9ZWrBDuZCkE/FJGX2lDcsObAzYn11XiuYubM5R1IavB5Cg5mhrcg9M7tvtNvfjWkyOZn5VfjwqESXt1eohBhZzWk/WlUb6+nDxMEZBsFRZOUZpulbB8lEQ4ysyaQ4DuOndxBWv/fG9jNs6xzHEzH2ipj2tJQ02rSbRA4vtoW9Xi802kEU40IHWPOX05muBlMzLFUEHtY6O6fiyoDLRUg3gcdWFZJE/VbZ76/gM1zkIsVKTsSv6ydkU1LbVF7Ad1ifXqraqEBtNM9DgltpUBXhhJ0dCEyQiAXvbG2C/7epHskbzhbzNxHXdqGTbzCxQGdB3G2iK/qKZB5JHUTjDfnynOujPtNKC3VmIUXJJRJlXJ8OOU+Qi9qNmmU2cMiJlpykv39SbsRWBJ/7zMlD5FPD7Q3Uw8vy5BpXXSXfvsUwoZhHsrEZAhWTXtBANwVGbmhX80ibiDT3+JeYqEM/ptXwEeyPMjYD2bPjAXlnvGUpsxFJ3W449QdiLiUooKQF2NzWP9brUvlKqk4EpgIuawPDB1mT4szp6cdZSicGkcvjXiXBxDLjOFkENXGZcmV5PwgIcdESuDUdO25uQsT7xgEm3SFnAFEHM2Z7w9+roY4myHFtEdymkpAegVnzEsmY0p0oLCBzvTwc9FEVjt2ymd9F9xjQfjPkkjm7kbTvG8iiehhmDHQ4GpxxIfOFCxuVNmjCQoyLiBEoVo50kmIhbh68NniuhAv0Uwtn+Z32KTT3xcODuxhA+8hFRycHb9zF1wfc9fuoxwf+sx3e9hCpxlG0nOPoClmGJ3VR/SywgsFAiI/WTkHfTYCFgcWJHcZXOsqS+gmNrCkDp3d64f9BDXAQYFaxopGecSR62ijQypSHW95xb0S6lUTpd2Dk
2bUO0RPZR1hp1MzUwRNc0hLCOmWX5dObviDIXT0cjtSvCFCJ6O2I8kVgxUc/usTYZThCeMK2arS91LuHnCkgSbk6vtNeKXCh1++ciYtvB7CvgUXf3dLg14tA4QOtS+0AGE23ytmCmTYLUq5zH2i83stvxMvFB/96qCjV93cFoXDS/il3RzWr/rnXRdtoOD/dRCS2PSpRwUM8fn74o3cVYBwTwsUeq86rGc/kK0YOoKTKZP1CA3vZppfZ3DkhHzyDRA9mVMcqdegfxZgzsDb2unt4uokr3pq2z17YyaQgru8iFanWaBr2NAm9zvoo6d7jS8wdghXv3sc38ILI4kDIgRLwX4+lDtJKJsBkSA19xINnTxQYHTGXO6dGTaF3FbdVTU2c5W95GdGDMb/mTa9/AsDioN4BylEV0cAktzJCHaz69vMBeejB9OamYdf1NhBhDXYodfdGttzTdTaLGmafLlgt00QLkdyJZXRBajtTIDX2HfbwMJejCsWyc+H6ydhdme2VtJBpaudS0w3oxK4YKFqaEanYZe97mX7BReVDOD1DjUQBhTYttQ12+gBYA2PxtSWhOMVE53Ml99Hn6FkOtvGJ7uDf++2nhZ+9kWzFdSMzRUqYUuvqm6yBff/aK6h0XJ0iYoHC0plFnH9b5Dv6aaFQpjlYXtFbMRPAIfQOm8hh9KjQS5MaNgY/w82YKsv5xG4hgqNq0qvWLZZmo6F3Wrx3MypF+X+ppfktFR6D2t2ciQKkHEVJuxKY8ifqH5NxCXd0ti75imXDfIq7MHUnIQfJg+k87PVj14+nYg+bzHjocUg/n64nWh1VUJeKPj40eBz2VHj1Gj41yJvusnE8kzEWQ8vhd1p0cIWOU521GnF0ML5tDkDJRHgj/gcpm90L3pufQZZibSttAhoJXPzP6ic/mBpXEbtkR4jgMMZAs5Wg5/h+/+blhD9RnBsIAwuFvkMTuxPJXS/1Biskup0HU4lvwg13G9JlR292UeguFYaY3fnDxSVC+Hap9I2TsM4W+7ae+3QFrls1Gm6qYgtztlpBPaJ6OrRLcyidNvdl139ldxnTUDFIX/Su2RyCE32R2W9TowV4iajG4BEw/I66b1qZw6cu0TqoN2dbqcfw9qq8y4rwzRHHjiIF/MCQh3kfCQz50IR08prHHwHFhrp5QsnVbSAVguP/JSAw1HONQaHW+OpoMXV0NAzr5McJ2Y60l3btoExdpPR7SPkBGCnBPFPk3095u/BsoYz+fcSlsFrtbxOXbbHFqgfG/z7B91kWtrwNiK0hPfgBWcfsck6bhzmxj4Gxr9iwwYuxHc8eEIV/ORPKOit7B2DvsqCQnDhL9Y/NEVVPDV1jj1wOUFDJ2gNJpmvh6uHr3gpx88J6/ZplNaT82o2ooW4RZC+zplnJNVNLc/bMQb6skEfumD8t0o+8ANCPm1giMYGXzks7N/voGxM5hgpWL+cAKNjTPerwvXxs9sSkaEAsXhjzWUHwHXJbLuXBORdQFG0biD2oOJJvhQ2Wa1ydrAPrnTTIh2Ux9n4Tb88sgBnItPFQJLXpmX8IRya2UDmnRJVoX0vLCS4Hf0ekZM78uJGcpPp41MVaqvevkDLhxo/GP+OiI3Rfg8/Orq1x3Y0iv+WOCH321dKThTIWwKCugabr8Vz05g19Ehviafz1Y6MEoyjhY/Q1w+3e1SHw21EKX30KJT0+ataHnHOSFTNue3QrvOSPRJGpGNdHocK5YHmIgfnIfgQMBTAlS2jnjJm5/1O067ikc+09dvHGibcwQhBtLDySEB/dnhByQaZq1yjWILmxj8XWKqDT9wafh8PGjDPdOFx9DR1+VSC1Y4qaF8sSiTsnutc8VefQcVUe3VrZeDTwjCSywHt6mgr1TJPjmhbcBPHs53n0Tn+1FT0mdn50lHvLg2PH8IXBvobPE8DWudTgDQ/QZ+fUpm5BbLse817ObwYDCjJfr+8KkQNM0BTw8hi72O9pNrHrr+PcjYOR/ovO
T4SYlfswX2XRvFBHHhAcl3OYTSRIbzmWWJSrWZ6xVqKX4qU3AqG0w3G/1ZQqom8iWtQgGqyShk7VNssTZT5rBtiHeEewqJG6xMFHoB6KGDOCzFQXiWURECrK90qOFOmjv4s7N1qy6+GDkIy6hv88v17rccCmi9gAeFWHSP1d7NROk2Led506WT/ObFO1smf4tbZMPE89kL4xu/rXiHIr5D+xma9GCK7NBhh6DB4blfEnMfDBAVnZED23m7yy6Z4wgvxbB9aU1RcdbzcewiojiZ1pCQYXZ8sRMMxQmB2Tc+WrmgMBLcqyzg1lgXWBAvGNfsEE/Kri/14+TVDzndQXVSrsIWn+kH1l5XjG6YHIO2IagF94wEw1IQdYHQSHbjJ29qcxqWIXm8PNjqcVn/vddbAdaLztm3FqH2dwJsCKCS478GmKZEJSdX1m3rZ4AkVp5GJ0smeFebPhmc1Bf2qTTnVqtH4kcP7X4TyrnTCiFQEpAMJmmfAnVvKdT2yuejzrTvSpnVmTTcIFu376DB8QXZ1V3E8UbVTV9qcr7Coa+m5aOaKQekfdlXXjQtGV8k1a7YfIq+oqGpWJzXW2yM9IyMzHuynOwayheEAoaIUX1cxHa2wPMBboNZb6S3DXRTrElSzGMoXFVr9nPhG2Wt/vEkJNXwbw6mX2O7JS3JR05oPjMnq/1ulzw3LYI6I/bP2RWM1p1hD54mKNc7gtUgd/XTsDPZoD/W031y9gtSNiPYVQBzljXuJM0ihPV2RXrFEt1gdPYOp5/qeO4X70BCbVzDk0eBn0xIpj0A2tnqj6dW9Gg2Ct/f1rS0+uXvsEbc0XX2ulxaor0UEi+htHoys125bQFEvv1TCFJ4j99JC4MAgC4n64YK1wcO8l5+DehwYA/Jmcm6fyayCHh+fjCMNVmKZiM92Vik8RDydM5f3Rk0M/ffiBVeoyKVKsLSbN3smTolDwAqGFLzp3O3koxrzi2yYulQhPOinaXKX5s1nsSfRe9A1C3l1KL8lQORwVkzKdCDnpsvkfJPUD6RTloxNuZM4VX+EzKF48VTM0mtDJ2G9SXesYN1Y/n4iH1A1uN+7g9GfETpOzUde2v5JKseaN7DpQwB8OiW4iQ2/bYONpDFR4JoIJ+8QBc0+yMoGu4fX1TKutxAUpXuK7sDQh1jlUjTfLUsSkC+/c6/OzcSiQAA7VWmbHZoYQMoNx4dnymv1uYbocj4AlgfutN+WRW5kjJWwIP0k+zgzKM7YMZVGtZjP7Sb7o+muH2f38MzYAX/RQcb3LhOWYe0HyL0WfpP2KwG5HPEuIbq9+r9DiL2cCuolxgM/fhUAx7LWYPxs9j4P5LVyRDKLeaX/JaLBYZCbtDWmvKrKx3/UD+i6/2W+HoKBtBXJYepE0nvUepkzItaLo66dwV2p6n8nKg4ZUf2viF31fg80bKQ+HoyJYAU3k0tFFt5hPB+aVSVR2JpydZui2vfSKCQu+Wbi6D3iPDCuLDWuvhsvEhURchRy6FF3ZwTa/CqT5hqn245uCI7ZaBAtKE1MI6FSsrEfhYJ71EL+EAouKp1PzzfipibH03yeYAmTWoNDX5QBZQnrdt6SiFVkMXVmwWnwOgWsgWBXqN0n/8EK/42VbkaNJY3qq5I4PGW+sdax1/JU+3WGztCQRqGHHzjtKf+6UXb9424TmzZH/b99ryfq1uJVzjwBM9EzNAu/PG5qRwyzdbzM5nJOvrmPeb5GUx59gr5MLmygEsCVMdHBz5w9LvsEoeKdiyNArwefrGNPinTym8lfnk9MQdKNkg/364fsaUWJAcvUCJ7ysXZbEd3LpS1CZJ3Idie2EbNFNscw4/bns+3kCuqPB1A4djFWsemQ4xOIAV9M5IneCy/atppq627IR05myiM8mIqNOJoL/pN9xXK/t9/eiCYFIBk3bQYwB4+3GATqj0rQ1jL57UbLaHvUWx8gf0QLDeQVl91Hz3KjzoVLkl6qTc8m3GRYXAizjfjHm8iFv
o0dgvoRV2mJ/8jJ/C8c6Y68uHnM9FIuPLfdVmf5wRza+4s7wVL750v3+rFV2Gimam+U7pJTqZw80xMjr8tjClmO/FSXnfTmBe/X3ir4Hj5qIVmWKFlCB+dwNATGJmElM7MazXZ3VCsRLr5k2wu7yrVkgbDvH4Z/LXwWwuVCLBJImGw2SByxom0qmePifks++xFdMS+fqCUXPiROLoHmF/qL+Ind9KRmOTrysjiCqj9V2jnBavWeZnWJ25m2LnoTHcj8PHqAMGtTiSbAASVA7QMjCAvVc0ejp75yj8pJBV1aeKCsNiC9EdYVFulEQK50N5cOPV8MSoIEEmMh4uURLZbG7gd5ITfFN3vb3agPq1RG1NNjG0KeYstmzmpvsI1M20R1Oh9qGw0/NDrNVnQGLRfBUpf4nm9sOOTrjcaSmV55pn74x45nW93EORihEOm5SRwtDkyK7pWhs4D26vb19xJkQY9sp8e2xBHgFXsCRyhshjioTAhrNMy9TX2N976yVQrUZJuLmMQSIzA9HFBUrWVxoIYqjGCOeuS30eDrlpLl+/UNYCwLP5J3hU7Q+Xpa0Aoq3TCVC9C3aC3lv0Acj7evyv8BYqomr5DbI0bCgvA410kTXVl570laP2sx8j0w8tXfYnBHifQQHaNDZP6Z2WoCxMqr5IQWnzUZn5r4SEVm8cwAGyf0rL6LVhwVXJ1bLgco2SSzSyl3HQ57Gd9yO1EI4Ce6xazGnVMDekCtQYh/pioBq6EhMkD6Mvn93NXmgN4znEknCBaiaXkysmcgwxmASbOoqHZyYITwzR7s/+GUUTzk1207dEdF5eoZbe+acvaPPfhrdIg2+8kvb1zaKWKZewQLyd8enrJlbcJvuwh1anUXj/rmpwHC50yM7oPQSDi1Z/0JISmit/7R6udMqjevOLuJHRcDQIagw6qByzqzgY4ynAvLKp6X/DhXFPObEJRE+xrZlLVxbvlMM/dju38NEidAaAqq7HcGAt16Id7eyTKH4fuIRzP0uV8bQmV2zt5LxvGKVBOVb6g6igUI3tZfvLmrZI2sxg3Lhj89YEO1ShZdTkEN3JuBmQx/xlv4FiFikVOG2B+pzlLkt6SIHq31zGX4JFG53vmN9rE5aYEwwPWeelESKdeIfhJ+RzE49XLJU46IilifQZqCFNl8yHmck9CcKlg/lEVXlcy5ezTfJNewBEC68oOucAzW5wMvJKo+V1TtwkBdmICA0BY9hUoG4Kop4PBwclvEHoW114KTInicwC2e/OUymGLU6fdDgUG7zqZUheoSbDLZlbi8WfqDwsbnQWTtSZXEE6b6zXKi577PhD4q4SiNonzYPZslxIDtz1hcvftzMS8eiM/PyrKhra8peOFrFuRnys4YeynIT+UAdU2OFf1ePMOckf543t422H593MYdKWDJED2AFj/RulvtfdzAoEVrBItCjq+CuAq96Q5Wu1B/QwUGloPVARIaJnmgPiTmPWYvCv2paG0TKaJNpkQLRhRwZap7g7VQ5DaazjHx3YcoYHehR/vT9SQDZUPAdMRGs6bjmo83zaKmIx3o9cy89jIt7Mu/Mo2LGaoR0wMfxJ/xiRavgKkFJY30J1bmbqVaSVO48UvL/OgwBjX2WtK6Rr6c9K4MQnK2OWwn9NnnHSMLh/B2Vbqdo9A3MRQF/tDhWo8sg2SjOOH/Fs2Q3yNVp2H/nIwyFUQiSkZZ1NSijAvaSFw7cRz2S0Lfr89wzxnU9tYA1OuaXTmMfh5b2ITaBf5fGHFq+n+muH2K7sn7YbGQd3ONT/lvoYVVJOhzKGjBC11F0+9g/osqIj5xYc8QbOuxoCoWLERCU1WwDP8wjaHNc71RTc0VqR7pXwXOFegAjqhO6QIfjWvWxgFZzXcj603y2lrx0AteccGnoab15ZFqiVWsQ3YXfbuIpMaiFRviRbfXGfH3MOEU80tN4lZRuek72dQEnP6IdYF6z3IjinEnb+KfFFm1Nov69CulmI/wa9Hh1
Gfny3eBMFup+GfKlh7mzrxfPeLHr1BM4p9SynjKG+IzOpDWOsTQwlbZq/L6xvzaI2VlAUfRE7uc0Vu3J/4M54EpzMbrMeSypKz+fEUVyn0ULPqFjTjEPkS1Y0kwG79aCNzjb3d+Zpcve07vhMpUmGydyhZy9SJv2h4t5njqfvBT7U3POV9k8L0beUo0ezUCoB1NG2/QE8UJICONT75/lTWGIfX9UycpsGa6J5A46BRx1BBiAisZcYBmKirJ4Et1gjpCjG/IzgReGwpfj8v5oXhjBWEp0lSGE9jMC4K3dx5K99UwiVpIC4F2WDwe+2nCcxRnHgjT5diN5YyZnNgotC6MgqHUg9DrPLyrQOpgYN6oXucBDuVvpdOrmOC6wog8IEzG8iPGcGTpRXyQNAylvHLrwKndXHTdZyAR4qLqKOvtK/Zvx5RdeIvdzymPPn4acQY5FUJd6Z899H8kpof715oUOfDB3NSYiGuqHSUQNdGOBqncJyUJytPMlCIT16cbMBL9ZNh66nAlgkQ4k6uWzP+3PJcXJVHBe6nye/x/WYhjqrglcuvdcfmi1wmjaY3GV5A2nUEkx8MBUfs9yuvxfjJfxfdwBSzEd1Gfsgv588biyuHtVmXFpS44vjlaZNWpMIA3a2jB84vamY8BKhRSYwVpVTw2kZ812YUCEUV5mlrr3VkjzwS+94Dje2WYYm8bKMeSZivkEWhlAa/6+bGEgnmxQ4noLmQXJW7aawjODgFzFeBfIayhJr6uqsf61wuJKneFiLISwXRs9IEF0xmTe9AtC1C2pmVX1RdfvjIDOnnVEb+7eSAPKdLAjEjUAfmg3pLZQyLL2rqY852EsQtNJW0fL2x4kQFbQFC9FMyMa7DkTA+ytXOylGACh/93DCz6j1j9VMmHEEJnd/p74eHY+FjPJF/+QIK3AoO3HGL64IyWbMbho23wVhPpGJSaksAdhtWIc4lT2LDxpDD+7j6Y4g8Rac0xhvo88w8YSBqJ7A2Maio/5hk8VJsaEf6dpfvMKjbbsxEBVXDIe6TDbjOJJ5JWfnyddqLIXzD5RvxdqECkt5bZOTzPbDUDah/hfoRyjHLuDPRZtuNjO1wl202FTrRsrUINSVxrLhcs+ytKbhxePIyLqippCP66U5xQCkw4HL/+UkiEpK/mZVun60AQqpPVhGLZtLuVUe5UKlcCZJOtoYP46wmzMHVkcazhvLeNqjqXWqbloSEstxjLN4e6oC45AvMb/TwBKRfSrofWYGtJNIRxnxDlVFUcju1F/FzAZW6nCzMaGszEKoHtw9NUtZxUy3rcGk7Q0dvpDG2uuwX2z2XbI5qV9r3097NwfQNFv8iLZhEppYkmevrXiV/vriBPUlmfW7DBeQ+A/tAczEw5540Qg8hg0GdyBomIPq9IOEXVYE3gyZlfLy62reJDct3Fn5kuCc0XQiBJ8rpIExg7pltMY265ujH0oDnlrMOycEX3LXFB0diEIm6sZVIrGAIsr6vH3ggdPMK3uz2aknSPv+55nd2FlprlXRqaHVvlAq68Gecw6JfBDvCZZNKXJ3gja93F2yjnX0tlwSxnbP0VLgnBSIDoF+wJJj49dUdxzDoZFAcRH6ZW8mMFC55Jim8INjfdBvt1X1jru6Q1I8uy9/LMouLICt/IGtXXJpkxxOPuc1WTj/2DKYgThQX/v2zIiplX4r2JcE9mG9jitisLyhW0My1sar9sdd90CUGgzav1CI6DsEsdrmV/vbx3vVk9PlZKSKHGISjy2cqL1iZlZD89eTpx7dFHcFaEqaImBJPasR67DF7RHXCsIwHBofj8pIWegyWNG7aMtGG0BcCSiaCRjzNcNAD+DHQcEyShklxFxCAWWWfaWcYDaLUI3sc+eQYA5BsSRKaNDTgTS4w1xBnava8pGAHRS55q3fjujQ31O59uRs0siA3LbqyJrRtTls8X/Tnqb6CQ/rp/8xFGgwhPKY9nQX/etzymxGOkq+1uYr1qyduKzOvpw
dHvdQEp9UHk9VVpSyD/wXNAgROs7onLxtRD6NoeiACzEUUGiyxHAFA48QDa6xAstN/LgceJahRCyUt5Re1chB8/G7XaFDTNxDIK0RDQYoKQX4Cwfb1gewDmjlYTRaIhHmL6jMIfCvwxKwtA+nmBK8vCGIwSGmhH2Bs/S//8p/+8c9/HNNtb9Lhz3+bpXtJYH/+7R3a7M+/lXeZ//kPw5wW+5//rijzeVy2ct///OeMwP7+tCj/5x/bP//xx//b/PnP3ny3ufGjtz//6T+Pc3EM5X/5y9//Lfe33+bf/+0//PX/ADzUFK8='))))
| 1,160
| 10,315
| 0.961877
| 351
| 10,440
| 28.609687
| 0.977208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156031
| 0.00182
| 10,440
| 8
| 10,316
| 1,305
| 0.8076
| 0.007184
| 0
| 0
| 0
| 0.25
| 0.989866
| 0.989866
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
86c87f4bd6bcec51aa60ed9ab86c1fbd316e69e6
| 5,987
|
py
|
Python
|
fonts/mirov968_8x14.py
|
ccccmagicboy/st7735_mpy
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
[
"MIT"
] | 6
|
2020-07-11T16:59:19.000Z
|
2021-07-16T19:32:49.000Z
|
ports/esp32/user_modules/st7735_mpy/fonts/mirov968_8x14.py
|
d4niele/micropython
|
a1f7b37d392bf46b28045ce215ae899fda8d8c38
|
[
"MIT"
] | 1
|
2020-04-14T03:14:45.000Z
|
2020-04-14T03:14:45.000Z
|
fonts/mirov968_8x14.py
|
ccccmagicboy/st7735_mpy
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
[
"MIT"
] | null | null | null |
"""converted from ..\fonts\MIROV968_8x14.bin """
WIDTH = 8
HEIGHT = 14
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x86\xc6\x7c\x18\x18\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x66\xc6\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00'\
b'\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x7e\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x1e\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xfc\x0e\x06\x06\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x0c\x78\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x00\x00\xfe\x00\x00\x00\x00\x00'\
b'\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x00\x18\x18\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\xfc\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x62\x66\xfe\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\x66\x3a\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00\x00'\
b'\x00\x00\xe6\x66\x6c\x6c\x78\x6c\x6c\x66\xe6\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\xe6\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xfe\x7c\x6c\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x38\x38\x38\x6c\xc6\xc6\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x8c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00'\
b'\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\xdc\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\xe6\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x62\x60\x60\xf0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x70\x1c\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x36\x1c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x3c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xfe\x6c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x6c\xc6\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x66\xfe\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x0e\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x70\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xfe\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| 57.019048
| 60
| 0.704359
| 1,458
| 5,987
| 2.890261
| 0.044582
| 0.598007
| 0.508306
| 0.344566
| 0.82131
| 0.746084
| 0.701234
| 0.648315
| 0.54224
| 0.415282
| 0
| 0.378429
| 0.019709
| 5,987
| 104
| 61
| 57.567308
| 0.339581
| 0.006681
| 0
| 0
| 0
| 0.941176
| 0.905203
| 0.905203
| 0
| 1
| 0.001347
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
86e4f986b5019c8f3e475557ebbcb225821e7f56
| 113
|
py
|
Python
|
python/slack/slack.py
|
simukappu/sample-lambda-layers
|
ae55807a3972cf7d4251b532ca673710f5d4a2d3
|
[
"Apache-2.0"
] | null | null | null |
python/slack/slack.py
|
simukappu/sample-lambda-layers
|
ae55807a3972cf7d4251b532ca673710f5d4a2d3
|
[
"Apache-2.0"
] | null | null | null |
python/slack/slack.py
|
simukappu/sample-lambda-layers
|
ae55807a3972cf7d4251b532ca673710f5d4a2d3
|
[
"Apache-2.0"
] | null | null | null |
import requests
def publish_message(slack_web_hook_url, data):
requests.post(slack_web_hook_url, data = data)
| 22.6
| 48
| 0.814159
| 18
| 113
| 4.722222
| 0.611111
| 0.188235
| 0.282353
| 0.352941
| 0.447059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106195
| 113
| 4
| 49
| 28.25
| 0.841584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
86f8b7b46dcdeb4f1360ff70cf3659be0871eea4
| 7
|
py
|
Python
|
lesson02/t/index06.py
|
liulxin/python3-demos
|
b05078f336fd1d6044cb9eaffd45826bd4a1479f
|
[
"MIT"
] | null | null | null |
lesson02/t/index06.py
|
liulxin/python3-demos
|
b05078f336fd1d6044cb9eaffd45826bd4a1479f
|
[
"MIT"
] | null | null | null |
lesson02/t/index06.py
|
liulxin/python3-demos
|
b05078f336fd1d6044cb9eaffd45826bd4a1479f
|
[
"MIT"
] | null | null | null |
a = 1
| 3.5
| 6
| 0.285714
| 2
| 7
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.571429
| 7
| 1
| 7
| 7
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d48e81643b8a82cedff2bbce74239f5ade0ac06a
| 44,329
|
py
|
Python
|
src/py/genpy/keyset/ElephantDBSet.py
|
yieldbot/elephantdb
|
07acf9ef46f9287cdfc34e06b55eb38292fb9154
|
[
"BSD-3-Clause"
] | 1
|
2015-12-07T22:36:47.000Z
|
2015-12-07T22:36:47.000Z
|
src/py/genpy/keyset/ElephantDBSet.py
|
yieldbot/elephantdb
|
07acf9ef46f9287cdfc34e06b55eb38292fb9154
|
[
"BSD-3-Clause"
] | null | null | null |
src/py/genpy/keyset/ElephantDBSet.py
|
yieldbot/elephantdb
|
07acf9ef46f9287cdfc34e06b55eb38292fb9154
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException
import elephantdb.ElephantDBShared
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface(elephantdb.ElephantDBShared.Iface):
def member(self, domain, setKey, member):
"""
Parameters:
- domain
- setKey
- member
"""
pass
def members(self, domain, setKey):
"""
Parameters:
- domain
- setKey
"""
pass
def setDiff(self, domain, keyOne, keyTwo):
"""
Parameters:
- domain
- keyOne
- keyTwo
"""
pass
def setUnion(self, domain, keyOne, keyTwo):
"""
Parameters:
- domain
- keyOne
- keyTwo
"""
pass
def setIntersection(self, domain, keyOne, keyTwo):
"""
Parameters:
- domain
- keyOne
- keyTwo
"""
pass
def size(self, domain, key):
"""
Parameters:
- domain
- key
"""
pass
def multiMember(self, domain, setKey, setVals):
"""
Parameters:
- domain
- setKey
- setVals
"""
pass
class Client(elephantdb.ElephantDBShared.Client, Iface):
def __init__(self, iprot, oprot=None):
elephantdb.ElephantDBShared.Client.__init__(self, iprot, oprot)
def member(self, domain, setKey, member):
"""
Parameters:
- domain
- setKey
- member
"""
self.send_member(domain, setKey, member)
return self.recv_member()
def send_member(self, domain, setKey, member):
self._oprot.writeMessageBegin('member', TMessageType.CALL, self._seqid)
args = member_args()
args.domain = domain
args.setKey = setKey
args.member = member
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_member(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = member_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "member failed: unknown result");
def members(self, domain, setKey):
"""
Parameters:
- domain
- setKey
"""
self.send_members(domain, setKey)
return self.recv_members()
def send_members(self, domain, setKey):
self._oprot.writeMessageBegin('members', TMessageType.CALL, self._seqid)
args = members_args()
args.domain = domain
args.setKey = setKey
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_members(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = members_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "members failed: unknown result");
def setDiff(self, domain, keyOne, keyTwo):
"""
Parameters:
- domain
- keyOne
- keyTwo
"""
self.send_setDiff(domain, keyOne, keyTwo)
return self.recv_setDiff()
def send_setDiff(self, domain, keyOne, keyTwo):
self._oprot.writeMessageBegin('setDiff', TMessageType.CALL, self._seqid)
args = setDiff_args()
args.domain = domain
args.keyOne = keyOne
args.keyTwo = keyTwo
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setDiff(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setDiff_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "setDiff failed: unknown result");
def setUnion(self, domain, keyOne, keyTwo):
"""
Parameters:
- domain
- keyOne
- keyTwo
"""
self.send_setUnion(domain, keyOne, keyTwo)
return self.recv_setUnion()
def send_setUnion(self, domain, keyOne, keyTwo):
self._oprot.writeMessageBegin('setUnion', TMessageType.CALL, self._seqid)
args = setUnion_args()
args.domain = domain
args.keyOne = keyOne
args.keyTwo = keyTwo
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setUnion(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setUnion_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "setUnion failed: unknown result");
def setIntersection(self, domain, keyOne, keyTwo):
"""
Parameters:
- domain
- keyOne
- keyTwo
"""
self.send_setIntersection(domain, keyOne, keyTwo)
return self.recv_setIntersection()
def send_setIntersection(self, domain, keyOne, keyTwo):
self._oprot.writeMessageBegin('setIntersection', TMessageType.CALL, self._seqid)
args = setIntersection_args()
args.domain = domain
args.keyOne = keyOne
args.keyTwo = keyTwo
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setIntersection(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setIntersection_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "setIntersection failed: unknown result");
def size(self, domain, key):
"""
Parameters:
- domain
- key
"""
self.send_size(domain, key)
return self.recv_size()
def send_size(self, domain, key):
self._oprot.writeMessageBegin('size', TMessageType.CALL, self._seqid)
args = size_args()
args.domain = domain
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_size(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = size_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "size failed: unknown result");
def multiMember(self, domain, setKey, setVals):
"""
Parameters:
- domain
- setKey
- setVals
"""
self.send_multiMember(domain, setKey, setVals)
return self.recv_multiMember()
def send_multiMember(self, domain, setKey, setVals):
self._oprot.writeMessageBegin('multiMember', TMessageType.CALL, self._seqid)
args = multiMember_args()
args.domain = domain
args.setKey = setKey
args.setVals = setVals
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_multiMember(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = multiMember_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "multiMember failed: unknown result");
class Processor(elephantdb.ElephantDBShared.Processor, Iface, TProcessor):
def __init__(self, handler):
elephantdb.ElephantDBShared.Processor.__init__(self, handler)
self._processMap["member"] = Processor.process_member
self._processMap["members"] = Processor.process_members
self._processMap["setDiff"] = Processor.process_setDiff
self._processMap["setUnion"] = Processor.process_setUnion
self._processMap["setIntersection"] = Processor.process_setIntersection
self._processMap["size"] = Processor.process_size
self._processMap["multiMember"] = Processor.process_multiMember
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_member(self, seqid, iprot, oprot):
args = member_args()
args.read(iprot)
iprot.readMessageEnd()
result = member_result()
result.success = self._handler.member(args.domain, args.setKey, args.member)
oprot.writeMessageBegin("member", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_members(self, seqid, iprot, oprot):
args = members_args()
args.read(iprot)
iprot.readMessageEnd()
result = members_result()
result.success = self._handler.members(args.domain, args.setKey)
oprot.writeMessageBegin("members", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setDiff(self, seqid, iprot, oprot):
args = setDiff_args()
args.read(iprot)
iprot.readMessageEnd()
result = setDiff_result()
result.success = self._handler.setDiff(args.domain, args.keyOne, args.keyTwo)
oprot.writeMessageBegin("setDiff", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setUnion(self, seqid, iprot, oprot):
args = setUnion_args()
args.read(iprot)
iprot.readMessageEnd()
result = setUnion_result()
result.success = self._handler.setUnion(args.domain, args.keyOne, args.keyTwo)
oprot.writeMessageBegin("setUnion", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setIntersection(self, seqid, iprot, oprot):
args = setIntersection_args()
args.read(iprot)
iprot.readMessageEnd()
result = setIntersection_result()
result.success = self._handler.setIntersection(args.domain, args.keyOne, args.keyTwo)
oprot.writeMessageBegin("setIntersection", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_size(self, seqid, iprot, oprot):
args = size_args()
args.read(iprot)
iprot.readMessageEnd()
result = size_result()
result.success = self._handler.size(args.domain, args.key)
oprot.writeMessageBegin("size", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_multiMember(self, seqid, iprot, oprot):
args = multiMember_args()
args.read(iprot)
iprot.readMessageEnd()
result = multiMember_result()
result.success = self._handler.multiMember(args.domain, args.setKey, args.setVals)
oprot.writeMessageBegin("multiMember", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class member_args:
"""
Attributes:
- domain
- setKey
- member
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'domain', None, None, ), # 1
(2, TType.STRING, 'setKey', None, None, ), # 2
(3, TType.STRING, 'member', None, None, ), # 3
)
def __init__(self, domain=None, setKey=None, member=None,):
self.domain = domain
self.setKey = setKey
self.member = member
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.domain = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.setKey = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.member = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('member_args')
if self.domain is not None:
oprot.writeFieldBegin('domain', TType.STRING, 1)
oprot.writeString(self.domain.encode('utf-8'))
oprot.writeFieldEnd()
if self.setKey is not None:
oprot.writeFieldBegin('setKey', TType.STRING, 2)
oprot.writeString(self.setKey.encode('utf-8'))
oprot.writeFieldEnd()
if self.member is not None:
oprot.writeFieldBegin('member', TType.STRING, 3)
oprot.writeString(self.member.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class member_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('member_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class members_args:
"""
Attributes:
- domain
- setKey
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'domain', None, None, ), # 1
(2, TType.STRING, 'setKey', None, None, ), # 2
)
def __init__(self, domain=None, setKey=None,):
self.domain = domain
self.setKey = setKey
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.domain = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.setKey = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('members_args')
if self.domain is not None:
oprot.writeFieldBegin('domain', TType.STRING, 1)
oprot.writeString(self.domain.encode('utf-8'))
oprot.writeFieldEnd()
if self.setKey is not None:
oprot.writeFieldBegin('setKey', TType.STRING, 2)
oprot.writeString(self.setKey.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class members_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('members_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setDiff_args:
"""
Attributes:
- domain
- keyOne
- keyTwo
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'domain', None, None, ), # 1
(2, TType.STRING, 'keyOne', None, None, ), # 2
(3, TType.STRING, 'keyTwo', None, None, ), # 3
)
def __init__(self, domain=None, keyOne=None, keyTwo=None,):
self.domain = domain
self.keyOne = keyOne
self.keyTwo = keyTwo
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.domain = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.keyOne = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.keyTwo = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setDiff_args')
if self.domain is not None:
oprot.writeFieldBegin('domain', TType.STRING, 1)
oprot.writeString(self.domain.encode('utf-8'))
oprot.writeFieldEnd()
if self.keyOne is not None:
oprot.writeFieldBegin('keyOne', TType.STRING, 2)
oprot.writeString(self.keyOne.encode('utf-8'))
oprot.writeFieldEnd()
if self.keyTwo is not None:
oprot.writeFieldBegin('keyTwo', TType.STRING, 3)
oprot.writeString(self.keyTwo.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setDiff_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = iprot.readString().decode('utf-8')
self.success.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setDiff_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter6 in self.success:
oprot.writeString(iter6.encode('utf-8'))
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setUnion_args:
"""
Attributes:
- domain
- keyOne
- keyTwo
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'domain', None, None, ), # 1
(2, TType.STRING, 'keyOne', None, None, ), # 2
(3, TType.STRING, 'keyTwo', None, None, ), # 3
)
def __init__(self, domain=None, keyOne=None, keyTwo=None,):
self.domain = domain
self.keyOne = keyOne
self.keyTwo = keyTwo
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.domain = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.keyOne = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.keyTwo = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setUnion_args')
if self.domain is not None:
oprot.writeFieldBegin('domain', TType.STRING, 1)
oprot.writeString(self.domain.encode('utf-8'))
oprot.writeFieldEnd()
if self.keyOne is not None:
oprot.writeFieldBegin('keyOne', TType.STRING, 2)
oprot.writeString(self.keyOne.encode('utf-8'))
oprot.writeFieldEnd()
if self.keyTwo is not None:
oprot.writeFieldBegin('keyTwo', TType.STRING, 3)
oprot.writeString(self.keyTwo.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setUnion_result:
  # Thrift-generated result struct for the setUnion RPC (do not edit by hand).
  """
  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in xrange(_size7):
            _elem12 = iprot.readString().decode('utf-8')
            self.success.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setUnion_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRING, len(self.success))
      for iter13 in self.success:
        oprot.writeString(iter13.encode('utf-8'))
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setIntersection_args:
  # Thrift-generated argument struct for the setIntersection RPC.
  """
  Attributes:
   - domain
   - keyOne
   - keyTwo
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'domain', None, None, ), # 1
    (2, TType.STRING, 'keyOne', None, None, ), # 2
    (3, TType.STRING, 'keyTwo', None, None, ), # 3
  )

  def __init__(self, domain=None, keyOne=None, keyTwo=None,):
    self.domain = domain
    self.keyOne = keyOne
    self.keyTwo = keyTwo

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.domain = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.keyOne = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.keyTwo = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setIntersection_args')
    if self.domain is not None:
      oprot.writeFieldBegin('domain', TType.STRING, 1)
      oprot.writeString(self.domain.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.keyOne is not None:
      oprot.writeFieldBegin('keyOne', TType.STRING, 2)
      oprot.writeString(self.keyOne.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.keyTwo is not None:
      oprot.writeFieldBegin('keyTwo', TType.STRING, 3)
      oprot.writeString(self.keyTwo.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setIntersection_result:
  # Thrift-generated result struct for the setIntersection RPC.
  """
  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype17, _size14) = iprot.readListBegin()
          for _i18 in xrange(_size14):
            _elem19 = iprot.readString().decode('utf-8')
            self.success.append(_elem19)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setIntersection_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRING, len(self.success))
      for iter20 in self.success:
        oprot.writeString(iter20.encode('utf-8'))
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class size_args:
  # Thrift-generated argument struct for the size RPC.
  """
  Attributes:
   - domain
   - key
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'domain', None, None, ), # 1
    (2, TType.STRING, 'key', None, None, ), # 2
  )

  def __init__(self, domain=None, key=None,):
    self.domain = domain
    self.key = key

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.domain = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.key = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('size_args')
    if self.domain is not None:
      oprot.writeFieldBegin('domain', TType.STRING, 1)
      oprot.writeString(self.domain.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 2)
      oprot.writeString(self.key.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class size_result:
  # Thrift-generated result struct for the size RPC (a single i64 count).
  """
  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.I64, 'success', None, None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I64:
          self.success = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('size_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I64, 0)
      oprot.writeI64(self.success)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class multiMember_args:
  # Thrift-generated argument struct for the multiMember RPC.
  """
  Attributes:
   - domain
   - setKey
   - setVals
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'domain', None, None, ), # 1
    (2, TType.STRING, 'setKey', None, None, ), # 2
    (3, TType.LIST, 'setVals', (TType.STRING,None), None, ), # 3
  )

  def __init__(self, domain=None, setKey=None, setVals=None,):
    self.domain = domain
    self.setKey = setKey
    self.setVals = setVals

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; unknown or mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.domain = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.setKey = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          self.setVals = []
          (_etype24, _size21) = iprot.readListBegin()
          for _i25 in xrange(_size21):
            _elem26 = iprot.readString().decode('utf-8')
            self.setVals.append(_elem26)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('multiMember_args')
    if self.domain is not None:
      oprot.writeFieldBegin('domain', TType.STRING, 1)
      oprot.writeString(self.domain.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.setKey is not None:
      oprot.writeFieldBegin('setKey', TType.STRING, 2)
      oprot.writeString(self.setKey.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.setVals is not None:
      oprot.writeFieldBegin('setVals', TType.LIST, 3)
      oprot.writeListBegin(TType.STRING, len(self.setVals))
      for iter27 in self.setVals:
        oprot.writeString(iter27.encode('utf-8'))
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class multiMember_result:
  # Thrift-generated result struct for multiMember: a list of elephantdb Value structs.
  """
  Attributes:
   - success
  """

  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(elephantdb.ttypes.Value, elephantdb.ttypes.Value.thrift_spec)), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    # Fast path: C-accelerated decode when protocol/transport/spec allow it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: pure-Python decode; each element delegates to Value.read().
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype31, _size28) = iprot.readListBegin()
          for _i32 in xrange(_size28):
            _elem33 = elephantdb.ttypes.Value()
            _elem33.read(iprot)
            self.success.append(_elem33)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize; mirror of read() with the same accelerated fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('multiMember_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter34 in self.success:
        iter34.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Generated struct: no required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
| 30.656293
| 188
| 0.657448
| 5,101
| 44,329
| 5.493433
| 0.037052
| 0.01588
| 0.028585
| 0.025694
| 0.871637
| 0.837949
| 0.82967
| 0.809543
| 0.784562
| 0.784562
| 0
| 0.006972
| 0.220262
| 44,329
| 1,445
| 189
| 30.677509
| 0.803732
| 0.027364
| 0
| 0.8181
| 1
| 0
| 0.03143
| 0.000518
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123077
| false
| 0.006335
| 0.006335
| 0.038009
| 0.247964
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
be14f860689aa4182b3ccc3696f7abb270c577bb
| 79,612
|
py
|
Python
|
msgraph/cli/command_modules/applications/azext_applications/generated/custom.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
msgraph/cli/command_modules/applications/azext_applications/generated/custom.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph/cli/command_modules/applications/azext_applications/generated/custom.py
|
microsoftgraph/msgraph-cli-archived
|
489f70bf4ede1ce67b84bfb31e66da3e4db76062
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
def applications_application_list(client, orderby=None, select=None, expand=None):
    """List applications via client.list_application, forwarding the optional
    OData ordering/projection/expansion parameters unchanged."""
    return client.list_application(orderby=orderby, select=select, expand=expand)
def applications_application_create(client, application_id=None, id_=None, deleted_date_time=None, add_ins=None, app_id=None, application_template_id=None, app_roles=None, created_date_time=None, description=None, display_name=None, group_membership_claims=None, identifier_uris=None, info=None, is_device_only_auth_supported=None, is_fallback_public_client=None, key_credentials=None, logo=None, notes=None, oauth2_require_post_response=None, parental_control_settings=None, password_credentials=None, public_client=None, publisher_domain=None, required_resource_access=None, sign_in_audience=None, tags=None, token_encryption_key_id=None, created_on_behalf_of=None, extension_properties=None, home_realm_discovery_policies=None, owners=None, token_issuance_policies=None, token_lifetime_policies=None, home_page_url=None, implicit_grant_settings=None, logout_url=None, redirect_uris=None, access_token=None, id_token=None, saml2_token=None, accept_mapped_claims=None, known_client_applications=None, oauth2_permission_scopes=None, pre_authorized_applications=None, requested_access_token_version=None):
    """Build an application request body from the flat CLI arguments and call
    client.update_application when application_id is given, otherwise
    client.create_application.

    The web/optional_claims/api sub-objects are assembled from their
    corresponding flattened parameters; all keys are sent even when None,
    exactly as the original item-by-item assignments did.
    """
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'add_ins': add_ins,
        'app_id': app_id,
        'application_template_id': application_template_id,
        'app_roles': app_roles,
        'created_date_time': created_date_time,
        'description': description,
        'display_name': display_name,
        'group_membership_claims': group_membership_claims,
        'identifier_uris': identifier_uris,
        'info': info,
        'is_device_only_auth_supported': is_device_only_auth_supported,
        'is_fallback_public_client': is_fallback_public_client,
        'key_credentials': key_credentials,
        'logo': logo,
        'notes': notes,
        'oauth2_require_post_response': oauth2_require_post_response,
        'parental_control_settings': parental_control_settings,
        'password_credentials': password_credentials,
        'public_client': public_client,
        'publisher_domain': publisher_domain,
        'required_resource_access': required_resource_access,
        'sign_in_audience': sign_in_audience,
        'tags': tags,
        'token_encryption_key_id': token_encryption_key_id,
        'created_on_behalf_of': created_on_behalf_of,
        'extension_properties': extension_properties,
        'home_realm_discovery_policies': home_realm_discovery_policies,
        'owners': owners,
        'token_issuance_policies': token_issuance_policies,
        'token_lifetime_policies': token_lifetime_policies,
        'web': {
            'home_page_url': home_page_url,
            'implicit_grant_settings': implicit_grant_settings,
            'logout_url': logout_url,
            'redirect_uris': redirect_uris,
        },
        'optional_claims': {
            'access_token': access_token,
            'id_token': id_token,
            'saml2_token': saml2_token,
        },
        'api': {
            'accept_mapped_claims': accept_mapped_claims,
            'known_client_applications': known_client_applications,
            'oauth2_permission_scopes': oauth2_permission_scopes,
            'pre_authorized_applications': pre_authorized_applications,
            'requested_access_token_version': requested_access_token_version,
        },
    }
    # An existing application_id turns the create into an update.
    if application_id is not None:
        return client.update_application(application_id=application_id, body=body)
    return client.create_application(body=body)
def applications_application_delete_application(client, application_id, if_match=None):
    """Delete an application; if_match carries the optional ETag precondition."""
    return client.delete_application(application_id=application_id, if_match=if_match)


def applications_application_set_logo(client, application_id, data):
    """Upload *data* as the application's logo."""
    return client.set_logo(application_id=application_id, data=data)


def applications_application_show_application(client, application_id, select=None, expand=None):
    """Fetch a single application, optionally projecting/expanding properties."""
    return client.get_application(application_id=application_id, select=select, expand=expand)


def applications_application_show_logo(client, application_id):
    """Download the application's logo."""
    return client.get_logo(application_id=application_id)
def applications_application_add_key(client, application_id, key_credential=None, password_credential=None, proof=None):
    """Add a key credential to an application (proof-of-possession flow)."""
    payload = {'key_credential': key_credential,
               'password_credential': password_credential,
               'proof': proof}
    return client.add_key(application_id=application_id, body=payload)


def applications_application_add_password(client, application_id, password_credential=None):
    """Add a password credential to an application."""
    payload = {'password_credential': password_credential}
    return client.add_password(application_id=application_id, body=payload)


def applications_application_check_member_group(client, application_id, group_ids=None):
    """Check the application's membership in the given groups."""
    payload = {'group_ids': group_ids}
    return client.check_member_groups(application_id=application_id, body=payload)


def applications_application_check_member_object(client, application_id, ids=None):
    """Check the application's membership in the given directory objects."""
    payload = {'ids': ids}
    return client.check_member_objects(application_id=application_id, body=payload)
def applications_application_create_extension_property(client, application_id, id_=None, deleted_date_time=None, app_display_name=None, data_type=None, is_synced_from_on_premises=None, name=None, target_objects=None):
    """Create a directory extension property on an application."""
    payload = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_display_name': app_display_name,
        'data_type': data_type,
        'is_synced_from_on_premises': is_synced_from_on_premises,
        'name': name,
        'target_objects': target_objects,
    }
    return client.create_extension_properties(application_id=application_id, body=payload)
def applications_application_create_ref_home_realm_discovery_policy(client, application_id, body):
    """Attach a home-realm-discovery policy reference to an application."""
    return client.create_ref_home_realm_discovery_policies(application_id=application_id, body=body)


def applications_application_create_ref_owner(client, application_id, body):
    """Attach an owner reference to an application."""
    return client.create_ref_owners(application_id=application_id, body=body)


def applications_application_create_ref_token_issuance_policy(client, application_id, body):
    """Attach a token-issuance policy reference to an application."""
    return client.create_ref_token_issuance_policies(application_id=application_id, body=body)


def applications_application_create_ref_token_lifetime_policy(client, application_id, body):
    """Attach a token-lifetime policy reference to an application."""
    return client.create_ref_token_lifetime_policies(application_id=application_id, body=body)
def applications_application_delete_extension_property(client, application_id, extension_property_id, if_match=None):
    """Delete one extension property of an application (optional ETag via if_match)."""
    return client.delete_extension_properties(application_id=application_id,
                                              extension_property_id=extension_property_id,
                                              if_match=if_match)


def applications_application_delete_ref_created_on_behalf_of(client, application_id, if_match=None):
    """Remove the createdOnBehalfOf reference of an application."""
    return client.delete_ref_created_on_behalf_of(application_id=application_id, if_match=if_match)


def applications_application_delta(client):
    """Track application changes via the delta endpoint."""
    return client.delta()
def applications_application_get_available_extension_property(client, is_synced_from_on_premises=None):
    """Fetch available directory extension properties.

    is_synced_from_on_premises defaults to False when the caller passes None;
    any other value is forwarded unchanged.
    """
    # The generated code normalized None twice: it first rebound the parameter
    # to False, which turned the subsequent `False if ... is None else ...`
    # into dead code. One coalescing expression is behaviorally identical.
    body = {
        'is_synced_from_on_premises':
            False if is_synced_from_on_premises is None else is_synced_from_on_premises,
    }
    return client.get_available_extension_properties(body=body)
def applications_application_get_by_id(client, ids=None, types=None):
    """Resolve directory objects by id, optionally constrained to given types."""
    payload = {'ids': ids, 'types': types}
    return client.get_by_ids(body=payload)
def applications_application_get_member_group(client, application_id, security_enabled_only=None):
    """Return the groups the application is a member of.

    security_enabled_only defaults to False when the caller passes None; any
    other value is forwarded unchanged.
    """
    # The generated code rebound the parameter to False first, making the
    # later `False if ... is None else ...` a dead branch; a single
    # coalescing expression preserves the behavior.
    body = {'security_enabled_only':
            False if security_enabled_only is None else security_enabled_only}
    return client.get_member_groups(application_id=application_id, body=body)


def applications_application_get_member_object(client, application_id, security_enabled_only=None):
    """Return the directory objects the application is a member of.

    security_enabled_only defaults to False when the caller passes None; any
    other value is forwarded unchanged.
    """
    # Same dead-branch simplification as get_member_group above.
    body = {'security_enabled_only':
            False if security_enabled_only is None else security_enabled_only}
    return client.get_member_objects(application_id=application_id, body=body)
def applications_application_list_extension_property(client, application_id, orderby=None, select=None, expand=None):
    """List an application's extension properties with optional OData options."""
    return client.list_extension_properties(application_id=application_id,
                                            orderby=orderby, select=select, expand=expand)


def applications_application_list_home_realm_discovery_policy(client, application_id, orderby=None, select=None, expand=None):
    """List an application's home-realm-discovery policies."""
    return client.list_home_realm_discovery_policies(application_id=application_id,
                                                     orderby=orderby, select=select, expand=expand)


def applications_application_list_owner(client, application_id, orderby=None, select=None, expand=None):
    """List an application's owners."""
    return client.list_owners(application_id=application_id,
                              orderby=orderby, select=select, expand=expand)
def applications_application_list_ref_home_realm_discovery_policy(client, application_id, orderby=None):
    """List home-realm-discovery policy references of an application."""
    return client.list_ref_home_realm_discovery_policies(application_id=application_id, orderby=orderby)


def applications_application_list_ref_owner(client, application_id, orderby=None):
    """List owner references of an application."""
    return client.list_ref_owners(application_id=application_id, orderby=orderby)


def applications_application_list_ref_token_issuance_policy(client, application_id, orderby=None):
    """List token-issuance policy references of an application."""
    return client.list_ref_token_issuance_policies(application_id=application_id, orderby=orderby)


def applications_application_list_ref_token_lifetime_policy(client, application_id, orderby=None):
    """List token-lifetime policy references of an application."""
    return client.list_ref_token_lifetime_policies(application_id=application_id, orderby=orderby)
def applications_application_list_token_issuance_policy(client, application_id, orderby=None, select=None, expand=None):
    """List an application's token-issuance policies."""
    return client.list_token_issuance_policies(application_id=application_id,
                                               orderby=orderby, select=select, expand=expand)


def applications_application_list_token_lifetime_policy(client, application_id, orderby=None, select=None, expand=None):
    """List an application's token-lifetime policies."""
    return client.list_token_lifetime_policies(application_id=application_id,
                                               orderby=orderby, select=select, expand=expand)
def applications_application_remove_key(client, application_id, key_id=None, proof=None):
    """Remove a key credential from an application (proof-of-possession flow)."""
    payload = {'key_id': key_id, 'proof': proof}
    return client.remove_key(application_id=application_id, body=payload)


def applications_application_remove_password(client, application_id, key_id=None):
    """Remove a password credential from an application."""
    payload = {'key_id': key_id}
    return client.remove_password(application_id=application_id, body=payload)


def applications_application_restore(client, application_id):
    """Restore a soft-deleted application."""
    return client.restore(application_id=application_id)
def applications_application_set_ref_created_on_behalf_of(client, application_id, body):
    """Set the createdOnBehalfOf reference of an application."""
    return client.set_ref_created_on_behalf_of(application_id=application_id, body=body)


def applications_application_show_created_on_behalf_of(client, application_id, select=None, expand=None):
    """Fetch the createdOnBehalfOf object of an application."""
    return client.get_created_on_behalf_of(application_id=application_id,
                                           select=select, expand=expand)


def applications_application_show_extension_property(client, application_id, extension_property_id, select=None, expand=None):
    """Fetch one extension property of an application."""
    return client.get_extension_properties(application_id=application_id,
                                           extension_property_id=extension_property_id,
                                           select=select, expand=expand)


def applications_application_show_ref_created_on_behalf_of(client, application_id):
    """Fetch the createdOnBehalfOf reference of an application."""
    return client.get_ref_created_on_behalf_of(application_id=application_id)
def applications_application_update_extension_property(client, application_id, extension_property_id, id_=None, deleted_date_time=None, app_display_name=None, data_type=None, is_synced_from_on_premises=None, name=None, target_objects=None):
    """Update one extension property of an application; every body key is
    sent (None included), mirroring the generated create counterpart."""
    payload = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_display_name': app_display_name,
        'data_type': data_type,
        'is_synced_from_on_premises': is_synced_from_on_premises,
        'name': name,
        'target_objects': target_objects,
    }
    return client.update_extension_properties(application_id=application_id,
                                              extension_property_id=extension_property_id,
                                              body=payload)
def applications_application_validate_property(client, entity_type=None, display_name=None, mail_nickname=None, on_behalf_of_user_id=None):
    """Validate directory object properties (name/nickname) before creation."""
    payload = {
        'entity_type': entity_type,
        'display_name': display_name,
        'mail_nickname': mail_nickname,
        'on_behalf_of_user_id': on_behalf_of_user_id,
    }
    return client.validate_properties(body=payload)
def applications_group_create_app_role_assignment(client, group_id, id_=None, deleted_date_time=None, app_role_id=None, created_date_time=None, principal_display_name=None, principal_id=None, principal_type=None, resource_display_name=None, resource_id=None):
    """Create an app-role assignment for a group from the flat CLI arguments."""
    payload = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.create_app_role_assignments(group_id=group_id, body=payload)
def applications_group_delete_app_role_assignment(client, group_id, app_role_assignment_id, if_match=None):
    """Delete one app-role assignment of a group (optional ETag via if_match)."""
    return client.delete_app_role_assignments(group_id=group_id,
                                              app_role_assignment_id=app_role_assignment_id,
                                              if_match=if_match)


def applications_group_list_app_role_assignment(client, group_id, orderby=None, select=None, expand=None):
    """List a group's app-role assignments with optional OData options."""
    return client.list_app_role_assignments(group_id=group_id,
                                            orderby=orderby, select=select, expand=expand)


def applications_group_show_app_role_assignment(client, group_id, app_role_assignment_id, select=None, expand=None):
    """Fetch one app-role assignment of a group."""
    return client.get_app_role_assignments(group_id=group_id,
                                           app_role_assignment_id=app_role_assignment_id,
                                           select=select, expand=expand)
def applications_group_update_app_role_assignment(client, group_id, app_role_assignment_id, id_=None, deleted_date_time=None, app_role_id=None, created_date_time=None, principal_display_name=None, principal_id=None, principal_type=None, resource_display_name=None, resource_id=None):
    """Update one app-role assignment of a group from the flat CLI arguments."""
    payload = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.update_app_role_assignments(group_id=group_id,
                                              app_role_assignment_id=app_role_assignment_id,
                                              body=payload)
def applications_service_principal_service_principal_create_service_principal(client, id_=None, deleted_date_time=None, account_enabled=None, add_ins=None, alternative_names=None, app_description=None, app_display_name=None, app_id=None, application_template_id=None, app_owner_organization_id=None, app_role_assignment_required=None, app_roles=None, description=None, display_name=None, homepage=None, info=None, key_credentials=None, login_url=None, logout_url=None, notes=None, notification_email_addresses=None, oauth2_permission_scopes=None, password_credentials=None, preferred_single_sign_on_mode=None, preferred_token_signing_key_thumbprint=None, reply_urls=None, service_principal_names=None, service_principal_type=None, tags=None, token_encryption_key_id=None, app_role_assigned_to=None, app_role_assignments=None, claims_mapping_policies=None, created_objects=None, endpoints=None, home_realm_discovery_policies=None, member_of=None, oauth2_permission_grants=None, owned_objects=None, owners=None, token_issuance_policies=None, token_lifetime_policies=None, transitive_member_of=None, relay_state=None):
    """Create a service principal; the request body is assembled from the flat
    CLI arguments, with relay_state nested under saml_single_sign_on_settings.
    All keys are sent even when None, matching the generated assignments."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'account_enabled': account_enabled,
        'add_ins': add_ins,
        'alternative_names': alternative_names,
        'app_description': app_description,
        'app_display_name': app_display_name,
        'app_id': app_id,
        'application_template_id': application_template_id,
        'app_owner_organization_id': app_owner_organization_id,
        'app_role_assignment_required': app_role_assignment_required,
        'app_roles': app_roles,
        'description': description,
        'display_name': display_name,
        'homepage': homepage,
        'info': info,
        'key_credentials': key_credentials,
        'login_url': login_url,
        'logout_url': logout_url,
        'notes': notes,
        'notification_email_addresses': notification_email_addresses,
        'oauth2_permission_scopes': oauth2_permission_scopes,
        'password_credentials': password_credentials,
        'preferred_single_sign_on_mode': preferred_single_sign_on_mode,
        'preferred_token_signing_key_thumbprint': preferred_token_signing_key_thumbprint,
        'reply_urls': reply_urls,
        'service_principal_names': service_principal_names,
        'service_principal_type': service_principal_type,
        'tags': tags,
        'token_encryption_key_id': token_encryption_key_id,
        'app_role_assigned_to': app_role_assigned_to,
        'app_role_assignments': app_role_assignments,
        'claims_mapping_policies': claims_mapping_policies,
        'created_objects': created_objects,
        'endpoints': endpoints,
        'home_realm_discovery_policies': home_realm_discovery_policies,
        'member_of': member_of,
        'oauth2_permission_grants': oauth2_permission_grants,
        'owned_objects': owned_objects,
        'owners': owners,
        'token_issuance_policies': token_issuance_policies,
        'token_lifetime_policies': token_lifetime_policies,
        'transitive_member_of': transitive_member_of,
        'saml_single_sign_on_settings': {'relay_state': relay_state},
    }
    return client.create_service_principal(body=body)
def applications_service_principal_service_principal_delete_service_principal(
        client, service_principal_id, if_match=None):
    """Delegate to client.delete_service_principal, forwarding the optional ETag."""
    return client.delete_service_principal(
        service_principal_id=service_principal_id, if_match=if_match)
def applications_service_principal_service_principal_list_service_principal(
        client, orderby=None, select=None, expand=None):
    """Delegate to client.list_service_principal with optional OData query options."""
    return client.list_service_principal(
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_service_principal_show_service_principal(
        client, service_principal_id, select=None, expand=None):
    """Delegate to client.get_service_principal with optional OData query options."""
    return client.get_service_principal(
        service_principal_id=service_principal_id, select=select, expand=expand)
def applications_service_principal_service_principal_update_service_principal(
        client, service_principal_id, id_=None, deleted_date_time=None,
        account_enabled=None, add_ins=None, alternative_names=None,
        app_description=None, app_display_name=None, app_id=None,
        application_template_id=None, app_owner_organization_id=None,
        app_role_assignment_required=None, app_roles=None, description=None,
        display_name=None, homepage=None, info=None, key_credentials=None,
        login_url=None, logout_url=None, notes=None,
        notification_email_addresses=None, oauth2_permission_scopes=None,
        password_credentials=None, preferred_single_sign_on_mode=None,
        preferred_token_signing_key_thumbprint=None, reply_urls=None,
        service_principal_names=None, service_principal_type=None, tags=None,
        token_encryption_key_id=None, app_role_assigned_to=None,
        app_role_assignments=None, claims_mapping_policies=None,
        created_objects=None, endpoints=None, home_realm_discovery_policies=None,
        member_of=None, oauth2_permission_grants=None, owned_objects=None,
        owners=None, token_issuance_policies=None, token_lifetime_policies=None,
        transitive_member_of=None, relay_state=None):
    """Build a servicePrincipal request body and delegate to client.update_service_principal.

    All keys are always present in the body (None when the caller did not
    supply a value), matching the original assignment-by-assignment builder.
    """
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'account_enabled': account_enabled,
        'add_ins': add_ins,
        'alternative_names': alternative_names,
        'app_description': app_description,
        'app_display_name': app_display_name,
        'app_id': app_id,
        'application_template_id': application_template_id,
        'app_owner_organization_id': app_owner_organization_id,
        'app_role_assignment_required': app_role_assignment_required,
        'app_roles': app_roles,
        'description': description,
        'display_name': display_name,
        'homepage': homepage,
        'info': info,
        'key_credentials': key_credentials,
        'login_url': login_url,
        'logout_url': logout_url,
        'notes': notes,
        'notification_email_addresses': notification_email_addresses,
        'oauth2_permission_scopes': oauth2_permission_scopes,
        'password_credentials': password_credentials,
        'preferred_single_sign_on_mode': preferred_single_sign_on_mode,
        'preferred_token_signing_key_thumbprint': preferred_token_signing_key_thumbprint,
        'reply_urls': reply_urls,
        'service_principal_names': service_principal_names,
        'service_principal_type': service_principal_type,
        'tags': tags,
        'token_encryption_key_id': token_encryption_key_id,
        'app_role_assigned_to': app_role_assigned_to,
        'app_role_assignments': app_role_assignments,
        'claims_mapping_policies': claims_mapping_policies,
        'created_objects': created_objects,
        'endpoints': endpoints,
        'home_realm_discovery_policies': home_realm_discovery_policies,
        'member_of': member_of,
        'oauth2_permission_grants': oauth2_permission_grants,
        'owned_objects': owned_objects,
        'owners': owners,
        'token_issuance_policies': token_issuance_policies,
        'token_lifetime_policies': token_lifetime_policies,
        'transitive_member_of': transitive_member_of,
        # Nested singleton: only the relay_state flat parameter maps into it.
        'saml_single_sign_on_settings': {'relay_state': relay_state},
    }
    return client.update_service_principal(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_add_key(
        client, service_principal_id, key_credential=None,
        password_credential=None, proof=None):
    """Build the addKey request body and delegate to client.add_key."""
    body = {
        'key_credential': key_credential,
        'password_credential': password_credential,
        'proof': proof,
    }
    return client.add_key(service_principal_id=service_principal_id, body=body)
def applications_service_principal_add_password(
        client, service_principal_id, password_credential=None):
    """Build the addPassword request body and delegate to client.add_password."""
    body = {'password_credential': password_credential}
    return client.add_password(service_principal_id=service_principal_id,
                               body=body)
def applications_service_principal_check_member_group(
        client, service_principal_id, group_ids=None):
    """Build the checkMemberGroups body and delegate to client.check_member_groups."""
    body = {'group_ids': group_ids}
    return client.check_member_groups(service_principal_id=service_principal_id,
                                      body=body)
def applications_service_principal_check_member_object(
        client, service_principal_id, ids=None):
    """Build the checkMemberObjects body and delegate to client.check_member_objects."""
    body = {'ids': ids}
    return client.check_member_objects(service_principal_id=service_principal_id,
                                       body=body)
def applications_service_principal_create_app_role_assigned_to(
        client, service_principal_id, id_=None, deleted_date_time=None,
        app_role_id=None, created_date_time=None, principal_display_name=None,
        principal_id=None, principal_type=None, resource_display_name=None,
        resource_id=None):
    """Build an appRoleAssignment body and delegate to client.create_app_role_assigned_to."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.create_app_role_assigned_to(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_app_role_assignment(
        client, service_principal_id, id_=None, deleted_date_time=None,
        app_role_id=None, created_date_time=None, principal_display_name=None,
        principal_id=None, principal_type=None, resource_display_name=None,
        resource_id=None):
    """Build an appRoleAssignment body and delegate to client.create_app_role_assignments."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.create_app_role_assignments(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_endpoint(
        client, service_principal_id, id_=None, deleted_date_time=None,
        capability=None, provider_id=None, provider_name=None,
        provider_resource_id=None, uri=None):
    """Build an endpoint body and delegate to client.create_endpoints."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'capability': capability,
        'provider_id': provider_id,
        'provider_name': provider_name,
        'provider_resource_id': provider_resource_id,
        'uri': uri,
    }
    return client.create_endpoints(service_principal_id=service_principal_id,
                                   body=body)
def applications_service_principal_create_ref_claim_mapping_policy(
        client, service_principal_id, body):
    """Delegate to client.create_ref_claims_mapping_policies."""
    return client.create_ref_claims_mapping_policies(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_created_object(
        client, service_principal_id, body):
    """Delegate to client.create_ref_created_objects."""
    return client.create_ref_created_objects(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_home_realm_discovery_policy(
        client, service_principal_id, body):
    """Delegate to client.create_ref_home_realm_discovery_policies."""
    return client.create_ref_home_realm_discovery_policies(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_member_of(
        client, service_principal_id, body):
    """Delegate to client.create_ref_member_of."""
    return client.create_ref_member_of(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_oauth2_permission_grant(
        client, service_principal_id, body):
    """Delegate to client.create_ref_oauth2_permission_grants."""
    return client.create_ref_oauth2_permission_grants(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_owned_object(
        client, service_principal_id, body):
    """Delegate to client.create_ref_owned_objects."""
    return client.create_ref_owned_objects(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_owner(
        client, service_principal_id, body):
    """Delegate to client.create_ref_owners."""
    return client.create_ref_owners(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_token_issuance_policy(
        client, service_principal_id, body):
    """Delegate to client.create_ref_token_issuance_policies."""
    return client.create_ref_token_issuance_policies(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_token_lifetime_policy(
        client, service_principal_id, body):
    """Delegate to client.create_ref_token_lifetime_policies."""
    return client.create_ref_token_lifetime_policies(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_create_ref_transitive_member_of(
        client, service_principal_id, body):
    """Delegate to client.create_ref_transitive_member_of."""
    return client.create_ref_transitive_member_of(
        service_principal_id=service_principal_id, body=body)
def applications_service_principal_delete_app_role_assigned_to(
        client, service_principal_id, app_role_assignment_id, if_match=None):
    """Delegate to client.delete_app_role_assigned_to, forwarding the optional ETag."""
    return client.delete_app_role_assigned_to(
        service_principal_id=service_principal_id,
        app_role_assignment_id=app_role_assignment_id,
        if_match=if_match)
def applications_service_principal_delete_app_role_assignment(
        client, service_principal_id, app_role_assignment_id, if_match=None):
    """Delegate to client.delete_app_role_assignments, forwarding the optional ETag."""
    return client.delete_app_role_assignments(
        service_principal_id=service_principal_id,
        app_role_assignment_id=app_role_assignment_id,
        if_match=if_match)
def applications_service_principal_delete_endpoint(
        client, service_principal_id, endpoint_id, if_match=None):
    """Delegate to client.delete_endpoints, forwarding the optional ETag."""
    return client.delete_endpoints(
        service_principal_id=service_principal_id,
        endpoint_id=endpoint_id,
        if_match=if_match)
def applications_service_principal_delta(client):
    """Delegate to client.delta with no arguments."""
    return client.delta()
def applications_service_principal_get_available_extension_property(
        client, is_synced_from_on_premises=None):
    """Delegate to client.get_available_extension_properties.

    is_synced_from_on_premises defaults to False when the caller omits it.
    The original normalized the value to False twice (an explicit `if ... is
    None` block followed by a conditional expression that could no longer see
    None); a single normalization is equivalent and clearer.
    """
    body = {
        'is_synced_from_on_premises':
            False if is_synced_from_on_premises is None else is_synced_from_on_premises,
    }
    return client.get_available_extension_properties(body=body)
def applications_service_principal_get_by_id(client, ids=None, types=None):
    """Build the getByIds request body and delegate to client.get_by_ids."""
    body = {'ids': ids, 'types': types}
    return client.get_by_ids(body=body)
def applications_service_principal_get_member_group(
        client, service_principal_id, security_enabled_only=None):
    """Delegate to client.get_member_groups.

    security_enabled_only defaults to False when the caller omits it. The
    original normalized the value to False twice (an `if ... is None` block
    followed by a conditional expression that was then a no-op); one
    normalization is equivalent.
    """
    body = {
        'security_enabled_only':
            False if security_enabled_only is None else security_enabled_only,
    }
    return client.get_member_groups(service_principal_id=service_principal_id,
                                    body=body)
def applications_service_principal_get_member_object(
        client, service_principal_id, security_enabled_only=None):
    """Delegate to client.get_member_objects.

    security_enabled_only defaults to False when the caller omits it. The
    original normalized the value to False twice (an `if ... is None` block
    followed by a conditional expression that was then a no-op); one
    normalization is equivalent.
    """
    body = {
        'security_enabled_only':
            False if security_enabled_only is None else security_enabled_only,
    }
    return client.get_member_objects(service_principal_id=service_principal_id,
                                     body=body)
def applications_service_principal_list_app_role_assigned_to(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_app_role_assigned_to with optional OData query options."""
    return client.list_app_role_assigned_to(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_app_role_assignment(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_app_role_assignments with optional OData query options."""
    return client.list_app_role_assignments(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_claim_mapping_policy(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_claims_mapping_policies with optional OData query options."""
    return client.list_claims_mapping_policies(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_created_object(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_created_objects with optional OData query options."""
    return client.list_created_objects(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_endpoint(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_endpoints with optional OData query options."""
    return client.list_endpoints(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_home_realm_discovery_policy(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_home_realm_discovery_policies with optional OData query options."""
    return client.list_home_realm_discovery_policies(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_member_of(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_member_of with optional OData query options."""
    return client.list_member_of(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_oauth2_permission_grant(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_oauth2_permission_grants with optional OData query options."""
    return client.list_oauth2_permission_grants(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_owned_object(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_owned_objects with optional OData query options."""
    return client.list_owned_objects(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_owner(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_owners with optional OData query options."""
    return client.list_owners(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_ref_claim_mapping_policy(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_claims_mapping_policies."""
    return client.list_ref_claims_mapping_policies(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_created_object(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_created_objects."""
    return client.list_ref_created_objects(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_home_realm_discovery_policy(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_home_realm_discovery_policies."""
    return client.list_ref_home_realm_discovery_policies(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_member_of(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_member_of."""
    return client.list_ref_member_of(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_oauth2_permission_grant(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_oauth2_permission_grants."""
    return client.list_ref_oauth2_permission_grants(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_owned_object(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_owned_objects."""
    return client.list_ref_owned_objects(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_owner(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_owners."""
    return client.list_ref_owners(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_token_issuance_policy(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_token_issuance_policies."""
    return client.list_ref_token_issuance_policies(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_token_lifetime_policy(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_token_lifetime_policies."""
    return client.list_ref_token_lifetime_policies(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_ref_transitive_member_of(
        client, service_principal_id, orderby=None):
    """Delegate to client.list_ref_transitive_member_of."""
    return client.list_ref_transitive_member_of(
        service_principal_id=service_principal_id, orderby=orderby)
def applications_service_principal_list_token_issuance_policy(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_token_issuance_policies with optional OData query options."""
    return client.list_token_issuance_policies(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_token_lifetime_policy(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_token_lifetime_policies with optional OData query options."""
    return client.list_token_lifetime_policies(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_list_transitive_member_of(
        client, service_principal_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_transitive_member_of with optional OData query options."""
    return client.list_transitive_member_of(
        service_principal_id=service_principal_id,
        orderby=orderby, select=select, expand=expand)
def applications_service_principal_remove_key(
        client, service_principal_id, key_id=None, proof=None):
    """Build the removeKey request body and delegate to client.remove_key."""
    body = {'key_id': key_id, 'proof': proof}
    return client.remove_key(service_principal_id=service_principal_id,
                             body=body)
def applications_service_principal_remove_password(
        client, service_principal_id, key_id=None):
    """Build the removePassword request body and delegate to client.remove_password."""
    body = {'key_id': key_id}
    return client.remove_password(service_principal_id=service_principal_id,
                                  body=body)
def applications_service_principal_restore(client, service_principal_id):
    """Delegate to client.restore for the given service principal."""
    return client.restore(service_principal_id=service_principal_id)
def applications_service_principal_show_app_role_assigned_to(
        client, service_principal_id, app_role_assignment_id,
        select=None, expand=None):
    """Delegate to client.get_app_role_assigned_to with optional OData query options."""
    return client.get_app_role_assigned_to(
        service_principal_id=service_principal_id,
        app_role_assignment_id=app_role_assignment_id,
        select=select, expand=expand)
def applications_service_principal_show_app_role_assignment(
        client, service_principal_id, app_role_assignment_id,
        select=None, expand=None):
    """Delegate to client.get_app_role_assignments with optional OData query options."""
    return client.get_app_role_assignments(
        service_principal_id=service_principal_id,
        app_role_assignment_id=app_role_assignment_id,
        select=select, expand=expand)
def applications_service_principal_show_endpoint(
        client, service_principal_id, endpoint_id, select=None, expand=None):
    """Delegate to client.get_endpoints with optional OData query options."""
    return client.get_endpoints(
        service_principal_id=service_principal_id,
        endpoint_id=endpoint_id,
        select=select, expand=expand)
def applications_service_principal_update_app_role_assigned_to(
        client, service_principal_id, app_role_assignment_id, id_=None,
        deleted_date_time=None, app_role_id=None, created_date_time=None,
        principal_display_name=None, principal_id=None, principal_type=None,
        resource_display_name=None, resource_id=None):
    """Build an appRoleAssignment body and delegate to client.update_app_role_assigned_to."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.update_app_role_assigned_to(
        service_principal_id=service_principal_id,
        app_role_assignment_id=app_role_assignment_id,
        body=body)
def applications_service_principal_update_app_role_assignment(
        client, service_principal_id, app_role_assignment_id, id_=None,
        deleted_date_time=None, app_role_id=None, created_date_time=None,
        principal_display_name=None, principal_id=None, principal_type=None,
        resource_display_name=None, resource_id=None):
    """Build an appRoleAssignment body and delegate to client.update_app_role_assignments."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.update_app_role_assignments(
        service_principal_id=service_principal_id,
        app_role_assignment_id=app_role_assignment_id,
        body=body)
def applications_service_principal_update_endpoint(
        client, service_principal_id, endpoint_id, id_=None,
        deleted_date_time=None, capability=None, provider_id=None,
        provider_name=None, provider_resource_id=None, uri=None):
    """Build an endpoint body and delegate to client.update_endpoints."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'capability': capability,
        'provider_id': provider_id,
        'provider_name': provider_name,
        'provider_resource_id': provider_resource_id,
        'uri': uri,
    }
    return client.update_endpoints(
        service_principal_id=service_principal_id,
        endpoint_id=endpoint_id,
        body=body)
def applications_service_principal_validate_property(
        client, entity_type=None, display_name=None, mail_nickname=None,
        on_behalf_of_user_id=None):
    """Build the validateProperties request body and delegate to client.validate_properties."""
    body = {
        'entity_type': entity_type,
        'display_name': display_name,
        'mail_nickname': mail_nickname,
        'on_behalf_of_user_id': on_behalf_of_user_id,
    }
    return client.validate_properties(body=body)
def applications_user_create_app_role_assignment(
        client, user_id, id_=None, deleted_date_time=None, app_role_id=None,
        created_date_time=None, principal_display_name=None, principal_id=None,
        principal_type=None, resource_display_name=None, resource_id=None):
    """Build an appRoleAssignment body and delegate to client.create_app_role_assignments for a user."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.create_app_role_assignments(user_id=user_id, body=body)
def applications_user_delete_app_role_assignment(
        client, user_id, app_role_assignment_id, if_match=None):
    """Delegate to client.delete_app_role_assignments for a user, forwarding the optional ETag."""
    return client.delete_app_role_assignments(
        user_id=user_id,
        app_role_assignment_id=app_role_assignment_id,
        if_match=if_match)
def applications_user_list_app_role_assignment(
        client, user_id, orderby=None, select=None, expand=None):
    """Delegate to client.list_app_role_assignments for a user with optional OData query options."""
    return client.list_app_role_assignments(
        user_id=user_id, orderby=orderby, select=select, expand=expand)
def applications_user_show_app_role_assignment(
        client, user_id, app_role_assignment_id, select=None, expand=None):
    """Delegate to client.get_app_role_assignments for a user with optional OData query options."""
    return client.get_app_role_assignments(
        user_id=user_id,
        app_role_assignment_id=app_role_assignment_id,
        select=select, expand=expand)
def applications_user_update_app_role_assignment(
        client, user_id, app_role_assignment_id, id_=None,
        deleted_date_time=None, app_role_id=None, created_date_time=None,
        principal_display_name=None, principal_id=None, principal_type=None,
        resource_display_name=None, resource_id=None):
    """Build an appRoleAssignment body and delegate to client.update_app_role_assignments for a user."""
    body = {
        'id': id_,
        'deleted_date_time': deleted_date_time,
        'app_role_id': app_role_id,
        'created_date_time': created_date_time,
        'principal_display_name': principal_display_name,
        'principal_id': principal_id,
        'principal_type': principal_type,
        'resource_display_name': resource_display_name,
        'resource_id': resource_id,
    }
    return client.update_app_role_assignments(
        user_id=user_id,
        app_role_assignment_id=app_role_assignment_id,
        body=body)
| 54.231608
| 122
| 0.441541
| 5,837
| 79,612
| 5.552681
| 0.035806
| 0.128845
| 0.094968
| 0.060257
| 0.929283
| 0.911481
| 0.88578
| 0.860294
| 0.830983
| 0.797075
| 0
| 0.000694
| 0.511343
| 79,612
| 1,467
| 123
| 54.268575
| 0.832429
| 0.00628
| 0
| 0.776423
| 0
| 0
| 0.052528
| 0.022984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09187
| false
| 0.017886
| 0
| 0.061789
| 0.184553
| 0.003252
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
be1615f4f65b63f4da2aced557a173fdd95fbc40
| 4,023
|
py
|
Python
|
tests/probability2/discrete_distribution_conditional_test.py
|
rpazuki/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | null | null | null |
tests/probability2/discrete_distribution_conditional_test.py
|
rpazuki/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | 1
|
2020-08-12T06:56:59.000Z
|
2020-08-12T08:57:30.000Z
|
tests/probability2/discrete_distribution_conditional_test.py
|
chasing-entropy/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | null | null | null |
from probability2.empirical_distributions import DiscreteDistribution
from tests.helpers import compare
def test_conditional_discrete_distribution():
    """Condition a four-variable distribution on X2 and verify names, frequencies and probabilities."""
    # Four levels dist. — counts 1..16 assigned in (X3 fastest, then X4, X2, X1) order.
    keys = [
        (lvl1, lvl2, lvl3, lvl4)
        for lvl1 in ("a", "b")
        for lvl2 in ("x", "y")
        for lvl4 in (33, 44)
        for lvl3 in (1, 2)
    ]
    samples = dict(zip(keys, range(1, 17)))
    disc_dist = DiscreteDistribution(samples)
    con_disc_dist = disc_dist.condition_on("X2")
    assert all(compare(con_disc_dist.conditional_rvs.names, ["X2"]))
    for level in ("x", "y"):
        assert all(compare(con_disc_dist.distributions[level].names,
                           ["X1", "X3", "X4"]))
    # (key, conditioned level, expected raw frequency) triples.
    cases = [
        (("a", 1, 33), "x", 1), (("a", 1, 33), "y", 5),
        (("a", 1, 44), "x", 3), (("a", 1, 44), "y", 7),
        (("b", 1, 33), "x", 9), (("b", 1, 33), "y", 13),
        (("b", 1, 44), "x", 11), (("b", 1, 44), "y", 15),
        (("b", 2, 44), "x", 12), (("b", 2, 33), "y", 14),
    ]
    for key, level, expected in cases:
        assert con_disc_dist.frequency(key, level) == expected
    # Per-level totals: "x" rows sum to 52, "y" rows to 84.
    for key, level, expected in cases:
        total = 52 if level == "x" else 84
        assert con_disc_dist.probability(key, level) == expected / total
def test_conditional_operator_discrete_distribution():
    """Verify the `|` operator conditions on one or two random variables like condition_on."""
    # Four levels dist. — counts 1..16 assigned in (X3 fastest, then X4, X2, X1) order.
    keys = [
        (lvl1, lvl2, lvl3, lvl4)
        for lvl1 in ("a", "b")
        for lvl2 in ("x", "y")
        for lvl4 in (33, 44)
        for lvl3 in (1, 2)
    ]
    samples = dict(zip(keys, range(1, 17)))
    disc_dist = DiscreteDistribution(samples)
    # Single conditioning variable via the operator.
    con_disc_dist = disc_dist | "X2"
    assert all(compare(con_disc_dist.conditional_rvs.names, ["X2"]))
    for level in ("x", "y"):
        assert all(compare(con_disc_dist.distributions[level].names,
                           ["X1", "X3", "X4"]))
    assert con_disc_dist.frequency(("a", 1, 33), "x") == 1
    assert con_disc_dist.frequency(("a", 1, 33), "y") == 5
    assert con_disc_dist.probability(("a", 1, 33), "x") == 1 / 52
    assert con_disc_dist.probability(("a", 1, 33), "y") == 5 / 84
    # Two conditioning variables via the operator.
    con_disc_dist = disc_dist | ("X2", "X3")
    assert all(compare(con_disc_dist.conditional_rvs.names, ["X2", "X3"]))
    for level in (("x", 1), ("x", 2), ("y", 1), ("y", 2)):
        assert all(compare(con_disc_dist.distributions[level].names,
                           ["X1", "X4"]))
    assert con_disc_dist.frequency(("a", 33), ("x", 1)) == 1
    assert con_disc_dist.probability(("a", 33), ("x", 1)) == 1 / 24
| 42.797872
| 83
| 0.52697
| 619
| 4,023
| 3.268174
| 0.0937
| 0.177954
| 0.217499
| 0.218487
| 0.924864
| 0.890756
| 0.867029
| 0.793376
| 0.589224
| 0.589224
| 0
| 0.101613
| 0.229431
| 4,023
| 93
| 84
| 43.258065
| 0.550968
| 0.0087
| 0
| 0.609756
| 0
| 0
| 0.045169
| 0
| 0
| 0
| 0
| 0
| 0.45122
| 1
| 0.02439
| false
| 0
| 0.02439
| 0
| 0.04878
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
077a92ec9cb5f685b10b44d9bbcf5a1c66cdfaf7
| 34,198
|
py
|
Python
|
tests/tests/test_psbt.py
|
stepansnigirev/embit
|
afd09f243eaa5f682222455f75fdc45589ad69bc
|
[
"MIT"
] | 24
|
2020-01-23T21:01:58.000Z
|
2022-01-31T14:59:40.000Z
|
tests/tests/test_psbt.py
|
stepansnigirev/embit
|
afd09f243eaa5f682222455f75fdc45589ad69bc
|
[
"MIT"
] | 16
|
2020-11-04T10:35:17.000Z
|
2021-10-02T07:35:04.000Z
|
tests/tests/test_psbt.py
|
stepansnigirev/embit
|
afd09f243eaa5f682222455f75fdc45589ad69bc
|
[
"MIT"
] | 15
|
2020-01-23T19:00:23.000Z
|
2022-02-21T20:36:17.000Z
|
# BIP174 Test vectors:
# https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#Test_Vectors
from binascii import hexlify, unhexlify
from embit.bip32 import HDKey
from embit.ec import PublicKey
from embit.psbt import PSBT
from unittest import TestCase
INVALID_VECTORS = [
# Case: Network transaction, not PSBT format
"0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300",
# Case: PSBT missing outputs
"70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000000",
# Case: PSBT where one input has a filled scriptSig in the unsigned tx
# (this test fails, which is probably fine for the purposes of this library)
# "70736274ff0100fd0a010200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be4000000006a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa88292feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000",
# Case: PSBT where inputs and outputs are provided but without an unsigned tx
"70736274ff000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000000",
# Case: PSBT with duplicate keys in an input
"70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000000",
# Case: PSBT With invalid global transaction typed key
"70736274ff020001550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT With invalid input witness utxo typed key
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac000000000002010020955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT With invalid pubkey length for input partial signature typed key
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87210203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd46304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT With invalid redeemscript typed key
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a01020400220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT With invalid witnessscript typed key
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d568102050047522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT With invalid bip32 typed key
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae210603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd10b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT With invalid non-witness utxo typed key
"70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f0000000000020000bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
# Case: PSBT With invalid final scriptsig typed key
"70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000020700da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
# Case: PSBT With invalid final script witness typed key
"70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903020800da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
# Case: PSBT With invalid pubkey in output BIP 32 derivation paths typed key
"70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f6187650000000107da00473044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01483045022100f61038b308dc1da865a34852746f015772934208c6d24454393cd99bdf2217770220056e675a675a6d0a02b85b14e5e29074d8a25a9b5760bea2816f661910a006ea01475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae0001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870107232200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b20289030108da0400473044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01473044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d20147522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae00210203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca58710d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000",
# Case: PSBT With invalid input sighash type typed key
"70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c0203000100000000010016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a65010125512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
# Case: PSBT With invalid output redeemScript typed key
"70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c0002000016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a65010125512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d2bd57f8a8751ae00",
# Case: PSBT With invalid output witnessScript typed key
"70736274ff0100730200000001301ae986e516a1ec8ac5b4bc6573d32f83b465e23ad76167d68b38e730b4dbdb0000000000ffffffff02747b01000000000017a91403aa17ae882b5d0d54b25d63104e4ffece7b9ea2876043993b0000000017a914b921b1ba6f722e4bfa83b6557a3139986a42ec8387000000000001011f00ca9a3b00000000160014d2d94b64ae08587eefc8eeb187c601e939f9037c00010016001462e9e982fff34dd8239610316b090cd2a3b747cb000100220020876bad832f1d168015ed41232a9ea65a1815d9ef13c0ef8759f64b5b2b278a6521010025512103b7ce23a01c5b4bf00a642537cdfabb315b668332867478ef51309d06d57f8a8751ae00",
]
VALID_VECTORS = [
# Case: PSBT with one P2PKH input. Outputs are empty
"70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab300000000000000",
# Case: PSBT with one P2PKH input and one P2SH-P2WPKH input. First input is signed and finalized. Outputs are empty
"70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac000000000001076a47304402204759661797c01b036b25928948686218347d89864b719e1f7fcf57d1e511658702205309eabf56aa4d8891ffd111fdf1336f3a29da866d7f8486d75546ceedaf93190121035cdc61fc7ba971c0b501a646a2a83b102cb43881217ca682dc86e2d73fa882920001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb82308000000",
# Case: PSBT with one P2PKH input which has a non-final scriptSig and has a sighash type specified. Outputs are empty
"70736274ff0100750200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf60000000000feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e1300000100fda5010100000000010289a3c71eab4d20e0371bbba4cc698fa295c9463afa2e397f8533ccb62f9567e50100000017160014be18d152a9b012039daf3da7de4f53349eecb985ffffffff86f8aa43a71dff1448893a530a7237ef6b4608bbb2dd2d0171e63aec6a4890b40100000017160014fe3e9ef1a745e974d902c4355943abcb34bd5353ffffffff0200c2eb0b000000001976a91485cff1097fd9e008bb34af709c62197b38978a4888ac72fef84e2c00000017a914339725ba21efd62ac753a9bcd067d6c7a6a39d05870247304402202712be22e0270f394f568311dc7ca9a68970b8025fdd3b240229f07f8a5f3a240220018b38d7dcd314e734c9276bd6fb40f673325bc4baa144c800d2f2f02db2765c012103d2e15674941bad4a996372cb87e1856d3652606d98562fe39c5e9e7e413f210502483045022100d12b852d85dcd961d2f5f4ab660654df6eedcc794c0c33ce5cc309ffb5fce58d022067338a8e0e1725c197fb1a88af59f51e44e4255b20167c8684031c05d1f2592a01210223b72beef0965d10be0778efecd61fcac6f79a4ea169393380734464f84f2ab30000000001030401000000000000",
# Case: PSBT with one P2PKH input and one P2SH-P2WPKH input both with non-final scriptSigs. P2SH-P2WPKH input's redeemScript is available. Outputs filled.
"70736274ff0100a00200000002ab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40000000000feffffffab0949a08c5af7c49b8212f417e2f15ab3f5c33dcf153821a8139f877a5b7be40100000000feffffff02603bea0b000000001976a914768a40bbd740cbe81d988e71de2a4d5c71396b1d88ac8e240000000000001976a9146f4620b553fa095e721b9ee0efe9fa039cca459788ac00000000000100df0200000001268171371edff285e937adeea4b37b78000c0566cbb3ad64641713ca42171bf6000000006a473044022070b2245123e6bf474d60c5b50c043d4c691a5d2435f09a34a7662a9dc251790a022001329ca9dacf280bdf30740ec0390422422c81cb45839457aeb76fc12edd95b3012102657d118d3357b8e0f4c2cd46db7b39f6d9c38d9a70abcb9b2de5dc8dbfe4ce31feffffff02d3dff505000000001976a914d0c59903c5bac2868760e90fd521a4665aa7652088ac00e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787b32e13000001012000e1f5050000000017a9143545e6e33b832c47050f24d3eeb93c9c03948bc787010416001485d13537f2e265405a34dbafa9e3dda01fb8230800220202ead596687ca806043edc3de116cdf29d5e9257c196cd055cf698c8d02bf24e9910b4a6ba670000008000000080020000800022020394f62be9df19952c5587768aeb7698061ad2c4a25c894f47d8c162b4d7213d0510b4a6ba6700000080010000800200008000",
# Case: PSBT with one P2SH-P2WSH input of a 2-of-2 multisig, redeemScript, witnessScript, and keypaths are available. Contains one signature.
"70736274ff0100550200000001279a2323a5dfb51fc45f220fa58b0fc13e1e3342792a85d7e36cd6333b5cbc390000000000ffffffff01a05aea0b000000001976a914ffe9c0061097cc3b636f2cb0460fa4fc427d2b4588ac0000000000010120955eea0b0000000017a9146345200f68d189e1adc0df1c4d16ea8f14c0dbeb87220203b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4646304302200424b58effaaa694e1559ea5c93bbfd4a89064224055cdf070b6771469442d07021f5c8eb0fea6516d60b8acb33ad64ede60e8785bfb3aa94b99bdf86151db9a9a010104220020771fd18ad459666dd49f3d564e3dbc42f4c84774e360ada16816a8ed488d5681010547522103b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd462103de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd52ae220603b1341ccba7683b6af4f1238cd6e97e7167d569fac47f1e48d47541844355bd4610b4a6ba67000000800000008004000080220603de55d1e1dac805e3f8a58c1fbf9b94c02f3dbaafe127fefca4995f26f82083bd10b4a6ba670000008000000080050000800000",
# Case: PSBT with one P2WSH input of a 2-of-2 multisig. witnessScript, keypaths, and global xpubs are available. Contains no signatures. Outputs filled.
"70736274ff01005202000000019dfc6628c26c5899fe1bd3dc338665bfd55d7ada10f6220973df2d386dec12760100000000ffffffff01f03dcd1d000000001600147b3a00bfdc14d27795c2b74901d09da6ef133579000000004f01043587cf02da3fd0088000000097048b1ad0445b1ec8275517727c87b4e4ebc18a203ffa0f94c01566bd38e9000351b743887ee1d40dc32a6043724f2d6459b3b5a4d73daec8fbae0472f3bc43e20cd90c6a4fae000080000000804f01043587cf02da3fd00880000001b90452427139cd78c2cff2444be353cd58605e3e513285e528b407fae3f6173503d30a5e97c8adbc557dac2ad9a7e39c1722ebac69e668b6f2667cc1d671c83cab0cd90c6a4fae000080010000800001012b0065cd1d000000002200202c5486126c4978079a814e13715d65f36459e4d6ccaded266d0508645bafa6320105475221029da12cdb5b235692b91536afefe5c91c3ab9473d8e43b533836ab456299c88712103372b34234ed7cf9c1fea5d05d441557927be9542b162eb02e1ab2ce80224c00b52ae2206029da12cdb5b235692b91536afefe5c91c3ab9473d8e43b533836ab456299c887110d90c6a4fae0000800000008000000000220603372b34234ed7cf9c1fea5d05d441557927be9542b162eb02e1ab2ce80224c00b10d90c6a4fae0000800100008000000000002202039eff1f547a1d5f92dfa2ba7af6ac971a4bd03ba4a734b03156a256b8ad3a1ef910ede45cc500000080000000800100008000",
# Case: PSBT with unknown types in the inputs.
"70736274ff01003f0200000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffff010000000000000000036a010000000000000a0f0102030405060708090f0102030405060708090a0b0c0d0e0f0000",
# Case: PSBT with `PSBT_GLOBAL_XPUB`.
"70736274ff01009d0100000002710ea76ab45c5cb6438e607e59cc037626981805ae9e0dfd9089012abb0be5350100000000ffffffff190994d6a8b3c8c82ccbcfb2fba4106aa06639b872a8d447465c0d42588d6d670000000000ffffffff0200e1f505000000001976a914b6bc2c0ee5655a843d79afedd0ccc3f7dd64340988ac605af405000000001600141188ef8e4ce0449eaac8fb141cbf5a1176e6a088000000004f010488b21e039e530cac800000003dbc8a5c9769f031b17e77fea1518603221a18fd18f2b9a54c6c8c1ac75cbc3502f230584b155d1c7f1cd45120a653c48d650b431b67c5b2c13f27d7142037c1691027569c503100008000000080000000800001011f00e1f5050000000016001433b982f91b28f160c920b4ab95e58ce50dda3a4a220203309680f33c7de38ea6a47cd4ecd66f1f5a49747c6ffb8808ed09039243e3ad5c47304402202d704ced830c56a909344bd742b6852dccd103e963bae92d38e75254d2bb424502202d86c437195df46c0ceda084f2a291c3da2d64070f76bf9b90b195e7ef28f77201220603309680f33c7de38ea6a47cd4ecd66f1f5a49747c6ffb8808ed09039243e3ad5c1827569c5031000080000000800000008000000000010000000001011f00e1f50500000000160014388fb944307eb77ef45197d0b0b245e079f011de220202c777161f73d0b7c72b9ee7bde650293d13f095bc7656ad1f525da5fd2e10b11047304402204cb1fb5f869c942e0e26100576125439179ae88dca8a9dc3ba08f7953988faa60220521f49ca791c27d70e273c9b14616985909361e25be274ea200d7e08827e514d01220602c777161f73d0b7c72b9ee7bde650293d13f095bc7656ad1f525da5fd2e10b1101827569c5031000080000000800000008000000000000000000000220202d20ca502ee289686d21815bd43a80637b0698e1fbcdbe4caed445f6c1a0a90ef1827569c50310000800000008000000080000000000400000000",
]
class PSBTTest(TestCase):
def test_invalid_psbts(self):
    """Every vector in INVALID_VECTORS must be rejected by the parser.

    For each malformed PSBT hex string, PSBT.parse() is expected to raise;
    if parsing succeeds, the test fails and reports the offending vector.
    """
    for psbt_str in INVALID_VECTORS:
        psbt_bytes = unhexlify(psbt_str)
        try:
            PSBT.parse(psbt_bytes)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; any ordinary parse error means the invalid
        # PSBT was correctly rejected.
        except Exception:
            continue
        self.fail("Invalid psbt was parsed successfully: {}".format(psbt_str))
def test_valid_psbts(self):
    """Round-trip each valid vector: parse, verify, re-serialize, compare.

    Serialization of a parsed PSBT must reproduce the exact input bytes.
    """
    for vector in VALID_VECTORS:
        raw = unhexlify(vector)
        parsed = PSBT.parse(raw)
        # ignore_missing: vectors may lack utxo data for some inputs.
        parsed.verify(ignore_missing=True)
        failure_msg = "Valid psbt changed after being parsed & serialized: {}".format(
            vector
        )
        self.assertEqual(parsed.serialize(), raw, failure_msg)
def test_sign(self):
"""Parses a PSBT, signs both inputs (1 segwit and 1 legacy), and verifies each signature is correct"""
xkey = HDKey.from_base58(
"tprv8ZgxMBicQKsPd9TeAdPADNnSyH9SSUUbTVeFszDE23Ki6TBB5nCefAdHkK8Fm3qMQR6sHwA56zqRmKmxnHk37JkiFzvncDqoKmPWubu7hDF"
)
psbt_str = "70736274ff01009a020000000258e87a21b56daf0c23be8e7070456c336f7cbaa5c8757924f545887bb2abdd750000000000ffffffff838d0427d0ec650a68aa46bb0b098aea4422c071b2ca78352a077959d07cea1d0100000000ffffffff0270aaf00800000000160014d85c2b71d0060b09c9886aeb815e50991dda124d00e1f5050000000016001400aea9a2e5f0f876a588df5546e8742d1d87008f00000000000100bb0200000001aad73931018bd25f84ae400b68848be09db706eac2ac18298babee71ab656f8b0000000048473044022058f6fc7c6a33e1b31548d481c826c015bd30135aad42cd67790dab66d2ad243b02204a1ced2604c6735b6393e5b41691dd78b00f0c5942fb9f751856faa938157dba01feffffff0280f0fa020000000017a9140fb9463421696b82c833af241c78c17ddbde493487d0f20a270100000017a91429ca74f8a08f81999428185c97b5d852e4063f618765000000010304010000000104475221029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f2102dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d752ae2206029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f10d90c6a4f000000800000008000000080220602dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d710d90c6a4f0000008000000080010000800001012000c2eb0b0000000017a914b7f5faf40e3d40a5a459b1db3535f2b72fa921e8870103040100000001042200208c2353173743b595dfb4a07b72ba8e42e3797da74e87fe7d9d7497e3b2028903010547522103089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc21023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7352ae2206023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e7310d90c6a4f000000800000008003000080220603089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc10d90c6a4f00000080000000800200008000220203a9a4c37f5996d3aa25dbac6b570af0650394492942460b354753ed9eeca5877110d90c6a4f000000800000008004000080002202027f6399757d2eff55a136ad02c684b1838b6556e5f1b6b34282a94b6b5005109610d90c6a4f00000080000000800500008000"
exp_partial_sigs = [
{
"029583bf39ae0a609747ad199addd634fa6108559d6c5cd39b4c2183f1ab96e07f": "3044022074018ad4180097b873323c0015720b3684cc8123891048e7dbcd9b55ad679c99022073d369b740e3eb53dcefa33823c8070514ca55a7dd9544f157c167913261118c01",
"02dab61ff49a14db6a7d02b0cd1fbb78fc4b18312b5b4e54dae4dba2fbfef536d7": "30440220631a989fe738a92ad01986023312c19214fe2802b39e5cbc1ac3678806c692c3022039db6c387bd267716dfdb3d4d8da50b8e85d213326ba7c7daaa4c0ce41eb922301",
},
{
"03089dc10c7ac6db54f91329af617333db388cead0c231f723379d1b99030b02dc": "3044022062eb7a556107a7c73f45ac4ab5a1dddf6f7075fb1275969a7f383efff784bcb202200c05dbb7470dbf2f08557dd356c7325c1ed30913e996cd3840945db12228da5f01",
"023add904f3d6dcf59ddb906b0dee23529b7ffb9ed50e5e86151926860221f0e73": "3044022065f45ba5998b59a27ffe1a7bed016af1f1f90d54b3aa8f7450aa5f56a25103bd02207f724703ad1edb96680b284b56d4ffcb88f7fb759eabbe08aa30f29b851383d201",
},
]
# check with both compressed parsing and uncompressed
for compress in [False, True]:
psbt = PSBT.parse(unhexlify(psbt_str), compress=compress)
if compress:
self.assertTrue(len(psbt.serialize()) < len(unhexlify(psbt_str)))
psbt.sign_with(xkey)
for i in range(len(psbt.inputs)):
inp = psbt.inputs[i]
self.assertEqual(len(inp.partial_sigs), len(exp_partial_sigs[i]))
for act_pub, act_sig in inp.partial_sigs.items():
act_pub_str = hexlify(act_pub.serialize()).decode("utf-8")
self.assertIn(act_pub_str, exp_partial_sigs[i].keys())
self.assertEqual(
hexlify(act_sig).decode("utf-8"), exp_partial_sigs[i][act_pub_str]
)
| 282.628099
| 1,961
| 0.948827
| 605
| 34,198
| 53.547107
| 0.340496
| 0.006174
| 0.008149
| 0.007624
| 0.015249
| 0.009199
| 0.004383
| 0.004383
| 0.00284
| 0.00284
| 0
| 0.621697
| 0.039359
| 34,198
| 120
| 1,962
| 284.983333
| 0.364422
| 0.082227
| 0
| 0.024691
| 0
| 0
| 0.927236
| 0.923919
| 0
| 1
| 0
| 0
| 0.061728
| 1
| 0.037037
| false
| 0
| 0.061728
| 0
| 0.111111
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
07bf90ace95d8d3e699d064203c63bcb9683bfe6
| 43,038
|
py
|
Python
|
pcdet/models/backbones_3d/votr_backbone.py
|
ocNflag/point2seq
|
710686f576b3df5469a06c66860758b25f852dbd
|
[
"Apache-2.0"
] | 21
|
2022-03-24T09:37:38.000Z
|
2022-03-31T13:21:54.000Z
|
pcdet/models/backbones_3d/votr_backbone.py
|
ocNflag/point2seq
|
710686f576b3df5469a06c66860758b25f852dbd
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/backbones_3d/votr_backbone.py
|
ocNflag/point2seq
|
710686f576b3df5469a06c66860758b25f852dbd
|
[
"Apache-2.0"
] | 1
|
2022-03-24T09:37:48.000Z
|
2022-03-24T09:37:48.000Z
|
import torch
import torch.nn as nn
from ...ops.votr_ops import votr_utils
from pcdet.models.backbones_2d.swin_helpers import GELU
def scatter_nd(indices, updates, shape):
    """PyTorch edition of TensorFlow's ``scatter_nd``.

    Scatters ``updates`` into a zero tensor of size ``shape`` at the
    coordinates given by ``indices``. Contains no error handling, so use
    carefully. When an index repeats, the repeated-*add* behavior that
    TensorFlow supports is NOT supported here (last write wins).

    Args:
        indices: integer tensor of shape (..., ndim) with target coordinates.
        updates: values to scatter; viewed to match the index layout.
        shape: list of ints giving the output size.

    Returns:
        Tensor of size ``shape`` (dtype/device taken from ``updates``) with
        the scattered values and zeros elsewhere.
    """
    ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
    ndim = indices.shape[-1]
    # Trailing dims of `shape` not covered by the index tuple stay intact.
    output_shape = list(indices.shape[:-1]) + shape[ndim:]
    flatted_indices = indices.view(-1, ndim)
    # Advanced indexing requires a tuple of index tensors; indexing with a
    # plain Python list of tensors is deprecated/removed in modern PyTorch.
    slices = tuple(flatted_indices[:, i] for i in range(ndim)) + (Ellipsis,)
    ret[slices] = updates.view(*output_shape)
    return ret
class SparseTensor(object):
    """Sparse voxel tensor: per-voxel features plus integer grid indices.

    `map_table` is a hash table (built by `votr_utils.build_hash_table`)
    mapping voxel coordinates to row ids; it is built lazily when the
    caller does not supply one.
    """

    def __init__(self, features, indices, spatial_shape, voxel_size, point_cloud_range, batch_size, hash_size,
                 map_table=None, gather_dict=None):
        self.features = features  # (N, C) voxel feature matrix
        # (N, 4) index rows; column 0 is the batch index (see build_map_table),
        # remaining columns are grid coordinates — presumably (z, y, x), as
        # with_coords in Attention3d reads them via [3, 2, 1]; confirm upstream.
        self.indices = indices
        self.spatial_shape = spatial_shape  # [x, y, z]
        self.batch_size = batch_size
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.hash_size = hash_size
        self.gather_dict = gather_dict
        # BUG FIX: `if not map_table` evaluates tensor truthiness, which is
        # ambiguous (raises) for a multi-element tensor and would also rebuild
        # the table for any "falsy" value. Test explicitly for None.
        self.map_table = self.build_map_table() if map_table is None else map_table

    @torch.no_grad()
    def build_map_table(self):
        """Build the coordinate->row hash table for the current indices."""
        # Per-batch voxel counts, derived from the batch-index column.
        bs_cnt = torch.zeros(self.batch_size).int()
        for i in range(self.batch_size):
            bs_cnt[i] = (self.indices[:, 0] == i).sum().item()
        bs_cnt = bs_cnt.to(self.indices.device)
        map_table = votr_utils.build_hash_table(
            self.batch_size,
            self.hash_size,
            self.spatial_shape,
            self.indices,
            bs_cnt,
        )
        return map_table

    def dense(self, channels_first=True):
        """Densify to (B, Z, Y, X, C), or (B, C, Z, Y, X) when channels_first."""
        reverse_spatial_shape = self.spatial_shape[::-1]  # (ZYX)
        output_shape = [self.batch_size] + list(
            reverse_spatial_shape) + [self.features.shape[1]]
        res = scatter_nd(
            self.indices.to(self.features.device).long(), self.features,
            output_shape)
        if not channels_first:
            return res
        # Move the trailing channel dim to position 1: (B, C, Z, Y, X).
        ndim = len(reverse_spatial_shape)
        trans_params = list(range(0, ndim + 1))
        trans_params.insert(1, ndim + 1)
        return res.permute(*trans_params).contiguous()
class Attention3d(nn.Module):
    """Base class for multi-head attention over sparse voxels.

    Owns the layers shared by all variants — multi-head attention, the
    feed-forward pair, dropouts, and the output projection — plus two
    no-grad helpers for batch bookkeeping and voxel-center coordinates.
    Subclasses must implement `forward`.
    """

    def __init__(self, input_channels, output_channels, ff_channels, dropout, num_heads, attention_modes):
        super(Attention3d, self).__init__()
        self.attention_modes = attention_modes
        self.mhead_attention = nn.MultiheadAttention(
            embed_dim=input_channels,
            num_heads=num_heads,
            dropout=dropout,
        )
        self.drop_out = nn.Dropout(dropout)
        self.linear1 = nn.Linear(input_channels, ff_channels)
        self.linear2 = nn.Linear(ff_channels, input_channels)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = GELU()
        self.output_layer = nn.Sequential(
            nn.Linear(input_channels, output_channels),
            nn.LayerNorm(output_channels),
            GELU()
        )

    @torch.no_grad()
    def with_bs_cnt(self, indices, batch_size):
        """Count voxels per batch from the batch-index column of `indices`.

        Returns an int32 tensor of length `batch_size` on `indices.device`.
        """
        batch_column = indices[:, 0]
        counts = torch.zeros(batch_size).int()
        for b in range(batch_size):
            counts[b] = (batch_column == b).sum().item()
        return counts.to(indices.device)

    @torch.no_grad()
    def with_coords(self, indices, point_cloud_range, voxel_size):
        """Voxel-center xyz coordinates for (batch, z, y, x) index rows.

        Columns [3, 2, 1] reorder the grid index to (x, y, z); the +0.5
        shifts from the voxel corner to its center.
        """
        cell = torch.tensor(voxel_size).unsqueeze(0).to(indices.device)
        origin = torch.tensor(point_cloud_range[0:3]).unsqueeze(0).to(indices.device)
        centers = indices[:, [3, 2, 1]].float() + 0.5
        return centers * cell + origin

    def forward(self, sp_tensor):
        raise NotImplementedError
class SparseAttention3d(Attention3d):
    """Strided (downsampling) sparse voxel attention.

    Downsamples the voxel grid by `strides`, then attends from the new
    (query) voxels to neighborhoods of the original (key) voxels gathered
    through hash-table lookups in `votr_utils`. Mutates and returns the
    input SparseTensor with the downsampled geometry and new features.
    """

    def __init__(self, input_channels, output_channels, ff_channels, dropout, num_heads, attention_modes, strides,
                 num_ds_voxels,
                 use_relative_coords=False, use_pooled_feature=False, use_no_query_coords=False):
        super(SparseAttention3d, self).__init__(input_channels, output_channels, ff_channels, dropout, num_heads,
                                                attention_modes)
        self.use_relative_coords = use_relative_coords
        self.use_pooled_features = use_pooled_feature
        self.use_no_query_coords = use_no_query_coords
        self.strides = strides
        self.num_ds_voxels = num_ds_voxels
        self.norm = nn.LayerNorm(input_channels)
        if not self.use_no_query_coords:
            # Projects query voxel xyz coordinates into feature space.
            self.q_pos_proj = nn.Sequential(
                nn.Linear(3, input_channels),
                GELU(),
            )
        # Projects key coordinate maps (N, 3, size) into feature space.
        self.k_pos_proj = nn.Sequential(
            nn.Conv1d(3, input_channels, 1),
            GELU(),
        )

    @torch.no_grad()
    def create_gather_dict(self, attention_modes, map_table, voxel_indices, spatial_shape):
        """Build {mode name: [gather_indices, gather_mask]} per attention mode.

        The mask flags invalid gathered slots (index < 0, i.e. empty voxels).
        """
        _gather_dict = {}
        for attention_mode in attention_modes:
            if attention_mode.NAME == 'LocalAttention':
                attend_size = attention_mode.SIZE
                attend_range = attention_mode.RANGE
                _gather_indices = votr_utils.sparse_local_attention_hash_indices(
                    spatial_shape, attend_size, attend_range, self.strides, map_table, voxel_indices)
            elif attention_mode.NAME == 'StridedAttention':
                attend_size = attention_mode.SIZE
                range_spec = attention_mode.RANGE_SPEC
                _gather_indices = votr_utils.sparse_strided_attention_hash_indices(
                    spatial_shape, attend_size, range_spec, self.strides, map_table, voxel_indices)
            else:
                raise NotImplementedError
            # Negative indices mark slots with no voxel behind them.
            _gather_mask = (_gather_indices < 0)
            # _gather_indices[_gather_indices < 0] = 0
            _gather_dict[attention_mode.NAME] = [_gather_indices, _gather_mask]
        return _gather_dict

    @torch.no_grad()
    def downsample(self, sp_tensor):
        """Shrink the spatial grid by `strides` and deduplicate voxel indices.

        Returns (new_spatial_shape, new_indices, new_map_table).
        """
        x_shape = sp_tensor.spatial_shape[0] // self.strides[0]
        y_shape = sp_tensor.spatial_shape[1] // self.strides[1]
        z_shape = sp_tensor.spatial_shape[2] // self.strides[2]
        new_spatial_shape = [x_shape, y_shape, z_shape]
        new_indices, new_map_table = votr_utils.hash_table_down_sample(
            self.strides, self.num_ds_voxels, sp_tensor.batch_size, sp_tensor.hash_size,
            new_spatial_shape, sp_tensor.indices)
        return new_spatial_shape, new_indices, new_map_table

    def forward(self, sp_tensor):
        """Downsample, attend (queries = new voxels, keys = old voxels), and
        write the result back into `sp_tensor` (mutated in place and returned)."""
        new_spatial_shape, new_indices, new_map_table = self.downsample(sp_tensor)
        vx, vy, vz = sp_tensor.voxel_size
        new_voxel_size = [vx * self.strides[0], vy * self.strides[1], vz * self.strides[2]]
        gather_dict = self.create_gather_dict(self.attention_modes, sp_tensor.map_table, new_indices,
                                              sp_tensor.spatial_shape)
        voxel_features = sp_tensor.features
        # Per-batch counts for the source (value/key) and downsampled voxels.
        v_bs_cnt = self.with_bs_cnt(sp_tensor.indices, sp_tensor.batch_size)
        k_bs_cnt = self.with_bs_cnt(new_indices, sp_tensor.batch_size)
        # Concatenate gather results of all attention modes along the size dim.
        a_key_indices, a_key_mask = [], []
        for attention_idx, attetion_mode in enumerate(self.attention_modes):
            key_indices, key_mask = gather_dict[attetion_mode.NAME]
            a_key_indices.append(key_indices)
            a_key_mask.append(key_mask)
        key_indices = torch.cat(a_key_indices, dim=1)
        key_mask = torch.cat(a_key_mask, dim=1)
        key_features = votr_utils.grouping_operation(voxel_features, v_bs_cnt, key_indices, k_bs_cnt)
        voxel_coords = self.with_coords(sp_tensor.indices, sp_tensor.point_cloud_range, sp_tensor.voxel_size)
        key_coords = votr_utils.grouping_operation(voxel_coords, v_bs_cnt, key_indices, k_bs_cnt)
        query_coords = self.with_coords(new_indices, sp_tensor.point_cloud_range, new_voxel_size)
        if self.use_pooled_features:
            # Max-pool gathered key features as (part of) the query features.
            pooled_query_features = key_features.max(dim=-1)[0]
            pooled_query_features = pooled_query_features.unsqueeze(0)
            if self.use_no_query_coords:
                query_features = pooled_query_features
            else:
                query_features = self.q_pos_proj(query_coords).unsqueeze(0)
                query_features = query_features + pooled_query_features
        else:
            query_features = self.q_pos_proj(query_coords).unsqueeze(0)
        if self.use_relative_coords:
            key_coords = key_coords - query_coords.unsqueeze(-1)  # (N, 3, size)
        # Positional embedding for keys; added to the gathered key features.
        key_pos_emb = self.k_pos_proj(key_coords)
        key_features = key_features + key_pos_emb
        # nn.MultiheadAttention expects (seq, batch, channels).
        key_features = key_features.permute(2, 0, 1).contiguous()  # (size, N1+N2, C)
        attend_features, attend_weights = self.mhead_attention(
            query=query_features,
            key=key_features,
            value=key_features,
            key_padding_mask=key_mask,  # True entries are ignored by attention
        )
        attend_features = self.drop_out(attend_features)
        new_features = attend_features.squeeze(0)
        # Feed-forward with residual, then norm and output projection.
        act_features = self.linear2(self.dropout1(self.activation(self.linear1(new_features))))
        new_features = new_features + self.dropout2(act_features)
        new_features = self.norm(new_features)
        new_features = self.output_layer(new_features)
        # update sp_tensor in place with the downsampled geometry
        sp_tensor.features = new_features
        sp_tensor.indices = new_indices
        sp_tensor.spatial_shape = new_spatial_shape
        sp_tensor.voxel_size = new_voxel_size
        # Drop the stale hash table before attaching the new one.
        del sp_tensor.map_table
        sp_tensor.gather_dict = None
        sp_tensor.map_table = new_map_table
        return sp_tensor
class SubMAttention3d(Attention3d):
    """Submanifold voxel attention: every voxel attends to gathered neighbor
    voxels without changing the set of active voxels.

    Transformer-style residual block: attention + norm1, feed-forward +
    norm2, then the base-class output projection. The gather dict is cached
    on the SparseTensor so later blocks in the same stage reuse it.
    """

    def __init__(self, input_channels, output_channels, ff_channels, dropout, num_heads, attention_modes,
                 use_pos_emb=True, use_relative_coords=False, use_no_query_coords=False):
        super(SubMAttention3d, self).__init__(input_channels, output_channels, ff_channels, dropout, num_heads,
                                              attention_modes)
        self.use_relative_coords = use_relative_coords
        self.use_no_query_coords = use_no_query_coords
        self.use_pos_emb = use_pos_emb
        self.norm1 = nn.LayerNorm(input_channels)  # after the attention residual
        self.norm2 = nn.LayerNorm(input_channels)  # after the feed-forward residual
        if self.use_pos_emb:
            if not self.use_no_query_coords:
                # Projects query voxel xyz coordinates into feature space.
                self.q_pos_proj = nn.Sequential(
                    nn.Linear(3, input_channels),
                    GELU(),
                )
            # Projects key coordinate maps (N, 3, size) into feature space.
            self.k_pos_proj = nn.Sequential(
                nn.Conv1d(3, input_channels, 1),
                GELU(),
            )

    @torch.no_grad()
    def create_gather_dict(self, attention_modes, map_table, voxel_indices, spatial_shape):
        """Build {mode name: [gather_indices, gather_mask]} per attention mode.

        Uses the submanifold (non-strided) hash lookups; the mask flags
        invalid gathered slots (index < 0).
        """
        _gather_dict = {}
        for attention_mode in attention_modes:
            if attention_mode.NAME == 'LocalAttention':
                attend_size = attention_mode.SIZE
                attend_range = attention_mode.RANGE
                _gather_indices = votr_utils.subm_local_attention_hash_indices(
                    spatial_shape, attend_size, attend_range, map_table, voxel_indices)
            elif attention_mode.NAME == 'StridedAttention':
                attend_size = attention_mode.SIZE
                range_spec = attention_mode.RANGE_SPEC
                _gather_indices = votr_utils.subm_strided_attention_hash_indices(
                    spatial_shape, attend_size, range_spec, map_table, voxel_indices)
            else:
                raise NotImplementedError
            _gather_mask = (_gather_indices < 0)
            # _gather_indices[_gather_indices < 0] = 0
            _gather_dict[attention_mode.NAME] = [_gather_indices, _gather_mask]
        return _gather_dict

    def forward(self, sp_tensor):
        """Attend from each voxel to its gathered neighbors; updates
        sp_tensor.features in place and returns sp_tensor."""
        # Lazily build and cache the gather dict on the tensor.
        if not sp_tensor.gather_dict:
            sp_tensor.gather_dict = self.create_gather_dict(self.attention_modes, sp_tensor.map_table,
                                                            sp_tensor.indices, sp_tensor.spatial_shape)
        voxel_features = sp_tensor.features
        v_bs_cnt = self.with_bs_cnt(sp_tensor.indices, sp_tensor.batch_size)
        # Queries and keys are the same voxel set here.
        k_bs_cnt = v_bs_cnt.clone()
        a_key_indices, a_key_mask = [], []
        for attention_idx, attetion_mode in enumerate(self.attention_modes):
            key_indices, key_mask = sp_tensor.gather_dict[attetion_mode.NAME]
            a_key_indices.append(key_indices)
            a_key_mask.append(key_mask)
        key_indices = torch.cat(a_key_indices, dim=1)
        key_mask = torch.cat(a_key_mask, dim=1)
        query_features = voxel_features.unsqueeze(0)  # (1, N1+N2, C)
        key_features = votr_utils.grouping_operation(voxel_features, v_bs_cnt, key_indices, k_bs_cnt)
        if self.use_pos_emb:
            voxel_coords = self.with_coords(sp_tensor.indices, sp_tensor.point_cloud_range, sp_tensor.voxel_size)
            key_coords = votr_utils.grouping_operation(voxel_coords, v_bs_cnt, key_indices, k_bs_cnt)
            if self.use_relative_coords:
                # Keys become offsets relative to their query voxel.
                key_coords = key_coords - voxel_coords.unsqueeze(-1)
            key_pos_emb = self.k_pos_proj(key_coords)
            key_features = key_features + key_pos_emb
            if self.use_no_query_coords:
                pass
            else:
                query_pos_emb = self.q_pos_proj(voxel_coords).unsqueeze(0)
                query_features = query_features + query_pos_emb
        # nn.MultiheadAttention expects (seq, batch, channels).
        key_features = key_features.permute(2, 0, 1).contiguous()  # (size, N1+N2, C)
        attend_features, attend_weights = self.mhead_attention(
            query=query_features,
            key=key_features,
            value=key_features,
            key_padding_mask=key_mask,  # True entries are ignored by attention
        )
        attend_features = self.drop_out(attend_features)
        # Residual + norm, feed-forward residual + norm, output projection.
        voxel_features = voxel_features + attend_features.squeeze(0)
        voxel_features = self.norm1(voxel_features)
        act_features = self.linear2(self.dropout1(self.activation(self.linear1(voxel_features))))
        voxel_features = voxel_features + self.dropout2(act_features)
        voxel_features = self.norm2(voxel_features)
        voxel_features = self.output_layer(voxel_features)
        sp_tensor.features = voxel_features
        return sp_tensor
class AttentionResBlock(nn.Module):
    """One VoTr stage: a strided SparseAttention3d downsampler followed by a
    residual stack of SubMAttention3d blocks."""

    def __init__(self, model_cfg, use_relative_coords=False, use_pooled_feature=False, use_no_query_coords=False):
        super(AttentionResBlock, self).__init__()
        sp_cfg = model_cfg.SP_CFGS
        # Downsampling attention, configured from SP_CFGS.
        self.sp_attention = SparseAttention3d(
            input_channels=sp_cfg.CHANNELS[0],
            output_channels=sp_cfg.CHANNELS[2],
            ff_channels=sp_cfg.CHANNELS[1],
            dropout=sp_cfg.DROPOUT,
            num_heads=sp_cfg.NUM_HEADS,
            attention_modes=sp_cfg.ATTENTION,
            strides=sp_cfg.STRIDE,
            num_ds_voxels=sp_cfg.NUM_DS_VOXELS,
            use_relative_coords=use_relative_coords,
            use_pooled_feature=use_pooled_feature,
            use_no_query_coords=use_no_query_coords,
        )
        # Submanifold attention stack, configured from SUBM_CFGS; every block
        # shares the same construction arguments.
        subm_cfg = model_cfg.SUBM_CFGS
        subm_kwargs = dict(
            input_channels=subm_cfg.CHANNELS[0],
            output_channels=subm_cfg.CHANNELS[2],
            ff_channels=subm_cfg.CHANNELS[1],
            dropout=subm_cfg.DROPOUT,
            num_heads=subm_cfg.NUM_HEADS,
            attention_modes=subm_cfg.ATTENTION,
            use_pos_emb=subm_cfg.USE_POS_EMB,
            use_relative_coords=use_relative_coords,
            use_no_query_coords=use_no_query_coords,
        )
        self.subm_attention_modules = nn.ModuleList()
        for _ in range(subm_cfg.NUM_BLOCKS):
            self.subm_attention_modules.append(SubMAttention3d(**subm_kwargs))

    def forward(self, sp_tensor):
        """Downsample, then run the submanifold stack with a long residual
        connection from the stage input features."""
        sp_tensor = self.sp_attention(sp_tensor)
        residual = sp_tensor.features
        for subm_module in self.subm_attention_modules:
            sp_tensor = subm_module(sp_tensor)
        sp_tensor.features += residual
        return sp_tensor
class VoxelTransformer(nn.Module):
    """Voxel Transformer 3D backbone: a linear input transform followed by a
    sequence of AttentionResBlock stages over a SparseTensor."""

    def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range):
        super(VoxelTransformer, self).__init__()
        self.model_cfg = model_cfg
        # Stage-wide feature/coordinate options, all defaulting to off.
        self.use_relative_coords = self.model_cfg.get('USE_RELATIVE_COORDS', False)
        self.use_pooled_feature = self.model_cfg.get('USE_POOLED_FEATURE', False)
        self.use_no_query_coords = self.model_cfg.get('USE_NO_QUERY_COORDS', False)
        self.grid_size = grid_size
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        # Lift raw voxel features to the 16-channel backbone input.
        self.input_transform = nn.Sequential(
            nn.Linear(input_channels, 16),
            nn.LayerNorm(16),
            GELU()
        )
        self.backbone = nn.ModuleList()
        for param in self.model_cfg.PARAMS:
            block = AttentionResBlock(
                param, self.use_relative_coords, self.use_pooled_feature, self.use_no_query_coords)
            self.backbone.append(block)
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES

    def forward(self, batch_dict):
        """Wrap the batch voxels into a SparseTensor, run all stages, and
        store the result under 'encoded_spconv_tensor' (stride 8)."""
        transformed = self.input_transform(batch_dict['voxel_features'])
        sp_tensor = SparseTensor(
            features=transformed,
            indices=batch_dict['voxel_coords'].int(),
            spatial_shape=self.grid_size,
            voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range,
            batch_size=batch_dict['batch_size'],
            hash_size=self.model_cfg.HASH_SIZE,
            map_table=None,
            gather_dict=None,
        )
        for attention_block in self.backbone:
            sp_tensor = attention_block(sp_tensor)
        batch_dict.update({
            'encoded_spconv_tensor': sp_tensor,
            'encoded_spconv_tensor_stride': 8
        })
        return batch_dict
class SparseAttention3dv2(Attention3d):
    """v2 of SparseAttention3d: strided downsampling attention with optional
    per-batch "global" key tokens.

    When `global_mode` is set, K extra key features/coords are derived from
    the query features (TopK or SVD — see `global_attention`) and appended to
    every voxel's local attention neighborhood.
    """

    def __init__(self, input_channels, output_channels, ff_channels, dropout, num_heads, attention_modes, strides,
                 num_ds_voxels,
                 use_relative_coords=False, use_pooled_feature=False, use_no_query_coords=False, global_mode=None):
        super(SparseAttention3dv2, self).__init__(input_channels, output_channels, ff_channels, dropout, num_heads,
                                                  attention_modes)
        self.use_relative_coords = use_relative_coords
        self.use_pooled_features = use_pooled_feature
        self.use_no_query_coords = use_no_query_coords
        self.strides = strides
        self.num_ds_voxels = num_ds_voxels
        self.norm = nn.LayerNorm(input_channels)
        if not self.use_no_query_coords:
            # Projects query voxel xyz coordinates into feature space.
            self.q_pos_proj = nn.Sequential(
                nn.Linear(3, input_channels),
                GELU(),
            )
        # Projects key coordinate maps (N, 3, size) into feature space.
        self.k_pos_proj = nn.Sequential(
            nn.Conv1d(3, input_channels, 1),
            GELU(),
        )
        self.global_mode = global_mode
        if self.global_mode:
            # MLP mapping a global feature vector to a pseudo xyz coordinate.
            self.global_feat2coords = nn.Sequential(
                nn.Linear(input_channels, input_channels),
                nn.LayerNorm(input_channels),
                GELU(),
                nn.Linear(input_channels, input_channels),
                nn.LayerNorm(input_channels),
                GELU(),
                nn.Linear(input_channels, 3)
            )

    def global_attention(self, query_features, query_indices, batch_size):
        """Derive K global key tokens per batch element.

        Args:
            query_features: (N, C)
            query_indices: (N, 4) — column 0 is the batch index
        Returns:
            global_features: (N, C, K) — K is the global size; per the repeat
                below, each voxel receives a copy of its batch's K tokens
                (note: the original docstring claimed (N, K, C)/(N, K, 3),
                contradicting the code — the inline shapes are authoritative)
            global_coords: (N, 3, K) — pseudo coordinates from feat2coords
        """
        assert self.global_mode is not None
        K = self.global_mode.SIZE
        global_features, global_coords = [], []
        for batch_idx in range(batch_size):
            query_features_single = query_features[query_indices[:, 0] == batch_idx, :]
            num_voxels_single = query_features_single.shape[0]
            if self.global_mode.TYPE == 'TopK':
                # Per-channel top-K values across the batch's voxels.
                global_features_single = torch.topk(query_features_single, K, dim=0)[0]  # (K, C)
                global_coords_single = self.global_feat2coords(global_features_single)  # (K, 3)
            elif self.global_mode.TYPE == 'SVD':
                with torch.no_grad():
                    # K principal components of the (C, N) feature matrix.
                    pca_features = query_features_single.transpose(0, 1).contiguous()  # (C, N)
                    _, _, v = torch.pca_lowrank(pca_features, q=K)  # (N, K)
                    global_features_single = torch.matmul(pca_features, v).transpose(0, 1).contiguous()  # (K, C)
                global_coords_single = self.global_feat2coords(global_features_single)
            else:
                raise NotImplementedError
            global_features_single = global_features_single.transpose(0, 1).contiguous()  # (C, K)
            global_coords_single = global_coords_single.transpose(0, 1).contiguous()  # (3, K)
            # Broadcast the batch's K tokens to each of its voxels.
            global_features_single = global_features_single.unsqueeze(0).repeat(num_voxels_single, 1, 1)  # (N, C, K)
            global_coords_single = global_coords_single.unsqueeze(0).repeat(num_voxels_single, 1, 1)  # (N, 3, K)
            global_features.append(global_features_single)
            global_coords.append(global_coords_single)
        global_features = torch.cat(global_features, dim=0)
        global_coords = torch.cat(global_coords, dim=0)
        return global_features, global_coords

    @torch.no_grad()
    def create_gather_dict(self, attention_modes, map_table, voxel_indices, spatial_shape):
        """Build {mode name: [gather_indices, gather_mask]} per attention mode.

        The mask flags invalid gathered slots (index < 0).
        """
        _gather_dict = {}
        for attention_mode in attention_modes:
            if attention_mode.NAME == 'LocalAttention':
                attend_size = attention_mode.SIZE
                attend_range = attention_mode.RANGE
                _gather_indices = votr_utils.sparse_local_attention_hash_indices(
                    spatial_shape, attend_size, attend_range, self.strides, map_table, voxel_indices)
            elif attention_mode.NAME == 'StridedAttention':
                attend_size = attention_mode.SIZE
                range_spec = attention_mode.RANGE_SPEC
                _gather_indices = votr_utils.sparse_strided_attention_hash_indices(
                    spatial_shape, attend_size, range_spec, self.strides, map_table, voxel_indices)
            else:
                raise NotImplementedError
            _gather_mask = (_gather_indices < 0)
            # _gather_indices[_gather_indices < 0] = 0
            _gather_dict[attention_mode.NAME] = [_gather_indices, _gather_mask]
        return _gather_dict

    @torch.no_grad()
    def downsample(self, sp_tensor):
        """Shrink the spatial grid by `strides` and deduplicate voxel indices."""
        x_shape = sp_tensor.spatial_shape[0] // self.strides[0]
        y_shape = sp_tensor.spatial_shape[1] // self.strides[1]
        z_shape = sp_tensor.spatial_shape[2] // self.strides[2]
        new_spatial_shape = [x_shape, y_shape, z_shape]
        new_indices, new_map_table = votr_utils.hash_table_down_sample(
            self.strides, self.num_ds_voxels, sp_tensor.batch_size, sp_tensor.hash_size,
            new_spatial_shape, sp_tensor.indices)
        return new_spatial_shape, new_indices, new_map_table

    def forward(self, sp_tensor):
        """Downsample and attend; like v1, but optionally appends global key
        tokens before the attention step. Mutates and returns sp_tensor."""
        new_spatial_shape, new_indices, new_map_table = self.downsample(sp_tensor)
        vx, vy, vz = sp_tensor.voxel_size
        new_voxel_size = [vx * self.strides[0], vy * self.strides[1], vz * self.strides[2]]
        gather_dict = self.create_gather_dict(self.attention_modes, sp_tensor.map_table, new_indices,
                                              sp_tensor.spatial_shape)
        voxel_features = sp_tensor.features
        v_bs_cnt = self.with_bs_cnt(sp_tensor.indices, sp_tensor.batch_size)
        k_bs_cnt = self.with_bs_cnt(new_indices, sp_tensor.batch_size)
        a_key_indices, a_key_mask = [], []
        for attention_idx, attetion_mode in enumerate(self.attention_modes):
            key_indices, key_mask = gather_dict[attetion_mode.NAME]
            a_key_indices.append(key_indices)
            a_key_mask.append(key_mask)
        key_indices = torch.cat(a_key_indices, dim=1)
        key_mask = torch.cat(a_key_mask, dim=1)
        key_features = votr_utils.grouping_operation(voxel_features, v_bs_cnt, key_indices, k_bs_cnt)
        voxel_coords = self.with_coords(sp_tensor.indices, sp_tensor.point_cloud_range, sp_tensor.voxel_size)
        key_coords = votr_utils.grouping_operation(voxel_coords, v_bs_cnt, key_indices, k_bs_cnt)
        query_coords = self.with_coords(new_indices, sp_tensor.point_cloud_range, new_voxel_size)
        if self.use_pooled_features:
            pooled_query_features = key_features.max(dim=-1)[0]
            pooled_query_features = pooled_query_features.unsqueeze(0)
            if self.use_no_query_coords:
                query_features = pooled_query_features
            else:
                query_features = self.q_pos_proj(query_coords).unsqueeze(0)
                query_features = query_features + pooled_query_features
        else:
            query_features = self.q_pos_proj(query_coords).unsqueeze(0)
        if self.global_mode:
            # Append K global tokens to every voxel's key set; the extra mask
            # entries are zeros (False), i.e. always attendable.
            global_key_features, global_key_coords = self.global_attention(
                query_features=query_features.squeeze(0),
                query_indices=new_indices,
                batch_size=sp_tensor.batch_size)
            key_features = torch.cat([key_features, global_key_features], dim=2)  # (N, C, size+K)
            key_coords = torch.cat([key_coords, global_key_coords], dim=2)  # (N, 3, size+K)
            global_key_mask = torch.zeros((key_mask.shape[0], self.global_mode.SIZE), dtype=key_mask.dtype).to(
                key_mask.device)
            key_mask = torch.cat([key_mask, global_key_mask], dim=1)
        if self.use_relative_coords:
            key_coords = key_coords - query_coords.unsqueeze(-1)  # (N, 3, size)
        key_pos_emb = self.k_pos_proj(key_coords)
        key_features = key_features + key_pos_emb
        # nn.MultiheadAttention expects (seq, batch, channels).
        key_features = key_features.permute(2, 0, 1).contiguous()  # (size, N1+N2, C)
        attend_features, attend_weights = self.mhead_attention(
            query=query_features,
            key=key_features,
            value=key_features,
            key_padding_mask=key_mask,
        )
        attend_features = self.drop_out(attend_features)
        new_features = attend_features.squeeze(0)
        act_features = self.linear2(self.dropout1(self.activation(self.linear1(new_features))))
        new_features = new_features + self.dropout2(act_features)
        new_features = self.norm(new_features)
        new_features = self.output_layer(new_features)
        # update sp_tensor in place with the downsampled geometry
        sp_tensor.features = new_features
        sp_tensor.indices = new_indices
        sp_tensor.spatial_shape = new_spatial_shape
        sp_tensor.voxel_size = new_voxel_size
        del sp_tensor.map_table
        sp_tensor.gather_dict = None
        sp_tensor.map_table = new_map_table
        return sp_tensor
class SubMAttention3dv2(Attention3d):
    """v2 of SubMAttention3d: submanifold voxel attention with optional
    per-batch "global" key tokens (TopK or SVD, see `global_attention`)."""

    def __init__(self, input_channels, output_channels, ff_channels, dropout, num_heads, attention_modes,
                 use_pos_emb=True, use_relative_coords=False, use_no_query_coords=False, global_mode=None):
        super(SubMAttention3dv2, self).__init__(input_channels, output_channels, ff_channels, dropout, num_heads,
                                                attention_modes)
        self.use_relative_coords = use_relative_coords
        self.use_no_query_coords = use_no_query_coords
        self.use_pos_emb = use_pos_emb
        self.norm1 = nn.LayerNorm(input_channels)  # after the attention residual
        self.norm2 = nn.LayerNorm(input_channels)  # after the feed-forward residual
        if self.use_pos_emb:
            if not self.use_no_query_coords:
                # Projects query voxel xyz coordinates into feature space.
                self.q_pos_proj = nn.Sequential(
                    nn.Linear(3, input_channels),
                    GELU(),
                )
            # Projects key coordinate maps (N, 3, size) into feature space.
            self.k_pos_proj = nn.Sequential(
                nn.Conv1d(3, input_channels, 1),
                GELU(),
            )
        self.global_mode = global_mode
        if self.global_mode:
            # MLP mapping a global feature vector to a pseudo xyz coordinate.
            self.global_feat2coords = nn.Sequential(
                nn.Linear(input_channels, input_channels),
                nn.LayerNorm(input_channels),
                GELU(),
                nn.Linear(input_channels, input_channels),
                nn.LayerNorm(input_channels),
                GELU(),
                nn.Linear(input_channels, 3)
            )

    def global_attention(self, query_features, query_indices, batch_size):
        """Derive K global key tokens per batch element.

        Args:
            query_features: (N, C)
            query_indices: (N, 4) — column 0 is the batch index
        Returns:
            global_features: (N, C, K) — K is the global size; per the repeat
                below, each voxel receives a copy of its batch's K tokens
                (note: the original docstring claimed (N, K, C)/(N, K, 3),
                contradicting the code — the inline shapes are authoritative)
            global_coords: (N, 3, K) — pseudo coordinates from feat2coords
        """
        assert self.global_mode is not None
        K = self.global_mode.SIZE
        global_features, global_coords = [], []
        for batch_idx in range(batch_size):
            query_features_single = query_features[query_indices[:, 0] == batch_idx, :]
            num_voxels_single = query_features_single.shape[0]
            if self.global_mode.TYPE == 'TopK':
                # Per-channel top-K values across the batch's voxels.
                global_features_single = torch.topk(query_features_single, K, dim=0)[0]  # (K, C)
                global_coords_single = self.global_feat2coords(global_features_single)  # (K, 3)
            elif self.global_mode.TYPE == 'SVD':
                with torch.no_grad():
                    # K principal components of the (C, N) feature matrix.
                    pca_features = query_features_single.transpose(0, 1).contiguous()  # (C, N)
                    _, _, v = torch.pca_lowrank(pca_features, q=K)  # (N, K)
                    global_features_single = torch.matmul(pca_features, v).transpose(0, 1).contiguous()  # (K, C)
                global_coords_single = self.global_feat2coords(global_features_single)
            else:
                raise NotImplementedError
            global_features_single = global_features_single.transpose(0, 1).contiguous()  # (C, K)
            global_coords_single = global_coords_single.transpose(0, 1).contiguous()  # (3, K)
            # Broadcast the batch's K tokens to each of its voxels.
            global_features_single = global_features_single.unsqueeze(0).repeat(num_voxels_single, 1, 1)  # (N, C, K)
            global_coords_single = global_coords_single.unsqueeze(0).repeat(num_voxels_single, 1, 1)  # (N, 3, K)
            global_features.append(global_features_single)
            global_coords.append(global_coords_single)
        global_features = torch.cat(global_features, dim=0)
        global_coords = torch.cat(global_coords, dim=0)
        return global_features, global_coords

    @torch.no_grad()
    def create_gather_dict(self, attention_modes, map_table, voxel_indices, spatial_shape):
        """Build {mode name: [gather_indices, gather_mask]} per attention mode.

        Uses the submanifold (non-strided) hash lookups; the mask flags
        invalid gathered slots (index < 0).
        """
        _gather_dict = {}
        for attention_mode in attention_modes:
            if attention_mode.NAME == 'LocalAttention':
                attend_size = attention_mode.SIZE
                attend_range = attention_mode.RANGE
                _gather_indices = votr_utils.subm_local_attention_hash_indices(
                    spatial_shape, attend_size, attend_range, map_table, voxel_indices)
            elif attention_mode.NAME == 'StridedAttention':
                attend_size = attention_mode.SIZE
                range_spec = attention_mode.RANGE_SPEC
                _gather_indices = votr_utils.subm_strided_attention_hash_indices(
                    spatial_shape, attend_size, range_spec, map_table, voxel_indices)
            else:
                raise NotImplementedError
            _gather_mask = (_gather_indices < 0)
            # _gather_indices[_gather_indices < 0] = 0
            _gather_dict[attention_mode.NAME] = [_gather_indices, _gather_mask]
        return _gather_dict

    def forward(self, sp_tensor):
        """Attend from each voxel to its gathered neighbors plus (optionally)
        global tokens; updates sp_tensor.features in place and returns it."""
        if not sp_tensor.gather_dict:
            sp_tensor.gather_dict = self.create_gather_dict(self.attention_modes, sp_tensor.map_table,
                                                            sp_tensor.indices, sp_tensor.spatial_shape)
        voxel_features = sp_tensor.features
        v_bs_cnt = self.with_bs_cnt(sp_tensor.indices, sp_tensor.batch_size)
        # Queries and keys are the same voxel set here.
        k_bs_cnt = v_bs_cnt.clone()
        a_key_indices, a_key_mask = [], []
        for attention_idx, attetion_mode in enumerate(self.attention_modes):
            key_indices, key_mask = sp_tensor.gather_dict[attetion_mode.NAME]
            a_key_indices.append(key_indices)
            a_key_mask.append(key_mask)
        key_indices = torch.cat(a_key_indices, dim=1)
        key_mask = torch.cat(a_key_mask, dim=1)
        query_features = voxel_features.unsqueeze(0)  # (1, N1+N2, C)
        key_features = votr_utils.grouping_operation(voxel_features, v_bs_cnt, key_indices, k_bs_cnt)
        if self.global_mode:
            # Append K always-attendable (mask False) global tokens per voxel.
            global_key_features, global_key_coords = self.global_attention(
                query_features=query_features.squeeze(0),
                query_indices=sp_tensor.indices,
                batch_size=sp_tensor.batch_size)
            key_features = torch.cat([key_features, global_key_features], dim=2)  # (N, C, size+K)
            global_key_mask = torch.zeros((key_mask.shape[0], self.global_mode.SIZE), dtype=key_mask.dtype).to(
                key_mask.device)
            key_mask = torch.cat([key_mask, global_key_mask], dim=1)
        if self.use_pos_emb:
            voxel_coords = self.with_coords(sp_tensor.indices, sp_tensor.point_cloud_range, sp_tensor.voxel_size)
            key_coords = votr_utils.grouping_operation(voxel_coords, v_bs_cnt, key_indices, k_bs_cnt)
            # added: keep key_coords aligned with the widened key_features
            if self.global_mode:
                key_coords = torch.cat([key_coords, global_key_coords], dim=2)  # (N, 3, size+K)
            if self.use_relative_coords:
                key_coords = key_coords - voxel_coords.unsqueeze(-1)
            key_pos_emb = self.k_pos_proj(key_coords)
            key_features = key_features + key_pos_emb
            if self.use_no_query_coords:
                pass
            else:
                query_pos_emb = self.q_pos_proj(voxel_coords).unsqueeze(0)
                query_features = query_features + query_pos_emb
        # nn.MultiheadAttention expects (seq, batch, channels).
        key_features = key_features.permute(2, 0, 1).contiguous()  # (size, N1+N2, C)
        attend_features, attend_weights = self.mhead_attention(
            query=query_features,
            key=key_features,
            value=key_features,
            key_padding_mask=key_mask,
        )
        attend_features = self.drop_out(attend_features)
        # Residual + norm, feed-forward residual + norm, output projection.
        voxel_features = voxel_features + attend_features.squeeze(0)
        voxel_features = self.norm1(voxel_features)
        act_features = self.linear2(self.dropout1(self.activation(self.linear1(voxel_features))))
        voxel_features = voxel_features + self.dropout2(act_features)
        voxel_features = self.norm2(voxel_features)
        voxel_features = self.output_layer(voxel_features)
        sp_tensor.features = voxel_features
        return sp_tensor
class AttentionResBlockv2(nn.Module):
    """One backbone stage: a strided sparse-attention layer followed by a
    stack of submanifold attention layers, with a residual connection added
    around the submanifold stack.
    """

    def __init__(self, model_cfg, use_relative_coords=False, use_pooled_feature=False, use_no_query_coords=False):
        super(AttentionResBlockv2, self).__init__()
        sp_cfg = model_cfg.SP_CFGS
        # Strided sparse attention; CHANNELS = [in, feed-forward, out].
        self.sp_attention = SparseAttention3dv2(
            input_channels=sp_cfg.CHANNELS[0],
            output_channels=sp_cfg.CHANNELS[2],
            ff_channels=sp_cfg.CHANNELS[1],
            dropout=sp_cfg.DROPOUT,
            num_heads=sp_cfg.NUM_HEADS,
            attention_modes=sp_cfg.ATTENTION,
            strides=sp_cfg.STRIDE,
            num_ds_voxels=sp_cfg.NUM_DS_VOXELS,
            use_relative_coords=use_relative_coords,
            use_pooled_feature=use_pooled_feature,
            use_no_query_coords=use_no_query_coords,
            global_mode=sp_cfg.get('GLOBAL_MODE', None),
        )
        subm_cfg = model_cfg.SUBM_CFGS
        # NUM_BLOCKS identical submanifold attention layers.
        self.subm_attention_modules = nn.ModuleList([
            SubMAttention3dv2(
                input_channels=subm_cfg.CHANNELS[0],
                output_channels=subm_cfg.CHANNELS[2],
                ff_channels=subm_cfg.CHANNELS[1],
                dropout=subm_cfg.DROPOUT,
                num_heads=subm_cfg.NUM_HEADS,
                attention_modes=subm_cfg.ATTENTION,
                use_pos_emb=subm_cfg.USE_POS_EMB,
                use_relative_coords=use_relative_coords,
                use_no_query_coords=use_no_query_coords,
                global_mode=subm_cfg.get('GLOBAL_MODE', None),
            )
            for _ in range(subm_cfg.NUM_BLOCKS)
        ])

    def forward(self, sp_tensor):
        """Run the strided attention, then the submanifold stack, and add the
        pre-stack features back as a residual before returning the tensor."""
        sp_tensor = self.sp_attention(sp_tensor)
        residual = sp_tensor.features
        for submanifold_block in self.subm_attention_modules:
            sp_tensor = submanifold_block(sp_tensor)
        sp_tensor.features += residual
        return sp_tensor
class VoxelTransformerV2(nn.Module):
    """Voxel Transformer backbone: a small linear embedding of raw voxel
    features followed by a stack of AttentionResBlockv2 stages; attaches the
    encoded sparse tensor to the batch dict."""

    def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range):
        super(VoxelTransformerV2, self).__init__()
        self.model_cfg = model_cfg
        # Coordinate/feature handling flags shared by every stage.
        self.use_relative_coords = self.model_cfg.get('USE_RELATIVE_COORDS', False)
        self.use_pooled_feature = self.model_cfg.get('USE_POOLED_FEATURE', False)
        self.use_no_query_coords = self.model_cfg.get('USE_NO_QUERY_COORDS', False)
        self.grid_size = grid_size
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        # Embed raw voxel features into 16 channels before attention.
        self.input_transform = nn.Sequential(
            nn.Linear(input_channels, 16),
            nn.LayerNorm(16),
            nn.ReLU()
        )
        # One attention residual stage per entry in PARAMS.
        self.backbone = nn.ModuleList([
            AttentionResBlockv2(stage_cfg, self.use_relative_coords,
                                self.use_pooled_feature, self.use_no_query_coords)
            for stage_cfg in self.model_cfg.PARAMS
        ])
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES

    def forward(self, batch_dict):
        """Encode the batch's voxels and store the result in ``batch_dict``
        under 'encoded_spconv_tensor' (stride reported as 8)."""
        embedded = self.input_transform(batch_dict['voxel_features'])
        sp_tensor = SparseTensor(
            features=embedded,
            indices=batch_dict['voxel_coords'].int(),
            spatial_shape=self.grid_size,
            voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range,
            batch_size=batch_dict['batch_size'],
            hash_size=self.model_cfg.HASH_SIZE,
            map_table=None,
            gather_dict=None,
        )
        for stage in self.backbone:
            sp_tensor = stage(sp_tensor)
        batch_dict.update({
            'encoded_spconv_tensor': sp_tensor,
            'encoded_spconv_tensor_stride': 8
        })
        return batch_dict
class SparseConvTensor(object):
    """Lightweight container pairing a features tensor with its voxel indices;
    mimics the minimal interface of a spconv sparse tensor."""

    def __init__(self, features, indices):
        # Stored as-is: no copying, validation, or device transfer.
        self.features = features
        self.indices = indices
class VoxelTransformerV3(nn.Module):
    """Voxel Transformer backbone that also exposes per-stage outputs as
    'multi_scale_3d_features' (x_conv1..x_conv4), mirroring the interface of
    spconv-based backbones.

    NOTE(review): forward() indexes x_convs[0..3], so it assumes exactly three
    backbone stages (len(model_cfg.PARAMS) == 3) — confirm against configs.
    NOTE(review): this variant builds AttentionResBlock (v1), not
    AttentionResBlockv2 — presumably intentional; verify.
    """
    def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range):
        super(VoxelTransformerV3, self).__init__()
        self.model_cfg = model_cfg
        # Coordinate/feature handling flags shared by every stage.
        self.use_relative_coords = self.model_cfg.get('USE_RELATIVE_COORDS', False)
        self.use_pooled_feature = self.model_cfg.get('USE_POOLED_FEATURE', False)
        self.use_no_query_coords = self.model_cfg.get('USE_NO_QUERY_COORDS', False)
        self.grid_size = grid_size
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        # Embed raw voxel features into 16 channels before attention.
        self.input_transform = nn.Sequential(
            nn.Linear(input_channels, 16),
            nn.LayerNorm(16),
            nn.ReLU()
        )
        self.backbone = nn.ModuleList()
        for param in self.model_cfg.PARAMS:
            self.backbone.append(
                AttentionResBlock(param, self.use_relative_coords, self.use_pooled_feature, self.use_no_query_coords))
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
    def forward(self, batch_dict):
        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
        batch_size = batch_dict['batch_size']
        voxel_features = self.input_transform(voxel_features)
        # The embedded input itself is recorded as the first "scale" (x_conv1).
        x_convs = [SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
        )]
        sp_tensor = SparseTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.grid_size,
            voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range,
            batch_size=batch_size,
            hash_size=self.model_cfg.HASH_SIZE,
            map_table=None,
            gather_dict=None,
        )
        for attention_block in self.backbone:
            sp_tensor = attention_block(sp_tensor)
            # Snapshot each stage's output for the multi-scale dict below.
            x_convs.append(SparseConvTensor(
                features=sp_tensor.features,
                indices=sp_tensor.indices,
            ))
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_convs[0],
                'x_conv2': x_convs[1],
                'x_conv3': x_convs[2],
                'x_conv4': x_convs[3],
            }
        })
        batch_dict.update({
            'encoded_spconv_tensor': sp_tensor,
            'encoded_spconv_tensor_stride': 8
        })
        return batch_dict
| 44.784599
| 120
| 0.626818
| 5,189
| 43,038
| 4.796107
| 0.050299
| 0.039217
| 0.015671
| 0.025073
| 0.90055
| 0.891068
| 0.882991
| 0.879254
| 0.877888
| 0.877888
| 0
| 0.009544
| 0.29158
| 43,038
| 960
| 121
| 44.83125
| 0.806717
| 0.023746
| 0
| 0.818066
| 0
| 0
| 0.015061
| 0.004064
| 0
| 0
| 0
| 0
| 0.002545
| 1
| 0.044529
| false
| 0.002545
| 0.005089
| 0
| 0.094148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07d5e31c924c9235f1cd7ee2e5b7e352c13fb893
| 12,675
|
py
|
Python
|
src/PCA_scatter_plot.py
|
caticoa3/malaria_hero
|
cf6d437b13ed7611b3fee377a4d5aed6e190ddb1
|
[
"BSD-3-Clause"
] | 53
|
2018-11-05T20:58:43.000Z
|
2020-12-03T14:56:06.000Z
|
src/PCA_scatter_plot.py
|
caticoa3/malaria_hero
|
cf6d437b13ed7611b3fee377a4d5aed6e190ddb1
|
[
"BSD-3-Clause"
] | 1
|
2020-05-26T22:14:02.000Z
|
2020-05-26T22:14:02.000Z
|
src/PCA_scatter_plot.py
|
caticoa3/malaria_hero
|
cf6d437b13ed7611b3fee377a4d5aed6e190ddb1
|
[
"BSD-3-Clause"
] | 14
|
2018-11-06T04:55:34.000Z
|
2020-12-29T11:14:28.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 22:39:05 2018
@author: Carlos A Ariza, PhD
"""
#import pickle as pkl
import pandas as pd
from itertools import combinations
#import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn
#feat_df = pd.read_csv('../data/factors_n_bn_feat.csv',index_col= 0)
##
#feat_df = feat_df.iloc[:,0:20]
#mask = feat_df.loc[:,'label'].isin(['TUJ1','RIP'])
#feat_df = feat_df.loc[mask,:]
#
##
###y = feat_df.loc[:,'label']
##print('Number of samples for each label \n', feat_df.groupby('label')['label'].count())
#X = feat_df.loc[:,'x0':].values
#labels_DF=feat_df.iloc[:,0:7]
def plot_pca(X, labels_DF, enable_plotting=True):
    """Draw pairwise scatter plots of principal components, coloured by label.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_components)
        PCA-projected data; one subplot is drawn per pair of components.
    labels_DF : pandas.DataFrame
        Must contain a 'label' column; may contain a 'y_true' column
        (see NOTE below).
    enable_plotting : bool
        Nothing is drawn when False or when there are 21+ components.

    Figures are saved under ../plots/ rather than returned.

    NOTE(review): the 'y_true' branch references names ``y_true_1s`` and
    ``data`` that are defined nowhere in this function or module — taking
    that branch raises NameError. Needs fixing or removal.
    """
#    X = pd.read_pickle(pickled_PCA_file)
#    labels_DF = pd.read_csv(labels)
    n_comp = X.shape[1]
    label = labels_DF.loc[:,'label']
    label_set = set(label)
    label_list = list(label_set)
    print(label_set)
    # -- plotting
    if (enable_plotting) and (n_comp < 21):
        print('plotting scatter plots...')
        # All unordered pairs of component indices; one subplot per pair.
        comp_list = list(combinations(range(X.shape[1]), 2))
        if len(comp_list) < 20:
            fig, subplots = plt.subplots(comp_list[-1][0],round(
                                (len(comp_list)+0.5)/comp_list[-1][0]),
                                squeeze=True,figsize=(15,8.5))
        else:
            fig, subplots = plt.subplots(comp_list[-1][0]-2,round(
                                len(comp_list)/(comp_list[-1][0]-2)),
                                figsize=(15,8.5))
        subplots = subplots.ravel()
        #Make labels to indicate predicted value
        #-1 True positive
        #-2 False positive
        #1 True negative
        #2 False negative
        legend_dict = {label_list[0]:label_list[0],-2:'FP',label_list[1]:label_list[1],2:'FN'}
        if 'y_true' in labels_DF.columns:
            # NOTE(review): `y_true_1s` and `data` are undefined here (NameError).
            mask_TN = ((y_true_1s == data.loc[:,'label_svm']) & (y_true_1s == 1))
            labels_DF.loc[mask_TN, 'TPTNFPFN'] = 1
            mask_TP = ((y_true_1s == data['label_svm']) & (y_true_1s == -1))
            labels_DF.loc[mask_TP,'TPTNFPFN'] = -1
            mask_FN = ((y_true_1s != data['label_svm']) & (y_true_1s == 1))
            labels_DF.loc[mask_FN,'TPTNFPFN'] = -2
            mask_FP = ((y_true_1s != data['label_svm']) & (y_true_1s == -1))
            labels_DF.loc[mask_FP,'TPTNFPFN'] = 2
            colors = [y_true_1s, labels_DF.loc[:,'TPTNFPFN']]
            order = [1,-1,-2,2] #bottom to top order for overlaping points on plot.
#            print(labels_DF.groupby('TPTNFPFN').size())
        else:
            colors = [label]
            order = [1,-1] #bottom to top order for overlaping points on plot.
        def pairwise_scatter_plots(j, comp_list, zorders, grouped,legend_dict):
            # One scatter subplot per component pair; `color`, `colors_dict`,
            # `subplots` and `fig` are taken from the enclosing scope at call time.
            for i, n in enumerate(comp_list):
                for key, group in grouped:
                    group.plot(ax=subplots[i], kind='scatter', x=n[0], y=n[1],
                               label=key, color=colors_dict[key],s=0.7,
                               zorder=zorders[key],legend=False, alpha=0.6)
                subplots[i].tick_params(labelsize = 5, direction = 'in')
#                        labelbottom='off', axis='both', which='both',
#                        bottom='off', top='off')
#                subplots[i].xaxis.set_ticklabels([])
#                subplots[i].yaxis.set_ticklabels([])
                subplots[i].xaxis.label.set_visible(False)
                subplots[i].yaxis.label.set_visible(False)
                subplots[i].text(0.5,0.9,'{0} vs {1}'.format(n[0],n[1]),
                        horizontalalignment='center',transform=subplots[i].transAxes,
                        size=8)
            # -- adding figure legend
            lp = lambda i: plt.plot([],color=colors_dict[i],ms=np.sqrt(25), mec="none",
                                    label="{}".format(legend_dict[i]), ls="", marker="o")[0]
            handles = [lp(i) for i in np.unique(color)]
#            plt.figlegend(handles, bbox_to_anchor=(1.05, 0), loc='lower left',
#                          borderaxespad=0.)
            plt.figlegend(loc='upper left', borderaxespad=0., ncol=4)
            plt.suptitle('{} Principle Components'.format(n_comp))
            fig.tight_layout()
            plt.subplots_adjust(top=0.915, bottom=0.045, left=0.02, right=0.988,
                                hspace=0.2, wspace=0.1)
            fig.savefig('../plots/{0}_PC-scatter_plots_{1}.png'.format(
                        n_comp,j), dpi=600)
        PCAprojectedDF = pd.DataFrame(X)
        colors_dict = {-2:'blue', label_list[0]:'red', label_list[1]:'green', 2:'orange'}
        zorders = {-2:3, label_list[0]:2, label_list[1]:1, 2:4}
        for j, color in enumerate(colors): #Plot with
            PCAprojectedDF.loc[:,'label'] = color
            grouped = PCAprojectedDF.groupby('label')
            pairwise_scatter_plots(j=j, comp_list=comp_list, zorders=zorders,
                                   grouped=grouped,legend_dict=legend_dict)
        #Make one more plot with the True negatives plotted on top
#        PCAprojectedDF.loc[:,'label'] = labels_DF.loc[:,'TPTNFPFN']
        grouped = PCAprojectedDF.groupby('label')
        zorders = {-2:2, label_list[0]:4, label_list[1]:1, 2:3}
        pairwise_scatter_plots(j=2, comp_list=comp_list, zorders=zorders,
                               grouped=grouped,legend_dict=legend_dict)
        print('...scatter plots saved in ../plots/ folder')
    # --seaborn scatter matrix
##    PCAprojectedDF = feat_df
#    feature_names = feat_df.columns[7:]
#    pp = seaborn.pairplot(feat_df, vars=feature_names,
#                     hue='group_idx', #hue_order = order, #size=5, #aspect=7,
#                     markers='.',
#                     plot_kws=dict(s=15, linewidth=0),
#                     grid_kws=dict(despine = False))
#    pp.fig.set_size_inches(14.4,14.4) #(14.4,14.4)
##    plt.rcParams['figure.figsize']=(10,10)
##    for ax in pp.diag_axes: ax.set_visible(False)
##    seaborn.axes_style()
#    plt.tight_layout()
#    plt.subplots_adjust(top=0.94,wspace=0.04, hspace=0.04, bottom=0.06,
#                        left= 0.04)
#    plt.suptitle('{} Principle Components'.format(n_comp))
#    pp.savefig('../plots/{0}_PC-seaborn_pairplots.png'.format(
#                n_comp), dpi=500)
#plot_pca(X, enable_plotting=True,labels_DF=labels_DF)
def seaborn_pairwise_plot(feat_df, color_index=None, feature_names=None,
                          n_comp=None):
    """Draw a seaborn pairplot of *feature_names* from *feat_df*, coloured by
    the *color_index* column, and save it as a PNG under ../plots/ named
    after *n_comp*."""
    grid = seaborn.pairplot(
        feat_df,
        vars=feature_names,
        hue=color_index,
        markers='.',
        plot_kws=dict(s=15, linewidth=0),
        grid_kws=dict(despine=False),
    )
    grid.fig.set_size_inches(14.4, 14.4)
    plt.tight_layout()
    plt.subplots_adjust(top=0.94, wspace=0.04, hspace=0.04, bottom=0.06,
                        left=0.04)
    plt.suptitle('{} Principle Components'.format(n_comp))
    grid.savefig('../plots/{0}_PC-seaborn_pairplots.png'.format(n_comp),
                 dpi=500)
def caa_plot_pairs(X, labels_DF, **kwargs):
    '''Pairwise plots of features. This is a custom plotting function that
    differs from standard pairwise scatter-matrix plots in libraries like
    seaborn or pandas's scatter_matrix(): only scatter plots - no
    diagonal, no redundant lower portion of the square.

    X : array-like of shape (n_samples, n_components); one subplot per
    component pair. labels_DF must contain a 'label' column. Figures are
    saved under ../plots/ rather than returned.

    NOTE(review): near-duplicate of plot_pca() above, and the 'y_true'
    branch references undefined names ``y_true_1s`` and ``data``
    (NameError if taken) — consider deduplicating and fixing.'''
    n_comp = X.shape[1]
    label = labels_DF.loc[:,'label']
    label_set = set(label)
    label_list = list(label_set)
    print(label_set)
    # -- plotting
    if (n_comp < 21):
        print('plotting scatter plots...')
        # All unordered pairs of component indices; one subplot per pair.
        comp_list = list(combinations(range(X.shape[1]), 2))
        if len(comp_list) < 20:
            fig, subplots = plt.subplots(comp_list[-1][0],round(
                                (len(comp_list)+0.5)/comp_list[-1][0]),
                                squeeze=True,figsize=(15,8.5))
        else:
            fig, subplots = plt.subplots(comp_list[-1][0]-2,round(
                                len(comp_list)/(comp_list[-1][0]-2)),
                                figsize=(15,8.5))
        subplots = subplots.ravel()
        #Make labels to indicate predicted value
        #-1 True positive
        #-2 False positive
        #1 True negative
        #2 False negative
        legend_dict = {label_list[0]:label_list[0],-2:'FP',label_list[1]:label_list[1],2:'FN'}
        if 'y_true' in labels_DF.columns:
            # NOTE(review): `y_true_1s` and `data` are undefined here (NameError).
            mask_TN = ((y_true_1s == data.loc[:,'label_svm']) & (y_true_1s == 1))
            labels_DF.loc[mask_TN, 'TPTNFPFN'] = 1
            mask_TP = ((y_true_1s == data['label_svm']) & (y_true_1s == -1))
            labels_DF.loc[mask_TP,'TPTNFPFN'] = -1
            mask_FN = ((y_true_1s != data['label_svm']) & (y_true_1s == 1))
            labels_DF.loc[mask_FN,'TPTNFPFN'] = -2
            mask_FP = ((y_true_1s != data['label_svm']) & (y_true_1s == -1))
            labels_DF.loc[mask_FP,'TPTNFPFN'] = 2
            colors = [y_true_1s, labels_DF.loc[:,'TPTNFPFN']]
            order = [1,-1,-2,2] #bottom to top order for overlaping points on plot.
#            print(labels_DF.groupby('TPTNFPFN').size())
        else:
            colors = [label]
            order = [1,-1] #bottom to top order for overlaping points on plot.
        def pairwise_scatter_plots(j, comp_list, zorders, grouped,legend_dict):
            # One scatter subplot per component pair; `color`, `colors_dict`,
            # `subplots` and `fig` are taken from the enclosing scope at call time.
            for i, n in enumerate(comp_list):
                for key, group in grouped:
                    group.plot(ax=subplots[i], kind='scatter', x=n[0], y=n[1],
                               label=key, color=colors_dict[key],s=0.7,
                               zorder=zorders[key],legend=False, alpha=0.6)
                subplots[i].tick_params(labelsize = 5, direction = 'in')
#                        labelbottom='off', axis='both', which='both',
#                        bottom='off', top='off')
#                subplots[i].xaxis.set_ticklabels([])
#                subplots[i].yaxis.set_ticklabels([])
                subplots[i].xaxis.label.set_visible(False)
                subplots[i].yaxis.label.set_visible(False)
                subplots[i].text(0.5,0.9,'{0} vs {1}'.format(n[0],n[1]),
                        horizontalalignment='center',transform=subplots[i].transAxes,
                        size=8)
            # -- adding figure legend
            lp = lambda i: plt.plot([],color=colors_dict[i],ms=np.sqrt(25), mec="none",
                                    label="{}".format(legend_dict[i]), ls="", marker="o")[0]
            handles = [lp(i) for i in np.unique(color)]
#            plt.figlegend(handles, bbox_to_anchor=(1.05, 0), loc='lower left',
#                          borderaxespad=0.)
            plt.figlegend(loc='upper left', borderaxespad=0., ncol=4)
            plt.suptitle('{} Principle Components'.format(n_comp))
            fig.tight_layout()
            plt.subplots_adjust(top=0.915, bottom=0.045, left=0.02, right=0.988,
                                hspace=0.2, wspace=0.1)
            fig.savefig('../plots/{0}_PC-scatter_plots_{1}.png'.format(
                        n_comp,j), dpi=600)
        PCAprojectedDF = pd.DataFrame(X)
        colors_dict = {-2:'blue', label_list[0]:'red', label_list[1]:'green', 2:'orange'}
        zorders = {-2:3, label_list[0]:2, label_list[1]:1, 2:4}
        for j, color in enumerate(colors): #Plot with
            PCAprojectedDF.loc[:,'label'] = color
            grouped = PCAprojectedDF.groupby('label')
            pairwise_scatter_plots(j=j, comp_list=comp_list, zorders=zorders,
                                   grouped=grouped,legend_dict=legend_dict)
        #Make one more plot with the True negatives plotted on top
#        PCAprojectedDF.loc[:,'label'] = labels_DF.loc[:,'TPTNFPFN']
        grouped = PCAprojectedDF.groupby('label')
        zorders = {-2:2, label_list[0]:4, label_list[1]:1, 2:3}
        pairwise_scatter_plots(j=2, comp_list=comp_list, zorders=zorders,
                               grouped=grouped,legend_dict=legend_dict)
        print('...scatter plots saved in ../plots/ folder')
| 48.377863
| 94
| 0.539803
| 1,638
| 12,675
| 4.008547
| 0.161172
| 0.034115
| 0.01919
| 0.012184
| 0.856229
| 0.852269
| 0.852269
| 0.852269
| 0.852269
| 0.839476
| 0
| 0.041935
| 0.313294
| 12,675
| 261
| 95
| 48.563218
| 0.712431
| 0.272584
| 0
| 0.858065
| 0
| 0
| 0.071083
| 0.012195
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.032258
| 0
| 0.064516
| 0.03871
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07e5d01143b9cd5a6b21c49228a085bb03d51e68
| 1,010
|
py
|
Python
|
problem_008/number.py
|
Faerbit/ProjectEuler
|
c66c88c3da33e3744924fd3d7ae62e596d70c288
|
[
"MIT"
] | null | null | null |
problem_008/number.py
|
Faerbit/ProjectEuler
|
c66c88c3da33e3744924fd3d7ae62e596d70c288
|
[
"MIT"
] | null | null | null |
problem_008/number.py
|
Faerbit/ProjectEuler
|
c66c88c3da33e3744924fd3d7ae62e596d70c288
|
[
"MIT"
] | null | null | null |
# 1000-digit integer constant, stored as a single int literal.
# NOTE(review): per the repository path (problem_008/number.py) this appears
# to be the Project Euler problem 8 input number — usage not visible here.
number = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
| 505
| 1,009
| 0.99604
| 2
| 1,010
| 503
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.993049
| 0.00297
| 1,010
| 1
| 1,010
| 1,010
| 0.005958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
07ed1e8b80f4db09f5d1750f1078235be20ce1b4
| 29,916
|
py
|
Python
|
idaes/generic_models/properties/core/reactions/tests/test_equilibrium_forms.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 112
|
2019-02-11T23:16:36.000Z
|
2022-03-23T20:59:57.000Z
|
idaes/generic_models/properties/core/reactions/tests/test_equilibrium_forms.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 621
|
2019-03-01T14:44:12.000Z
|
2022-03-31T19:49:25.000Z
|
idaes/generic_models/properties/core/reactions/tests/test_equilibrium_forms.py
|
carldlaird/idaes-pse
|
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
|
[
"RSA-MD"
] | 154
|
2019-02-01T23:46:33.000Z
|
2022-03-23T15:07:10.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for rate forms
"""
import pytest
from pyomo.environ import \
Block, ConcreteModel, Param, Var, units as pyunits, value
from idaes.generic_models.properties.core.generic.generic_reaction import \
GenericReactionParameterBlock, ConcentrationForm
from idaes.generic_models.properties.core.reactions.equilibrium_forms import *
from idaes.core import SolidPhase
from idaes.core.util.testing import PhysicalParameterTestBlock
from idaes.core.util.misc import add_object_reference
from idaes.core.util.math import safe_log, smooth_max
from idaes.core.util.exceptions import ConfigurationError
@pytest.mark.unit
def test_power_law_equil_no_order():
    """power_law_equil without explicit reaction orders.

    Orders should default to the stoichiometric coefficients for fluid-phase
    components, and to zero both for components absent from the reaction and
    for solid-phase components; the returned expression should contain only
    the non-zero-order (p1) terms.
    """
    m = ConcreteModel()

    # # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()

    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()

    m.thermo = m.pparams.build_state_block([1])

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": power_law_equil,
                   "concentration_form": ConcentrationForm.moleFraction}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].k_eq = Var(["r1"], initialize=1)

    power_law_equil.build_parameters(
        m.rparams.reaction_r1,
        m.rparams.config.equilibrium_reactions["r1"])

    # Check parameter construction
    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == -1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 0
    # Solids should have zero order, as they are excluded
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 0

    # Check reaction form; equality is verified by string comparison, so the
    # expected expression must be built with identical term ordering.
    rform = power_law_equil.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    assert str(rform) == str(
        m.rxn[1].k_eq["r1"] == (
            m.thermo[1].mole_frac_phase_comp["p1", "c1"] **
            m.rparams.reaction_r1.reaction_order["p1", "c1"] *
            m.thermo[1].mole_frac_phase_comp["p1", "c2"] **
            m.rparams.reaction_r1.reaction_order["p1", "c2"]))
@pytest.mark.unit
def test_power_law_equil_with_order():
    """power_law_equil with explicit reaction orders supplied via
    parameter_data.

    User-supplied orders (including for solid-phase and non-participating
    components) should override the stoichiometric defaults, and every
    component should then appear in the returned expression.
    """
    m = ConcreteModel()

    # # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()

    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()

    m.thermo = m.pparams.build_state_block([1])

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": power_law_equil,
                   "concentration_form": ConcentrationForm.moleFraction,
                   "parameter_data": {
                       "reaction_order": {("p1", "c1"): 1,
                                          ("p1", "c2"): 2,
                                          ("p2", "c1"): 3,
                                          ("p2", "c2"): 4,
                                          ("sol", "c1"): 5,
                                          ("sol", "c2"): 6}}}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].k_eq = Var(["r1"], initialize=1)

    power_law_equil.build_parameters(
        m.rparams.reaction_r1,
        m.rparams.config.equilibrium_reactions["r1"])

    # Check parameter construction: user-supplied orders taken verbatim.
    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == 1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 3
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 4
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 5
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 6

    # Check reaction form; compared by string, so term order matters.
    rform = power_law_equil.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    assert str(rform) == str(
        m.rxn[1].k_eq["r1"] == (
            m.thermo[1].mole_frac_phase_comp["p1", "c1"] **
            m.rparams.reaction_r1.reaction_order["p1", "c1"] *
            m.thermo[1].mole_frac_phase_comp["p1", "c2"] **
            m.rparams.reaction_r1.reaction_order["p1", "c2"] *
            m.thermo[1].mole_frac_phase_comp["p2", "c1"] **
            m.rparams.reaction_r1.reaction_order["p2", "c1"] *
            m.thermo[1].mole_frac_phase_comp["p2", "c2"] **
            m.rparams.reaction_r1.reaction_order["p2", "c2"] *
            m.thermo[1].mole_frac_phase_comp["sol", "c1"] **
            m.rparams.reaction_r1.reaction_order["sol", "c1"] *
            m.thermo[1].mole_frac_phase_comp["sol", "c2"] **
            m.rparams.reaction_r1.reaction_order["sol", "c2"]))
@pytest.mark.unit
def test_log_power_law_equil_no_order():
    """log_power_law_equil without explicit reaction orders.

    As in the power_law test, orders default to stoichiometry for fluid
    phases and zero elsewhere; the returned expression is the log-space sum
    of order-weighted log mole fractions for the non-zero-order terms.

    NOTE(review): unlike the power_law tests, build_parameters is not called
    explicitly here — presumably reaction_order is constructed during the
    GenericReactionParameterBlock build; confirm.
    """
    m = ConcreteModel()

    # # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()

    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()

    m.thermo = m.pparams.build_state_block([1])
    # Log-space mole fractions are not in the test thermo package; add them.
    m.thermo[1].log_mole_frac_phase_comp = Var(m.pparams._phase_component_set)

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": log_power_law_equil,
                   "concentration_form": ConcentrationForm.moleFraction}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].log_k_eq = Var(["r1"], initialize=1)

    # Check parameter construction
    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == -1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 0
    # Solids should have zero order, as they are excluded
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 0

    # Check reaction form; compared by string, so term order matters.
    rform = log_power_law_equil.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    assert str(rform) == str(
        m.rxn[1].log_k_eq["r1"] ==
        m.rparams.reaction_r1.reaction_order["p1", "c1"] *
        m.thermo[1].log_mole_frac_phase_comp["p1", "c1"] +
        m.rparams.reaction_r1.reaction_order["p1", "c2"] *
        m.thermo[1].log_mole_frac_phase_comp["p1", "c2"])
@pytest.mark.unit
def test_log_power_law_equil_with_order():
    """log_power_law_equil with explicit reaction orders via parameter_data.

    User-supplied orders override the defaults, and every component (fluid
    and solid) contributes a term to the log-space expression.

    NOTE(review): unlike the power_law tests, build_parameters is not called
    explicitly here — presumably reaction_order is constructed during the
    GenericReactionParameterBlock build; confirm.
    """
    m = ConcreteModel()

    # # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()

    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()

    m.thermo = m.pparams.build_state_block([1])
    # Log-space mole fractions are not in the test thermo package; add them.
    m.thermo[1].log_mole_frac_phase_comp = Var(m.pparams._phase_component_set)

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": log_power_law_equil,
                   "concentration_form": ConcentrationForm.moleFraction,
                   "parameter_data": {
                       "reaction_order": {("p1", "c1"): 1,
                                          ("p1", "c2"): 2,
                                          ("p2", "c1"): 3,
                                          ("p2", "c2"): 4,
                                          ("sol", "c1"): 5,
                                          ("sol", "c2"): 6}}}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].log_k_eq = Var(["r1"], initialize=1)

    # Check parameter construction: user-supplied orders taken verbatim.
    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == 1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 3
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 4
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 5
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 6

    # Check reaction form; compared by string, so term order matters.
    rform = log_power_law_equil.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    assert str(rform) == str(
        m.rxn[1].log_k_eq["r1"] ==
        m.rparams.reaction_r1.reaction_order["p1", "c1"] *
        m.thermo[1].log_mole_frac_phase_comp["p1", "c1"] +
        m.rparams.reaction_r1.reaction_order["p1", "c2"] *
        m.thermo[1].log_mole_frac_phase_comp["p1", "c2"] +
        m.rparams.reaction_r1.reaction_order["p2", "c1"] *
        m.thermo[1].log_mole_frac_phase_comp["p2", "c1"] +
        m.rparams.reaction_r1.reaction_order["p2", "c2"] *
        m.thermo[1].log_mole_frac_phase_comp["p2", "c2"] +
        m.rparams.reaction_r1.reaction_order["sol", "c1"] *
        m.thermo[1].log_mole_frac_phase_comp["sol", "c1"] +
        m.rparams.reaction_r1.reaction_order["sol", "c2"] *
        m.thermo[1].log_mole_frac_phase_comp["sol", "c2"])
@pytest.mark.unit
def test_solubility_no_order():
    """solubility_product without explicit reaction orders.

    Orders default to stoichiometry for fluid phases and zero for solids; a
    smoothing parameter eps (default 1e-4) is created, and the returned
    constraint combines the solid flow and the solubility residual through
    smooth_max.

    NOTE(review): unlike the power_law tests, build_parameters is not called
    explicitly here — presumably parameter construction happens during the
    GenericReactionParameterBlock build; confirm.
    """
    m = ConcreteModel()

    # # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()

    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()

    m.thermo = m.pparams.build_state_block([1])

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": solubility_product,
                   "concentration_form": ConcentrationForm.moleFraction}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].k_eq = Var(["r1"], initialize=1)

    # Check parameter construction
    assert isinstance(m.rparams.reaction_r1.eps, Param)
    assert value(m.rparams.reaction_r1.eps) == 1e-4
    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == -1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 0
    # Solids should have zero order, as they are excluded
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 0

    # Check reaction form; compared by string, so the expected expression
    # must be built with identical structure.
    rform = solubility_product.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    # s: total solid-phase molar flow made dimensionless; Q: solubility residual
    s = ((m.thermo[1].flow_mol_phase_comp["sol", "c1"] +
          m.thermo[1].flow_mol_phase_comp["sol", "c2"]) /
         (pyunits.mol/pyunits.s))
    Q = (m.rxn[1].k_eq["r1"] -
         (m.thermo[1].mole_frac_phase_comp["p1", "c1"] **
          m.rparams.reaction_r1.reaction_order["p1", "c1"] *
          m.thermo[1].mole_frac_phase_comp["p1", "c2"] **
          m.rparams.reaction_r1.reaction_order["p1", "c2"]))

    assert str(rform) == str(
        s - smooth_max(0, s-Q, m.rparams.reaction_r1.eps) == 0)
@pytest.mark.unit
def test_solubility_product_with_order():
    """Solubility product with explicit reaction_order parameter_data.

    User-supplied reaction orders (including entries for solid-phase
    components) must override the stoichiometry-derived defaults, and the
    returned constraint must use the smooth-max complementarity form.
    """
    m = ConcreteModel()

    # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()
    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()
    m.thermo = m.pparams.build_state_block([1])

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": solubility_product,
                   "concentration_form": ConcentrationForm.moleFraction,
                   "parameter_data": {
                       "reaction_order": {("p1", "c1"): 1,
                                          ("p1", "c2"): 2,
                                          ("p2", "c1"): 3,
                                          ("p2", "c2"): 4,
                                          ("sol", "c1"): 5,
                                          ("sol", "c2"): 6}}}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].k_eq = Var(["r1"], initialize=1)

    # Check parameter construction
    assert isinstance(m.rparams.reaction_r1.eps, Param)
    assert value(m.rparams.reaction_r1.eps) == 1e-4

    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    # All six orders come straight from parameter_data, including the solid
    # entries (unlike the defaults, which zero out solids).
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == 1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 3
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 4
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 5
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 6

    # Check reaction form
    rform = solubility_product.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    # s: total solid-species flow, made dimensionless by dividing by mol/s
    s = ((m.thermo[1].flow_mol_phase_comp["sol", "c1"] +
          m.thermo[1].flow_mol_phase_comp["sol", "c2"]) /
         (pyunits.mol/pyunits.s))
    # Q: k_eq minus the product of mole fractions raised to their orders
    # (here including the p2 and sol terms, since orders were user-supplied)
    Q = (m.rxn[1].k_eq["r1"] -
         (m.thermo[1].mole_frac_phase_comp["p1", "c1"] **
          m.rparams.reaction_r1.reaction_order["p1", "c1"] *
          m.thermo[1].mole_frac_phase_comp["p1", "c2"] **
          m.rparams.reaction_r1.reaction_order["p1", "c2"] *
          m.thermo[1].mole_frac_phase_comp["p2", "c1"] **
          m.rparams.reaction_r1.reaction_order["p2", "c1"] *
          m.thermo[1].mole_frac_phase_comp["p2", "c2"] **
          m.rparams.reaction_r1.reaction_order["p2", "c2"] *
          m.thermo[1].mole_frac_phase_comp["sol", "c1"] **
          m.rparams.reaction_r1.reaction_order["sol", "c1"] *
          m.thermo[1].mole_frac_phase_comp["sol", "c2"] **
          m.rparams.reaction_r1.reaction_order["sol", "c2"]))
    # Complementarity via smooth max: s - max(0, s-Q) == 0
    # (compared via str() because Pyomo expressions are not directly
    # comparable for structural equality)
    assert str(rform) == str(
        s - smooth_max(0, s-Q, m.rparams.reaction_r1.eps) == 0)
@pytest.mark.unit
def test_solubility_no_solids():
    """return_expression must raise when the reaction involves no solids."""
    model = ConcreteModel()

    # Test thermo package; deliberately no solid phase is added here.
    model.pparams = PhysicalParameterTestBlock()
    model.thermo = model.pparams.build_state_block([1])

    # Reaction package with a precipitation reaction that (incorrectly)
    # references only fluid-phase components.
    model.rparams = GenericReactionParameterBlock(default={
        "property_package": model.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2},
                   "equilibrium_form": solubility_product,
                   "concentration_form": ConcentrationForm.moleFraction}}})

    # Minimal stand-in for a reaction block.
    model.rxn = Block([1])
    add_object_reference(
        model.rxn[1], "phase_component_set",
        model.pparams._phase_component_set)
    add_object_reference(model.rxn[1], "params", model.rparams)
    add_object_reference(model.rxn[1], "state_ref", model.thermo[1])
    model.rxn[1].k_eq = Var(["r1"], initialize=1)

    solubility_product.build_parameters(
        model.rparams.reaction_r1,
        model.rparams.config.equilibrium_reactions["r1"])

    # Building the constraint expression should fail with a clear message.
    expected_message = ("did not find a solid phase component for "
                        "precipitation reaction r1. This is likely due to the "
                        "reaction configuration.")
    with pytest.raises(ConfigurationError, match=expected_message):
        solubility_product.return_expression(
            model.rxn[1], model.rparams.reaction_r1, "r1", 300)
@pytest.mark.unit
def test_log_solubility_no_order():
    """Log-form solubility product with default (stoichiometric) orders.

    With no parameter_data given, fluid-phase orders default to the
    stoichiometric coefficients and solid-phase orders default to zero;
    the constraint is built in log space with the smooth-max form.
    """
    m = ConcreteModel()

    # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()
    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()
    m.thermo = m.pparams.build_state_block([1])

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": log_solubility_product,
                   "concentration_form": ConcentrationForm.moleFraction}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].log_k_eq = Var(["r1"], initialize=1)
    # The log form needs log mole fractions, which the test thermo package
    # does not define, so add them here by hand.
    m.thermo[1].log_mole_frac_phase_comp = Var(m.pparams._phase_component_set,
                                               initialize=1)

    # Check parameter construction
    assert isinstance(m.rparams.reaction_r1.eps, Param)
    assert value(m.rparams.reaction_r1.eps) == 1e-4

    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    # Fluid-phase orders default to the stoichiometric coefficients;
    # components not in the stoichiometry get zero.
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == -1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 0
    # Solids should have zero order, as they are excluded
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 0
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 0

    # Check reaction form
    rform = log_solubility_product.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    # s: total solid-species flow, made dimensionless by dividing by mol/s
    s = ((m.thermo[1].flow_mol_phase_comp["sol", "c1"] +
          m.thermo[1].flow_mol_phase_comp["sol", "c2"]) /
         (pyunits.mol/pyunits.s))
    # Q: log k_eq minus sum(order * log x) — only the non-zero-order
    # fluid-phase terms appear
    Q = (m.rxn[1].log_k_eq["r1"] -
         (m.thermo[1].log_mole_frac_phase_comp["p1", "c1"] *
          m.rparams.reaction_r1.reaction_order["p1", "c1"] +
          m.thermo[1].log_mole_frac_phase_comp["p1", "c2"] *
          m.rparams.reaction_r1.reaction_order["p1", "c2"]))
    # Complementarity via smooth max, compared via str() since Pyomo
    # expressions are not directly comparable
    assert str(rform) == str(
        s - smooth_max(0, s-Q, m.rparams.reaction_r1.eps) == 0)
@pytest.mark.unit
def test_log_solubility_product_with_order():
    """Log-form solubility product with explicit reaction_order values.

    User-supplied orders (including solid-phase entries) must override the
    defaults, and all six terms must appear in the log-space constraint.
    """
    m = ConcreteModel()

    # Add a test thermo package for validation
    m.pparams = PhysicalParameterTestBlock()
    # Add a solid phase for testing
    m.pparams.sol = SolidPhase()
    m.thermo = m.pparams.build_state_block([1])

    # Create a dummy reaction parameter block
    m.rparams = GenericReactionParameterBlock(default={
        "property_package": m.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2,
                                     ("sol", "c1"): -3,
                                     ("sol", "c2"): 4},
                   "equilibrium_form": log_solubility_product,
                   "concentration_form": ConcentrationForm.moleFraction,
                   "parameter_data": {
                       "reaction_order": {("p1", "c1"): 1,
                                          ("p1", "c2"): 2,
                                          ("p2", "c1"): 3,
                                          ("p2", "c2"): 4,
                                          ("sol", "c1"): 5,
                                          ("sol", "c2"): 6}}}}})

    # Create a dummy state block
    m.rxn = Block([1])
    add_object_reference(
        m.rxn[1], "phase_component_set", m.pparams._phase_component_set)
    add_object_reference(m.rxn[1], "params", m.rparams)
    add_object_reference(m.rxn[1], "state_ref", m.thermo[1])

    m.rxn[1].log_k_eq = Var(["r1"], initialize=1)
    # The log form needs log mole fractions, which the test thermo package
    # does not define, so add them here by hand.
    m.thermo[1].log_mole_frac_phase_comp = Var(m.pparams._phase_component_set,
                                               initialize=1)

    # Check parameter construction
    assert isinstance(m.rparams.reaction_r1.eps, Param)
    assert value(m.rparams.reaction_r1.eps) == 1e-4

    assert isinstance(m.rparams.reaction_r1.reaction_order, Var)
    assert len(m.rparams.reaction_r1.reaction_order) == 6
    # All six orders come straight from parameter_data, including solids.
    assert m.rparams.reaction_r1.reaction_order["p1", "c1"].value == 1
    assert m.rparams.reaction_r1.reaction_order["p1", "c2"].value == 2
    assert m.rparams.reaction_r1.reaction_order["p2", "c1"].value == 3
    assert m.rparams.reaction_r1.reaction_order["p2", "c2"].value == 4
    assert m.rparams.reaction_r1.reaction_order["sol", "c1"].value == 5
    assert m.rparams.reaction_r1.reaction_order["sol", "c2"].value == 6

    # Check reaction form
    rform = log_solubility_product.return_expression(
        m.rxn[1], m.rparams.reaction_r1, "r1", 300)

    # s: total solid-species flow, made dimensionless by dividing by mol/s
    s = ((m.thermo[1].flow_mol_phase_comp["sol", "c1"] +
          m.thermo[1].flow_mol_phase_comp["sol", "c2"]) /
         (pyunits.mol/pyunits.s))
    # Q: log k_eq minus sum(order * log x) over all six phase-component
    # pairs, since the user supplied orders for every pair
    Q = (m.rxn[1].log_k_eq["r1"] -
         (m.thermo[1].log_mole_frac_phase_comp["p1", "c1"] *
          m.rparams.reaction_r1.reaction_order["p1", "c1"] +
          m.thermo[1].log_mole_frac_phase_comp["p1", "c2"] *
          m.rparams.reaction_r1.reaction_order["p1", "c2"] +
          m.thermo[1].log_mole_frac_phase_comp["p2", "c1"] *
          m.rparams.reaction_r1.reaction_order["p2", "c1"] +
          m.thermo[1].log_mole_frac_phase_comp["p2", "c2"] *
          m.rparams.reaction_r1.reaction_order["p2", "c2"] +
          m.thermo[1].log_mole_frac_phase_comp["sol", "c1"] *
          m.rparams.reaction_r1.reaction_order["sol", "c1"] +
          m.thermo[1].log_mole_frac_phase_comp["sol", "c2"] *
          m.rparams.reaction_r1.reaction_order["sol", "c2"]))
    # Complementarity via smooth max, compared via str() since Pyomo
    # expressions are not directly comparable
    assert str(rform) == str(
        s - smooth_max(0, s-Q, m.rparams.reaction_r1.eps) == 0)
@pytest.mark.unit
def test_log_solubility_no_solids():
    """Log form must also raise when the reaction involves no solids."""
    model = ConcreteModel()

    # Test thermo package; deliberately no solid phase is added here.
    model.pparams = PhysicalParameterTestBlock()
    model.thermo = model.pparams.build_state_block([1])

    # Reaction package with a precipitation reaction that (incorrectly)
    # references only fluid-phase components.
    model.rparams = GenericReactionParameterBlock(default={
        "property_package": model.pparams,
        "base_units": {"time": pyunits.s,
                       "mass": pyunits.kg,
                       "amount": pyunits.mol,
                       "length": pyunits.m,
                       "temperature": pyunits.K},
        "equilibrium_reactions": {
            "r1": {"stoichiometry": {("p1", "c1"): -1,
                                     ("p1", "c2"): 2},
                   "equilibrium_form": log_solubility_product,
                   "concentration_form": ConcentrationForm.moleFraction}}})

    # Minimal stand-in for a reaction block.
    model.rxn = Block([1])
    add_object_reference(
        model.rxn[1], "phase_component_set",
        model.pparams._phase_component_set)
    add_object_reference(model.rxn[1], "params", model.rparams)
    add_object_reference(model.rxn[1], "state_ref", model.thermo[1])
    model.rxn[1].log_k_eq = Var(["r1"], initialize=1)
    # Log mole fractions are not provided by the test thermo package.
    model.thermo[1].log_mole_frac_phase_comp = Var(
        model.pparams._phase_component_set, initialize=1)

    log_solubility_product.build_parameters(
        model.rparams.reaction_r1,
        model.rparams.config.equilibrium_reactions["r1"])

    # Building the constraint expression should fail with a clear message.
    expected_message = ("did not find a solid phase component for "
                        "precipitation reaction r1. This is likely due to the "
                        "reaction configuration.")
    with pytest.raises(ConfigurationError, match=expected_message):
        log_solubility_product.return_expression(
            model.rxn[1], model.rparams.reaction_r1, "r1", 300)
| 42.314003
| 81
| 0.577517
| 3,652
| 29,916
| 4.529847
| 0.054491
| 0.070604
| 0.117996
| 0.132745
| 0.95019
| 0.95019
| 0.945838
| 0.944569
| 0.93526
| 0.93526
| 0
| 0.035798
| 0.273533
| 29,916
| 706
| 82
| 42.373938
| 0.725395
| 0.087946
| 0
| 0.921415
| 0
| 0
| 0.103714
| 0.007767
| 0
| 0
| 0
| 0
| 0.157171
| 1
| 0.019646
| false
| 0
| 0.017682
| 0
| 0.037328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed18a151284774c5c9888e170f90ee3d65f5297a
| 9,397
|
py
|
Python
|
src/privacyraven/models/pytorch.py
|
pwang00/PrivacyRaven
|
50d18f403f92c4b6ddb14be438584b9746f6bad6
|
[
"Apache-2.0"
] | 121
|
2020-09-01T21:55:22.000Z
|
2022-03-28T11:38:43.000Z
|
src/privacyraven/models/pytorch.py
|
pwang00/PrivacyRaven
|
50d18f403f92c4b6ddb14be438584b9746f6bad6
|
[
"Apache-2.0"
] | 49
|
2020-09-08T19:02:16.000Z
|
2022-02-11T20:52:40.000Z
|
src/privacyraven/models/pytorch.py
|
pwang00/PrivacyRaven
|
50d18f403f92c4b6ddb14be438584b9746f6bad6
|
[
"Apache-2.0"
] | 16
|
2020-09-09T00:45:30.000Z
|
2022-02-25T01:53:53.000Z
|
"""
These models will be depreciated soon. Use at your own risk.
"""
import os
import numpy as np
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets, models, transforms
from tqdm import tqdm
class ThreeLayerClassifier(pl.LightningModule):
    """Three-layer fully connected classifier.

    ``hparams`` must provide:
        input_size: flattened size of one input sample (e.g. 784 for MNIST)
        targets: number of output classes
    """

    def __init__(self, hparams):
        """Builds the three linear layers from the given hyperparameters."""
        super().__init__()
        # NOTE(review): plain assignment to self.hparams is rejected by newer
        # pytorch-lightning releases (use save_hyperparameters instead);
        # kept as-is for compatibility with the version this module targets.
        self.hparams = hparams
        self.layer_1 = torch.nn.Linear(self.hparams["input_size"], 128)
        self.layer_2 = torch.nn.Linear(128, 256)
        self.layer_3 = torch.nn.Linear(256, self.hparams["targets"])

    def forward(self, x):
        """Establishes the neural network's forward pass.

        Parameters:
            x: A Torch tensor of input data; all dimensions after the batch
               dimension are flattened to hparams["input_size"]
        Returns:
            Log-probability tensor over classes, (batch_size, targets)"""
        batch_size = x.size(0)
        # Flatten everything except the batch dimension,
        # e.g. (batch_size, 1, 28, 28) -> (batch_size, 784).
        x = x.view(batch_size, -1)
        x = torch.relu(self.layer_1(x))
        x = torch.relu(self.layer_2(x))
        # Output layer: (batch_size, 256) -> (batch_size, hparams["targets"])
        # (the original comment hard-coded 10 here, which only holds for
        # MNIST-like data).
        x = self.layer_3(x)
        return torch.log_softmax(x, dim=1)

    def cross_entropy_loss(self, logits, labels):
        """Return the cross-entropy loss between predictions and labels.

        Parameters:
            logits: A Torch tensor of model output predictions
            labels: A Torch tensor of true class indices
        Returns:
            Scalar cross-entropy loss tensor"""
        return F.cross_entropy(logits, labels)

    def training_step(self, train_batch, batch_idx):
        """Runs one training batch through the model and computes the loss.

        Parameters:
            train_batch: (inputs, labels) tuple from the training dataloader
            batch_idx: integer index of this batch within the epoch
        Returns:
            Dict with the loss and training logs"""
        x, y = train_batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        return {"loss": loss, "log": {"train_loss": loss}}

    def validation_step(self, val_batch, batch_idx):
        """Runs one validation batch and records loss and accuracy counts.

        Parameters:
            val_batch: (inputs, labels) tuple from the validation dataloader
            batch_idx: integer index of this batch within the epoch
        Returns:
            Dict with the loss, number of correct predictions, and batch size"""
        x, y = val_batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        targets_hat = torch.argmax(logits, dim=1)
        n_correct_pred = torch.sum(y == targets_hat).item()
        return {"val_loss": loss,
                "n_correct_pred": n_correct_pred,
                "n_pred": len(x)}

    def validation_epoch_end(self, outputs):
        """Aggregates per-batch validation results at the end of the epoch.

        Parameters:
            outputs: list of validation_step result dicts, one per batch
        Returns:
            Dict with the average validation loss and tensorboard logs"""
        avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
        tensorboard_logs = {"val_loss": avg_loss}
        return {"avg_val_loss": avg_loss, "log": tensorboard_logs}

    def configure_optimizers(self):
        """Sets up the optimization scheme (Adam with a fixed 1e-3 LR)."""
        return torch.optim.Adam(self.parameters(), lr=1e-3)

    def test_step(self, batch, batch_idx):
        """Runs one test batch and records loss and accuracy counts.

        Parameters:
            batch: (inputs, labels) tuple from the test dataloader
            batch_idx: integer index of this batch within the epoch
        Returns:
            Dict with the loss, number of correct predictions, and batch size"""
        x, y = batch
        y_hat = self(x)
        targets_hat = torch.argmax(y_hat, dim=1)
        n_correct_pred = torch.sum(y == targets_hat).item()
        return {
            "test_loss": F.cross_entropy(y_hat, y),
            "n_correct_pred": n_correct_pred,
            "n_pred": len(x),
        }

    def test_epoch_end(self, outputs):
        """Aggregates per-batch test results at the end of the epoch.

        Parameters:
            outputs: list of test_step result dicts, one per batch
        Returns:
            Dict with the average test loss and tensorboard logs"""
        avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
        tensorboard_logs = {"test_loss": avg_loss}
        return {"avg_test_loss": avg_loss, "log": tensorboard_logs}
class ImagenetTransferLearning(pl.LightningModule):
    """Classifier with a pretrained MobileNetV2 feature extractor.

    ``hparams`` must provide:
        targets: number of output classes
        learning_rate: Adam learning rate
    """

    def __init__(self, hparams):
        """Create a classifier with a pretrained MobileNet backbone."""
        super().__init__()
        # NOTE(review): plain assignment to self.hparams is rejected by newer
        # pytorch-lightning releases; kept for compatibility.
        self.hparams = hparams
        self.feature_extractor = models.mobilenet_v2(pretrained=True)
        # NOTE(review): eval() only switches batch-norm/dropout behaviour; it
        # does not freeze the backbone weights (no requires_grad_(False)),
        # and the trainer may flip the module back to train mode. Confirm
        # whether the backbone is really meant to stay trainable.
        self.feature_extractor.eval()
        # Classifier head on top of MobileNetV2's 1000-dim output.
        self.layer_1 = torch.nn.Linear(1000, 128)
        self.layer_2 = torch.nn.Linear(128, 256)
        self.layer_3 = torch.nn.Linear(256, hparams["targets"])

    def forward(self, x):
        """Establishes the neural network's forward pass.

        Parameters:
            x: A Torch tensor of input images
        Returns:
            Log-probability tensor over classes, (batch_size, targets)
        """
        # Backbone produces (batch_size, 1000) features; the unused
        # ``batch_size, hidden = x.size()`` unpacking was removed.
        x = self.feature_extractor(x)
        x = torch.relu(self.layer_1(x))
        x = torch.relu(self.layer_2(x))
        x = self.layer_3(x)
        return torch.log_softmax(x, dim=1)

    def nll_loss(self, logits, labels):
        """Return the negative log-likelihood loss.

        Parameters:
            logits: A Torch tensor of log-probability predictions
            labels: A Torch tensor of true class indices
        Returns:
            Scalar NLL loss tensor"""
        return F.nll_loss(logits, labels)

    def training_step(self, train_batch, batch_idx):
        """Runs one training batch through the model and computes the loss.

        Parameters:
            train_batch: (inputs, labels) tuple from the training dataloader
            batch_idx: integer index of this batch within the epoch
        Returns:
            Dict with the loss and training logs"""
        x, y = train_batch
        logits = self.forward(x)
        loss = self.nll_loss(logits, y)
        return {"loss": loss, "log": {"train_loss": loss}}

    def validation_step(self, val_batch, batch_idx):
        """Runs one validation batch and records loss and accuracy counts.

        Parameters:
            val_batch: (inputs, labels) tuple from the validation dataloader
            batch_idx: integer index of this batch within the epoch
        Returns:
            Dict with the loss, number of correct predictions, and batch size"""
        x, y = val_batch
        logits = self.forward(x)
        loss = self.nll_loss(logits, y)
        targets_hat = torch.argmax(logits, dim=1)
        n_correct_pred = torch.sum(y == targets_hat).item()
        return {"val_loss": loss,
                "n_correct_pred": n_correct_pred,
                "n_pred": len(x)}

    def validation_epoch_end(self, outputs):
        """Aggregates per-batch validation results at the end of the epoch.

        Parameters:
            outputs: list of validation_step result dicts, one per batch
        Returns:
            Dict with the average validation loss and tensorboard logs"""
        avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
        tensorboard_logs = {"val_loss": avg_loss}
        return {"avg_val_loss": avg_loss, "log": tensorboard_logs}

    def configure_optimizers(self):
        """Sets up the optimization scheme (Adam with a configurable LR)."""
        optimizer = torch.optim.Adam(
            self.parameters(), lr=self.hparams["learning_rate"]
        )
        return optimizer

    def test_step(self, batch, batch_idx):
        """Runs one test batch and records loss and accuracy counts.

        Parameters:
            batch: (inputs, labels) tuple from the test dataloader
            batch_idx: integer index of this batch within the epoch
        Returns:
            Dict with the loss, number of correct predictions, and batch size"""
        x, y = batch
        y_hat = self(x)
        targets_hat = torch.argmax(y_hat, dim=1)
        n_correct_pred = torch.sum(y == targets_hat).item()
        return {
            "test_loss": F.nll_loss(y_hat, y),
            "n_correct_pred": n_correct_pred,
            "n_pred": len(x),
        }

    def test_epoch_end(self, outputs):
        """Aggregates per-batch test results at the end of the epoch.

        Parameters:
            outputs: list of test_step result dicts, one per batch
        Returns:
            Dict with the average test loss and tensorboard logs"""
        avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
        tensorboard_logs = {"test_loss": avg_loss}
        return {"avg_test_loss": avg_loss, "log": tensorboard_logs}
| 35.730038
| 92
| 0.624029
| 1,240
| 9,397
| 4.578226
| 0.133871
| 0.013211
| 0.025366
| 0.024661
| 0.830897
| 0.818038
| 0.739123
| 0.73102
| 0.7127
| 0.696495
| 0
| 0.012842
| 0.287326
| 9,397
| 262
| 93
| 35.866412
| 0.834851
| 0.385655
| 0
| 0.711864
| 0
| 0
| 0.061284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152542
| false
| 0
| 0.067797
| 0
| 0.372881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed269e6a9d12d5c62fb10b7968a872bb3eb71e6b
| 6,280
|
py
|
Python
|
scielomanager/accounts/tests.py
|
jamilatta/scielo-manager
|
d506c6828ba9b1089faa164bc42ba29a0f228e61
|
[
"BSD-2-Clause"
] | null | null | null |
scielomanager/accounts/tests.py
|
jamilatta/scielo-manager
|
d506c6828ba9b1089faa164bc42ba29a0f228e61
|
[
"BSD-2-Clause"
] | null | null | null |
scielomanager/accounts/tests.py
|
jamilatta/scielo-manager
|
d506c6828ba9b1089faa164bc42ba29a0f228e61
|
[
"BSD-2-Clause"
] | null | null | null |
# coding:utf-8
"""
Use this module to write functional tests for the view-functions, only!
"""
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django_factory_boy import auth
from journalmanager.tests import modelfactories
from journalmanager.tests.tests_forms import _makePermission
HASH_FOR_123 = 'sha1$93d45$5f366b56ce0444bfea0f5634c7ce8248508c9799'
class LoginForm(WebTest):
    """Functional tests for the login view."""

    def _submit_credentials(self, username, password):
        """Fill in and submit the login form; return the raw response."""
        form = self.app.get(reverse('journalmanager.user_login')).forms[0]
        form['username'] = username
        form['password'] = password
        return form.submit()

    def _assert_login_rejected(self, response):
        """Assert the login attempt was refused and the form redisplayed."""
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn('not a valid username or password', response.body)
        self.assertTemplateUsed(response, 'registration/login.html')

    def test_the_user_must_provide_his_credentials(self):
        response = self._submit_credentials('', '')
        self._assert_login_rejected(response)

    def test_right_username_and_wrong_password(self):
        # The factory call is kept for its side effect of creating the user.
        auth.UserF(username='foo',
                   password=HASH_FOR_123,
                   is_active=True)
        response = self._submit_credentials('foo', 'baz')
        self._assert_login_rejected(response)

    def test_wrong_username_and_right_password(self):
        auth.UserF(username='foo',
                   password=HASH_FOR_123,
                   is_active=True)
        response = self._submit_credentials('fuu', '123')
        self._assert_login_rejected(response)

    def test_right_username_and_right_password(self):
        user = auth.UserF(username='foo',
                          password=HASH_FOR_123,
                          is_active=True)
        collection = modelfactories.CollectionFactory.create()
        collection.add_user(user)
        response = self._submit_credentials('foo', '123').follow()
        self.assertTemplateUsed(response, 'journalmanager/home_journal.html')

    def test_right_username_and_right_password_for_inactive_user(self):
        user = auth.UserF(username='foo',
                          password=HASH_FOR_123,
                          is_active=False)
        collection = modelfactories.CollectionFactory.create()
        collection.add_user(user)
        # Inactive users must be rejected even with correct credentials.
        response = self._submit_credentials('foo', '123')
        self._assert_login_rejected(response)

    def test_redirect_to_restricted_page_after_successful_login(self):
        user = auth.UserF(is_active=True)
        perm = _makePermission(perm='list_journal', model='journal')
        user.user_permissions.add(perm)
        collection = modelfactories.CollectionFactory.create()
        collection.add_user(user)
        page = self.app.get(reverse('journal.index'), user=user)
        page.mustcontain('no items')
class UserMyAccountTests(WebTest):
    """Functional tests for the my-account and password-change views."""

    def test_logged_user_access_my_account(self):
        # An authenticated user can reach the my-account page.
        user = auth.UserF(is_active=True)
        response = self.app.get(reverse('journalmanager.my_account'), user=user)
        self.assertTemplateUsed(response, 'accounts/my_account.html')

    def test_not_logged_user_acess_my_account(self):
        # Anonymous users are redirected to the login page.
        response = self.app.get(reverse('journalmanager.my_account')).follow()
        self.assertTemplateUsed(response, 'registration/login.html')

    def test_logged_user_access_user_configuration(self):
        # An authenticated user can reach the password-change form.
        user = auth.UserF(is_active=True)
        response = self.app.get(reverse('journalmanager.password_change'), user=user)
        self.assertTemplateUsed(response, 'accounts/password_change.html')

    def test_logged_user_change_password_right_password(self):
        # Correct current password: the change succeeds and the user lands
        # back on the my-account page.
        # NOTE(review): is_active=False looks inconsistent with a successful
        # change scenario — confirm whether this should be True.
        user = auth.UserF(username='foo',
                          password=HASH_FOR_123,
                          is_active=False)
        form = self.app.get(reverse('journalmanager.password_change'), user=user).forms['chg_pwd']
        form['password'] = 123
        form['new_password'] = 321
        form['new_password_again'] = 321
        response = form.submit().follow()
        self.assertTemplateUsed(response, 'accounts/my_account.html')

    def test_logged_user_change_password_wrong_password(self):
        # Wrong current password: the change form is redisplayed.
        user = auth.UserF(username='foo',
                          password=HASH_FOR_123,
                          is_active=False)
        form = self.app.get(reverse('journalmanager.password_change'), user=user).forms['chg_pwd']
        form['password'] = 1234
        form['new_password'] = 321
        form['new_password_again'] = 321
        response = form.submit().follow()
        self.assertTemplateUsed(response, 'accounts/password_change.html')

    def test_logged_user_change_password_wrong_new_password(self):
        # New password and confirmation do not match: form redisplayed.
        user = auth.UserF(username='foo',
                          password=HASH_FOR_123,
                          is_active=False)
        form = self.app.get(reverse('journalmanager.password_change'), user=user).forms['chg_pwd']
        form['password'] = 123
        form['new_password'] = 321123
        form['new_password_again'] = 321
        response = form.submit().follow()
        self.assertTemplateUsed(response, 'accounts/password_change.html')

    def test_logged_user_change_password_wrong_new_password_again(self):
        # Confirmation differs from the new password: form redisplayed.
        user = auth.UserF(username='foo',
                          password=HASH_FOR_123,
                          is_active=False)
        form = self.app.get(reverse('journalmanager.password_change'), user=user).forms['chg_pwd']
        form['password'] = 123
        form['new_password'] = 321
        form['new_password_again'] = 321321
        response = form.submit().follow()
        self.assertTemplateUsed(response, 'accounts/password_change.html')
| 35.681818
| 98
| 0.651433
| 699
| 6,280
| 5.632332
| 0.160229
| 0.023114
| 0.03302
| 0.056134
| 0.811786
| 0.80188
| 0.799848
| 0.783338
| 0.720599
| 0.695199
| 0
| 0.024441
| 0.237739
| 6,280
| 175
| 99
| 35.885714
| 0.797995
| 0.013535
| 0
| 0.692308
| 0
| 0
| 0.187975
| 0.111039
| 0
| 0
| 0
| 0
| 0.136752
| 1
| 0.111111
| false
| 0.393162
| 0.042735
| 0
| 0.17094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ed29d6f281e9c2db2e160dc6f97b79842b88bc07
| 30,261
|
py
|
Python
|
shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py
|
isabella232/shoptimizer
|
3c3bfc200c4b1cd6b03c3e408bbbfcbda9c38ed4
|
[
"Apache-2.0"
] | 27
|
2020-08-21T05:59:29.000Z
|
2022-03-30T17:26:44.000Z
|
shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py
|
isabella232/shoptimizer
|
3c3bfc200c4b1cd6b03c3e408bbbfcbda9c38ed4
|
[
"Apache-2.0"
] | null | null | null |
shoptimizer_api/optimizers_builtin/title_word_order_optimizer_test.py
|
isabella232/shoptimizer
|
3c3bfc200c4b1cd6b03c3e408bbbfcbda9c38ed4
|
[
"Apache-2.0"
] | 20
|
2020-09-14T08:38:11.000Z
|
2022-03-13T22:37:40.000Z
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for title_word_order_optimizer."""
from absl.testing import parameterized
import unittest.mock as mock
from optimizers_builtin import title_word_order_optimizer
from test_data import requests_bodies
from typing import List, Dict
from util import app_util
import constants
# GPC ID IS 201
_PROPER_GPC_CATEGORY_EN = 'Apparel & Accessories > Jewelry > Watches'
# GPC ID is 201
_PROPER_GPC_CATEGORY_JA = ('ファッション・アクセサリー > '
'ジュエリー > 腕時計')
# GPC ID is 5598
_GPC_CATEGORY_LEVEL_4_JA = ('ファッション・アクセサリー > '
'衣料品 > アウター > '
'コート・ジャケット')
@mock.patch('util.promo_text_remover._PROMO_TEXT_REMOVAL_CONFIG_FILE_NAME',
'promo_text_removal_optimizer_config_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._GPC_STRING_TO_ID_MAPPING_CONFIG_FILE_NAME',
'gpc_string_to_id_mapping_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_CONFIG_FILE_NAME',
'title_word_order_config_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_BLOCKLIST_FILE_NAME',
'title_word_order_blocklist_{}_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_OPTIONS_FILE_NAME',
'title_word_order_options_test')
@mock.patch(
'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_DICTIONARY_FILE_NAME',
'title_word_order_dictionary_test')
class TitleWordOrderOptimizerTest(parameterized.TestCase):
def setUp(self):
    """Creates a fresh optimizer and test app for every test case."""
    super().setUp()
    app_util.setup_test_app()
    # Reset the module-level tokenizer so tests do not leak state.
    title_word_order_optimizer.CUSTOM_TEXT_TOKENIZER = None
    self.optimizer = title_word_order_optimizer.TitleWordOrderOptimizer()
def test_process_copies_highest_performing_keyword_to_front_of_title(self):
    """A single high-weight keyword is prepended to the title in brackets."""
    request_body = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': 'Some title with heavy_keyword in the middle',
            'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
        })

    optimized_output, result = self.optimizer.process(
        request_body, constants.LANGUAGE_CODE_EN)

    optimized_title = optimized_output['entries'][0]['product']['title']
    self.assertEqual(
        '[heavy_keyword] Some title with heavy_keyword in the middle',
        optimized_title)
    self.assertEqual(1, result.num_of_products_optimized)
def test_process_copies_multiple_performing_keywords_to_front_of_title(self):
    """Multiple high-weight keywords are all prepended to the title."""
    request_body = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': 'Some title with multiple keywords heavy_keyword '
                     'heavy_keyword_2 in the middle',
            'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
        })

    optimized_output, result = self.optimizer.process(
        request_body, constants.LANGUAGE_CODE_EN)

    optimized_title = optimized_output['entries'][0]['product']['title']
    self.assertEqual(
        '[heavy_keyword_2][heavy_keyword] Some title with multiple keywords '
        'heavy_keyword heavy_keyword_2 in the middle',
        optimized_title)
    self.assertEqual(1, result.num_of_products_optimized)
def test_process_copies_multiple_performing_keywords_to_front_of_title_in_descending_order_of_weight(
    self):
    """Prepended keywords are ordered by descending keyword weight."""
    request_body = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title':
                'Some title with multiple keywords keyword2 keyword1 in the '
                'middle',
            'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
        })

    optimized_output, result = self.optimizer.process(
        request_body, constants.LANGUAGE_CODE_EN)

    optimized_title = optimized_output['entries'][0]['product']['title']
    self.assertEqual(
        '[keyword1][keyword2] Some title with multiple keywords keyword2 '
        'keyword1 in the middle',
        optimized_title)
    self.assertEqual(1, result.num_of_products_optimized)
def test_process_copies_at_most_three_performing_keywords_to_front_of_title(
    self):
    """No more than three keywords are prepended, even if more match."""
    request_body = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': 'Some title with multiple keywords keyword2 keyword1 '
                     'heavy_keyword heavy_keyword_2 in the middle',
            'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
        })

    optimized_output, result = self.optimizer.process(
        request_body, constants.LANGUAGE_CODE_EN)

    optimized_title = optimized_output['entries'][0]['product']['title']
    self.assertEqual(
        '[keyword1][keyword2][heavy_keyword_2] Some title with multiple '
        'keywords keyword2 keyword1 heavy_keyword heavy_keyword_2 in the '
        'middle',
        optimized_title)
    self.assertEqual(1, result.num_of_products_optimized)
def test_process_does_not_modify_title_when_the_google_product_category_is_not_in_the_config(
self):
original_title = 'Some title with heavy_keyword in the middle'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with heavy_keyword in the middle',
'googleProductCategory': 'DIY用品 > DIY小物類',
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_process_does_not_modify_title_when_the_google_product_category_is_in_the_config_but_no_keywords(
self):
original_title = 'Some title with no target keywords in the middle'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with no target keywords in the middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_process_moves_keyword_if_title_more_than_max_title_length(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title':
'a' * (title_word_order_optimizer._MAX_TITLE_LENGTH -
len(' heavy_keyword')) + ' heavy_keyword',
'googleProductCategory':
_PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = '[heavy_keyword] ' + 'a' * (
title_word_order_optimizer._MAX_TITLE_LENGTH - len(' heavy_keyword'))
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_skips_one_character_wmm_keyword(self):
original_title = 'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_EN + (
('Some title with single a character keyword'))
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_EN
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
  @parameterized.named_parameters([{
      'testcase_name':
          'partial_match',
      'original_title':
          'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_JA +
          '有名ブランドTシャツ',
      'expected_title':
          'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_JA +
          '有名ブランドTシャツ'
  }, {
      'testcase_name':
          'accurate_match',
      'original_title':
          'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_JA +
          ' 有名ブランドシャツ',
      'expected_title':
          '[シャツ] ' + 'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_JA +
          ' 有名ブランドシャツ'
  }])
  def test_wmm_keyword_is_copied_only_with_accurate_match(
      self, original_title, expected_title):
    """A keyword is copied only on an exact token match, not a substring.

    In the partial_match case the keyword appears only inside a longer token
    ('Tシャツ') so the title stays unchanged; in the accurate_match case the
    standalone token is copied to the front.
    """
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, _ = self.optimizer.process(original_data,
                                               constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(expected_title, product['title'])
  @parameterized.named_parameters([{
      'testcase_name': 'one_word_excluded_then_added_back',
      'original_title':
          'レッド・スニーカー、ブランド: '
          'カイナ、モデル:エオファース、色:レッド',
      'expected_title':
          '[カイナ][エオファース] '
          'レッド・スニーカー、ブランド: '
          'カイナ、モデル:エオファース、色:レッド'
  }, {
      'testcase_name':
          'keyword_kaina_already_within_japanese_threshold_chars_no_change_to_title',
      'original_title':
          'レッド・、カイナ,スニーカー,ブランド:、色:レッド',
      'expected_title':
          'レッド・、カイナ,スニーカー,ブランド:、色:レッド'
  }, {
      'testcase_name':
          'keyword_kaina_right_at_the_limit_of_japanese_threshold_chars_no_change_to_title',
      'original_title':
          'レッド・レッド123レッド1,カイナ,ブランド:、色:レッド',
      'expected_title':
          'レッド・レッド123レッド1,カイナ,ブランド:、色:レッド'
  }, {
      'testcase_name':
          'keyword_kaina_is_partially_in_the_japanese_threshold_chars_and_partially_out_we_copy_it_to_front_title',
      'original_title':
          'レッド2・レッド1,レッド321,カイナ,ブランド:、色:レッド',
      'expected_title':
          '[カイナ] '
          'レッド2・レッド1,レッド321,カイナ,ブランド:、色:レッド'
  }, {
      'testcase_name':
          'keyword_kaina_is_right_out_of_the_japanese_threshold_chars_we_copy_it_to_front_title',
      'original_title':
          'レッド21・レッド12,レッド12,カイナ,ブランド:、色:レッド',
      'expected_title':
          '[カイナ] '
          'レッド21・レッド12,レッド12,カイナ,ブランド:、色:レッド'
  }])
  def test_scenario_jp_wmm_keyword_in_first_18_char_of_title(
      self, original_title, expected_title):
    """Keywords already visible within the JA threshold are not duplicated.

    Keywords fully inside the user-visible character window stay put; keywords
    partially or fully outside the window are copied to the front of the
    title (per the expected_title values in the parameters above).
    """
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, _ = self.optimizer.process(original_data,
                                               constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(expected_title, product['title'])
  @mock.patch(
      'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_description',
      return_value=True)
  def test_wmm_keyword_in_description_is_copied_to_title_when_options_toggle_is_on(
      self, _):
    """A keyword found only in the description is prepended when enabled.

    The description toggle is mocked on; the keyword カイナ appears in the
    description but not in the title, and is still copied to the front.
    """
    description = 'とても良い カイナ とても良い'
    original_title = ('レッド・スニーカー、ブランド: '
                      '色:レッド')
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'description': description,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, _ = self.optimizer.process(original_data,
                                               constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    expected_title = ('[カイナ] '
                      'レッド・スニーカー、ブランド: '
                      '色:レッド')
    self.assertEqual(expected_title, product['title'])
  @mock.patch(
      'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_description',
      return_value=False)
  def test_wmm_keyword_in_description_is_not_copied_when_options_toggle_is_off(
      self, _):
    """With the description toggle mocked off, description keywords are ignored."""
    description = 'とても良い カイナ とても良い'
    original_title = ('レッド・スニーカー、ブランド: '
                      '、色:レッド')
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'description': description,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(original_title, product['title'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)
  @parameterized.named_parameters([{
      'testcase_name': 'wmm_word_in_product_type_should_move_to_front_title',
      'original_title': 'レッド・スニーカー、ブランド: '
                        'モデル:エオファース、色:レッド',
      'product_types': ['シャツ'],
      'expected_title': '[シャツ][エオファース] '
                        'レッド・スニーカー、ブランド: '
                        'モデル:エオファース、色:レッド'
  }, {
      'testcase_name': 'wmm_word_in_product_type_list_move_to_front_title',
      'original_title': 'レッド・スニーカー、ブランド: '
                        'モデル:エオファース、色:レッド',
      'product_types': ['シャツ', 'セーター', 'ジャケット'],
      'expected_title': '[シャツ][エオファース] '
                        'レッド・スニーカー、ブランド: '
                        'モデル:エオファース、色:レッド'
  }])
  @mock.patch(
      'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_product_types',
      return_value=True)
  def test_wmm_keyword_in_product_types_is_copied_to_title_when_options_toggle_is_on(
      self, _, original_title, product_types, expected_title):
    """Keywords found in productTypes are prepended when the toggle is on.

    Both cases expect only シャツ from the product types to be copied, even
    when the list contains several entries (see expected_title above).
    """
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'productTypes': product_types,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, _ = self.optimizer.process(original_data,
                                               constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(expected_title, product['title'])
  @mock.patch(
      'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._optimization_includes_product_types',
      return_value=False)
  def test_wmm_keyword_in_product_types_is_not_copied_to_title_when_options_toggle_is_off(
      self, _):
    """With the productTypes toggle mocked off, product-type keywords are ignored."""
    original_title = ('レッド・スニーカー、ブランド: '
                      '色:レッド')
    product_types = ['シャツ']
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'productTypes': product_types,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, _ = self.optimizer.process(original_data,
                                               constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(original_title, product['title'])
  @parameterized.named_parameters([{
      'testcase_name':
          'japanese_title',
      'original_title':
          'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_JA +
          'タイトルブロック'
  }, {
      'testcase_name':
          'check_case_insensitive',
      'original_title':
          'a' * constants.TITLE_CHARS_VISIBLE_TO_USER_JA + 'Title Block'
  }])
  def test_wmm_keyword_in_blocklist_is_not_copied_to_front(
      self, original_title):
    """Blocklisted keywords are never copied, with case-insensitive matching."""
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(original_title, product['title'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)
  @parameterized.named_parameters([{
      'testcase_name':
          'japanese_title_hiroo_mobile_should_move_to_front',
      'original_title':
          'あなたの携帯電話のために最高の取引をしたいですか?広尾 モバイルを使う'
  }])
  def test_dictionary_term_file_help_tokenize_japanese_title_properly(
      self, original_title):
    """A multi-token dictionary term (広尾 モバイル) is kept whole when copied."""
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'googleProductCategory': _PROPER_GPC_CATEGORY_JA
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    expected_title = '[広尾 モバイル] あなたの携帯電話のために最高の取引をしたいですか?広尾 モバイルを使う'
    self.assertEqual(expected_title, product['title'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)
@parameterized.named_parameters([{
'testcase_name':
'non_japanese_title_hiroo_mobile_should_move_to_front',
'original_title':
'You want the best mobile phone deal? Come get Hiroo Mobile now!'
}])
def test_dictionary_term_file_help_tokenize_non_japanese_title_properly(
self, original_title):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_EN
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = ('[Hiroo Mobile] You want the best mobile phone deal? '
'Come get Hiroo Mobile now!')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
  @mock.patch(
      'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_optimization_level',
      return_value=title_word_order_optimizer._OptimizationLevel.AGGRESSIVE)
  def test_keywords_in_gpc_level_3_is_copied_to_front_when_gpc_level_is_deeper_than_3_and_optimization_level_is_aggressive(
      self, _):
    """With AGGRESSIVE level (mocked), a level-4 GPC still gets optimized."""
    original_title = '寒い冬からあなたを守る!モデル:ジャケット、カラー:ブラック、防寒仕様ダウンジャケット'
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'googleProductCategory': _GPC_CATEGORY_LEVEL_4_JA
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    expected_title = f'[防寒][ダウンジャケット] {original_title}'
    self.assertEqual(expected_title, product['title'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)
  @mock.patch(
      'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_optimization_level',
      return_value=title_word_order_optimizer._OptimizationLevel.STANDARD)
  def test_optimization_is_skipped_when_gpc_level_is_deeper_than_3_and_optimization_level_is_standard(
      self, _):
    """With STANDARD level (mocked), a level-4 GPC is left untouched."""
    original_title = '寒い冬からあなたを守る!モデル:ジャケット、カラー:ブラック、防寒仕様ダウンジャケット'
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': original_title,
            'googleProductCategory': _GPC_CATEGORY_LEVEL_4_JA
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    self.assertEqual(original_title, product['title'])
    self.assertEqual(0, optimization_result.num_of_products_optimized)
def test_process_interprets_valid_gpc_id_and_copies_performant_keyword(self):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with heavy_keyword in the middle',
'googleProductCategory': '201',
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = ('[heavy_keyword] Some title with heavy_keyword in the '
'middle')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def test_process_ignores_invalid_gpc_id_and_does_nothing(self):
original_title = 'Some title with heavy_keyword in the middle'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': '202',
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
  def test_promo_text_dont_get_move_to_the_front(self):
    """Bracketed promo text ([送料無料]) stays in place; only カイナ is copied."""
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title':
                '寒い冬からあなたを守る!モデル:ジャケット、[送料無料] , カイナ ,カラー:ブラック、防寒仕様ダウンジャケット',
            'googleProductCategory':
                _PROPER_GPC_CATEGORY_JA,
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_JA)
    product = optimized_data['entries'][0]['product']
    expected_title = (
        '[カイナ] '
        '寒い冬からあなたを守る!モデル:ジャケット、[送料無料]'
        ' , カイナ '
        ',カラー:ブラック、防寒仕様ダウンジャケット')
    self.assertEqual(expected_title, product['title'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_keywords_position',
return_value=title_word_order_optimizer._KeywordsPosition.BACK)
def test_keywords_are_appended_in_descending_order_of_weight_when_keywords_position_is_back(
self, _):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title':
'Some title with multiple keywords keyword2 keyword1 in the '
'middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = ('Some title with multiple keywords keyword2 '
'keyword1 in the middle [keyword1][keyword2]')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_keywords_position',
return_value=title_word_order_optimizer._KeywordsPosition.BACK)
def test_at_most_three_keywords_are_appended_weight_when_keywords_position_is_back(
self, _):
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': 'Some title with multiple keywords keyword2 keyword1 '
'heavy_keyword heavy_keyword_2 in the middle',
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
expected_title = (
'Some title with multiple '
'keywords keyword2 keyword1 heavy_keyword heavy_keyword_2 in the '
'middle [keyword1][keyword2][heavy_keyword_2]')
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_keywords_position',
return_value=title_word_order_optimizer._KeywordsPosition.BACK)
def test_title_is_not_optimized_if_title_more_than_max_title_length_when_keywords_position_is_back(
self, _):
original_title = 'a' * (title_word_order_optimizer._MAX_TITLE_LENGTH -
len(' heavy_keyword')) + ' heavy_keyword'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
self.assertEqual(original_title, product['title'])
self.assertEqual(0, optimization_result.num_of_products_optimized)
@mock.patch(
'optimizers_builtin.title_word_order_optimizer.TitleWordOrderOptimizer._get_keywords_position',
return_value=title_word_order_optimizer._KeywordsPosition.BACK)
def test_a_number_of_keywords_are_appended_so_that_title_length_does_not_exceed_max_length_when_keywords_position_is_back(
self, _):
original_title = 'a' * (title_word_order_optimizer._MAX_TITLE_LENGTH - len(
' keyword1 keyword2 [keyword1]')) + ' keyword1 keyword2'
original_data = requests_bodies.build_request_body(
properties_to_be_updated={
'title': original_title,
'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
})
optimized_data, optimization_result = self.optimizer.process(
original_data, constants.LANGUAGE_CODE_EN)
product = optimized_data['entries'][0]['product']
# [keyword2] is not appended.
expected_title = 'a' * (title_word_order_optimizer._MAX_TITLE_LENGTH - len(
' keyword1 keyword2 [keyword1]')) + ' keyword1 keyword2 [keyword1]'
self.assertEqual(expected_title, product['title'])
self.assertEqual(1, optimization_result.num_of_products_optimized)
def _set_test_variables(module: 'Module'):
"""Sets the module variables for testing outside a Flask environment."""
module.GPC_STRING_TO_ID_MAPPING_CONFIG = {
'Apparel & Accessories': 166,
'Apparel & Accessories > Jewelry': 188,
'Apparel & Accessories > Jewelry > Watches': 201,
}
module.TITLE_WORD_ORDER_CONFIG = {
'201': [{
'keyword': 'keyword1',
'weight': 0.8
}, {
'keyword': 'keyword2',
'weight': 0.7
}, {
'keyword': 'heavy_keyword',
'weight': 0.5
}, {
'keyword': 'heavy_keyword_2',
'weight': 0.6
}, {
'keyword': 'a',
'weight': 0.4
}, {
'keyword': 'magic',
'weight': 0.3
}],
'632': [{
'keyword': 'keyword1',
'weight': 0.5
}, {
'keyword': 'keyword2',
'weight': 0.7
}]
}
module.BLOCKLIST_CONFIG = {}
module.TITLE_WORD_ORDER_OPTIONS_CONFIG = {
'descriptionIncluded': False,
'productTypesIncluded': False,
'optimizationLevel': 'standard'
}
module.TITLE_WORD_ORDER_DICTIONARY_CONFIG = []
@mock.patch(
    'optimizers_builtin.title_word_order_optimizer._TITLE_WORD_ORDER_OPTIONS_FILE_NAME',
    'title_word_order_options_test')
class TitleWordOrderOptimizerNoFlaskTest(parameterized.TestCase):
  """Tests TitleWordOrderOptimizer running outside a Flask context."""

  def setUp(self):
    # Py3-style super(); the file already uses Python-3-only f-strings.
    super().setUp()
    # Explicitly no Flask context setup here.
    _set_test_variables(module=title_word_order_optimizer)
    self.optimizer = title_word_order_optimizer.TitleWordOrderOptimizer()

  def test_process_copies_highest_performing_keyword_to_front_of_title(self):
    """The heaviest matching keyword is copied to the front of the title."""
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': 'Some title with heavy_keyword in the middle',
            'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_EN)
    product = optimized_data['entries'][0]['product']
    expected_title = ('[heavy_keyword] Some title with heavy_keyword in the '
                      'middle')
    self.assertEqual(expected_title, product['title'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)

  def test_process_uses_custom_text_tokenizer(self):
    """A module-level custom tokenizer is used for splitting the title."""
    # Restore the module-level tokenizer afterwards so the override does not
    # leak into other tests (it was previously left in place).
    original_tokenizer = getattr(title_word_order_optimizer,
                                 'CUSTOM_TEXT_TOKENIZER', None)
    self.addCleanup(setattr, title_word_order_optimizer,
                    'CUSTOM_TEXT_TOKENIZER', original_tokenizer)
    title_word_order_optimizer.CUSTOM_TEXT_TOKENIZER = _custom_text_tokenizer
    original_data = requests_bodies.build_request_body(
        properties_to_be_updated={
            'title': 'Some,title,with,heavy_keyword,with,commas',
            'googleProductCategory': _PROPER_GPC_CATEGORY_EN,
        })
    optimized_data, optimization_result = self.optimizer.process(
        original_data, constants.LANGUAGE_CODE_EN)
    product = optimized_data['entries'][0]['product']
    expected_title = (
        '[heavy_keyword] Some,title,with,heavy_keyword,with,commas')
    self.assertEqual(expected_title, product['title'])
    self.assertEqual(1, optimization_result.num_of_products_optimized)
def _custom_text_tokenizer(text: str, lang: str,
dictionary_terms: Dict[str, str]) -> List[str]:
"""Helper function to split text by a comma."""
del lang
del dictionary_terms
return text.split(',')
| 40.673387
| 124
| 0.702257
| 3,431
| 30,261
| 5.757505
| 0.098805
| 0.034018
| 0.03331
| 0.039587
| 0.848436
| 0.829452
| 0.822011
| 0.797864
| 0.769414
| 0.75772
| 0
| 0.00811
| 0.205413
| 30,261
| 743
| 125
| 40.728129
| 0.81256
| 0.029345
| 0
| 0.725363
| 0
| 0
| 0.260653
| 0.123099
| 0.001616
| 0
| 0
| 0
| 0.082391
| 1
| 0.051696
| false
| 0
| 0.011309
| 0
| 0.067851
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ed84e3a3f7152bbf582ccf3c0d90d0da006e4561
| 36,655
|
py
|
Python
|
appengine/findit/handlers/flake/detection/test/flake_detection_utils_test.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/findit/handlers/flake/detection/test/flake_detection_utils_test.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | 7
|
2022-02-15T01:11:37.000Z
|
2022-03-02T12:46:13.000Z
|
appengine/findit/handlers/flake/detection/test/flake_detection_utils_test.py
|
NDevTK/chromium-infra
|
d38e088e158d81f7f2065a38aa1ea1894f735ec4
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
import mock
from handlers.flake.detection import flake_detection_utils
from libs import analysis_status
from libs import time_util
from model.flake.analysis.flake_culprit import FlakeCulprit
from model.flake.analysis.master_flake_analysis import MasterFlakeAnalysis
from model.flake.detection.flake_occurrence import FlakeOccurrence
from model.flake.flake import Flake
from model.flake.flake_issue import FlakeIssue
from model.flake.flake_type import FlakeType
from model.test_location import TestLocation
from model.test_inventory import LuciTest
from waterfall.test.wf_testcase import WaterfallTestCase
class FlakeDetectionUtilsTest(WaterfallTestCase):
  @mock.patch.object(time_util, 'GetUTCNow', return_value=datetime(2018, 1, 3))
  @mock.patch.object(
      time_util, 'GetDatetimeBeforeNow', return_value=datetime(2017, 12, 27))
  def testGetFlakeInformation(self, *_):
    """Tests that GetFlakeInformation assembles the full flake dict.

    Builds a Flake with an attached FlakeIssue, six FlakeOccurrences across
    four flake types and two builders, two FlakeCulprits and two
    MasterFlakeAnalyses, then checks the dict returned by
    GetFlakeInformation(flake, 6) — occurrences grouped by builder, issue and
    culprit details included.
    """
    # Issue attached to the flake; its fields surface under 'flake_issue'
    # in the expected dict below.
    flake_issue = FlakeIssue.Create(monorail_project='chromium', issue_id=900)
    flake_issue.last_updated_time_by_flake_detection = datetime(2018, 1, 1)
    flake_issue.last_updated_time_in_monorail = datetime(2018, 1, 2)
    flake_issue.status = 'Started'
    flake_issue.put()

    luci_project = 'chromium'
    step_ui_name = 'step'
    test_name = 'test'
    normalized_step_name = 'normalized_step_name'
    normalized_test_name = 'normalized_test_name'
    test_label_name = 'test_label'
    flake = Flake.Create(
        luci_project=luci_project,
        normalized_step_name=normalized_step_name,
        normalized_test_name=normalized_test_name,
        test_label_name=test_label_name,
    )
    flake.component = 'Mock>Component'
    flake.test_location = TestLocation()
    flake.test_location.file_path = '../../some/test/path/a.cc'
    flake.test_location.line_number = 42
    flake.flake_issue_key = flake_issue.key
    flake.flake_score_last_week = 10
    flake.put()

    build_id = 123
    luci_bucket = 'try'
    luci_builder = 'luci builder'
    legacy_master_name = 'buildbot master'
    legacy_build_number = 999
    time_happened = datetime(2018, 1, 1)
    gerrit_cl_id = 98765
    # Occurrence on 'luci builder'; all later occurrences use
    # 'luci builder 2', so this one forms its own group in the output.
    occurrence = FlakeOccurrence.Create(
        flake_type=FlakeType.CQ_FALSE_REJECTION,
        build_id=build_id,
        step_ui_name=step_ui_name,
        test_name=test_name,
        luci_project=luci_project,
        luci_bucket=luci_bucket,
        luci_builder=luci_builder,
        legacy_master_name=legacy_master_name,
        legacy_build_number=legacy_build_number,
        time_happened=time_happened,
        gerrit_cl_id=gerrit_cl_id,
        parent_flake_key=flake.key)
    occurrence.time_detected = datetime(2018, 1, 1)
    occurrence.put()

    occurrence2 = FlakeOccurrence.Create(
        flake_type=FlakeType.RETRY_WITH_PATCH,
        build_id=124,
        step_ui_name=step_ui_name,
        test_name=test_name,
        luci_project=luci_project,
        luci_bucket=luci_bucket,
        luci_builder='luci builder 2',
        legacy_master_name=legacy_master_name,
        legacy_build_number=legacy_build_number,
        time_happened=datetime(2018, 1, 2, 3),
        gerrit_cl_id=gerrit_cl_id,
        parent_flake_key=flake.key)
    occurrence2.time_detected = datetime(2018, 1, 2, 3)
    occurrence2.put()

    occurrence3 = FlakeOccurrence.Create(
        flake_type=FlakeType.CQ_FALSE_REJECTION,
        build_id=125,
        step_ui_name=step_ui_name,
        test_name=test_name,
        luci_project=luci_project,
        luci_bucket=luci_bucket,
        luci_builder='luci builder 2',
        legacy_master_name=legacy_master_name,
        legacy_build_number=legacy_build_number,
        time_happened=datetime(2018, 1, 2, 2),
        gerrit_cl_id=gerrit_cl_id,
        parent_flake_key=flake.key)
    occurrence3.time_detected = datetime(2018, 1, 2, 2)
    occurrence3.put()

    occurrence4 = FlakeOccurrence.Create(
        flake_type=FlakeType.CQ_HIDDEN_FLAKE,
        build_id=126,
        step_ui_name=step_ui_name,
        test_name=test_name,
        luci_project=luci_project,
        luci_bucket=luci_bucket,
        luci_builder='luci builder 2',
        legacy_master_name=legacy_master_name,
        legacy_build_number=legacy_build_number,
        time_happened=datetime(2018, 1, 2, 2),
        gerrit_cl_id=gerrit_cl_id,
        parent_flake_key=flake.key)
    occurrence4.time_detected = datetime(2018, 1, 2, 2)
    occurrence4.put()

    occurrence5 = FlakeOccurrence.Create(
        flake_type=FlakeType.CI_FAILED_STEP,
        build_id=127,
        step_ui_name=step_ui_name,
        test_name=test_name,
        luci_project=luci_project,
        luci_bucket=luci_bucket,
        luci_builder='luci builder 2',
        legacy_master_name=legacy_master_name,
        legacy_build_number=legacy_build_number,
        time_happened=datetime(2018, 1, 2, 2),
        gerrit_cl_id=-1,
        parent_flake_key=flake.key)
    occurrence5.time_detected = datetime(2018, 1, 2, 2)
    occurrence5.put()

    # This occurrence is dated 2017 and does not appear in the expected
    # occurrences below.
    occurrence6 = FlakeOccurrence.Create(
        flake_type=FlakeType.CI_FAILED_STEP,
        build_id=128,
        step_ui_name=step_ui_name,
        test_name=test_name,
        luci_project=luci_project,
        luci_bucket=luci_bucket,
        luci_builder='luci builder 2',
        legacy_master_name=legacy_master_name,
        legacy_build_number=legacy_build_number,
        time_happened=datetime(2017, 1, 2, 2),
        gerrit_cl_id=-1,
        parent_flake_key=flake.key)
    occurrence6.time_detected = datetime(2017, 1, 2, 2)
    occurrence6.put()

    # Two analyses sharing bug 900; only culprit1's analysis (with a
    # confidence value) surfaces under 'culprits' in the expected dict.
    culprit1 = FlakeCulprit.Create('chromium', 'rev1', 123456, 'culprit_url')
    culprit1.put()
    analysis = MasterFlakeAnalysis.Create(legacy_master_name, luci_builder,
                                          legacy_build_number, step_ui_name,
                                          test_name)
    analysis.bug_id = 900
    analysis.culprit_urlsafe_key = culprit1.key.urlsafe()
    analysis.confidence_in_culprit = 0.98
    analysis.put()

    culprit2 = FlakeCulprit.Create('chromium', 'rev2', 123457, 'culprit_url')
    culprit2.put()
    analysis_1 = MasterFlakeAnalysis.Create(legacy_master_name, luci_builder,
                                            legacy_build_number - 1,
                                            step_ui_name, test_name)
    analysis_1.bug_id = 900
    analysis_1.culprit_urlsafe_key = culprit2.key.urlsafe()
    analysis_1.put()

    expected_flake_dict = {
        'luci_project':
            'chromium',
        'normalized_step_name':
            'normalized_step_name',
        'normalized_test_name':
            'normalized_test_name',
        'test_label_name':
            'test_label',
        'flake_issue_key':
            flake_issue.key,
        'last_occurred_time':
            None,
        'last_test_location_based_tag_update_time':
            None,
        'false_rejection_count_last_week':
            0,
        'impacted_cl_count_last_week':
            0,
        'archived':
            False,
        'flake_counts_last_week': [
            {
                'flake_type': 'cq false rejection',
                'impacted_cl_count': 0,
                'occurrence_count': 0
            },
            {
                'flake_type': 'cq step level retry',
                'impacted_cl_count': 0,
                'occurrence_count': 0
            },
            {
                'flake_type': 'cq hidden flake',
                'impacted_cl_count': 0,
                'occurrence_count': 0
            },
            {
                'flake_type': 'ci failed step',
                'impacted_cl_count': 0,
                'occurrence_count': 0
            },
        ],
        'flake_score_last_week':
            10,
        'flake_issue': {
            'flake_culprit_key':
                None,
            'monorail_project':
                'chromium',
            'issue_id':
                900,
            'last_updated_time_by_flake_detection':
                datetime(2018, 1, 1),
            'issue_link': ('https://monorail-prod.appspot.com/p/chromium/'
                           'issues/detail?id=900'),
            'merge_destination_key':
                None,
            'last_updated_time_in_monorail':
                '1 day, 00:00:00',
            'last_updated_time_with_analysis_results':
                None,
            'create_time_in_monorail':
                None,
            'labels': [],
            'status':
                'Started',
        },
        'component':
            'Mock>Component',
        'test_location': {
            'file_path': '../../some/test/path/a.cc',
            'line_number': 42,
        },
        'tags': [],
        'culprits': [{
            'revision': 'rev1',
            'commit_position': culprit1.commit_position,
            'culprit_key': culprit1.key.urlsafe()
        }],
        'sample_analysis':
            None,
        # Occurrences grouped by builder; 'luci builder 2' first, newest
        # occurrence first within the group.
        'occurrences': [{
            'group_by_field':
                'luci builder 2',
            'occurrences': [
                {
                    'flake_type': 'cq step level retry',
                    'build_id': '124',
                    'step_ui_name': step_ui_name,
                    'test_name': test_name,
                    'tags': [],
                    'build_configuration': {
                        'luci_project': 'chromium',
                        'luci_bucket': 'try',
                        'luci_builder': 'luci builder 2',
                        'legacy_master_name': 'buildbot master',
                        'legacy_build_number': 999
                    },
                    'time_happened': '2018-01-02 03:00:00 UTC',
                    'time_detected': '2018-01-02 03:00:00 UTC',
                    'gerrit_cl_id': gerrit_cl_id
                },
                {
                    'flake_type': 'cq false rejection',
                    'build_id': '125',
                    'step_ui_name': step_ui_name,
                    'test_name': test_name,
                    'tags': [],
                    'build_configuration': {
                        'luci_project': 'chromium',
                        'luci_bucket': 'try',
                        'luci_builder': 'luci builder 2',
                        'legacy_master_name': 'buildbot master',
                        'legacy_build_number': 999
                    },
                    'time_happened': '2018-01-02 02:00:00 UTC',
                    'time_detected': '2018-01-02 02:00:00 UTC',
                    'gerrit_cl_id': gerrit_cl_id
                },
                {
                    'flake_type': 'ci failed step',
                    'build_id': '127',
                    'step_ui_name': step_ui_name,
                    'test_name': test_name,
                    'tags': [],
                    'build_configuration': {
                        'luci_project': 'chromium',
                        'luci_bucket': 'try',
                        'luci_builder': 'luci builder 2',
                        'legacy_master_name': 'buildbot master',
                        'legacy_build_number': 999
                    },
                    'time_happened': '2018-01-02 02:00:00 UTC',
                    'time_detected': '2018-01-02 02:00:00 UTC',
                    'gerrit_cl_id': -1,
                },
                {
                    'flake_type': 'cq hidden flake',
                    'build_id': '126',
                    'step_ui_name': step_ui_name,
                    'test_name': test_name,
                    'tags': [],
                    'build_configuration': {
                        'luci_project': 'chromium',
                        'luci_bucket': 'try',
                        'luci_builder': 'luci builder 2',
                        'legacy_master_name': 'buildbot master',
                        'legacy_build_number': 999
                    },
                    'time_happened': '2018-01-02 02:00:00 UTC',
                    'time_detected': '2018-01-02 02:00:00 UTC',
                    'gerrit_cl_id': gerrit_cl_id,
                },
            ]
        },
                        {
                            'group_by_field':
                                'luci builder',
                            'occurrences': [{
                                'flake_type': 'cq false rejection',
                                'build_id': '123',
                                'step_ui_name': step_ui_name,
                                'test_name': test_name,
                                'tags': [],
                                'build_configuration': {
                                    'luci_project': 'chromium',
                                    'luci_bucket': 'try',
                                    'luci_builder': 'luci builder',
                                    'legacy_master_name': 'buildbot master',
                                    'legacy_build_number': 999
                                },
                                'time_happened': '2018-01-01 00:00:00 UTC',
                                'time_detected': '2018-01-01 00:00:00 UTC',
                                'gerrit_cl_id': gerrit_cl_id
                            }]
                        }],
    }
    self.assertEqual(expected_flake_dict,
                     flake_detection_utils.GetFlakeInformation(flake, 6))
@mock.patch.object(time_util, 'GetUTCNow', return_value=datetime(2019, 1, 3))
@mock.patch.object(
    time_util, 'GetDatetimeBeforeNow', return_value=datetime(2018, 12, 27))
def testGetFlakeInformationOldFlakes(self, *_):
  """Tests GetFlakeInformation for an archived flake with only old occurrences.

  "Now" is mocked to 2019-01-03 while every occurrence happened in early
  2018, so all flake_counts_last_week entries and flake_score_last_week are
  expected to be zero. The attached issue is still open ('Started'), so its
  details remain in the returned dict.
  """
  # Open issue last touched long before the mocked "now"; the expected dict
  # below shows last_updated_time_in_monorail rendered as a relative-age
  # string ('366 days' = 2019-01-03 minus 2018-01-02).
  flake_issue = FlakeIssue.Create(monorail_project='chromium', issue_id=900)
  flake_issue.last_updated_time_by_flake_detection = datetime(2018, 1, 1)
  flake_issue.last_updated_time_in_monorail = datetime(2018, 1, 2)
  flake_issue.status = 'Started'
  flake_issue.put()
  luci_project = 'chromium'
  step_ui_name = 'step'
  test_name = 'test'
  normalized_step_name = 'normalized_step_name'
  normalized_test_name = 'normalized_test_name'
  test_label_name = 'test_label'
  # The flake under test: archived, with a component, a test location and a
  # link to the issue above.
  flake = Flake.Create(
      luci_project=luci_project,
      normalized_step_name=normalized_step_name,
      normalized_test_name=normalized_test_name,
      test_label_name=test_label_name,
  )
  flake.component = 'Mock>Component'
  flake.test_location = TestLocation()
  flake.test_location.file_path = '../../some/test/path/a.cc'
  flake.test_location.line_number = 42
  flake.flake_issue_key = flake_issue.key
  flake.archived = True
  flake.put()
  build_id = 123
  luci_bucket = 'try'
  luci_builder = 'luci builder'
  legacy_master_name = 'buildbot master'
  legacy_build_number = 999
  time_happened = datetime(2018, 1, 1)
  gerrit_cl_id = 98765
  # Five occurrences of four flake types: one on 'luci builder' and four on
  # 'luci builder 2', so the expected output groups them by builder.
  occurrence = FlakeOccurrence.Create(
      flake_type=FlakeType.CQ_FALSE_REJECTION,
      build_id=build_id,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder=luci_builder,
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=time_happened,
      gerrit_cl_id=gerrit_cl_id,
      parent_flake_key=flake.key)
  occurrence.time_detected = datetime(2018, 1, 1)
  occurrence.put()
  occurrence2 = FlakeOccurrence.Create(
      flake_type=FlakeType.RETRY_WITH_PATCH,
      build_id=124,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder='luci builder 2',
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=datetime(2018, 1, 2, 3),
      gerrit_cl_id=gerrit_cl_id,
      parent_flake_key=flake.key)
  occurrence2.time_detected = datetime(2018, 1, 2, 3)
  occurrence2.put()
  occurrence3 = FlakeOccurrence.Create(
      flake_type=FlakeType.CQ_FALSE_REJECTION,
      build_id=125,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder='luci builder 2',
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=datetime(2018, 1, 2, 2),
      gerrit_cl_id=gerrit_cl_id,
      parent_flake_key=flake.key)
  occurrence3.time_detected = datetime(2018, 1, 2, 2)
  occurrence3.put()
  occurrence4 = FlakeOccurrence.Create(
      flake_type=FlakeType.CQ_HIDDEN_FLAKE,
      build_id=126,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder='luci builder 2',
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=datetime(2018, 1, 2, 4),
      gerrit_cl_id=gerrit_cl_id,
      parent_flake_key=flake.key)
  occurrence4.time_detected = datetime(2018, 1, 2, 4)
  occurrence4.put()
  # This CI occurrence has no associated CL, hence gerrit_cl_id == -1.
  occurrence5 = FlakeOccurrence.Create(
      flake_type=FlakeType.CI_FAILED_STEP,
      build_id=127,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder='luci builder 2',
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=datetime(2018, 1, 2, 5),
      gerrit_cl_id=-1,
      parent_flake_key=flake.key)
  occurrence5.time_detected = datetime(2018, 1, 2, 5)
  occurrence5.put()
  # Two completed analyses with culprits for bug 900.
  # NOTE(review): only culprit1 (the analysis with confidence_in_culprit
  # set) appears in the expected 'culprits' list below — presumably
  # analyses without a confidence are skipped; confirm against
  # flake_detection_utils.
  culprit1 = FlakeCulprit.Create('chromium', 'rev1', 123456, 'culprit_url')
  culprit1.put()
  analysis = MasterFlakeAnalysis.Create(legacy_master_name, luci_builder,
                                        legacy_build_number, step_ui_name,
                                        test_name)
  analysis.bug_id = 900
  analysis.culprit_urlsafe_key = culprit1.key.urlsafe()
  analysis.confidence_in_culprit = 0.98
  analysis.put()
  culprit2 = FlakeCulprit.Create('chromium', 'rev2', 123457, 'culprit_url')
  culprit2.put()
  analysis_1 = MasterFlakeAnalysis.Create(legacy_master_name, luci_builder,
                                          legacy_build_number - 1,
                                          step_ui_name, test_name)
  analysis_1.bug_id = 900
  analysis_1.culprit_urlsafe_key = culprit2.key.urlsafe()
  analysis_1.put()
  expected_flake_dict = {
      'luci_project': 'chromium',
      'normalized_step_name': 'normalized_step_name',
      'normalized_test_name': 'normalized_test_name',
      'test_label_name': 'test_label',
      'flake_issue_key': flake_issue.key,
      'last_occurred_time': None,
      'last_test_location_based_tag_update_time': None,
      'false_rejection_count_last_week': 0,
      'impacted_cl_count_last_week': 0,
      'archived': True,
      # All zero: every occurrence is outside the mocked last-week window
      # (2018-12-27 .. 2019-01-03).
      'flake_counts_last_week': [
          {
              'flake_type': 'cq false rejection',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'cq step level retry',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'cq hidden flake',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'ci failed step',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
      ],
      'flake_score_last_week': 0,
      'flake_issue': {
          'flake_culprit_key': None,
          'monorail_project': 'chromium',
          'issue_id': 900,
          'last_updated_time_by_flake_detection': datetime(2018, 1, 1),
          'issue_link': ('https://monorail-prod.appspot.com/p/chromium/'
                         'issues/detail?id=900'),
          'merge_destination_key': None,
          # Relative age against the mocked now (2019-01-03 - 2018-01-02).
          'last_updated_time_in_monorail': '366 days, 00:00:00',
          'last_updated_time_with_analysis_results': None,
          'create_time_in_monorail': None,
          'labels': [],
          'status': 'Started',
      },
      'component': 'Mock>Component',
      'test_location': {
          'file_path': '../../some/test/path/a.cc',
          'line_number': 42,
      },
      'tags': [],
      'culprits': [{
          'revision': 'rev1',
          'commit_position': culprit1.commit_position,
          'culprit_key': culprit1.key.urlsafe()
      }],
      'sample_analysis': None,
      # Occurrences are grouped by builder; within 'luci builder 2' they
      # are listed newest-first (05:00, 04:00, 03:00, 02:00).
      'occurrences': [{
          'group_by_field': 'luci builder 2',
          'occurrences': [
              {
                  'flake_type': 'ci failed step',
                  'build_id': '127',
                  'step_ui_name': step_ui_name,
                  'test_name': test_name,
                  'tags': [],
                  'build_configuration': {
                      'luci_project': 'chromium',
                      'luci_bucket': 'try',
                      'luci_builder': 'luci builder 2',
                      'legacy_master_name': 'buildbot master',
                      'legacy_build_number': 999
                  },
                  'time_happened': '2018-01-02 05:00:00 UTC',
                  'time_detected': '2018-01-02 05:00:00 UTC',
                  'gerrit_cl_id': -1,
              },
              {
                  'flake_type': 'cq hidden flake',
                  'build_id': '126',
                  'step_ui_name': step_ui_name,
                  'test_name': test_name,
                  'tags': [],
                  'build_configuration': {
                      'luci_project': 'chromium',
                      'luci_bucket': 'try',
                      'luci_builder': 'luci builder 2',
                      'legacy_master_name': 'buildbot master',
                      'legacy_build_number': 999
                  },
                  'time_happened': '2018-01-02 04:00:00 UTC',
                  'time_detected': '2018-01-02 04:00:00 UTC',
                  'gerrit_cl_id': gerrit_cl_id,
              },
              {
                  'flake_type': 'cq step level retry',
                  'build_id': '124',
                  'step_ui_name': step_ui_name,
                  'test_name': test_name,
                  'tags': [],
                  'build_configuration': {
                      'luci_project': 'chromium',
                      'luci_bucket': 'try',
                      'luci_builder': 'luci builder 2',
                      'legacy_master_name': 'buildbot master',
                      'legacy_build_number': 999
                  },
                  'time_happened': '2018-01-02 03:00:00 UTC',
                  'time_detected': '2018-01-02 03:00:00 UTC',
                  'gerrit_cl_id': gerrit_cl_id
              },
              {
                  'flake_type': 'cq false rejection',
                  'build_id': '125',
                  'step_ui_name': step_ui_name,
                  'test_name': test_name,
                  'tags': [],
                  'build_configuration': {
                      'luci_project': 'chromium',
                      'luci_bucket': 'try',
                      'luci_builder': 'luci builder 2',
                      'legacy_master_name': 'buildbot master',
                      'legacy_build_number': 999
                  },
                  'time_happened': '2018-01-02 02:00:00 UTC',
                  'time_detected': '2018-01-02 02:00:00 UTC',
                  'gerrit_cl_id': gerrit_cl_id
              },
          ]
      },
                      {
                          'group_by_field': 'luci builder',
                          'occurrences': [{
                              'flake_type': 'cq false rejection',
                              'build_id': '123',
                              'step_ui_name': step_ui_name,
                              'test_name': test_name,
                              'tags': [],
                              'build_configuration': {
                                  'luci_project': 'chromium',
                                  'luci_bucket': 'try',
                                  'luci_builder': 'luci builder',
                                  'legacy_master_name': 'buildbot master',
                                  'legacy_build_number': 999
                              },
                              'time_happened': '2018-01-01 00:00:00 UTC',
                              'time_detected': '2018-01-01 00:00:00 UTC',
                              'gerrit_cl_id': gerrit_cl_id
                          }]
                      }],
  }
  # Request at most 5 occurrences; all five created above are returned.
  self.assertEqual(expected_flake_dict,
                   flake_detection_utils.GetFlakeInformation(flake, 5))
def testGetFlakeInformationNoOccurrences(self):
  """A flake without any stored occurrences yields no information dict."""
  # Persist a flake that never had a FlakeOccurrence attached to it.
  flake = Flake.Create(
      luci_project='chromium',
      normalized_step_name='normalized_step_name',
      normalized_test_name='normalized_test_name_2',
      test_label_name='test_label')
  flake.put()
  self.assertIsNone(flake_detection_utils.GetFlakeInformation(flake, None))
@mock.patch.object(time_util, 'GetUTCNow', return_value=datetime(2018, 1, 3))
@mock.patch.object(
    time_util, 'GetDatetimeBeforeNow', return_value=datetime(2017, 12, 27))
def testGetFlakeInformationClosedIssue(self, *_):
  """Tests that a closed ('WontFix') issue is omitted from the result.

  The flake links to an issue whose status is 'WontFix'; the expected dict
  below contains no 'flake_issue' entry even though flake_issue_key is set.
  """
  flake_issue = FlakeIssue.Create(monorail_project='chromium', issue_id=900)
  flake_issue.last_updated_time_by_flake_detection = datetime(2018, 1, 1)
  flake_issue.last_updated_time_in_monorail = datetime(2018, 1, 2)
  flake_issue.status = 'WontFix'
  flake_issue.put()
  luci_project = 'chromium'
  step_ui_name = 'step'
  test_name = 'test'
  normalized_step_name = 'normalized_step_name'
  normalized_test_name = 'normalized_test_name_3'
  test_label_name = 'test_label'
  flake = Flake.Create(
      luci_project=luci_project,
      normalized_step_name=normalized_step_name,
      normalized_test_name=normalized_test_name,
      test_label_name=test_label_name)
  flake.flake_issue_key = flake_issue.key
  flake.put()
  build_id = 123
  luci_bucket = 'try'
  luci_builder = 'luci builder'
  legacy_master_name = 'buildbot master'
  legacy_build_number = 999
  time_happened = datetime(2018, 1, 1)
  gerrit_cl_id = 98765
  # A single occurrence; note it happened before the mocked last-week
  # window (2017-12-27 .. 2018-01-03 starts at 2017-12-27, the occurrence
  # is at 2018-01-01, yet the expected weekly counts below are all zero).
  occurrence = FlakeOccurrence.Create(
      flake_type=FlakeType.CQ_FALSE_REJECTION,
      build_id=build_id,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder=luci_builder,
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=time_happened,
      gerrit_cl_id=gerrit_cl_id,
      parent_flake_key=flake.key)
  occurrence.time_detected = datetime(2018, 1, 1)
  occurrence.put()
  expected_flake_dict = {
      'luci_project': 'chromium',
      'normalized_step_name': normalized_step_name,
      'normalized_test_name': normalized_test_name,
      'test_label_name': test_label_name,
      # The key is still exposed, but no 'flake_issue' details follow
      # because the issue is closed.
      'flake_issue_key': flake_issue.key,
      'last_occurred_time': None,
      'last_test_location_based_tag_update_time': None,
      'false_rejection_count_last_week': 0,
      'impacted_cl_count_last_week': 0,
      'archived': False,
      'flake_counts_last_week': [
          {
              'flake_type': 'cq false rejection',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'cq step level retry',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'cq hidden flake',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'ci failed step',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
      ],
      'flake_score_last_week': 0,
      'component': None,
      'test_location': None,
      'tags': [],
      'occurrences': [{
          'group_by_field': 'luci builder',
          'occurrences': [{
              'flake_type': 'cq false rejection',
              'build_id': '123',
              'step_ui_name': step_ui_name,
              'test_name': test_name,
              'tags': [],
              'build_configuration': {
                  'luci_project': 'chromium',
                  'luci_bucket': 'try',
                  'luci_builder': 'luci builder',
                  'legacy_master_name': 'buildbot master',
                  'legacy_build_number': 999
              },
              'time_happened': '2018-01-01 00:00:00 UTC',
              'time_detected': '2018-01-01 00:00:00 UTC',
              'gerrit_cl_id': gerrit_cl_id
          }]
      }],
  }
  self.assertEqual(expected_flake_dict,
                   flake_detection_utils.GetFlakeInformation(flake, 1))
@mock.patch.object(time_util, 'GetUTCNow', return_value=datetime(2018, 1, 3))
@mock.patch.object(
    time_util, 'GetDatetimeBeforeNow', return_value=datetime(2017, 12, 27))
def testGetFlakeInformationNoIssue(self, *_):
  """Tests GetFlakeInformation for a flake with no attached issue.

  No FlakeIssue is created, so 'flake_issue_key' is None in the expected
  output and no 'flake_issue' details appear.
  """
  luci_project = 'chromium'
  step_ui_name = 'step'
  test_name = 'test'
  normalized_step_name = 'normalized_step_name'
  normalized_test_name = 'normalized_test_name_3'
  test_label_name = 'test_label'
  flake = Flake.Create(
      luci_project=luci_project,
      normalized_step_name=normalized_step_name,
      normalized_test_name=normalized_test_name,
      test_label_name=test_label_name)
  flake.put()
  build_id = 123
  luci_bucket = 'try'
  luci_builder = 'luci builder'
  legacy_master_name = 'buildbot master'
  legacy_build_number = 999
  time_happened = datetime(2018, 1, 1)
  gerrit_cl_id = 98765
  # A single CQ false-rejection occurrence on 'luci builder'.
  occurrence = FlakeOccurrence.Create(
      flake_type=FlakeType.CQ_FALSE_REJECTION,
      build_id=build_id,
      step_ui_name=step_ui_name,
      test_name=test_name,
      luci_project=luci_project,
      luci_bucket=luci_bucket,
      luci_builder=luci_builder,
      legacy_master_name=legacy_master_name,
      legacy_build_number=legacy_build_number,
      time_happened=time_happened,
      gerrit_cl_id=gerrit_cl_id,
      parent_flake_key=flake.key)
  occurrence.time_detected = datetime(2018, 1, 1)
  occurrence.put()
  expected_flake_dict = {
      'luci_project': 'chromium',
      'normalized_step_name': normalized_step_name,
      'normalized_test_name': normalized_test_name,
      'test_label_name': test_label_name,
      'flake_issue_key': None,
      'last_occurred_time': None,
      'last_test_location_based_tag_update_time': None,
      'false_rejection_count_last_week': 0,
      'impacted_cl_count_last_week': 0,
      'archived': False,
      'flake_counts_last_week': [
          {
              'flake_type': 'cq false rejection',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'cq step level retry',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'cq hidden flake',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
          {
              'flake_type': 'ci failed step',
              'impacted_cl_count': 0,
              'occurrence_count': 0
          },
      ],
      'flake_score_last_week': 0,
      'component': None,
      'test_location': None,
      'tags': [],
      'occurrences': [{
          'group_by_field': 'luci builder',
          'occurrences': [{
              'flake_type': 'cq false rejection',
              'build_id': '123',
              'step_ui_name': step_ui_name,
              'test_name': test_name,
              'tags': [],
              'build_configuration': {
                  'luci_project': 'chromium',
                  'luci_bucket': 'try',
                  'luci_builder': 'luci builder',
                  'legacy_master_name': 'buildbot master',
                  'legacy_build_number': 999
              },
              'time_happened': '2018-01-01 00:00:00 UTC',
              'time_detected': '2018-01-01 00:00:00 UTC',
              'gerrit_cl_id': gerrit_cl_id
          }]
      }],
  }
  self.assertEqual(expected_flake_dict,
                   flake_detection_utils.GetFlakeInformation(flake, 1))
def testGetFlakeAnalysesResultsNoAnalyses(self):
  """With no analyses stored, culprits are empty and there is no sample."""
  culprits, sample_analysis = (
      flake_detection_utils._GetFlakeAnalysesResults(123))
  self.assertEqual([], culprits)
  self.assertIsNone(sample_analysis)
def testGetFlakeAnalysesResultsFailedToGetCulprit(self):
  """A completed analysis with an unresolvable culprit key is reported.

  The culprit key 'culprit.key' does not point at any stored FlakeCulprit,
  so the sample analysis is surfaced as completed without a culprit.
  """
  analysis = MasterFlakeAnalysis.Create('m', 'b', 12345, 's', 't')
  analysis.bug_id = 123
  analysis.culprit_urlsafe_key = 'culprit.key'
  analysis.status = analysis_status.COMPLETED
  analysis.put()
  expected_sample = {
      'status': 'Completed, no culprit found',
      'analysis_key': analysis.key.urlsafe()
  }
  self.assertEqual(([], expected_sample),
                   flake_detection_utils._GetFlakeAnalysesResults(123))
def testGetFlakeAnalysesResultsShowRunningAnalysis(self):
  """A running analysis is surfaced as the sample; an errored one is not."""
  running_analysis = MasterFlakeAnalysis.Create('m', 'b', 12345, 's', 't')
  running_analysis.bug_id = 123
  running_analysis.status = analysis_status.RUNNING
  running_analysis.put()
  errored_analysis = MasterFlakeAnalysis.Create('m', 'b', 12345, 's', 't')
  errored_analysis.bug_id = 123
  errored_analysis.status = analysis_status.ERROR
  errored_analysis.put()
  expected_sample = {
      'status': 'Running',
      'analysis_key': running_analysis.key.urlsafe()
  }
  self.assertEqual(([], expected_sample),
                   flake_detection_utils._GetFlakeAnalysesResults(123))
def testGetFlakeAnalysesResultsShowPendingAnalysis(self):
  """An analysis with no status set yet is reported as 'Pending'."""
  pending_analysis = MasterFlakeAnalysis.Create('m', 'b', 12345, 's', 't')
  pending_analysis.bug_id = 123
  pending_analysis.put()
  expected_sample = {
      'status': 'Pending',
      'analysis_key': pending_analysis.key.urlsafe()
  }
  self.assertEqual(([], expected_sample),
                   flake_detection_utils._GetFlakeAnalysesResults(123))
def testGetFlakeAnalysesResultsNotShowErrorAnalysis(self):
  """An errored analysis yields neither culprits nor a sample analysis."""
  errored_analysis = MasterFlakeAnalysis.Create('m', 'b', 12345, 's', 't')
  errored_analysis.bug_id = 123
  errored_analysis.status = analysis_status.ERROR
  errored_analysis.put()
  culprits, sample_analysis = (
      flake_detection_utils._GetFlakeAnalysesResults(123))
  self.assertEqual([], culprits)
  self.assertEqual({}, sample_analysis)
def testRemoveDisabledFlakes(self):
  """Flakes whose LuciTest has disabled variants are filtered out.

  Flake '1' has a LuciTest with a disabled variant, flake '2' has one with
  no disabled variants, and flake '3' has no LuciTest at all; only '1' is
  removed.
  """
  disabled_flake = Flake.Create('a', 'b', '1', 'test_label')
  enabled_flake = Flake.Create('a', 'b', '2', 'test_label')
  unknown_flake = Flake.Create('a', 'b', '3', 'test_label')
  # Test '1' is disabled on at least one configuration.
  LuciTest(
      key=LuciTest.CreateKey('a', 'b', 1),
      disabled_test_variants={('config',)},
  ).put()
  # Test '2' exists but has no disabled variants.
  LuciTest(
      key=LuciTest.CreateKey('a', 'b', 2),
      disabled_test_variants=set(),
  ).put()
  self.assertEqual(
      [enabled_flake, unknown_flake],
      flake_detection_utils.RemoveDisabledFlakes(
          [disabled_flake, enabled_flake, unknown_flake]))
| 36.581836
| 79
| 0.547429
| 3,735
| 36,655
| 4.996252
| 0.060509
| 0.036868
| 0.031081
| 0.021757
| 0.903542
| 0.898719
| 0.889181
| 0.888645
| 0.878999
| 0.876266
| 0
| 0.045382
| 0.354358
| 36,655
| 1,001
| 80
| 36.618382
| 0.743134
| 0.004229
| 0
| 0.833686
| 0
| 0
| 0.212051
| 0.028113
| 0
| 0
| 0
| 0
| 0.011653
| 1
| 0.011653
| false
| 0
| 0.014831
| 0
| 0.027542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
92e3ad13b0f5ab447b292e4a64d10c876d1507fa
| 177
|
py
|
Python
|
bats/util.py
|
uncommoncode/bats
|
f338966bb29d9c8d0921e0b76605095436a748e7
|
[
"BSD-2-Clause"
] | 1
|
2020-03-14T17:51:41.000Z
|
2020-03-14T17:51:41.000Z
|
bats/util.py
|
uncommoncode/bats
|
f338966bb29d9c8d0921e0b76605095436a748e7
|
[
"BSD-2-Clause"
] | null | null | null |
bats/util.py
|
uncommoncode/bats
|
f338966bb29d9c8d0921e0b76605095436a748e7
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
def generate_sine(frequency, duration, sample_rate):
    """Generate a uniformly sampled sine wave.

    Args:
        frequency: Oscillation frequency, in cycles per unit of duration
            (Hz when duration is in seconds).
        duration: Length of the signal, in the same time unit.
        sample_rate: Number of samples per unit of time.

    Returns:
        A 1-D numpy array with ``duration * sample_rate`` sine samples.
    """
    # Sample instants: k / sample_rate for k = 0, 1, ..., n - 1.
    sample_times = np.arange(duration * sample_rate) / sample_rate
    return np.sin(2.0 * np.pi * frequency * sample_times)
| 25.285714
| 55
| 0.706215
| 28
| 177
| 4.321429
| 0.642857
| 0.247934
| 0.297521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013889
| 0.186441
| 177
| 6
| 56
| 29.5
| 0.826389
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.