hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4a74a5b3ae9815d30ff73f4c0fa8d735514d1e
| 27,110
|
py
|
Python
|
src/twisted/conch/test/keydata.py
|
seanicus64/twisted
|
c0f1394c7bfb04d97c725a353a1f678fa6a1c602
|
[
"MIT",
"Unlicense"
] | 32
|
2019-11-14T07:49:33.000Z
|
2022-02-16T00:49:22.000Z
|
src/twisted/conch/test/keydata.py
|
seanicus64/twisted
|
c0f1394c7bfb04d97c725a353a1f678fa6a1c602
|
[
"MIT",
"Unlicense"
] | 9
|
2019-09-06T18:21:59.000Z
|
2022-01-13T03:04:11.000Z
|
src/twisted/conch/test/keydata.py
|
seanicus64/twisted
|
c0f1394c7bfb04d97c725a353a1f678fa6a1c602
|
[
"MIT",
"Unlicense"
] | 16
|
2019-06-25T13:26:43.000Z
|
2022-03-07T07:29:12.000Z
|
# -*- test-case-name: twisted.conch.test.test_keys -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# pylint: disable=I0011,C0103,W9401,W9402
"""
Data used by test_keys as well as others.
"""
from __future__ import absolute_import, division
from twisted.python.compat import long, _b64decodebytes as decodebytes
RSAData = {
'n': long('269413617238113438198661010376758399219880277968382122687862697'
'296942471209955603071120391975773283844560230371884389952067978'
'789684135947515341209478065209455427327369102356204259106807047'
'964139525310539133073743116175821417513079706301100600025815509'
'786721808719302671068052414466483676821987505720384645561708425'
'794379383191274856941628512616355437197560712892001107828247792'
'561858327085521991407807015047750218508971611590850575870321007'
'991909043252470730134547038841839367764074379439843108550888709'
'430958143271417044750314742880542002948053835745429446485015316'
'60749404403945254975473896534482849256068133525751'),
'e': long(65537),
'd': long('420335724286999695680502438485489819800002417295071059780489811'
'840828351636754206234982682752076205397047218449504537476523960'
'987613148307573487322720481066677105211155388802079519869249746'
'774085882219244493290663802569201213676433159425782937159766786'
'329742053214957933941260042101377175565683849732354700525628975'
'239000548651346620826136200952740446562751690924335365940810658'
'931238410612521441739702170503547025018016868116037053013935451'
'477930426013703886193016416453215950072147440344656137718959053'
'897268663969428680144841987624962928576808352739627262941675617'
'7724661940425316604626522633351193810751757014073'),
'p': long('152689878451107675391723141129365667732639179427453246378763774'
'448531436802867910180261906924087589684175595016060014593521649'
'964959248408388984465569934780790357826811592229318702991401054'
'226302790395714901636384511513449977061729214247279176398290513'
'085108930550446985490864812445551198848562639933888780317'),
'q': long('176444974592327996338888725079951900172097062203378367409936859'
'072670162290963119826394224277287608693818012745872307600855894'
'647300295516866118620024751601329775653542084052616260193174546'
'400544176890518564317596334518015173606460860373958663673307503'
'231977779632583864454001476729233959405710696795574874403'),
'u': long('936018002388095842969518498561007090965136403384715613439364803'
'229386793506402222847415019772053080458257034241832795210460612'
'924445085372678524176842007912276654532773301546269997020970818'
'155956828553418266110329867222673040098885651348225673298948529'
'93885224775891490070400861134282266967852120152546563278')
}
DSAData = {
'g': long("10253261326864117157640690761723586967382334319435778695"
"29171533815411392477819921538350732400350395446211982054"
"96512489289702949127531056893725702005035043292195216541"
"11525058911428414042792836395195432445511200566318251789"
"10575695836669396181746841141924498545494149998282951407"
"18645344764026044855941864175"),
'p': long("10292031726231756443208850082191198787792966516790381991"
"77502076899763751166291092085666022362525614129374702633"
"26262930887668422949051881895212412718444016917144560705"
"45675251775747156453237145919794089496168502517202869160"
"78674893099371444940800865897607102159386345313384716752"
"18590012064772045092956919481"),
'q': long(1393384845225358996250882900535419012502712821577),
'x': long(1220877188542930584999385210465204342686893855021),
'y': long("14604423062661947579790240720337570315008549983452208015"
"39426429789435409684914513123700756086453120500041882809"
"10283610277194188071619191739512379408443695946763554493"
"86398594314468629823767964702559709430618263927529765769"
"10270265745700231533660131769648708944711006508965764877"
"684264272082256183140297951")
}
ECDatanistp256 = {
'x': long('762825130203920963171185031449647317742997734817505505433829043'
'45687059013883'),
'y': long('815431978646028526322656647694416475343443758943143196810611371'
'59310646683104'),
'privateValue': long('3463874347721034170096400845565569825355565567882605'
'9678074967909361042656500'),
'curve': b'ecdsa-sha2-nistp256'
}
ECDatanistp384 = {
'privateValue': long('280814107134858470598753916394807521398239633534281633982576099083'
'35787109896602102090002196616273211495718603965098'),
'x': long('10036914308591746758780165503819213553101287571902957054148542'
'504671046744460374996612408381962208627004841444205030'),
'y': long('17337335659928075994560513699823544906448896792102247714689323'
'575406618073069185107088229463828921069465902299522926'),
'curve': b'ecdsa-sha2-nistp384'
}
ECDatanistp521 = {
'x': long('12944742826257420846659527752683763193401384271391513286022917'
'29910013082920512632908350502247952686156279140016049549948975'
'670668730618745449113644014505462'),
'y': long('10784108810271976186737587749436295782985563640368689081052886'
'16296815984553198866894145509329328086635278430266482551941240'
'591605833440825557820439734509311'),
'privateValue': long('662751235215460886290293902658128847495347691199214706697089140769'
'672273950767961331442265530524063943548846724348048614239791498442'
'5997823106818915698960565'),
'curve': b'ecdsa-sha2-nistp521'
}
privateECDSA_openssh521 = b"""-----BEGIN EC PRIVATE KEY-----
MIHcAgEBBEIAjn0lSVF6QweS4bjOGP9RHwqxUiTastSE0MVuLtFvkxygZqQ712oZ
ewMvqKkxthMQgxzSpGtRBcmkL7RqZ94+18qgBwYFK4EEACOhgYkDgYYABAFpX/6B
mxxglwD+VpEvw0hcyxVzLxNnMGzxZGF7xmNj8nlF7M+TQctdlR2Xv/J+AgIeVGmB
j2p84bkV9jBzrUNJEACsJjttZw8NbUrhxjkLT/3rMNtuwjE4vLja0P7DMTE0EV8X
f09ETdku/z/1tOSSrSvRwmUcM9nQUJtHHAZlr5Q0fw==
-----END EC PRIVATE KEY-----"""
publicECDSA_openssh521 = (b"ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA"
b"1MjEAAAAIbmlzdHA1MjEAAACFBAFpX/6BmxxglwD+VpEvw0hcyxVzLxNnMGzxZGF7xmNj8nlF7"
b"M+TQctdlR2Xv/J+AgIeVGmBj2p84bkV9jBzrUNJEACsJjttZw8NbUrhxjkLT/3rMNtuwjE4vLja"
b"0P7DMTE0EV8Xf09ETdku/z/1tOSSrSvRwmUcM9nQUJtHHAZlr5Q0fw== comment")
privateECDSA_openssh384 = b"""-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAtAi7I8j73WCX20qUM5hhHwHuFzYWYYILs2Sh8UZ+awNkARZ/Fu2LU
LLl5RtOQpbWgBwYFK4EEACKhZANiAATU17sA9P5FRwSknKcFsjjsk0+E3CeXPYX0
Tk/M0HK3PpWQWgrO8JdRHP9eFE9O/23P8BumwFt7F/AvPlCzVd35VfraFT0o4cCW
G0RqpQ+np31aKmeJshkcYALEchnU+tQ=
-----END EC PRIVATE KEY-----"""
publicECDSA_openssh384 = (b"ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzOD"
b"QAAAAIbmlzdHAzODQAAABhBNTXuwD0/kVHBKScpwWyOOyTT4TcJ5c9hfROT8zQcrc+lZBaCs7wl"
b"1Ec/14UT07/bc/wG6bAW3sX8C8+ULNV3flV+toVPSjhwJYbRGqlD6enfVoqZ4myGRxgAsRyGdT61A== comment")
publicECDSA_openssh = (b"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAA"
b"AAIbmlzdHAyNTYAAABBBKimX1DZ7+Qj0SpfePMbo1pb6yGkAb5l7duC1l855yD7tEfQfqk7bc7v"
b"46We1hLMyz6ObUBYgkN/34n42F4vpeA= comment")
privateECDSA_openssh = b"""-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIEyU1YOT2JxxofwbJXIjGftdNcJK55aQdNrhIt2xYQz0oAoGCCqGSM49
AwEHoUQDQgAEqKZfUNnv5CPRKl948xujWlvrIaQBvmXt24LWXznnIPu0R9B+qTtt
zu/jpZ7WEszLPo5tQFiCQ3/fifjYXi+l4A==
-----END EC PRIVATE KEY-----"""
publicRSA_openssh = (b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVaqx4I9bWG+wloV"
b"DEd2NQhEUBVUIUKirg0GDu1OmjrUr6OQZehFV1XwA2v2+qKj+DJjfBaS5b/fDz0n3WmM06QHjVy"
b"qgYwBGTJAkMgUyP95ztExZqpATpSXfD5FVks3loniwI66zoBC0hdwWnju9TMA2l5bs9auIJNm/9"
b"NNN9b0b/h9qpKSeq/631heY+Grh6HUqx6sBa9zDfH8Kk5O8/kUmWQNUZdy03w17snaY6RKXCpCn"
b"d1bqcPUWzxiwYZNW6Pd+rf81CrKfxGAugWBViC6QqbkPD5ASfNaNHjkbtM6Vlvbw7KW4CC1ffdO"
b"gTtDc1foNfICZgptyti8ZseZj3 comment")
privateRSA_openssh = b'''-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1WqseCPW1hvsJaFQxHdjUIRFAVVCFCoq4NBg7tTpo61K+jkG
XoRVdV8ANr9vqio/gyY3wWkuW/3w89J91pjNOkB41cqoGMARkyQJDIFMj/ec7RMW
aqQE6Ul3w+RVZLN5aJ4sCOus6AQtIXcFp47vUzANpeW7PWriCTZv/TTTfW9G/4fa
qSknqv+t9YXmPhq4eh1KserAWvcw3x/CpOTvP5FJlkDVGXctN8Ne7J2mOkSlwqQp
3dW6nD1Fs8YsGGTVuj3fq3/NQqyn8RgLoFgVYgukKm5Dw+QEnzWjR45G7TOlZb28
OyluAgtX33ToE7Q3NX6DXyAmYKbcrYvGbHmY9wIDAQABAoIBACFMCGaiKNW0+44P
chuFCQC58k438BxXS+NRf54jp+Q6mFUb6ot6mB682Lqx+YkSGGCs6MwLTglaQGq6
L5n4syRghLnOaZWa+eL8H1FNJxXbKyet77RprL59EOuGR3BztACHlRU7N/nnFOeA
u2geG+bdu3NjuWfmsid/z88wm8KY/dkYNi82LvE9gXqf4QMtR9s0UWI53U/prKiL
2dbzhMQXuXGdBghCeE27xSr0w1jNVSvtvjNfBOp75gQkY/It1z0bbNWcY0MvkoiN
Pm7aGDfYDyVniR25RjReyc7Ei+2SWjMHD9+GCPmS6dvrOAg2yc3NCgFIWzk+esrG
gKnc1DkCgYEA2XAG2OK81HiRUJTUwRuJOGxGZFpRoJoHPUiPA1HMaxKOfRqxZedx
dTngMgV1jRhMr5OxSbFmX3hietEMyuZNQ7Oc9Gt95gyY3M8hYo7VLhLeBK7XJG6D
MaIVokQ9IqliJiK5su1UCp0Ig6cHDf8ZGI7Yqx3aSJwxaBGhZm3j2B0CgYEA+0QX
i6Q2vh43Haf2YWwExKrdeD4HjB4zAq4DFIeDeuWefQhnqPKqvxJwz3Kpp8cLHYjV
IP2cY8pHMFVOi8TP9H8WpJISdKEJwsRunIwz76Xl9+ArrU9cEaoahDdb/Xrqw818
sMjkH1Rjtcev3/QJp/zHJfxc6ZHXksWYHlbTsSMCgYBRr+mSn5QLSoRlPpSzO5IQ
tXS4jMnvyQ4BMvovaBKhAyauz1FoFEwmmyikAjMIX+GncJgBNHleUo7Ezza8H0tV
rOvBU4TH4WGoStSi/0ANgB8SqVDAKhh1lAwGmxZQqEvsQc177/dLyXUCaMSYuIaI
GFpD5wIzlyJkk4MMRSp87QKBgGlmN8ZA3SHFBPOwuD5HlHx2/C3rPzk8lcNDAVHE
Qpfz6Bakxu7s1EkQUDgE7jvN19DMzDJpkAegG1qf/jHNHjp+cR4ZlBpOTwzfX1LV
0Rdu7NectlWd244hX7wkiLb8r6vw76QssNyfhrADEriL4t0PwO4jPUpQ/i+4KUZY
v7YnAoGBAIVLG3qbEhA3nh+tXtr+xpb+3zVruTTiFpeSJgm9vXAgA6c1vS0boNIH
RyvU3qioBwcbuAQgpydBPGw5OelBzucXHdFMXLw90iYm/mrW/Uhyrkb6e8PTGWBE
HaUTp4D1YynUel0GBxZd9os9y2Q64oRaTYwGLS2dHOuDTHg9eVTO
-----END RSA PRIVATE KEY-----'''
# Some versions of OpenSSH generate these (slightly different keys): the PKCS#1
# structure is wrapped in an extra ASN.1 SEQUENCE and there's an empty SEQUENCE
# following it. It is not any standard key format and was probably a bug in
# OpenSSH at some point.
privateRSA_openssh_alternate = b"""-----BEGIN RSA PRIVATE KEY-----
MIIEqTCCBKMCAQACggEBANVqrHgj1tYb7CWhUMR3Y1CERQFVQhQqKuDQYO7U6aOtSvo5Bl6EVXVf
ADa/b6oqP4MmN8FpLlv98PPSfdaYzTpAeNXKqBjAEZMkCQyBTI/3nO0TFmqkBOlJd8PkVWSzeWie
LAjrrOgELSF3BaeO71MwDaXluz1q4gk2b/00031vRv+H2qkpJ6r/rfWF5j4auHodSrHqwFr3MN8f
wqTk7z+RSZZA1Rl3LTfDXuydpjpEpcKkKd3Vupw9RbPGLBhk1bo936t/zUKsp/EYC6BYFWILpCpu
Q8PkBJ81o0eORu0zpWW9vDspbgILV9906BO0NzV+g18gJmCm3K2Lxmx5mPcCAwEAAQKCAQAhTAhm
oijVtPuOD3IbhQkAufJON/AcV0vjUX+eI6fkOphVG+qLepgevNi6sfmJEhhgrOjMC04JWkBqui+Z
+LMkYIS5zmmVmvni/B9RTScV2ysnre+0aay+fRDrhkdwc7QAh5UVOzf55xTngLtoHhvm3btzY7ln
5rInf8/PMJvCmP3ZGDYvNi7xPYF6n+EDLUfbNFFiOd1P6ayoi9nW84TEF7lxnQYIQnhNu8Uq9MNY
zVUr7b4zXwTqe+YEJGPyLdc9G2zVnGNDL5KIjT5u2hg32A8lZ4kduUY0XsnOxIvtklozBw/fhgj5
kunb6zgINsnNzQoBSFs5PnrKxoCp3NQ5AoGBANlwBtjivNR4kVCU1MEbiThsRmRaUaCaBz1IjwNR
zGsSjn0asWXncXU54DIFdY0YTK+TsUmxZl94YnrRDMrmTUOznPRrfeYMmNzPIWKO1S4S3gSu1yRu
gzGiFaJEPSKpYiYiubLtVAqdCIOnBw3/GRiO2Ksd2kicMWgRoWZt49gdAoGBAPtEF4ukNr4eNx2n
9mFsBMSq3Xg+B4weMwKuAxSHg3rlnn0IZ6jyqr8ScM9yqafHCx2I1SD9nGPKRzBVTovEz/R/FqSS
EnShCcLEbpyMM++l5ffgK61PXBGqGoQ3W/166sPNfLDI5B9UY7XHr9/0Caf8xyX8XOmR15LFmB5W
07EjAoGAUa/pkp+UC0qEZT6UszuSELV0uIzJ78kOATL6L2gSoQMmrs9RaBRMJpsopAIzCF/hp3CY
ATR5XlKOxM82vB9LVazrwVOEx+FhqErUov9ADYAfEqlQwCoYdZQMBpsWUKhL7EHNe+/3S8l1AmjE
mLiGiBhaQ+cCM5ciZJODDEUqfO0CgYBpZjfGQN0hxQTzsLg+R5R8dvwt6z85PJXDQwFRxEKX8+gW
pMbu7NRJEFA4BO47zdfQzMwyaZAHoBtan/4xzR46fnEeGZQaTk8M319S1dEXbuzXnLZVnduOIV+8
JIi2/K+r8O+kLLDcn4awAxK4i+LdD8DuIz1KUP4vuClGWL+2JwKBgQCFSxt6mxIQN54frV7a/saW
/t81a7k04haXkiYJvb1wIAOnNb0tG6DSB0cr1N6oqAcHG7gEIKcnQTxsOTnpQc7nFx3RTFy8PdIm
Jv5q1v1Icq5G+nvD0xlgRB2lE6eA9WMp1HpdBgcWXfaLPctkOuKEWk2MBi0tnRzrg0x4PXlUzjAA
-----END RSA PRIVATE KEY-----"""
# Encrypted with the passphrase 'encrypted'
privateRSA_openssh_encrypted = b"""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,FFFFFFFFFFFFFFFF
qRwdy/fEJQbRccoyOdhJCQv5FpsbSJWtvOTLjrURDrjpO0WvOsvwV/ltLAZtD8b3
CSlgi8eGZk+rFKNMn1uUWuVeQR3Mfn2PE2hgB0Qc0HkdfG92cnVzoRjKRQxxJM5o
F4/1MSLhwQxLF53DHWfyzK8rFNUBRyRRBFevXwPfbp6DhFy+TkqOcrLufJT7x7BC
MvMNjILVhNr9hnIMkzmAiBo9lhJ0CHdWhcQHaX7Wk6UcGNDsYrwBoLlKPQlEGMU1
2scCi1UAbCilVLyUpHFuQeCzJHBl0YuLjwmo1kSv9U5E4ysh/FIuRf3aW91OK1+y
5rPE+zJYnzNAZ/Rl9xqtvTh8hlMwjdt582HbiJb6zJw5/NEz9OKyaJjjqZsZQ1TY
GOhV0Oqwx54jpkCh0XwJ2ofmcNuiO8LoUVBE124pa6ePSxLorqBhtq9nTgwUv1Mx
lFioAM6xGBSugCRZgbapHAw0M/I8Fa95A2ocRgx0N6TMp3ZBVltovb8pZrAGld3L
T5VWGDl3ZX/eM8YlXDupewOpb56g55Aevl0jERktqJdl+g9D+PXnCsJgxMcJbfl5
tWY9KoMxV+2Fj68SHdr/acCp7xgUMwHvVLFfYCeK/hpEe9O8vOAMTlXuq9zCMDAx
kL2kcSFbZHerc4TijtjXEALo06gYHEcLRtI6lvYrzbbmpCD7J7AnMzS3SQ2FzbFM
GARKfxBnYp0dZTDiY5HE45r8xWzUupoFcIuxKtuhBEtT7H2Ynv1NmU9qQRj8C1U5
LgM3lVEkrh4o1aBIAqX0OA4do08h2pdp9v0E4HKtCjSL5EBC6xrxmIY6b6dwCDLU
n16mv4jeKGy2IKvnF9r8HXdUG0yisNGxeq5Uf7STGH5KcCRrJCBZbawAbwURaLRo
HVydDP+5uEMMVjULpYgtuNo6gw6NczOhzgYAm3v2ZMjmZ8gclOsbRrH74XiOV8kd
89oYN5yNHD2EBqP5271kbmsYZ3VwBGN+HUdWIDi3gbFfHcmy59YQt09mZIMMwD7r
nRK+AKfBnNeMK9yZkkTRs3FwY4ZJdYn58pWfe4DNqMI7U5BQ9QZnLkfGLUqWtghX
jrselJrtSEMkll+feFf7jxiCKAwC/cWe1cvukjYPA6k75Wv7RaDENfwMlZtN+pfl
pzKqId20HhMNOceaeZagL+xzM1RRj+VcXR9BWfHI2AXZPcpTGAJwDOoQA64L9YGW
7QzxHmcDTlV0e59CpAdCLj//rQxFpYnuuJlwV2YyHYnvzfxsVge7u5ApcuBpNTjf
N46Heh24xXfqfM7OcO3BB71VfcvCNQavsp70PAtj4loShX6FpzatNX0iZasB988E
TtuiJ+9e7vH0xEhBLJIUJT9LvMto25KLHFHwSQXKEXM+hiY2nfObc1Cku4lBmy+7
uWpZrh3hkmKUtfdiyeqUUj1ypwZ6boZO1UZo0xTrpubmKQEvd2957YrEEVsi0LeB
uEzSlUXrwIV7Qw2VhoLxIaCyl5j4nOKetUeAjqVNi9makh0x4Ion5osxxYvYS9s/
Y48ATMnwm3+CdN6LE3IykHyHs7JuZmawWDR4CKJB6M1r0X+Xwgs0tQ==
-----END RSA PRIVATE KEY-----"""
# Encrypted with the passphrase 'testxp'. NB: this key was generated by
# OpenSSH, so it doesn't use the same key data as the other keys here.
privateRSA_openssh_encrypted_aes = b"""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,0673309A6ACCAB4B77DEE1C1E536AC26
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----"""
publicRSA_lsh = (
b'{KDEwOnB1YmxpYy1rZXkoMTQ6cnNhLXBrY3MxLXNoYTEoMTpuMjU3OgDVaqx4I9bWG+wloVD'
b'Ed2NQhEUBVUIUKirg0GDu1OmjrUr6OQZehFV1XwA2v2+qKj+DJjfBaS5b/fDz0n3WmM06QHj'
b'VyqgYwBGTJAkMgUyP95ztExZqpATpSXfD5FVks3loniwI66zoBC0hdwWnju9TMA2l5bs9auI'
b'JNm/9NNN9b0b/h9qpKSeq/631heY+Grh6HUqx6sBa9zDfH8Kk5O8/kUmWQNUZdy03w17snaY'
b'6RKXCpCnd1bqcPUWzxiwYZNW6Pd+rf81CrKfxGAugWBViC6QqbkPD5ASfNaNHjkbtM6Vlvbw'
b'7KW4CC1ffdOgTtDc1foNfICZgptyti8ZseZj3KSgxOmUzOgEAASkpKQ==}'
)
privateRSA_lsh = (
b"(11:private-key(9:rsa-pkcs1(1:n257:\x00\xd5j\xacx#\xd6\xd6\x1b\xec%\xa1P"
b"\xc4wcP\x84E\x01UB\x14**\xe0\xd0`\xee\xd4\xe9\xa3\xadJ\xfa9\x06^\x84Uu_"
b"\x006\xbfo\xaa*?\x83&7\xc1i.[\xfd\xf0\xf3\xd2}\xd6\x98\xcd:@x\xd5\xca"
b"\xa8\x18\xc0\x11\x93$\t\x0c\x81L\x8f\xf7\x9c\xed\x13\x16j\xa4\x04\xe9Iw"
b"\xc3\xe4Ud\xb3yh\x9e,\x08\xeb\xac\xe8\x04-!w\x05\xa7\x8e\xefS0\r\xa5\xe5"
b"\xbb=j\xe2\t6o\xfd4\xd3}oF\xff\x87\xda\xa9)'\xaa\xff\xad\xf5\x85\xe6>"
b"\x1a\xb8z\x1dJ\xb1\xea\xc0Z\xf70\xdf\x1f\xc2\xa4\xe4\xef?\x91I\x96@\xd5"
b"\x19w-7\xc3^\xec\x9d\xa6:D\xa5\xc2\xa4)\xdd\xd5\xba\x9c=E\xb3\xc6,\x18d"
b"\xd5\xba=\xdf\xab\x7f\xcdB\xac\xa7\xf1\x18\x0b\xa0X\x15b\x0b\xa4*nC\xc3"
b"\xe4\x04\x9f5\xa3G\x8eF\xed3\xa5e\xbd\xbc;)n\x02\x0bW\xdft\xe8\x13\xb475"
b"~\x83_ &`\xa6\xdc\xad\x8b\xc6ly\x98\xf7)(1:e3:\x01\x00\x01)(1:d256:!L"
b"\x08f\xa2(\xd5\xb4\xfb\x8e\x0fr\x1b\x85\t\x00\xb9\xf2N7\xf0\x1cWK\xe3Q"
b"\x7f\x9e#\xa7\xe4:\x98U\x1b\xea\x8bz\x98\x1e\xbc\xd8\xba\xb1\xf9\x89\x12"
b"\x18`\xac\xe8\xcc\x0bN\tZ@j\xba/\x99\xf8\xb3$`\x84\xb9\xcei\x95\x9a\xf9"
b"\xe2\xfc\x1fQM'\x15\xdb+'\xad\xef\xb4i\xac\xbe}\x10\xeb\x86Gps\xb4\x00"
b"\x87\x95\x15;7\xf9\xe7\x14\xe7\x80\xbbh\x1e\x1b\xe6\xdd\xbbsc\xb9g\xe6"
b"\xb2'\x7f\xcf\xcf0\x9b\xc2\x98\xfd\xd9\x186/6.\xf1=\x81z\x9f\xe1\x03-G"
b"\xdb4Qb9\xddO\xe9\xac\xa8\x8b\xd9\xd6\xf3\x84\xc4\x17\xb9q\x9d\x06\x08Bx"
b"M\xbb\xc5*\xf4\xc3X\xcdU+\xed\xbe3_\x04\xea{\xe6\x04$c\xf2-\xd7=\x1bl"
b"\xd5\x9ccC/\x92\x88\x8d>n\xda\x187\xd8\x0f%g\x89\x1d\xb9F4^\xc9\xce\xc4"
b"\x8b\xed\x92Z3\x07\x0f\xdf\x86\x08\xf9\x92\xe9\xdb\xeb8\x086\xc9\xcd\xcd"
b"\n\x01H[9>z\xca\xc6\x80\xa9\xdc\xd49)(1:p129:\x00\xfbD\x17\x8b\xa46\xbe"
b"\x1e7\x1d\xa7\xf6al\x04\xc4\xaa\xddx>\x07\x8c\x1e3\x02\xae\x03\x14\x87"
b"\x83z\xe5\x9e}\x08g\xa8\xf2\xaa\xbf\x12p\xcfr\xa9\xa7\xc7\x0b\x1d\x88"
b"\xd5 \xfd\x9cc\xcaG0UN\x8b\xc4\xcf\xf4\x7f\x16\xa4\x92\x12t\xa1\t\xc2"
b"\xc4n\x9c\x8c3\xef\xa5\xe5\xf7\xe0+\xadO\\\x11\xaa\x1a\x847[\xfdz\xea"
b"\xc3\xcd|\xb0\xc8\xe4\x1fTc\xb5\xc7\xaf\xdf\xf4\t\xa7\xfc\xc7%\xfc\\\xe9"
b"\x91\xd7\x92\xc5\x98\x1eV\xd3\xb1#)(1:q129:\x00\xd9p\x06\xd8\xe2\xbc\xd4"
b"x\x91P\x94\xd4\xc1\x1b\x898lFdZQ\xa0\x9a\x07=H\x8f\x03Q\xcck\x12\x8e}"
b"\x1a\xb1e\xe7qu9\xe02\x05u\x8d\x18L\xaf\x93\xb1I\xb1f_xbz\xd1\x0c\xca"
b"\xe6MC\xb3\x9c\xf4k}\xe6\x0c\x98\xdc\xcf!b\x8e\xd5.\x12\xde\x04\xae\xd7$"
b"n\x831\xa2\x15\xa2D=\"\xa9b&\"\xb9\xb2\xedT\n\x9d\x08\x83\xa7\x07\r\xff"
b"\x19\x18\x8e\xd8\xab\x1d\xdaH\x9c1h\x11\xa1fm\xe3\xd8\x1d)(1:a128:if7"
b"\xc6@\xdd!\xc5\x04\xf3\xb0\xb8>G\x94|v\xfc-\xeb?9<\x95\xc3C\x01Q\xc4B"
b"\x97\xf3\xe8\x16\xa4\xc6\xee\xec\xd4I\x10P8\x04\xee;\xcd\xd7\xd0\xcc\xcc"
b"2i\x90\x07\xa0\x1bZ\x9f\xfe1\xcd\x1e:~q\x1e\x19\x94\x1aNO\x0c\xdf_R\xd5"
b"\xd1\x17n\xec\xd7\x9c\xb6U\x9d\xdb\x8e!_\xbc$\x88\xb6\xfc\xaf\xab\xf0"
b"\xef\xa4,\xb0\xdc\x9f\x86\xb0\x03\x12\xb8\x8b\xe2\xdd\x0f\xc0\xee#=JP"
b"\xfe/\xb8)FX\xbf\xb6')(1:b128:Q\xaf\xe9\x92\x9f\x94\x0bJ\x84e>\x94\xb3;"
b"\x92\x10\xb5t\xb8\x8c\xc9\xef\xc9\x0e\x012\xfa/h\x12\xa1\x03&\xae\xcfQh"
b"\x14L&\x9b(\xa4\x023\x08_\xe1\xa7p\x98\x014y^R\x8e\xc4\xcf6\xbc\x1fKU"
b"\xac\xeb\xc1S\x84\xc7\xe1a\xa8J\xd4\xa2\xff@\r\x80\x1f\x12\xa9P\xc0*\x18"
b"u\x94\x0c\x06\x9b\x16P\xa8K\xecA\xcd{\xef\xf7K\xc9u\x02h\xc4\x98\xb8\x86"
b"\x88\x18ZC\xe7\x023\x97\"d\x93\x83\x0cE*|\xed)(1:c129:\x00\x85K\x1bz\x9b"
b"\x12\x107\x9e\x1f\xad^\xda\xfe\xc6\x96\xfe\xdf5k\xb94\xe2\x16\x97\x92&\t"
b"\xbd\xbdp \x03\xa75\xbd-\x1b\xa0\xd2\x07G+\xd4\xde\xa8\xa8\x07\x07\x1b"
b"\xb8\x04 \xa7'A<l99\xe9A\xce\xe7\x17\x1d\xd1L\\\xbc=\xd2&&\xfej\xd6\xfd"
b"Hr\xaeF\xfa{\xc3\xd3\x19`D\x1d\xa5\x13\xa7\x80\xf5c)\xd4z]\x06\x07\x16]"
b"\xf6\x8b=\xcbd:\xe2\x84ZM\x8c\x06--\x9d\x1c\xeb\x83Lx=yT\xce)))"
)
privateRSA_agentv3 = (
b"\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x03\x01\x00\x01\x00\x00\x01\x00!L"
b"\x08f\xa2(\xd5\xb4\xfb\x8e\x0fr\x1b\x85\t\x00\xb9\xf2N7\xf0\x1cWK\xe3Q"
b"\x7f\x9e#\xa7\xe4:\x98U\x1b\xea\x8bz\x98\x1e\xbc\xd8\xba\xb1\xf9\x89\x12"
b"\x18`\xac\xe8\xcc\x0bN\tZ@j\xba/\x99\xf8\xb3$`\x84\xb9\xcei\x95\x9a\xf9"
b"\xe2\xfc\x1fQM'\x15\xdb+'\xad\xef\xb4i\xac\xbe}\x10\xeb\x86Gps\xb4\x00"
b"\x87\x95\x15;7\xf9\xe7\x14\xe7\x80\xbbh\x1e\x1b\xe6\xdd\xbbsc\xb9g\xe6"
b"\xb2'\x7f\xcf\xcf0\x9b\xc2\x98\xfd\xd9\x186/6.\xf1=\x81z\x9f\xe1\x03-G"
b"\xdb4Qb9\xddO\xe9\xac\xa8\x8b\xd9\xd6\xf3\x84\xc4\x17\xb9q\x9d\x06\x08Bx"
b"M\xbb\xc5*\xf4\xc3X\xcdU+\xed\xbe3_\x04\xea{\xe6\x04$c\xf2-\xd7=\x1bl"
b"\xd5\x9ccC/\x92\x88\x8d>n\xda\x187\xd8\x0f%g\x89\x1d\xb9F4^\xc9\xce\xc4"
b"\x8b\xed\x92Z3\x07\x0f\xdf\x86\x08\xf9\x92\xe9\xdb\xeb8\x086\xc9\xcd\xcd"
b"\n\x01H[9>z\xca\xc6\x80\xa9\xdc\xd49\x00\x00\x01\x01\x00\xd5j\xacx#\xd6"
b"\xd6\x1b\xec%\xa1P\xc4wcP\x84E\x01UB\x14**\xe0\xd0`\xee\xd4\xe9\xa3\xadJ"
b"\xfa9\x06^\x84Uu_\x006\xbfo\xaa*?\x83&7\xc1i.[\xfd\xf0\xf3\xd2}\xd6\x98"
b"\xcd:@x\xd5\xca\xa8\x18\xc0\x11\x93$\t\x0c\x81L\x8f\xf7\x9c\xed\x13\x16j"
b"\xa4\x04\xe9Iw\xc3\xe4Ud\xb3yh\x9e,\x08\xeb\xac\xe8\x04-!w\x05\xa7\x8e"
b"\xefS0\r\xa5\xe5\xbb=j\xe2\t6o\xfd4\xd3}oF\xff\x87\xda\xa9)'\xaa\xff\xad"
b"\xf5\x85\xe6>\x1a\xb8z\x1dJ\xb1\xea\xc0Z\xf70\xdf\x1f\xc2\xa4\xe4\xef?"
b"\x91I\x96@\xd5\x19w-7\xc3^\xec\x9d\xa6:D\xa5\xc2\xa4)\xdd\xd5\xba\x9c=E"
b"\xb3\xc6,\x18d\xd5\xba=\xdf\xab\x7f\xcdB\xac\xa7\xf1\x18\x0b\xa0X\x15b"
b"\x0b\xa4*nC\xc3\xe4\x04\x9f5\xa3G\x8eF\xed3\xa5e\xbd\xbc;)n\x02\x0bW\xdf"
b"t\xe8\x13\xb475~\x83_ &`\xa6\xdc\xad\x8b\xc6ly\x98\xf7\x00\x00\x00\x81"
b"\x00\x85K\x1bz\x9b\x12\x107\x9e\x1f\xad^\xda\xfe\xc6\x96\xfe\xdf5k\xb94"
b"\xe2\x16\x97\x92&\t\xbd\xbdp \x03\xa75\xbd-\x1b\xa0\xd2\x07G+\xd4\xde"
b"\xa8\xa8\x07\x07\x1b\xb8\x04 \xa7'A<l99\xe9A\xce\xe7\x17\x1d\xd1L\\\xbc="
b"\xd2&&\xfej\xd6\xfdHr\xaeF\xfa{\xc3\xd3\x19`D\x1d\xa5\x13\xa7\x80\xf5c)"
b"\xd4z]\x06\x07\x16]\xf6\x8b=\xcbd:\xe2\x84ZM\x8c\x06--\x9d\x1c\xeb\x83Lx"
b"=yT\xce\x00\x00\x00\x81\x00\xd9p\x06\xd8\xe2\xbc\xd4x\x91P\x94\xd4\xc1"
b"\x1b\x898lFdZQ\xa0\x9a\x07=H\x8f\x03Q\xcck\x12\x8e}\x1a\xb1e\xe7qu9\xe02"
b"\x05u\x8d\x18L\xaf\x93\xb1I\xb1f_xbz\xd1\x0c\xca\xe6MC\xb3\x9c\xf4k}\xe6"
b"\x0c\x98\xdc\xcf!b\x8e\xd5.\x12\xde\x04\xae\xd7$n\x831\xa2\x15\xa2D=\""
b"\xa9b&\"\xb9\xb2\xedT\n\x9d\x08\x83\xa7\x07\r\xff\x19\x18\x8e\xd8\xab"
b"\x1d\xdaH\x9c1h\x11\xa1fm\xe3\xd8\x1d\x00\x00\x00\x81\x00\xfbD\x17\x8b"
b"\xa46\xbe\x1e7\x1d\xa7\xf6al\x04\xc4\xaa\xddx>\x07\x8c\x1e3\x02\xae\x03"
b"\x14\x87\x83z\xe5\x9e}\x08g\xa8\xf2\xaa\xbf\x12p\xcfr\xa9\xa7\xc7\x0b"
b"\x1d\x88\xd5 \xfd\x9cc\xcaG0UN\x8b\xc4\xcf\xf4\x7f\x16\xa4\x92\x12t\xa1"
b"\t\xc2\xc4n\x9c\x8c3\xef\xa5\xe5\xf7\xe0+\xadO\\\x11\xaa\x1a\x847[\xfdz"
b"\xea\xc3\xcd|\xb0\xc8\xe4\x1fTc\xb5\xc7\xaf\xdf\xf4\t\xa7\xfc\xc7%\xfc\\"
b"\xe9\x91\xd7\x92\xc5\x98\x1eV\xd3\xb1#"
)
publicDSA_openssh = b"""\
ssh-dss AAAAB3NzaC1kc3MAAACBAJKQOsVERVDQIpANHH+JAAylo9\
LvFYmFFVMIuHFGlZpIL7sh3IMkqy+cssINM/lnHD3fmsAyLlUXZtt6PD9LgZRazsPOgptuH+Gu48G\
+yFuE8l0fVVUivos/MmYVJ66qT99htcZKatrTWZnpVW7gFABoqw+he2LZ0gkeU0+Sx9a5AAAAFQD0\
EYmTNaFJ8CS0+vFSF4nYcyEnSQAAAIEAkgLjxHJAE7qFWdTqf7EZngu7jAGmdB9k3YzMHe1ldMxEB\
7zNw5aOnxjhoYLtiHeoEcOk2XOyvnE+VfhIWwWAdOiKRTEZlmizkvhGbq0DCe2EPMXirjqWACI5nD\
ioQX1oEMonR8N3AEO5v9SfBqS2Q9R6OBr6lf04RvwpHZ0UGu8AAACAAhRpxGMIWEyaEh8YnjiazQT\
NEpklRZqeBGo1gotJggNmVaIQNIClGlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2G\
gdgMQWC7S6WFIXePGGXqNQDdWxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8= \
comment\
"""
privateDSA_openssh = b"""\
-----BEGIN DSA PRIVATE KEY-----
MIIBvAIBAAKBgQCSkDrFREVQ0CKQDRx/iQAMpaPS7xWJhRVTCLhxRpWaSC+7IdyD
JKsvnLLCDTP5Zxw935rAMi5VF2bbejw/S4GUWs7DzoKbbh/hruPBvshbhPJdH1VV
Ir6LPzJmFSeuqk/fYbXGSmra01mZ6VVu4BQAaKsPoXti2dIJHlNPksfWuQIVAPQR
iZM1oUnwJLT68VIXidhzISdJAoGBAJIC48RyQBO6hVnU6n+xGZ4Lu4wBpnQfZN2M
zB3tZXTMRAe8zcOWjp8Y4aGC7Yh3qBHDpNlzsr5xPlX4SFsFgHToikUxGZZos5L4
Rm6tAwnthDzF4q46lgAiOZw4qEF9aBDKJ0fDdwBDub/UnwaktkPUejga+pX9OEb8
KR2dFBrvAoGAAhRpxGMIWEyaEh8YnjiazQTNEpklRZqeBGo1gotJggNmVaIQNICl
GlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2GgdgMQWC7S6WFIXeP
GGXqNQDdWxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8CFQDV2gbL
czUdxCus0pfEP1bddaXRLQ==
-----END DSA PRIVATE KEY-----\
"""
publicDSA_lsh = decodebytes(b"""\
e0tERXdPbkIxWW14cFl5MXJaWGtvTXpwa2MyRW9NVHB3TVRJNU9nQ1NrRHJGUkVWUTBDS1FEUngv
aVFBTXBhUFM3eFdKaFJWVENMaHhScFdhU0MrN0lkeURKS3N2bkxMQ0RUUDVaeHc5MzVyQU1pNVZG
MmJiZWp3L1M0R1VXczdEem9LYmJoL2hydVBCdnNoYmhQSmRIMVZWSXI2TFB6Sm1GU2V1cWsvZlli
WEdTbXJhMDFtWjZWVnU0QlFBYUtzUG9YdGkyZElKSGxOUGtzZld1U2tvTVRweE1qRTZBUFFSaVpN
MW9VbndKTFQ2OFZJWGlkaHpJU2RKS1NneE9tY3hNams2QUpJQzQ4UnlRQk82aFZuVTZuK3hHWjRM
dTR3QnBuUWZaTjJNekIzdFpYVE1SQWU4emNPV2pwOFk0YUdDN1loM3FCSERwTmx6c3I1eFBsWDRT
RnNGZ0hUb2lrVXhHWlpvczVMNFJtNnRBd250aER6RjRxNDZsZ0FpT1p3NHFFRjlhQkRLSjBmRGR3
QkR1Yi9Vbndha3RrUFVlamdhK3BYOU9FYjhLUjJkRkJydktTZ3hPbmt4TWpnNkFoUnB4R01JV0V5
YUVoOFluamlhelFUTkVwa2xSWnFlQkdvMWdvdEpnZ05tVmFJUU5JQ2xHbEx5Q2kzNTllZkVVdVFj
WjlTWHhNNTlQK2hlY2MvR1UvR0hha1c1WVdFNGRQMkdnZGdNUVdDN1M2V0ZJWGVQR0dYcU5RRGRX
eGxYOHVtaGVudlFxYTFQbktyRlJoRHJKdzhaN0dqZEh4ZmxzeENFbVhQb0xOOHBLU2s9fQ==
""")
privateDSA_lsh = decodebytes(b"""\
KDExOnByaXZhdGUta2V5KDM6ZHNhKDE6cDEyOToAkpA6xURFUNAikA0cf4kADKWj0u8ViYUVUwi4
cUaVmkgvuyHcgySrL5yywg0z+WccPd+awDIuVRdm23o8P0uBlFrOw86Cm24f4a7jwb7IW4TyXR9V
VSK+iz8yZhUnrqpP32G1xkpq2tNZmelVbuAUAGirD6F7YtnSCR5TT5LH1rkpKDE6cTIxOgD0EYmT
NaFJ8CS0+vFSF4nYcyEnSSkoMTpnMTI5OgCSAuPEckATuoVZ1Op/sRmeC7uMAaZ0H2TdjMwd7WV0
zEQHvM3Dlo6fGOGhgu2Id6gRw6TZc7K+cT5V+EhbBYB06IpFMRmWaLOS+EZurQMJ7YQ8xeKuOpYA
IjmcOKhBfWgQyidHw3cAQ7m/1J8GpLZD1Ho4GvqV/ThG/CkdnRQa7ykoMTp5MTI4OgIUacRjCFhM
mhIfGJ44ms0EzRKZJUWangRqNYKLSYIDZlWiEDSApRpS8got+fXnxFLkHGfUl8TOfT/oXnHPxlPx
h2pFuWFhOHT9hoHYDEFgu0ulhSF3jxhl6jUA3VsZV/LpoXp70KmtT5yqxUYQ6ycPGexo3R8X5bMQ
hJlz6CzfKSgxOngyMToA1doGy3M1HcQrrNKXxD9W3XWl0S0pKSk=
""")
privateDSA_agentv3 = decodebytes(b"""\
AAAAB3NzaC1kc3MAAACBAJKQOsVERVDQIpANHH+JAAylo9LvFYmFFVMIuHFGlZpIL7sh3IMkqy+c
ssINM/lnHD3fmsAyLlUXZtt6PD9LgZRazsPOgptuH+Gu48G+yFuE8l0fVVUivos/MmYVJ66qT99h
tcZKatrTWZnpVW7gFABoqw+he2LZ0gkeU0+Sx9a5AAAAFQD0EYmTNaFJ8CS0+vFSF4nYcyEnSQAA
AIEAkgLjxHJAE7qFWdTqf7EZngu7jAGmdB9k3YzMHe1ldMxEB7zNw5aOnxjhoYLtiHeoEcOk2XOy
vnE+VfhIWwWAdOiKRTEZlmizkvhGbq0DCe2EPMXirjqWACI5nDioQX1oEMonR8N3AEO5v9SfBqS2
Q9R6OBr6lf04RvwpHZ0UGu8AAACAAhRpxGMIWEyaEh8YnjiazQTNEpklRZqeBGo1gotJggNmVaIQ
NIClGlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2GgdgMQWC7S6WFIXePGGXqNQDd
WxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8AAAAVANXaBstzNR3EK6zSl8Q/Vt11
pdEt
""")
__all__ = ['DSAData', 'RSAData', 'privateDSA_agentv3', 'privateDSA_lsh',
'privateDSA_openssh', 'privateRSA_agentv3', 'privateRSA_lsh',
'privateRSA_openssh', 'publicDSA_lsh', 'publicDSA_openssh',
'publicRSA_lsh', 'publicRSA_openssh', 'privateRSA_openssh_alternate']
| 61.058559
| 92
| 0.819993
|
from __future__ import absolute_import, division
from twisted.python.compat import long, _b64decodebytes as decodebytes
RSAData = {
'n': long('269413617238113438198661010376758399219880277968382122687862697'
'296942471209955603071120391975773283844560230371884389952067978'
'789684135947515341209478065209455427327369102356204259106807047'
'964139525310539133073743116175821417513079706301100600025815509'
'786721808719302671068052414466483676821987505720384645561708425'
'794379383191274856941628512616355437197560712892001107828247792'
'561858327085521991407807015047750218508971611590850575870321007'
'991909043252470730134547038841839367764074379439843108550888709'
'430958143271417044750314742880542002948053835745429446485015316'
'60749404403945254975473896534482849256068133525751'),
'e': long(65537),
'd': long('420335724286999695680502438485489819800002417295071059780489811'
'840828351636754206234982682752076205397047218449504537476523960'
'987613148307573487322720481066677105211155388802079519869249746'
'774085882219244493290663802569201213676433159425782937159766786'
'329742053214957933941260042101377175565683849732354700525628975'
'239000548651346620826136200952740446562751690924335365940810658'
'931238410612521441739702170503547025018016868116037053013935451'
'477930426013703886193016416453215950072147440344656137718959053'
'897268663969428680144841987624962928576808352739627262941675617'
'7724661940425316604626522633351193810751757014073'),
'p': long('152689878451107675391723141129365667732639179427453246378763774'
'448531436802867910180261906924087589684175595016060014593521649'
'964959248408388984465569934780790357826811592229318702991401054'
'226302790395714901636384511513449977061729214247279176398290513'
'085108930550446985490864812445551198848562639933888780317'),
'q': long('176444974592327996338888725079951900172097062203378367409936859'
'072670162290963119826394224277287608693818012745872307600855894'
'647300295516866118620024751601329775653542084052616260193174546'
'400544176890518564317596334518015173606460860373958663673307503'
'231977779632583864454001476729233959405710696795574874403'),
'u': long('936018002388095842969518498561007090965136403384715613439364803'
'229386793506402222847415019772053080458257034241832795210460612'
'924445085372678524176842007912276654532773301546269997020970818'
'155956828553418266110329867222673040098885651348225673298948529'
'93885224775891490070400861134282266967852120152546563278')
}
DSAData = {
'g': long("10253261326864117157640690761723586967382334319435778695"
"29171533815411392477819921538350732400350395446211982054"
"96512489289702949127531056893725702005035043292195216541"
"11525058911428414042792836395195432445511200566318251789"
"10575695836669396181746841141924498545494149998282951407"
"18645344764026044855941864175"),
'p': long("10292031726231756443208850082191198787792966516790381991"
"77502076899763751166291092085666022362525614129374702633"
"26262930887668422949051881895212412718444016917144560705"
"45675251775747156453237145919794089496168502517202869160"
"78674893099371444940800865897607102159386345313384716752"
"18590012064772045092956919481"),
'q': long(1393384845225358996250882900535419012502712821577),
'x': long(1220877188542930584999385210465204342686893855021),
'y': long("14604423062661947579790240720337570315008549983452208015"
"39426429789435409684914513123700756086453120500041882809"
"10283610277194188071619191739512379408443695946763554493"
"86398594314468629823767964702559709430618263927529765769"
"10270265745700231533660131769648708944711006508965764877"
"684264272082256183140297951")
}
ECDatanistp256 = {
'x': long('762825130203920963171185031449647317742997734817505505433829043'
'45687059013883'),
'y': long('815431978646028526322656647694416475343443758943143196810611371'
'59310646683104'),
'privateValue': long('3463874347721034170096400845565569825355565567882605'
'9678074967909361042656500'),
'curve': b'ecdsa-sha2-nistp256'
}
ECDatanistp384 = {
'privateValue': long('280814107134858470598753916394807521398239633534281633982576099083'
'35787109896602102090002196616273211495718603965098'),
'x': long('10036914308591746758780165503819213553101287571902957054148542'
'504671046744460374996612408381962208627004841444205030'),
'y': long('17337335659928075994560513699823544906448896792102247714689323'
'575406618073069185107088229463828921069465902299522926'),
'curve': b'ecdsa-sha2-nistp384'
}
ECDatanistp521 = {
'x': long('12944742826257420846659527752683763193401384271391513286022917'
'29910013082920512632908350502247952686156279140016049549948975'
'670668730618745449113644014505462'),
'y': long('10784108810271976186737587749436295782985563640368689081052886'
'16296815984553198866894145509329328086635278430266482551941240'
'591605833440825557820439734509311'),
'privateValue': long('662751235215460886290293902658128847495347691199214706697089140769'
'672273950767961331442265530524063943548846724348048614239791498442'
'5997823106818915698960565'),
'curve': b'ecdsa-sha2-nistp521'
}
privateECDSA_openssh521 = b"""-----BEGIN EC PRIVATE KEY-----
MIHcAgEBBEIAjn0lSVF6QweS4bjOGP9RHwqxUiTastSE0MVuLtFvkxygZqQ712oZ
ewMvqKkxthMQgxzSpGtRBcmkL7RqZ94+18qgBwYFK4EEACOhgYkDgYYABAFpX/6B
mxxglwD+VpEvw0hcyxVzLxNnMGzxZGF7xmNj8nlF7M+TQctdlR2Xv/J+AgIeVGmB
j2p84bkV9jBzrUNJEACsJjttZw8NbUrhxjkLT/3rMNtuwjE4vLja0P7DMTE0EV8X
f09ETdku/z/1tOSSrSvRwmUcM9nQUJtHHAZlr5Q0fw==
-----END EC PRIVATE KEY-----"""
publicECDSA_openssh521 = (b"ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA"
b"1MjEAAAAIbmlzdHA1MjEAAACFBAFpX/6BmxxglwD+VpEvw0hcyxVzLxNnMGzxZGF7xmNj8nlF7"
b"M+TQctdlR2Xv/J+AgIeVGmBj2p84bkV9jBzrUNJEACsJjttZw8NbUrhxjkLT/3rMNtuwjE4vLja"
b"0P7DMTE0EV8Xf09ETdku/z/1tOSSrSvRwmUcM9nQUJtHHAZlr5Q0fw== comment")
privateECDSA_openssh384 = b"""-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAtAi7I8j73WCX20qUM5hhHwHuFzYWYYILs2Sh8UZ+awNkARZ/Fu2LU
LLl5RtOQpbWgBwYFK4EEACKhZANiAATU17sA9P5FRwSknKcFsjjsk0+E3CeXPYX0
Tk/M0HK3PpWQWgrO8JdRHP9eFE9O/23P8BumwFt7F/AvPlCzVd35VfraFT0o4cCW
G0RqpQ+np31aKmeJshkcYALEchnU+tQ=
-----END EC PRIVATE KEY-----"""
publicECDSA_openssh384 = (b"ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzOD"
b"QAAAAIbmlzdHAzODQAAABhBNTXuwD0/kVHBKScpwWyOOyTT4TcJ5c9hfROT8zQcrc+lZBaCs7wl"
b"1Ec/14UT07/bc/wG6bAW3sX8C8+ULNV3flV+toVPSjhwJYbRGqlD6enfVoqZ4myGRxgAsRyGdT61A== comment")
publicECDSA_openssh = (b"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAA"
b"AAIbmlzdHAyNTYAAABBBKimX1DZ7+Qj0SpfePMbo1pb6yGkAb5l7duC1l855yD7tEfQfqk7bc7v"
b"46We1hLMyz6ObUBYgkN/34n42F4vpeA= comment")
privateECDSA_openssh = b"""-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIEyU1YOT2JxxofwbJXIjGftdNcJK55aQdNrhIt2xYQz0oAoGCCqGSM49
AwEHoUQDQgAEqKZfUNnv5CPRKl948xujWlvrIaQBvmXt24LWXznnIPu0R9B+qTtt
zu/jpZ7WEszLPo5tQFiCQ3/fifjYXi+l4A==
-----END EC PRIVATE KEY-----"""
publicRSA_openssh = (b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVaqx4I9bWG+wloV"
b"DEd2NQhEUBVUIUKirg0GDu1OmjrUr6OQZehFV1XwA2v2+qKj+DJjfBaS5b/fDz0n3WmM06QHjVy"
b"qgYwBGTJAkMgUyP95ztExZqpATpSXfD5FVks3loniwI66zoBC0hdwWnju9TMA2l5bs9auIJNm/9"
b"NNN9b0b/h9qpKSeq/631heY+Grh6HUqx6sBa9zDfH8Kk5O8/kUmWQNUZdy03w17snaY6RKXCpCn"
b"d1bqcPUWzxiwYZNW6Pd+rf81CrKfxGAugWBViC6QqbkPD5ASfNaNHjkbtM6Vlvbw7KW4CC1ffdO"
b"gTtDc1foNfICZgptyti8ZseZj3 comment")
privateRSA_openssh = b'''-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEA1WqseCPW1hvsJaFQxHdjUIRFAVVCFCoq4NBg7tTpo61K+jkG
XoRVdV8ANr9vqio/gyY3wWkuW/3w89J91pjNOkB41cqoGMARkyQJDIFMj/ec7RMW
aqQE6Ul3w+RVZLN5aJ4sCOus6AQtIXcFp47vUzANpeW7PWriCTZv/TTTfW9G/4fa
qSknqv+t9YXmPhq4eh1KserAWvcw3x/CpOTvP5FJlkDVGXctN8Ne7J2mOkSlwqQp
3dW6nD1Fs8YsGGTVuj3fq3/NQqyn8RgLoFgVYgukKm5Dw+QEnzWjR45G7TOlZb28
OyluAgtX33ToE7Q3NX6DXyAmYKbcrYvGbHmY9wIDAQABAoIBACFMCGaiKNW0+44P
chuFCQC58k438BxXS+NRf54jp+Q6mFUb6ot6mB682Lqx+YkSGGCs6MwLTglaQGq6
L5n4syRghLnOaZWa+eL8H1FNJxXbKyet77RprL59EOuGR3BztACHlRU7N/nnFOeA
u2geG+bdu3NjuWfmsid/z88wm8KY/dkYNi82LvE9gXqf4QMtR9s0UWI53U/prKiL
2dbzhMQXuXGdBghCeE27xSr0w1jNVSvtvjNfBOp75gQkY/It1z0bbNWcY0MvkoiN
Pm7aGDfYDyVniR25RjReyc7Ei+2SWjMHD9+GCPmS6dvrOAg2yc3NCgFIWzk+esrG
gKnc1DkCgYEA2XAG2OK81HiRUJTUwRuJOGxGZFpRoJoHPUiPA1HMaxKOfRqxZedx
dTngMgV1jRhMr5OxSbFmX3hietEMyuZNQ7Oc9Gt95gyY3M8hYo7VLhLeBK7XJG6D
MaIVokQ9IqliJiK5su1UCp0Ig6cHDf8ZGI7Yqx3aSJwxaBGhZm3j2B0CgYEA+0QX
i6Q2vh43Haf2YWwExKrdeD4HjB4zAq4DFIeDeuWefQhnqPKqvxJwz3Kpp8cLHYjV
IP2cY8pHMFVOi8TP9H8WpJISdKEJwsRunIwz76Xl9+ArrU9cEaoahDdb/Xrqw818
sMjkH1Rjtcev3/QJp/zHJfxc6ZHXksWYHlbTsSMCgYBRr+mSn5QLSoRlPpSzO5IQ
tXS4jMnvyQ4BMvovaBKhAyauz1FoFEwmmyikAjMIX+GncJgBNHleUo7Ezza8H0tV
rOvBU4TH4WGoStSi/0ANgB8SqVDAKhh1lAwGmxZQqEvsQc177/dLyXUCaMSYuIaI
GFpD5wIzlyJkk4MMRSp87QKBgGlmN8ZA3SHFBPOwuD5HlHx2/C3rPzk8lcNDAVHE
Qpfz6Bakxu7s1EkQUDgE7jvN19DMzDJpkAegG1qf/jHNHjp+cR4ZlBpOTwzfX1LV
0Rdu7NectlWd244hX7wkiLb8r6vw76QssNyfhrADEriL4t0PwO4jPUpQ/i+4KUZY
v7YnAoGBAIVLG3qbEhA3nh+tXtr+xpb+3zVruTTiFpeSJgm9vXAgA6c1vS0boNIH
RyvU3qioBwcbuAQgpydBPGw5OelBzucXHdFMXLw90iYm/mrW/Uhyrkb6e8PTGWBE
HaUTp4D1YynUel0GBxZd9os9y2Q64oRaTYwGLS2dHOuDTHg9eVTO
-----END RSA PRIVATE KEY-----'''
# Some versions of OpenSSH generate these (slightly different keys): the PKCS#1
# structure is wrapped in an extra ASN.1 SEQUENCE and there's an empty string
# following it. It is not any standard key format and was probably a bug in
# OpenSSH at some point.
privateRSA_openssh_alternate = b"""-----BEGIN RSA PRIVATE KEY-----
MIIEqTCCBKMCAQACggEBANVqrHgj1tYb7CWhUMR3Y1CERQFVQhQqKuDQYO7U6aOtSvo5Bl6EVXVf
ADa/b6oqP4MmN8FpLlv98PPSfdaYzTpAeNXKqBjAEZMkCQyBTI/3nO0TFmqkBOlJd8PkVWSzeWie
LAjrrOgELSF3BaeO71MwDaXluz1q4gk2b/00031vRv+H2qkpJ6r/rfWF5j4auHodSrHqwFr3MN8f
wqTk7z+RSZZA1Rl3LTfDXuydpjpEpcKkKd3Vupw9RbPGLBhk1bo936t/zUKsp/EYC6BYFWILpCpu
Q8PkBJ81o0eORu0zpWW9vDspbgILV9906BO0NzV+g18gJmCm3K2Lxmx5mPcCAwEAAQKCAQAhTAhm
oijVtPuOD3IbhQkAufJON/AcV0vjUX+eI6fkOphVG+qLepgevNi6sfmJEhhgrOjMC04JWkBqui+Z
+LMkYIS5zmmVmvni/B9RTScV2ysnre+0aay+fRDrhkdwc7QAh5UVOzf55xTngLtoHhvm3btzY7ln
5rInf8/PMJvCmP3ZGDYvNi7xPYF6n+EDLUfbNFFiOd1P6ayoi9nW84TEF7lxnQYIQnhNu8Uq9MNY
zVUr7b4zXwTqe+YEJGPyLdc9G2zVnGNDL5KIjT5u2hg32A8lZ4kduUY0XsnOxIvtklozBw/fhgj5
kunb6zgINsnNzQoBSFs5PnrKxoCp3NQ5AoGBANlwBtjivNR4kVCU1MEbiThsRmRaUaCaBz1IjwNR
zGsSjn0asWXncXU54DIFdY0YTK+TsUmxZl94YnrRDMrmTUOznPRrfeYMmNzPIWKO1S4S3gSu1yRu
gzGiFaJEPSKpYiYiubLtVAqdCIOnBw3/GRiO2Ksd2kicMWgRoWZt49gdAoGBAPtEF4ukNr4eNx2n
9mFsBMSq3Xg+B4weMwKuAxSHg3rlnn0IZ6jyqr8ScM9yqafHCx2I1SD9nGPKRzBVTovEz/R/FqSS
EnShCcLEbpyMM++l5ffgK61PXBGqGoQ3W/166sPNfLDI5B9UY7XHr9/0Caf8xyX8XOmR15LFmB5W
07EjAoGAUa/pkp+UC0qEZT6UszuSELV0uIzJ78kOATL6L2gSoQMmrs9RaBRMJpsopAIzCF/hp3CY
ATR5XlKOxM82vB9LVazrwVOEx+FhqErUov9ADYAfEqlQwCoYdZQMBpsWUKhL7EHNe+/3S8l1AmjE
mLiGiBhaQ+cCM5ciZJODDEUqfO0CgYBpZjfGQN0hxQTzsLg+R5R8dvwt6z85PJXDQwFRxEKX8+gW
pMbu7NRJEFA4BO47zdfQzMwyaZAHoBtan/4xzR46fnEeGZQaTk8M319S1dEXbuzXnLZVnduOIV+8
JIi2/K+r8O+kLLDcn4awAxK4i+LdD8DuIz1KUP4vuClGWL+2JwKBgQCFSxt6mxIQN54frV7a/saW
/t81a7k04haXkiYJvb1wIAOnNb0tG6DSB0cr1N6oqAcHG7gEIKcnQTxsOTnpQc7nFx3RTFy8PdIm
Jv5q1v1Icq5G+nvD0xlgRB2lE6eA9WMp1HpdBgcWXfaLPctkOuKEWk2MBi0tnRzrg0x4PXlUzjAA
-----END RSA PRIVATE KEY-----"""
# Encrypted with the passphrase 'encrypted'
privateRSA_openssh_encrypted = b"""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,FFFFFFFFFFFFFFFF
qRwdy/fEJQbRccoyOdhJCQv5FpsbSJWtvOTLjrURDrjpO0WvOsvwV/ltLAZtD8b3
CSlgi8eGZk+rFKNMn1uUWuVeQR3Mfn2PE2hgB0Qc0HkdfG92cnVzoRjKRQxxJM5o
F4/1MSLhwQxLF53DHWfyzK8rFNUBRyRRBFevXwPfbp6DhFy+TkqOcrLufJT7x7BC
MvMNjILVhNr9hnIMkzmAiBo9lhJ0CHdWhcQHaX7Wk6UcGNDsYrwBoLlKPQlEGMU1
2scCi1UAbCilVLyUpHFuQeCzJHBl0YuLjwmo1kSv9U5E4ysh/FIuRf3aW91OK1+y
5rPE+zJYnzNAZ/Rl9xqtvTh8hlMwjdt582HbiJb6zJw5/NEz9OKyaJjjqZsZQ1TY
GOhV0Oqwx54jpkCh0XwJ2ofmcNuiO8LoUVBE124pa6ePSxLorqBhtq9nTgwUv1Mx
lFioAM6xGBSugCRZgbapHAw0M/I8Fa95A2ocRgx0N6TMp3ZBVltovb8pZrAGld3L
T5VWGDl3ZX/eM8YlXDupewOpb56g55Aevl0jERktqJdl+g9D+PXnCsJgxMcJbfl5
tWY9KoMxV+2Fj68SHdr/acCp7xgUMwHvVLFfYCeK/hpEe9O8vOAMTlXuq9zCMDAx
kL2kcSFbZHerc4TijtjXEALo06gYHEcLRtI6lvYrzbbmpCD7J7AnMzS3SQ2FzbFM
GARKfxBnYp0dZTDiY5HE45r8xWzUupoFcIuxKtuhBEtT7H2Ynv1NmU9qQRj8C1U5
LgM3lVEkrh4o1aBIAqX0OA4do08h2pdp9v0E4HKtCjSL5EBC6xrxmIY6b6dwCDLU
n16mv4jeKGy2IKvnF9r8HXdUG0yisNGxeq5Uf7STGH5KcCRrJCBZbawAbwURaLRo
HVydDP+5uEMMVjULpYgtuNo6gw6NczOhzgYAm3v2ZMjmZ8gclOsbRrH74XiOV8kd
89oYN5yNHD2EBqP5271kbmsYZ3VwBGN+HUdWIDi3gbFfHcmy59YQt09mZIMMwD7r
nRK+AKfBnNeMK9yZkkTRs3FwY4ZJdYn58pWfe4DNqMI7U5BQ9QZnLkfGLUqWtghX
jrselJrtSEMkll+feFf7jxiCKAwC/cWe1cvukjYPA6k75Wv7RaDENfwMlZtN+pfl
pzKqId20HhMNOceaeZagL+xzM1RRj+VcXR9BWfHI2AXZPcpTGAJwDOoQA64L9YGW
7QzxHmcDTlV0e59CpAdCLj//rQxFpYnuuJlwV2YyHYnvzfxsVge7u5ApcuBpNTjf
N46Heh24xXfqfM7OcO3BB71VfcvCNQavsp70PAtj4loShX6FpzatNX0iZasB988E
TtuiJ+9e7vH0xEhBLJIUJT9LvMto25KLHFHwSQXKEXM+hiY2nfObc1Cku4lBmy+7
uWpZrh3hkmKUtfdiyeqUUj1ypwZ6boZO1UZo0xTrpubmKQEvd2957YrEEVsi0LeB
uEzSlUXrwIV7Qw2VhoLxIaCyl5j4nOKetUeAjqVNi9makh0x4Ion5osxxYvYS9s/
Y48ATMnwm3+CdN6LE3IykHyHs7JuZmawWDR4CKJB6M1r0X+Xwgs0tQ==
-----END RSA PRIVATE KEY-----"""
# Encrypted with the passphrase 'testxp'. NB: this key was generated by
# OpenSSH, so it doesn't use the same key data as the other keys here.
privateRSA_openssh_encrypted_aes = b"""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,0673309A6ACCAB4B77DEE1C1E536AC26
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----"""
publicRSA_lsh = (
b'{KDEwOnB1YmxpYy1rZXkoMTQ6cnNhLXBrY3MxLXNoYTEoMTpuMjU3OgDVaqx4I9bWG+wloVD'
b'Ed2NQhEUBVUIUKirg0GDu1OmjrUr6OQZehFV1XwA2v2+qKj+DJjfBaS5b/fDz0n3WmM06QHj'
b'VyqgYwBGTJAkMgUyP95ztExZqpATpSXfD5FVks3loniwI66zoBC0hdwWnju9TMA2l5bs9auI'
b'JNm/9NNN9b0b/h9qpKSeq/631heY+Grh6HUqx6sBa9zDfH8Kk5O8/kUmWQNUZdy03w17snaY'
b'6RKXCpCnd1bqcPUWzxiwYZNW6Pd+rf81CrKfxGAugWBViC6QqbkPD5ASfNaNHjkbtM6Vlvbw'
b'7KW4CC1ffdOgTtDc1foNfICZgptyti8ZseZj3KSgxOmUzOgEAASkpKQ==}'
)
privateRSA_lsh = (
b"(11:private-key(9:rsa-pkcs1(1:n257:\x00\xd5j\xacx#\xd6\xd6\x1b\xec%\xa1P"
b"\xc4wcP\x84E\x01UB\x14**\xe0\xd0`\xee\xd4\xe9\xa3\xadJ\xfa9\x06^\x84Uu_"
b"\x006\xbfo\xaa*?\x83&7\xc1i.[\xfd\xf0\xf3\xd2}\xd6\x98\xcd:@x\xd5\xca"
b"\xa8\x18\xc0\x11\x93$\t\x0c\x81L\x8f\xf7\x9c\xed\x13\x16j\xa4\x04\xe9Iw"
b"\xc3\xe4Ud\xb3yh\x9e,\x08\xeb\xac\xe8\x04-!w\x05\xa7\x8e\xefS0\r\xa5\xe5"
b"\xbb=j\xe2\t6o\xfd4\xd3}oF\xff\x87\xda\xa9)'\xaa\xff\xad\xf5\x85\xe6>"
b"\x1a\xb8z\x1dJ\xb1\xea\xc0Z\xf70\xdf\x1f\xc2\xa4\xe4\xef?\x91I\x96@\xd5"
b"\x19w-7\xc3^\xec\x9d\xa6:D\xa5\xc2\xa4)\xdd\xd5\xba\x9c=E\xb3\xc6,\x18d"
b"\xd5\xba=\xdf\xab\x7f\xcdB\xac\xa7\xf1\x18\x0b\xa0X\x15b\x0b\xa4*nC\xc3"
b"\xe4\x04\x9f5\xa3G\x8eF\xed3\xa5e\xbd\xbc;)n\x02\x0bW\xdft\xe8\x13\xb475"
b"~\x83_ &`\xa6\xdc\xad\x8b\xc6ly\x98\xf7)(1:e3:\x01\x00\x01)(1:d256:!L"
b"\x08f\xa2(\xd5\xb4\xfb\x8e\x0fr\x1b\x85\t\x00\xb9\xf2N7\xf0\x1cWK\xe3Q"
b"\x7f\x9e#\xa7\xe4:\x98U\x1b\xea\x8bz\x98\x1e\xbc\xd8\xba\xb1\xf9\x89\x12"
b"\x18`\xac\xe8\xcc\x0bN\tZ@j\xba/\x99\xf8\xb3$`\x84\xb9\xcei\x95\x9a\xf9"
b"\xe2\xfc\x1fQM'\x15\xdb+'\xad\xef\xb4i\xac\xbe}\x10\xeb\x86Gps\xb4\x00"
b"\x87\x95\x15;7\xf9\xe7\x14\xe7\x80\xbbh\x1e\x1b\xe6\xdd\xbbsc\xb9g\xe6"
b"\xb2'\x7f\xcf\xcf0\x9b\xc2\x98\xfd\xd9\x186/6.\xf1=\x81z\x9f\xe1\x03-G"
b"\xdb4Qb9\xddO\xe9\xac\xa8\x8b\xd9\xd6\xf3\x84\xc4\x17\xb9q\x9d\x06\x08Bx"
b"M\xbb\xc5*\xf4\xc3X\xcdU+\xed\xbe3_\x04\xea{\xe6\x04$c\xf2-\xd7=\x1bl"
b"\xd5\x9ccC/\x92\x88\x8d>n\xda\x187\xd8\x0f%g\x89\x1d\xb9F4^\xc9\xce\xc4"
b"\x8b\xed\x92Z3\x07\x0f\xdf\x86\x08\xf9\x92\xe9\xdb\xeb8\x086\xc9\xcd\xcd"
b"\n\x01H[9>z\xca\xc6\x80\xa9\xdc\xd49)(1:p129:\x00\xfbD\x17\x8b\xa46\xbe"
b"\x1e7\x1d\xa7\xf6al\x04\xc4\xaa\xddx>\x07\x8c\x1e3\x02\xae\x03\x14\x87"
b"\x83z\xe5\x9e}\x08g\xa8\xf2\xaa\xbf\x12p\xcfr\xa9\xa7\xc7\x0b\x1d\x88"
b"\xd5 \xfd\x9cc\xcaG0UN\x8b\xc4\xcf\xf4\x7f\x16\xa4\x92\x12t\xa1\t\xc2"
b"\xc4n\x9c\x8c3\xef\xa5\xe5\xf7\xe0+\xadO\\\x11\xaa\x1a\x847[\xfdz\xea"
b"\xc3\xcd|\xb0\xc8\xe4\x1fTc\xb5\xc7\xaf\xdf\xf4\t\xa7\xfc\xc7%\xfc\\\xe9"
b"\x91\xd7\x92\xc5\x98\x1eV\xd3\xb1#)(1:q129:\x00\xd9p\x06\xd8\xe2\xbc\xd4"
b"x\x91P\x94\xd4\xc1\x1b\x898lFdZQ\xa0\x9a\x07=H\x8f\x03Q\xcck\x12\x8e}"
b"\x1a\xb1e\xe7qu9\xe02\x05u\x8d\x18L\xaf\x93\xb1I\xb1f_xbz\xd1\x0c\xca"
b"\xe6MC\xb3\x9c\xf4k}\xe6\x0c\x98\xdc\xcf!b\x8e\xd5.\x12\xde\x04\xae\xd7$"
b"n\x831\xa2\x15\xa2D=\"\xa9b&\"\xb9\xb2\xedT\n\x9d\x08\x83\xa7\x07\r\xff"
b"\x19\x18\x8e\xd8\xab\x1d\xdaH\x9c1h\x11\xa1fm\xe3\xd8\x1d)(1:a128:if7"
b"\xc6@\xdd!\xc5\x04\xf3\xb0\xb8>G\x94|v\xfc-\xeb?9<\x95\xc3C\x01Q\xc4B"
b"\x97\xf3\xe8\x16\xa4\xc6\xee\xec\xd4I\x10P8\x04\xee;\xcd\xd7\xd0\xcc\xcc"
b"2i\x90\x07\xa0\x1bZ\x9f\xfe1\xcd\x1e:~q\x1e\x19\x94\x1aNO\x0c\xdf_R\xd5"
b"\xd1\x17n\xec\xd7\x9c\xb6U\x9d\xdb\x8e!_\xbc$\x88\xb6\xfc\xaf\xab\xf0"
b"\xef\xa4,\xb0\xdc\x9f\x86\xb0\x03\x12\xb8\x8b\xe2\xdd\x0f\xc0\xee#=JP"
b"\xfe/\xb8)FX\xbf\xb6')(1:b128:Q\xaf\xe9\x92\x9f\x94\x0bJ\x84e>\x94\xb3;"
b"\x92\x10\xb5t\xb8\x8c\xc9\xef\xc9\x0e\x012\xfa/h\x12\xa1\x03&\xae\xcfQh"
b"\x14L&\x9b(\xa4\x023\x08_\xe1\xa7p\x98\x014y^R\x8e\xc4\xcf6\xbc\x1fKU"
b"\xac\xeb\xc1S\x84\xc7\xe1a\xa8J\xd4\xa2\xff@\r\x80\x1f\x12\xa9P\xc0*\x18"
b"u\x94\x0c\x06\x9b\x16P\xa8K\xecA\xcd{\xef\xf7K\xc9u\x02h\xc4\x98\xb8\x86"
b"\x88\x18ZC\xe7\x023\x97\"d\x93\x83\x0cE*|\xed)(1:c129:\x00\x85K\x1bz\x9b"
b"\x12\x107\x9e\x1f\xad^\xda\xfe\xc6\x96\xfe\xdf5k\xb94\xe2\x16\x97\x92&\t"
b"\xbd\xbdp \x03\xa75\xbd-\x1b\xa0\xd2\x07G+\xd4\xde\xa8\xa8\x07\x07\x1b"
b"\xb8\x04 \xa7'A<l99\xe9A\xce\xe7\x17\x1d\xd1L\\\xbc=\xd2&&\xfej\xd6\xfd"
b"Hr\xaeF\xfa{\xc3\xd3\x19`D\x1d\xa5\x13\xa7\x80\xf5c)\xd4z]\x06\x07\x16]"
b"\xf6\x8b=\xcbd:\xe2\x84ZM\x8c\x06--\x9d\x1c\xeb\x83Lx=yT\xce)))"
)
privateRSA_agentv3 = (
b"\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x03\x01\x00\x01\x00\x00\x01\x00!L"
b"\x08f\xa2(\xd5\xb4\xfb\x8e\x0fr\x1b\x85\t\x00\xb9\xf2N7\xf0\x1cWK\xe3Q"
b"\x7f\x9e
b"\x18`\xac\xe8\xcc\x0bN\tZ@j\xba/\x99\xf8\xb3$`\x84\xb9\xcei\x95\x9a\xf9"
b"\xe2\xfc\x1fQM'\x15\xdb+'\xad\xef\xb4i\xac\xbe}\x10\xeb\x86Gps\xb4\x00"
b"\x87\x95\x15;7\xf9\xe7\x14\xe7\x80\xbbh\x1e\x1b\xe6\xdd\xbbsc\xb9g\xe6"
b"\xb2'\x7f\xcf\xcf0\x9b\xc2\x98\xfd\xd9\x186/6.\xf1=\x81z\x9f\xe1\x03-G"
b"\xdb4Qb9\xddO\xe9\xac\xa8\x8b\xd9\xd6\xf3\x84\xc4\x17\xb9q\x9d\x06\x08Bx"
b"M\xbb\xc5*\xf4\xc3X\xcdU+\xed\xbe3_\x04\xea{\xe6\x04$c\xf2-\xd7=\x1bl"
b"\xd5\x9ccC/\x92\x88\x8d>n\xda\x187\xd8\x0f%g\x89\x1d\xb9F4^\xc9\xce\xc4"
b"\x8b\xed\x92Z3\x07\x0f\xdf\x86\x08\xf9\x92\xe9\xdb\xeb8\x086\xc9\xcd\xcd"
b"\n\x01H[9>z\xca\xc6\x80\xa9\xdc\xd49\x00\x00\x01\x01\x00\xd5j\xacx#\xd6"
b"\xd6\x1b\xec%\xa1P\xc4wcP\x84E\x01UB\x14**\xe0\xd0`\xee\xd4\xe9\xa3\xadJ"
b"\xfa9\x06^\x84Uu_\x006\xbfo\xaa*?\x83&7\xc1i.[\xfd\xf0\xf3\xd2}\xd6\x98"
b"\xcd:@x\xd5\xca\xa8\x18\xc0\x11\x93$\t\x0c\x81L\x8f\xf7\x9c\xed\x13\x16j"
b"\xa4\x04\xe9Iw\xc3\xe4Ud\xb3yh\x9e,\x08\xeb\xac\xe8\x04-!w\x05\xa7\x8e"
b"\xefS0\r\xa5\xe5\xbb=j\xe2\t6o\xfd4\xd3}oF\xff\x87\xda\xa9)'\xaa\xff\xad"
b"\xf5\x85\xe6>\x1a\xb8z\x1dJ\xb1\xea\xc0Z\xf70\xdf\x1f\xc2\xa4\xe4\xef?"
b"\x91I\x96@\xd5\x19w-7\xc3^\xec\x9d\xa6:D\xa5\xc2\xa4)\xdd\xd5\xba\x9c=E"
b"\xb3\xc6,\x18d\xd5\xba=\xdf\xab\x7f\xcdB\xac\xa7\xf1\x18\x0b\xa0X\x15b"
b"\x0b\xa4*nC\xc3\xe4\x04\x9f5\xa3G\x8eF\xed3\xa5e\xbd\xbc;)n\x02\x0bW\xdf"
b"t\xe8\x13\xb475~\x83_ &`\xa6\xdc\xad\x8b\xc6ly\x98\xf7\x00\x00\x00\x81"
b"\x00\x85K\x1bz\x9b\x12\x107\x9e\x1f\xad^\xda\xfe\xc6\x96\xfe\xdf5k\xb94"
b"\xe2\x16\x97\x92&\t\xbd\xbdp \x03\xa75\xbd-\x1b\xa0\xd2\x07G+\xd4\xde"
b"\xa8\xa8\x07\x07\x1b\xb8\x04 \xa7'A<l99\xe9A\xce\xe7\x17\x1d\xd1L\\\xbc="
b"\xd2&&\xfej\xd6\xfdHr\xaeF\xfa{\xc3\xd3\x19`D\x1d\xa5\x13\xa7\x80\xf5c)"
b"\xd4z]\x06\x07\x16]\xf6\x8b=\xcbd:\xe2\x84ZM\x8c\x06--\x9d\x1c\xeb\x83Lx"
b"=yT\xce\x00\x00\x00\x81\x00\xd9p\x06\xd8\xe2\xbc\xd4x\x91P\x94\xd4\xc1"
b"\x1b\x898lFdZQ\xa0\x9a\x07=H\x8f\x03Q\xcck\x12\x8e}\x1a\xb1e\xe7qu9\xe02"
b"\x05u\x8d\x18L\xaf\x93\xb1I\xb1f_xbz\xd1\x0c\xca\xe6MC\xb3\x9c\xf4k}\xe6"
b"\x0c\x98\xdc\xcf!b\x8e\xd5.\x12\xde\x04\xae\xd7$n\x831\xa2\x15\xa2D=\""
b"\xa9b&\"\xb9\xb2\xedT\n\x9d\x08\x83\xa7\x07\r\xff\x19\x18\x8e\xd8\xab"
b"\x1d\xdaH\x9c1h\x11\xa1fm\xe3\xd8\x1d\x00\x00\x00\x81\x00\xfbD\x17\x8b"
b"\xa46\xbe\x1e7\x1d\xa7\xf6al\x04\xc4\xaa\xddx>\x07\x8c\x1e3\x02\xae\x03"
b"\x14\x87\x83z\xe5\x9e}\x08g\xa8\xf2\xaa\xbf\x12p\xcfr\xa9\xa7\xc7\x0b"
b"\x1d\x88\xd5 \xfd\x9cc\xcaG0UN\x8b\xc4\xcf\xf4\x7f\x16\xa4\x92\x12t\xa1"
b"\t\xc2\xc4n\x9c\x8c3\xef\xa5\xe5\xf7\xe0+\xadO\\\x11\xaa\x1a\x847[\xfdz"
b"\xea\xc3\xcd|\xb0\xc8\xe4\x1fTc\xb5\xc7\xaf\xdf\xf4\t\xa7\xfc\xc7%\xfc\\"
b"\xe9\x91\xd7\x92\xc5\x98\x1eV\xd3\xb1#"
)
publicDSA_openssh = b"""\
ssh-dss AAAAB3NzaC1kc3MAAACBAJKQOsVERVDQIpANHH+JAAylo9\
LvFYmFFVMIuHFGlZpIL7sh3IMkqy+cssINM/lnHD3fmsAyLlUXZtt6PD9LgZRazsPOgptuH+Gu48G\
+yFuE8l0fVVUivos/MmYVJ66qT99htcZKatrTWZnpVW7gFABoqw+he2LZ0gkeU0+Sx9a5AAAAFQD0\
EYmTNaFJ8CS0+vFSF4nYcyEnSQAAAIEAkgLjxHJAE7qFWdTqf7EZngu7jAGmdB9k3YzMHe1ldMxEB\
7zNw5aOnxjhoYLtiHeoEcOk2XOyvnE+VfhIWwWAdOiKRTEZlmizkvhGbq0DCe2EPMXirjqWACI5nD\
ioQX1oEMonR8N3AEO5v9SfBqS2Q9R6OBr6lf04RvwpHZ0UGu8AAACAAhRpxGMIWEyaEh8YnjiazQT\
NEpklRZqeBGo1gotJggNmVaIQNIClGlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2G\
gdgMQWC7S6WFIXePGGXqNQDdWxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8= \
comment\
"""
privateDSA_openssh = b"""\
-----BEGIN DSA PRIVATE KEY-----
MIIBvAIBAAKBgQCSkDrFREVQ0CKQDRx/iQAMpaPS7xWJhRVTCLhxRpWaSC+7IdyD
JKsvnLLCDTP5Zxw935rAMi5VF2bbejw/S4GUWs7DzoKbbh/hruPBvshbhPJdH1VV
Ir6LPzJmFSeuqk/fYbXGSmra01mZ6VVu4BQAaKsPoXti2dIJHlNPksfWuQIVAPQR
iZM1oUnwJLT68VIXidhzISdJAoGBAJIC48RyQBO6hVnU6n+xGZ4Lu4wBpnQfZN2M
zB3tZXTMRAe8zcOWjp8Y4aGC7Yh3qBHDpNlzsr5xPlX4SFsFgHToikUxGZZos5L4
Rm6tAwnthDzF4q46lgAiOZw4qEF9aBDKJ0fDdwBDub/UnwaktkPUejga+pX9OEb8
KR2dFBrvAoGAAhRpxGMIWEyaEh8YnjiazQTNEpklRZqeBGo1gotJggNmVaIQNICl
GlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2GgdgMQWC7S6WFIXeP
GGXqNQDdWxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8CFQDV2gbL
czUdxCus0pfEP1bddaXRLQ==
-----END DSA PRIVATE KEY-----\
"""
publicDSA_lsh = decodebytes(b"""\
e0tERXdPbkIxWW14cFl5MXJaWGtvTXpwa2MyRW9NVHB3TVRJNU9nQ1NrRHJGUkVWUTBDS1FEUngv
aVFBTXBhUFM3eFdKaFJWVENMaHhScFdhU0MrN0lkeURKS3N2bkxMQ0RUUDVaeHc5MzVyQU1pNVZG
MmJiZWp3L1M0R1VXczdEem9LYmJoL2hydVBCdnNoYmhQSmRIMVZWSXI2TFB6Sm1GU2V1cWsvZlli
WEdTbXJhMDFtWjZWVnU0QlFBYUtzUG9YdGkyZElKSGxOUGtzZld1U2tvTVRweE1qRTZBUFFSaVpN
MW9VbndKTFQ2OFZJWGlkaHpJU2RKS1NneE9tY3hNams2QUpJQzQ4UnlRQk82aFZuVTZuK3hHWjRM
dTR3QnBuUWZaTjJNekIzdFpYVE1SQWU4emNPV2pwOFk0YUdDN1loM3FCSERwTmx6c3I1eFBsWDRT
RnNGZ0hUb2lrVXhHWlpvczVMNFJtNnRBd250aER6RjRxNDZsZ0FpT1p3NHFFRjlhQkRLSjBmRGR3
QkR1Yi9Vbndha3RrUFVlamdhK3BYOU9FYjhLUjJkRkJydktTZ3hPbmt4TWpnNkFoUnB4R01JV0V5
YUVoOFluamlhelFUTkVwa2xSWnFlQkdvMWdvdEpnZ05tVmFJUU5JQ2xHbEx5Q2kzNTllZkVVdVFj
WjlTWHhNNTlQK2hlY2MvR1UvR0hha1c1WVdFNGRQMkdnZGdNUVdDN1M2V0ZJWGVQR0dYcU5RRGRX
eGxYOHVtaGVudlFxYTFQbktyRlJoRHJKdzhaN0dqZEh4ZmxzeENFbVhQb0xOOHBLU2s9fQ==
""")
privateDSA_lsh = decodebytes(b"""\
KDExOnByaXZhdGUta2V5KDM6ZHNhKDE6cDEyOToAkpA6xURFUNAikA0cf4kADKWj0u8ViYUVUwi4
cUaVmkgvuyHcgySrL5yywg0z+WccPd+awDIuVRdm23o8P0uBlFrOw86Cm24f4a7jwb7IW4TyXR9V
VSK+iz8yZhUnrqpP32G1xkpq2tNZmelVbuAUAGirD6F7YtnSCR5TT5LH1rkpKDE6cTIxOgD0EYmT
NaFJ8CS0+vFSF4nYcyEnSSkoMTpnMTI5OgCSAuPEckATuoVZ1Op/sRmeC7uMAaZ0H2TdjMwd7WV0
zEQHvM3Dlo6fGOGhgu2Id6gRw6TZc7K+cT5V+EhbBYB06IpFMRmWaLOS+EZurQMJ7YQ8xeKuOpYA
IjmcOKhBfWgQyidHw3cAQ7m/1J8GpLZD1Ho4GvqV/ThG/CkdnRQa7ykoMTp5MTI4OgIUacRjCFhM
mhIfGJ44ms0EzRKZJUWangRqNYKLSYIDZlWiEDSApRpS8got+fXnxFLkHGfUl8TOfT/oXnHPxlPx
h2pFuWFhOHT9hoHYDEFgu0ulhSF3jxhl6jUA3VsZV/LpoXp70KmtT5yqxUYQ6ycPGexo3R8X5bMQ
hJlz6CzfKSgxOngyMToA1doGy3M1HcQrrNKXxD9W3XWl0S0pKSk=
""")
privateDSA_agentv3 = decodebytes(b"""\
AAAAB3NzaC1kc3MAAACBAJKQOsVERVDQIpANHH+JAAylo9LvFYmFFVMIuHFGlZpIL7sh3IMkqy+c
ssINM/lnHD3fmsAyLlUXZtt6PD9LgZRazsPOgptuH+Gu48G+yFuE8l0fVVUivos/MmYVJ66qT99h
tcZKatrTWZnpVW7gFABoqw+he2LZ0gkeU0+Sx9a5AAAAFQD0EYmTNaFJ8CS0+vFSF4nYcyEnSQAA
AIEAkgLjxHJAE7qFWdTqf7EZngu7jAGmdB9k3YzMHe1ldMxEB7zNw5aOnxjhoYLtiHeoEcOk2XOy
vnE+VfhIWwWAdOiKRTEZlmizkvhGbq0DCe2EPMXirjqWACI5nDioQX1oEMonR8N3AEO5v9SfBqS2
Q9R6OBr6lf04RvwpHZ0UGu8AAACAAhRpxGMIWEyaEh8YnjiazQTNEpklRZqeBGo1gotJggNmVaIQ
NIClGlLyCi359efEUuQcZ9SXxM59P+hecc/GU/GHakW5YWE4dP2GgdgMQWC7S6WFIXePGGXqNQDd
WxlX8umhenvQqa1PnKrFRhDrJw8Z7GjdHxflsxCEmXPoLN8AAAAVANXaBstzNR3EK6zSl8Q/Vt11
pdEt
""")
__all__ = ['DSAData', 'RSAData', 'privateDSA_agentv3', 'privateDSA_lsh',
'privateDSA_openssh', 'privateRSA_agentv3', 'privateRSA_lsh',
'privateRSA_openssh', 'publicDSA_lsh', 'publicDSA_openssh',
'publicRSA_lsh', 'publicRSA_openssh', 'privateRSA_openssh_alternate']
| true
| true
|
1c4a75959aba89da3bf8a9fad406f6e1008f3883
| 7,491
|
py
|
Python
|
cmsplugin_filer_link2/models.py
|
tobifroe/djangocms-link2
|
75780b0259df5d403b4648522404cae9768f76d2
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_filer_link2/models.py
|
tobifroe/djangocms-link2
|
75780b0259df5d403b4648522404cae9768f76d2
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_filer_link2/models.py
|
tobifroe/djangocms-link2
|
75780b0259df5d403b4648522404cae9768f76d2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.urls.exceptions import NoReverseMatch
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from cms.models import CMSPlugin
from filer.fields.file import FilerFileField
from djangocms_attributes_field.fields import AttributesField
from cmsplugin_filer_link2.fields import Select2PageField
from cmsplugin_filer_link2.validators import validate_anchor_id
# Fallback choices for FilerLink2Plugin.link_style when the project does not
# configure FILER_LINK_STYLES. NOTE(review): "DEFULT" is a typo, but the name
# is part of the module's public surface, so it is kept for compatibility.
DEFULT_LINK_STYLES = (
    (" ", "Default"),
)
# Projects override the available styles via settings.FILER_LINK_STYLES.
LINK_STYLES = getattr(settings, "FILER_LINK_STYLES", DEFULT_LINK_STYLES)
# HTML attributes callers may NOT set via link_attributes: these are rendered
# from dedicated model fields and must not be overridden.
EXCLUDED_KEYS = ['class', 'href', 'target', ]
@python_2_unicode_compatible
class FilerLink2Plugin(CMSPlugin):
    """django CMS plugin model representing a single link.

    A link points at exactly one destination — an external ``url``, an
    internal ``page_link``, a ``mailto`` address, or a filer ``file`` —
    which is enforced by :meth:`clean`. Internal page links are additionally
    cached in ``persistent_page_link`` so a working href can still be
    rendered (and the breakage recorded via :class:`LinkHealthState`) after
    the target page is moved or deleted.
    """

    name = models.CharField(_('name'), max_length=255)
    url = models.CharField(_('url'), blank=True, null=True, max_length=2000,
                           help_text=_('The url must specify the protocol, e.g. https://DOMAIN.tld'))
    page_link = Select2PageField(
        verbose_name=_('page'),
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
    )
    # Last known absolute URL of page_link; survives page moves/deletions.
    persistent_page_link = models.CharField(_('internal url'), blank=True, null=True, max_length=2000)
    mailto = models.EmailField(_('mailto'), blank=True, null=True, max_length=254)
    link_style = models.CharField(_('link style'), max_length=255,
                                  choices=LINK_STYLES, default=LINK_STYLES[0][0])
    new_window = models.BooleanField(_('new window?'), default=False,
                                     help_text=_('Do you want this link to open a new window?'))
    file = FilerFileField(blank=True, null=True, on_delete=models.SET_NULL)
    link_attributes = AttributesField(excluded_keys=EXCLUDED_KEYS, blank=True,
                                      help_text=_('Optional. Adds HTML attributes to the rendered link.'))
    encrypt_mailto = models.BooleanField(_('Encryption of Mailto'), default=False,
                                         help_text=_('Encrypt the mailto, as protection against bots collecting mails '
                                                     'addresses.'))
    anchor_id = models.CharField(
        _('Anchor ID'),
        blank=True,
        max_length=100,
        validators=[validate_anchor_id]
    )

    cmsplugin_ptr = models.OneToOneField(
        to=CMSPlugin,
        on_delete=models.CASCADE,
        related_name='%(app_label)s_%(class)s',
        parent_link=True,
    )

    # Destination fields, in priority order (see get_link / active_destination).
    DESTINATION_FIELDS = ('url', 'page_link', 'mailto', 'file')

    def __str__(self):
        return self.name

    def _configured_destinations(self):
        """Return the names of destination fields that are actually set.

        A field counts as configured when it is neither ``None`` nor the
        empty string. Shared by :meth:`clean` and :attr:`active_destination`.
        """
        return [d for d in self.DESTINATION_FIELDS
                if getattr(self, d) is not None and getattr(self, d) != '']

    def clean(self):
        """Validate that exactly one destination is configured.

        :raises ValidationError: if zero or more than one of ``url``,
            ``page_link``, ``mailto`` and ``file`` is set.
        """
        super(FilerLink2Plugin, self).clean()
        configured_destinations = self._configured_destinations()
        if len(configured_destinations) == 0:
            raise ValidationError(_('Please choose a destination'))
        elif len(configured_destinations) > 1:
            raise ValidationError(
                _('Please only choose one destination! You set: {}'.format(', '.join(configured_destinations))))

    def save(self, *args, **kwargs):
        super(FilerLink2Plugin, self).save(*args, **kwargs)
        # Any recorded health state refers to the pre-save destination and is
        # now stale; it will be re-detected if the link is still broken.
        LinkHealthState.objects.filter(link=self).delete()

    def get_encrypted_mailto(self):
        """Return a ``javascript:`` href that assembles the mailto at click time.

        Splitting the address around ``@`` keeps the literal ``mailto:...``
        string out of the page source, as a lightweight defence against
        address-harvesting bots.
        """
        name, domain = self.mailto.split('@')
        return 'javascript:window.location.href = \'mailto:\' + [\'{}\', \'{}\'].join(\'@\')'.format(name, domain)

    def get_name(self):
        """Return the display name, obfuscated when it is an encrypted address.

        Bug fix: the fallback ``return self.name`` used to be nested inside
        the ``encrypt_mailto`` branch, so the method returned ``None`` for
        every link without mailto encryption.
        """
        if self.encrypt_mailto and self.mailto and self.name == self.mailto:
            name, domain = self.name.split('@')
            # escape name and domain for security reasons
            return mark_safe('{}<!---->@<!---->{}'.format(escape(name), escape(domain)))
        return self.name

    def get_link(self):
        """Return the href for this link, never ``None``.

        Destination precedence: file, mailto, url, page_link, then the
        cached ``persistent_page_link``. Broken internal links are flagged
        via :meth:`set_linkstate` and the cached URL is returned so the
        visitor reaches the site's 404 page instead of a crash.
        """
        if self.file:
            link = self.file.url
        elif self.mailto:
            if self.encrypt_mailto:
                # NOTE(review): wrapping a javascript: URL in ugettext looks
                # odd but is kept as-is — presumably harmless; confirm.
                link = _(self.get_encrypted_mailto())
            else:
                link = 'mailto:{}'.format(_(self.mailto))
        elif self.url:
            link = _(self.url)
        elif self.page_link:
            try:
                link = self.page_link.get_absolute_url()
            except NoReverseMatch:
                # if this internal link doesn't work anymore, we mark it not reachable
                self.set_linkstate(LinkHealthState.NOT_REACHABLE)
                # return old internal link and send user to 404
                link = self.persistent_page_link
            else:
                # check if the target page has been moved or renamed and update accordingly
                if link != self.persistent_page_link:
                    self.persistent_page_link = link
                    self.save()
        elif self.persistent_page_link:
            # happens when this link instance pointed to a removed page
            self.set_linkstate(LinkHealthState.NOT_REACHABLE)
            link = self.persistent_page_link
        else:
            link = ''

        # Append anchor ID to url
        if self.anchor_id:
            link += '#{}'.format(self.anchor_id)
        return link or ''

    def set_linkstate(self, state):
        """Record ``state`` for this link, or clear the record when ``None``."""
        if state is None:
            LinkHealthState.objects.filter(link=self).delete()
        else:
            LinkHealthState.objects.update_or_create(link=self, defaults={'state': state})

    def get_linkstate(self):
        """Return the recorded LinkHealthState code, or ``None`` if healthy."""
        try:
            return self.linkhealth.state
        except ObjectDoesNotExist:
            return None

    @property
    def active_destination(self):
        """ The active destination determines which destination tab should be set to active. If the field is not set
        yet, we return None

        :return: field_name: str
        """
        configured_destinations = self._configured_destinations()
        if len(configured_destinations) == 0:
            return None
        return configured_destinations[0]
@python_2_unicode_compatible
class LinkHealthState(models.Model):
    """Last detected faulty state of a :class:`FilerLink2Plugin` link.

    A row exists only while the link is considered broken; healthy links
    have no state row (see ``FilerLink2Plugin.set_linkstate``, which deletes
    the row when passed ``None``, and ``FilerLink2Plugin.save``, which
    clears it on every save).
    """

    # State codes, loosely modelled on HTTP status-code classes.
    NOT_REACHABLE = '4xx'
    REDIRECT = '3xx'
    SERVER_ERROR = '5xx'
    BAD_CONFIGURED = 'bad'
    TIMEOUT = 'to'

    LINK_STATES = (
        (REDIRECT, _('Redirected')),
        (NOT_REACHABLE, _('Not reachable')),
        (SERVER_ERROR, _('Server error')),
        (BAD_CONFIGURED, _('Bad configured')),
        (TIMEOUT, _('Timeout')),
    )

    # One state row per link; reverse accessor is plugin.linkhealth.
    link = models.OneToOneField(
        FilerLink2Plugin,
        on_delete=models.CASCADE,
        unique=True,
        related_name='linkhealth',
        verbose_name=_('Link name')
    )
    state = models.CharField(max_length=3, choices=LINK_STATES, verbose_name=_('State'))
    # auto_now: refreshed automatically on every save of this row.
    detected = models.DateTimeField(auto_now=True, verbose_name=_('Detected on'),
                                    help_text=_('Date and time when the faulty link state was detected.'))

    def __str__(self):
        return _(u'Link state for: {}').format(self.link.name)

    class Meta:
        verbose_name = _('Link Health State')
        verbose_name_plural = _('Link Health States')
| 38.415385
| 119
| 0.611
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.urls.exceptions import NoReverseMatch
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from cms.models import CMSPlugin
from filer.fields.file import FilerFileField
from djangocms_attributes_field.fields import AttributesField
from cmsplugin_filer_link2.fields import Select2PageField
from cmsplugin_filer_link2.validators import validate_anchor_id
DEFULT_LINK_STYLES = (
(" ", "Default"),
)
LINK_STYLES = getattr(settings, "FILER_LINK_STYLES", DEFULT_LINK_STYLES)
EXCLUDED_KEYS = ['class', 'href', 'target', ]
@python_2_unicode_compatible
class FilerLink2Plugin(CMSPlugin):
name = models.CharField(_('name'), max_length=255)
url = models.CharField(_('url'), blank=True, null=True, max_length=2000,
help_text=_('The url must specify the protocol, e.g. https://DOMAIN.tld'))
page_link = Select2PageField(
verbose_name=_('page'),
blank=True,
null=True,
on_delete=models.SET_NULL,
)
persistent_page_link = models.CharField(_('internal url'), blank=True, null=True, max_length=2000)
mailto = models.EmailField(_('mailto'), blank=True, null=True, max_length=254)
link_style = models.CharField(_('link style'), max_length=255,
choices=LINK_STYLES, default=LINK_STYLES[0][0])
new_window = models.BooleanField(_('new window?'), default=False,
help_text=_('Do you want this link to open a new window?'))
file = FilerFileField(blank=True, null=True, on_delete=models.SET_NULL)
link_attributes = AttributesField(excluded_keys=EXCLUDED_KEYS, blank=True,
help_text=_('Optional. Adds HTML attributes to the rendered link.'))
encrypt_mailto = models.BooleanField(_('Encryption of Mailto'), default=False,
help_text=_('Encrypt the mailto, as protection against bots collecting mails '
'addresses.'))
anchor_id = models.CharField(
_('Anchor ID'),
blank=True,
max_length=100,
validators=[validate_anchor_id]
)
cmsplugin_ptr = models.OneToOneField(
to=CMSPlugin,
on_delete=models.CASCADE,
related_name='%(app_label)s_%(class)s',
parent_link=True,
)
def __str__(self):
return self.name
def clean(self):
super(FilerLink2Plugin, self).clean()
configured_destinations = [d for d in
('url', 'page_link', 'mailto', 'file')
if getattr(self, d) is not None and getattr(self, d) != '']
if len(configured_destinations) == 0:
raise ValidationError(_('Please choose a destination'))
elif len(configured_destinations) > 1:
raise ValidationError(
_('Please only choose one destination! You set: {}'.format(', '.join(configured_destinations))))
def save(self, *args, **kwargs):
super(FilerLink2Plugin, self).save(*args, **kwargs)
LinkHealthState.objects.filter(link=self).delete()
def get_encrypted_mailto(self):
name, domain = self.mailto.split('@')
return 'javascript:window.location.href = \'mailto:\' + [\'{}\', \'{}\'].join(\'@\')'.format(name, domain)
def get_name(self):
if self.encrypt_mailto and self.mailto:
if self.name == self.mailto:
name, domain = self.name.split('@')
return mark_safe('{}<!---->@<!---->{}'.format(escape(name), escape(domain)))
else:
return self.name
def get_link(self):
if self.file:
link = self.file.url
elif self.mailto:
if self.encrypt_mailto:
link = _(self.get_encrypted_mailto())
else:
link = 'mailto:{}'.format(_(self.mailto))
elif self.url:
link = _(self.url)
elif self.page_link:
try:
link = self.page_link.get_absolute_url()
except NoReverseMatch:
self.set_linkstate(LinkHealthState.NOT_REACHABLE)
# return old internal link and send user to 404
link = self.persistent_page_link
else:
# check if the target page has been moved or renamed and update accordingly
if link != self.persistent_page_link:
self.persistent_page_link = link
self.save()
elif self.persistent_page_link:
# happens when this link instance pointed to a removed page
self.set_linkstate(LinkHealthState.NOT_REACHABLE)
link = self.persistent_page_link
else:
link = ''
# Append anchor ID to url
if self.anchor_id:
link += '
return link or ''
def set_linkstate(self, state):
if state is None:
LinkHealthState.objects.filter(link=self).delete()
else:
LinkHealthState.objects.update_or_create(link=self, defaults={'state': state})
def get_linkstate(self):
try:
return self.linkhealth.state
except ObjectDoesNotExist:
return None
@property
def active_destination(self):
configured_destinations = [d for d in
('url', 'page_link', 'mailto', 'file')
if getattr(self, d) is not None and getattr(self, d) != '']
if len(configured_destinations) == 0:
return None
return configured_destinations[0]
@python_2_unicode_compatible
class LinkHealthState(models.Model):
NOT_REACHABLE = '4xx'
REDIRECT = '3xx'
SERVER_ERROR = '5xx'
BAD_CONFIGURED = 'bad'
TIMEOUT = 'to'
LINK_STATES = (
(REDIRECT, _('Redirected')),
(NOT_REACHABLE, _('Not reachable')),
(SERVER_ERROR, _('Server error')),
(BAD_CONFIGURED, _('Bad configured')),
(TIMEOUT, _('Timeout')),
)
link = models.OneToOneField(
FilerLink2Plugin,
on_delete=models.CASCADE,
unique=True,
related_name='linkhealth',
verbose_name=_('Link name')
)
state = models.CharField(max_length=3, choices=LINK_STATES, verbose_name=_('State'))
detected = models.DateTimeField(auto_now=True, verbose_name=_('Detected on'),
help_text=_('Date and time when the faulty link state was detected.'))
def __str__(self):
return _(u'Link state for: {}').format(self.link.name)
class Meta:
verbose_name = _('Link Health State')
verbose_name_plural = _('Link Health States')
| true
| true
|
1c4a75e892d60a1d12ff20ec2dff431ecbc0002b
| 257
|
py
|
Python
|
lib/errata/decorators.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 1
|
2020-09-21T06:48:47.000Z
|
2020-09-21T06:48:47.000Z
|
lib/errata/decorators.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 5
|
2021-02-05T19:43:08.000Z
|
2021-06-04T23:23:29.000Z
|
lib/errata/decorators.py
|
adarshtri/art-dashboard-server
|
c6c61147d49aa43b6e2892ce07d8a115c1478b0c
|
[
"Apache-2.0"
] | 6
|
2021-02-06T07:21:37.000Z
|
2021-06-07T12:40:37.000Z
|
from lib.errata.kerberos import handle_kinit
import functools
def update_keytab(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
handle_kinit()
func_ret = func(*args, **kwargs)
return func_ret
return wrapper
| 21.416667
| 44
| 0.673152
|
from lib.errata.kerberos import handle_kinit
import functools
def update_keytab(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
handle_kinit()
func_ret = func(*args, **kwargs)
return func_ret
return wrapper
| true
| true
|
1c4a76055960b4b25afce3760c920e7649074f56
| 516
|
py
|
Python
|
Evulation_Script.py
|
OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge
|
dafe6f33ff6269869377d01a014ab1528b0f1c1d
|
[
"MIT"
] | null | null | null |
Evulation_Script.py
|
OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge
|
dafe6f33ff6269869377d01a014ab1528b0f1c1d
|
[
"MIT"
] | null | null | null |
Evulation_Script.py
|
OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge
|
dafe6f33ff6269869377d01a014ab1528b0f1c1d
|
[
"MIT"
] | null | null | null |
### How the output of the model will be evulated is with
## Dice score
##
#https://pypi.org/project/MedPy/0.4.0/
## https://loli.github.io/medpy/_modules/medpy/metric/binary.html
### HAUSDORFF DISTANCE
###https://en.wikipedia.org/wiki/Hausdorff_distance
## https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
## DICE SCORE
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.dice.html
# https://www.kaggle.com/c/understanding_cloud_organization/discussion/114093
| 28.666667
| 87
| 0.753876
| true
| true
|
|
1c4a763ee76b213d17c59a4865ede3a3b254073f
| 1,805
|
py
|
Python
|
delta/data/frontend/cepstrum_test.py
|
luffywalf/delta
|
7eb4e3be578a680737616efff6858d280595ff48
|
[
"Apache-2.0"
] | 1
|
2019-10-27T08:15:22.000Z
|
2019-10-27T08:15:22.000Z
|
delta/data/frontend/cepstrum_test.py
|
luffywalf/delta
|
7eb4e3be578a680737616efff6858d280595ff48
|
[
"Apache-2.0"
] | null | null | null |
delta/data/frontend/cepstrum_test.py
|
luffywalf/delta
|
7eb4e3be578a680737616efff6858d280595ff48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import os
from pathlib import Path
from delta.data.frontend.read_wav import ReadWav
from delta.data.frontend.cepstrum import Cepstrum
import numpy as np
class CepstrumTest(tf.test.TestCase):
def test_cepstrum(self):
wav_path = str(
Path(os.environ['MAIN_ROOT']).joinpath('delta/layers/ops/data/sm1_cln.wav'))
with self.session():
read_wav = ReadWav.params().instantiate()
input_data, sample_rate = read_wav.call(wav_path)
cepstrum = Cepstrum.params({'window_length':0.025}).instantiate()
cepstrum_test = cepstrum(input_data, sample_rate)
output_true = np.array(
[[0.525808, 0.579537, 0.159656, 0.014726, -0.1866810],
[0.225988, 1.557304, 3.381828, 0.132935, 0.7128600],
[-1.832759, -1.045178, 0.753158, 0.116107, -0.9307780],
[-0.696277, 1.333355, 1.590942, 2.041829, -0.0805630],
[-0.377375, 2.984320, 0.036302, 3.676640, 1.1709290]])
self.assertAllClose(cepstrum_test.eval()[15:20, 7:12], output_true)
if __name__ == '__main__':
tf.test.main()
| 37.604167
| 82
| 0.677562
|
import tensorflow as tf
import os
from pathlib import Path
from delta.data.frontend.read_wav import ReadWav
from delta.data.frontend.cepstrum import Cepstrum
import numpy as np
class CepstrumTest(tf.test.TestCase):
def test_cepstrum(self):
wav_path = str(
Path(os.environ['MAIN_ROOT']).joinpath('delta/layers/ops/data/sm1_cln.wav'))
with self.session():
read_wav = ReadWav.params().instantiate()
input_data, sample_rate = read_wav.call(wav_path)
cepstrum = Cepstrum.params({'window_length':0.025}).instantiate()
cepstrum_test = cepstrum(input_data, sample_rate)
output_true = np.array(
[[0.525808, 0.579537, 0.159656, 0.014726, -0.1866810],
[0.225988, 1.557304, 3.381828, 0.132935, 0.7128600],
[-1.832759, -1.045178, 0.753158, 0.116107, -0.9307780],
[-0.696277, 1.333355, 1.590942, 2.041829, -0.0805630],
[-0.377375, 2.984320, 0.036302, 3.676640, 1.1709290]])
self.assertAllClose(cepstrum_test.eval()[15:20, 7:12], output_true)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
1c4a77766df324feef1c4233c8317b9f927125b0
| 14,874
|
py
|
Python
|
gmqtt/mqtt/handler.py
|
liamdiprose/gmqtt
|
4fb92a9629fc57c885f5e07c46b951c4f45e9572
|
[
"MIT"
] | null | null | null |
gmqtt/mqtt/handler.py
|
liamdiprose/gmqtt
|
4fb92a9629fc57c885f5e07c46b951c4f45e9572
|
[
"MIT"
] | null | null | null |
gmqtt/mqtt/handler.py
|
liamdiprose/gmqtt
|
4fb92a9629fc57c885f5e07c46b951c4f45e9572
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import struct
import time
from asyncio import iscoroutinefunction
from collections import defaultdict
from copy import deepcopy
from functools import partial
from .utils import unpack_variable_byte_integer, IdGenerator, run_coroutine_or_function
from .property import Property
from .protocol import MQTTProtocol
from .constants import MQTTCommands, PubAckReasonCode, PubRecReasonCode, DEFAULT_CONFIG
from .constants import MQTTv311, MQTTv50
logger = logging.getLogger(__name__)
def _empty_callback(*args, **kwargs):
pass
class MQTTError(Exception):
pass
class MQTTConnectError(MQTTError):
__messages__ = {
1: "Connection Refused: unacceptable protocol version",
2: "Connection Refused: identifier rejected",
3: "Connection Refused: broker unavailable",
4: "Connection Refused: bad user name or password",
5: "Connection Refused: not authorised",
10: 'Cannot handle CONNACK package',
128: "Connection Refused: Unspecified error",
129: "Connection Refused: Malformed Packet",
130: "Connection Refused: Protocol Error",
131: "Connection Refused: Implementation specific error",
132: "Connection Refused: Unsupported Protocol Version",
133: "Connection Refused: Client Identifier not valid",
134: "Connection Refused: Bad User Name or Password",
135: "Connection Refused: Not authorized",
136: "Connection Refused: Server unavailable",
137: "Connection Refused: Server busy",
138: "Connection Refused: Banned",
140: "Connection Refused: Bad authentication method",
144: "Connection Refused: Topic Name invalid",
149: "Connection Refused: Packet too large",
151: "Connection Refused: Quota exceeded",
153: "Connection Refused: Payload format invalid",
154: "Connection Refused: Retain not supported",
155: "Connection Refused: QoS not supported",
156: "Connection Refused: Use another server",
157: "Connection Refused: Server moved",
159: "Connection Refused: Connection rate exceeded",
}
def __init__(self, code):
self._code = code
self.message = self.__messages__.get(code, 'Unknown error')\
def __str__(self):
return "code {} ({})".format(self._code, self.message)
class EventCallback(object):
def __init__(self, *args, **kwargs):
super(EventCallback, self).__init__()
self._connected = asyncio.Event()
self._on_connected_callback = _empty_callback
self._on_disconnected_callback = _empty_callback
self._on_message_callback = _empty_callback
self._on_subscribe_callback = _empty_callback
self._on_unsubscribe_callback = _empty_callback
self._config = deepcopy(DEFAULT_CONFIG)
self._reconnects_config_cache = None
self.failed_connections = 0
def _temporatily_stop_reconnect(self):
self._reconnects_config_cache = self._config['reconnect_retries']
self.stop_reconnect()
def _restore_config(self):
if self._reconnects_config_cache is not None:
self._config['reconnect_retries'] = self._reconnects_config_cache
def stop_reconnect(self):
self._config['reconnect_retries'] = 0
def set_config(self, config):
self._config.update(config)
@property
def _reconnect(self):
if self.reconnect_retries == -1:
return True
return bool(self.reconnect_retries)
@property
def reconnect_delay(self):
return self._config['reconnect_delay']
@property
def reconnect_retries(self):
return self._config['reconnect_retries']
@property
def on_subscribe(self):
return self._on_subscribe_callback
@on_subscribe.setter
def on_subscribe(self, cb):
if not callable(cb):
raise ValueError
self._on_subscribe_callback = cb
@property
def on_connect(self):
return self._on_connected_callback
@on_connect.setter
def on_connect(self, cb):
if not callable(cb):
raise ValueError
self._on_connected_callback = cb
@property
def on_message(self):
return self._on_message_callback
@on_message.setter
def on_message(self, cb):
if not callable(cb):
raise ValueError
self._on_message_callback = cb
@property
def on_disconnect(self):
return self._on_disconnected_callback
@on_disconnect.setter
def on_disconnect(self, cb):
if not callable(cb):
raise ValueError
self._on_disconnected_callback = cb
@property
def on_unsubscribe(self):
return self._on_unsubscribe_callback
@on_unsubscribe.setter
def on_unsubscribe(self, cb):
if not callable(cb):
raise ValueError
self._on_unsubscribe_callback = cb
class MqttPackageHandler(EventCallback):
def __init__(self, *args, **kwargs):
super(MqttPackageHandler, self).__init__(*args, **kwargs)
self._messages_in = {}
self._handler_cache = {}
self._error = None
self._connection = None
self._id_generator = IdGenerator(max=kwargs.get('receive_maximum', 65535))
if self.protocol_version == MQTTv50:
self._optimistic_acknowledgement = kwargs.get('optimistic_acknowledgement', True)
else:
self._optimistic_acknowledgement = True
def _send_command_with_mid(self, cmd, mid, dup, reason_code=0):
raise NotImplementedError
def _remove_message_from_query(self, mid):
raise NotImplementedError
def _send_puback(self, mid, reason_code=0):
self._send_command_with_mid(MQTTCommands.PUBACK, mid, False, reason_code=reason_code)
def _send_pubrec(self, mid, reason_code=0):
self._send_command_with_mid(MQTTCommands.PUBREC, mid, False, reason_code=reason_code)
def _send_pubrel(self, mid, dup, reason_code=0):
self._send_command_with_mid(MQTTCommands.PUBREL | 2, mid, dup, reason_code=reason_code)
def __get_handler__(self, cmd):
cmd_type = cmd & 0xF0
if cmd_type not in self._handler_cache:
handler_name = '_handle_{}_packet'.format(MQTTCommands(cmd_type).name.lower())
self._handler_cache[cmd_type] = getattr(self, handler_name, self._default_handler)
return self._handler_cache[cmd_type]
def _handle_packet(self, cmd, packet):
logger.debug('[CMD %s] %s', hex(cmd), packet)
handler = self.__get_handler__(cmd)
handler(cmd, packet)
self._last_msg_in = time.monotonic()
def _handle_exception_in_future(self, future):
if not future.exception():
return
self.on_disconnect(self, packet=None, exc=future.exception())
def _default_handler(self, cmd, packet):
logger.warning('[UNKNOWN CMD] %s %s', hex(cmd), packet)
def _handle_disconnect_packet(self, cmd, packet):
if self._reconnect:
future = asyncio.ensure_future(self.reconnect(delay=True))
future.add_done_callback(self._handle_exception_in_future)
self.on_disconnect(self, packet)
def _parse_properties(self, packet):
if self.protocol_version < MQTTv50:
# If protocol is version is less than 5.0, there is no properties in packet
return {}, packet
properties_len, left_packet = unpack_variable_byte_integer(packet)
packet = left_packet[:properties_len]
left_packet = left_packet[properties_len:]
properties_dict = defaultdict(list)
while packet:
property_identifier, = struct.unpack("!B", packet[:1])
property_obj = Property.factory(id_=property_identifier)
if property_obj is None:
logger.critical('[PROPERTIES] received invalid property id {}, disconnecting'.format(property_identifier))
return None, None
result, packet = property_obj.loads(packet[1:])
for k, v in result.items():
properties_dict[k].append(v)
properties_dict = dict(properties_dict)
return properties_dict, left_packet
def _handle_connack_packet(self, cmd, packet):
self._connected.set()
(flags, result) = struct.unpack("!BB", packet[:2])
if result != 0:
logger.warning('[CONNACK] %s', hex(result))
self.failed_connections += 1
if result == 1 and self.protocol_version == MQTTv50:
logger.info('[CONNACK] Downgrading to MQTT 3.1 protocol version')
MQTTProtocol.proto_ver = MQTTv311
future = asyncio.ensure_future(self.reconnect(delay=True))
future.add_done_callback(self._handle_exception_in_future)
return
else:
self._error = MQTTConnectError(result)
if self._reconnect:
asyncio.ensure_future(self.reconnect(delay=True))
return
else:
self.failed_connections = 0
if len(packet) > 2:
properties, _ = self._parse_properties(packet[2:])
if properties is None:
self._error = MQTTConnectError(10)
asyncio.ensure_future(self.disconnect())
self._connack_properties = properties
# TODO: Implement checking for the flags and results
# see 3.2.2.3 Connect Return code of the http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.pdf
logger.debug('[CONNACK] flags: %s, result: %s', hex(flags), hex(result))
self.on_connect(self, flags, result, self.properties)
def _handle_publish_packet(self, cmd, raw_packet):
header = cmd
dup = (header & 0x08) >> 3
qos = (header & 0x06) >> 1
retain = header & 0x01
pack_format = "!H" + str(len(raw_packet) - 2) + 's'
(slen, packet) = struct.unpack(pack_format, raw_packet)
pack_format = '!' + str(slen) + 's' + str(len(packet) - slen) + 's'
(topic, packet) = struct.unpack(pack_format, packet)
if not topic:
logger.warning('[MQTT ERR PROTO] topic name is empty')
return
try:
print_topic = topic.decode('utf-8')
except UnicodeDecodeError as exc:
logger.warning('[INVALID CHARACTER IN TOPIC] %s', topic, exc_info=exc)
print_topic = topic
payload = packet
logger.debug('[RECV %s with QoS: %s] %s', print_topic, qos, payload)
if qos > 0:
pack_format = "!H" + str(len(packet) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, packet)
else:
mid = None
properties, packet = self._parse_properties(packet)
properties['dup'] = dup
properties['retain'] = retain
if packet is None:
logger.critical('[INVALID MESSAGE] skipping: {}'.format(raw_packet))
return
if qos == 0:
run_coroutine_or_function(self.on_message, self, print_topic, packet, qos, properties)
elif qos == 1:
self._handle_qos_1_publish_packet(mid, packet, print_topic, properties)
elif qos == 2:
self._handle_qos_2_publish_packet(mid, packet, print_topic, properties)
self._id_generator.free_id(mid)
def _handle_qos_2_publish_packet(self, mid, packet, print_topic, properties):
if self._optimistic_acknowledgement:
self._send_pubrec(mid)
run_coroutine_or_function(self.on_message, self, print_topic, packet, 2, properties)
else:
run_coroutine_or_function(self.on_message, self, print_topic, packet, 2, properties,
callback=partial(self.__handle_publish_callback, qos=2, mid=mid))
def __handle_publish_callback(self, f, qos=None, mid=None):
reason_code = f.result()
if reason_code not in (c.value for c in PubRecReasonCode):
raise ValueError('Invalid PUBREC reason code {}'.format(reason_code))
if qos == 2:
self._send_pubrec(mid, reason_code=reason_code)
else:
self._send_puback(mid, reason_code=reason_code)
self._id_generator.free_id(mid)
def _handle_qos_1_publish_packet(self, mid, packet, print_topic, properties):
if self._optimistic_acknowledgement:
self._send_puback(mid)
run_coroutine_or_function(self.on_message, self, print_topic, packet, 1, properties)
else:
run_coroutine_or_function(self.on_message, self, print_topic, packet, 1, properties,
callback=partial(self.__handle_publish_callback, qos=1, mid=mid))
def __call__(self, cmd, packet):
try:
result = self._handle_packet(cmd, packet)
except Exception as exc:
logger.error('[ERROR HANDLE PKG]', exc_info=exc)
result = None
return result
def _handle_suback_packet(self, cmd, raw_packet):
pack_format = "!H" + str(len(raw_packet) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, raw_packet)
pack_format = "!" + "B" * len(packet)
granted_qos = struct.unpack(pack_format, packet)
logger.info('[SUBACK] %s %s', mid, granted_qos)
self.on_subscribe(self, mid, granted_qos)
self._id_generator.free_id(mid)
def _handle_unsuback_packet(self, cmd, raw_packet):
pack_format = "!H" + str(len(raw_packet) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, raw_packet)
pack_format = "!" + "B" * len(packet)
granted_qos = struct.unpack(pack_format, packet)
logger.info('[UNSUBACK] %s %s', mid, granted_qos)
self.on_unsubscribe(self, mid, granted_qos)
self._id_generator.free_id(mid)
def _handle_pingreq_packet(self, cmd, packet):
logger.debug('[PING REQUEST] %s %s', hex(cmd), packet)
pass
def _handle_pingresp_packet(self, cmd, packet):
logger.debug('[PONG REQUEST] %s %s', hex(cmd), packet)
def _handle_puback_packet(self, cmd, packet):
(mid, ) = struct.unpack("!H", packet[:2])
# TODO: For MQTT 5.0 parse reason code and properties
logger.info('[RECEIVED PUBACK FOR] %s', mid)
self._id_generator.free_id(mid)
self._remove_message_from_query(mid)
def _handle_pubcomp_packet(self, cmd, packet):
pass
def _handle_pubrec_packet(self, cmd, packet):
pass
def _handle_pubrel_packet(self, cmd, packet):
mid, = struct.unpack("!H", packet)
self._id_generator.free_id(mid)
if mid not in self._messages_in:
return
topic, payload, qos = self._messages_in[mid]
| 36.189781
| 122
| 0.646699
|
import asyncio
import logging
import struct
import time
from asyncio import iscoroutinefunction
from collections import defaultdict
from copy import deepcopy
from functools import partial
from .utils import unpack_variable_byte_integer, IdGenerator, run_coroutine_or_function
from .property import Property
from .protocol import MQTTProtocol
from .constants import MQTTCommands, PubAckReasonCode, PubRecReasonCode, DEFAULT_CONFIG
from .constants import MQTTv311, MQTTv50
logger = logging.getLogger(__name__)
def _empty_callback(*args, **kwargs):
pass
class MQTTError(Exception):
pass
class MQTTConnectError(MQTTError):
__messages__ = {
1: "Connection Refused: unacceptable protocol version",
2: "Connection Refused: identifier rejected",
3: "Connection Refused: broker unavailable",
4: "Connection Refused: bad user name or password",
5: "Connection Refused: not authorised",
10: 'Cannot handle CONNACK package',
128: "Connection Refused: Unspecified error",
129: "Connection Refused: Malformed Packet",
130: "Connection Refused: Protocol Error",
131: "Connection Refused: Implementation specific error",
132: "Connection Refused: Unsupported Protocol Version",
133: "Connection Refused: Client Identifier not valid",
134: "Connection Refused: Bad User Name or Password",
135: "Connection Refused: Not authorized",
136: "Connection Refused: Server unavailable",
137: "Connection Refused: Server busy",
138: "Connection Refused: Banned",
140: "Connection Refused: Bad authentication method",
144: "Connection Refused: Topic Name invalid",
149: "Connection Refused: Packet too large",
151: "Connection Refused: Quota exceeded",
153: "Connection Refused: Payload format invalid",
154: "Connection Refused: Retain not supported",
155: "Connection Refused: QoS not supported",
156: "Connection Refused: Use another server",
157: "Connection Refused: Server moved",
159: "Connection Refused: Connection rate exceeded",
}
def __init__(self, code):
self._code = code
self.message = self.__messages__.get(code, 'Unknown error')\
def __str__(self):
return "code {} ({})".format(self._code, self.message)
class EventCallback(object):
def __init__(self, *args, **kwargs):
super(EventCallback, self).__init__()
self._connected = asyncio.Event()
self._on_connected_callback = _empty_callback
self._on_disconnected_callback = _empty_callback
self._on_message_callback = _empty_callback
self._on_subscribe_callback = _empty_callback
self._on_unsubscribe_callback = _empty_callback
self._config = deepcopy(DEFAULT_CONFIG)
self._reconnects_config_cache = None
self.failed_connections = 0
def _temporatily_stop_reconnect(self):
self._reconnects_config_cache = self._config['reconnect_retries']
self.stop_reconnect()
def _restore_config(self):
if self._reconnects_config_cache is not None:
self._config['reconnect_retries'] = self._reconnects_config_cache
def stop_reconnect(self):
self._config['reconnect_retries'] = 0
def set_config(self, config):
self._config.update(config)
@property
def _reconnect(self):
if self.reconnect_retries == -1:
return True
return bool(self.reconnect_retries)
@property
def reconnect_delay(self):
return self._config['reconnect_delay']
@property
def reconnect_retries(self):
return self._config['reconnect_retries']
@property
def on_subscribe(self):
return self._on_subscribe_callback
@on_subscribe.setter
def on_subscribe(self, cb):
if not callable(cb):
raise ValueError
self._on_subscribe_callback = cb
@property
def on_connect(self):
return self._on_connected_callback
@on_connect.setter
def on_connect(self, cb):
if not callable(cb):
raise ValueError
self._on_connected_callback = cb
@property
def on_message(self):
return self._on_message_callback
@on_message.setter
def on_message(self, cb):
if not callable(cb):
raise ValueError
self._on_message_callback = cb
@property
def on_disconnect(self):
return self._on_disconnected_callback
@on_disconnect.setter
def on_disconnect(self, cb):
if not callable(cb):
raise ValueError
self._on_disconnected_callback = cb
@property
def on_unsubscribe(self):
return self._on_unsubscribe_callback
@on_unsubscribe.setter
def on_unsubscribe(self, cb):
if not callable(cb):
raise ValueError
self._on_unsubscribe_callback = cb
class MqttPackageHandler(EventCallback):
def __init__(self, *args, **kwargs):
super(MqttPackageHandler, self).__init__(*args, **kwargs)
self._messages_in = {}
self._handler_cache = {}
self._error = None
self._connection = None
self._id_generator = IdGenerator(max=kwargs.get('receive_maximum', 65535))
if self.protocol_version == MQTTv50:
self._optimistic_acknowledgement = kwargs.get('optimistic_acknowledgement', True)
else:
self._optimistic_acknowledgement = True
def _send_command_with_mid(self, cmd, mid, dup, reason_code=0):
raise NotImplementedError
def _remove_message_from_query(self, mid):
raise NotImplementedError
def _send_puback(self, mid, reason_code=0):
self._send_command_with_mid(MQTTCommands.PUBACK, mid, False, reason_code=reason_code)
def _send_pubrec(self, mid, reason_code=0):
self._send_command_with_mid(MQTTCommands.PUBREC, mid, False, reason_code=reason_code)
def _send_pubrel(self, mid, dup, reason_code=0):
self._send_command_with_mid(MQTTCommands.PUBREL | 2, mid, dup, reason_code=reason_code)
def __get_handler__(self, cmd):
cmd_type = cmd & 0xF0
if cmd_type not in self._handler_cache:
handler_name = '_handle_{}_packet'.format(MQTTCommands(cmd_type).name.lower())
self._handler_cache[cmd_type] = getattr(self, handler_name, self._default_handler)
return self._handler_cache[cmd_type]
def _handle_packet(self, cmd, packet):
logger.debug('[CMD %s] %s', hex(cmd), packet)
handler = self.__get_handler__(cmd)
handler(cmd, packet)
self._last_msg_in = time.monotonic()
def _handle_exception_in_future(self, future):
if not future.exception():
return
self.on_disconnect(self, packet=None, exc=future.exception())
def _default_handler(self, cmd, packet):
logger.warning('[UNKNOWN CMD] %s %s', hex(cmd), packet)
def _handle_disconnect_packet(self, cmd, packet):
if self._reconnect:
future = asyncio.ensure_future(self.reconnect(delay=True))
future.add_done_callback(self._handle_exception_in_future)
self.on_disconnect(self, packet)
def _parse_properties(self, packet):
if self.protocol_version < MQTTv50:
return {}, packet
properties_len, left_packet = unpack_variable_byte_integer(packet)
packet = left_packet[:properties_len]
left_packet = left_packet[properties_len:]
properties_dict = defaultdict(list)
while packet:
property_identifier, = struct.unpack("!B", packet[:1])
property_obj = Property.factory(id_=property_identifier)
if property_obj is None:
logger.critical('[PROPERTIES] received invalid property id {}, disconnecting'.format(property_identifier))
return None, None
result, packet = property_obj.loads(packet[1:])
for k, v in result.items():
properties_dict[k].append(v)
properties_dict = dict(properties_dict)
return properties_dict, left_packet
def _handle_connack_packet(self, cmd, packet):
self._connected.set()
(flags, result) = struct.unpack("!BB", packet[:2])
if result != 0:
logger.warning('[CONNACK] %s', hex(result))
self.failed_connections += 1
if result == 1 and self.protocol_version == MQTTv50:
logger.info('[CONNACK] Downgrading to MQTT 3.1 protocol version')
MQTTProtocol.proto_ver = MQTTv311
future = asyncio.ensure_future(self.reconnect(delay=True))
future.add_done_callback(self._handle_exception_in_future)
return
else:
self._error = MQTTConnectError(result)
if self._reconnect:
asyncio.ensure_future(self.reconnect(delay=True))
return
else:
self.failed_connections = 0
if len(packet) > 2:
properties, _ = self._parse_properties(packet[2:])
if properties is None:
self._error = MQTTConnectError(10)
asyncio.ensure_future(self.disconnect())
self._connack_properties = properties
logger.debug('[CONNACK] flags: %s, result: %s', hex(flags), hex(result))
self.on_connect(self, flags, result, self.properties)
def _handle_publish_packet(self, cmd, raw_packet):
header = cmd
dup = (header & 0x08) >> 3
qos = (header & 0x06) >> 1
retain = header & 0x01
pack_format = "!H" + str(len(raw_packet) - 2) + 's'
(slen, packet) = struct.unpack(pack_format, raw_packet)
pack_format = '!' + str(slen) + 's' + str(len(packet) - slen) + 's'
(topic, packet) = struct.unpack(pack_format, packet)
if not topic:
logger.warning('[MQTT ERR PROTO] topic name is empty')
return
try:
print_topic = topic.decode('utf-8')
except UnicodeDecodeError as exc:
logger.warning('[INVALID CHARACTER IN TOPIC] %s', topic, exc_info=exc)
print_topic = topic
payload = packet
logger.debug('[RECV %s with QoS: %s] %s', print_topic, qos, payload)
if qos > 0:
pack_format = "!H" + str(len(packet) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, packet)
else:
mid = None
properties, packet = self._parse_properties(packet)
properties['dup'] = dup
properties['retain'] = retain
if packet is None:
logger.critical('[INVALID MESSAGE] skipping: {}'.format(raw_packet))
return
if qos == 0:
run_coroutine_or_function(self.on_message, self, print_topic, packet, qos, properties)
elif qos == 1:
self._handle_qos_1_publish_packet(mid, packet, print_topic, properties)
elif qos == 2:
self._handle_qos_2_publish_packet(mid, packet, print_topic, properties)
self._id_generator.free_id(mid)
def _handle_qos_2_publish_packet(self, mid, packet, print_topic, properties):
if self._optimistic_acknowledgement:
self._send_pubrec(mid)
run_coroutine_or_function(self.on_message, self, print_topic, packet, 2, properties)
else:
run_coroutine_or_function(self.on_message, self, print_topic, packet, 2, properties,
callback=partial(self.__handle_publish_callback, qos=2, mid=mid))
    def __handle_publish_callback(self, f, qos=None, mid=None):
        """Completion callback for deferred (non-optimistic) acknowledgement.

        ``f`` is the finished future of the user's on_message handler; its
        result is used as the reason code of the outgoing PUBREC/PUBACK.
        Raises ValueError when the handler returned an unknown reason code.
        """
        reason_code = f.result()
        # NOTE(review): the code is validated against PubRecReasonCode even on
        # the qos==1 (PUBACK) path — confirm PUBACK codes share the same set.
        if reason_code not in (c.value for c in PubRecReasonCode):
            raise ValueError('Invalid PUBREC reason code {}'.format(reason_code))
        if qos == 2:
            self._send_pubrec(mid, reason_code=reason_code)
        else:
            self._send_puback(mid, reason_code=reason_code)
        self._id_generator.free_id(mid)
def _handle_qos_1_publish_packet(self, mid, packet, print_topic, properties):
if self._optimistic_acknowledgement:
self._send_puback(mid)
run_coroutine_or_function(self.on_message, self, print_topic, packet, 1, properties)
else:
run_coroutine_or_function(self.on_message, self, print_topic, packet, 1, properties,
callback=partial(self.__handle_publish_callback, qos=1, mid=mid))
def __call__(self, cmd, packet):
try:
result = self._handle_packet(cmd, packet)
except Exception as exc:
logger.error('[ERROR HANDLE PKG]', exc_info=exc)
result = None
return result
def _handle_suback_packet(self, cmd, raw_packet):
pack_format = "!H" + str(len(raw_packet) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, raw_packet)
pack_format = "!" + "B" * len(packet)
granted_qos = struct.unpack(pack_format, packet)
logger.info('[SUBACK] %s %s', mid, granted_qos)
self.on_subscribe(self, mid, granted_qos)
self._id_generator.free_id(mid)
def _handle_unsuback_packet(self, cmd, raw_packet):
pack_format = "!H" + str(len(raw_packet) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, raw_packet)
pack_format = "!" + "B" * len(packet)
granted_qos = struct.unpack(pack_format, packet)
logger.info('[UNSUBACK] %s %s', mid, granted_qos)
self.on_unsubscribe(self, mid, granted_qos)
self._id_generator.free_id(mid)
def _handle_pingreq_packet(self, cmd, packet):
logger.debug('[PING REQUEST] %s %s', hex(cmd), packet)
pass
    def _handle_pingresp_packet(self, cmd, packet):
        # Broker answered our keep-alive ping; nothing to do beyond tracing.
        logger.debug('[PONG REQUEST] %s %s', hex(cmd), packet)
def _handle_puback_packet(self, cmd, packet):
(mid, ) = struct.unpack("!H", packet[:2])
logger.info('[RECEIVED PUBACK FOR] %s', mid)
self._id_generator.free_id(mid)
self._remove_message_from_query(mid)
    def _handle_pubcomp_packet(self, cmd, packet):
        # Final step of the outbound QoS-2 handshake; no handling implemented.
        # NOTE(review): the message id is never freed on this path — confirm.
        pass
    def _handle_pubrec_packet(self, cmd, packet):
        # PUBREC for an outgoing QoS-2 publish; no handling implemented here
        # (no PUBREL is sent from this method) — NOTE(review): confirm intent.
        pass
    def _handle_pubrel_packet(self, cmd, packet):
        """Handle PUBREL (step 3 of the inbound QoS-2 handshake)."""
        # Payload is just the 2-byte message id.
        mid, = struct.unpack("!H", packet)
        self._id_generator.free_id(mid)
        if mid not in self._messages_in:
            return
        topic, payload, qos = self._messages_in[mid]
        # NOTE(review): the stored message is unpacked but never used and no
        # PUBCOMP is sent — this handler looks unfinished; confirm upstream.
| true
| true
|
1c4a78d8ce31fc6b52765819f449138247bee001
| 1,552
|
py
|
Python
|
examples/example1.py
|
lilydia/py-linkedin-jobs-scraper
|
a015739a7a25e8dd035f06ba629c6a48f376db43
|
[
"MIT"
] | null | null | null |
examples/example1.py
|
lilydia/py-linkedin-jobs-scraper
|
a015739a7a25e8dd035f06ba629c6a48f376db43
|
[
"MIT"
] | null | null | null |
examples/example1.py
|
lilydia/py-linkedin-jobs-scraper
|
a015739a7a25e8dd035f06ba629c6a48f376db43
|
[
"MIT"
] | 1
|
2021-11-16T21:59:10.000Z
|
2021-11-16T21:59:10.000Z
|
from linkedin_jobs_scraper import LinkedinScraper
from linkedin_jobs_scraper.events import Events, EventData
from linkedin_jobs_scraper.query import Query, QueryOptions, QueryFilters
from linkedin_jobs_scraper.filters import RelevanceFilters, TimeFilters, TypeFilters, ExperienceLevelFilters
def on_data(data: EventData):
    """Print the job function of each scraped posting as it arrives."""
    print(f'[ON_DATA] {data.job_function}')
def on_error(error):
    """Report a scraper error on stdout."""
    print(f'[ON_ERROR] {error}')
def on_end():
    """Signal that all queries have finished."""
    marker = '[ON_END]'
    print(marker)
# Build the scraper; the event handlers above receive results and errors.
scraper = LinkedinScraper(
    chrome_options=None,  # You can pass your custom Chrome options here
    max_workers=1,  # How many threads will be spawn to run queries concurrently (one Chrome driver for each thread)
    slow_mo=0.4,  # Slow down the scraper to avoid 'Too many requests (429)' errors
)
# Add event listeners
scraper.on(Events.DATA, on_data)
scraper.on(Events.ERROR, on_error)
scraper.on(Events.END, on_end)
queries = [
    # First query has no search text: uses the library's default feed.
    Query(
        options=QueryOptions(
            optimize=True,  # Blocks requests for resources like images and stylesheet
            limit=0  # Limit the number of jobs to scrape
        )
    ),
    # Second query: filtered full-time/internship 'Engineer' roles in Toronto.
    Query(
        query='Engineer',
        options=QueryOptions(
            locations=['Toronto, Ontario, Canada'],
            optimize=False,
            limit=5,
            filters=QueryFilters(
                relevance=RelevanceFilters.RECENT,
                time=TimeFilters.MONTH,
                type=[TypeFilters.FULL_TIME, TypeFilters.INTERNSHIP],
                experience=None,  # no experience-level filter applied
            )
        )
    ),
]
scraper.run(queries)
| 28.740741
| 116
| 0.671392
|
from linkedin_jobs_scraper import LinkedinScraper
from linkedin_jobs_scraper.events import Events, EventData
from linkedin_jobs_scraper.query import Query, QueryOptions, QueryFilters
from linkedin_jobs_scraper.filters import RelevanceFilters, TimeFilters, TypeFilters, ExperienceLevelFilters
def on_data(data: EventData):
print('[ON_DATA]', data.job_function)
def on_error(error):
print('[ON_ERROR]', error)
def on_end():
print('[ON_END]')
scraper = LinkedinScraper(
chrome_options=None,
max_workers=1,
slow_mo=0.4,
)
scraper.on(Events.DATA, on_data)
scraper.on(Events.ERROR, on_error)
scraper.on(Events.END, on_end)
queries = [
Query(
options=QueryOptions(
optimize=True,
limit=0
)
),
Query(
query='Engineer',
options=QueryOptions(
locations=['Toronto, Ontario, Canada'],
optimize=False,
limit=5,
filters=QueryFilters(
relevance=RelevanceFilters.RECENT,
time=TimeFilters.MONTH,
type=[TypeFilters.FULL_TIME, TypeFilters.INTERNSHIP],
experience=None,
)
)
),
]
scraper.run(queries)
| true
| true
|
1c4a78f4dcf40fc5ffc5474c684e0cddfad04df8
| 698
|
py
|
Python
|
prime.py
|
CooperPair/end_to_end_encryption
|
8eab4c76b0cb5bcba36442c08f9dbb336b476117
|
[
"MIT"
] | 1
|
2019-01-26T16:20:09.000Z
|
2019-01-26T16:20:09.000Z
|
prime.py
|
CooperPair/end_to_end_encryption
|
8eab4c76b0cb5bcba36442c08f9dbb336b476117
|
[
"MIT"
] | null | null | null |
prime.py
|
CooperPair/end_to_end_encryption
|
8eab4c76b0cb5bcba36442c08f9dbb336b476117
|
[
"MIT"
] | null | null | null |
import math
def isPrime(num):
    """Trial-division primality test.

    Returns True iff ``num`` is prime; anything below 2 is not prime.
    Slower than primeSieve() for bulk queries.
    """
    if num < 2:
        return False
    # A composite number must have a divisor no larger than its square root.
    limit = int(math.sqrt(num))
    for divisor in range(2, limit + 1):
        if num % divisor == 0:
            return False
    return True
def primeSieve(size=None):
    """Sieve of Eratosthenes: return all primes below *size*, ascending.

    Args:
        size: Exclusive upper bound. When None (the default), falls back to
            the module-level ``sieveSize`` global that the original
            implementation implicitly depended on, so ``primeSieve()`` keeps
            working for existing callers.

    Returns:
        List of primes p with p < size; empty list when size < 2 (the
        original crashed with an IndexError for size 0 or 1).
    """
    if size is None:
        size = sieveSize  # backward-compat: original read this global
    if size < 2:
        return []
    sieve = [True] * size  # sieve[n] is True while n is still a prime candidate
    sieve[0] = sieve[1] = False  # 0 and 1 are not prime
    for i in range(2, int(math.sqrt(size)) + 1):
        if sieve[i]:
            # Smaller multiples of i were already struck out by smaller primes.
            for multiple in range(i * i, size, i):
                sieve[multiple] = False
    return [n for n, is_prime in enumerate(sieve) if is_prime]
| 20.529412
| 81
| 0.670487
|
import math
def isPrime(num):
if num < 2:
return False
for i in range(2, int(math.sqrt(num))+1):
if num%i == 0:
return False
return True
def primeSieve():
sieve = [True]*sieveSize
sieve[0] = False
sieve[1] = False
for i in range(2, int(math.sqrt(sieveSize)) + 1):
pointer = i*2
while pointer < sieveSize:
sieve[pointer] = False
pointer += i
primes = []
for i in range(sieveSize):
if sieve[i] == True:
primes.append(i)
return primes
| true
| true
|
1c4a79bd34e32dc0bc69909503c6fba71fdbe9ab
| 13,311
|
py
|
Python
|
scripts/run_fever_scoring.py
|
salesforce/DialFact
|
d400b250147e45c106b18e52254b1060f7c1575d
|
[
"BSD-3-Clause"
] | 10
|
2021-11-08T00:37:57.000Z
|
2022-03-28T12:19:29.000Z
|
scripts/run_fever_scoring.py
|
salesforce/DialFact
|
d400b250147e45c106b18e52254b1060f7c1575d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T18:37:17.000Z
|
2022-01-24T18:37:17.000Z
|
scripts/run_fever_scoring.py
|
salesforce/DialFact
|
d400b250147e45c106b18e52254b1060f7c1575d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-22T08:56:04.000Z
|
2022-03-22T08:56:04.000Z
|
import argparse
import sys
import jsonlines
from tqdm import tqdm
import logging
import json
import torch
import torch.nn.functional as F
import jsonlines
import random
import os
import numpy as np
from scipy.special import softmax
# os.environ["NCCL_SHM_DISABLE"] = "1"
from tqdm import tqdm
from typing import List
from sklearn.metrics import f1_score, precision_score, recall_score
from datasets import Dataset
from torch.utils.data import Dataset, DataLoader
from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification, get_cosine_schedule_with_warmup
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer, EvalPrediction, default_data_collator, set_seed
from transformers import InputExample, PreTrainedTokenizer, InputFeatures
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
LABELS = ["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]
def get_json_lines(inp_file):
    """Read a JSON-lines file and return its records as a list."""
    with jsonlines.open(inp_file) as reader:
        return list(reader)
def write_json_lines(output_file_name, list_data, output_folder):
    """Write each record of *list_data* to output_folder + output_file_name
    in JSON-lines format, overwriting any existing file."""
    target = output_folder + output_file_name
    with jsonlines.open(target, mode='w') as writer:
        for record in list_data:
            writer.write(record)
class ClassificationModel():
    """Wraps a HuggingFace sequence-classification model and tokenizer for
    pairwise (evidence, claim) scoring."""
    def __init__(self, num_labels=2, max_length=256, model_name_or_path='albert-large-v2', config_name=None, tokenizer_name=None):
        # config_name/tokenizer_name fall back to model_name_or_path when unset.
        NUM_LABELS = num_labels
        self.max_seq_length = 256
        self.model_name_or_path = model_name_or_path
        self.config_name = config_name
        self.tokenizer_name = tokenizer_name
        self.max_length = max_length
        config = AutoConfig.from_pretrained(
            self.config_name if self.config_name else self.model_name_or_path,
            num_labels=NUM_LABELS,
            # cache_dir='.cache/',
        )
        # NOTE(review): add_prefix_space is computed from the model name but
        # never used — the tokenizer below always receives add_prefix_space=True.
        add_prefix_space = False
        if 'roberta' in self.model_name_or_path:
            add_prefix_space = True
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.tokenizer_name if self.tokenizer_name else self.model_name_or_path,
            # cache_dir=model_args.cache_dir,
            add_prefix_space=True,
            # use_fast=True,
        )
        self.model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name_or_path,
            from_tf=bool(".ckpt" in self.model_name_or_path),
            config=config,
            # cache_dir=args.cache_dir,
        )
    def get_string_text(self, tokens_a, tokens_b):
        """Join two token lists as [CLS] a [SEP] b [SEP] with segment ids,
        truncating tokens_a from the left to fit max_seq_length."""
        max_num_tokens = self.max_seq_length - 3
        total_length = len(tokens_a) + len(tokens_b)
        if total_length > max_num_tokens:
            # Keep the tail of tokens_a (most recent context) when too long.
            len_b = len(tokens_b)
            a_begin = max_num_tokens - len_b
            tokens_a = tokens_a[-a_begin:]
        try:
            assert len(tokens_a) + len(tokens_b) <= max_num_tokens
            assert len(tokens_a) >= 1
        except:
            # NOTE(review): debugger left in — drops into pdb when tokens_b
            # alone exceeds the budget; consider raising instead.
            import pdb;
            pdb.set_trace()
            print('some problem with preproc')
        # assert len(tokens_b) >= 1
        tokens = []
        segment_ids = []
        tokens.append(self.tokenizer.cls_token)
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append(self.tokenizer.sep_token)
        segment_ids.append(0)
        for token in tokens_b:
            tokens.append(token)
            segment_ids.append(1)
        tokens.append(self.tokenizer.sep_token)
        segment_ids.append(1)
        return tokens, segment_ids
    def tokenize_function_test(self, examples):
        """Tokenize (actual, prediction) pairs with hard-coded BERT-style
        special tokens; returns a padded batch encoding with token_type_ids."""
        # Remove empty lines
        # examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        # examples = [line for line in examples if len(line) > 0 and not line.isspace()]
        all_texts = []
        all_segment_ids = []
        all_labels = []
        # import pdb;pdb.set_trace()
        processed = []
        items = []
        # keys = list(examples.keys())
        # for i in range(len(examples[keys[0]])):
        #     ex = {}
        #     for k in keys:
        #         ex[k] = examples[k][i]
        #     items.append(ex)
        # import pdb;pdb.set_trace()
        items = examples
        max_seq_length = 216
        for example in items:
            first_tokens = self.tokenizer.tokenize(example['actual'])
            for sent2 in example['prediction']:
                sec_tokens = self.tokenizer.tokenize(sent2)
                # NOTE(review): literal "[CLS]"/"[SEP]" strings are used here
                # regardless of the tokenizer's actual special tokens — confirm
                # this path is only used with BERT-family tokenizers.
                tokens = ["[CLS]"] + first_tokens + ["[SEP]"] + sec_tokens
                if len(sec_tokens) + len(first_tokens) > max_seq_length - 1:
                    tokens = tokens[:(max_seq_length - 1)]
                tokens = tokens + ["[SEP]"]
                segment_ids = [0] * (len(first_tokens) + 2)
                segment_ids += [1] * (len(sec_tokens) + 1)
                all_texts.append(tokens)
                all_segment_ids.append(segment_ids)
        tokenized = self.tokenizer.batch_encode_plus(
            all_texts,
            padding='max_length',
            truncation=True,
            max_length=max_seq_length,
            is_split_into_words=True,
            return_special_tokens_mask=True,
            add_special_tokens=False,
        )
        # print(len(tokenized['input_ids']))
        # Pad the hand-built segment ids to the encoded sequence length.
        padded_length = len(tokenized['input_ids'][0])
        all_segment_ids = [x + [0] * (padded_length - len(x)) for x in all_segment_ids]
        tokenized['token_type_ids'] = all_segment_ids
        # tokenized['label'] = all_labels
        return tokenized
    def tokenize_function(self, examples, sent2_type='evidence_touse', sent1_type='prediction'):
        """Encode (evidence, prediction) pairs — one pair per prediction — and
        return a list of InputFeatures; order follows the input examples."""
        all_texts = []
        all_segment_ids = []
        all_labels = []
        processed = []
        items = []
        max_seq_length = 216
        for example in examples:
            evidence_data = example[sent2_type]
            sent2 = evidence_data
            for p, sent1 in enumerate(example[sent1_type]):
                # When the evidence field is a list, pair prediction p with
                # evidence p; otherwise the same evidence string is reused.
                if type(evidence_data) is list:
                    sent2 = example[sent2_type][p]
                items.append([sent2, sent1])
        # import pdb;pdb.set_trace()
        try:
            batch_encoding = self.tokenizer(
                [(example[0], example[1])
                 for example in items],
                max_length=self.max_length,
                padding="max_length",
                truncation=True,
            )
        except:
            # NOTE(review): debugger left in — swallows tokenizer errors.
            import pdb;pdb.set_trace()
        # import pdb;pdb.set_trace()
        features = []
        input1 = list(batch_encoding.keys())[0]
        num_inputs = len(batch_encoding[input1])
        for i in range(num_inputs):
            inputs = {k: batch_encoding[k][i] for k in batch_encoding}
            feature = InputFeatures(**inputs)
            features.append(feature)
        return features
    def tokenize_function_data(self, examples, sent2_type='evidence_touse', sent1_type='response'):
        """Encode one (evidence, response) pair per example and return a list
        of InputFeatures aligned with the input order."""
        all_texts = []
        all_segment_ids = []
        all_labels = []
        processed = []
        items = []
        max_seq_length = 216
        for example in examples:
            evidence_data = example[sent2_type]
            sent2 = evidence_data
            sent1 = example[sent1_type]
            items.append([sent2, sent1])
        # import pdb;pdb.set_trace()
        try:
            batch_encoding = self.tokenizer(
                [(ex[0], ex[1])
                 for ex in items],
                max_length=self.max_length,
                padding="max_length",
                truncation=True,
            )
        except:
            # NOTE(review): debugger left in — swallows tokenizer errors.
            import pdb;pdb.set_trace()
        # import pdb;pdb.set_trace()
        features = []
        input1 = list(batch_encoding.keys())[0]
        num_inputs = len(batch_encoding[input1])
        for i in range(num_inputs):
            inputs = {k: batch_encoding[k][i] for k in batch_encoding}
            feature = InputFeatures(**inputs)
            features.append(feature)
        return features
def create_data_loader(tokenized_eval_dataset, batch_size):
    """Build a DataLoader over pre-tokenized features using the HuggingFace
    default collator and 4 worker processes."""
    loader_kwargs = dict(
        batch_size=batch_size,
        num_workers=4,
        collate_fn=default_data_collator,
    )
    return DataLoader(tokenized_eval_dataset, **loader_kwargs)
def score_testdata(args, classification_model_dnli, testdata):
    """Run the classifier over *testdata* in batches and attach per-example
    class-probability lists in place under the '<typeprefix>fever_score' key.

    Mutates testdata; returns None. Assumes testdata order matches the
    tokenized feature order (parsed indexes back into testdata).
    """
    tokenized_eval_dataset = classification_model_dnli.tokenize_function_data(testdata, sent1_type=args.response_tag)
    # import pdb;pdb.set_trace()
    # tdataset = Dataset.from_dict(tokenized_eval_dataset)
    # test_data_loader = create_data_loader(tdataset, args.batch_size)
    test_data_loader = create_data_loader(tokenized_eval_dataset, args.batch_size)
    all_scores = []
    parsed = 0
    for idx, d in enumerate(tqdm(test_data_loader)):
        # NOTE(review): `device` here is the module-level global set under
        # __main__, not args.device — confirm this coupling is intended.
        input_ids = d["input_ids"].to(device)
        attention_mask = d["attention_mask"].to(device)
        token_type_ids = d["token_type_ids"].to(device)
        outputs = classification_model_dnli.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids
        )
        # Convert logits to per-class probabilities for the whole batch.
        outputs = softmax(outputs['logits'].tolist(),axis=1)
        for oidx, out in enumerate(outputs):
            softmax_l1 = out.tolist()
            # dnli_score = [x[0] for x in softmax_l1]
            # print(softmax_l1)s
            # all_scores+=softmax_l1
            testdata[parsed][args.typeprefix+'fever_score'] = softmax_l1
            parsed+=1
def score_data(args, classification_model_dnli, max_evidences=5):
    """Load args.input_file (jsonl), build the 'evidence_touse' text for each
    datapoint, score everything with score_testdata, and write the results to
    args.output_folder + args.preds_file.

    max_evidences caps how many evidence entries are concatenated per example.
    """
    testdata = get_json_lines(args.input_file)
    for i, datapoint in enumerate(tqdm(testdata)):
        # lines = datapoint[args.response_tag]
        if 'evidence_list' in datapoint:
            all_evidences = datapoint['evidence_list'][:max_evidences]
            # for e, evilist in enumerate(datapoint['evidence_list'][:max_evidences]):
            # all_evidences = evilist#datapoint['evidence_list']
            # print(all_evidences)
            # print(['title: ' + x[0] + ' content: ' + x[2] for x in all_evidences])
            # Each evidence entry is (title, _, content); join them into one string.
            all_evidence_texts = ['title: ' + x[0] + ' content: ' + x[2] for x in all_evidences]
            # evidence_text = ' ### '.join(all_evidence_texts)
            evidence_text = ' '.join(all_evidence_texts)
            datapoint['evidence_touse'] = evidence_text
        if args.claim_only:
            # Claim-only ablation: score with empty evidence.
            datapoint['evidence_touse'] = ''
        # import pdb;pdb.set_trace()
        # NOTE(review): this `continue` is the last statement of the loop body,
        # so it has no effect — it suggests intended skip logic is missing.
        if len(datapoint[args.response_tag])==0:
            continue
    score_testdata(args, classification_model_dnli, testdata)
    # scores = lm_scores(lines, model, tokenizer, device)
    # datapoint['dnli_score'] = scores
    write_json_lines(args.preds_file, testdata, args.output_folder)
if __name__ == '__main__':
    # CLI entry point: parse options, load the model onto the chosen device,
    # and score the input file.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cuda_device', type=int, help='id of GPU to use', default=0)
    parser.add_argument('-m', '--model', type=str, help='model name to use', default='colloquial_bert_large/')
    parser.add_argument('-i', '--input_file', type=str, help='path to the file containing the evaluation data', required=True)
    parser.add_argument('-o', '--preds_file', type=str, help='output file to save the results')
    parser.add_argument('--output_folder', type=str, help='output file to save the results', default='colloquialfeverscores/')
    parser.add_argument('--response_tag', type=str, help='tag', default='response')
    parser.add_argument('--batch_size', type=int, help='batch size', default=20)
    parser.add_argument('--claim_only', action='store_true', default=False, help='Disables evidence')
    parser.add_argument('--max_seq_length', type=int, help='batch size', default=256)
    parser.add_argument('--knowledgeformat', type=str, help='tag', default='')  # wikijoin
    parser.add_argument('--typeprefix', type=str, help='tag', default='')
    parser.add_argument('--outputprefix', type=str, help='tag', default='')
    # parser.add_argument('-append', action='store_true', help='allow append to previous run', default=False)
    args = parser.parse_args()
    if args.preds_file is None:
        # Default output name: prefix + basename of the input file.
        args.preds_file = args.input_file.split('/')[-1]
    args.preds_file = args.outputprefix + args.preds_file
    # assert(not os.path.exists(args.preds_file))
    if args.cuda_device>=0:
        device = 'cuda:'+str(args.cuda_device)
    else:
        device = 'cpu'
    # Note: `device` is also read as a module-level global by score_testdata.
    args.device = device
    classification_model_dnli = ClassificationModel(num_labels=3,model_name_or_path=args.model)
    classification_model_dnli.model = classification_model_dnli.model.to(device)
    print('model loaded')
    classification_model_dnli.model.eval()
    score_data(args, classification_model_dnli)
# Example invocations:
# python fever_scoring.py -i ../post_generation/contextagg_maskfill_mix1_wow_test_tsc_200_t1.5.jsonl --output_folder vitamincscores/ -m tals/albert-xlarge-vitaminc
# python fever_scoring.py -i ../post_generation/contextagg_maskfill_mix1_wow_test_tsc_200_t1.5.jsonl --knowledgeformat wikijoin --typeprefix colloq_ --output_folder colloquialfeverscores/ -m colloquial_bert_large
# python fever_scoring.py -i ../post_generation/contextagg_maskfill_mix1_wow_test_tsc_200_t1.5.jsonl --output_folder vitamincscores/ -m tals/albert-xlarge-vitaminc
# python fever_scoring.py -i ../post_generation/contextagg_maskfill_mix1_wow_test_tsc_200_t1.5.jsonl --knowledgeformat wikijoin --typeprefix colloq_ --output_folder colloquialfeverscores/ -m colloquial_bert_large
| 39.853293
| 212
| 0.631207
|
import argparse
import sys
import jsonlines
from tqdm import tqdm
import logging
import json
import torch
import torch.nn.functional as F
import jsonlines
import random
import os
import numpy as np
from scipy.special import softmax
from tqdm import tqdm
from typing import List
from sklearn.metrics import f1_score, precision_score, recall_score
from datasets import Dataset
from torch.utils.data import Dataset, DataLoader
from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification, get_cosine_schedule_with_warmup
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer, EvalPrediction, default_data_collator, set_seed
from transformers import InputExample, PreTrainedTokenizer, InputFeatures
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
LABELS = ["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]
def get_json_lines(inp_file):
lines = []
with jsonlines.open(inp_file) as reader:
for obj in reader:
lines.append(obj)
return lines
def write_json_lines(output_file_name, list_data, output_folder):
with jsonlines.open(output_folder+ output_file_name, mode='w') as writer:
for dataline in list_data:
writer.write(dataline)
class ClassificationModel():
def __init__(self, num_labels=2, max_length=256, model_name_or_path='albert-large-v2', config_name=None, tokenizer_name=None):
NUM_LABELS = num_labels
self.max_seq_length = 256
self.model_name_or_path = model_name_or_path
self.config_name = config_name
self.tokenizer_name = tokenizer_name
self.max_length = max_length
config = AutoConfig.from_pretrained(
self.config_name if self.config_name else self.model_name_or_path,
num_labels=NUM_LABELS,
)
add_prefix_space = False
if 'roberta' in self.model_name_or_path:
add_prefix_space = True
self.tokenizer = AutoTokenizer.from_pretrained(
self.tokenizer_name if self.tokenizer_name else self.model_name_or_path,
add_prefix_space=True,
)
self.model = AutoModelForSequenceClassification.from_pretrained(
self.model_name_or_path,
from_tf=bool(".ckpt" in self.model_name_or_path),
config=config,
)
def get_string_text(self, tokens_a, tokens_b):
max_num_tokens = self.max_seq_length - 3
total_length = len(tokens_a) + len(tokens_b)
if total_length > max_num_tokens:
len_b = len(tokens_b)
a_begin = max_num_tokens - len_b
tokens_a = tokens_a[-a_begin:]
try:
assert len(tokens_a) + len(tokens_b) <= max_num_tokens
assert len(tokens_a) >= 1
except:
import pdb;
pdb.set_trace()
print('some problem with preproc')
tokens = []
segment_ids = []
tokens.append(self.tokenizer.cls_token)
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append(self.tokenizer.sep_token)
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append(self.tokenizer.sep_token)
segment_ids.append(1)
return tokens, segment_ids
def tokenize_function_test(self, examples):
all_texts = []
all_segment_ids = []
all_labels = []
processed = []
items = []
items = examples
max_seq_length = 216
for example in items:
first_tokens = self.tokenizer.tokenize(example['actual'])
for sent2 in example['prediction']:
sec_tokens = self.tokenizer.tokenize(sent2)
tokens = ["[CLS]"] + first_tokens + ["[SEP]"] + sec_tokens
if len(sec_tokens) + len(first_tokens) > max_seq_length - 1:
tokens = tokens[:(max_seq_length - 1)]
tokens = tokens + ["[SEP]"]
segment_ids = [0] * (len(first_tokens) + 2)
segment_ids += [1] * (len(sec_tokens) + 1)
all_texts.append(tokens)
all_segment_ids.append(segment_ids)
tokenized = self.tokenizer.batch_encode_plus(
all_texts,
padding='max_length',
truncation=True,
max_length=max_seq_length,
is_split_into_words=True,
return_special_tokens_mask=True,
add_special_tokens=False,
)
padded_length = len(tokenized['input_ids'][0])
all_segment_ids = [x + [0] * (padded_length - len(x)) for x in all_segment_ids]
tokenized['token_type_ids'] = all_segment_ids
return tokenized
def tokenize_function(self, examples, sent2_type='evidence_touse', sent1_type='prediction'):
all_texts = []
all_segment_ids = []
all_labels = []
processed = []
items = []
max_seq_length = 216
for example in examples:
evidence_data = example[sent2_type]
sent2 = evidence_data
for p, sent1 in enumerate(example[sent1_type]):
if type(evidence_data) is list:
sent2 = example[sent2_type][p]
items.append([sent2, sent1])
try:
batch_encoding = self.tokenizer(
[(example[0], example[1])
for example in items],
max_length=self.max_length,
padding="max_length",
truncation=True,
)
except:
import pdb;pdb.set_trace()
features = []
input1 = list(batch_encoding.keys())[0]
num_inputs = len(batch_encoding[input1])
for i in range(num_inputs):
inputs = {k: batch_encoding[k][i] for k in batch_encoding}
feature = InputFeatures(**inputs)
features.append(feature)
return features
def tokenize_function_data(self, examples, sent2_type='evidence_touse', sent1_type='response'):
all_texts = []
all_segment_ids = []
all_labels = []
processed = []
items = []
max_seq_length = 216
for example in examples:
evidence_data = example[sent2_type]
sent2 = evidence_data
sent1 = example[sent1_type]
items.append([sent2, sent1])
try:
batch_encoding = self.tokenizer(
[(ex[0], ex[1])
for ex in items],
max_length=self.max_length,
padding="max_length",
truncation=True,
)
except:
import pdb;pdb.set_trace()
features = []
input1 = list(batch_encoding.keys())[0]
num_inputs = len(batch_encoding[input1])
for i in range(num_inputs):
inputs = {k: batch_encoding[k][i] for k in batch_encoding}
feature = InputFeatures(**inputs)
features.append(feature)
return features
def create_data_loader(tokenized_eval_dataset, batch_size):
return DataLoader(
tokenized_eval_dataset,
batch_size=batch_size,
num_workers=4,
collate_fn=default_data_collator
)
def score_testdata(args, classification_model_dnli, testdata):
tokenized_eval_dataset = classification_model_dnli.tokenize_function_data(testdata, sent1_type=args.response_tag)
test_data_loader = create_data_loader(tokenized_eval_dataset, args.batch_size)
all_scores = []
parsed = 0
for idx, d in enumerate(tqdm(test_data_loader)):
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
token_type_ids = d["token_type_ids"].to(device)
outputs = classification_model_dnli.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids
)
outputs = softmax(outputs['logits'].tolist(),axis=1)
for oidx, out in enumerate(outputs):
softmax_l1 = out.tolist()
testdata[parsed][args.typeprefix+'fever_score'] = softmax_l1
parsed+=1
def score_data(args, classification_model_dnli, max_evidences=5):
testdata = get_json_lines(args.input_file)
for i, datapoint in enumerate(tqdm(testdata)):
if 'evidence_list' in datapoint:
all_evidences = datapoint['evidence_list'][:max_evidences]
all_evidence_texts = ['title: ' + x[0] + ' content: ' + x[2] for x in all_evidences]
evidence_text = ' '.join(all_evidence_texts)
datapoint['evidence_touse'] = evidence_text
if args.claim_only:
datapoint['evidence_touse'] = ''
if len(datapoint[args.response_tag])==0:
continue
score_testdata(args, classification_model_dnli, testdata)
write_json_lines(args.preds_file, testdata, args.output_folder)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cuda_device', type=int, help='id of GPU to use', default=0)
parser.add_argument('-m', '--model', type=str, help='model name to use', default='colloquial_bert_large/')
parser.add_argument('-i', '--input_file', type=str, help='path to the file containing the evaluation data', required=True)
parser.add_argument('-o', '--preds_file', type=str, help='output file to save the results')
parser.add_argument('--output_folder', type=str, help='output file to save the results', default='colloquialfeverscores/')
parser.add_argument('--response_tag', type=str, help='tag', default='response')
parser.add_argument('--batch_size', type=int, help='batch size', default=20)
parser.add_argument('--claim_only', action='store_true', default=False, help='Disables evidence')
parser.add_argument('--max_seq_length', type=int, help='batch size', default=256)
parser.add_argument('--knowledgeformat', type=str, help='tag', default='')
parser.add_argument('--typeprefix', type=str, help='tag', default='')
parser.add_argument('--outputprefix', type=str, help='tag', default='')
args = parser.parse_args()
if args.preds_file is None:
args.preds_file = args.input_file.split('/')[-1]
args.preds_file = args.outputprefix + args.preds_file
if args.cuda_device>=0:
device = 'cuda:'+str(args.cuda_device)
else:
device = 'cpu'
args.device = device
classification_model_dnli = ClassificationModel(num_labels=3,model_name_or_path=args.model)
classification_model_dnli.model = classification_model_dnli.model.to(device)
print('model loaded')
classification_model_dnli.model.eval()
score_data(args, classification_model_dnli)
| true
| true
|
1c4a7a0b653d94fb65060004a335ac768d0fbace
| 5,776
|
py
|
Python
|
sdk/yapily/models/account_statement.py
|
yapily/yapily-sdk-python
|
c09930c44e8795e270e2846a2c0fb783200df76a
|
[
"MIT"
] | 11
|
2018-05-18T14:38:49.000Z
|
2021-09-08T13:24:37.000Z
|
sdk/yapily/models/account_statement.py
|
yapily/yapily-sdk-python
|
c09930c44e8795e270e2846a2c0fb783200df76a
|
[
"MIT"
] | 5
|
2019-10-23T15:06:33.000Z
|
2021-08-03T21:18:50.000Z
|
sdk/yapily/models/account_statement.py
|
yapily/yapily-sdk-python
|
c09930c44e8795e270e2846a2c0fb783200df76a
|
[
"MIT"
] | 8
|
2019-04-27T00:02:18.000Z
|
2021-11-21T02:54:12.000Z
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 1.154.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class AccountStatement(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'start_date_time': 'datetime',
'end_date_time': 'datetime',
'creation_date_time': 'datetime'
}
attribute_map = {
'id': 'id',
'start_date_time': 'startDateTime',
'end_date_time': 'endDateTime',
'creation_date_time': 'creationDateTime'
}
def __init__(self, id=None, start_date_time=None, end_date_time=None, creation_date_time=None, local_vars_configuration=None): # noqa: E501
"""AccountStatement - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._start_date_time = None
self._end_date_time = None
self._creation_date_time = None
self.discriminator = None
if id is not None:
self.id = id
if start_date_time is not None:
self.start_date_time = start_date_time
if end_date_time is not None:
self.end_date_time = end_date_time
if creation_date_time is not None:
self.creation_date_time = creation_date_time
@property
def id(self):
"""Gets the id of this AccountStatement. # noqa: E501
:return: The id of this AccountStatement. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this AccountStatement.
:param id: The id of this AccountStatement. # noqa: E501
:type: str
"""
self._id = id
@property
def start_date_time(self):
"""Gets the start_date_time of this AccountStatement. # noqa: E501
:return: The start_date_time of this AccountStatement. # noqa: E501
:rtype: datetime
"""
return self._start_date_time
@start_date_time.setter
def start_date_time(self, start_date_time):
"""Sets the start_date_time of this AccountStatement.
:param start_date_time: The start_date_time of this AccountStatement. # noqa: E501
:type: datetime
"""
self._start_date_time = start_date_time
@property
def end_date_time(self):
"""Gets the end_date_time of this AccountStatement. # noqa: E501
:return: The end_date_time of this AccountStatement. # noqa: E501
:rtype: datetime
"""
return self._end_date_time
@end_date_time.setter
def end_date_time(self, end_date_time):
"""Sets the end_date_time of this AccountStatement.
:param end_date_time: The end_date_time of this AccountStatement. # noqa: E501
:type: datetime
"""
self._end_date_time = end_date_time
@property
def creation_date_time(self):
"""Gets the creation_date_time of this AccountStatement. # noqa: E501
:return: The creation_date_time of this AccountStatement. # noqa: E501
:rtype: datetime
"""
return self._creation_date_time
@creation_date_time.setter
def creation_date_time(self, creation_date_time):
"""Sets the creation_date_time of this AccountStatement.
:param creation_date_time: The creation_date_time of this AccountStatement. # noqa: E501
:type: datetime
"""
self._creation_date_time = creation_date_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """Return the ``pprint``-formatted string form of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AccountStatement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AccountStatement):
return True
return self.to_dict() != other.to_dict()
| 29.025126
| 158
| 0.608899
|
import pprint
import re
import six
from yapily.configuration import Configuration
class AccountStatement(object):
openapi_types = {
'id': 'str',
'start_date_time': 'datetime',
'end_date_time': 'datetime',
'creation_date_time': 'datetime'
}
attribute_map = {
'id': 'id',
'start_date_time': 'startDateTime',
'end_date_time': 'endDateTime',
'creation_date_time': 'creationDateTime'
}
def __init__(self, id=None, start_date_time=None, end_date_time=None, creation_date_time=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._start_date_time = None
self._end_date_time = None
self._creation_date_time = None
self.discriminator = None
if id is not None:
self.id = id
if start_date_time is not None:
self.start_date_time = start_date_time
if end_date_time is not None:
self.end_date_time = end_date_time
if creation_date_time is not None:
self.creation_date_time = creation_date_time
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def start_date_time(self):
return self._start_date_time
@start_date_time.setter
def start_date_time(self, start_date_time):
self._start_date_time = start_date_time
@property
def end_date_time(self):
return self._end_date_time
@end_date_time.setter
def end_date_time(self, end_date_time):
self._end_date_time = end_date_time
@property
def creation_date_time(self):
return self._creation_date_time
@creation_date_time.setter
def creation_date_time(self, creation_date_time):
self._creation_date_time = creation_date_time
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AccountStatement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, AccountStatement):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
1c4a7a49f884812045349d0f5b1b1b674809dc4f
| 1,968
|
py
|
Python
|
CuSVD/testcases/gen_testcase.py
|
praeclarumjj3/CuML
|
1c812d3b07a11c3a69a284d9960058a874d97bfa
|
[
"MIT"
] | 2
|
2021-03-04T13:38:00.000Z
|
2021-04-09T13:40:00.000Z
|
CuSVD/testcases/gen_testcase.py
|
praeclarumjj3/CuML
|
1c812d3b07a11c3a69a284d9960058a874d97bfa
|
[
"MIT"
] | null | null | null |
CuSVD/testcases/gen_testcase.py
|
praeclarumjj3/CuML
|
1c812d3b07a11c3a69a284d9960058a874d97bfa
|
[
"MIT"
] | 2
|
2020-12-13T18:21:18.000Z
|
2021-08-17T06:55:56.000Z
|
#!/usr/bin/python3
#########################################################################
# Generate an M x N matrix of uniform random reals, standardize each    #
# feature column (zero mean, unit variance), and store the result in    #
# a file named 'testcase_<M>_<N>'.                                      #
# Output format:                                                        #
#   line 1: "M N"                                                       #
#   line 2+: matrix elements, row-major, space separated                #
#########################################################################
from random import uniform
from sklearn.preprocessing import StandardScaler

M = 1000          # number of rows (samples) in input matrix D
N = 300           # number of columns (features) in input matrix
lrange = -100000  # lower bound: lrange <= element of matrix
urange = 100000   # upper bound: element of matrix <= urange

# Build the raw M x N matrix with a nested comprehension instead of a
# manual append loop.
D = [[uniform(lrange, urange) for _ in range(N)] for _ in range(M)]

# Standardize: each feature column gets zero mean and unit variance.
X_std = StandardScaler().fit_transform(D)

filename = 'testcase_' + str(M) + '_' + str(N)  # output filename

# Use a context manager so the file is always closed, and avoid naming
# the handle 'file' (shadows the builtin).
with open(filename, 'w') as out:
    # First line: the matrix dimensions.
    out.write(str(M) + ' ' + str(N) + '\n')
    # Then all elements, space separated (same layout as before).
    for i in range(M):
        for j in range(N):
            out.write('%.7f ' % (X_std[i][j]))
| 38.588235
| 73
| 0.399898
| true
| true
|
|
1c4a7b6fc80a80c5c89ab92e7173ca90f412ed2c
| 1,717
|
py
|
Python
|
examples/clusteredparent/sync.py
|
rahulchheda/metac
|
40b547db945dd1a45cb2d62205d1c59e278bd678
|
[
"Apache-2.0"
] | 577
|
2018-03-22T01:31:59.000Z
|
2022-02-16T15:19:12.000Z
|
examples/clusteredparent/sync.py
|
DalavanCloud/metacontroller
|
98610b6e15cbe8b6ccd3bca0928dce0ce54c548d
|
[
"Apache-2.0"
] | 391
|
2020-05-19T09:33:07.000Z
|
2022-03-31T17:27:18.000Z
|
examples/clusteredparent/sync.py
|
DalavanCloud/metacontroller
|
98610b6e15cbe8b6ccd3bca0928dce0ce54c548d
|
[
"Apache-2.0"
] | 117
|
2018-03-22T01:40:47.000Z
|
2022-03-25T08:57:53.000Z
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json
def new_rolebinding(name):
    """Build a RoleBinding manifest binding the ClusterRole *name* to the
    'default' ServiceAccount in the 'default' namespace."""
    return {
        'apiVersion': 'rbac.authorization.k8s.io/v1',
        'kind': 'RoleBinding',
        'metadata': {
            'name': name,
            'namespace': "default",
        },
        'subjects': [
            {'kind': 'ServiceAccount', 'name': 'default', 'namespace': 'default'},
        ],
        'roleRef': {
            'kind': 'ClusterRole', 'name': name, 'apiGroup': 'rbac.authorization.k8s.io',
        },
    }
class Controller(BaseHTTPRequestHandler):
    # Metacontroller-style webhook: receives the observed state as JSON in a
    # POST body and replies with the desired state.
    # NOTE(review): written against Python 2 APIs (BaseHTTPServer,
    # headers.getheader); it will not run unmodified on Python 3.
    def sync(self, clusterrole, children):
        # Desired state: a single RoleBinding named after the observed ClusterRole.
        return {'attachments': [new_rolebinding(clusterrole['metadata']['name'])] }
    def do_POST(self):
        # Read exactly Content-Length bytes and parse the observed state.
        observed = json.loads(self.rfile.read(int(self.headers.getheader('content-length'))))
        desired = self.sync(observed['object'], observed['attachments'])
        # Reply 200 with the desired state as JSON.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(desired))
# Serve the webhook on all interfaces, port 80, until interrupted.
HTTPServer(('', 80), Controller).serve_forever()
| 37.326087
| 105
| 0.726849
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json
def new_rolebinding(name):
rolebinding = {}
rolebinding['apiVersion'] = 'rbac.authorization.k8s.io/v1'
rolebinding['kind'] = 'RoleBinding'
rolebinding['metadata'] = {}
rolebinding['metadata']['name'] = name
rolebinding['metadata']['namespace'] = "default"
rolebinding['subjects'] = [{'kind': 'ServiceAccount', 'name': 'default', 'namespace': 'default'}]
rolebinding['roleRef'] = {'kind': 'ClusterRole', 'name': name, 'apiGroup': 'rbac.authorization.k8s.io'}
return rolebinding
class Controller(BaseHTTPRequestHandler):
def sync(self, clusterrole, children):
return {'attachments': [new_rolebinding(clusterrole['metadata']['name'])] }
def do_POST(self):
observed = json.loads(self.rfile.read(int(self.headers.getheader('content-length'))))
desired = self.sync(observed['object'], observed['attachments'])
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(desired))
HTTPServer(('', 80), Controller).serve_forever()
| true
| true
|
1c4a7b98fa5883d11c06bf2f920e1ba0096ba6ec
| 5,226
|
py
|
Python
|
services/backend/app/api/v1/endpoints/marks.py
|
moxxiq/online-diary
|
5949cb5631d49622a31885519a880b17a0816988
|
[
"MIT"
] | null | null | null |
services/backend/app/api/v1/endpoints/marks.py
|
moxxiq/online-diary
|
5949cb5631d49622a31885519a880b17a0816988
|
[
"MIT"
] | null | null | null |
services/backend/app/api/v1/endpoints/marks.py
|
moxxiq/online-diary
|
5949cb5631d49622a31885519a880b17a0816988
|
[
"MIT"
] | null | null | null |
from fastapi import APIRouter, status, HTTPException, Path, Depends
import app.core.crud as crud
from app.core.authorization import get_current_user, get_current_user_with_scopes
from app.core.schemas.users import UserWithID
from app.core.schemas.marks import Mark, MarkDB, MarkContent
# Router collecting the marks endpoints defined below; included by the app elsewhere.
router = APIRouter()
@router.post("/marks", response_model=MarkDB, status_code=status.HTTP_201_CREATED)
async def create_marks(payload: Mark, current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
    """Create a mark for a student's work; restricted to user types 1 and 2.

    Returns 409 if a mark already exists for the (work, student) pair, and
    403 when a type-2 user (teacher, per the error messages) tries to grade
    a work owned by another teacher.
    """
    # TODO: remove vulnerability so that student could get mark for work of other classes work
    # One mark per (work, student) pair: reject duplicates.
    mark_in_db = await crud.marks.get_by_work_student(work_id=payload.work_id, student_id=payload.student_id)
    if mark_in_db:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Mark is already set")
    # Ownership check: compare the caller's id with the work's teacher id.
    if (current_user.get("type") == 2) and (current_user.get("id") != (await crud.works.get_teacher_of_the_work(payload.work_id)).get("user_id")):
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="Teacher is not allowed to set the marks for other teacher works")
    mark_id = await crud.marks.post(payload)
    # Return the freshly stored record.
    response_object = await crud.marks.get(mark_id)
    return response_object
@router.patch("/marks/{id}", response_model=MarkDB, status_code=status.HTTP_200_OK)
async def correct_marks(payload: MarkContent, id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
    """Partially update mark *id* with *payload* and return the updated record.

    Restricted to user types 1 and 2; unlike the other endpoints, no
    existence or ownership check is performed here (see TODOs below).
    """
    # TODO: remove vulnerability so that student could get mark for work of other classes work
    # TODO: remove vulnerability so that another teacher can correct marks of other teachers
    await crud.marks.patch(id, payload)
    response_object = await crud.marks.get(id)
    return response_object
@router.put("/marks/{id}", response_model=MarkDB, status_code=status.HTTP_200_OK)
async def correct_or_create_marks(payload: Mark, id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
    """Upsert mark *id* with *payload* and return the resulting record.

    Restricted to user types 1 and 2; a type-2 user may only touch marks
    on works they own.
    """
    # TODO: remove vulnerability so that student could get mark for work of other classes work
    # Ownership check: compare the caller's id with the work's teacher id.
    if (current_user.get("type") == 2) and (current_user.get("id") != (await crud.works.get_teacher_of_the_work(payload.work_id)).get("user_id")):
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="Teacher is not allowed to set the marks for other teacher works")
    await crud.marks.put(id, payload)
    response_object = await crud.marks.get(id)
    return response_object
@router.get("/marks/{id}", response_model=MarkDB)
async def read_marks(id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2, 3]))):
    """Return mark *id*, or 404 when it does not exist.

    Visibility rules: a user whose type is not 1 or 2 may only read their
    own marks; a type-2 user may only read marks on works they own.
    """
    mark_in_db = await crud.marks.get(id)
    if not mark_in_db:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Mark not set")
    # Non-teacher/non-admin callers may only see their own mark.
    if current_user.get("type") not in [1, 2]:
        if current_user.get("id") != mark_in_db.get("student_id"):
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Student is not allowed to see the marks of others")
    # Type-2 callers may only see marks attached to their own works.
    if (current_user.get("id") != (await crud.works.get_teacher_of_the_work(mark_in_db.get("work_id"))).get("user_id")) and (current_user.get("type") == 2):
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="Teacher is not allowed to see the marks of other teachers")
    return mark_in_db
@router.delete("/marks/{id}", response_model=MarkDB)
async def delete_marks(id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
    """Delete mark *id* and return the deleted record.

    Returns 404 when the mark does not exist, and 403 when a type-2 user
    (teacher, per the error messages) tries to delete a mark on a work
    owned by another teacher.
    """
    mark_in_db = await crud.marks.get(id)
    if not mark_in_db:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Mark not found")
    # Ownership check: compare the caller's id with the work's teacher id.
    if (current_user.get("id") != (await crud.works.get_teacher_of_the_work(mark_in_db.get("work_id"))).get("user_id")) and (current_user.get("type") == 2):
        # Fixed typo in the client-facing message: "Techer" -> "Teacher".
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="Teacher is not allowed to delete the marks of other teachers")
    await crud.marks.delete(id)
    return mark_in_db
@router.get("/works/{work_id}/students/{student_id}/marks", response_model=MarkDB)
async def read_work_student_marks(work_id: int = Path(..., gt=0), student_id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2, 3]))):
    """Return the mark for (*work_id*, *student_id*), or 404 when unset.

    Same visibility rules as ``read_marks``: non-type-1/2 callers may only
    read their own marks; type-2 callers only marks on works they own.
    """
    mark_in_db = await crud.marks.get_by_work_student(work_id, student_id)
    if not mark_in_db:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Mark not set")
    # Non-teacher/non-admin callers may only see their own mark.
    if current_user.get("type") not in [1, 2]:
        if current_user.get("id") != mark_in_db.get("student_id"):
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Student is not allowed to see the marks of others")
    # Ownership check; fixed typo in the message: "Techer" -> "Teacher"
    # (now consistent with read_marks above).
    if (current_user.get("id") != (await crud.works.get_teacher_of_the_work(mark_in_db.get("work_id"))).get("user_id")) and (current_user.get("type") == 2):
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                            detail="Teacher is not allowed to see the marks of other teachers")
    return mark_in_db
| 67.87013
| 178
| 0.726177
|
from fastapi import APIRouter, status, HTTPException, Path, Depends
import app.core.crud as crud
from app.core.authorization import get_current_user, get_current_user_with_scopes
from app.core.schemas.users import UserWithID
from app.core.schemas.marks import Mark, MarkDB, MarkContent
router = APIRouter()
@router.post("/marks", response_model=MarkDB, status_code=status.HTTP_201_CREATED)
async def create_marks(payload: Mark, current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
mark_in_db = await crud.marks.get_by_work_student(work_id=payload.work_id, student_id=payload.student_id)
if mark_in_db:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Mark is already set")
if (current_user.get("type") == 2) and (current_user.get("id") != (await crud.works.get_teacher_of_the_work(payload.work_id)).get("user_id")):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="Teacher is not allowed to set the marks for other teacher works")
mark_id = await crud.marks.post(payload)
response_object = await crud.marks.get(mark_id)
return response_object
@router.patch("/marks/{id}", response_model=MarkDB, status_code=status.HTTP_200_OK)
async def correct_marks(payload: MarkContent, id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
await crud.marks.patch(id, payload)
response_object = await crud.marks.get(id)
return response_object
@router.put("/marks/{id}", response_model=MarkDB, status_code=status.HTTP_200_OK)
async def correct_or_create_marks(payload: Mark, id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
if (current_user.get("type") == 2) and (current_user.get("id") != (await crud.works.get_teacher_of_the_work(payload.work_id)).get("user_id")):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="Teacher is not allowed to set the marks for other teacher works")
await crud.marks.put(id, payload)
response_object = await crud.marks.get(id)
return response_object
@router.get("/marks/{id}", response_model=MarkDB)
async def read_marks(id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2, 3]))):
mark_in_db = await crud.marks.get(id)
if not mark_in_db:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Mark not set")
if current_user.get("type") not in [1, 2]:
if current_user.get("id") != mark_in_db.get("student_id"):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Student is not allowed to see the marks of others")
if (current_user.get("id") != (await crud.works.get_teacher_of_the_work(mark_in_db.get("work_id"))).get("user_id")) and (current_user.get("type") == 2):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="Teacher is not allowed to see the marks of other teachers")
return mark_in_db
@router.delete("/marks/{id}", response_model=MarkDB)
async def delete_marks(id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2]))):
mark_in_db = await crud.marks.get(id)
if not mark_in_db:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Mark not found")
if (current_user.get("id") != (await crud.works.get_teacher_of_the_work(mark_in_db.get("work_id"))).get("user_id")) and (current_user.get("type") == 2):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="Techer is not allowed to delete the marks of other teachers")
await crud.marks.delete(id)
return mark_in_db
@router.get("/works/{work_id}/students/{student_id}/marks", response_model=MarkDB)
async def read_work_student_marks(work_id: int = Path(..., gt=0), student_id: int = Path(..., gt=0), current_user: UserWithID = Depends(get_current_user_with_scopes([1, 2, 3]))):
mark_in_db = await crud.marks.get_by_work_student(work_id, student_id)
if not mark_in_db:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Mark not set")
if current_user.get("type") not in [1, 2]:
if current_user.get("id") != mark_in_db.get("student_id"):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Student is not allowed to see the marks of others")
if (current_user.get("id") != (await crud.works.get_teacher_of_the_work(mark_in_db.get("work_id"))).get("user_id")) and (current_user.get("type") == 2):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail="Techer is not allowed to see the marks of other teachers")
return mark_in_db
| true
| true
|
1c4a7c6f7957c6d59fda330bbe04e8fea41006a6
| 1,933
|
py
|
Python
|
plots/cv_over_a/qif/plot.py
|
ModelDBRepository/228604
|
8f641f73bcac2700b476663fe656fcad7d63470d
|
[
"BSD-3-Clause"
] | null | null | null |
plots/cv_over_a/qif/plot.py
|
ModelDBRepository/228604
|
8f641f73bcac2700b476663fe656fcad7d63470d
|
[
"BSD-3-Clause"
] | null | null | null |
plots/cv_over_a/qif/plot.py
|
ModelDBRepository/228604
|
8f641f73bcac2700b476663fe656fcad7d63470d
|
[
"BSD-3-Clause"
] | null | null | null |
import analytics.shot_noise_driven.if_neuron as ana
import analytics.gaussian_white_noise_driven.if_neuron as gwnana
import pylab as pl
import numpy as np
import param_scan.io
import param_scan.parameter_sets
import param_scan.simulation_run
from param_scan.io import InDirectory
import os
from latex_param_values import LatexParamValues
from grace_plot import GracePlot
def with_tau_m(tau_m, prms):
    """Convert a dimensionless parameter set back to physical units.

    Rate/frequency-like entries are divided by the membrane time constant
    *tau_m*, time-like entries are multiplied by it, and ``tau_m`` itself
    is added.  *prms* is left untouched; a new dict is returned.
    """
    converted = dict(prms)
    converted["tau_m"] = tau_m
    # Entries with dimension 1/time: divide by tau_m.
    for rate_key in ("rin_e", "df", "f_c", "f_max", "f_sig", "r_sample"):
        converted[rate_key] = prms[rate_key] / tau_m
    # Entries with dimension time: multiply by tau_m.
    for time_key in ("tr", "dt"):
        converted[time_key] = prms[time_key] * tau_m
    return converted
tau_m = 0.02  # s (membrane time constant used to restore physical units)
with InDirectory(os.path.abspath(__file__)):
    # Load the newest run from each data directory: simulation, theory,
    # and diffusion approximation.
    rfn, r = param_scan.io.load_newest_run(d="./sim")
    prms = r["parameters"]
    rfn_theo, r_theo = param_scan.io.load_newest_run_matching({}, d="./theo")
    rfn_da, r_da = param_scan.io.load_newest_run_matching({}, d="./diffapp")
    gr = GracePlot("plot")
    # CV over a_e from the simulation run.
    a, rin_e, cv = param_scan.simulation_run.read_values(r, param_scan.parameter_sets.unroll(prms), ["a_e", "rin_e", "cv"], "stdout", ignore_errors=True)
    gr.plot(a, cv)
    # Same quantities from the theory run.
    a_theo, rin_e_theo, cv_theo = param_scan.simulation_run.read_values(r_theo, param_scan.parameter_sets.unroll(r_theo["parameters"]), ["a_e", "rin_e", "cv"], "stdout", ignore_errors=True)
    gr.plot(a_theo, cv_theo)
    # And from the diffusion-approximation run.
    a_da, rin_e_da, cv_da = param_scan.simulation_run.read_values(r_da, param_scan.parameter_sets.unroll(r_da["parameters"]), ["a_e", "rin_e", "cv"], "stdout", ignore_errors=True)
    gr.plot(a_da, cv_da)
    # Reference lines: CV = 1 (Poisson-like) and CV = 1/sqrt(3).
    gr.plot(a_theo, [1 for a in a_theo])
    gr.plot(a_theo, [1./3**0.5 for a in a_theo])
    gr.save()
    # Export the (re-dimensionalized, in ms) parameter values for LaTeX.
    LatexParamValues().write("paramvalues.tex", with_tau_m(tau_m * 1000, prms))
| 37.173077
| 189
| 0.682359
|
import analytics.shot_noise_driven.if_neuron as ana
import analytics.gaussian_white_noise_driven.if_neuron as gwnana
import pylab as pl
import numpy as np
import param_scan.io
import param_scan.parameter_sets
import param_scan.simulation_run
from param_scan.io import InDirectory
import os
from latex_param_values import LatexParamValues
from grace_plot import GracePlot
def with_tau_m(tau_m, prms):
p = dict(prms)
p["tau_m"] = tau_m
p["rin_e"] = prms["rin_e"] / tau_m
p["tr"] = prms["tr"] * tau_m
p["df"] = prms["df"] / tau_m
p["dt"] = prms["dt"] * tau_m
p["f_c"] = prms["f_c"] / tau_m
p["f_max"] = prms["f_max"] / tau_m
p["f_sig"] = prms["f_sig"] / tau_m
p["r_sample"] = prms["r_sample"] / tau_m
return p
tau_m = 0.02
with InDirectory(os.path.abspath(__file__)):
rfn, r = param_scan.io.load_newest_run(d="./sim")
prms = r["parameters"]
rfn_theo, r_theo = param_scan.io.load_newest_run_matching({}, d="./theo")
rfn_da, r_da = param_scan.io.load_newest_run_matching({}, d="./diffapp")
gr = GracePlot("plot")
a, rin_e, cv = param_scan.simulation_run.read_values(r, param_scan.parameter_sets.unroll(prms), ["a_e", "rin_e", "cv"], "stdout", ignore_errors=True)
gr.plot(a, cv)
a_theo, rin_e_theo, cv_theo = param_scan.simulation_run.read_values(r_theo, param_scan.parameter_sets.unroll(r_theo["parameters"]), ["a_e", "rin_e", "cv"], "stdout", ignore_errors=True)
gr.plot(a_theo, cv_theo)
a_da, rin_e_da, cv_da = param_scan.simulation_run.read_values(r_da, param_scan.parameter_sets.unroll(r_da["parameters"]), ["a_e", "rin_e", "cv"], "stdout", ignore_errors=True)
gr.plot(a_da, cv_da)
gr.plot(a_theo, [1 for a in a_theo])
gr.plot(a_theo, [1./3**0.5 for a in a_theo])
gr.save()
LatexParamValues().write("paramvalues.tex", with_tau_m(tau_m * 1000, prms))
| true
| true
|
1c4a7c8b617700a230076e999868d1a98b6e2fb1
| 312
|
py
|
Python
|
desafios/Ex023.py
|
FelipeAlafy/Python
|
da2374e55e8aa84e4ca6d9c7bf8dafeb546a4742
|
[
"MIT"
] | null | null | null |
desafios/Ex023.py
|
FelipeAlafy/Python
|
da2374e55e8aa84e4ca6d9c7bf8dafeb546a4742
|
[
"MIT"
] | null | null | null |
desafios/Ex023.py
|
FelipeAlafy/Python
|
da2374e55e8aa84e4ca6d9c7bf8dafeb546a4742
|
[
"MIT"
] | null | null | null |
# Read a number between 0 and 9999 and print each decimal digit
# (unidade/dezena/centena/milhar) in a distinct ANSI color.
num = int(input("Digite um número entre 0 e 9999: "))
# `num // 10**k % 10` extracts digit k; the original `num // 1` was a no-op.
u = num % 10          # unidade (units)
d = num // 10 % 10    # dezena (tens)
c = num // 100 % 10   # centena (hundreds)
m = num // 1000 % 10  # milhar (thousands)
print("\033[36munidade: {}\033[m".format(u))
print("\033[33mdezena: {}\033[m".format(d))
print("\033[32mcentena: {}\033[m".format(c))
print("\033[31mmilhar: {}\033[m".format(m))
| 31.2
| 53
| 0.589744
|
num = int(input("Digite um número entre 0 e 9999: "))
u = num // 1 % 10
d = num // 10 % 10
c = num // 100 % 10
m = num // 1000 % 10
print("\033[36munidade: {}\033[m".format(u))
print("\033[33mdezena: {}\033[m".format(d))
print("\033[32mcentena: {}\033[m".format(c))
print("\033[31mmilhar: {}\033[m".format(m))
| true
| true
|
1c4a7c8d6ad4b05645b9c03988afe18bb514fbae
| 59
|
py
|
Python
|
packages/Python/modern_robotics/__init__.py
|
Nutellaman/ModernRobotics
|
88c94eec1e0e4eedbd3ae32819664179a9a5a6ba
|
[
"MIT"
] | 1,126
|
2016-10-10T19:04:47.000Z
|
2022-03-31T21:22:58.000Z
|
packages/Python/modern_robotics/__init__.py
|
Nutellaman/ModernRobotics
|
88c94eec1e0e4eedbd3ae32819664179a9a5a6ba
|
[
"MIT"
] | 34
|
2017-10-11T04:52:38.000Z
|
2022-03-17T18:23:05.000Z
|
packages/Python/modern_robotics/__init__.py
|
Nutellaman/ModernRobotics
|
88c94eec1e0e4eedbd3ae32819664179a9a5a6ba
|
[
"MIT"
] | 631
|
2016-10-11T03:43:36.000Z
|
2022-03-24T21:41:47.000Z
|
from .__version__ import __version__
from .core import *
| 11.8
| 36
| 0.779661
|
from .__version__ import __version__
from .core import *
| true
| true
|
1c4a7cf006f6295e328795766e19f2fe14ead264
| 2,259
|
py
|
Python
|
examples/brainsimulator_agent/components/visual_area_component.py
|
masayoshi-nakamura/CognitiveArchitectureLecture
|
5e036b48e92f266062eb7be8a366e754dee24f2c
|
[
"Apache-2.0"
] | 4
|
2016-03-13T03:01:28.000Z
|
2016-03-31T02:51:56.000Z
|
examples/brainsimulator_agent/components/visual_area_component.py
|
masayoshi-nakamura/CognitiveArchitectureLecture
|
5e036b48e92f266062eb7be8a366e754dee24f2c
|
[
"Apache-2.0"
] | null | null | null |
examples/brainsimulator_agent/components/visual_area_component.py
|
masayoshi-nakamura/CognitiveArchitectureLecture
|
5e036b48e92f266062eb7be8a366e754dee24f2c
|
[
"Apache-2.0"
] | null | null | null |
import brica1
import numpy as np
import pygazebo.msg.poses_stamped_pb2
import pickle
class VisualAreaComponent(brica1.Component):
    """BriCA component that converts Gazebo pose messages for the
    'turtlebot' model into body position/velocity/orientation states."""
    def __init__(self):
        super(VisualAreaComponent, self).__init__()
        # Previous position, used in callback() to derive a velocity estimate.
        self.last_position = np.array((0, 0))
    def __position_to_area_id(self, pos2d):
        """Map a 2-D position to a coarse area id.

        Returns (0, 0) inside the unit circle around the origin, otherwise a
        pair in {-1, 0, 1} depending on each coordinate vs maze_width*0.5.
        NOTE(review): with radius == maze_width == 1 the 0.5 thresholds look
        asymmetric around the circle — confirm the intended boundaries.
        Currently unused within this class.
        """
        x = pos2d[0]
        y = pos2d[1]
        radius = 1
        maze_width = 1
        if x*x + y*y < radius*radius:
            return (0, 0)
        areaIdX = 0
        if x < maze_width*0.5:
            areaIdX = -1
        if x > maze_width*0.5:
            areaIdX = 1
        areaIdY = 0
        if y < maze_width*0.5:
            areaIdY = -1
        if y > maze_width*0.5:
            areaIdY = 1
        return (areaIdX, areaIdY)
    def get_server_response(self):
        # NOTE(review): server_response only exists after the first
        # callback(); calling this earlier raises AttributeError.
        return self.server_response
    def callback(self, data):
        """Handle a serialized PosesStamped message from Gazebo."""
        pose = pygazebo.msg.poses_stamped_pb2.PosesStamped()
        message = pose.FromString(data)
        # Expect the turtlebot pose at index 0.
        turtlebot_id = 0
        if message.pose[turtlebot_id].name != "turtlebot":
            raise Exception("message.pose[0].name is not turtlbot")
        position = np.array((
            message.pose[turtlebot_id].position.x,
            message.pose[turtlebot_id].position.y))
        orientation = np.array((
            message.pose[turtlebot_id].orientation.x,
            message.pose[turtlebot_id].orientation.y,
            message.pose[turtlebot_id].orientation.z,
            message.pose[turtlebot_id].orientation.w))
        # Velocity is computed as last_position - position (previous minus
        # current); note the inverted sign relative to displacement —
        # confirm this convention is intended.
        vel = self.last_position - position
        self.last_position = position
        # Publish the derived quantities as component states (float32).
        self.set_state("out_body_velocity",
                       np.array((vel[0], vel[1])).astype(np.float32))
        self.set_state("out_body_position",
                       position.astype(np.float32))
        self.set_state("out_body_orientation",
                       orientation.astype(np.float32))
        # Plain-list copy of the same values for external consumers.
        self.server_response = {"out_body_velocity":vel.tolist(),
                                "out_body_position":position.tolist(),
                                "out_body_orientation":orientation.tolist()}
        #print self.server_response
    def fire(self):
        # Copy all states to results on each BriCA scheduling step.
        for key in self.states.keys():
            self.results[key] = self.states[key]
| 31.375
| 76
| 0.575476
|
import brica1
import numpy as np
import pygazebo.msg.poses_stamped_pb2
import pickle
class VisualAreaComponent(brica1.Component):
def __init__(self):
super(VisualAreaComponent, self).__init__()
self.last_position = np.array((0, 0))
def __position_to_area_id(self, pos2d):
x = pos2d[0]
y = pos2d[1]
radius = 1
maze_width = 1
if x*x + y*y < radius*radius:
return (0, 0)
areaIdX = 0
if x < maze_width*0.5:
areaIdX = -1
if x > maze_width*0.5:
areaIdX = 1
areaIdY = 0
if y < maze_width*0.5:
areaIdY = -1
if y > maze_width*0.5:
areaIdY = 1
return (areaIdX, areaIdY)
def get_server_response(self):
return self.server_response
def callback(self, data):
pose = pygazebo.msg.poses_stamped_pb2.PosesStamped()
message = pose.FromString(data)
turtlebot_id = 0
if message.pose[turtlebot_id].name != "turtlebot":
raise Exception("message.pose[0].name is not turtlbot")
position = np.array((
message.pose[turtlebot_id].position.x,
message.pose[turtlebot_id].position.y))
orientation = np.array((
message.pose[turtlebot_id].orientation.x,
message.pose[turtlebot_id].orientation.y,
message.pose[turtlebot_id].orientation.z,
message.pose[turtlebot_id].orientation.w))
vel = self.last_position - position
self.last_position = position
self.set_state("out_body_velocity",
np.array((vel[0], vel[1])).astype(np.float32))
self.set_state("out_body_position",
position.astype(np.float32))
self.set_state("out_body_orientation",
orientation.astype(np.float32))
self.server_response = {"out_body_velocity":vel.tolist(),
"out_body_position":position.tolist(),
"out_body_orientation":orientation.tolist()}
def fire(self):
for key in self.states.keys():
self.results[key] = self.states[key]
| true
| true
|
1c4a7d5d68e7e6f208af698969098f6dd360551f
| 932
|
py
|
Python
|
pycantonese/tests/test_yale.py
|
chaaklau/pycantonese
|
94694fea2f3c3405d3b6bb6d504a56bb05a6496c
|
[
"MIT"
] | 124
|
2019-08-12T13:10:43.000Z
|
2022-03-24T18:35:58.000Z
|
pycantonese/tests/test_yale.py
|
chaaklau/pycantonese
|
94694fea2f3c3405d3b6bb6d504a56bb05a6496c
|
[
"MIT"
] | 13
|
2019-09-03T17:08:49.000Z
|
2021-12-28T21:37:17.000Z
|
pycantonese/tests/test_yale.py
|
chaaklau/pycantonese
|
94694fea2f3c3405d3b6bb6d504a56bb05a6496c
|
[
"MIT"
] | 15
|
2019-08-09T04:03:01.000Z
|
2022-03-17T10:18:21.000Z
|
import pytest
from pycantonese import jyutping_to_yale
from pycantonese.jyutping.parse_jyutping import ONSETS, NUCLEI, CODAS
from pycantonese.jyutping.yale import (
ONSETS_YALE,
NUCLEI_YALE,
CODAS_YALE,
)
def test_correct_onset_set():
    """The Yale onset mapping covers exactly the Jyutping onset inventory."""
    assert set(ONSETS_YALE.keys()) == ONSETS
def test_correct_nucleus_set():
    """The Yale nucleus mapping covers exactly the Jyutping nucleus inventory."""
    assert set(NUCLEI_YALE.keys()) == NUCLEI
def test_correct_coda_set():
    """The Yale coda mapping covers exactly the Jyutping coda inventory."""
    assert set(CODAS_YALE.keys()) == CODAS
@pytest.mark.parametrize("input_", ["", None])
def test_null_input(input_):
    """Empty or None input yields an empty list."""
    assert jyutping_to_yale(input_) == []
@pytest.mark.parametrize("input_", ["", None])
def test_null_input_as_list_false(input_):
    """With as_list=False, empty or None input yields an empty string."""
    assert jyutping_to_yale(input_, as_list=False) == ""
def test_jyutping_to_yale_m4goi1():
    """Multi-syllable conversion with a syllabic nasal ('m4goi1')."""
    assert jyutping_to_yale("m4goi1") == ["m̀h", "gōi"]
def test_jyutping_to_yale_gwong2dung1waa2():
    """Three-syllable conversion ('gwong2dung1waa2')."""
    assert jyutping_to_yale("gwong2dung1waa2") == ["gwóng", "dūng", "wá"]
| 23.3
| 73
| 0.73176
|
import pytest
from pycantonese import jyutping_to_yale
from pycantonese.jyutping.parse_jyutping import ONSETS, NUCLEI, CODAS
from pycantonese.jyutping.yale import (
ONSETS_YALE,
NUCLEI_YALE,
CODAS_YALE,
)
def test_correct_onset_set():
assert set(ONSETS_YALE.keys()) == ONSETS
def test_correct_nucleus_set():
assert set(NUCLEI_YALE.keys()) == NUCLEI
def test_correct_coda_set():
assert set(CODAS_YALE.keys()) == CODAS
@pytest.mark.parametrize("input_", ["", None])
def test_null_input(input_):
assert jyutping_to_yale(input_) == []
@pytest.mark.parametrize("input_", ["", None])
def test_null_input_as_list_false(input_):
assert jyutping_to_yale(input_, as_list=False) == ""
def test_jyutping_to_yale_m4goi1():
assert jyutping_to_yale("m4goi1") == ["m̀h", "gōi"]
def test_jyutping_to_yale_gwong2dung1waa2():
assert jyutping_to_yale("gwong2dung1waa2") == ["gwóng", "dūng", "wá"]
| true
| true
|
1c4a7d8f4b20f462196786fe00a5de5533884057
| 184
|
py
|
Python
|
molsysmt/tools/openmm_Modeller/is_openmm_Modeller.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/openmm_Modeller/is_openmm_Modeller.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
molsysmt/tools/openmm_Modeller/is_openmm_Modeller.py
|
dprada/molsysmt
|
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
|
[
"MIT"
] | null | null | null |
# Fully qualified class name this module tests against.
_item_fullname_ = 'openmm.Modeller'


def is_openmm_Modeller(item):
    """Return True when *item*'s class is openmm.Modeller.

    The check compares the fully qualified class name, so openmm never
    needs to be imported here.
    """
    cls = item.__class__
    return "{}.{}".format(cls.__module__, cls.__name__) == _item_fullname_
| 20.444444
| 73
| 0.788043
|
_item_fullname_='openmm.Modeller'
def is_openmm_Modeller(item):
item_fullname = item.__class__.__module__+'.'+item.__class__.__name__
return _item_fullname_==item_fullname
| true
| true
|
1c4a7e2fe78d848f06c3a08a7f2a2ea29679e094
| 1,158
|
py
|
Python
|
setup.py
|
topshed/m8tricks
|
51a55f0cdbb167252048101bc2aadf547a5730da
|
[
"MIT"
] | 10
|
2019-07-31T17:33:10.000Z
|
2022-01-08T09:07:01.000Z
|
setup.py
|
topshed/m8tricks
|
51a55f0cdbb167252048101bc2aadf547a5730da
|
[
"MIT"
] | 3
|
2019-08-18T19:25:20.000Z
|
2022-01-07T12:19:23.000Z
|
setup.py
|
topshed/m8tricks
|
51a55f0cdbb167252048101bc2aadf547a5730da
|
[
"MIT"
] | 1
|
2022-01-08T09:03:42.000Z
|
2022-01-08T09:03:42.000Z
|
"Setup script for the m8tricks package"
import sys
from setuptools import setup, find_packages
if not sys.version_info >= (3, 5):
raise RuntimeError('This application requires Python 3.5 or later')
def main():
"Executes setup when this script is the top-level"
import m8tricks as app
from pathlib import Path
with Path(__file__).with_name('project.rst').open() as project:
setup(
name=app.__project__,
version=app.__version__,
description=app.__doc__,
long_description=project.read(),
classifiers=app.__classifiers__,
author=app.__author__,
author_email=app.__author_email__,
url=app.__url__,
license=[
c.rsplit('::', 1)[1].strip()
for c in app.__classifiers__
if c.startswith('License ::')
][0],
keywords=app.__keywords__,
packages=find_packages(),
include_package_data=True,
install_requires=app.__requires__,
entry_points=app.__entry_points__,
)
if __name__ == '__main__':
main()
| 28.95
| 71
| 0.598446
|
import sys
from setuptools import setup, find_packages
if not sys.version_info >= (3, 5):
raise RuntimeError('This application requires Python 3.5 or later')
def main():
import m8tricks as app
from pathlib import Path
with Path(__file__).with_name('project.rst').open() as project:
setup(
name=app.__project__,
version=app.__version__,
description=app.__doc__,
long_description=project.read(),
classifiers=app.__classifiers__,
author=app.__author__,
author_email=app.__author_email__,
url=app.__url__,
license=[
c.rsplit('::', 1)[1].strip()
for c in app.__classifiers__
if c.startswith('License ::')
][0],
keywords=app.__keywords__,
packages=find_packages(),
include_package_data=True,
install_requires=app.__requires__,
entry_points=app.__entry_points__,
)
if __name__ == '__main__':
main()
| true
| true
|
1c4a805580426d4b5885f1258cc4efdfad25817c
| 2,870
|
py
|
Python
|
src/topology/general/nn_circuit.py
|
Dreamonic/shor-algorithm
|
19a4d95f0f19809cd3fe1db4d834ff3a02fba68d
|
[
"MIT"
] | null | null | null |
src/topology/general/nn_circuit.py
|
Dreamonic/shor-algorithm
|
19a4d95f0f19809cd3fe1db4d834ff3a02fba68d
|
[
"MIT"
] | null | null | null |
src/topology/general/nn_circuit.py
|
Dreamonic/shor-algorithm
|
19a4d95f0f19809cd3fe1db4d834ff3a02fba68d
|
[
"MIT"
] | null | null | null |
from math import ceil, sqrt
from projectq.ops import Swap
from src.topology.general.circuit import Circuit, LongDistanceAlgorithm, Statistics, Restrictions, Node
from src.topology.general.qubit import QubitHandler, QubitType
class GridSwap(LongDistanceAlgorithm):
def prepare(self, circuit, src, tgt, **kwargs):
path = self.find_shortest_path(circuit, src, tgt)
self.swap_path(circuit, path)
def teardown(self, circuit, src, tgt, **kwargs):
path = self.find_shortest_path(circuit, src, tgt)
self.swap_path(circuit, reversed(path))
def find_shortest_path(self, circuit, src, tgt):
q = [[src]]
while len(q) != 0:
path = q.pop(0)
cur = path[-1]
if cur == tgt:
return path
for n in circuit.graph[cur]:
new_path = list(path)
new_path.append(n)
q.append(new_path)
def swap_path(self, circuit, path):
prev = None
for i in path:
if prev is not None:
circuit.apply_two_qubit_gate(Swap, prev.name, i.name)
self.swap(prev, i)
prev = i
class GridCircuit(Circuit):
def __init__(self, engine, n, graph: dict = None, stats: Statistics = None, restrictions: Restrictions = None,
handlers: [QubitHandler] = None, ld_gate_algorithm=None):
super().__init__(graph=graph, stats=stats, restrictions=restrictions, handlers=handlers,
ld_gate_algorithm=ld_gate_algorithm)
self.create_grid(engine, ceil(sqrt(n)), ceil(n / sqrt(n)))
self.add_edges(ceil(sqrt(n)), ceil(n / sqrt(n)))
def create_grid(self, engine, x, y):
for idx in range(x):
for idy in range(y):
node = Node("log_" + str(idx) + "_" + str(idy), engine.allocate_qubit(), QubitType.LOGICAL,
restrictions=self.restrictions)
self.add_node(node)
def add_edges(self, x, y):
for idx in range(1, x - 1):
for idy in range(0, y):
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx - 1) + "_" + str(idy)))
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx + 1) + "_" + str(idy)))
for idx in range(0, x):
for idy in range(1, y - 1):
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx) + "_" + str(idy - 1)))
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx) + "_" + str(idy + 1)))
| 41
| 115
| 0.529965
|
from math import ceil, sqrt
from projectq.ops import Swap
from src.topology.general.circuit import Circuit, LongDistanceAlgorithm, Statistics, Restrictions, Node
from src.topology.general.qubit import QubitHandler, QubitType
class GridSwap(LongDistanceAlgorithm):
def prepare(self, circuit, src, tgt, **kwargs):
path = self.find_shortest_path(circuit, src, tgt)
self.swap_path(circuit, path)
def teardown(self, circuit, src, tgt, **kwargs):
path = self.find_shortest_path(circuit, src, tgt)
self.swap_path(circuit, reversed(path))
def find_shortest_path(self, circuit, src, tgt):
q = [[src]]
while len(q) != 0:
path = q.pop(0)
cur = path[-1]
if cur == tgt:
return path
for n in circuit.graph[cur]:
new_path = list(path)
new_path.append(n)
q.append(new_path)
def swap_path(self, circuit, path):
prev = None
for i in path:
if prev is not None:
circuit.apply_two_qubit_gate(Swap, prev.name, i.name)
self.swap(prev, i)
prev = i
class GridCircuit(Circuit):
def __init__(self, engine, n, graph: dict = None, stats: Statistics = None, restrictions: Restrictions = None,
handlers: [QubitHandler] = None, ld_gate_algorithm=None):
super().__init__(graph=graph, stats=stats, restrictions=restrictions, handlers=handlers,
ld_gate_algorithm=ld_gate_algorithm)
self.create_grid(engine, ceil(sqrt(n)), ceil(n / sqrt(n)))
self.add_edges(ceil(sqrt(n)), ceil(n / sqrt(n)))
def create_grid(self, engine, x, y):
for idx in range(x):
for idy in range(y):
node = Node("log_" + str(idx) + "_" + str(idy), engine.allocate_qubit(), QubitType.LOGICAL,
restrictions=self.restrictions)
self.add_node(node)
def add_edges(self, x, y):
for idx in range(1, x - 1):
for idy in range(0, y):
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx - 1) + "_" + str(idy)))
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx + 1) + "_" + str(idy)))
for idx in range(0, x):
for idy in range(1, y - 1):
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx) + "_" + str(idy - 1)))
self.add_edge(self.node("log_" + str(idx) + "_" + str(idy)),
self.node("log_" + str(idx) + "_" + str(idy + 1)))
| true
| true
|
1c4a80ada0266eea6ace4af79ce1968081a0a12c
| 5,578
|
py
|
Python
|
reconbf/modules/test_nova.py
|
fallenpegasus/reconbf
|
bfd15bef549f011a3de885c3267d4f718223b798
|
[
"Apache-2.0"
] | 45
|
2016-08-12T21:37:25.000Z
|
2022-03-29T00:21:29.000Z
|
reconbf/modules/test_nova.py
|
fallenpegasus/reconbf
|
bfd15bef549f011a3de885c3267d4f718223b798
|
[
"Apache-2.0"
] | 20
|
2016-08-11T07:42:28.000Z
|
2016-09-09T13:33:47.000Z
|
reconbf/modules/test_nova.py
|
fallenpegasus/reconbf
|
bfd15bef549f011a3de885c3267d4f718223b798
|
[
"Apache-2.0"
] | 6
|
2016-08-25T06:31:38.000Z
|
2019-09-11T04:29:36.000Z
|
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from reconbf.lib import test_class
from reconbf.lib.result import GroupTestResult
from reconbf.lib.result import Result
from reconbf.lib.result import TestResult
from reconbf.lib import utils
import grp
import os
import pwd
def _conf_location():
return {'dir': '/etc/nova'}
def _conf_details():
config = _conf_location().copy()
config['user'] = 'root'
config['group'] = 'root'
return config
@test_class.explanation("""
Protection name: Config permissions
Check: Are nova config permissions ok
Purpose: Nova config files contain authentication
details and need to be protected. Ensure that
they're only available to the service.
""")
@test_class.set_mapping("OpenStack:Check-Compute-01",
"OpenStack:Check-Compute-02")
@test_class.takes_config(_conf_details)
def config_permission(config):
try:
user = pwd.getpwnam(config['user'])
except KeyError:
return TestResult(Result.SKIP,
'Could not find user "%s"' % config['user'])
try:
group = grp.getgrnam(config['group'])
except KeyError:
return TestResult(Result.SKIP,
'Could not find group "%s"' % config['group'])
result = GroupTestResult()
files = ['nova.conf', 'api-paste.ini', 'policy.json', 'rootwrap.conf']
for f in files:
path = os.path.join(config['dir'], f)
result.add_result(path,
utils.validate_permissions(path, 0o640, user.pw_uid,
group.gr_gid))
return result
@test_class.explanation("""
Protection name: Authentication strategy
Check: Make sure proper authentication is used
Purpose: There are multiple authentication backends
available. Nova should be configured to authenticate
against keystone rather than test backends.
""")
@test_class.set_mapping("OpenStack:Check-Compute-03")
@test_class.takes_config(_conf_location)
def nova_auth(config):
try:
path = os.path.join(config['dir'], 'nova.conf')
nova_conf = utils.parse_openstack_ini(path)
except EnvironmentError:
return TestResult(Result.SKIP, 'cannot read nova config files')
auth = nova_conf.get('DEFAULT', {}).get('auth_strategy', 'keystone')
if auth != 'keystone':
return TestResult(Result.FAIL,
'authentication should be done by keystone')
else:
return TestResult(Result.PASS)
@test_class.explanation("""
Protection name: Keystone api access
Check: Does Keystone access use secure connection
Purpose: OpenStack components communicate with each other
using various protocols and the communication might
involve sensitive / confidential data. An attacker may
try to eavesdrop on the channel in order to get access to
sensitive information. Thus all the components must
communicate with each other using a secured communication
protocol.
""")
@test_class.set_mapping("OpenStack:Check-Compute-04")
@test_class.takes_config(_conf_location)
def keystone_secure(config):
try:
path = os.path.join(config['dir'], 'nova.conf')
nova_conf = utils.parse_openstack_ini(path)
except EnvironmentError:
return TestResult(Result.SKIP, 'cannot read nova config files')
protocol = nova_conf.get('keystone_authtoken', {}).get('auth_protocol',
'https')
identity = nova_conf.get('keystone_authtoken', {}).get('identity_uri',
'https:')
if not identity.startswith('https:'):
return TestResult(Result.FAIL, 'keystone access is not secure')
if protocol != 'https':
return TestResult(Result.FAIL, 'keystone access is not secure')
return TestResult(Result.PASS)
@test_class.explanation("""
Protection name: Glance api access
Check: Does Glance access use secure connection
Purpose: OpenStack components communicate with each other
using various protocols and the communication might
involve sensitive / confidential data. An attacker may
try to eavesdrop on the channel in order to get access to
sensitive information. Thus all the components must
communicate with each other using a secured communication
protocol.
""")
@test_class.set_mapping("OpenStack:Check-Compute-05")
@test_class.takes_config(_conf_location)
def glance_secure(config):
try:
path = os.path.join(config['dir'], 'nova.conf')
nova_conf = utils.parse_openstack_ini(path)
except EnvironmentError:
return TestResult(Result.SKIP, 'cannot read nova config files')
insecure = nova_conf.get('glance', {}).get(
'api_insecure', 'False').lower() == 'true'
if insecure:
return TestResult(Result.FAIL, 'glance access is not secure')
else:
return TestResult(Result.PASS)
| 34.645963
| 78
| 0.677483
|
from reconbf.lib import test_class
from reconbf.lib.result import GroupTestResult
from reconbf.lib.result import Result
from reconbf.lib.result import TestResult
from reconbf.lib import utils
import grp
import os
import pwd
def _conf_location():
return {'dir': '/etc/nova'}
def _conf_details():
config = _conf_location().copy()
config['user'] = 'root'
config['group'] = 'root'
return config
@test_class.explanation("""
Protection name: Config permissions
Check: Are nova config permissions ok
Purpose: Nova config files contain authentication
details and need to be protected. Ensure that
they're only available to the service.
""")
@test_class.set_mapping("OpenStack:Check-Compute-01",
"OpenStack:Check-Compute-02")
@test_class.takes_config(_conf_details)
def config_permission(config):
try:
user = pwd.getpwnam(config['user'])
except KeyError:
return TestResult(Result.SKIP,
'Could not find user "%s"' % config['user'])
try:
group = grp.getgrnam(config['group'])
except KeyError:
return TestResult(Result.SKIP,
'Could not find group "%s"' % config['group'])
result = GroupTestResult()
files = ['nova.conf', 'api-paste.ini', 'policy.json', 'rootwrap.conf']
for f in files:
path = os.path.join(config['dir'], f)
result.add_result(path,
utils.validate_permissions(path, 0o640, user.pw_uid,
group.gr_gid))
return result
@test_class.explanation("""
Protection name: Authentication strategy
Check: Make sure proper authentication is used
Purpose: There are multiple authentication backends
available. Nova should be configured to authenticate
against keystone rather than test backends.
""")
@test_class.set_mapping("OpenStack:Check-Compute-03")
@test_class.takes_config(_conf_location)
def nova_auth(config):
try:
path = os.path.join(config['dir'], 'nova.conf')
nova_conf = utils.parse_openstack_ini(path)
except EnvironmentError:
return TestResult(Result.SKIP, 'cannot read nova config files')
auth = nova_conf.get('DEFAULT', {}).get('auth_strategy', 'keystone')
if auth != 'keystone':
return TestResult(Result.FAIL,
'authentication should be done by keystone')
else:
return TestResult(Result.PASS)
@test_class.explanation("""
Protection name: Keystone api access
Check: Does Keystone access use secure connection
Purpose: OpenStack components communicate with each other
using various protocols and the communication might
involve sensitive / confidential data. An attacker may
try to eavesdrop on the channel in order to get access to
sensitive information. Thus all the components must
communicate with each other using a secured communication
protocol.
""")
@test_class.set_mapping("OpenStack:Check-Compute-04")
@test_class.takes_config(_conf_location)
def keystone_secure(config):
try:
path = os.path.join(config['dir'], 'nova.conf')
nova_conf = utils.parse_openstack_ini(path)
except EnvironmentError:
return TestResult(Result.SKIP, 'cannot read nova config files')
protocol = nova_conf.get('keystone_authtoken', {}).get('auth_protocol',
'https')
identity = nova_conf.get('keystone_authtoken', {}).get('identity_uri',
'https:')
if not identity.startswith('https:'):
return TestResult(Result.FAIL, 'keystone access is not secure')
if protocol != 'https':
return TestResult(Result.FAIL, 'keystone access is not secure')
return TestResult(Result.PASS)
@test_class.explanation("""
Protection name: Glance api access
Check: Does Glance access use secure connection
Purpose: OpenStack components communicate with each other
using various protocols and the communication might
involve sensitive / confidential data. An attacker may
try to eavesdrop on the channel in order to get access to
sensitive information. Thus all the components must
communicate with each other using a secured communication
protocol.
""")
@test_class.set_mapping("OpenStack:Check-Compute-05")
@test_class.takes_config(_conf_location)
def glance_secure(config):
try:
path = os.path.join(config['dir'], 'nova.conf')
nova_conf = utils.parse_openstack_ini(path)
except EnvironmentError:
return TestResult(Result.SKIP, 'cannot read nova config files')
insecure = nova_conf.get('glance', {}).get(
'api_insecure', 'False').lower() == 'true'
if insecure:
return TestResult(Result.FAIL, 'glance access is not secure')
else:
return TestResult(Result.PASS)
| true
| true
|
1c4a8104321bcc1796f75a2f5207296f38219830
| 674
|
py
|
Python
|
19th/sharing-taxi-fare/solution.py
|
WooJin1993/coding_test
|
ec9dc2dc768fe45700b4c0695b16535c0a824f6e
|
[
"MIT"
] | null | null | null |
19th/sharing-taxi-fare/solution.py
|
WooJin1993/coding_test
|
ec9dc2dc768fe45700b4c0695b16535c0a824f6e
|
[
"MIT"
] | null | null | null |
19th/sharing-taxi-fare/solution.py
|
WooJin1993/coding_test
|
ec9dc2dc768fe45700b4c0695b16535c0a824f6e
|
[
"MIT"
] | null | null | null |
# 문제: https://programmers.co.kr/learn/courses/30/lessons/72413
# --- 풀이 ---
# INF = sys.maxsize 하는 경우, 효율성 테스트 1개 실패
# 계산 시간은 INF 크기에도 영향을 받는다.
from itertools import product
def solution(n, s, a, b, fares):
result = []
INF = 1e12
graph = [[INF] * (n+1) for _ in range(n + 1)]
for i in range(1, n + 1):
graph[i][i] = 0
for c, d, f in fares:
graph[c][d] = graph[d][c] = f
for k, i, j in product(range(1, n + 1), repeat=3):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
for mid in range(1, n + 1):
result.append(graph[s][mid] + graph[mid][a] + graph[mid][b])
return min(result)
| 25.923077
| 68
| 0.531157
|
from itertools import product
def solution(n, s, a, b, fares):
result = []
INF = 1e12
graph = [[INF] * (n+1) for _ in range(n + 1)]
for i in range(1, n + 1):
graph[i][i] = 0
for c, d, f in fares:
graph[c][d] = graph[d][c] = f
for k, i, j in product(range(1, n + 1), repeat=3):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
for mid in range(1, n + 1):
result.append(graph[s][mid] + graph[mid][a] + graph[mid][b])
return min(result)
| true
| true
|
1c4a8168020be394142ebda10e6e5df8bb4b44fb
| 6,210
|
py
|
Python
|
storm_analysis/diagnostics/spliner_2d/configure.py
|
bintulab/storm-analysis
|
71ae493cbd17ddb97938d0ae2032d97a0eaa76b2
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/diagnostics/spliner_2d/configure.py
|
bintulab/storm-analysis
|
71ae493cbd17ddb97938d0ae2032d97a0eaa76b2
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/diagnostics/spliner_2d/configure.py
|
bintulab/storm-analysis
|
71ae493cbd17ddb97938d0ae2032d97a0eaa76b2
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python
"""
Configure folder for Spliner testing.
Hazen 09/17
"""
import argparse
import inspect
import numpy
import os
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.simulator.background as background
import storm_analysis.simulator.camera as camera
import storm_analysis.simulator.emitters_on_grid as emittersOnGrid
import storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom
import storm_analysis.simulator.photophysics as photophysics
import storm_analysis.simulator.psf as psf
import storm_analysis.simulator.simulate as simulate
import storm_analysis.spliner.measure_psf as measurePSF
import storm_analysis.spliner.psf_to_spline as psfToSpline
import storm_analysis.diagnostics.spliner_2d.settings as settings
def testingParameters(cal_file = None):
"""
Create a Spliner parameters object.
"""
params = parameters.ParametersSpliner()
params.setAttr("max_frame", "int", -1)
params.setAttr("start_frame", "int", -1)
params.setAttr("background_sigma", "float", 8.0)
if cal_file is not None:
params.setAttr("camera_calibration", "filename", cal_file)
else:
params.setAttr("camera_gain", "float", settings.camera_gain)
params.setAttr("camera_offset", "float", settings.camera_offset)
params.setAttr("find_max_radius", "int", 5)
params.setAttr("iterations", "int", settings.iterations)
params.setAttr("no_fitting", "int", 0)
params.setAttr("pixel_size", "float", settings.pixel_size)
params.setAttr("sigma", "float", 1.5)
params.setAttr("spline", "filename", "psf.spline")
params.setAttr("threshold", "float", 6.0)
# Don't do tracking.
params.setAttr("descriptor", "string", "1")
params.setAttr("radius", "float", "0.0")
# Don't do drift-correction.
params.setAttr("d_scale", "int", 2)
params.setAttr("drift_correction", "int", 0)
params.setAttr("frame_step", "int", 500)
params.setAttr("z_correction", "int", 0)
# 'peak_locations' testing.
if hasattr(settings, "peak_locations") and (settings.peak_locations is not None):
params.setAttr("peak_locations", "filename", settings.peak_locations)
return params
def configure(no_splines, cal_file = None):
# Create sCMOS calibration file if requested.
#
if cal_file is not None:
offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset
variance = numpy.ones((settings.y_size, settings.x_size))
gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain
rqe = numpy.ones((settings.y_size, settings.x_size))
numpy.save(cal_file, [offset, variance, gain, rqe, 2])
# Create parameters file for analysis.
#
print("Creating XML file.")
params = testingParameters(cal_file = cal_file)
params.toXMLFile("spliner.xml")
# Create localization on a grid file.
#
print("Creating gridded localization.")
emittersOnGrid.emittersOnGrid("grid_list.hdf5",
settings.nx,
settings.ny,
1.5,
20,
0.0,
0.0)
# Create randomly located localizations file.
#
print("Creating random localization.")
emittersUniformRandom.emittersUniformRandom("random_list.hdf5",
1.0,
settings.margin,
settings.x_size,
settings.y_size,
0.0)
# Create sparser grid for PSF measurement.
#
print("Creating data for PSF measurement.")
emittersOnGrid.emittersOnGrid("sparse_list.hdf5",
6,
3,
1.5,
40,
0.0,
0.0)
if no_splines:
return
# Create beads.txt file for spline measurement.
#
with saH5Py.SAH5Py("sparse_list.hdf5") as h5:
locs = h5.getLocalizations()
numpy.savetxt("beads.txt", numpy.transpose(numpy.vstack((locs['x'], locs['y']))))
# Create simulated data for PSF measurement.
#
bg_f = lambda s, x, y, i3 : background.UniformBackground(s, x, y, i3, photons = 10)
cam_f = lambda s, x, y, i3 : camera.Ideal(s, x, y, i3, 100.)
pp_f = lambda s, x, y, i3 : photophysics.AlwaysOn(s, x, y, i3, 20000.0)
psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size)
sim = simulate.Simulate(background_factory = bg_f,
camera_factory = cam_f,
photophysics_factory = pp_f,
psf_factory = psf_f,
dither = True,
x_size = settings.x_size,
y_size = settings.y_size)
sim.simulate("spline_2d.tif", "sparse_list.hdf5", 5)
# Measure the PSF.
#
print("Measuring PSF.")
psf_name = "psf.psf"
measurePSF.measurePSF("spline_2d.tif",
"na",
"sparse_list.hdf5",
psf_name,
want2d = True,
aoi_size = int(settings.spline_size + 1),
pixel_size = settings.pixel_size * 1.0e-3)
# Measure the Spline.
#
if True:
print("Measuring Spline.")
psfToSpline.psfToSpline(psf_name, "psf.spline", settings.spline_size)
if (__name__ == "__main__"):
parser = argparse.ArgumentParser(description = 'Spline diagnostics configuration.')
parser.add_argument('--no-splines', dest='no_splines', action='store_true', default = False)
args = parser.parse_args()
configure(args.no_splines)
| 35.689655
| 96
| 0.585829
|
import argparse
import inspect
import numpy
import os
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.simulator.background as background
import storm_analysis.simulator.camera as camera
import storm_analysis.simulator.emitters_on_grid as emittersOnGrid
import storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom
import storm_analysis.simulator.photophysics as photophysics
import storm_analysis.simulator.psf as psf
import storm_analysis.simulator.simulate as simulate
import storm_analysis.spliner.measure_psf as measurePSF
import storm_analysis.spliner.psf_to_spline as psfToSpline
import storm_analysis.diagnostics.spliner_2d.settings as settings
def testingParameters(cal_file = None):
params = parameters.ParametersSpliner()
params.setAttr("max_frame", "int", -1)
params.setAttr("start_frame", "int", -1)
params.setAttr("background_sigma", "float", 8.0)
if cal_file is not None:
params.setAttr("camera_calibration", "filename", cal_file)
else:
params.setAttr("camera_gain", "float", settings.camera_gain)
params.setAttr("camera_offset", "float", settings.camera_offset)
params.setAttr("find_max_radius", "int", 5)
params.setAttr("iterations", "int", settings.iterations)
params.setAttr("no_fitting", "int", 0)
params.setAttr("pixel_size", "float", settings.pixel_size)
params.setAttr("sigma", "float", 1.5)
params.setAttr("spline", "filename", "psf.spline")
params.setAttr("threshold", "float", 6.0)
params.setAttr("descriptor", "string", "1")
params.setAttr("radius", "float", "0.0")
# Don't do drift-correction.
params.setAttr("d_scale", "int", 2)
params.setAttr("drift_correction", "int", 0)
params.setAttr("frame_step", "int", 500)
params.setAttr("z_correction", "int", 0)
if hasattr(settings, "peak_locations") and (settings.peak_locations is not None):
params.setAttr("peak_locations", "filename", settings.peak_locations)
return params
def configure(no_splines, cal_file = None):
if cal_file is not None:
offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset
variance = numpy.ones((settings.y_size, settings.x_size))
gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain
rqe = numpy.ones((settings.y_size, settings.x_size))
numpy.save(cal_file, [offset, variance, gain, rqe, 2])
print("Creating XML file.")
params = testingParameters(cal_file = cal_file)
params.toXMLFile("spliner.xml")
print("Creating gridded localization.")
emittersOnGrid.emittersOnGrid("grid_list.hdf5",
settings.nx,
settings.ny,
1.5,
20,
0.0,
0.0)
print("Creating random localization.")
emittersUniformRandom.emittersUniformRandom("random_list.hdf5",
1.0,
settings.margin,
settings.x_size,
settings.y_size,
0.0)
print("Creating data for PSF measurement.")
emittersOnGrid.emittersOnGrid("sparse_list.hdf5",
6,
3,
1.5,
40,
0.0,
0.0)
if no_splines:
return
with saH5Py.SAH5Py("sparse_list.hdf5") as h5:
locs = h5.getLocalizations()
numpy.savetxt("beads.txt", numpy.transpose(numpy.vstack((locs['x'], locs['y']))))
bg_f = lambda s, x, y, i3 : background.UniformBackground(s, x, y, i3, photons = 10)
cam_f = lambda s, x, y, i3 : camera.Ideal(s, x, y, i3, 100.)
pp_f = lambda s, x, y, i3 : photophysics.AlwaysOn(s, x, y, i3, 20000.0)
psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size)
sim = simulate.Simulate(background_factory = bg_f,
camera_factory = cam_f,
photophysics_factory = pp_f,
psf_factory = psf_f,
dither = True,
x_size = settings.x_size,
y_size = settings.y_size)
sim.simulate("spline_2d.tif", "sparse_list.hdf5", 5)
print("Measuring PSF.")
psf_name = "psf.psf"
measurePSF.measurePSF("spline_2d.tif",
"na",
"sparse_list.hdf5",
psf_name,
want2d = True,
aoi_size = int(settings.spline_size + 1),
pixel_size = settings.pixel_size * 1.0e-3)
if True:
print("Measuring Spline.")
psfToSpline.psfToSpline(psf_name, "psf.spline", settings.spline_size)
if (__name__ == "__main__"):
parser = argparse.ArgumentParser(description = 'Spline diagnostics configuration.')
parser.add_argument('--no-splines', dest='no_splines', action='store_true', default = False)
args = parser.parse_args()
configure(args.no_splines)
| true
| true
|
1c4a82c3e3b8ab680786c7cb5945c0ac48353bc2
| 5,065
|
py
|
Python
|
GUIMonkey/Steps.py
|
Setyadjih/GUIMonkey
|
b1299663ee84de688663c08040d3411c78fb7fe1
|
[
"MIT"
] | null | null | null |
GUIMonkey/Steps.py
|
Setyadjih/GUIMonkey
|
b1299663ee84de688663c08040d3411c78fb7fe1
|
[
"MIT"
] | 7
|
2021-04-27T03:39:32.000Z
|
2021-09-03T05:31:38.000Z
|
GUIMonkey/Steps.py
|
Setyadjih/GUIMonkey
|
b1299663ee84de688663c08040d3411c78fb7fe1
|
[
"MIT"
] | null | null | null |
import time
from abc import ABC, abstractmethod
import pyautogui
from lib.logger import get_logger
class StepBase(ABC):
"""Base class for steps. We mainly want the execute interface"""
@abstractmethod
def __init__(self, step_name: str = None, logger=None):
self.name = step_name if step_name else self.__class__.__name__
self.data = dict()
self.flags = {
# Require Flags
"require": False,
"require_key": None,
# output Flags
"output": False,
"output_key": None,
}
self.logger = logger if logger else get_logger(self.name)
@abstractmethod
def execute(self):
self.logger.debug(f"Executing {self.name}...")
# TODO: This system seems fragile. How should in and out data be handled?
def require_data(self, require_bool=False, require_key=None):
self.flags["require"] = require_bool
self.flags["require_key"] = require_key
self.data[require_key] = None
def output_data(self, pass_bool=False, output_key=None):
self.flags["output"] = pass_bool
self.flags["output_key"] = output_key
self.data[output_key] = None
class KeyPress(StepBase):
def __init__(self, key, mod=None, step_name=None, logger=None):
super(KeyPress, self).__init__(step_name, logger)
self.key = key
self.mod = mod
def execute(self):
super(KeyPress, self).execute()
if self.mod:
pyautogui.keyDown(self.mod)
pyautogui.press(self.key)
pyautogui.keyUp(self.mod)
else:
pyautogui.press(self.key)
class WaitForImage(StepBase):
def __init__(self, image, timeout=30, step_name=None, logger=None):
super(WaitForImage, self).__init__(step_name, logger)
self.timeout = timeout
self.require_data(True, "image")
self.output_data(True, "image_loc")
self.data["image"] = image
def execute(self):
super(WaitForImage, self).execute()
start = time.time()
current = time.time()
while current - start < self.timeout:
time.sleep(3)
image = self.data["image"]
image_loc = pyautogui.locateCenterOnScreen(image, confidence=0.9)
if image_loc:
self.logger.debug("Found!")
self.data["image_loc"] = image_loc
return
else:
current = time.time()
self.logger.debug(f"Did not find image...{int(current - start)}/{self.timeout}")
self.logger.warning("Search timed out, returning...")
return
class Delay(StepBase):
def __init__(self, delay=0.5, step_name=None, logger=None):
super(Delay, self).__init__(step_name, logger)
self.delay = delay
def execute(self):
super(Delay, self).execute()
time.sleep(self.delay)
class MoveToButton(StepBase):
def __init__(self, button, step_name=None, logger=None):
super(MoveToButton, self).__init__(step_name, logger)
self.button = button
def execute(self):
super(MoveToButton, self).execute()
button_loc = pyautogui.locateCenterOnScreen(self.button, confidence=0.9)
pyautogui.moveTo(button_loc[0], button_loc[1])
class ClickOnButton(StepBase):
def __init__(self, button, click_num=1, step_name=None, logger=None):
super(ClickOnButton, self).__init__(step_name, logger)
self.require_data(True, "image_loc")
self.button = button
self.click_num = click_num
def execute(self):
super(ClickOnButton, self).execute()
button = pyautogui.locateCenterOnScreen(self.button, confidence=0.9)
for i in range(self.click_num):
pyautogui.click(button[0], button[1])
class Write(StepBase):
def __init__(self, text, enter=False, step_name=None, logger=None):
super(Write, self).__init__(step_name, logger)
self.text = text
self.enter = enter
def execute(self):
super(Write, self).execute()
pyautogui.write(self.text)
if self.enter:
pyautogui.press("enter")
class WaitForLoading(StepBase):
def __init__(self, loading_image, trigger_max=3, step_name=None, logger=None):
super(WaitForLoading, self).__init__(step_name, logger)
self.loading_image = loading_image
self.trigger_max = trigger_max
def execute(self):
super(WaitForLoading, self).execute()
trigger = 0
while trigger < self.trigger_max:
load1 = pyautogui.locateCenterOnScreen("resources/CLO_loading.png", confidence=0.9)
if load1:
self.logger.debug("found loading, waiting...")
trigger = 0
pyautogui.moveTo(load1)
time.sleep(3)
else:
trigger += 1
time.sleep(1)
self.logger.debug(f"Did not find loading, triggering ({trigger} / " f"{self.trigger_max})")
| 31.855346
| 107
| 0.615597
|
import time
from abc import ABC, abstractmethod
import pyautogui
from lib.logger import get_logger
class StepBase(ABC):
@abstractmethod
def __init__(self, step_name: str = None, logger=None):
self.name = step_name if step_name else self.__class__.__name__
self.data = dict()
self.flags = {
"require": False,
"require_key": None,
"output": False,
"output_key": None,
}
self.logger = logger if logger else get_logger(self.name)
@abstractmethod
def execute(self):
self.logger.debug(f"Executing {self.name}...")
def require_data(self, require_bool=False, require_key=None):
self.flags["require"] = require_bool
self.flags["require_key"] = require_key
self.data[require_key] = None
def output_data(self, pass_bool=False, output_key=None):
self.flags["output"] = pass_bool
self.flags["output_key"] = output_key
self.data[output_key] = None
class KeyPress(StepBase):
def __init__(self, key, mod=None, step_name=None, logger=None):
super(KeyPress, self).__init__(step_name, logger)
self.key = key
self.mod = mod
def execute(self):
super(KeyPress, self).execute()
if self.mod:
pyautogui.keyDown(self.mod)
pyautogui.press(self.key)
pyautogui.keyUp(self.mod)
else:
pyautogui.press(self.key)
class WaitForImage(StepBase):
def __init__(self, image, timeout=30, step_name=None, logger=None):
super(WaitForImage, self).__init__(step_name, logger)
self.timeout = timeout
self.require_data(True, "image")
self.output_data(True, "image_loc")
self.data["image"] = image
def execute(self):
super(WaitForImage, self).execute()
start = time.time()
current = time.time()
while current - start < self.timeout:
time.sleep(3)
image = self.data["image"]
image_loc = pyautogui.locateCenterOnScreen(image, confidence=0.9)
if image_loc:
self.logger.debug("Found!")
self.data["image_loc"] = image_loc
return
else:
current = time.time()
self.logger.debug(f"Did not find image...{int(current - start)}/{self.timeout}")
self.logger.warning("Search timed out, returning...")
return
class Delay(StepBase):
def __init__(self, delay=0.5, step_name=None, logger=None):
super(Delay, self).__init__(step_name, logger)
self.delay = delay
def execute(self):
super(Delay, self).execute()
time.sleep(self.delay)
class MoveToButton(StepBase):
def __init__(self, button, step_name=None, logger=None):
super(MoveToButton, self).__init__(step_name, logger)
self.button = button
def execute(self):
super(MoveToButton, self).execute()
button_loc = pyautogui.locateCenterOnScreen(self.button, confidence=0.9)
pyautogui.moveTo(button_loc[0], button_loc[1])
class ClickOnButton(StepBase):
def __init__(self, button, click_num=1, step_name=None, logger=None):
super(ClickOnButton, self).__init__(step_name, logger)
self.require_data(True, "image_loc")
self.button = button
self.click_num = click_num
def execute(self):
super(ClickOnButton, self).execute()
button = pyautogui.locateCenterOnScreen(self.button, confidence=0.9)
for i in range(self.click_num):
pyautogui.click(button[0], button[1])
class Write(StepBase):
def __init__(self, text, enter=False, step_name=None, logger=None):
super(Write, self).__init__(step_name, logger)
self.text = text
self.enter = enter
def execute(self):
super(Write, self).execute()
pyautogui.write(self.text)
if self.enter:
pyautogui.press("enter")
class WaitForLoading(StepBase):
def __init__(self, loading_image, trigger_max=3, step_name=None, logger=None):
super(WaitForLoading, self).__init__(step_name, logger)
self.loading_image = loading_image
self.trigger_max = trigger_max
def execute(self):
super(WaitForLoading, self).execute()
trigger = 0
while trigger < self.trigger_max:
load1 = pyautogui.locateCenterOnScreen("resources/CLO_loading.png", confidence=0.9)
if load1:
self.logger.debug("found loading, waiting...")
trigger = 0
pyautogui.moveTo(load1)
time.sleep(3)
else:
trigger += 1
time.sleep(1)
self.logger.debug(f"Did not find loading, triggering ({trigger} / " f"{self.trigger_max})")
| true
| true
|
1c4a847fa05d25e7f265a374d27586576fb10671
| 255
|
py
|
Python
|
aux/engine/actor/__init__.py
|
bischjer/auxiliary
|
e42d8a4af43c9bd4d816c03edc2465640635b46b
|
[
"BSD-3-Clause"
] | null | null | null |
aux/engine/actor/__init__.py
|
bischjer/auxiliary
|
e42d8a4af43c9bd4d816c03edc2465640635b46b
|
[
"BSD-3-Clause"
] | null | null | null |
aux/engine/actor/__init__.py
|
bischjer/auxiliary
|
e42d8a4af43c9bd4d816c03edc2465640635b46b
|
[
"BSD-3-Clause"
] | null | null | null |
from aux.engine.actor.reactor import Reactor
from aux.engine.actor.proactor import Proactor
from aux.engine.actor.coactor import Coactor
__all__ = ['Reactor',
'Proactor',
'Coactor']
class NoActorFoundError(Exception):pass
| 23.181818
| 46
| 0.705882
|
from aux.engine.actor.reactor import Reactor
from aux.engine.actor.proactor import Proactor
from aux.engine.actor.coactor import Coactor
__all__ = ['Reactor',
'Proactor',
'Coactor']
class NoActorFoundError(Exception):pass
| true
| true
|
1c4a849524c22465abea1613bfe918c2849cddd4
| 233
|
py
|
Python
|
nni/retiarii/trainer/pytorch/__init__.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 3
|
2021-02-23T14:01:43.000Z
|
2021-03-29T16:19:32.000Z
|
nni/retiarii/trainer/pytorch/__init__.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 1
|
2021-01-17T08:53:56.000Z
|
2021-01-17T08:53:56.000Z
|
nni/retiarii/trainer/pytorch/__init__.py
|
qfyin/nni
|
59a1ccf8eba68b94974e84fc3834f38d851faf89
|
[
"MIT"
] | 1
|
2020-12-21T11:15:54.000Z
|
2020-12-21T11:15:54.000Z
|
from .base import PyTorchImageClassificationTrainer, PyTorchMultiModelTrainer
from .darts import DartsTrainer
from .enas import EnasTrainer
from .proxyless import ProxylessTrainer
from .random import RandomTrainer, SinglePathTrainer
| 38.833333
| 77
| 0.875536
|
from .base import PyTorchImageClassificationTrainer, PyTorchMultiModelTrainer
from .darts import DartsTrainer
from .enas import EnasTrainer
from .proxyless import ProxylessTrainer
from .random import RandomTrainer, SinglePathTrainer
| true
| true
|
1c4a84b409b74bb514cb2f6f68ddd6f1e2431fe6
| 5,524
|
py
|
Python
|
build/lib/mpesa/api/mpesa_express.py
|
Arlus/python-mpesa
|
4113d9a59211c05f4c6881965710c4f67a5157b2
|
[
"MIT"
] | 26
|
2018-08-02T06:59:50.000Z
|
2022-01-15T16:46:39.000Z
|
build/lib/mpesa/api/mpesa_express.py
|
Arlus/python-mpesa
|
4113d9a59211c05f4c6881965710c4f67a5157b2
|
[
"MIT"
] | 1
|
2019-08-17T08:52:23.000Z
|
2019-08-17T08:52:23.000Z
|
build/lib/mpesa/api/mpesa_express.py
|
Arlus/python-mpesa
|
4113d9a59211c05f4c6881965710c4f67a5157b2
|
[
"MIT"
] | 38
|
2018-07-26T18:11:18.000Z
|
2022-01-02T10:10:54.000Z
|
import base64
import requests
from .auth import MpesaBase
import datetime
class MpesaExpress(MpesaBase):
def __init__(self, env="sandbox", app_key=None, app_secret=None, sandbox_url=None, live_url=None):
MpesaBase.__init__(self, env, app_key, app_secret, sandbox_url, live_url)
self.authentication_token = self.authenticate()
def stk_push(self, business_shortcode=None, passcode=None, amount=None, callback_url=None, reference_code=None,
phone_number=None, description=None):
"""This method uses Mpesa's Express API to initiate online payment on behalf of a customer..
**Args:**
- business_shortcode (int): The short code of the organization.
- passcode (str): Get from developer portal
- amount (int): The amount being transacted
- callback_url (str): A CallBack URL is a valid secure URL that is used to receive notifications from M-Pesa API.
- reference_code: Account Reference: This is an Alpha-Numeric parameter that is defined by your system as an Identifier of the transaction for CustomerPayBillOnline transaction type.
- phone_number: The Mobile Number to receive the STK Pin Prompt.
- description: This is any additional information/comment that can be sent along with the request from your system. MAX 13 characters
**Returns:**
- CustomerMessage (str):
- CheckoutRequestID (str):
- ResponseDescription (str):
- MerchantRequestID (str):
- ResponseCode (str):
"""
time = str(datetime.datetime.now()).split(".")[0].replace("-", "").replace(" ", "").replace(":", "")
password = "{0}{1}{2}".format(str(business_shortcode), str(passcode), time)
encoded = base64.b64encode(password)
payload = {
"BusinessShortCode": business_shortcode,
"Password": encoded,
"Timestamp": time,
"TransactionType": "CustomerPayBillOnline",
"Amount": amount,
"PartyA": int(phone_number),
"PartyB": business_shortcode,
"PhoneNumber": int(phone_number),
"CallBackURL": callback_url,
"AccountReference": reference_code,
"TransactionDesc": description
}
headers = {'Authorization': 'Bearer {0}'.format(self.authentication_token), 'Content-Type': "application/json"}
if self.env == "production":
base_safaricom_url = self.live_url
else:
base_safaricom_url = self.sandbox_url
saf_url = "{0}{1}".format(base_safaricom_url, "/mpesa/stkpush/v1/processrequest")
r = requests.post(saf_url, headers=headers, json=payload)
return r.json()
def query(self, business_shortcode=None, checkout_request_id=None, passcode=None):
"""This method uses Mpesa's Express API to check the status of a Lipa Na M-Pesa Online Payment..
**Args:**
- business_shortcode (int): This is organizations shortcode (Paybill or Buygoods - A 5 to 6 digit account number) used to identify an organization and receive the transaction.
- checkout_request_id (str): This is a global unique identifier of the processed checkout transaction request.
- passcode (str): Get from developer portal
**Returns:**
- CustomerMessage (str):
- CheckoutRequestID (str):
- ResponseDescription (str):
- MerchantRequestID (str):
- ResponseCode (str):
"""
time = str(datetime.datetime.now()).split(".")[0].replace("-", "").replace(" ", "").replace(":", "")
password = "{0}{1}{2}".format(str(business_shortcode), str(passcode), time)
encoded = base64.b64encode(password)
payload = {
"BusinessShortCode": business_shortcode,
"Password": encoded,
"Timestamp": time,
"CheckoutRequestID": checkout_request_id
}
headers = {'Authorization': 'Bearer {0}'.format(self.authentication_token), 'Content-Type': "application/json"}
if self.env == "production":
base_safaricom_url = self.live_url
else:
base_safaricom_url = self.sandbox_url
saf_url = "{0}{1}".format(base_safaricom_url, "/mpesa/stkpushquery/v1/query")
r = requests.post(saf_url, headers=headers, json=payload)
return r.json()
| 57.541667
| 238
| 0.509051
|
import base64
import requests
from .auth import MpesaBase
import datetime
class MpesaExpress(MpesaBase):
def __init__(self, env="sandbox", app_key=None, app_secret=None, sandbox_url=None, live_url=None):
MpesaBase.__init__(self, env, app_key, app_secret, sandbox_url, live_url)
self.authentication_token = self.authenticate()
def stk_push(self, business_shortcode=None, passcode=None, amount=None, callback_url=None, reference_code=None,
phone_number=None, description=None):
time = str(datetime.datetime.now()).split(".")[0].replace("-", "").replace(" ", "").replace(":", "")
password = "{0}{1}{2}".format(str(business_shortcode), str(passcode), time)
encoded = base64.b64encode(password)
payload = {
"BusinessShortCode": business_shortcode,
"Password": encoded,
"Timestamp": time,
"TransactionType": "CustomerPayBillOnline",
"Amount": amount,
"PartyA": int(phone_number),
"PartyB": business_shortcode,
"PhoneNumber": int(phone_number),
"CallBackURL": callback_url,
"AccountReference": reference_code,
"TransactionDesc": description
}
headers = {'Authorization': 'Bearer {0}'.format(self.authentication_token), 'Content-Type': "application/json"}
if self.env == "production":
base_safaricom_url = self.live_url
else:
base_safaricom_url = self.sandbox_url
saf_url = "{0}{1}".format(base_safaricom_url, "/mpesa/stkpush/v1/processrequest")
r = requests.post(saf_url, headers=headers, json=payload)
return r.json()
def query(self, business_shortcode=None, checkout_request_id=None, passcode=None):
time = str(datetime.datetime.now()).split(".")[0].replace("-", "").replace(" ", "").replace(":", "")
password = "{0}{1}{2}".format(str(business_shortcode), str(passcode), time)
encoded = base64.b64encode(password)
payload = {
"BusinessShortCode": business_shortcode,
"Password": encoded,
"Timestamp": time,
"CheckoutRequestID": checkout_request_id
}
headers = {'Authorization': 'Bearer {0}'.format(self.authentication_token), 'Content-Type': "application/json"}
if self.env == "production":
base_safaricom_url = self.live_url
else:
base_safaricom_url = self.sandbox_url
saf_url = "{0}{1}".format(base_safaricom_url, "/mpesa/stkpushquery/v1/query")
r = requests.post(saf_url, headers=headers, json=payload)
return r.json()
| true
| true
|
1c4a8557b221c053d9b0aaf2d4788aa69102a5c8
| 1,269
|
py
|
Python
|
yt_dlp/extractor/vidlox.py
|
mkg20001/yt-dlp
|
9518a0a6bed040844d5fd6e29c25328e5949ce94
|
[
"Unlicense"
] | 1
|
2021-08-24T17:03:03.000Z
|
2021-08-24T17:03:03.000Z
|
yt_dlp/extractor/vidlox.py
|
mkg20001/yt-dlp
|
9518a0a6bed040844d5fd6e29c25328e5949ce94
|
[
"Unlicense"
] | null | null | null |
yt_dlp/extractor/vidlox.py
|
mkg20001/yt-dlp
|
9518a0a6bed040844d5fd6e29c25328e5949ce94
|
[
"Unlicense"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class VIDLOXIE(InfoExtractor):
IE_NAME = 'vidlox'
_VALID_URL = r'https?://vidlox\.me/(embed-)?(?P<id>[a-z0-9]+).html'
_TEST = {
'url': 'https://vidlox.me/6wq8gciafziz.html',
'info_dict': {
'id': '6wq8gciafziz',
'title': 'md5:74c82229b059846a82628e60dcc661b5',
'ext': 'mp4',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://vidlox.me/%s.html' % video_id, video_id)
m3u8 = self._search_regex(
r'(https.+m3u8)',
webpage, 'm3u8')
title = self._search_regex(
r'<title>Watch (?P<title>.+)<\/title>',
webpage, 'title', group='title')
thumbnail = self._search_regex(
r'spriteSheetUrl = "(?P<thumbnail>https.+)"',
webpage, 'thumbnail', group='thumbnail')
formats = self._extract_m3u8_formats(m3u8, video_id, ext='mp4')
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
| 27.586957
| 71
| 0.547675
|
from __future__ import unicode_literals
from .common import InfoExtractor
class VIDLOXIE(InfoExtractor):
IE_NAME = 'vidlox'
_VALID_URL = r'https?://vidlox\.me/(embed-)?(?P<id>[a-z0-9]+).html'
_TEST = {
'url': 'https://vidlox.me/6wq8gciafziz.html',
'info_dict': {
'id': '6wq8gciafziz',
'title': 'md5:74c82229b059846a82628e60dcc661b5',
'ext': 'mp4',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://vidlox.me/%s.html' % video_id, video_id)
m3u8 = self._search_regex(
r'(https.+m3u8)',
webpage, 'm3u8')
title = self._search_regex(
r'<title>Watch (?P<title>.+)<\/title>',
webpage, 'title', group='title')
thumbnail = self._search_regex(
r'spriteSheetUrl = "(?P<thumbnail>https.+)"',
webpage, 'thumbnail', group='thumbnail')
formats = self._extract_m3u8_formats(m3u8, video_id, ext='mp4')
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
}
| true
| true
|
1c4a8748112916fe5593eae671d07b4837e59ab3
| 6,115
|
py
|
Python
|
configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py
|
Lechatelia/own_mmdet
|
eac5db1d1bee8eafe0ed46fa4bb61ca8605b502f
|
[
"Apache-2.0"
] | 24
|
2021-10-14T03:28:28.000Z
|
2022-03-29T09:30:04.000Z
|
configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py
|
Lechatelia/own_mmdet
|
eac5db1d1bee8eafe0ed46fa4bb61ca8605b502f
|
[
"Apache-2.0"
] | 4
|
2021-12-14T15:04:49.000Z
|
2022-02-19T09:54:42.000Z
|
configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x.py
|
Lechatelia/own_mmdet
|
eac5db1d1bee8eafe0ed46fa4bb61ca8605b502f
|
[
"Apache-2.0"
] | 4
|
2021-10-31T11:23:06.000Z
|
2021-12-17T06:38:50.000Z
|
# model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=5,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/libra_faster_rcnn_r101_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 31.358974
| 79
| 0.559935
|
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet101',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0)))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=5,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/libra_faster_rcnn_r101_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| true
| true
|
1c4a87f8e649f8a5932c3165dddf9b3f0aeca3cc
| 880
|
py
|
Python
|
mi/dataset/driver/ctdmo_ghqr/sio/test/test_ctdmo_ghqr_sio_telemetered_driver.py
|
cdobs/mi-instrument
|
99f9322a4afabc5dff9b0fad12166075efce838c
|
[
"BSD-2-Clause"
] | 1
|
2018-09-14T23:28:29.000Z
|
2018-09-14T23:28:29.000Z
|
mi/dataset/driver/ctdmo_ghqr/sio/test/test_ctdmo_ghqr_sio_telemetered_driver.py
|
cdobs/mi-instrument
|
99f9322a4afabc5dff9b0fad12166075efce838c
|
[
"BSD-2-Clause"
] | 33
|
2017-04-25T19:53:45.000Z
|
2022-03-18T17:42:18.000Z
|
mi/dataset/driver/ctdmo_ghqr/sio/test/test_ctdmo_ghqr_sio_telemetered_driver.py
|
cdobs/mi-instrument
|
99f9322a4afabc5dff9b0fad12166075efce838c
|
[
"BSD-2-Clause"
] | 31
|
2015-03-04T01:01:09.000Z
|
2020-10-28T14:42:12.000Z
|
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.ctdmo_ghqr.sio.ctdmo_ghqr_sio_telemetered_driver import parse
from mi.dataset.driver.ctdmo_ghqr.sio.resource import RESOURCE_PATH
__author__ = 'mworden'
log = get_logger()
class SampleTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, 'node59p1_3.ctdmo.dat')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
self.assertEquals(particle_data_handler._failure, False)
if __name__ == '__main__':
test = SampleTest('test_one')
test.test_one()
| 28.387097
| 84
| 0.761364
|
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.ctdmo_ghqr.sio.ctdmo_ghqr_sio_telemetered_driver import parse
from mi.dataset.driver.ctdmo_ghqr.sio.resource import RESOURCE_PATH
__author__ = 'mworden'
log = get_logger()
class SampleTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, 'node59p1_3.ctdmo.dat')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
self.assertEquals(particle_data_handler._failure, False)
if __name__ == '__main__':
test = SampleTest('test_one')
test.test_one()
| true
| true
|
1c4a898f084e68f38241c7b658081be92d7a651f
| 949
|
py
|
Python
|
examples/01/b.py
|
cjrh/aiosmartsock
|
a4ab5ffe5b673ada2a3002d7a9cb68ee1ea4a48f
|
[
"Apache-2.0"
] | 9
|
2019-03-25T23:25:08.000Z
|
2022-01-17T00:49:26.000Z
|
examples/01/b.py
|
cjrh/aiomsg
|
74b646675e3d7296f0334d3e17c1be0370c5d852
|
[
"Apache-2.0"
] | 33
|
2019-04-13T02:31:07.000Z
|
2022-03-21T19:12:14.000Z
|
examples/01/b.py
|
cjrh/aiosmartsock
|
a4ab5ffe5b673ada2a3002d7a9cb68ee1ea4a48f
|
[
"Apache-2.0"
] | 1
|
2021-04-26T09:07:36.000Z
|
2021-04-26T09:07:36.000Z
|
import logging
import asyncio
import aiomsg
import random
from colorama import init, Fore, Style
init()
logging.basicConfig(level="DEBUG")
async def main():
s = aiomsg.Søcket()
await s.connect()
async def r():
while True:
print("waiting for response...")
msg = await s.recv_string()
print(Fore.GREEN + f"Got back {msg}" + Style.RESET_ALL)
# assert msg == 'CALEB'
t = loop.create_task(r())
try:
while True:
print("sending...")
await s.send_string(Fore.BLUE + "caleb" + Style.RESET_ALL)
await asyncio.sleep(random.randint(0, 30))
except asyncio.CancelledError:
t.cancel()
await t
if __name__ == "__main__":
loop = asyncio.get_event_loop()
m = loop.create_task(main())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
m.cancel()
loop.run_until_complete(m)
| 21.088889
| 70
| 0.591149
|
import logging
import asyncio
import aiomsg
import random
from colorama import init, Fore, Style
init()
logging.basicConfig(level="DEBUG")
async def main():
s = aiomsg.Søcket()
await s.connect()
async def r():
while True:
print("waiting for response...")
msg = await s.recv_string()
print(Fore.GREEN + f"Got back {msg}" + Style.RESET_ALL)
t = loop.create_task(r())
try:
while True:
print("sending...")
await s.send_string(Fore.BLUE + "caleb" + Style.RESET_ALL)
await asyncio.sleep(random.randint(0, 30))
except asyncio.CancelledError:
t.cancel()
await t
if __name__ == "__main__":
loop = asyncio.get_event_loop()
m = loop.create_task(main())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
m.cancel()
loop.run_until_complete(m)
| true
| true
|
1c4a8a9465c9765af917e68961daf41b5e99fd15
| 1,270
|
py
|
Python
|
backend/api/urls.py
|
ChristchurchCityWeightlifting/lifter-api
|
a82b79c75106e7f4f8ea4b4e3e12d727213445e3
|
[
"MIT"
] | null | null | null |
backend/api/urls.py
|
ChristchurchCityWeightlifting/lifter-api
|
a82b79c75106e7f4f8ea4b4e3e12d727213445e3
|
[
"MIT"
] | 5
|
2022-03-07T08:30:47.000Z
|
2022-03-22T09:15:52.000Z
|
backend/api/urls.py
|
ChristchurchCityWeightlifting/lifter-api
|
a82b79c75106e7f4f8ea4b4e3e12d727213445e3
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from rest_framework_nested.routers import NestedDefaultRouter
from api.views import AthleteViewSet, CompetitionViewSet, LiftViewSet, SessionViewSet
router = DefaultRouter(trailing_slash=False)
router.register(r"athletes", AthleteViewSet, "athletes")
router.register(r"competitions", CompetitionViewSet, "competitions")
# /athletes/<athlete pk>
# /competitions/<competition pk>
competitions_router = NestedDefaultRouter(
router, r"competitions", lookup="competitions"
)
competitions_router.register(
r"sessions", SessionViewSet, basename="competition-sessions"
)
# /competitions/<competition pk>/session/<session number>
sessions_router = NestedDefaultRouter(
competitions_router, r"sessions", lookup="sessions"
)
sessions_router.register(r"lifts", LiftViewSet, basename="session-lifts")
# /competitions/<competition pk>/sessions/<session number>/lifts/<lift pk>
urlpatterns = [
path("auth/", include("dj_rest_auth.urls")),
# path("auth/registration/", include("dj_rest_auth.registration.urls")), # block registration for now
path("", include(router.urls)),
path("", include(competitions_router.urls)),
path("", include(sessions_router.urls)),
]
| 36.285714
| 105
| 0.774803
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from rest_framework_nested.routers import NestedDefaultRouter
from api.views import AthleteViewSet, CompetitionViewSet, LiftViewSet, SessionViewSet
router = DefaultRouter(trailing_slash=False)
router.register(r"athletes", AthleteViewSet, "athletes")
router.register(r"competitions", CompetitionViewSet, "competitions")
competitions_router = NestedDefaultRouter(
router, r"competitions", lookup="competitions"
)
competitions_router.register(
r"sessions", SessionViewSet, basename="competition-sessions"
)
sessions_router = NestedDefaultRouter(
competitions_router, r"sessions", lookup="sessions"
)
sessions_router.register(r"lifts", LiftViewSet, basename="session-lifts")
urlpatterns = [
path("auth/", include("dj_rest_auth.urls")),
.urls)),
path("", include(competitions_router.urls)),
path("", include(sessions_router.urls)),
]
| true
| true
|
1c4a8b7192fcfef250b74c73b64732c53563f6c6
| 1,585
|
py
|
Python
|
kluctl/cli/seal_command_stubs.py
|
matzegebbe/kluctl
|
1b092b921e7301a30c99792b026634e099fbf15d
|
[
"Apache-2.0"
] | 26
|
2021-08-18T11:18:46.000Z
|
2022-03-16T09:28:43.000Z
|
kluctl/cli/seal_command_stubs.py
|
matzegebbe/kluctl
|
1b092b921e7301a30c99792b026634e099fbf15d
|
[
"Apache-2.0"
] | 4
|
2021-09-07T09:55:29.000Z
|
2022-03-03T09:05:01.000Z
|
kluctl/cli/seal_command_stubs.py
|
matzegebbe/kluctl
|
1b092b921e7301a30c99792b026634e099fbf15d
|
[
"Apache-2.0"
] | 4
|
2021-09-04T11:52:33.000Z
|
2022-03-16T09:18:20.000Z
|
import click
from click_option_group import optgroup
from kluctl.cli.main_cli_group import kluctl_project_args, cli_group
@cli_group.command("seal",
help="Seal secrets based on target's sealingConfig.\n\n"
"Loads all secrets from the specified secrets sets from the target's sealingConfig and "
"then renders the target, including all files with the `.sealme` extension. Then runs "
"kubeseal on each `.sealme` file and stores secrets in the directory specified by "
"`--local-sealed-secrets`, using the outputPattern from your deployment project.\n\n"
"If no `--target` is specified, sealing is performed for all targets.")
@kluctl_project_args()
@optgroup.group("Misc arguments")
@optgroup.option("--secrets-dir",
help="Specifies where to find unencrypted secret files. The given directory is NOT meant to be part "
"of your source repository! The given path only matters for secrets of type 'path'. Defaults "
"to the current working directory.",
default='.', type=click.Path(exists=True, file_okay=False))
@optgroup.option("--force-reseal",
help="Lets kluctl ignore secret hashes found in already sealed secrets and thus forces "
"resealing of those.",
is_flag=True)
@click.pass_obj
def seal_command_stub(obj, **kwargs):
from kluctl.seal.seal_command import seal_command
seal_command(obj, kwargs)
| 54.655172
| 118
| 0.643533
|
import click
from click_option_group import optgroup
from kluctl.cli.main_cli_group import kluctl_project_args, cli_group
@cli_group.command("seal",
help="Seal secrets based on target's sealingConfig.\n\n"
"Loads all secrets from the specified secrets sets from the target's sealingConfig and "
"then renders the target, including all files with the `.sealme` extension. Then runs "
"kubeseal on each `.sealme` file and stores secrets in the directory specified by "
"`--local-sealed-secrets`, using the outputPattern from your deployment project.\n\n"
"If no `--target` is specified, sealing is performed for all targets.")
@kluctl_project_args()
@optgroup.group("Misc arguments")
@optgroup.option("--secrets-dir",
help="Specifies where to find unencrypted secret files. The given directory is NOT meant to be part "
"of your source repository! The given path only matters for secrets of type 'path'. Defaults "
"to the current working directory.",
default='.', type=click.Path(exists=True, file_okay=False))
@optgroup.option("--force-reseal",
help="Lets kluctl ignore secret hashes found in already sealed secrets and thus forces "
"resealing of those.",
is_flag=True)
@click.pass_obj
def seal_command_stub(obj, **kwargs):
from kluctl.seal.seal_command import seal_command
seal_command(obj, kwargs)
| true
| true
|
1c4a8c258378ad8f2a962e64c0b2b5d11bc73837
| 375
|
py
|
Python
|
2015/10/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2015/10/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2015/10/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
from pathlib import Path
with (Path(__file__).parent / "input.txt").open() as puzzle_input_file:
puzzle_input_raw = puzzle_input_file.read()
import itertools
number = puzzle_input_raw
def look_and_say(look):
return ''.join(str(len([1 for _ in v])) + k for k, v in itertools.groupby(look))
for _ in range(50):
number = look_and_say(number)
print(len(number))
| 23.4375
| 84
| 0.72
|
from pathlib import Path
with (Path(__file__).parent / "input.txt").open() as puzzle_input_file:
puzzle_input_raw = puzzle_input_file.read()
import itertools
number = puzzle_input_raw
def look_and_say(look):
return ''.join(str(len([1 for _ in v])) + k for k, v in itertools.groupby(look))
for _ in range(50):
number = look_and_say(number)
print(len(number))
| true
| true
|
1c4a8d43ec29814bf33a1a684899d96c24b15fbe
| 13,571
|
py
|
Python
|
utils/visualize.py
|
vtekur/gnn_pathplanning
|
150ca315c214134eda8f5c5b55ce71da9360bcce
|
[
"MIT"
] | null | null | null |
utils/visualize.py
|
vtekur/gnn_pathplanning
|
150ca315c214134eda8f5c5b55ce71da9360bcce
|
[
"MIT"
] | null | null | null |
utils/visualize.py
|
vtekur/gnn_pathplanning
|
150ca315c214134eda8f5c5b55ce71da9360bcce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import yaml
import matplotlib
# matplotlib.use("Agg")
from matplotlib.patches import Circle, Rectangle, Arrow
from matplotlib.collections import PatchCollection
from matplotlib.patches import ConnectionPatch
from matplotlib.patches import FancyArrowPatch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib import lines
import matplotlib.animation as manimation
import argparse
import math
import gc
import seaborn as sns
import time
import scipy.io as sio
import sys
np.set_printoptions(threshold=np.inf)
class Animation:
def __init__(self, config):
self.config = config
with open(config.map) as map_file:
self.data_map = yaml.load(map_file)
with open(config.schedule) as states_file:
self.schedule = yaml.load(states_file)
self.num_agents = len(self.data_map["agents"])
self.K = self.config.nGraphFilterTaps
self.ID_agent = self.config.id_chosenAgent
data_contents = sio.loadmat(config.GSO)
self.GSO = np.transpose(data_contents["gso"], (2, 3, 0, 1)).squeeze(3)
self.commRadius = data_contents["commRadius"]
self.maxLink = 500
aspect = self.data_map["map"]["dimensions"][0] / self.data_map["map"]["dimensions"][1]
self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))
self.ax = self.fig.add_subplot(111, aspect='equal')
self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None)
# self.ax.set_frame_on(False)
self.patches = []
self.artists = []
self.agents = dict()
self.commLink = dict()
self.agent_names = dict()
# self.list_color = self.get_cmap(self.num_agents)
self.list_color = sns.color_palette("hls", self.num_agents)
self.list_color_commLink = sns.color_palette("hls", 8) # self.K)
self.list_commLinkStyle = list(lines.lineStyles.keys())
# create boundary patch
xmin = -0.5
ymin = -0.5
xmax = self.data_map["map"]["dimensions"][0] - 0.5
ymax = self.data_map["map"]["dimensions"][1] - 0.5
# self.ax.relim()
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# self.ax.set_xticks([])
# self.ax.set_yticks([])
# plt.axis('off')
# self.ax.axis('tight')
# self.ax.axis('off')
self.patches.append(Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='black'))
for o in self.data_map["map"]["obstacles"]:
x, y = o[0], o[1]
self.patches.append(Rectangle((x - 0.5, y - 0.5), 1, 1, facecolor='black', edgecolor='black'))
# initialize communication Link
for id_link in range(self.maxLink):
#https://matplotlib.org/api/artist_api.html#module-matplotlib.lines
name_link = "{}".format(id_link)
# self.commLink[name_link] = FancyArrowPatch((0,0), (0,0),linewidth=2)
self.commLink[name_link] = plt.Line2D((0, 0), (0, 0), linewidth=2)
self.artists.append(self.commLink[name_link])
# print(self.schedule["schedule"])
# create agents:
self.T = 0
# draw goals first
for d, i in zip(self.data_map["agents"], range(0, self.num_agents)):
self.patches.append(
Rectangle((d["goal"][0] - 0.25, d["goal"][1] - 0.25), 0.6, 0.6, facecolor=self.list_color[i],
edgecolor=self.list_color[i], alpha=0.5))
for d, i in zip(self.data_map["agents"], range(0, self.num_agents)):
#https://matplotlib.org/api/artist_api.html#module-matplotlib.lines
name = d["name"]
self.agents[name] = Circle((d["start"][0], d["start"][1]), 0.4, facecolor=self.list_color[i],
edgecolor=self.list_color[i])
self.agents[name].original_face_color = self.list_color[i]
self.patches.append(self.agents[name])
self.T = max(self.T, self.schedule["schedule"][name][-1]["t"])
# set floating ID
self.agent_names[name] = self.ax.text(d["start"][0], d["start"][1], name.replace('agent', ''))
self.agent_names[name].set_horizontalalignment('center')
self.agent_names[name].set_verticalalignment('center')
self.artists.append(self.agent_names[name])
# self.ax.add_line(dotted_line)
# self.ax.set_axis_off()
# self.fig.axes[0].set_visible(False)
# self.fig.axes.get_yaxis().set_visible(False)
# self.fig.tight_layout()
self.anim = animation.FuncAnimation(self.fig, self.animate_func,
init_func=self.init_func,
frames=int(self.T + 1) * 10,
interval=100,
blit=True)
def get_cmap(self, n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
def save(self, file_name, speed):
self.anim.save(
file_name,
"ffmpeg",
fps=10 * speed,
dpi=200),8
# savefig_kwargs={"pad_inches": 0, "bbox_inches": "tight"})
def show(self):
plt.show()
def init_func(self):
for p in self.patches:
self.ax.add_patch(p)
for a in self.artists:
self.ax.add_artist(a)
return self.patches + self.artists
# def find_neighours(self, ID_selected_agent, step, level, max_level=self.K):
def get_currentGSO(self, step):
# module to get GSO
# print(self.GSO.shape)
GSO_current = self.GSO[:, :, step]
# print(GSO_current.shape)
gso_up_diag = np.triu(GSO_current)
# print(gso_up)
# return gso_up_diag
return GSO_current
def update_gso(self, gso_tmp, id_chosenAgent, id_neighborAgent):
gso_tmp[id_chosenAgent, id_neighborAgent] = 0
gso_tmp[id_neighborAgent, id_chosenAgent] = 0
return gso_tmp
def find_neighours(self, gso, id_chosenAgent):
# print(id_chosenAgent)
# print(gso)
ID_neighbor_robot = gso[id_chosenAgent,:].nonzero()[0]
# print(gso_up[ID_selected_agent,:])
# print(ID_neighbor_robot)
return ID_neighbor_robot, ID_neighbor_robot.shape[0]
def build_comm_link(self, store_list_line, gso, id_chosenAgent, index_hop):
if index_hop >= self.K:
# print('\n {}\n'.format(store_list_line))
return store_list_line
else:
# status_agent_currentHop = agents_array[id_chosenAgent]
id_neighbor_robot, num_neighbor = self.find_neighours(gso, id_chosenAgent)
# pos_agent_currentHop_array = np.array(status_agent_currentHop.center)
# repeat until K
for index in range(num_neighbor):
id_neighbor = id_neighbor_robot[index]
# status_agent_nextHop = agents_array[id_neighbor]
# pos_agent_nextHop_array = np.array(status_agent_nextHop.center)
# draw line (pos1,pos2)
# print('#### current hop {} / {}'.format(index_hop+1,self.K))
# print('\t {} <- \t{}'.format(id_chosenAgent, id_neighbor))
# print('\t {} <- \t{}'.format(status_agent_currentHop, status_agent_nextHop))
# posX_agent = (pos_agent_currentHop_array[0], pos_agent_nextHop_array[0])
# posY_agent = (pos_agent_currentHop_array[1], pos_agent_nextHop_array[1])
line = (index_hop + 1,index_hop-1, (id_chosenAgent, id_neighbor))
name_line = '{}-{}'.format(id_chosenAgent, id_neighbor)
store_list_line.update({name_line:line})
gso_new = self.update_gso(gso,id_chosenAgent,id_neighbor)
store_list_line = self.build_comm_link(store_list_line, gso_new, id_neighbor, index_hop+1)
return store_list_line
def get_linkPos(self,agents_array,id_chosenAgent,id_neighbor):
status_agent_currentHop = agents_array[id_chosenAgent]
pos_agent_currentHop_array = np.array(status_agent_currentHop.center)
status_agent_nextHop = agents_array[id_neighbor]
pos_agent_nextHop_array = np.array(status_agent_nextHop.center)
posX_agent = (pos_agent_currentHop_array[0], pos_agent_nextHop_array[0])
posY_agent = (pos_agent_currentHop_array[1], pos_agent_nextHop_array[1])
return (posX_agent, posY_agent)
def animate_func(self, i):
currentStep = i//10
if i%10 == 0:
gso_current = self.get_currentGSO(currentStep)
self.list_line = self.build_comm_link({}, gso_current, self.ID_agent, 1)
# print(self.list_line)
# print("time-frame:{}/{} - step:{}".format(i,int(self.T + 1) * 10, currentStep))
for agent_name in self.schedule["schedule"]:
agent = self.schedule["schedule"][agent_name]
# print(agent)
pos = self.getState(i / 10, agent)
p = (pos[0], pos[1])
self.agents[agent_name].center = p
self.agent_names[agent_name].set_position(p)
# reset all colors
for _, agent in self.agents.items():
agent.set_facecolor(agent.original_face_color)
# build communcation link
agents_array = [agent for _, agent in self.agents.items()]
id_link = 0
for key_link, line_info in self.list_line.items():
name_link = "{}".format(id_link)
index_hop, index_style, (id_chosenAgent, id_neighbor) = line_info
pos = self.get_linkPos(agents_array, id_chosenAgent, id_neighbor)
self.commLink[name_link].set_data(pos)
self.commLink[name_link].set_color(self.list_color_commLink[index_style])
self.commLink[name_link].set_linestyle(self.list_commLinkStyle[index_style])
# print(self.list_commLinkStyle[index_hop-2])
# print("{}/{}- {} - {}".format(index_hop, self.K, key_link, self.commLink[name_link]._posA_posB))
id_link += 1
id_link_reset = id_link
for id_link_rest in range(id_link_reset, self.maxLink):
name_link = "{}".format(id_link_rest)
self.commLink[name_link].set_data((0, 0), (0, 0))
# check drive-drive collisions
for id_m in range(0, len(agents_array)):
for id_n in range(id_m + 1, len(agents_array)):
# print(i,j)
d1 = agents_array[id_m]
d2 = agents_array[id_n]
pos1 = np.array(d1.center)
pos2 = np.array(d2.center)
# plt.plot(pos1, pos2, 'ro-')
if np.linalg.norm(pos1 - pos2) < 0.7:
d1.set_facecolor('red')
d2.set_facecolor('red')
print("COLLISION! (agent-agent) ({}, {})".format(id_m, id_n))
return self.patches + self.artists
def getState(self, t, d):
idx = 0
while idx < len(d) and d[idx]["t"] < t:
idx += 1
if idx == 0:
return np.array([float(d[0]["x"]), float(d[0]["y"])])
elif idx < len(d):
posLast = np.array([float(d[idx - 1]["x"]), float(d[idx - 1]["y"])])
posNext = np.array([float(d[idx]["x"]), float(d[idx]["y"])])
else:
return np.array([float(d[-1]["x"]), float(d[-1]["y"])])
dt = d[idx]["t"] - d[idx - 1]["t"]
t = (t - d[idx - 1]["t"]) / dt
pos = (posNext - posLast) * t + posLast
# print(pos)
return pos
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--local_dir', default='/Users/vtek/gnn_pathplanning/')
parser.add_argument('--nGraphFilterTaps', type=int, default=3)
parser.add_argument('--type')
parser.add_argument('--caseId', default='00000')
parser.add_argument("--speed", type=int, default=2, help="speedup-factor")
parser.add_argument('--log_time_trained', type=str, default='0')
parser.add_argument('--id_chosenAgent', type=int, default=0)
parser.add_argument('--failure_case', type=bool, default=False)
parser.add_argument('--name', default=None)
args = parser.parse_args()
if args.failure_case:
case_type = 'failure'
else:
case_type = 'success'
base_dir = args.local_dir + 'Results_best/AnimeDemo/{}/map20x20_rho1_10Agent/K{}_HS0/TR_M20p1_10Agent/{}/commR_6/'.format(args.type, args.nGraphFilterTaps,args.log_time_trained)
args.map = base_dir + 'input/{}Cases_ID{}.yaml'.format(case_type, args.caseId)
args.schedule = base_dir+'predict_{}/{}Cases_ID{}.yaml'.format(case_type,case_type,args.caseId)
args.GSO = base_dir+'GSO/{}Cases_ID{}.mat'.format(case_type,args.caseId)
if args.name:
args.video = args.local_dir + 'Results_best/' + '/video_K{}_{}_{}.mp4'.format(args.nGraphFilterTaps, args.type, args.name)
else:
args.video = args.local_dir + 'Results_best/' + '/video_K{}_{}.mp4'.format(args.nGraphFilterTaps, args.type)
animation = Animation(args)
if args.video:
print("Starting!")
animation.save(args.video, args.speed)
print("Ending!")
else:
animation.show()
| 40.150888
| 181
| 0.603419
|
import yaml
import matplotlib
from matplotlib.patches import Circle, Rectangle, Arrow
from matplotlib.collections import PatchCollection
from matplotlib.patches import ConnectionPatch
from matplotlib.patches import FancyArrowPatch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib import lines
import matplotlib.animation as manimation
import argparse
import math
import gc
import seaborn as sns
import time
import scipy.io as sio
import sys
np.set_printoptions(threshold=np.inf)
class Animation:
def __init__(self, config):
self.config = config
with open(config.map) as map_file:
self.data_map = yaml.load(map_file)
with open(config.schedule) as states_file:
self.schedule = yaml.load(states_file)
self.num_agents = len(self.data_map["agents"])
self.K = self.config.nGraphFilterTaps
self.ID_agent = self.config.id_chosenAgent
data_contents = sio.loadmat(config.GSO)
self.GSO = np.transpose(data_contents["gso"], (2, 3, 0, 1)).squeeze(3)
self.commRadius = data_contents["commRadius"]
self.maxLink = 500
aspect = self.data_map["map"]["dimensions"][0] / self.data_map["map"]["dimensions"][1]
self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))
self.ax = self.fig.add_subplot(111, aspect='equal')
self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None)
self.patches = []
self.artists = []
self.agents = dict()
self.commLink = dict()
self.agent_names = dict()
self.list_color = sns.color_palette("hls", self.num_agents)
self.list_color_commLink = sns.color_palette("hls", 8)
self.list_commLinkStyle = list(lines.lineStyles.keys())
xmin = -0.5
ymin = -0.5
xmax = self.data_map["map"]["dimensions"][0] - 0.5
ymax = self.data_map["map"]["dimensions"][1] - 0.5
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
self.patches.append(Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='black'))
for o in self.data_map["map"]["obstacles"]:
x, y = o[0], o[1]
self.patches.append(Rectangle((x - 0.5, y - 0.5), 1, 1, facecolor='black', edgecolor='black'))
for id_link in range(self.maxLink):
"{}".format(id_link)
self.commLink[name_link] = plt.Line2D((0, 0), (0, 0), linewidth=2)
self.artists.append(self.commLink[name_link])
self.T = 0
for d, i in zip(self.data_map["agents"], range(0, self.num_agents)):
self.patches.append(
Rectangle((d["goal"][0] - 0.25, d["goal"][1] - 0.25), 0.6, 0.6, facecolor=self.list_color[i],
edgecolor=self.list_color[i], alpha=0.5))
for d, i in zip(self.data_map["agents"], range(0, self.num_agents)):
ame"]
self.agents[name] = Circle((d["start"][0], d["start"][1]), 0.4, facecolor=self.list_color[i],
edgecolor=self.list_color[i])
self.agents[name].original_face_color = self.list_color[i]
self.patches.append(self.agents[name])
self.T = max(self.T, self.schedule["schedule"][name][-1]["t"])
self.agent_names[name] = self.ax.text(d["start"][0], d["start"][1], name.replace('agent', ''))
self.agent_names[name].set_horizontalalignment('center')
self.agent_names[name].set_verticalalignment('center')
self.artists.append(self.agent_names[name])
self.anim = animation.FuncAnimation(self.fig, self.animate_func,
init_func=self.init_func,
frames=int(self.T + 1) * 10,
interval=100,
blit=True)
def get_cmap(self, n, name='hsv'):
return plt.cm.get_cmap(name, n)
def save(self, file_name, speed):
self.anim.save(
file_name,
"ffmpeg",
fps=10 * speed,
dpi=200),8
def show(self):
plt.show()
def init_func(self):
for p in self.patches:
self.ax.add_patch(p)
for a in self.artists:
self.ax.add_artist(a)
return self.patches + self.artists
def get_currentGSO(self, step):
GSO_current = self.GSO[:, :, step]
gso_up_diag = np.triu(GSO_current)
return GSO_current
def update_gso(self, gso_tmp, id_chosenAgent, id_neighborAgent):
gso_tmp[id_chosenAgent, id_neighborAgent] = 0
gso_tmp[id_neighborAgent, id_chosenAgent] = 0
return gso_tmp
def find_neighours(self, gso, id_chosenAgent):
ID_neighbor_robot = gso[id_chosenAgent,:].nonzero()[0]
return ID_neighbor_robot, ID_neighbor_robot.shape[0]
def build_comm_link(self, store_list_line, gso, id_chosenAgent, index_hop):
if index_hop >= self.K:
return store_list_line
else:
id_neighbor_robot, num_neighbor = self.find_neighours(gso, id_chosenAgent)
for index in range(num_neighbor):
id_neighbor = id_neighbor_robot[index]
line = (index_hop + 1,index_hop-1, (id_chosenAgent, id_neighbor))
name_line = '{}-{}'.format(id_chosenAgent, id_neighbor)
store_list_line.update({name_line:line})
gso_new = self.update_gso(gso,id_chosenAgent,id_neighbor)
store_list_line = self.build_comm_link(store_list_line, gso_new, id_neighbor, index_hop+1)
return store_list_line
def get_linkPos(self,agents_array,id_chosenAgent,id_neighbor):
status_agent_currentHop = agents_array[id_chosenAgent]
pos_agent_currentHop_array = np.array(status_agent_currentHop.center)
status_agent_nextHop = agents_array[id_neighbor]
pos_agent_nextHop_array = np.array(status_agent_nextHop.center)
posX_agent = (pos_agent_currentHop_array[0], pos_agent_nextHop_array[0])
posY_agent = (pos_agent_currentHop_array[1], pos_agent_nextHop_array[1])
return (posX_agent, posY_agent)
def animate_func(self, i):
currentStep = i//10
if i%10 == 0:
gso_current = self.get_currentGSO(currentStep)
self.list_line = self.build_comm_link({}, gso_current, self.ID_agent, 1)
for agent_name in self.schedule["schedule"]:
agent = self.schedule["schedule"][agent_name]
pos = self.getState(i / 10, agent)
p = (pos[0], pos[1])
self.agents[agent_name].center = p
self.agent_names[agent_name].set_position(p)
for _, agent in self.agents.items():
agent.set_facecolor(agent.original_face_color)
agents_array = [agent for _, agent in self.agents.items()]
id_link = 0
for key_link, line_info in self.list_line.items():
name_link = "{}".format(id_link)
index_hop, index_style, (id_chosenAgent, id_neighbor) = line_info
pos = self.get_linkPos(agents_array, id_chosenAgent, id_neighbor)
self.commLink[name_link].set_data(pos)
self.commLink[name_link].set_color(self.list_color_commLink[index_style])
self.commLink[name_link].set_linestyle(self.list_commLinkStyle[index_style])
id_link += 1
id_link_reset = id_link
for id_link_rest in range(id_link_reset, self.maxLink):
name_link = "{}".format(id_link_rest)
self.commLink[name_link].set_data((0, 0), (0, 0))
for id_m in range(0, len(agents_array)):
for id_n in range(id_m + 1, len(agents_array)):
d1 = agents_array[id_m]
d2 = agents_array[id_n]
pos1 = np.array(d1.center)
pos2 = np.array(d2.center)
if np.linalg.norm(pos1 - pos2) < 0.7:
d1.set_facecolor('red')
d2.set_facecolor('red')
print("COLLISION! (agent-agent) ({}, {})".format(id_m, id_n))
return self.patches + self.artists
def getState(self, t, d):
idx = 0
while idx < len(d) and d[idx]["t"] < t:
idx += 1
if idx == 0:
return np.array([float(d[0]["x"]), float(d[0]["y"])])
elif idx < len(d):
posLast = np.array([float(d[idx - 1]["x"]), float(d[idx - 1]["y"])])
posNext = np.array([float(d[idx]["x"]), float(d[idx]["y"])])
else:
return np.array([float(d[-1]["x"]), float(d[-1]["y"])])
dt = d[idx]["t"] - d[idx - 1]["t"]
t = (t - d[idx - 1]["t"]) / dt
pos = (posNext - posLast) * t + posLast
return pos
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--local_dir', default='/Users/vtek/gnn_pathplanning/')
parser.add_argument('--nGraphFilterTaps', type=int, default=3)
parser.add_argument('--type')
parser.add_argument('--caseId', default='00000')
parser.add_argument("--speed", type=int, default=2, help="speedup-factor")
parser.add_argument('--log_time_trained', type=str, default='0')
parser.add_argument('--id_chosenAgent', type=int, default=0)
parser.add_argument('--failure_case', type=bool, default=False)
parser.add_argument('--name', default=None)
args = parser.parse_args()
if args.failure_case:
case_type = 'failure'
else:
case_type = 'success'
base_dir = args.local_dir + 'Results_best/AnimeDemo/{}/map20x20_rho1_10Agent/K{}_HS0/TR_M20p1_10Agent/{}/commR_6/'.format(args.type, args.nGraphFilterTaps,args.log_time_trained)
args.map = base_dir + 'input/{}Cases_ID{}.yaml'.format(case_type, args.caseId)
args.schedule = base_dir+'predict_{}/{}Cases_ID{}.yaml'.format(case_type,case_type,args.caseId)
args.GSO = base_dir+'GSO/{}Cases_ID{}.mat'.format(case_type,args.caseId)
if args.name:
args.video = args.local_dir + 'Results_best/' + '/video_K{}_{}_{}.mp4'.format(args.nGraphFilterTaps, args.type, args.name)
else:
args.video = args.local_dir + 'Results_best/' + '/video_K{}_{}.mp4'.format(args.nGraphFilterTaps, args.type)
animation = Animation(args)
if args.video:
print("Starting!")
animation.save(args.video, args.speed)
print("Ending!")
else:
animation.show()
| true
| true
|
1c4a8d92aa7936b2472071f2adda42a56ce328db
| 88,007
|
py
|
Python
|
src/testers/unicorn_test_aarch64.py
|
TehRick/Triton
|
397e42edb2fb7c3fea12be22f70c44e8d0859d57
|
[
"Apache-2.0"
] | null | null | null |
src/testers/unicorn_test_aarch64.py
|
TehRick/Triton
|
397e42edb2fb7c3fea12be22f70c44e8d0859d57
|
[
"Apache-2.0"
] | null | null | null |
src/testers/unicorn_test_aarch64.py
|
TehRick/Triton
|
397e42edb2fb7c3fea12be22f70c44e8d0859d57
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
## -*- coding: utf-8 -*-
import sys
import pprint
from triton import *
from unicorn import *
from unicorn.arm64_const import *
# Memory-layout constants for the Unicorn/Triton AArch64 test harness.
# NOTE(review): their exact use is outside this view — presumably ADDR is where
# the test code is mapped, STACK/HEAP are data regions (several CODE entries
# below load #0x20/#0x30 lsl #16 as STACK/HEAP addresses), and SIZE is the
# mapping size (5 MiB); confirm against the mapping/setup code.
ADDR = 0x100000
STACK = 0x200000
HEAP = 0x300000
SIZE = 5 * 1024 * 1024
CODE = [
("\x80\x46\x82\xd2", "movz x0, #0x1234"),
("\x80\x46\xa2\xd2", "movz x0, #0x1234, lsl #16"),
("\x80\x46\xc2\xd2", "movz x0, #0x1234, lsl #32"),
("\x80\x46\xe2\xd2", "movz x0, #0x1234, lsl #48"),
("\x21\x64\x88\xd2", "movz x1, #0x4321"),
("\x21\x64\xa8\xd2", "movz x1, #0x4321, lsl #16"),
("\x21\x64\xc8\xd2", "movz x1, #0x4321, lsl #32"),
("\x21\x64\xe8\xd2", "movz x1, #0x4321, lsl #48"),
("\x21\x64\xe8\xd2", "movz x1, #0x4321, lsl #48"),
("\x21\x64\xc8\xd2", "movz x1, #0x4321, lsl #32"),
("\x21\x64\xa8\xd2", "movz x1, #0x4321, lsl #16"),
("\x21\x64\x88\xf2", "movk x1, #0x4321"),
("\x81\x46\xa2\xf2", "movk x1, #0x1234, lsl #16"),
("\x81\x04\xcf\xf2", "movk x1, #0x7824, lsl #32"),
("\x61\x8a\xf2\xf2", "movk x1, #0x9453, lsl #48"),
("\xe0\xcc\x8c\x52", "movz w0, #0x6667"),
("\xc0\xcc\xac\x72", "movk w0, #0x6666, lsl #16"),
("\x1f\x20\x03\xd5", "nop"),
("\x1f\x20\x03\xd5", "nop"),
("\x1f\x20\x03\xd5", "nop"),
("\x60\x00\x02\x8b", "add x0, x3, x2"),
("\x20\x00\x02\x8b", "add x0, x1, x2"),
("\x80\x46\xa2\xd2", "movz x0, #0x1234, lsl #16"),
("\x00\x00\x00\x8b", "add x0, x0, x0"),
("\x60\xc0\x22\x8b", "add x0, x3, w2, sxtw"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\x8b", "add x0, x1, w2, sxtb"),
("\x20\xa0\x22\x8b", "add x0, x1, w2, sxth"),
("\x20\xc0\x22\x8b", "add x0, x1, w2, sxtw"),
("\x20\xe0\x22\x8b", "add x0, x1, x2, sxtx"),
("\x20\x00\x02\x8b", "add x0, x1, x2, lsl #0"),
("\x20\x04\x02\x8b", "add x0, x1, x2, lsl #1"),
("\x20\x20\x02\x8b", "add x0, x1, x2, lsl #8"),
("\x20\x40\x02\x8b", "add x0, x1, x2, lsl #16"),
("\x20\x80\x02\x8b", "add x0, x1, x2, lsl #32"),
("\x20\x84\x02\x8b", "add x0, x1, x2, lsl #33"),
("\x20\x88\x02\x8b", "add x0, x1, x2, lsl #34"),
("\x20\x00\x42\x8b", "add x0, x1, x2, lsr #0"),
("\x20\x04\x42\x8b", "add x0, x1, x2, lsr #1"),
("\x20\x20\x42\x8b", "add x0, x1, x2, lsr #8"),
("\x20\x40\x42\x8b", "add x0, x1, x2, lsr #16"),
("\x20\x80\x42\x8b", "add x0, x1, x2, lsr #32"),
("\x20\x84\x42\x8b", "add x0, x1, x2, lsr #33"),
("\x20\x88\x42\x8b", "add x0, x1, x2, lsr #34"),
("\x20\x20\x82\x8b", "add x0, x1, x2, asr #8"),
("\x20\x40\x82\x8b", "add x0, x1, x2, asr #16"),
("\x20\x80\x82\x8b", "add x0, x1, x2, asr #32"),
("\x20\x84\x82\x8b", "add x0, x1, x2, asr #33"),
("\x20\x88\x82\x8b", "add x0, x1, x2, asr #34"),
("\x20\x88\x82\x8b", "add x0, x1, x2, asr #34"),
("\x20\x88\x19\x91", "add x0, x1, #1634"),
("\x20\x58\x21\x91", "add x0, x1, #2134"),
("\x20\x58\x61\x91", "add x0, x1, #2134, lsl #12"),
("\x3f\x60\x22\x8b", "add sp, x1, x2"),
("\x60\x00\x02\xab", "adds x0, x3, x2"),
("\x20\x00\x02\xab", "adds x0, x1, x2"),
("\x80\x46\xa2\xd2", "movz x0, #0x1234, lsl #16"),
("\x00\x00\x00\xab", "adds x0, x0, x0"),
("\x60\xc0\x22\xab", "adds x0, x3, w2, sxtw"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\xab", "adds x0, x1, w2, sxtb"),
("\x20\xa0\x22\xab", "adds x0, x1, w2, sxth"),
("\x20\xc0\x22\xab", "adds x0, x1, w2, sxtw"),
("\x20\xe0\x22\xab", "adds x0, x1, x2, sxtx"),
("\x20\x00\x02\xab", "adds x0, x1, x2, lsl #0"),
("\x20\x04\x02\xab", "adds x0, x1, x2, lsl #1"),
("\x20\x20\x02\xab", "adds x0, x1, x2, lsl #8"),
("\x20\x40\x02\xab", "adds x0, x1, x2, lsl #16"),
("\x20\x80\x02\xab", "adds x0, x1, x2, lsl #32"),
("\x20\x84\x02\xab", "adds x0, x1, x2, lsl #33"),
("\x20\x88\x02\xab", "adds x0, x1, x2, lsl #34"),
("\x20\x00\x42\xab", "adds x0, x1, x2, lsr #0"),
("\x20\x04\x42\xab", "adds x0, x1, x2, lsr #1"),
("\x20\x20\x42\xab", "adds x0, x1, x2, lsr #8"),
("\x20\x40\x42\xab", "adds x0, x1, x2, lsr #16"),
("\x20\x80\x42\xab", "adds x0, x1, x2, lsr #32"),
("\x20\x84\x42\xab", "adds x0, x1, x2, lsr #33"),
("\x20\x88\x42\xab", "adds x0, x1, x2, lsr #34"),
("\x20\x20\x82\xab", "adds x0, x1, x2, asr #8"),
("\x20\x40\x82\xab", "adds x0, x1, x2, asr #16"),
("\x20\x80\x82\xab", "adds x0, x1, x2, asr #32"),
("\x20\x84\x82\xab", "adds x0, x1, x2, asr #33"),
("\x20\x88\x82\xab", "adds x0, x1, x2, asr #34"),
("\x20\x88\x82\xab", "adds x0, x1, x2, asr #34"),
("\x20\x88\x19\xb1", "adds x0, x1, #1634"),
("\x20\x58\x21\xb1", "adds x0, x1, #2134"),
("\x20\x58\x61\xb1", "adds x0, x1, #2134, lsl #12"),
("\x00\x00\x00\xab", "adds x0, x0, x0"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x00\x04\x00\xb1", "adds x0, x0, #1"),
("\x20\x00\x02\x9a", "adc x0, x1, x2"),
("\x20\x00\x02\x1a", "adc w0, w1, w2"),
("\x20\x1a\x09\x30", "adr x0, #0x12345"),
("\xe1\xff\x7f\x70", "adr x1, #0xfffff"),
("\xc1\x7c\x00\xd0", "adrp x1, #0xf9a000"),
("\x41\x0c\x00\xf0", "adrp x1, #0x18b000"),
("\xe1\xff\x9f\xd2", "movz x1, #0xffff"),
("\x22\x00\x80\xd2", "movz x2, #0x1"),
("\x20\x1c\x40\x92", "and x0, x1, #0xff"),
("\x20\x00\x40\x92", "and x0, x1, #0x01"),
("\x20\x00\x7c\x92", "and x0, x1, #0x10"),
("\x20\x00\x02\x8a", "and x0, x1, x2"),
("\x20\x04\x02\x8a", "and x0, x1, x2, lsl #1"),
("\x20\x08\x02\x8a", "and x0, x1, x2, lsl #2"),
("\x20\x0c\x02\x8a", "and x0, x1, x2, lsl #3"),
("\x20\x10\x02\x8a", "and x0, x1, x2, lsl #4"),
("\x20\x1c\x40\xf2", "ands x0, x1, #0xff"),
("\x20\x00\x40\xf2", "ands x0, x1, #0x01"),
("\x20\x00\x7c\xf2", "ands x0, x1, #0x10"),
("\x20\x00\x02\xea", "ands x0, x1, x2"),
("\x20\x04\x02\xea", "ands x0, x1, x2, lsl #1"),
("\x20\x08\x02\xea", "ands x0, x1, x2, lsl #2"),
("\x20\x0c\x02\xea", "ands x0, x1, x2, lsl #3"),
("\x20\x10\x02\xea", "ands x0, x1, x2, lsl #4"),
("\x3f\x1c\x40\xf2", "tst x1, #0xff"),
("\x3f\x00\x40\xf2", "tst x1, #0x01"),
("\x3f\x00\x7c\xf2", "tst x1, #0x10"),
("\x3f\x00\x02\xea", "tst x1, x2"),
("\x3f\x04\x02\xea", "tst x1, x2, lsl #1"),
("\x3f\x08\x02\xea", "tst x1, x2, lsl #2"),
("\x3f\x0c\x02\xea", "tst x1, x2, lsl #3"),
("\x3f\x10\x02\xea", "tst x1, x2, lsl #4"),
("\x20\xfc\x41\x93", "asr x0, x1, #1"),
("\x20\xfc\x42\x93", "asr x0, x1, #2"),
("\x20\xfc\x43\x93", "asr x0, x1, #3"),
("\x20\xfc\x44\x93", "asr x0, x1, #4"),
("\x20\xfc\x44\x93", "asr x0, x1, #4"),
("\x20\xfc\x7f\x93", "asr x0, x1, #63"),
("\xe1\xff\x9f\xd2", "movz x1, #0xffff"),
("\x22\x00\x80\xd2", "movz x2, #0x1"),
("\x20\x28\xc2\x9a", "asr x0, x1, x2"),
("\x42\x00\x80\xd2", "movz x2, #0x2"),
("\x20\x28\xc2\x9a", "asr x0, x1, x2"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\xcb", "sub x0, x1, w2, sxtb"),
("\x20\xa0\x22\xcb", "sub x0, x1, w2, sxth"),
("\x20\xc0\x22\xcb", "sub x0, x1, w2, sxtw"),
("\x20\xe0\x22\xcb", "sub x0, x1, x2, sxtx"),
("\x20\x00\x02\xcb", "sub x0, x1, x2, lsl #0"),
("\x20\x04\x02\xcb", "sub x0, x1, x2, lsl #1"),
("\x20\x20\x02\xcb", "sub x0, x1, x2, lsl #8"),
("\x20\x40\x02\xcb", "sub x0, x1, x2, lsl #16"),
("\x20\x80\x02\xcb", "sub x0, x1, x2, lsl #32"),
("\x20\x84\x02\xcb", "sub x0, x1, x2, lsl #33"),
("\x20\x88\x02\xcb", "sub x0, x1, x2, lsl #34"),
("\x20\x00\x42\xcb", "sub x0, x1, x2, lsr #0"),
("\x20\x04\x42\xcb", "sub x0, x1, x2, lsr #1"),
("\x20\x20\x42\xcb", "sub x0, x1, x2, lsr #8"),
("\x20\x40\x42\xcb", "sub x0, x1, x2, lsr #16"),
("\x20\x80\x42\xcb", "sub x0, x1, x2, lsr #32"),
("\x20\x84\x42\xcb", "sub x0, x1, x2, lsr #33"),
("\x20\x88\x42\xcb", "sub x0, x1, x2, lsr #34"),
("\x20\x20\x82\xcb", "sub x0, x1, x2, asr #8"),
("\x20\x40\x82\xcb", "sub x0, x1, x2, asr #16"),
("\x20\x80\x82\xcb", "sub x0, x1, x2, asr #32"),
("\x20\x84\x82\xcb", "sub x0, x1, x2, asr #33"),
("\x20\x88\x82\xcb", "sub x0, x1, x2, asr #34"),
("\x20\x88\x82\xcb", "sub x0, x1, x2, asr #34"),
("\x20\x88\x19\xd1", "sub x0, x1, #1634"),
("\x20\x58\x21\xd1", "sub x0, x1, #2134"),
("\x20\x58\x61\xd1", "sub x0, x1, #2134, lsl #12"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\xeb", "subs x0, x1, w2, sxtb"),
("\x20\xa0\x22\xeb", "subs x0, x1, w2, sxth"),
("\x20\xc0\x22\xeb", "subs x0, x1, w2, sxtw"),
("\x20\xe0\x22\xeb", "subs x0, x1, x2, sxtx"),
("\x20\x00\x02\xeb", "subs x0, x1, x2, lsl #0"),
("\x20\x04\x02\xeb", "subs x0, x1, x2, lsl #1"),
("\x20\x20\x02\xeb", "subs x0, x1, x2, lsl #8"),
("\x20\x40\x02\xeb", "subs x0, x1, x2, lsl #16"),
("\x20\x80\x02\xeb", "subs x0, x1, x2, lsl #32"),
("\x20\x84\x02\xeb", "subs x0, x1, x2, lsl #33"),
("\x20\x88\x02\xeb", "subs x0, x1, x2, lsl #34"),
("\x20\x00\x42\xeb", "subs x0, x1, x2, lsr #0"),
("\x20\x04\x42\xeb", "subs x0, x1, x2, lsr #1"),
("\x20\x20\x42\xeb", "subs x0, x1, x2, lsr #8"),
("\x20\x40\x42\xeb", "subs x0, x1, x2, lsr #16"),
("\x20\x80\x42\xeb", "subs x0, x1, x2, lsr #32"),
("\x20\x84\x42\xeb", "subs x0, x1, x2, lsr #33"),
("\x20\x88\x42\xeb", "subs x0, x1, x2, lsr #34"),
("\x20\x20\x82\xeb", "subs x0, x1, x2, asr #8"),
("\x20\x40\x82\xeb", "subs x0, x1, x2, asr #16"),
("\x20\x80\x82\xeb", "subs x0, x1, x2, asr #32"),
("\x20\x84\x82\xeb", "subs x0, x1, x2, asr #33"),
("\x20\x88\x82\xeb", "subs x0, x1, x2, asr #34"),
("\x20\x88\x82\xeb", "subs x0, x1, x2, asr #34"),
("\x20\x88\x19\xf1", "subs x0, x1, #1634"),
("\x20\x58\x21\xf1", "subs x0, x1, #2134"),
("\x20\x58\x61\xf1", "subs x0, x1, #2134, lsl #12"),
("\x20\x00\x02\xca", "eor x0, x1, x2, lsl #0"),
("\x20\x04\x02\xca", "eor x0, x1, x2, lsl #1"),
("\x20\x20\x02\xca", "eor x0, x1, x2, lsl #8"),
("\x20\x40\x02\xca", "eor x0, x1, x2, lsl #16"),
("\x20\x80\x02\xca", "eor x0, x1, x2, lsl #32"),
("\x20\x84\x02\xca", "eor x0, x1, x2, lsl #33"),
("\x20\x88\x02\xca", "eor x0, x1, x2, lsl #34"),
("\x20\x00\x42\xca", "eor x0, x1, x2, lsr #0"),
("\x20\x04\x42\xca", "eor x0, x1, x2, lsr #1"),
("\x20\x20\x42\xca", "eor x0, x1, x2, lsr #8"),
("\x20\x40\x42\xca", "eor x0, x1, x2, lsr #16"),
("\x20\x80\x42\xca", "eor x0, x1, x2, lsr #32"),
("\x20\x84\x42\xca", "eor x0, x1, x2, lsr #33"),
("\x20\x88\x42\xca", "eor x0, x1, x2, lsr #34"),
("\x20\x20\x82\xca", "eor x0, x1, x2, asr #8"),
("\x20\x40\x82\xca", "eor x0, x1, x2, asr #16"),
("\x20\x80\x82\xca", "eor x0, x1, x2, asr #32"),
("\x20\x84\x82\xca", "eor x0, x1, x2, asr #33"),
("\x20\x88\x82\xca", "eor x0, x1, x2, asr #34"),
("\x20\x88\x82\xca", "eor x0, x1, x2, asr #34"),
("\x20\x1c\x40\xd2", "eor x0, x1, #255"),
("\x20\x18\x40\xd2", "eor x0, x1, #0x7f"),
("\x20\x00\x40\xd2", "eor x0, x1, #1"),
("\x20\x00\x22\xca", "eon x0, x1, x2, lsl #0"),
("\x20\x04\x22\xca", "eon x0, x1, x2, lsl #1"),
("\x20\x20\x22\xca", "eon x0, x1, x2, lsl #8"),
("\x20\x40\x22\xca", "eon x0, x1, x2, lsl #16"),
("\x20\x80\x22\xca", "eon x0, x1, x2, lsl #32"),
("\x20\x84\x22\xca", "eon x0, x1, x2, lsl #33"),
("\x20\x88\x22\xca", "eon x0, x1, x2, lsl #34"),
("\x20\x00\x62\xca", "eon x0, x1, x2, lsr #0"),
("\x20\x04\x62\xca", "eon x0, x1, x2, lsr #1"),
("\x20\x20\x62\xca", "eon x0, x1, x2, lsr #8"),
("\x20\x40\x62\xca", "eon x0, x1, x2, lsr #16"),
("\x20\x80\x62\xca", "eon x0, x1, x2, lsr #32"),
("\x20\x84\x62\xca", "eon x0, x1, x2, lsr #33"),
("\x20\x88\x62\xca", "eon x0, x1, x2, lsr #34"),
("\x20\x20\xa2\xca", "eon x0, x1, x2, asr #8"),
("\x20\x40\xa2\xca", "eon x0, x1, x2, asr #16"),
("\x20\x80\xa2\xca", "eon x0, x1, x2, asr #32"),
("\x20\x84\xa2\xca", "eon x0, x1, x2, asr #33"),
("\x20\x88\xa2\xca", "eon x0, x1, x2, asr #34"),
("\x20\x88\xa2\xca", "eon x0, x1, x2, asr #34"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x00\x22\xaa", "orn x0, x1, x2"),
("\x40\x00\x21\xaa", "orn x0, x2, x1"),
("\x41\x00\x20\xaa", "orn x1, x2, x0"),
("\x01\x00\x22\xaa", "orn x1, x0, x2"),
("\x20\x04\x22\xaa", "orn x0, x1, x2, lsl #1"),
("\x20\x08\x22\xaa", "orn x0, x1, x2, lsl #2"),
("\x20\x0c\x22\xaa", "orn x0, x1, x2, lsl #3"),
("\x20\x04\xe2\xaa", "orn x0, x1, x2, ror #1"),
("\x20\x08\xe2\xaa", "orn x0, x1, x2, ror #2"),
("\x20\x0c\xe2\xaa", "orn x0, x1, x2, ror #3"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x00\x02\xaa", "orr x0, x1, x2"),
("\x40\x00\x01\xaa", "orr x0, x2, x1"),
("\x41\x00\x00\xaa", "orr x1, x2, x0"),
("\x01\x00\x02\xaa", "orr x1, x0, x2"),
("\x20\x04\x02\xaa", "orr x0, x1, x2, lsl #1"),
("\x20\x08\x02\xaa", "orr x0, x1, x2, lsl #2"),
("\x20\x0c\x02\xaa", "orr x0, x1, x2, lsl #3"),
("\x20\x04\xc2\xaa", "orr x0, x1, x2, ror #1"),
("\x20\x08\xc2\xaa", "orr x0, x1, x2, ror #2"),
("\x20\x0c\xc2\xaa", "orr x0, x1, x2, ror #3"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\x00\x40\xf9", "ldr x5, [x1]"),
("\x26\x04\x40\xf8", "ldr x6, [x1], #0"),
("\x27\x44\x40\xf8", "ldr x7, [x1], #4"),
("\x28\x68\x62\xf8", "ldr x8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\x24\x5e\xf8", "ldr x9, [x1], #-30"),
("\x2a\x8c\x40\xf8", "ldr x10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x40\xf9", "ldr x11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\x00\x40\x39", "ldrb w5, [x1]"),
("\x26\x04\x40\x38", "ldrb w6, [x1], #0"),
("\x27\x44\x40\x38", "ldrb w7, [x1], #4"),
("\x28\x68\x62\x38", "ldrb w8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\x24\x5e\x38", "ldrb w9, [x1], #-30"),
("\x2a\x8c\x40\x38", "ldrb w10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x40\x39", "ldrb w11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\x00\x40\x79", "ldrh w5, [x1]"),
("\x26\x04\x40\x78", "ldrh w6, [x1], #0"),
("\x27\x44\x40\x78", "ldrh w7, [x1], #4"),
("\x28\x68\x62\x78", "ldrh w8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\x24\x5e\x78", "ldrh w9, [x1], #-30"),
("\x2a\x8c\x40\x78", "ldrh w10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x40\x79", "ldrh w11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x24\x14\x40\xa9", "ldp x4, x5, [x1]"),
("\x25\x18\xc0\xa8", "ldp x5, x6, [x1], #0"),
("\x26\x9c\xc0\xa8", "ldp x6, x7, [x1], #8"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x28\x24\xfe\xa8", "ldp x8, x9, [x1], #-32"),
("\x29\x28\xc1\xa9", "ldp x9, x10, [x1, #16]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xea\x2f\x40\xa9", "ldp x10, x11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x24\x14\x40\x29", "ldp w4, w5, [x1]"),
("\x25\x18\xc0\x28", "ldp w5, w6, [x1], #0"),
("\x26\x1c\xc1\x28", "ldp w6, w7, [x1], #8"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x28\x24\xfc\x28", "ldp w8, w9, [x1], #-32"),
("\x29\x28\xc2\x29", "ldp w9, w10, [x1, #16]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xea\x2f\x40\x29", "ldp w10, w11, [sp]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x21\x30\x00\x91", "add x1, x1, #12"), # STACK+12
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x10\x40\xf8", "ldur x0, [x1, #1]"),
("\x20\x20\x40\xf8", "ldur x0, [x1, #2]"),
("\x20\x30\x40\xf8", "ldur x0, [x1, #3]"),
("\x20\x40\x40\xf8", "ldur x0, [x1, #4]"),
("\x20\xf0\x5f\xf8", "ldur x0, [x1, #-1]"),
("\x20\xe0\x5f\xf8", "ldur x0, [x1, #-2]"),
("\x20\xd0\x5f\xf8", "ldur x0, [x1, #-3]"),
("\x20\xc0\x5f\xf8", "ldur x0, [x1, #-4]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x40\x38", "ldurb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x10\x40\x38", "ldurb w0, [x1, #1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x20\x40\x38", "ldurb w0, [x1, #2]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x30\x40\x38", "ldurb w0, [x1, #3]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x40\x40\x38", "ldurb w0, [x1, #4]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xf0\x5f\x38", "ldurb w0, [x1, #0xffffffffffffffff]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xe0\x5f\x38", "ldurb w0, [x1, #0xfffffffffffffffe]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xd0\x5f\x38", "ldurb w0, [x1, #0xfffffffffffffffd]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xc0\x5f\x38", "ldurb w0, [x1, #0xfffffffffffffffc]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x40\x78", "ldurh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x10\x40\x78", "ldurh w0, [x1, #1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x20\x40\x78", "ldurh w0, [x1, #2]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x30\x40\x78", "ldurh w0, [x1, #3]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x40\x40\x78", "ldurh w0, [x1, #4]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xf0\x5f\x78", "ldurh w0, [x1, #0xffffffffffffffff]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xe0\x5f\x78", "ldurh w0, [x1, #0xfffffffffffffffe]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xd0\x5f\x78", "ldurh w0, [x1, #0xfffffffffffffffd]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xc0\x5f\x78", "ldurh w0, [x1, #0xfffffffffffffffc]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x21\x30\x00\x91", "add x1, x1, #12"), # STACK+12
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x38", "ldursb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x78", "ldursh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x78", "ldursh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb8", "ldursw x0, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x38", "ldursb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x78", "ldursh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x78", "ldursh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb8", "ldursw x0, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x21\x30\x00\x91", "add x1, x1, #12"), # STACK+12
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x39", "ldrsb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x79", "ldrsh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x79", "ldrsh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb9", "ldrsw x0, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x39", "ldrsb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x79", "ldrsh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x79", "ldrsh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb9", "ldrsw x0, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x06\xa0\xd2", "movz x2, #0x30, lsl #16"), # HEAP address
("\x42\x78\x00\x91", "add x2, x2, #30"),
("\x23\x00\x40\xf8", "ldur x3, [x1]"),
("\x44\x00\x40\xf8", "ldur x4, [x2]"),
("\x60\x00\xc4\x93", "extr x0, x3, x4, #0"),
("\x60\x04\xc4\x93", "extr x0, x3, x4, #1"),
("\x60\x08\xc4\x93", "extr x0, x3, x4, #2"),
("\x60\x0c\xc4\x93", "extr x0, x3, x4, #2"),
("\x60\x78\xc4\x93", "extr x0, x3, x4, #30"),
("\x60\xfc\xc4\x93", "extr x0, x3, x4, #63"),
("\x60\x00\x84\x13", "extr w0, w3, w4, #0"),
("\x60\x04\x84\x13", "extr w0, w3, w4, #1"),
("\x60\x08\x84\x13", "extr w0, w3, w4, #2"),
("\x60\x0c\x84\x13", "extr w0, w3, w4, #3"),
("\x60\x7c\x84\x13", "extr w0, w3, w4, #31"),
("\x01\x00\x00\x14", "b #4"),
#("\x02\x00\x00\x14", "b #8"), # FIXME cannot handle this with
#("\x03\x00\x00\x14", "b #12"), # unicorn emulating only one
#("\x00\xd0\x48\x14", "b #0x1234000"), # instruction...
#("\x74\xbb\xff\x17", "b #-0x11230"), #
("\x20\x00\x00\x54" ,"b.eq #4"),
#("\x40\x00\x00\x54" ,"b.eq #8"),
("\x01\x00\x00\x94" ,"bl #4"),
("\x80\x0c\x90\xb7", "tbnz x0, #0x32, #0x190"),
("\x20\x00\x90\xb6", "tbz x0, #0x32, #4"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\x02\x06\xa0\xd2", "movz x2, #0x20, lsl #16"), # STACK address
("\xe1\x03\x02\xaa", "mov x1, x2"),
("\x3f\x00\x00\x91", "mov sp, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xe0\x03\x21\xaa", "mvn x0, x1"),
("\xe0\x03\x01\xcb", "neg x0, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x20\x0c\x02\x9b", "madd x0, x1, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x7c\x02\x9b", "mul x0, x1, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x20\x8c\x02\x9b", "msub x0, x1, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\xfc\x02\x9b", "mneg x0, x1, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x64\xa0\x84\xd2", "movz x4, #9475"),
("\xe5\x24\x81\xd2", "movz x5, #2343"),
("\xa6\xaf\x81\xd2", "movz x6, #3453"),
("\x87\x3a\x82\xd2", "movz x7, #4564"),
("\xe8\x16\x84\xd2", "movz x8, #8375"),
("\xe9\xc1\x84\xd2", "movz x9, #9743"),
("\xea\xaa\x82\xd2", "movz x10, #5463"),
("\x2b\xf8\x80\xd2", "movz x11, #1985"),
("\x25\x00\x00\xf9", "str x5, [x1]"),
("\x26\x04\x00\xf8", "str x6, [x1], #0"),
("\x27\x44\x00\xf8", "str x7, [x1], #4"),
("\x28\x68\x22\xf8", "str x8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\x24\x1e\xf8", "str x9, [x1], #-30"),
("\x2a\x8c\x00\xf8", "str x10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x00\xf9", "str x11, [sp]"),
("\x25\x00\x00\xf8", "stur x5, [x1]"),
("\x26\x00\x00\x38", "sturb w6, [x1]"),
("\x27\x00\x00\x78", "sturh w7, [x1]"),
("\x29\x00\x00\xf9", "str x9, [x1]"),
("\x2a\x00\x00\x39", "strb w10, [x1]"),
("\x2b\x00\x00\x79", "strh w11, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\xe5\x24\x81\xd2", "movz x5, #2343"),
("\xa6\xaf\x81\xd2", "movz x6, #3453"),
("\x87\x3a\x82\xd2", "movz x7, #4564"),
("\xe8\x16\x84\xd2", "movz x8, #8375"),
("\xe9\xc1\x84\xd2", "movz x9, #9743"),
("\xea\xaa\x82\xd2", "movz x10, #5463"),
("\x25\x18\x00\xa9", "stp x5, x6, [x1]"),
("\x27\x20\x80\xa8", "stp x7, x8, [x1], #0"),
("\x29\xa8\x80\xa8", "stp x9, x10, [x1], #8"),
("\x25\x20\x82\xa9", "stp x5, x8, [x1, #32]!"),
("\x26\x1c\x01\xa9", "stp x6, x7, [x1, #16]"),
("\x25\x18\x00\x29", "stp w5, w6, [x1]"),
("\x27\x20\x80\x28", "stp w7, w8, [x1], #0"),
("\x29\x28\x81\x28", "stp w9, w10, [x1], #8"),
("\x25\x20\x84\x29", "stp w5, w8, [x1, #32]!"),
("\x26\x1c\x02\x29", "stp w6, w7, [x1, #16]"),
("\xc1\xbd\x9b\xd2", "movz x1, #0xddee"),
("\x20\x1c\x40\x93", "sxtb x0, x1"),
("\x20\x3c\x40\x93", "sxth x0, x1"),
("\x20\x7c\x40\x93", "sxtw x0, x1"),
("\x20\x1c\x00\x53", "uxtb w0, w1"),
("\x20\x3c\x00\x53", "uxth w0, w1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x00\x82\x9a", "csel x0, x1, x2, eq"),
("\x40\x00\x81\x9a", "csel x0, x2, x1, eq"),
("\x20\x10\x82\x9a", "csel x0, x1, x2, ne"),
("\x40\x10\x81\x9a", "csel x0, x2, x1, ne"),
("\x20\x04\x82\x9a", "csinc x0, x1, x2, eq"),
("\x40\x04\x81\x9a", "csinc x0, x2, x1, eq"),
("\x20\x14\x82\x9a", "csinc x0, x1, x2, ne"),
("\x40\x14\x81\x9a", "csinc x0, x2, x1, ne"),
("\x20\x04\x82\xda", "csneg x0, x1, x2, eq"),
("\x40\x04\x81\xda", "csneg x0, x2, x1, eq"),
("\x20\x14\x82\xda", "csneg x0, x1, x2, ne"),
("\x40\x14\x81\xda", "csneg x0, x2, x1, ne"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\x20\xf8\x7f\xd3", "lsl x0, x1, #1"),
("\x20\xf4\x7e\xd3", "lsl x0, x1, #2"),
("\x20\xf0\x7d\xd3", "lsl x0, x1, #3"),
("\x20\xec\x7c\xd3", "lsl x0, x1, #4"),
("\x20\xfc\x41\xd3", "lsr x0, x1, #1"),
("\x20\xfc\x42\xd3", "lsr x0, x1, #2"),
("\x20\xfc\x43\xd3", "lsr x0, x1, #3"),
("\x20\xfc\x44\xd3", "lsr x0, x1, #4"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x20\xc2\x9a", "lsl x0, x1, x2"),
("\x20\x24\xc2\x9a", "lsr x0, x1, x2"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\xc1\x88\x83\xd2", "movz x1, #7238"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\xc1\x88\x83\xd2", "movz x1, #7238"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x20\x0c\xa2\x9b", "umaddl x0, w1, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x20\x8c\xa2\x9b", "umsubl x0, w1, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\xa3\xd5\x9b\xd2", "movz x3, #0xdead"),
("\x20\x0c\x22\x9b", "smaddl x0, w1, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\xa3\xd5\x9b\xd2", "movz x3, #0xdead"),
("\x20\x8c\x22\x9b", "smsubl x0, w1, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\x20\x7c\x22\x9b", "smull x0, w1, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\x20\x7c\x42\x9b", "smulh x0, x1, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x01\x06\xa0\x92", "movn x1, #0x30, lsl #16"),
("\x02\x02\x80\x92", "movn x2, #16"),
("\x63\xa0\x84\x92", "movn x3, #9475"),
("\x64\xa0\x84\x92", "movn x4, #9475"),
("\xe5\x24\x81\x92", "movn x5, #2343"),
("\xa6\xaf\x81\x92", "movn x6, #3453"),
("\x87\x3a\x82\x92", "movn x7, #4564"),
("\xe8\x16\x84\x92", "movn x8, #8375"),
("\xe9\xc1\x84\x92", "movn x9, #9743"),
("\xea\xaa\x82\x92", "movn x10, #5463"),
("\x2b\xf8\x80\x92", "movn x11, #1985"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\x00\x40\xd3", "ubfx x0, x1, #0, #1"),
("\x20\x08\x40\xd3", "ubfx x0, x1, #0, #3"),
("\x20\x0c\x40\xd3", "ubfx x0, x1, #0, #4"),
("\x20\x10\x40\xd3", "ubfx x0, x1, #0, #5"),
("\x20\x78\x40\xd3", "ubfx x0, x1, #0, #31"),
("\x20\xf8\x40\xd3", "ubfx x0, x1, #0, #63"),
("\x20\xfc\x40\xd3", "ubfx x0, x1, #0, #64"),
("\x20\xfc\x41\xd3", "ubfx x0, x1, #1, #63"),
("\x20\xfc\x42\xd3", "ubfx x0, x1, #2, #62"),
("\x20\xfc\x43\xd3", "ubfx x0, x1, #3, #61"),
("\x20\xfc\x60\xd3", "ubfx x0, x1, #32, #32"),
("\x20\x4c\x4a\xd3", "ubfx x0, x1, #10, #10"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\x00\x40\x93", "sbfx x0, x1, #0, #1"),
("\x20\x08\x40\x93", "sbfx x0, x1, #0, #3"),
("\x20\x0c\x40\x93", "sbfx x0, x1, #0, #4"),
("\x20\x10\x40\x93", "sbfx x0, x1, #0, #5"),
("\x20\x78\x40\x93", "sbfx x0, x1, #0, #31"),
("\x20\xf8\x40\x93", "sbfx x0, x1, #0, #63"),
("\x20\xfc\x40\x93", "sbfx x0, x1, #0, #64"),
("\x20\xfc\x41\x93", "sbfx x0, x1, #1, #63"),
("\x20\xfc\x42\x93", "sbfx x0, x1, #2, #62"),
("\x20\xfc\x43\x93", "sbfx x0, x1, #3, #61"),
("\x20\xfc\x60\x93", "sbfx x0, x1, #32, #32"),
("\x20\x4c\x4a\x93", "sbfx x0, x1, #10, #10"),
("\x20\x48\x49\x93", "sbfx x0, x1, #9, #10"),
("\x20\x40\x47\x93", "sbfx x0, x1, #7, #10"),
("\x20\x3c\x47\x93", "sbfx x0, x1, #7, #9"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\x20\x00\x42\xfa", "ccmp x1, x2, 0, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x21\x00\x42\xfa", "ccmp x1, x2, 1, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x22\x00\x42\xfa", "ccmp x1, x2, 2, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x23\x00\x42\xfa", "ccmp x1, x2, 3, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x24\x00\x42\xfa", "ccmp x1, x2, 4, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x25\x00\x42\xfa", "ccmp x1, x2, 5, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x26\x00\x42\xfa", "ccmp x1, x2, 6, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x27\x00\x42\xfa", "ccmp x1, x2, 7, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x28\x00\x42\xfa", "ccmp x1, x2, 8, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x29\x00\x42\xfa", "ccmp x1, x2, 9, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2a\x00\x42\xfa", "ccmp x1, x2, 10, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2b\x00\x42\xfa", "ccmp x1, x2, 11, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2c\x00\x42\xfa", "ccmp x1, x2, 12, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2d\x00\x42\xfa", "ccmp x1, x2, 13, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2e\x00\x42\xfa", "ccmp x1, x2, 14, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2f\x00\x42\xfa", "ccmp x1, x2, 15, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\xc2\xfd\xbf\xd2", "movz x2, #0xffee, lsl #16"),
("\x20\x00\x42\xfa", "ccmp x1, x2, 0, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x21\x00\x42\xfa", "ccmp x1, x2, 1, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x22\x00\x42\xfa", "ccmp x1, x2, 2, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x23\x00\x42\xfa", "ccmp x1, x2, 3, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x24\x00\x42\xfa", "ccmp x1, x2, 4, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x25\x00\x42\xfa", "ccmp x1, x2, 5, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x26\x00\x42\xfa", "ccmp x1, x2, 6, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x27\x00\x42\xfa", "ccmp x1, x2, 7, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x28\x00\x42\xfa", "ccmp x1, x2, 8, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x29\x00\x42\xfa", "ccmp x1, x2, 9, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2a\x00\x42\xfa", "ccmp x1, x2, 10, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2b\x00\x42\xfa", "ccmp x1, x2, 11, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2c\x00\x42\xfa", "ccmp x1, x2, 12, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2d\x00\x42\xfa", "ccmp x1, x2, 13, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2e\x00\x42\xfa", "ccmp x1, x2, 14, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2f\x00\x42\xfa", "ccmp x1, x2, 15, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x41\x14\x82\x9a", "cinc x1, x2, eq"),
("\x41\x04\x82\x9a", "cinc x1, x2, ne"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\xfc\x40\xd3", "ubfiz x0, x1, #0, #64"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xf8\x7f\xd3", "ubfiz x0, x1, #1, #63"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xf4\x7e\xd3", "ubfiz x0, x1, #2, #62"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xf0\x7d\xd3", "ubfiz x0, x1, #3, #61"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xec\x7c\xd3", "ubfiz x0, x1, #4, #60"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xe8\x7b\xd3", "ubfiz x0, x1, #5, #59"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xe4\x7a\xd3", "ubfiz x0, x1, #6, #58"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xe0\x79\xd3", "ubfiz x0, x1, #7, #57"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xdc\x78\xd3", "ubfiz x0, x1, #8, #56"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x7c\x7a\xd3", "ubfiz x0, x1, #6, #32"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x78\xd3", "ubfiz x0, x1, #8, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x41\xd3", "ubfiz x0, x1, #63, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x18\x53", "ubfiz w0, w1, #8, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x01\x53", "ubfiz w0, w1, #31, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x7c\x00\x53", "ubfiz w0, w1, #0, #32"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x08\xc2\x9a", "udiv x0, x1, x2"),
("\x40\x08\xc1\x9a", "udiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x20\x08\xc2\x9a", "udiv x0, x1, x2"),
("\x40\x08\xc1\x9a", "udiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x0c\xc2\x9a", "sdiv x0, x1, x2"),
("\x40\x0c\xc1\x9a", "sdiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x20\x0c\xc2\x9a", "sdiv x0, x1, x2"),
("\x40\x0c\xc1\x9a", "sdiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x7c\xa2\x9b", "umull x0, w1, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\x7c\xc1\x9b", "umulh x0, x1, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\xfc\xa2\x9b", "umnegl x0, w1, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x2c\xc2\x9a", "ror x0, x1, x2"),
("\x40\x2c\xc1\x9a", "ror x0, x2, x1"),
("\x40\x00\xc2\x93", "ror x0, x2, #0"),
("\x40\x04\xc2\x93", "ror x0, x2, #1"),
("\x40\x08\xc2\x93", "ror x0, x2, #2"),
("\x40\x0c\xc2\x93", "ror x0, x2, #3"),
("\x40\x10\xc2\x93", "ror x0, x2, #4"),
("\x40\xf8\xc2\x93", "ror x0, x2, #62"),
("\x40\xfc\xc2\x93", "ror x0, x2, #63"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\x80\xd2", "mov x1, #0"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x41\x00\x80\xd2", "mov x1, #1 << 1"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x81\x00\x80\xd2", "mov x1, #1 << 2"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\x82\xd2", "mov x1, #1 << 12"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\x82\xd2", "mov x1, #1 << 12"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\xb0\xd2", "mov x1, #1 << 31"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x21\x00\xc0\xd2", "mov x1, #1 << 32"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x41\x00\xc0\xd2", "mov x1, #1 << 33"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\xe8\xd2", "mov x1, #1 << 62"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\xf0\xd2", "mov x1, #1 << 63"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x21\x00\x80\xd2", "mov x1, #1 << 64"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\xdf\xc8", "ldar x5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\xfc\xdf\xc8", "ldar x9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\xdf\xc8", "ldar x11, [sp]"),
("\xff\xff\xdf\xc8", "ldar xzr, [sp]"),
("\xe7\xff\xdf\x88", "ldar w7, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\xdf\x08", "ldarb w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\xfc\xdf\x08", "ldarb w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\xdf\x08", "ldarb w11, [sp]"),
("\xff\xff\xdf\x08", "ldarb wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\xdf\x48", "ldarh w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\xfc\xdf\x48", "ldarh w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\xdf\x48", "ldarh w11, [sp]"),
("\xff\xff\xdf\x48", "ldarh wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\x5f\xc8", "ldaxr x5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\xfc\x5f\xc8", "ldaxr x9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x5f\xc8", "ldaxr x11, [sp]"),
("\xff\xff\x5f\xc8", "ldaxr xzr, [sp]"),
("\xe7\xff\x5f\x88", "ldaxr w7, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\x5f\x08", "ldaxrb w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\xfc\x5f\x08", "ldaxrb w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x5f\x08", "ldaxrb w11, [sp]"),
("\xff\xff\x5f\x08", "ldaxrb wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\x5f\x48", "ldaxrh w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x29\xfc\x5f\x48", "ldaxrh w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x5f\x48", "ldaxrh w11, [sp]"),
("\xff\xff\x5f\x48", "ldaxrh wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x64\xa0\x84\xd2", "movz x4, #9475"),
("\xe5\x24\x81\xd2", "movz x5, #2343"),
("\xa6\xaf\x81\xd2", "movz x6, #3453"),
("\x87\x3a\x82\xd2", "movz x7, #4564"),
("\xe8\x16\x84\xd2", "movz x8, #8375"),
("\xe9\xc1\x84\xd2", "movz x9, #9743"),
("\xea\xaa\x82\xd2", "movz x10, #5463"),
("\x2b\xf8\x80\xd2", "movz x11, #1985"),
("\x25\xfc\x9f\xc8", "stlr x5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"), # HEAP address
("\x21\xc8\x00\x91", "add x1, x1, #50"), # HEAP+50 address
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x9f\xc8", "stlr x11, [sp]"),
("\x25\x00\x00\xf8", "stur x5, [x1]"),
("\x26\x00\x00\x38", "sturb w6, [x1]"),
("\x27\x00\x00\x78", "sturh w7, [x1]"),
("\x29\xfc\x9f\xc8", "stlr x9, [x1]"),
("\x2a\xfc\x9f\x08", "stlrb w10, [x1]"),
("\x2b\xfc\x9f\x48", "stlrh w11, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"), # STACK address
("\x21\x30\x00\x91", "add x1, x1, #12"), # STACK+12
("\x20\x7c\x5f\xc8", "ldxr x0, [x1]"),
("\x21\x30\x00\x91", "add x1, x1, #12"), # STACK+24
("\x20\x7c\x5f\x08", "ldxrb w0, [x1]"),
("\x21\x30\x00\x91", "add x1, x1, #12"), # STACK+36
("\x20\x7c\x5f\x48", "ldxrh w0, [x1]"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x0c\xc0\xda", "rev x0, x1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x08\xc0\x5a", "rev w0, w1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x04\xc0\xda", "rev16 x0, x1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x04\xc0\x5a", "rev16 w0, w1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x08\xc0\xda", "rev32 x0, x1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x00\xc0\xda", "rbit x0, x1"),
("\x20\x00\xc0\x5a", "rbit w0, w1"),
("\x20\x00\x80\xd2", "movz x0, #1"),
("\x20\x00\xc0\xda", "rbit x0, x1"),
("\x20\x00\xc0\x5a", "rbit w0, w1"),
]
def emu_with_unicorn(opcode, istate):
    """Emulate one AArch64 instruction with Unicorn and return the new state.

    The whole test program (every opcode in ``CODE``) is written at ``ADDR``
    so that ``istate['pc']`` can point anywhere inside it; only the single
    instruction ``opcode`` at ``istate['pc']`` is executed.

    :param opcode: raw machine code bytes of the instruction to run
    :param istate: input machine state — a dict with keys ``stack``/``heap``
                   (byte buffers), ``x0``..``x30``, ``pc``, ``sp`` and the
                   ``n``/``z``/``c``/``v`` flag bits
    :return: output machine state with the same keys as ``istate``
    """
    # Initialize emulator in AArch64 mode.
    mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)

    # Map memory for this emulation and install the full test program.
    mu.mem_map(ADDR, SIZE)
    index = 0
    for op, _ in CODE:
        mu.mem_write(ADDR + index, op)
        index += len(op)

    # Load the input memory areas and registers.
    mu.mem_write(STACK, bytes(istate['stack']))
    mu.mem_write(HEAP, bytes(istate['heap']))
    mu.reg_write(UC_ARM64_REG_X0,  istate['x0'])
    mu.reg_write(UC_ARM64_REG_X1,  istate['x1'])
    mu.reg_write(UC_ARM64_REG_X2,  istate['x2'])
    mu.reg_write(UC_ARM64_REG_X3,  istate['x3'])
    mu.reg_write(UC_ARM64_REG_X4,  istate['x4'])
    mu.reg_write(UC_ARM64_REG_X5,  istate['x5'])
    mu.reg_write(UC_ARM64_REG_X6,  istate['x6'])
    mu.reg_write(UC_ARM64_REG_X7,  istate['x7'])
    mu.reg_write(UC_ARM64_REG_X8,  istate['x8'])
    mu.reg_write(UC_ARM64_REG_X9,  istate['x9'])
    mu.reg_write(UC_ARM64_REG_X10, istate['x10'])
    mu.reg_write(UC_ARM64_REG_X11, istate['x11'])
    mu.reg_write(UC_ARM64_REG_X12, istate['x12'])
    mu.reg_write(UC_ARM64_REG_X13, istate['x13'])
    mu.reg_write(UC_ARM64_REG_X14, istate['x14'])
    mu.reg_write(UC_ARM64_REG_X15, istate['x15'])
    mu.reg_write(UC_ARM64_REG_X16, istate['x16'])
    mu.reg_write(UC_ARM64_REG_X17, istate['x17'])
    mu.reg_write(UC_ARM64_REG_X18, istate['x18'])
    mu.reg_write(UC_ARM64_REG_X19, istate['x19'])
    mu.reg_write(UC_ARM64_REG_X20, istate['x20'])
    mu.reg_write(UC_ARM64_REG_X21, istate['x21'])
    mu.reg_write(UC_ARM64_REG_X22, istate['x22'])
    mu.reg_write(UC_ARM64_REG_X23, istate['x23'])
    mu.reg_write(UC_ARM64_REG_X24, istate['x24'])
    mu.reg_write(UC_ARM64_REG_X25, istate['x25'])
    mu.reg_write(UC_ARM64_REG_X26, istate['x26'])
    mu.reg_write(UC_ARM64_REG_X27, istate['x27'])
    mu.reg_write(UC_ARM64_REG_X28, istate['x28'])
    mu.reg_write(UC_ARM64_REG_X29, istate['x29'])
    mu.reg_write(UC_ARM64_REG_X30, istate['x30'])
    mu.reg_write(UC_ARM64_REG_PC,  istate['pc'])
    mu.reg_write(UC_ARM64_REG_SP,  istate['sp'])
    # Pack the individual flag bits into the NZCV register (bits 31..28).
    mu.reg_write(UC_ARM64_REG_NZCV, istate['n'] << 31 | istate['z'] << 30 | istate['c'] << 29 | istate['v'] << 28)

    # Emulate exactly this one instruction (stop at its end address).
    mu.emu_start(istate['pc'], istate['pc'] + len(opcode))

    # Collect the output state.  NOTE: the original code listed "x30" twice
    # here; the duplicate key has been removed (the second silently won).
    ostate = {
        "stack": mu.mem_read(STACK, 0x100),
        "heap":  mu.mem_read(HEAP, 0x100),
        "x0":    mu.reg_read(UC_ARM64_REG_X0),
        "x1":    mu.reg_read(UC_ARM64_REG_X1),
        "x2":    mu.reg_read(UC_ARM64_REG_X2),
        "x3":    mu.reg_read(UC_ARM64_REG_X3),
        "x4":    mu.reg_read(UC_ARM64_REG_X4),
        "x5":    mu.reg_read(UC_ARM64_REG_X5),
        "x6":    mu.reg_read(UC_ARM64_REG_X6),
        "x7":    mu.reg_read(UC_ARM64_REG_X7),
        "x8":    mu.reg_read(UC_ARM64_REG_X8),
        "x9":    mu.reg_read(UC_ARM64_REG_X9),
        "x10":   mu.reg_read(UC_ARM64_REG_X10),
        "x11":   mu.reg_read(UC_ARM64_REG_X11),
        "x12":   mu.reg_read(UC_ARM64_REG_X12),
        "x13":   mu.reg_read(UC_ARM64_REG_X13),
        "x14":   mu.reg_read(UC_ARM64_REG_X14),
        "x15":   mu.reg_read(UC_ARM64_REG_X15),
        "x16":   mu.reg_read(UC_ARM64_REG_X16),
        "x17":   mu.reg_read(UC_ARM64_REG_X17),
        "x18":   mu.reg_read(UC_ARM64_REG_X18),
        "x19":   mu.reg_read(UC_ARM64_REG_X19),
        "x20":   mu.reg_read(UC_ARM64_REG_X20),
        "x21":   mu.reg_read(UC_ARM64_REG_X21),
        "x22":   mu.reg_read(UC_ARM64_REG_X22),
        "x23":   mu.reg_read(UC_ARM64_REG_X23),
        "x24":   mu.reg_read(UC_ARM64_REG_X24),
        "x25":   mu.reg_read(UC_ARM64_REG_X25),
        "x26":   mu.reg_read(UC_ARM64_REG_X26),
        "x27":   mu.reg_read(UC_ARM64_REG_X27),
        "x28":   mu.reg_read(UC_ARM64_REG_X28),
        "x29":   mu.reg_read(UC_ARM64_REG_X29),
        "x30":   mu.reg_read(UC_ARM64_REG_X30),
        "pc":    mu.reg_read(UC_ARM64_REG_PC),
        "sp":    mu.reg_read(UC_ARM64_REG_SP),
        # Unpack the NZCV flag bits back into individual entries.
        "n": ((mu.reg_read(UC_ARM64_REG_NZCV) >> 31) & 1),
        "z": ((mu.reg_read(UC_ARM64_REG_NZCV) >> 30) & 1),
        "c": ((mu.reg_read(UC_ARM64_REG_NZCV) >> 29) & 1),
        "v": ((mu.reg_read(UC_ARM64_REG_NZCV) >> 28) & 1),
    }
    return ostate
def emu_with_triton(opcode, istate):
    """Symbolically execute one AArch64 instruction with Triton.

    Builds a fresh ``TritonContext``, loads the input machine state
    ``istate`` (memory areas, registers and flags), processes ``opcode``
    at ``istate['pc']`` and returns the resulting machine state.

    :param opcode: raw machine code bytes of the instruction to run
    :param istate: input machine state — same layout as for
                   ``emu_with_unicorn``
    :return: output machine state with the same keys as ``istate``
    """
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.AARCH64)

    inst = Instruction(opcode)
    inst.setAddress(istate['pc'])

    # Load the input memory areas and registers.
    ctx.setConcreteMemoryAreaValue(STACK, bytes(istate['stack']))
    ctx.setConcreteMemoryAreaValue(HEAP, bytes(istate['heap']))
    ctx.setConcreteRegisterValue(ctx.registers.x0,  istate['x0'])
    ctx.setConcreteRegisterValue(ctx.registers.x1,  istate['x1'])
    ctx.setConcreteRegisterValue(ctx.registers.x2,  istate['x2'])
    ctx.setConcreteRegisterValue(ctx.registers.x3,  istate['x3'])
    ctx.setConcreteRegisterValue(ctx.registers.x4,  istate['x4'])
    ctx.setConcreteRegisterValue(ctx.registers.x5,  istate['x5'])
    ctx.setConcreteRegisterValue(ctx.registers.x6,  istate['x6'])
    ctx.setConcreteRegisterValue(ctx.registers.x7,  istate['x7'])
    ctx.setConcreteRegisterValue(ctx.registers.x8,  istate['x8'])
    ctx.setConcreteRegisterValue(ctx.registers.x9,  istate['x9'])
    ctx.setConcreteRegisterValue(ctx.registers.x10, istate['x10'])
    ctx.setConcreteRegisterValue(ctx.registers.x11, istate['x11'])
    ctx.setConcreteRegisterValue(ctx.registers.x12, istate['x12'])
    ctx.setConcreteRegisterValue(ctx.registers.x13, istate['x13'])
    ctx.setConcreteRegisterValue(ctx.registers.x14, istate['x14'])
    ctx.setConcreteRegisterValue(ctx.registers.x15, istate['x15'])
    ctx.setConcreteRegisterValue(ctx.registers.x16, istate['x16'])
    ctx.setConcreteRegisterValue(ctx.registers.x17, istate['x17'])
    ctx.setConcreteRegisterValue(ctx.registers.x18, istate['x18'])
    ctx.setConcreteRegisterValue(ctx.registers.x19, istate['x19'])
    ctx.setConcreteRegisterValue(ctx.registers.x20, istate['x20'])
    ctx.setConcreteRegisterValue(ctx.registers.x21, istate['x21'])
    ctx.setConcreteRegisterValue(ctx.registers.x22, istate['x22'])
    ctx.setConcreteRegisterValue(ctx.registers.x23, istate['x23'])
    ctx.setConcreteRegisterValue(ctx.registers.x24, istate['x24'])
    ctx.setConcreteRegisterValue(ctx.registers.x25, istate['x25'])
    ctx.setConcreteRegisterValue(ctx.registers.x26, istate['x26'])
    ctx.setConcreteRegisterValue(ctx.registers.x27, istate['x27'])
    ctx.setConcreteRegisterValue(ctx.registers.x28, istate['x28'])
    ctx.setConcreteRegisterValue(ctx.registers.x29, istate['x29'])
    ctx.setConcreteRegisterValue(ctx.registers.x30, istate['x30'])
    ctx.setConcreteRegisterValue(ctx.registers.pc,  istate['pc'])
    ctx.setConcreteRegisterValue(ctx.registers.sp,  istate['sp'])
    ctx.setConcreteRegisterValue(ctx.registers.n,   istate['n'])
    ctx.setConcreteRegisterValue(ctx.registers.z,   istate['z'])
    ctx.setConcreteRegisterValue(ctx.registers.c,   istate['c'])
    ctx.setConcreteRegisterValue(ctx.registers.v,   istate['v'])

    # Symbolically process the single instruction.
    ctx.processing(inst)

    # Collect the output state.  NOTE: the original code listed "x30" twice
    # here; the duplicate key has been removed (the second silently won).
    ostate = {
        "stack": ctx.getConcreteMemoryAreaValue(STACK, 0x100),
        "heap":  ctx.getConcreteMemoryAreaValue(HEAP, 0x100),
        "x0":    ctx.getSymbolicRegisterValue(ctx.registers.x0),
        "x1":    ctx.getSymbolicRegisterValue(ctx.registers.x1),
        "x2":    ctx.getSymbolicRegisterValue(ctx.registers.x2),
        "x3":    ctx.getSymbolicRegisterValue(ctx.registers.x3),
        "x4":    ctx.getSymbolicRegisterValue(ctx.registers.x4),
        "x5":    ctx.getSymbolicRegisterValue(ctx.registers.x5),
        "x6":    ctx.getSymbolicRegisterValue(ctx.registers.x6),
        "x7":    ctx.getSymbolicRegisterValue(ctx.registers.x7),
        "x8":    ctx.getSymbolicRegisterValue(ctx.registers.x8),
        "x9":    ctx.getSymbolicRegisterValue(ctx.registers.x9),
        "x10":   ctx.getSymbolicRegisterValue(ctx.registers.x10),
        "x11":   ctx.getSymbolicRegisterValue(ctx.registers.x11),
        "x12":   ctx.getSymbolicRegisterValue(ctx.registers.x12),
        "x13":   ctx.getSymbolicRegisterValue(ctx.registers.x13),
        "x14":   ctx.getSymbolicRegisterValue(ctx.registers.x14),
        "x15":   ctx.getSymbolicRegisterValue(ctx.registers.x15),
        "x16":   ctx.getSymbolicRegisterValue(ctx.registers.x16),
        "x17":   ctx.getSymbolicRegisterValue(ctx.registers.x17),
        "x18":   ctx.getSymbolicRegisterValue(ctx.registers.x18),
        "x19":   ctx.getSymbolicRegisterValue(ctx.registers.x19),
        "x20":   ctx.getSymbolicRegisterValue(ctx.registers.x20),
        "x21":   ctx.getSymbolicRegisterValue(ctx.registers.x21),
        "x22":   ctx.getSymbolicRegisterValue(ctx.registers.x22),
        "x23":   ctx.getSymbolicRegisterValue(ctx.registers.x23),
        "x24":   ctx.getSymbolicRegisterValue(ctx.registers.x24),
        "x25":   ctx.getSymbolicRegisterValue(ctx.registers.x25),
        "x26":   ctx.getSymbolicRegisterValue(ctx.registers.x26),
        "x27":   ctx.getSymbolicRegisterValue(ctx.registers.x27),
        "x28":   ctx.getSymbolicRegisterValue(ctx.registers.x28),
        "x29":   ctx.getSymbolicRegisterValue(ctx.registers.x29),
        "x30":   ctx.getSymbolicRegisterValue(ctx.registers.x30),
        "pc":    ctx.getSymbolicRegisterValue(ctx.registers.pc),
        "sp":    ctx.getSymbolicRegisterValue(ctx.registers.sp),
        "n":     ctx.getSymbolicRegisterValue(ctx.registers.n),
        "z":     ctx.getSymbolicRegisterValue(ctx.registers.z),
        "c":     ctx.getSymbolicRegisterValue(ctx.registers.c),
        "v":     ctx.getSymbolicRegisterValue(ctx.registers.v),
    }
    return ostate
def diff_state(state1, state2):
    """Print every entry that differs between two machine states.

    Memory areas ('heap'/'stack') are reported by name only; all other
    entries (registers, flags) are shown as hex values.  ``state1`` holds
    the Unicorn (UC) values, ``state2`` the Triton (TT) ones.
    """
    for key in state1:
        reference = state1[key]
        candidate = state2[key]
        if reference == candidate:
            continue
        if key in ('heap', 'stack'):
            print('\t%s: (UC) != (TT)' % (key,))
        else:
            print('\t%s: %#x (UC) != %#x (TT)' % (key, reference, candidate))
if __name__ == '__main__':
    # Initial machine state shared by every test case.  The stack is filled
    # with descending byte values (0xff..0x00) and the heap with ascending
    # ones (0x00..0xff) so loads from either region yield recognizable data.
    # FIX: the buffers are built as bytes directly — the original built str
    # objects, which made bytes(istate['stack']) raise a TypeError on
    # Python 3.  FIX: the duplicate "x30" dict key has also been removed.
    state = {
        "stack": bytes(255 - i for i in range(256)),
        "heap":  bytes(range(256)),
        "x0": 0,
        "x1": 0,
        "x2": 0,
        "x3": 0,
        "x4": 0,
        "x5": 0,
        "x6": 0,
        "x7": 0,
        "x8": 0,
        "x9": 0,
        "x10": 0,
        "x11": 0,
        "x12": 0,
        "x13": 0,
        "x14": 0,
        "x15": 0,
        "x16": 0,
        "x17": 0,
        "x18": 0,
        "x19": 0,
        "x20": 0,
        "x21": 0,
        "x22": 0,
        "x23": 0,
        "x24": 0,
        "x25": 0,
        "x26": 0,
        "x27": 0,
        "x28": 0,
        "x29": 0,
        "x30": 0,
        "pc": ADDR,
        "sp": STACK,
        "n": 0,
        "z": 0,
        "c": 0,
        "v": 0,
    }

    # Run every instruction through both emulators, feeding the Triton
    # output state into the next iteration; stop at the first divergence
    # or emulation error.
    for opcode, disassembly in CODE:
        try:
            uc_state = emu_with_unicorn(opcode, state)
            tt_state = emu_with_triton(opcode, state)
        except Exception as e:
            print('[KO] %s' % (disassembly))
            print('\t%s' % (e))
            sys.exit(-1)

        if uc_state != tt_state:
            print('[KO] %s' % (disassembly))
            diff_state(uc_state, tt_state)
            sys.exit(-1)

        print('[OK] %s' % (disassembly))
        state = tt_state

    sys.exit(0)
| 42.577165
| 114
| 0.508482
|
int
from triton import *
from unicorn import *
from unicorn.arm64_const import *
ADDR = 0x100000
STACK = 0x200000
HEAP = 0x300000
SIZE = 5 * 1024 * 1024
CODE = [
("\x80\x46\x82\xd2", "movz x0, #0x1234"),
("\x80\x46\xa2\xd2", "movz x0, #0x1234, lsl #16"),
("\x80\x46\xc2\xd2", "movz x0, #0x1234, lsl #32"),
("\x80\x46\xe2\xd2", "movz x0, #0x1234, lsl #48"),
("\x21\x64\x88\xd2", "movz x1, #0x4321"),
("\x21\x64\xa8\xd2", "movz x1, #0x4321, lsl #16"),
("\x21\x64\xc8\xd2", "movz x1, #0x4321, lsl #32"),
("\x21\x64\xe8\xd2", "movz x1, #0x4321, lsl #48"),
("\x21\x64\xe8\xd2", "movz x1, #0x4321, lsl #48"),
("\x21\x64\xc8\xd2", "movz x1, #0x4321, lsl #32"),
("\x21\x64\xa8\xd2", "movz x1, #0x4321, lsl #16"),
("\x21\x64\x88\xf2", "movk x1, #0x4321"),
("\x81\x46\xa2\xf2", "movk x1, #0x1234, lsl #16"),
("\x81\x04\xcf\xf2", "movk x1, #0x7824, lsl #32"),
("\x61\x8a\xf2\xf2", "movk x1, #0x9453, lsl #48"),
("\xe0\xcc\x8c\x52", "movz w0, #0x6667"),
("\xc0\xcc\xac\x72", "movk w0, #0x6666, lsl #16"),
("\x1f\x20\x03\xd5", "nop"),
("\x1f\x20\x03\xd5", "nop"),
("\x1f\x20\x03\xd5", "nop"),
("\x60\x00\x02\x8b", "add x0, x3, x2"),
("\x20\x00\x02\x8b", "add x0, x1, x2"),
("\x80\x46\xa2\xd2", "movz x0, #0x1234, lsl #16"),
("\x00\x00\x00\x8b", "add x0, x0, x0"),
("\x60\xc0\x22\x8b", "add x0, x3, w2, sxtw"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\x8b", "add x0, x1, w2, sxtb"),
("\x20\xa0\x22\x8b", "add x0, x1, w2, sxth"),
("\x20\xc0\x22\x8b", "add x0, x1, w2, sxtw"),
("\x20\xe0\x22\x8b", "add x0, x1, x2, sxtx"),
("\x20\x00\x02\x8b", "add x0, x1, x2, lsl #0"),
("\x20\x04\x02\x8b", "add x0, x1, x2, lsl #1"),
("\x20\x20\x02\x8b", "add x0, x1, x2, lsl #8"),
("\x20\x40\x02\x8b", "add x0, x1, x2, lsl #16"),
("\x20\x80\x02\x8b", "add x0, x1, x2, lsl #32"),
("\x20\x84\x02\x8b", "add x0, x1, x2, lsl #33"),
("\x20\x88\x02\x8b", "add x0, x1, x2, lsl #34"),
("\x20\x00\x42\x8b", "add x0, x1, x2, lsr #0"),
("\x20\x04\x42\x8b", "add x0, x1, x2, lsr #1"),
("\x20\x20\x42\x8b", "add x0, x1, x2, lsr #8"),
("\x20\x40\x42\x8b", "add x0, x1, x2, lsr #16"),
("\x20\x80\x42\x8b", "add x0, x1, x2, lsr #32"),
("\x20\x84\x42\x8b", "add x0, x1, x2, lsr #33"),
("\x20\x88\x42\x8b", "add x0, x1, x2, lsr #34"),
("\x20\x20\x82\x8b", "add x0, x1, x2, asr #8"),
("\x20\x40\x82\x8b", "add x0, x1, x2, asr #16"),
("\x20\x80\x82\x8b", "add x0, x1, x2, asr #32"),
("\x20\x84\x82\x8b", "add x0, x1, x2, asr #33"),
("\x20\x88\x82\x8b", "add x0, x1, x2, asr #34"),
("\x20\x88\x82\x8b", "add x0, x1, x2, asr #34"),
("\x20\x88\x19\x91", "add x0, x1, #1634"),
("\x20\x58\x21\x91", "add x0, x1, #2134"),
("\x20\x58\x61\x91", "add x0, x1, #2134, lsl #12"),
("\x3f\x60\x22\x8b", "add sp, x1, x2"),
("\x60\x00\x02\xab", "adds x0, x3, x2"),
("\x20\x00\x02\xab", "adds x0, x1, x2"),
("\x80\x46\xa2\xd2", "movz x0, #0x1234, lsl #16"),
("\x00\x00\x00\xab", "adds x0, x0, x0"),
("\x60\xc0\x22\xab", "adds x0, x3, w2, sxtw"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\xab", "adds x0, x1, w2, sxtb"),
("\x20\xa0\x22\xab", "adds x0, x1, w2, sxth"),
("\x20\xc0\x22\xab", "adds x0, x1, w2, sxtw"),
("\x20\xe0\x22\xab", "adds x0, x1, x2, sxtx"),
("\x20\x00\x02\xab", "adds x0, x1, x2, lsl #0"),
("\x20\x04\x02\xab", "adds x0, x1, x2, lsl #1"),
("\x20\x20\x02\xab", "adds x0, x1, x2, lsl #8"),
("\x20\x40\x02\xab", "adds x0, x1, x2, lsl #16"),
("\x20\x80\x02\xab", "adds x0, x1, x2, lsl #32"),
("\x20\x84\x02\xab", "adds x0, x1, x2, lsl #33"),
("\x20\x88\x02\xab", "adds x0, x1, x2, lsl #34"),
("\x20\x00\x42\xab", "adds x0, x1, x2, lsr #0"),
("\x20\x04\x42\xab", "adds x0, x1, x2, lsr #1"),
("\x20\x20\x42\xab", "adds x0, x1, x2, lsr #8"),
("\x20\x40\x42\xab", "adds x0, x1, x2, lsr #16"),
("\x20\x80\x42\xab", "adds x0, x1, x2, lsr #32"),
("\x20\x84\x42\xab", "adds x0, x1, x2, lsr #33"),
("\x20\x88\x42\xab", "adds x0, x1, x2, lsr #34"),
("\x20\x20\x82\xab", "adds x0, x1, x2, asr #8"),
("\x20\x40\x82\xab", "adds x0, x1, x2, asr #16"),
("\x20\x80\x82\xab", "adds x0, x1, x2, asr #32"),
("\x20\x84\x82\xab", "adds x0, x1, x2, asr #33"),
("\x20\x88\x82\xab", "adds x0, x1, x2, asr #34"),
("\x20\x88\x82\xab", "adds x0, x1, x2, asr #34"),
("\x20\x88\x19\xb1", "adds x0, x1, #1634"),
("\x20\x58\x21\xb1", "adds x0, x1, #2134"),
("\x20\x58\x61\xb1", "adds x0, x1, #2134, lsl #12"),
("\x00\x00\x00\xab", "adds x0, x0, x0"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x00\x04\x00\xb1", "adds x0, x0, #1"),
("\x20\x00\x02\x9a", "adc x0, x1, x2"),
("\x20\x00\x02\x1a", "adc w0, w1, w2"),
("\x20\x1a\x09\x30", "adr x0, #0x12345"),
("\xe1\xff\x7f\x70", "adr x1, #0xfffff"),
("\xc1\x7c\x00\xd0", "adrp x1, #0xf9a000"),
("\x41\x0c\x00\xf0", "adrp x1, #0x18b000"),
("\xe1\xff\x9f\xd2", "movz x1, #0xffff"),
("\x22\x00\x80\xd2", "movz x2, #0x1"),
("\x20\x1c\x40\x92", "and x0, x1, #0xff"),
("\x20\x00\x40\x92", "and x0, x1, #0x01"),
("\x20\x00\x7c\x92", "and x0, x1, #0x10"),
("\x20\x00\x02\x8a", "and x0, x1, x2"),
("\x20\x04\x02\x8a", "and x0, x1, x2, lsl #1"),
("\x20\x08\x02\x8a", "and x0, x1, x2, lsl #2"),
("\x20\x0c\x02\x8a", "and x0, x1, x2, lsl #3"),
("\x20\x10\x02\x8a", "and x0, x1, x2, lsl #4"),
("\x20\x1c\x40\xf2", "ands x0, x1, #0xff"),
("\x20\x00\x40\xf2", "ands x0, x1, #0x01"),
("\x20\x00\x7c\xf2", "ands x0, x1, #0x10"),
("\x20\x00\x02\xea", "ands x0, x1, x2"),
("\x20\x04\x02\xea", "ands x0, x1, x2, lsl #1"),
("\x20\x08\x02\xea", "ands x0, x1, x2, lsl #2"),
("\x20\x0c\x02\xea", "ands x0, x1, x2, lsl #3"),
("\x20\x10\x02\xea", "ands x0, x1, x2, lsl #4"),
("\x3f\x1c\x40\xf2", "tst x1, #0xff"),
("\x3f\x00\x40\xf2", "tst x1, #0x01"),
("\x3f\x00\x7c\xf2", "tst x1, #0x10"),
("\x3f\x00\x02\xea", "tst x1, x2"),
("\x3f\x04\x02\xea", "tst x1, x2, lsl #1"),
("\x3f\x08\x02\xea", "tst x1, x2, lsl #2"),
("\x3f\x0c\x02\xea", "tst x1, x2, lsl #3"),
("\x3f\x10\x02\xea", "tst x1, x2, lsl #4"),
("\x20\xfc\x41\x93", "asr x0, x1, #1"),
("\x20\xfc\x42\x93", "asr x0, x1, #2"),
("\x20\xfc\x43\x93", "asr x0, x1, #3"),
("\x20\xfc\x44\x93", "asr x0, x1, #4"),
("\x20\xfc\x44\x93", "asr x0, x1, #4"),
("\x20\xfc\x7f\x93", "asr x0, x1, #63"),
("\xe1\xff\x9f\xd2", "movz x1, #0xffff"),
("\x22\x00\x80\xd2", "movz x2, #0x1"),
("\x20\x28\xc2\x9a", "asr x0, x1, x2"),
("\x42\x00\x80\xd2", "movz x2, #0x2"),
("\x20\x28\xc2\x9a", "asr x0, x1, x2"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\xcb", "sub x0, x1, w2, sxtb"),
("\x20\xa0\x22\xcb", "sub x0, x1, w2, sxth"),
("\x20\xc0\x22\xcb", "sub x0, x1, w2, sxtw"),
("\x20\xe0\x22\xcb", "sub x0, x1, x2, sxtx"),
("\x20\x00\x02\xcb", "sub x0, x1, x2, lsl #0"),
("\x20\x04\x02\xcb", "sub x0, x1, x2, lsl #1"),
("\x20\x20\x02\xcb", "sub x0, x1, x2, lsl #8"),
("\x20\x40\x02\xcb", "sub x0, x1, x2, lsl #16"),
("\x20\x80\x02\xcb", "sub x0, x1, x2, lsl #32"),
("\x20\x84\x02\xcb", "sub x0, x1, x2, lsl #33"),
("\x20\x88\x02\xcb", "sub x0, x1, x2, lsl #34"),
("\x20\x00\x42\xcb", "sub x0, x1, x2, lsr #0"),
("\x20\x04\x42\xcb", "sub x0, x1, x2, lsr #1"),
("\x20\x20\x42\xcb", "sub x0, x1, x2, lsr #8"),
("\x20\x40\x42\xcb", "sub x0, x1, x2, lsr #16"),
("\x20\x80\x42\xcb", "sub x0, x1, x2, lsr #32"),
("\x20\x84\x42\xcb", "sub x0, x1, x2, lsr #33"),
("\x20\x88\x42\xcb", "sub x0, x1, x2, lsr #34"),
("\x20\x20\x82\xcb", "sub x0, x1, x2, asr #8"),
("\x20\x40\x82\xcb", "sub x0, x1, x2, asr #16"),
("\x20\x80\x82\xcb", "sub x0, x1, x2, asr #32"),
("\x20\x84\x82\xcb", "sub x0, x1, x2, asr #33"),
("\x20\x88\x82\xcb", "sub x0, x1, x2, asr #34"),
("\x20\x88\x82\xcb", "sub x0, x1, x2, asr #34"),
("\x20\x88\x19\xd1", "sub x0, x1, #1634"),
("\x20\x58\x21\xd1", "sub x0, x1, #2134"),
("\x20\x58\x61\xd1", "sub x0, x1, #2134, lsl #12"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x80\x22\xeb", "subs x0, x1, w2, sxtb"),
("\x20\xa0\x22\xeb", "subs x0, x1, w2, sxth"),
("\x20\xc0\x22\xeb", "subs x0, x1, w2, sxtw"),
("\x20\xe0\x22\xeb", "subs x0, x1, x2, sxtx"),
("\x20\x00\x02\xeb", "subs x0, x1, x2, lsl #0"),
("\x20\x04\x02\xeb", "subs x0, x1, x2, lsl #1"),
("\x20\x20\x02\xeb", "subs x0, x1, x2, lsl #8"),
("\x20\x40\x02\xeb", "subs x0, x1, x2, lsl #16"),
("\x20\x80\x02\xeb", "subs x0, x1, x2, lsl #32"),
("\x20\x84\x02\xeb", "subs x0, x1, x2, lsl #33"),
("\x20\x88\x02\xeb", "subs x0, x1, x2, lsl #34"),
("\x20\x00\x42\xeb", "subs x0, x1, x2, lsr #0"),
("\x20\x04\x42\xeb", "subs x0, x1, x2, lsr #1"),
("\x20\x20\x42\xeb", "subs x0, x1, x2, lsr #8"),
("\x20\x40\x42\xeb", "subs x0, x1, x2, lsr #16"),
("\x20\x80\x42\xeb", "subs x0, x1, x2, lsr #32"),
("\x20\x84\x42\xeb", "subs x0, x1, x2, lsr #33"),
("\x20\x88\x42\xeb", "subs x0, x1, x2, lsr #34"),
("\x20\x20\x82\xeb", "subs x0, x1, x2, asr #8"),
("\x20\x40\x82\xeb", "subs x0, x1, x2, asr #16"),
("\x20\x80\x82\xeb", "subs x0, x1, x2, asr #32"),
("\x20\x84\x82\xeb", "subs x0, x1, x2, asr #33"),
("\x20\x88\x82\xeb", "subs x0, x1, x2, asr #34"),
("\x20\x88\x82\xeb", "subs x0, x1, x2, asr #34"),
("\x20\x88\x19\xf1", "subs x0, x1, #1634"),
("\x20\x58\x21\xf1", "subs x0, x1, #2134"),
("\x20\x58\x61\xf1", "subs x0, x1, #2134, lsl #12"),
("\x20\x00\x02\xca", "eor x0, x1, x2, lsl #0"),
("\x20\x04\x02\xca", "eor x0, x1, x2, lsl #1"),
("\x20\x20\x02\xca", "eor x0, x1, x2, lsl #8"),
("\x20\x40\x02\xca", "eor x0, x1, x2, lsl #16"),
("\x20\x80\x02\xca", "eor x0, x1, x2, lsl #32"),
("\x20\x84\x02\xca", "eor x0, x1, x2, lsl #33"),
("\x20\x88\x02\xca", "eor x0, x1, x2, lsl #34"),
("\x20\x00\x42\xca", "eor x0, x1, x2, lsr #0"),
("\x20\x04\x42\xca", "eor x0, x1, x2, lsr #1"),
("\x20\x20\x42\xca", "eor x0, x1, x2, lsr #8"),
("\x20\x40\x42\xca", "eor x0, x1, x2, lsr #16"),
("\x20\x80\x42\xca", "eor x0, x1, x2, lsr #32"),
("\x20\x84\x42\xca", "eor x0, x1, x2, lsr #33"),
("\x20\x88\x42\xca", "eor x0, x1, x2, lsr #34"),
("\x20\x20\x82\xca", "eor x0, x1, x2, asr #8"),
("\x20\x40\x82\xca", "eor x0, x1, x2, asr #16"),
("\x20\x80\x82\xca", "eor x0, x1, x2, asr #32"),
("\x20\x84\x82\xca", "eor x0, x1, x2, asr #33"),
("\x20\x88\x82\xca", "eor x0, x1, x2, asr #34"),
("\x20\x88\x82\xca", "eor x0, x1, x2, asr #34"),
("\x20\x1c\x40\xd2", "eor x0, x1, #255"),
("\x20\x18\x40\xd2", "eor x0, x1, #0x7f"),
("\x20\x00\x40\xd2", "eor x0, x1, #1"),
("\x20\x00\x22\xca", "eon x0, x1, x2, lsl #0"),
("\x20\x04\x22\xca", "eon x0, x1, x2, lsl #1"),
("\x20\x20\x22\xca", "eon x0, x1, x2, lsl #8"),
("\x20\x40\x22\xca", "eon x0, x1, x2, lsl #16"),
("\x20\x80\x22\xca", "eon x0, x1, x2, lsl #32"),
("\x20\x84\x22\xca", "eon x0, x1, x2, lsl #33"),
("\x20\x88\x22\xca", "eon x0, x1, x2, lsl #34"),
("\x20\x00\x62\xca", "eon x0, x1, x2, lsr #0"),
("\x20\x04\x62\xca", "eon x0, x1, x2, lsr #1"),
("\x20\x20\x62\xca", "eon x0, x1, x2, lsr #8"),
("\x20\x40\x62\xca", "eon x0, x1, x2, lsr #16"),
("\x20\x80\x62\xca", "eon x0, x1, x2, lsr #32"),
("\x20\x84\x62\xca", "eon x0, x1, x2, lsr #33"),
("\x20\x88\x62\xca", "eon x0, x1, x2, lsr #34"),
("\x20\x20\xa2\xca", "eon x0, x1, x2, asr #8"),
("\x20\x40\xa2\xca", "eon x0, x1, x2, asr #16"),
("\x20\x80\xa2\xca", "eon x0, x1, x2, asr #32"),
("\x20\x84\xa2\xca", "eon x0, x1, x2, asr #33"),
("\x20\x88\xa2\xca", "eon x0, x1, x2, asr #34"),
("\x20\x88\xa2\xca", "eon x0, x1, x2, asr #34"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x00\x22\xaa", "orn x0, x1, x2"),
("\x40\x00\x21\xaa", "orn x0, x2, x1"),
("\x41\x00\x20\xaa", "orn x1, x2, x0"),
("\x01\x00\x22\xaa", "orn x1, x0, x2"),
("\x20\x04\x22\xaa", "orn x0, x1, x2, lsl #1"),
("\x20\x08\x22\xaa", "orn x0, x1, x2, lsl #2"),
("\x20\x0c\x22\xaa", "orn x0, x1, x2, lsl #3"),
("\x20\x04\xe2\xaa", "orn x0, x1, x2, ror #1"),
("\x20\x08\xe2\xaa", "orn x0, x1, x2, ror #2"),
("\x20\x0c\xe2\xaa", "orn x0, x1, x2, ror #3"),
("\x82\x46\x82\xd2", "movz x2, #0x1234"),
("\x01\xcf\x8a\xd2", "movz x1, #0x5678"),
("\x20\x00\x02\xaa", "orr x0, x1, x2"),
("\x40\x00\x01\xaa", "orr x0, x2, x1"),
("\x41\x00\x00\xaa", "orr x1, x2, x0"),
("\x01\x00\x02\xaa", "orr x1, x0, x2"),
("\x20\x04\x02\xaa", "orr x0, x1, x2, lsl #1"),
("\x20\x08\x02\xaa", "orr x0, x1, x2, lsl #2"),
("\x20\x0c\x02\xaa", "orr x0, x1, x2, lsl #3"),
("\x20\x04\xc2\xaa", "orr x0, x1, x2, ror #1"),
("\x20\x08\xc2\xaa", "orr x0, x1, x2, ror #2"),
("\x20\x0c\xc2\xaa", "orr x0, x1, x2, ror #3"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\x00\x40\xf9", "ldr x5, [x1]"),
("\x26\x04\x40\xf8", "ldr x6, [x1], #0"),
("\x27\x44\x40\xf8", "ldr x7, [x1], #4"),
("\x28\x68\x62\xf8", "ldr x8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\x24\x5e\xf8", "ldr x9, [x1], #-30"),
("\x2a\x8c\x40\xf8", "ldr x10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x40\xf9", "ldr x11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\x00\x40\x39", "ldrb w5, [x1]"),
("\x26\x04\x40\x38", "ldrb w6, [x1], #0"),
("\x27\x44\x40\x38", "ldrb w7, [x1], #4"),
("\x28\x68\x62\x38", "ldrb w8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\x24\x5e\x38", "ldrb w9, [x1], #-30"),
("\x2a\x8c\x40\x38", "ldrb w10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x40\x39", "ldrb w11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\x00\x40\x79", "ldrh w5, [x1]"),
("\x26\x04\x40\x78", "ldrh w6, [x1], #0"),
("\x27\x44\x40\x78", "ldrh w7, [x1], #4"),
("\x28\x68\x62\x78", "ldrh w8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\x24\x5e\x78", "ldrh w9, [x1], #-30"),
("\x2a\x8c\x40\x78", "ldrh w10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x40\x79", "ldrh w11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x24\x14\x40\xa9", "ldp x4, x5, [x1]"),
("\x25\x18\xc0\xa8", "ldp x5, x6, [x1], #0"),
("\x26\x9c\xc0\xa8", "ldp x6, x7, [x1], #8"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x28\x24\xfe\xa8", "ldp x8, x9, [x1], #-32"),
("\x29\x28\xc1\xa9", "ldp x9, x10, [x1, #16]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xea\x2f\x40\xa9", "ldp x10, x11, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x24\x14\x40\x29", "ldp w4, w5, [x1]"),
("\x25\x18\xc0\x28", "ldp w5, w6, [x1], #0"),
("\x26\x1c\xc1\x28", "ldp w6, w7, [x1], #8"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x28\x24\xfc\x28", "ldp w8, w9, [x1], #-32"),
("\x29\x28\xc2\x29", "ldp w9, w10, [x1, #16]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xea\x2f\x40\x29", "ldp w10, w11, [sp]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x21\x30\x00\x91", "add x1, x1, #12"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x10\x40\xf8", "ldur x0, [x1, #1]"),
("\x20\x20\x40\xf8", "ldur x0, [x1, #2]"),
("\x20\x30\x40\xf8", "ldur x0, [x1, #3]"),
("\x20\x40\x40\xf8", "ldur x0, [x1, #4]"),
("\x20\xf0\x5f\xf8", "ldur x0, [x1, #-1]"),
("\x20\xe0\x5f\xf8", "ldur x0, [x1, #-2]"),
("\x20\xd0\x5f\xf8", "ldur x0, [x1, #-3]"),
("\x20\xc0\x5f\xf8", "ldur x0, [x1, #-4]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x40\x38", "ldurb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x10\x40\x38", "ldurb w0, [x1, #1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x20\x40\x38", "ldurb w0, [x1, #2]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x30\x40\x38", "ldurb w0, [x1, #3]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x40\x40\x38", "ldurb w0, [x1, #4]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xf0\x5f\x38", "ldurb w0, [x1, #0xffffffffffffffff]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xe0\x5f\x38", "ldurb w0, [x1, #0xfffffffffffffffe]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xd0\x5f\x38", "ldurb w0, [x1, #0xfffffffffffffffd]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xc0\x5f\x38", "ldurb w0, [x1, #0xfffffffffffffffc]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x40\x78", "ldurh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x10\x40\x78", "ldurh w0, [x1, #1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x20\x40\x78", "ldurh w0, [x1, #2]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x30\x40\x78", "ldurh w0, [x1, #3]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x40\x40\x78", "ldurh w0, [x1, #4]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xf0\x5f\x78", "ldurh w0, [x1, #0xffffffffffffffff]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xe0\x5f\x78", "ldurh w0, [x1, #0xfffffffffffffffe]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xd0\x5f\x78", "ldurh w0, [x1, #0xfffffffffffffffd]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\xc0\x5f\x78", "ldurh w0, [x1, #0xfffffffffffffffc]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x21\x30\x00\x91", "add x1, x1, #12"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x38", "ldursb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x78", "ldursh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x78", "ldursh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb8", "ldursw x0, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x38", "ldursb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x38", "ldursb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x78", "ldursh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x78", "ldursh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb8", "ldursw x0, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x21\x30\x00\x91", "add x1, x1, #12"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x39", "ldrsb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x79", "ldrsh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x79", "ldrsh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb9", "ldrsw x0, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x39", "ldrsb x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x39", "ldrsb w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\xc0\x79", "ldrsh w0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\x79", "ldrsh x0, [x1]"),
("\x20\x00\x40\xf8", "ldur x0, [x1]"),
("\x20\x00\x80\xb9", "ldrsw x0, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x06\xa0\xd2", "movz x2, #0x30, lsl #16"),
("\x42\x78\x00\x91", "add x2, x2, #30"),
("\x23\x00\x40\xf8", "ldur x3, [x1]"),
("\x44\x00\x40\xf8", "ldur x4, [x2]"),
("\x60\x00\xc4\x93", "extr x0, x3, x4, #0"),
("\x60\x04\xc4\x93", "extr x0, x3, x4, #1"),
("\x60\x08\xc4\x93", "extr x0, x3, x4, #2"),
("\x60\x0c\xc4\x93", "extr x0, x3, x4, #2"),
("\x60\x78\xc4\x93", "extr x0, x3, x4, #30"),
("\x60\xfc\xc4\x93", "extr x0, x3, x4, #63"),
("\x60\x00\x84\x13", "extr w0, w3, w4, #0"),
("\x60\x04\x84\x13", "extr w0, w3, w4, #1"),
("\x60\x08\x84\x13", "extr w0, w3, w4, #2"),
("\x60\x0c\x84\x13", "extr w0, w3, w4, #3"),
("\x60\x7c\x84\x13", "extr w0, w3, w4, #31"),
("\x01\x00\x00\x14", "b #4"),
    ("\x01\x00\x00\x94", "bl #4"),
("\x80\x0c\x90\xb7", "tbnz x0, #0x32, #0x190"),
("\x20\x00\x90\xb6", "tbz x0, #0x32, #4"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\x02\x06\xa0\xd2", "movz x2, #0x20, lsl #16"),
("\xe1\x03\x02\xaa", "mov x1, x2"),
("\x3f\x00\x00\x91", "mov sp, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xe0\x03\x21\xaa", "mvn x0, x1"),
("\xe0\x03\x01\xcb", "neg x0, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x20\x0c\x02\x9b", "madd x0, x1, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x00\x0c\x02\x9b", "madd x0, x0, x2, x3"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x7c\x02\x9b", "mul x0, x1, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x00\x7c\x02\x9b", "mul x0, x0, x2"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x20\x8c\x02\x9b", "msub x0, x1, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x00\x8c\x02\x9b", "msub x0, x0, x2, x3"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\xfc\x02\x9b", "mneg x0, x1, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\x00\xfc\x02\x9b", "mneg x0, x0, x2"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x00\xfc\x02\x1b", "mneg w0, w0, w2"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x64\xa0\x84\xd2", "movz x4, #9475"),
("\xe5\x24\x81\xd2", "movz x5, #2343"),
("\xa6\xaf\x81\xd2", "movz x6, #3453"),
("\x87\x3a\x82\xd2", "movz x7, #4564"),
("\xe8\x16\x84\xd2", "movz x8, #8375"),
("\xe9\xc1\x84\xd2", "movz x9, #9743"),
("\xea\xaa\x82\xd2", "movz x10, #5463"),
("\x2b\xf8\x80\xd2", "movz x11, #1985"),
("\x25\x00\x00\xf9", "str x5, [x1]"),
("\x26\x04\x00\xf8", "str x6, [x1], #0"),
("\x27\x44\x00\xf8", "str x7, [x1], #4"),
("\x28\x68\x22\xf8", "str x8, [x1, x2]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\x24\x1e\xf8", "str x9, [x1], #-30"),
("\x2a\x8c\x00\xf8", "str x10, [x1, #8]!"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\x03\x00\xf9", "str x11, [sp]"),
("\x25\x00\x00\xf8", "stur x5, [x1]"),
("\x26\x00\x00\x38", "sturb w6, [x1]"),
("\x27\x00\x00\x78", "sturh w7, [x1]"),
("\x29\x00\x00\xf9", "str x9, [x1]"),
("\x2a\x00\x00\x39", "strb w10, [x1]"),
("\x2b\x00\x00\x79", "strh w11, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\xe5\x24\x81\xd2", "movz x5, #2343"),
("\xa6\xaf\x81\xd2", "movz x6, #3453"),
("\x87\x3a\x82\xd2", "movz x7, #4564"),
("\xe8\x16\x84\xd2", "movz x8, #8375"),
("\xe9\xc1\x84\xd2", "movz x9, #9743"),
("\xea\xaa\x82\xd2", "movz x10, #5463"),
("\x25\x18\x00\xa9", "stp x5, x6, [x1]"),
("\x27\x20\x80\xa8", "stp x7, x8, [x1], #0"),
("\x29\xa8\x80\xa8", "stp x9, x10, [x1], #8"),
("\x25\x20\x82\xa9", "stp x5, x8, [x1, #32]!"),
("\x26\x1c\x01\xa9", "stp x6, x7, [x1, #16]"),
("\x25\x18\x00\x29", "stp w5, w6, [x1]"),
("\x27\x20\x80\x28", "stp w7, w8, [x1], #0"),
("\x29\x28\x81\x28", "stp w9, w10, [x1], #8"),
("\x25\x20\x84\x29", "stp w5, w8, [x1, #32]!"),
("\x26\x1c\x02\x29", "stp w6, w7, [x1, #16]"),
("\xc1\xbd\x9b\xd2", "movz x1, #0xddee"),
("\x20\x1c\x40\x93", "sxtb x0, x1"),
("\x20\x3c\x40\x93", "sxth x0, x1"),
("\x20\x7c\x40\x93", "sxtw x0, x1"),
("\x20\x1c\x00\x53", "uxtb w0, w1"),
("\x20\x3c\x00\x53", "uxth w0, w1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x00\x82\x9a", "csel x0, x1, x2, eq"),
("\x40\x00\x81\x9a", "csel x0, x2, x1, eq"),
("\x20\x10\x82\x9a", "csel x0, x1, x2, ne"),
("\x40\x10\x81\x9a", "csel x0, x2, x1, ne"),
("\x20\x04\x82\x9a", "csinc x0, x1, x2, eq"),
("\x40\x04\x81\x9a", "csinc x0, x2, x1, eq"),
("\x20\x14\x82\x9a", "csinc x0, x1, x2, ne"),
("\x40\x14\x81\x9a", "csinc x0, x2, x1, ne"),
("\x20\x04\x82\xda", "csneg x0, x1, x2, eq"),
("\x40\x04\x81\xda", "csneg x0, x2, x1, eq"),
("\x20\x14\x82\xda", "csneg x0, x1, x2, ne"),
("\x40\x14\x81\xda", "csneg x0, x2, x1, ne"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\x20\xf8\x7f\xd3", "lsl x0, x1, #1"),
("\x20\xf4\x7e\xd3", "lsl x0, x1, #2"),
("\x20\xf0\x7d\xd3", "lsl x0, x1, #3"),
("\x20\xec\x7c\xd3", "lsl x0, x1, #4"),
("\x20\xfc\x41\xd3", "lsr x0, x1, #1"),
("\x20\xfc\x42\xd3", "lsr x0, x1, #2"),
("\x20\xfc\x43\xd3", "lsr x0, x1, #3"),
("\x20\xfc\x44\xd3", "lsr x0, x1, #4"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x20\xc2\x9a", "lsl x0, x1, x2"),
("\x20\x24\xc2\x9a", "lsr x0, x1, x2"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\xc1\x88\x83\xd2", "movz x1, #7238"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xeb", "cmp x1, x2"),
("\x5f\x00\x01\xeb", "cmp x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\x01\x00\x80\xd2", "movz x1, #0"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\xc1\x88\x83\xd2", "movz x1, #7238"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x3f\x00\x02\xab", "cmn x1, x2"),
("\x5f\x00\x01\xab", "cmn x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x20\x0c\xa2\x9b", "umaddl x0, w1, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x00\x0c\xa2\x9b", "umaddl x0, w0, w2, x3"),
("\x20\x8c\xa2\x9b", "umsubl x0, w1, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\x00\x8c\xa2\x9b", "umsubl x0, w0, w2, x3"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\xa3\xd5\x9b\xd2", "movz x3, #0xdead"),
("\x20\x0c\x22\x9b", "smaddl x0, w1, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\x00\x0c\x22\x9b", "smaddl x0, w0, w2, x3"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\xa3\xd5\x9b\xd2", "movz x3, #0xdead"),
("\x20\x8c\x22\x9b", "smsubl x0, w1, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\x00\x8c\x22\x9b", "smsubl x0, w0, w2, x3"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\x20\x7c\x22\x9b", "smull x0, w1, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\x00\x7c\x22\x9b", "smull x0, w0, w2"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\x20\x7c\x42\x9b", "smulh x0, x1, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x00\x7c\x42\x9b", "smulh x0, x0, x2"),
("\x01\x06\xa0\x92", "movn x1, #0x30, lsl #16"),
("\x02\x02\x80\x92", "movn x2, #16"),
("\x63\xa0\x84\x92", "movn x3, #9475"),
("\x64\xa0\x84\x92", "movn x4, #9475"),
("\xe5\x24\x81\x92", "movn x5, #2343"),
("\xa6\xaf\x81\x92", "movn x6, #3453"),
("\x87\x3a\x82\x92", "movn x7, #4564"),
("\xe8\x16\x84\x92", "movn x8, #8375"),
("\xe9\xc1\x84\x92", "movn x9, #9743"),
("\xea\xaa\x82\x92", "movn x10, #5463"),
("\x2b\xf8\x80\x92", "movn x11, #1985"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\x00\x40\xd3", "ubfx x0, x1, #0, #1"),
("\x20\x08\x40\xd3", "ubfx x0, x1, #0, #3"),
("\x20\x0c\x40\xd3", "ubfx x0, x1, #0, #4"),
("\x20\x10\x40\xd3", "ubfx x0, x1, #0, #5"),
("\x20\x78\x40\xd3", "ubfx x0, x1, #0, #31"),
("\x20\xf8\x40\xd3", "ubfx x0, x1, #0, #63"),
("\x20\xfc\x40\xd3", "ubfx x0, x1, #0, #64"),
("\x20\xfc\x41\xd3", "ubfx x0, x1, #1, #63"),
("\x20\xfc\x42\xd3", "ubfx x0, x1, #2, #62"),
("\x20\xfc\x43\xd3", "ubfx x0, x1, #3, #61"),
("\x20\xfc\x60\xd3", "ubfx x0, x1, #32, #32"),
("\x20\x4c\x4a\xd3", "ubfx x0, x1, #10, #10"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\x00\x40\x93", "sbfx x0, x1, #0, #1"),
("\x20\x08\x40\x93", "sbfx x0, x1, #0, #3"),
("\x20\x0c\x40\x93", "sbfx x0, x1, #0, #4"),
("\x20\x10\x40\x93", "sbfx x0, x1, #0, #5"),
("\x20\x78\x40\x93", "sbfx x0, x1, #0, #31"),
("\x20\xf8\x40\x93", "sbfx x0, x1, #0, #63"),
("\x20\xfc\x40\x93", "sbfx x0, x1, #0, #64"),
("\x20\xfc\x41\x93", "sbfx x0, x1, #1, #63"),
("\x20\xfc\x42\x93", "sbfx x0, x1, #2, #62"),
("\x20\xfc\x43\x93", "sbfx x0, x1, #3, #61"),
("\x20\xfc\x60\x93", "sbfx x0, x1, #32, #32"),
("\x20\x4c\x4a\x93", "sbfx x0, x1, #10, #10"),
("\x20\x48\x49\x93", "sbfx x0, x1, #9, #10"),
("\x20\x40\x47\x93", "sbfx x0, x1, #7, #10"),
("\x20\x3c\x47\x93", "sbfx x0, x1, #7, #9"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\x42\xd5\xbd\xd2", "movz x2, #0xeeaa, lsl #16"),
("\x20\x00\x42\xfa", "ccmp x1, x2, 0, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x21\x00\x42\xfa", "ccmp x1, x2, 1, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x22\x00\x42\xfa", "ccmp x1, x2, 2, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x23\x00\x42\xfa", "ccmp x1, x2, 3, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x24\x00\x42\xfa", "ccmp x1, x2, 4, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x25\x00\x42\xfa", "ccmp x1, x2, 5, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x26\x00\x42\xfa", "ccmp x1, x2, 6, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x27\x00\x42\xfa", "ccmp x1, x2, 7, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x28\x00\x42\xfa", "ccmp x1, x2, 8, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x29\x00\x42\xfa", "ccmp x1, x2, 9, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2a\x00\x42\xfa", "ccmp x1, x2, 10, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2b\x00\x42\xfa", "ccmp x1, x2, 11, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2c\x00\x42\xfa", "ccmp x1, x2, 12, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2d\x00\x42\xfa", "ccmp x1, x2, 13, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2e\x00\x42\xfa", "ccmp x1, x2, 14, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2f\x00\x42\xfa", "ccmp x1, x2, 15, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\xc1\xfd\xbf\xd2", "movz x1, #0xffee, lsl #16"),
("\xc2\xfd\xbf\xd2", "movz x2, #0xffee, lsl #16"),
("\x20\x00\x42\xfa", "ccmp x1, x2, 0, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x21\x00\x42\xfa", "ccmp x1, x2, 1, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x22\x00\x42\xfa", "ccmp x1, x2, 2, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x23\x00\x42\xfa", "ccmp x1, x2, 3, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x24\x00\x42\xfa", "ccmp x1, x2, 4, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x25\x00\x42\xfa", "ccmp x1, x2, 5, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x26\x00\x42\xfa", "ccmp x1, x2, 6, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x27\x00\x42\xfa", "ccmp x1, x2, 7, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x28\x00\x42\xfa", "ccmp x1, x2, 8, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x29\x00\x42\xfa", "ccmp x1, x2, 9, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2a\x00\x42\xfa", "ccmp x1, x2, 10, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2b\x00\x42\xfa", "ccmp x1, x2, 11, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2c\x00\x42\xfa", "ccmp x1, x2, 12, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2d\x00\x42\xfa", "ccmp x1, x2, 13, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2e\x00\x42\xfa", "ccmp x1, x2, 14, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x2f\x00\x42\xfa", "ccmp x1, x2, 15, eq"),
("\xe0\x17\x9f\x9a", "cset x0, eq"),
("\xe0\xb7\x9f\x9a", "cset x0, ge"),
("\xe0\xd7\x9f\x9a", "cset x0, gt"),
("\xe0\x97\x9f\x9a", "cset x0, hi"),
("\xe0\x37\x9f\x9a", "cset x0, hs"),
("\xe0\xc7\x9f\x9a", "cset x0, le"),
("\xe0\x27\x9f\x9a", "cset x0, lo"),
("\xe0\x87\x9f\x9a", "cset x0, ls"),
("\xe0\xa7\x9f\x9a", "cset x0, lt"),
("\xe0\x57\x9f\x9a", "cset x0, mi"),
("\xe0\x07\x9f\x9a", "cset x0, ne"),
("\xe0\x47\x9f\x9a", "cset x0, pl"),
("\xe0\x67\x9f\x9a", "cset x0, vc"),
("\xe0\x77\x9f\x9a", "cset x0, vs"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x41\x14\x82\x9a", "cinc x1, x2, eq"),
("\x41\x04\x82\x9a", "cinc x1, x2, ne"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\xfc\x40\xd3", "ubfiz x0, x1, #0, #64"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xf8\x7f\xd3", "ubfiz x0, x1, #1, #63"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xf4\x7e\xd3", "ubfiz x0, x1, #2, #62"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xf0\x7d\xd3", "ubfiz x0, x1, #3, #61"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xec\x7c\xd3", "ubfiz x0, x1, #4, #60"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xe8\x7b\xd3", "ubfiz x0, x1, #5, #59"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xe4\x7a\xd3", "ubfiz x0, x1, #6, #58"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xe0\x79\xd3", "ubfiz x0, x1, #7, #57"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\xdc\x78\xd3", "ubfiz x0, x1, #8, #56"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x7c\x7a\xd3", "ubfiz x0, x1, #6, #32"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x78\xd3", "ubfiz x0, x1, #8, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x41\xd3", "ubfiz x0, x1, #63, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x18\x53", "ubfiz w0, w1, #8, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x00\x01\x53", "ubfiz w0, w1, #31, #1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x00\x04\x00\xd1", "sub x0, x0, #1"),
("\x20\x7c\x00\x53", "ubfiz w0, w1, #0, #32"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x08\xc2\x9a", "udiv x0, x1, x2"),
("\x40\x08\xc1\x9a", "udiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x20\x08\xc2\x9a", "udiv x0, x1, x2"),
("\x40\x08\xc1\x9a", "udiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x0c\xc2\x9a", "sdiv x0, x1, x2"),
("\x40\x0c\xc1\x9a", "sdiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\x02\x00\x80\xd2", "movz x2, #0"),
("\x20\x0c\xc2\x9a", "sdiv x0, x1, x2"),
("\x40\x0c\xc1\x9a", "sdiv x0, x2, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x7c\xa2\x9b", "umull x0, w1, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\x00\x7c\xa2\x9b", "umull x0, w0, w2"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x20\x7c\xc1\x9b", "umulh x0, x1, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x00\x7c\xc1\x9b", "umulh x0, x0, x1"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\xfc\xa2\x9b", "umnegl x0, w1, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x00\xfc\xa2\x9b", "umnegl x0, w0, w2"),
("\x41\x9a\x80\xd2", "movz x1, #1234"),
("\xc2\x88\x83\xd2", "movz x2, #7238"),
("\x20\x2c\xc2\x9a", "ror x0, x1, x2"),
("\x40\x2c\xc1\x9a", "ror x0, x2, x1"),
("\x40\x00\xc2\x93", "ror x0, x2, #0"),
("\x40\x04\xc2\x93", "ror x0, x2, #1"),
("\x40\x08\xc2\x93", "ror x0, x2, #2"),
("\x40\x0c\xc2\x93", "ror x0, x2, #3"),
("\x40\x10\xc2\x93", "ror x0, x2, #4"),
("\x40\xf8\xc2\x93", "ror x0, x2, #62"),
("\x40\xfc\xc2\x93", "ror x0, x2, #63"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\x80\xd2", "mov x1, #0"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x41\x00\x80\xd2", "mov x1, #1 << 1"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x81\x00\x80\xd2", "mov x1, #1 << 2"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\x82\xd2", "mov x1, #1 << 12"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\x82\xd2", "mov x1, #1 << 12"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\xb0\xd2", "mov x1, #1 << 31"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x21\x00\xc0\xd2", "mov x1, #1 << 32"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x41\x00\xc0\xd2", "mov x1, #1 << 33"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\xe8\xd2", "mov x1, #1 << 62"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x01\x00\xf0\xd2", "mov x1, #1 << 63"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x00\x00\x80\x92", "movn x0, #0"),
("\x21\x00\x80\xd2", "mov x1, #1 << 64"),
("\x20\x10\xc0\xda", "clz x0, x1"),
("\x20\x10\xc0\x5a", "clz w0, w1"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\xdf\xc8", "ldar x5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\xfc\xdf\xc8", "ldar x9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\xdf\xc8", "ldar x11, [sp]"),
("\xff\xff\xdf\xc8", "ldar xzr, [sp]"),
("\xe7\xff\xdf\x88", "ldar w7, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\xdf\x08", "ldarb w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\xfc\xdf\x08", "ldarb w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\xdf\x08", "ldarb w11, [sp]"),
("\xff\xff\xdf\x08", "ldarb wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\xdf\x48", "ldarh w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\xfc\xdf\x48", "ldarh w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\xdf\x48", "ldarh w11, [sp]"),
("\xff\xff\xdf\x48", "ldarh wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\x5f\xc8", "ldaxr x5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\xfc\x5f\xc8", "ldaxr x9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x5f\xc8", "ldaxr x11, [sp]"),
("\xff\xff\x5f\xc8", "ldaxr xzr, [sp]"),
("\xe7\xff\x5f\x88", "ldaxr w7, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\x5f\x08", "ldaxrb w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\xfc\x5f\x08", "ldaxrb w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x5f\x08", "ldaxrb w11, [sp]"),
("\xff\xff\x5f\x08", "ldaxrb wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x25\xfc\x5f\x48", "ldaxrh w5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x29\xfc\x5f\x48", "ldaxrh w9, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x5f\x48", "ldaxrh w11, [sp]"),
("\xff\xff\x5f\x48", "ldaxrh wzr, [sp]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x02\x02\x80\xd2", "movz x2, #16"),
("\x63\xa0\x84\xd2", "movz x3, #9475"),
("\x64\xa0\x84\xd2", "movz x4, #9475"),
("\xe5\x24\x81\xd2", "movz x5, #2343"),
("\xa6\xaf\x81\xd2", "movz x6, #3453"),
("\x87\x3a\x82\xd2", "movz x7, #4564"),
("\xe8\x16\x84\xd2", "movz x8, #8375"),
("\xe9\xc1\x84\xd2", "movz x9, #9743"),
("\xea\xaa\x82\xd2", "movz x10, #5463"),
("\x2b\xf8\x80\xd2", "movz x11, #1985"),
("\x25\xfc\x9f\xc8", "stlr x5, [x1]"),
("\x01\x06\xa0\xd2", "movz x1, #0x30, lsl #16"),
("\x21\xc8\x00\x91", "add x1, x1, #50"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x3f\x10\x00\x91", "add sp, x1, #4"),
("\xeb\xff\x9f\xc8", "stlr x11, [sp]"),
("\x25\x00\x00\xf8", "stur x5, [x1]"),
("\x26\x00\x00\x38", "sturb w6, [x1]"),
("\x27\x00\x00\x78", "sturh w7, [x1]"),
("\x29\xfc\x9f\xc8", "stlr x9, [x1]"),
("\x2a\xfc\x9f\x08", "stlrb w10, [x1]"),
("\x2b\xfc\x9f\x48", "stlrh w11, [x1]"),
("\x01\x04\xa0\xd2", "movz x1, #0x20, lsl #16"),
("\x21\x30\x00\x91", "add x1, x1, #12"),
("\x20\x7c\x5f\xc8", "ldxr x0, [x1]"),
("\x21\x30\x00\x91", "add x1, x1, #12"),
("\x20\x7c\x5f\x08", "ldxrb w0, [x1]"),
("\x21\x30\x00\x91", "add x1, x1, #12"),
("\x20\x7c\x5f\x48", "ldxrh w0, [x1]"),
("\xc1\xfd\xff\xd2", "movz x1, #0xffee, lsl #48"),
("\x81\xb9\xdb\xf2", "movk x1, #0xddcc, lsl #32"),
("\x41\x75\xb7\xf2", "movk x1, #0xbbaa, lsl #16"),
("\x01\x31\x93\xf2", "movk x1, #0x9988"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x0c\xc0\xda", "rev x0, x1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x08\xc0\x5a", "rev w0, w1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x04\xc0\xda", "rev16 x0, x1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x04\xc0\x5a", "rev16 w0, w1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x08\xc0\xda", "rev32 x0, x1"),
("\x00\x00\x80\xd2", "movz x0, #0"),
("\x20\x00\xc0\xda", "rbit x0, x1"),
("\x20\x00\xc0\x5a", "rbit w0, w1"),
("\x20\x00\x80\xd2", "movz x0, #1"),
("\x20\x00\xc0\xda", "rbit x0, x1"),
("\x20\x00\xc0\x5a", "rbit w0, w1"),
]
def emu_with_unicorn(opcode, istate):
    """Run one AArch64 instruction under Unicorn and capture the state.

    The whole CODE listing is first written at ADDR so that the program
    counter carried in ``istate`` points at valid instruction bytes; the
    stack/heap areas, all general-purpose registers, pc, sp and the NZCV
    flags are then seeded from ``istate`` and exactly one instruction is
    emulated.

    :param opcode: raw encoding of the instruction to execute; its length
                   determines where emulation stops.
    :param istate: input machine state ('stack', 'heap', 'x0'..'x30',
                   'pc', 'sp', 'n', 'z', 'c', 'v').
    :return: dict with the same keys describing the output machine state.
    """
    mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)

    # Map the code area and copy every opcode of the test listing into it.
    mu.mem_map(ADDR, SIZE)
    index = 0
    for op, _ in CODE:
        mu.mem_write(ADDR + index, op)
        index += len(op)

    # Seed memory areas and the full register/flag state from istate.
    mu.mem_write(STACK, bytes(istate['stack']))
    mu.mem_write(HEAP, bytes(istate['heap']))
    mu.reg_write(UC_ARM64_REG_X0, istate['x0'])
    mu.reg_write(UC_ARM64_REG_X1, istate['x1'])
    mu.reg_write(UC_ARM64_REG_X2, istate['x2'])
    mu.reg_write(UC_ARM64_REG_X3, istate['x3'])
    mu.reg_write(UC_ARM64_REG_X4, istate['x4'])
    mu.reg_write(UC_ARM64_REG_X5, istate['x5'])
    mu.reg_write(UC_ARM64_REG_X6, istate['x6'])
    mu.reg_write(UC_ARM64_REG_X7, istate['x7'])
    mu.reg_write(UC_ARM64_REG_X8, istate['x8'])
    mu.reg_write(UC_ARM64_REG_X9, istate['x9'])
    mu.reg_write(UC_ARM64_REG_X10, istate['x10'])
    mu.reg_write(UC_ARM64_REG_X11, istate['x11'])
    mu.reg_write(UC_ARM64_REG_X12, istate['x12'])
    mu.reg_write(UC_ARM64_REG_X13, istate['x13'])
    mu.reg_write(UC_ARM64_REG_X14, istate['x14'])
    mu.reg_write(UC_ARM64_REG_X15, istate['x15'])
    mu.reg_write(UC_ARM64_REG_X16, istate['x16'])
    mu.reg_write(UC_ARM64_REG_X17, istate['x17'])
    mu.reg_write(UC_ARM64_REG_X18, istate['x18'])
    mu.reg_write(UC_ARM64_REG_X19, istate['x19'])
    mu.reg_write(UC_ARM64_REG_X20, istate['x20'])
    mu.reg_write(UC_ARM64_REG_X21, istate['x21'])
    mu.reg_write(UC_ARM64_REG_X22, istate['x22'])
    mu.reg_write(UC_ARM64_REG_X23, istate['x23'])
    mu.reg_write(UC_ARM64_REG_X24, istate['x24'])
    mu.reg_write(UC_ARM64_REG_X25, istate['x25'])
    mu.reg_write(UC_ARM64_REG_X26, istate['x26'])
    mu.reg_write(UC_ARM64_REG_X27, istate['x27'])
    mu.reg_write(UC_ARM64_REG_X28, istate['x28'])
    mu.reg_write(UC_ARM64_REG_X29, istate['x29'])
    mu.reg_write(UC_ARM64_REG_X30, istate['x30'])
    mu.reg_write(UC_ARM64_REG_PC, istate['pc'])
    mu.reg_write(UC_ARM64_REG_SP, istate['sp'])
    # The NZCV flags live in bits 31..28 of the packed flag register.
    mu.reg_write(UC_ARM64_REG_NZCV, istate['n'] << 31 | istate['z'] << 30 | istate['c'] << 29 | istate['v'] << 28)

    # Emulate exactly one instruction.
    mu.emu_start(istate['pc'], istate['pc'] + len(opcode))

    # Collect the output state (the original dict listed "x30" twice;
    # duplicate dict keys are silently collapsed, so only one is kept).
    ostate = {
        "stack": mu.mem_read(STACK, 0x100),
        "heap":  mu.mem_read(HEAP, 0x100),
        "x0":    mu.reg_read(UC_ARM64_REG_X0),
        "x1":    mu.reg_read(UC_ARM64_REG_X1),
        "x2":    mu.reg_read(UC_ARM64_REG_X2),
        "x3":    mu.reg_read(UC_ARM64_REG_X3),
        "x4":    mu.reg_read(UC_ARM64_REG_X4),
        "x5":    mu.reg_read(UC_ARM64_REG_X5),
        "x6":    mu.reg_read(UC_ARM64_REG_X6),
        "x7":    mu.reg_read(UC_ARM64_REG_X7),
        "x8":    mu.reg_read(UC_ARM64_REG_X8),
        "x9":    mu.reg_read(UC_ARM64_REG_X9),
        "x10":   mu.reg_read(UC_ARM64_REG_X10),
        "x11":   mu.reg_read(UC_ARM64_REG_X11),
        "x12":   mu.reg_read(UC_ARM64_REG_X12),
        "x13":   mu.reg_read(UC_ARM64_REG_X13),
        "x14":   mu.reg_read(UC_ARM64_REG_X14),
        "x15":   mu.reg_read(UC_ARM64_REG_X15),
        "x16":   mu.reg_read(UC_ARM64_REG_X16),
        "x17":   mu.reg_read(UC_ARM64_REG_X17),
        "x18":   mu.reg_read(UC_ARM64_REG_X18),
        "x19":   mu.reg_read(UC_ARM64_REG_X19),
        "x20":   mu.reg_read(UC_ARM64_REG_X20),
        "x21":   mu.reg_read(UC_ARM64_REG_X21),
        "x22":   mu.reg_read(UC_ARM64_REG_X22),
        "x23":   mu.reg_read(UC_ARM64_REG_X23),
        "x24":   mu.reg_read(UC_ARM64_REG_X24),
        "x25":   mu.reg_read(UC_ARM64_REG_X25),
        "x26":   mu.reg_read(UC_ARM64_REG_X26),
        "x27":   mu.reg_read(UC_ARM64_REG_X27),
        "x28":   mu.reg_read(UC_ARM64_REG_X28),
        "x29":   mu.reg_read(UC_ARM64_REG_X29),
        "x30":   mu.reg_read(UC_ARM64_REG_X30),
        "pc":    mu.reg_read(UC_ARM64_REG_PC),
        "sp":    mu.reg_read(UC_ARM64_REG_SP),
        "n":     ((mu.reg_read(UC_ARM64_REG_NZCV) >> 31) & 1),
        "z":     ((mu.reg_read(UC_ARM64_REG_NZCV) >> 30) & 1),
        "c":     ((mu.reg_read(UC_ARM64_REG_NZCV) >> 29) & 1),
        "v":     ((mu.reg_read(UC_ARM64_REG_NZCV) >> 28) & 1),
    }
    return ostate
def emu_with_triton(opcode, istate):
    """Process one AArch64 instruction with Triton and capture the state.

    A fresh TritonContext is seeded with the stack/heap areas, every
    general-purpose register, pc, sp and the NZCV flags from ``istate``,
    then the single instruction ``opcode`` is processed symbolically.

    :param opcode: raw encoding of the instruction to process.
    :param istate: input machine state ('stack', 'heap', 'x0'..'x30',
                   'pc', 'sp', 'n', 'z', 'c', 'v').
    :return: dict with the same keys describing the output machine state.
    """
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.AARCH64)

    inst = Instruction(opcode)
    inst.setAddress(istate['pc'])

    # Seed memory areas and the full register/flag state from istate.
    ctx.setConcreteMemoryAreaValue(STACK, bytes(istate['stack']))
    ctx.setConcreteMemoryAreaValue(HEAP, bytes(istate['heap']))
    ctx.setConcreteRegisterValue(ctx.registers.x0, istate['x0'])
    ctx.setConcreteRegisterValue(ctx.registers.x1, istate['x1'])
    ctx.setConcreteRegisterValue(ctx.registers.x2, istate['x2'])
    ctx.setConcreteRegisterValue(ctx.registers.x3, istate['x3'])
    ctx.setConcreteRegisterValue(ctx.registers.x4, istate['x4'])
    ctx.setConcreteRegisterValue(ctx.registers.x5, istate['x5'])
    ctx.setConcreteRegisterValue(ctx.registers.x6, istate['x6'])
    ctx.setConcreteRegisterValue(ctx.registers.x7, istate['x7'])
    ctx.setConcreteRegisterValue(ctx.registers.x8, istate['x8'])
    ctx.setConcreteRegisterValue(ctx.registers.x9, istate['x9'])
    ctx.setConcreteRegisterValue(ctx.registers.x10, istate['x10'])
    ctx.setConcreteRegisterValue(ctx.registers.x11, istate['x11'])
    ctx.setConcreteRegisterValue(ctx.registers.x12, istate['x12'])
    ctx.setConcreteRegisterValue(ctx.registers.x13, istate['x13'])
    ctx.setConcreteRegisterValue(ctx.registers.x14, istate['x14'])
    ctx.setConcreteRegisterValue(ctx.registers.x15, istate['x15'])
    ctx.setConcreteRegisterValue(ctx.registers.x16, istate['x16'])
    ctx.setConcreteRegisterValue(ctx.registers.x17, istate['x17'])
    ctx.setConcreteRegisterValue(ctx.registers.x18, istate['x18'])
    ctx.setConcreteRegisterValue(ctx.registers.x19, istate['x19'])
    ctx.setConcreteRegisterValue(ctx.registers.x20, istate['x20'])
    ctx.setConcreteRegisterValue(ctx.registers.x21, istate['x21'])
    ctx.setConcreteRegisterValue(ctx.registers.x22, istate['x22'])
    ctx.setConcreteRegisterValue(ctx.registers.x23, istate['x23'])
    ctx.setConcreteRegisterValue(ctx.registers.x24, istate['x24'])
    ctx.setConcreteRegisterValue(ctx.registers.x25, istate['x25'])
    ctx.setConcreteRegisterValue(ctx.registers.x26, istate['x26'])
    ctx.setConcreteRegisterValue(ctx.registers.x27, istate['x27'])
    ctx.setConcreteRegisterValue(ctx.registers.x28, istate['x28'])
    ctx.setConcreteRegisterValue(ctx.registers.x29, istate['x29'])
    ctx.setConcreteRegisterValue(ctx.registers.x30, istate['x30'])
    ctx.setConcreteRegisterValue(ctx.registers.pc, istate['pc'])
    ctx.setConcreteRegisterValue(ctx.registers.sp, istate['sp'])
    ctx.setConcreteRegisterValue(ctx.registers.n, istate['n'])
    ctx.setConcreteRegisterValue(ctx.registers.z, istate['z'])
    ctx.setConcreteRegisterValue(ctx.registers.c, istate['c'])
    ctx.setConcreteRegisterValue(ctx.registers.v, istate['v'])

    # Symbolically process exactly one instruction.
    ctx.processing(inst)

    # Collect the output state (the original dict listed "x30" twice;
    # duplicate dict keys are silently collapsed, so only one is kept).
    ostate = {
        "stack": ctx.getConcreteMemoryAreaValue(STACK, 0x100),
        "heap":  ctx.getConcreteMemoryAreaValue(HEAP, 0x100),
        "x0":    ctx.getSymbolicRegisterValue(ctx.registers.x0),
        "x1":    ctx.getSymbolicRegisterValue(ctx.registers.x1),
        "x2":    ctx.getSymbolicRegisterValue(ctx.registers.x2),
        "x3":    ctx.getSymbolicRegisterValue(ctx.registers.x3),
        "x4":    ctx.getSymbolicRegisterValue(ctx.registers.x4),
        "x5":    ctx.getSymbolicRegisterValue(ctx.registers.x5),
        "x6":    ctx.getSymbolicRegisterValue(ctx.registers.x6),
        "x7":    ctx.getSymbolicRegisterValue(ctx.registers.x7),
        "x8":    ctx.getSymbolicRegisterValue(ctx.registers.x8),
        "x9":    ctx.getSymbolicRegisterValue(ctx.registers.x9),
        "x10":   ctx.getSymbolicRegisterValue(ctx.registers.x10),
        "x11":   ctx.getSymbolicRegisterValue(ctx.registers.x11),
        "x12":   ctx.getSymbolicRegisterValue(ctx.registers.x12),
        "x13":   ctx.getSymbolicRegisterValue(ctx.registers.x13),
        "x14":   ctx.getSymbolicRegisterValue(ctx.registers.x14),
        "x15":   ctx.getSymbolicRegisterValue(ctx.registers.x15),
        "x16":   ctx.getSymbolicRegisterValue(ctx.registers.x16),
        "x17":   ctx.getSymbolicRegisterValue(ctx.registers.x17),
        "x18":   ctx.getSymbolicRegisterValue(ctx.registers.x18),
        "x19":   ctx.getSymbolicRegisterValue(ctx.registers.x19),
        "x20":   ctx.getSymbolicRegisterValue(ctx.registers.x20),
        "x21":   ctx.getSymbolicRegisterValue(ctx.registers.x21),
        "x22":   ctx.getSymbolicRegisterValue(ctx.registers.x22),
        "x23":   ctx.getSymbolicRegisterValue(ctx.registers.x23),
        "x24":   ctx.getSymbolicRegisterValue(ctx.registers.x24),
        "x25":   ctx.getSymbolicRegisterValue(ctx.registers.x25),
        "x26":   ctx.getSymbolicRegisterValue(ctx.registers.x26),
        "x27":   ctx.getSymbolicRegisterValue(ctx.registers.x27),
        "x28":   ctx.getSymbolicRegisterValue(ctx.registers.x28),
        "x29":   ctx.getSymbolicRegisterValue(ctx.registers.x29),
        "x30":   ctx.getSymbolicRegisterValue(ctx.registers.x30),
        "pc":    ctx.getSymbolicRegisterValue(ctx.registers.pc),
        "sp":    ctx.getSymbolicRegisterValue(ctx.registers.sp),
        "n":     ctx.getSymbolicRegisterValue(ctx.registers.n),
        "z":     ctx.getSymbolicRegisterValue(ctx.registers.z),
        "c":     ctx.getSymbolicRegisterValue(ctx.registers.c),
        "v":     ctx.getSymbolicRegisterValue(ctx.registers.v),
    }
    return ostate
def diff_state(state1, state2):
    """Print every field whose value differs between the two states.

    Memory areas ('heap'/'stack') are reported by name only, without
    dumping their contents; all other (integer) fields are shown in hex
    for both the Unicorn (UC) and Triton (TT) sides.
    """
    memory_keys = ('heap', 'stack')
    for key, expected in state1.items():
        actual = state2[key]
        if expected == actual:
            continue
        if key in memory_keys:
            print('\t%s: (UC) != (TT)' % (key))
        else:
            print('\t%s: %#x (UC) != %#x (TT)' % (key, expected, actual))
    return
if __name__ == '__main__':
    # Initial machine state.  The stack is filled with descending byte
    # values and the heap with ascending ones so that memory accesses are
    # easy to recognise in a diff.  bytearray is used instead of joining
    # chr() values so that bytes(state['stack']) works on Python 3 as
    # well as Python 2 (bytes(str) raises TypeError on Python 3).
    state = {
        "stack": bytearray(255 - i for i in range(256)),
        "heap":  bytearray(i for i in range(256)),
        "x0": 0,
        "x1": 0,
        "x2": 0,
        "x3": 0,
        "x4": 0,
        "x5": 0,
        "x6": 0,
        "x7": 0,
        "x8": 0,
        "x9": 0,
        "x10": 0,
        "x11": 0,
        "x12": 0,
        "x13": 0,
        "x14": 0,
        "x15": 0,
        "x16": 0,
        "x17": 0,
        "x18": 0,
        "x19": 0,
        "x20": 0,
        "x21": 0,
        "x22": 0,
        "x23": 0,
        "x24": 0,
        "x25": 0,
        "x26": 0,
        "x27": 0,
        "x28": 0,
        "x29": 0,
        "x30": 0,  # listed once; the original dict repeated this key
        "pc": ADDR,
        "sp": STACK,
        "n": 0,
        "z": 0,
        "c": 0,
        "v": 0,
    }

    # Run every opcode through both emulators, feed the Triton output
    # state into the next instruction, and stop at the first divergence.
    for opcode, disassembly in CODE:
        try:
            uc_state = emu_with_unicorn(opcode, state)
            tt_state = emu_with_triton(opcode, state)
        except Exception as e:
            print('[KO] %s' % (disassembly))
            print('\t%s' % (e))
            sys.exit(-1)

        if uc_state != tt_state:
            print('[KO] %s' % (disassembly))
            diff_state(uc_state, tt_state)
            sys.exit(-1)

        print('[OK] %s' % (disassembly))
        state = tt_state

    sys.exit(0)
| true
| true
|
1c4a8e4c20813902ae05d3230d53ca2da1867ab2
| 432
|
py
|
Python
|
vendor/migrations/0006_vendor_sam_url.py
|
brethauer/mirage
|
396f61206bf76f997c0535277af918058aa1b827
|
[
"CC0-1.0"
] | null | null | null |
vendor/migrations/0006_vendor_sam_url.py
|
brethauer/mirage
|
396f61206bf76f997c0535277af918058aa1b827
|
[
"CC0-1.0"
] | null | null | null |
vendor/migrations/0006_vendor_sam_url.py
|
brethauer/mirage
|
396f61206bf76f997c0535277af918058aa1b827
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable ``sam_url`` URL field to the ``Vendor`` model."""

    # Must run after the migration that added the sam_exclusion field.
    dependencies = [
        ('vendor', '0005_vendor_sam_exclusion'),
    ]

    operations = [
        migrations.AddField(
            model_name='vendor',
            name='sam_url',
            # Nullable so existing Vendor rows need no default value.
            field=models.URLField(null=True),
            preserve_default=True,
        ),
    ]
| 20.571429
| 48
| 0.597222
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable ``sam_url`` URL field to the ``Vendor`` model."""

    # Must run after the migration that added the sam_exclusion field.
    dependencies = [
        ('vendor', '0005_vendor_sam_exclusion'),
    ]

    operations = [
        migrations.AddField(
            model_name='vendor',
            name='sam_url',
            # Nullable so existing Vendor rows need no default value.
            field=models.URLField(null=True),
            preserve_default=True,
        ),
    ]
| true
| true
|
1c4a8e8166224c62899fba51d3fc55b147695668
| 2,248
|
py
|
Python
|
tests/unit/trace/test_status.py
|
bshaffer/opencensus-python
|
c624558c6829982d3464a5df29b48952f1fe23bc
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/trace/test_status.py
|
bshaffer/opencensus-python
|
c624558c6829982d3464a5df29b48952f1fe23bc
|
[
"Apache-2.0"
] | 1
|
2021-06-10T23:59:36.000Z
|
2021-06-10T23:59:36.000Z
|
tests/unit/trace/test_status.py
|
bshaffer/opencensus-python
|
c624558c6829982d3464a5df29b48952f1fe23bc
|
[
"Apache-2.0"
] | 1
|
2019-09-01T06:00:13.000Z
|
2019-09-01T06:00:13.000Z
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google.rpc import code_pb2
from opencensus.trace import status as status_module
class TestStatus(unittest.TestCase):
    """Unit tests for ``opencensus.trace.status.Status``."""

    def test_constructor(self):
        """A new Status keeps its code and message; details default to None."""
        status = status_module.Status(code=100, message='test message')

        self.assertEqual(status.code, 100)
        self.assertEqual(status.message, 'test message')
        self.assertIsNone(status.details)

    def test_format_status_json_with_details(self):
        """The JSON form carries a 'details' entry when details are set."""
        details = [
            {
                '@type': 'string',
                'field1': 'value',
            },
        ]
        status = status_module.Status(
            code=100, message='test message', details=details)

        self.assertEqual(
            {
                'code': 100,
                'message': 'test message',
                'details': details,
            },
            status.format_status_json())

    def test_format_status_json_without_details(self):
        """The JSON form omits 'details' when none were provided."""
        status = status_module.Status(code=100, message='test message')

        self.assertEqual(
            {
                'code': 100,
                'message': 'test message',
            },
            status.format_status_json())

    def test_create_from_exception(self):
        """from_exception copies the message and uses the UNKNOWN code."""
        status = status_module.Status.from_exception(
            ValueError('test message'))

        self.assertEqual(status.message, 'test message')
        self.assertEqual(status.code, code_pb2.UNKNOWN)
| 30.794521
| 74
| 0.656139
|
import unittest
from google.rpc import code_pb2
from opencensus.trace import status as status_module
class TestStatus(unittest.TestCase):
def test_constructor(self):
code = 100
message = 'test message'
status = status_module.Status(code=code, message=message)
self.assertEqual(status.code, code)
self.assertEqual(status.message, message)
self.assertIsNone(status.details)
def test_format_status_json_with_details(self):
code = 100
message = 'test message'
details = [
{
'@type': 'string',
'field1': 'value',
},
]
status = status_module.Status(
code=code, message=message, details=details)
status_json = status.format_status_json()
expected_status_json = {
'code': code,
'message': message,
'details': details
}
self.assertEqual(expected_status_json, status_json)
def test_format_status_json_without_details(self):
code = 100
message = 'test message'
status = status_module.Status(code=code, message=message)
status_json = status.format_status_json()
expected_status_json = {
'code': code,
'message': message
}
self.assertEqual(expected_status_json, status_json)
def test_create_from_exception(self):
message = 'test message'
exc = ValueError(message)
status = status_module.Status.from_exception(exc)
self.assertEqual(status.message, message)
self.assertEqual(status.code, code_pb2.UNKNOWN)
| true
| true
|
1c4a900d6a3357b0f159b0070ace473e54b8c9e3
| 81
|
py
|
Python
|
CodeWars/7 Kyu/Coding 3min- Father and Son.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Coding 3min- Father and Son.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Coding 3min- Father and Son.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def sc(s):
xs = set(s)
return ''.join(c for c in s if c.swapcase() in xs)
| 27
| 54
| 0.555556
|
def sc(s):
xs = set(s)
return ''.join(c for c in s if c.swapcase() in xs)
| true
| true
|
1c4a91d777b5ea2a8f62b27d050a042816f1964a
| 9,570
|
py
|
Python
|
var/spack/repos/builtin/packages/mvapich2/package.py
|
NickRF/spack
|
edecdc3ace7cbf5df2dcc090da3d1827c4099ebc
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/mvapich2/package.py
|
NickRF/spack
|
edecdc3ace7cbf5df2dcc090da3d1827c4099ebc
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/mvapich2/package.py
|
NickRF/spack
|
edecdc3ace7cbf5df2dcc090da3d1827c4099ebc
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack import *
class Mvapich2(AutotoolsPackage):
"""MVAPICH2 is an MPI implementation for Infiniband networks."""
homepage = "http://mvapich.cse.ohio-state.edu/"
url = "http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.2.tar.gz"
list_url = "http://mvapich.cse.ohio-state.edu/downloads/"
version('2.3rc2', '6fcf22fe2a16023b462ef57614daa357')
version('2.3rc1', '386d79ae36b2136d203826465ad8b6cc')
version('2.3a', '87c3fbf8a755b53806fa9ecb21453445')
# Prefer the latest stable release
version('2.3', sha256='01d5fb592454ddd9ecc17e91c8983b6aea0e7559aa38f410b111c8ef385b50dd', preferred=True)
version('2.2', '939b65ebe5b89a5bc822cdab0f31f96e')
version('2.1', '0095ceecb19bbb7fb262131cb9c2cdd6')
version('2.0', '9fbb68a4111a8b6338e476dc657388b4')
provides('mpi')
provides('mpi@:3.0')
variant('debug', default=False,
description='Enable debug info and error messages at run-time')
variant('cuda', default=False,
description='Enable CUDA extension')
variant('regcache', default=True,
description='Enable memory registration cache')
# Accepted values are:
# single - No threads (MPI_THREAD_SINGLE)
# funneled - Only the main thread calls MPI (MPI_THREAD_FUNNELED)
# serialized - User serializes calls to MPI (MPI_THREAD_SERIALIZED)
# multiple - Fully multi-threaded (MPI_THREAD_MULTIPLE)
# runtime - Alias to "multiple"
variant(
'threads',
default='multiple',
values=('single', 'funneled', 'serialized', 'multiple'),
multi=False,
description='Control the level of thread support'
)
# 32 is needed when job size exceeds 32768 cores
variant(
'ch3_rank_bits',
default='32',
values=('16', '32'),
multi=False,
description='Number of bits allocated to the rank field (16 or 32)'
)
variant(
'process_managers',
description='List of the process managers to activate',
values=disjoint_sets(
('auto',), ('slurm',), ('hydra', 'gforker', 'remshell')
).prohibit_empty_set().with_error(
"'slurm' or 'auto' cannot be activated along with "
"other process managers"
).with_default('auto').with_non_feature_values('auto'),
)
variant(
'fabrics',
description='The fabric enabled for this build',
default='psm',
values=(
'psm', 'sock', 'nemesisib', 'nemesis', 'mrail', 'nemesisibtcp',
'nemesistcpib', 'nemesisofi'
)
)
variant(
'alloca',
default=False,
description='Use alloca to allocate temporary memory if available'
)
variant(
'file_systems',
description='List of the ROMIO file systems to activate',
values=auto_or_any_combination_of('lustre', 'gpfs', 'nfs', 'ufs'),
)
depends_on('findutils', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('zlib')
depends_on('libpciaccess', when=(sys.platform != 'darwin'))
depends_on('libxml2')
depends_on('cuda', when='+cuda')
depends_on('psm', when='fabrics=psm')
depends_on('rdma-core', when='fabrics=mrail')
depends_on('rdma-core', when='fabrics=nemesisib')
depends_on('rdma-core', when='fabrics=nemesistcpib')
depends_on('rdma-core', when='fabrics=nemesisibtcp')
depends_on('libfabric', when='fabrics=nemesisofi')
filter_compiler_wrappers(
'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort', relative_root='bin'
)
@property
def libs(self):
query_parameters = self.spec.last_query.extra_parameters
libraries = ['libmpi']
if 'cxx' in query_parameters:
libraries = ['libmpicxx'] + libraries
return find_libraries(
libraries, root=self.prefix, shared=True, recursive=True
)
@property
def process_manager_options(self):
spec = self.spec
other_pms = []
for x in ('hydra', 'gforker', 'remshell'):
if 'process_managers={0}'.format(x) in spec:
other_pms.append(x)
opts = []
if len(other_pms) > 0:
opts = ['--with-pm=%s' % ':'.join(other_pms)]
# See: http://slurm.schedmd.com/mpi_guide.html#mvapich2
if 'process_managers=slurm' in spec:
opts = [
'--with-pmi=pmi2',
'--with-pm=slurm'
]
return opts
@property
def network_options(self):
opts = []
# From here on I can suppose that only one variant has been selected
if 'fabrics=psm' in self.spec:
opts = [
"--with-device=ch3:psm",
"--with-psm={0}".format(self.spec['psm'].prefix)
]
elif 'fabrics=sock' in self.spec:
opts = ["--with-device=ch3:sock"]
elif 'fabrics=nemesistcpib' in self.spec:
opts = ["--with-device=ch3:nemesis:tcp,ib"]
elif 'fabrics=nemesisibtcp' in self.spec:
opts = ["--with-device=ch3:nemesis:ib,tcp"]
elif 'fabrics=nemesisib' in self.spec:
opts = ["--with-device=ch3:nemesis:ib"]
elif 'fabrics=nemesis' in self.spec:
opts = ["--with-device=ch3:nemesis"]
elif 'fabrics=mrail' in self.spec:
opts = ["--with-device=ch3:mrail", "--with-rdma=gen2",
"--disable-mcast"]
elif 'fabrics=nemesisofi' in self.spec:
opts = ["--with-device=ch3:nemesis:ofi",
"--with-ofi={0}".format(self.spec['libfabric'].prefix)]
return opts
@property
def file_system_options(self):
spec = self.spec
fs = []
for x in ('lustre', 'gpfs', 'nfs', 'ufs'):
if 'file_systems={0}'.format(x) in spec:
fs.append(x)
opts = []
if len(fs) > 0:
opts.append('--with-file-system=%s' % '+'.join(fs))
return opts
def setup_environment(self, spack_env, run_env):
spec = self.spec
# mvapich2 configure fails when F90 and F90FLAGS are set
spack_env.unset('F90')
spack_env.unset('F90FLAGS')
if 'process_managers=slurm' in spec:
run_env.set('SLURM_MPI_TYPE', 'pmi2')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
spack_env.set('MPICC', join_path(self.prefix.bin, 'mpicc'))
spack_env.set('MPICXX', join_path(self.prefix.bin, 'mpicxx'))
spack_env.set('MPIF77', join_path(self.prefix.bin, 'mpif77'))
spack_env.set('MPIF90', join_path(self.prefix.bin, 'mpif90'))
spack_env.set('MPICH_CC', spack_cc)
spack_env.set('MPICH_CXX', spack_cxx)
spack_env.set('MPICH_F77', spack_f77)
spack_env.set('MPICH_F90', spack_fc)
spack_env.set('MPICH_FC', spack_fc)
def setup_dependent_package(self, module, dependent_spec):
self.spec.mpicc = join_path(self.prefix.bin, 'mpicc')
self.spec.mpicxx = join_path(self.prefix.bin, 'mpicxx')
self.spec.mpifc = join_path(self.prefix.bin, 'mpif90')
self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
self.spec.mpicxx_shared_libs = [
join_path(self.prefix.lib, 'libmpicxx.{0}'.format(dso_suffix)),
join_path(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
@run_before('configure')
def die_without_fortran(self):
# Until we can pass variants such as +fortran through virtual
# dependencies depends_on('mpi'), require Fortran compiler to
# avoid delayed build errors in dependents.
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError(
'Mvapich2 requires both C and Fortran compilers!'
)
def configure_args(self):
spec = self.spec
args = [
'--enable-shared',
'--enable-romio',
'--disable-silent-rules',
'--disable-new-dtags',
'--enable-fortran=all',
"--enable-threads={0}".format(spec.variants['threads'].value),
"--with-ch3-rank-bits={0}".format(
spec.variants['ch3_rank_bits'].value),
]
args.extend(self.enable_or_disable('alloca'))
if '+debug' in self.spec:
args.extend([
'--disable-fast',
'--enable-error-checking=runtime',
'--enable-error-messages=all',
# Permits debugging with TotalView
'--enable-g=dbg',
'--enable-debuginfo'
])
else:
args.append('--enable-fast=all')
if '+cuda' in self.spec:
args.extend([
'--enable-cuda',
'--with-cuda={0}'.format(spec['cuda'].prefix)
])
else:
args.append('--disable-cuda')
if '+regcache' in self.spec:
args.append('--enable-registration-cache')
else:
args.append('--disable-registration-cache')
args.extend(self.process_manager_options)
args.extend(self.network_options)
args.extend(self.file_system_options)
return args
| 35.054945
| 109
| 0.591536
|
import sys
from spack import *
class Mvapich2(AutotoolsPackage):
homepage = "http://mvapich.cse.ohio-state.edu/"
url = "http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.2.tar.gz"
list_url = "http://mvapich.cse.ohio-state.edu/downloads/"
version('2.3rc2', '6fcf22fe2a16023b462ef57614daa357')
version('2.3rc1', '386d79ae36b2136d203826465ad8b6cc')
version('2.3a', '87c3fbf8a755b53806fa9ecb21453445')
version('2.3', sha256='01d5fb592454ddd9ecc17e91c8983b6aea0e7559aa38f410b111c8ef385b50dd', preferred=True)
version('2.2', '939b65ebe5b89a5bc822cdab0f31f96e')
version('2.1', '0095ceecb19bbb7fb262131cb9c2cdd6')
version('2.0', '9fbb68a4111a8b6338e476dc657388b4')
provides('mpi')
provides('mpi@:3.0')
variant('debug', default=False,
description='Enable debug info and error messages at run-time')
variant('cuda', default=False,
description='Enable CUDA extension')
variant('regcache', default=True,
description='Enable memory registration cache')
variant(
'threads',
default='multiple',
values=('single', 'funneled', 'serialized', 'multiple'),
multi=False,
description='Control the level of thread support'
)
variant(
'ch3_rank_bits',
default='32',
values=('16', '32'),
multi=False,
description='Number of bits allocated to the rank field (16 or 32)'
)
variant(
'process_managers',
description='List of the process managers to activate',
values=disjoint_sets(
('auto',), ('slurm',), ('hydra', 'gforker', 'remshell')
).prohibit_empty_set().with_error(
"'slurm' or 'auto' cannot be activated along with "
"other process managers"
).with_default('auto').with_non_feature_values('auto'),
)
variant(
'fabrics',
description='The fabric enabled for this build',
default='psm',
values=(
'psm', 'sock', 'nemesisib', 'nemesis', 'mrail', 'nemesisibtcp',
'nemesistcpib', 'nemesisofi'
)
)
variant(
'alloca',
default=False,
description='Use alloca to allocate temporary memory if available'
)
variant(
'file_systems',
description='List of the ROMIO file systems to activate',
values=auto_or_any_combination_of('lustre', 'gpfs', 'nfs', 'ufs'),
)
depends_on('findutils', type='build')
depends_on('bison', type='build')
depends_on('pkgconfig', type='build')
depends_on('zlib')
depends_on('libpciaccess', when=(sys.platform != 'darwin'))
depends_on('libxml2')
depends_on('cuda', when='+cuda')
depends_on('psm', when='fabrics=psm')
depends_on('rdma-core', when='fabrics=mrail')
depends_on('rdma-core', when='fabrics=nemesisib')
depends_on('rdma-core', when='fabrics=nemesistcpib')
depends_on('rdma-core', when='fabrics=nemesisibtcp')
depends_on('libfabric', when='fabrics=nemesisofi')
filter_compiler_wrappers(
'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort', relative_root='bin'
)
@property
def libs(self):
query_parameters = self.spec.last_query.extra_parameters
libraries = ['libmpi']
if 'cxx' in query_parameters:
libraries = ['libmpicxx'] + libraries
return find_libraries(
libraries, root=self.prefix, shared=True, recursive=True
)
@property
def process_manager_options(self):
spec = self.spec
other_pms = []
for x in ('hydra', 'gforker', 'remshell'):
if 'process_managers={0}'.format(x) in spec:
other_pms.append(x)
opts = []
if len(other_pms) > 0:
opts = ['--with-pm=%s' % ':'.join(other_pms)]
if 'process_managers=slurm' in spec:
opts = [
'--with-pmi=pmi2',
'--with-pm=slurm'
]
return opts
@property
def network_options(self):
opts = []
if 'fabrics=psm' in self.spec:
opts = [
"--with-device=ch3:psm",
"--with-psm={0}".format(self.spec['psm'].prefix)
]
elif 'fabrics=sock' in self.spec:
opts = ["--with-device=ch3:sock"]
elif 'fabrics=nemesistcpib' in self.spec:
opts = ["--with-device=ch3:nemesis:tcp,ib"]
elif 'fabrics=nemesisibtcp' in self.spec:
opts = ["--with-device=ch3:nemesis:ib,tcp"]
elif 'fabrics=nemesisib' in self.spec:
opts = ["--with-device=ch3:nemesis:ib"]
elif 'fabrics=nemesis' in self.spec:
opts = ["--with-device=ch3:nemesis"]
elif 'fabrics=mrail' in self.spec:
opts = ["--with-device=ch3:mrail", "--with-rdma=gen2",
"--disable-mcast"]
elif 'fabrics=nemesisofi' in self.spec:
opts = ["--with-device=ch3:nemesis:ofi",
"--with-ofi={0}".format(self.spec['libfabric'].prefix)]
return opts
@property
def file_system_options(self):
spec = self.spec
fs = []
for x in ('lustre', 'gpfs', 'nfs', 'ufs'):
if 'file_systems={0}'.format(x) in spec:
fs.append(x)
opts = []
if len(fs) > 0:
opts.append('--with-file-system=%s' % '+'.join(fs))
return opts
def setup_environment(self, spack_env, run_env):
spec = self.spec
spack_env.unset('F90')
spack_env.unset('F90FLAGS')
if 'process_managers=slurm' in spec:
run_env.set('SLURM_MPI_TYPE', 'pmi2')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
spack_env.set('MPICC', join_path(self.prefix.bin, 'mpicc'))
spack_env.set('MPICXX', join_path(self.prefix.bin, 'mpicxx'))
spack_env.set('MPIF77', join_path(self.prefix.bin, 'mpif77'))
spack_env.set('MPIF90', join_path(self.prefix.bin, 'mpif90'))
spack_env.set('MPICH_CC', spack_cc)
spack_env.set('MPICH_CXX', spack_cxx)
spack_env.set('MPICH_F77', spack_f77)
spack_env.set('MPICH_F90', spack_fc)
spack_env.set('MPICH_FC', spack_fc)
def setup_dependent_package(self, module, dependent_spec):
self.spec.mpicc = join_path(self.prefix.bin, 'mpicc')
self.spec.mpicxx = join_path(self.prefix.bin, 'mpicxx')
self.spec.mpifc = join_path(self.prefix.bin, 'mpif90')
self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
self.spec.mpicxx_shared_libs = [
join_path(self.prefix.lib, 'libmpicxx.{0}'.format(dso_suffix)),
join_path(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
@run_before('configure')
def die_without_fortran(self):
if (self.compiler.f77 is None) or (self.compiler.fc is None):
raise InstallError(
'Mvapich2 requires both C and Fortran compilers!'
)
def configure_args(self):
spec = self.spec
args = [
'--enable-shared',
'--enable-romio',
'--disable-silent-rules',
'--disable-new-dtags',
'--enable-fortran=all',
"--enable-threads={0}".format(spec.variants['threads'].value),
"--with-ch3-rank-bits={0}".format(
spec.variants['ch3_rank_bits'].value),
]
args.extend(self.enable_or_disable('alloca'))
if '+debug' in self.spec:
args.extend([
'--disable-fast',
'--enable-error-checking=runtime',
'--enable-error-messages=all',
'--enable-g=dbg',
'--enable-debuginfo'
])
else:
args.append('--enable-fast=all')
if '+cuda' in self.spec:
args.extend([
'--enable-cuda',
'--with-cuda={0}'.format(spec['cuda'].prefix)
])
else:
args.append('--disable-cuda')
if '+regcache' in self.spec:
args.append('--enable-registration-cache')
else:
args.append('--disable-registration-cache')
args.extend(self.process_manager_options)
args.extend(self.network_options)
args.extend(self.file_system_options)
return args
| true
| true
|
1c4a92c5094d36b0d5c48b0030aed8a13467ea5b
| 371
|
py
|
Python
|
arxiv/base/tests/test_factory.py
|
ibnesayeed/arxiv-base
|
9f49302370272792a0afc78debd039d249844c6c
|
[
"MIT"
] | 23
|
2019-01-10T22:01:18.000Z
|
2022-02-02T10:28:25.000Z
|
arxiv/base/tests/test_factory.py
|
ibnesayeed/arxiv-base
|
9f49302370272792a0afc78debd039d249844c6c
|
[
"MIT"
] | 57
|
2018-12-17T16:45:38.000Z
|
2021-12-14T14:20:58.000Z
|
arxiv/base/tests/test_factory.py
|
cul-it/arxiv-base-ui
|
a5beadf44c24f72e21313299bfafc1ffb9d28ac7
|
[
"MIT"
] | 5
|
2019-01-10T22:01:28.000Z
|
2021-11-05T12:25:31.000Z
|
from unittest import TestCase
from flask import Flask
from arxiv.base.factory import create_web_app
class TestBaseAppFactory(TestCase):
"""Tests for :mod:`arxiv.base.factory`."""
def test_create_web_app(self):
""":func:`.create_web_app` generates a :class:`.Flask` instance."""
app = create_web_app()
self.assertIsInstance(app, Flask)
| 26.5
| 75
| 0.703504
|
from unittest import TestCase
from flask import Flask
from arxiv.base.factory import create_web_app
class TestBaseAppFactory(TestCase):
def test_create_web_app(self):
app = create_web_app()
self.assertIsInstance(app, Flask)
| true
| true
|
1c4a940d0cdd6c7b7b1ab2fcfcbb3402c6214e07
| 18,464
|
py
|
Python
|
pypy3.9-v7.3.9-win64/Lib/sqlite3/test/userfunctions.py
|
LawrenceZ1A/MultipurposeProject
|
54d5898301d01c33dd771b29e2e19e20d3875a21
|
[
"Apache-2.0"
] | null | null | null |
pypy3.9-v7.3.9-win64/Lib/sqlite3/test/userfunctions.py
|
LawrenceZ1A/MultipurposeProject
|
54d5898301d01c33dd771b29e2e19e20d3875a21
|
[
"Apache-2.0"
] | 1
|
2022-02-22T00:59:49.000Z
|
2022-02-22T00:59:49.000Z
|
pypy3.9-v7.3.9-win64/Lib/sqlite3/test/userfunctions.py
|
LawrenceZ1A/MultipurposeProject
|
54d5898301d01c33dd771b29e2e19e20d3875a21
|
[
"Apache-2.0"
] | 1
|
2022-03-30T11:42:37.000Z
|
2022-03-30T11:42:37.000Z
|
# pysqlite2/test/userfunctions.py: tests for user-defined functions and
# aggregates.
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import unittest.mock
import sqlite3 as sqlite
def func_returntext():
return "foo"
def func_returntextwithnull():
return "1\x002"
def func_returnunicode():
return "bar"
def func_returnint():
return 42
def func_returnfloat():
return 3.14
def func_returnnull():
return None
def func_returnblob():
return b"blob"
def func_returnlonglong():
return 1<<31
def func_raiseexception():
5/0
def func_isstring(v):
return type(v) is str
def func_isint(v):
return type(v) is int
def func_isfloat(v):
return type(v) is float
def func_isnone(v):
return type(v) is type(None)
def func_isblob(v):
return isinstance(v, (bytes, memoryview))
def func_islonglong(v):
return isinstance(v, int) and v >= 1<<31
def func(*args):
return len(args)
class AggrNoStep:
def __init__(self):
pass
def finalize(self):
return 1
class AggrNoFinalize:
def __init__(self):
pass
def step(self, x):
pass
class AggrExceptionInInit:
def __init__(self):
5/0
def step(self, x):
pass
def finalize(self):
pass
class AggrExceptionInStep:
def __init__(self):
pass
def step(self, x):
5/0
def finalize(self):
return 42
class AggrExceptionInFinalize:
def __init__(self):
pass
def step(self, x):
pass
def finalize(self):
5/0
class AggrCheckType:
def __init__(self):
self.val = None
def step(self, whichType, val):
theType = {"str": str, "int": int, "float": float, "None": type(None),
"blob": bytes}
self.val = int(theType[whichType] is type(val))
def finalize(self):
return self.val
class AggrCheckTypes:
def __init__(self):
self.val = 0
def step(self, whichType, *vals):
theType = {"str": str, "int": int, "float": float, "None": type(None),
"blob": bytes}
for val in vals:
self.val += int(theType[whichType] is type(val))
def finalize(self):
return self.val
class AggrSum:
def __init__(self):
self.val = 0.0
def step(self, val):
self.val += val
def finalize(self):
return self.val
class AggrText:
def __init__(self):
self.txt = ""
def step(self, txt):
self.txt = self.txt + txt
def finalize(self):
return self.txt
class FunctionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.con.create_function("returntext", 0, func_returntext)
self.con.create_function("returntextwithnull", 0, func_returntextwithnull)
self.con.create_function("returnunicode", 0, func_returnunicode)
self.con.create_function("returnint", 0, func_returnint)
self.con.create_function("returnfloat", 0, func_returnfloat)
self.con.create_function("returnnull", 0, func_returnnull)
self.con.create_function("returnblob", 0, func_returnblob)
self.con.create_function("returnlonglong", 0, func_returnlonglong)
self.con.create_function("raiseexception", 0, func_raiseexception)
self.con.create_function("isstring", 1, func_isstring)
self.con.create_function("isint", 1, func_isint)
self.con.create_function("isfloat", 1, func_isfloat)
self.con.create_function("isnone", 1, func_isnone)
self.con.create_function("isblob", 1, func_isblob)
self.con.create_function("islonglong", 1, func_islonglong)
self.con.create_function("spam", -1, func)
self.con.execute("create table test(t text)")
def tearDown(self):
self.con.close()
def CheckFuncErrorOnCreate(self):
with self.assertRaises(sqlite.OperationalError):
self.con.create_function("bla", -100, lambda x: 2*x)
def CheckFuncRefCount(self):
def getfunc():
def f():
return 1
return f
f = getfunc()
globals()["foo"] = f
# self.con.create_function("reftest", 0, getfunc())
self.con.create_function("reftest", 0, f)
cur = self.con.cursor()
cur.execute("select reftest()")
def CheckFuncReturnText(self):
cur = self.con.cursor()
cur.execute("select returntext()")
val = cur.fetchone()[0]
self.assertEqual(type(val), str)
self.assertEqual(val, "foo")
def CheckFuncReturnTextWithNullChar(self):
cur = self.con.cursor()
res = cur.execute("select returntextwithnull()").fetchone()[0]
self.assertEqual(type(res), str)
self.assertEqual(res, "1\x002")
def CheckFuncReturnUnicode(self):
cur = self.con.cursor()
cur.execute("select returnunicode()")
val = cur.fetchone()[0]
self.assertEqual(type(val), str)
self.assertEqual(val, "bar")
def CheckFuncReturnInt(self):
cur = self.con.cursor()
cur.execute("select returnint()")
val = cur.fetchone()[0]
self.assertEqual(type(val), int)
self.assertEqual(val, 42)
def CheckFuncReturnFloat(self):
cur = self.con.cursor()
cur.execute("select returnfloat()")
val = cur.fetchone()[0]
self.assertEqual(type(val), float)
if val < 3.139 or val > 3.141:
self.fail("wrong value")
def CheckFuncReturnNull(self):
cur = self.con.cursor()
cur.execute("select returnnull()")
val = cur.fetchone()[0]
self.assertEqual(type(val), type(None))
self.assertEqual(val, None)
def CheckFuncReturnBlob(self):
cur = self.con.cursor()
cur.execute("select returnblob()")
val = cur.fetchone()[0]
self.assertEqual(type(val), bytes)
self.assertEqual(val, b"blob")
def CheckFuncReturnLongLong(self):
cur = self.con.cursor()
cur.execute("select returnlonglong()")
val = cur.fetchone()[0]
self.assertEqual(val, 1<<31)
def CheckFuncException(self):
cur = self.con.cursor()
with self.assertRaises(sqlite.OperationalError) as cm:
cur.execute("select raiseexception()")
cur.fetchone()
self.assertEqual(str(cm.exception), 'user-defined function raised exception')
def CheckParamString(self):
cur = self.con.cursor()
for text in ["foo", str()]:
with self.subTest(text=text):
cur.execute("select isstring(?)", (text,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamInt(self):
cur = self.con.cursor()
cur.execute("select isint(?)", (42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamFloat(self):
cur = self.con.cursor()
cur.execute("select isfloat(?)", (3.14,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamNone(self):
cur = self.con.cursor()
cur.execute("select isnone(?)", (None,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamBlob(self):
cur = self.con.cursor()
cur.execute("select isblob(?)", (memoryview(b"blob"),))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamLongLong(self):
cur = self.con.cursor()
cur.execute("select islonglong(?)", (1<<42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAnyArguments(self):
cur = self.con.cursor()
cur.execute("select spam(?, ?)", (1, 2))
val = cur.fetchone()[0]
self.assertEqual(val, 2)
# Regarding deterministic functions:
#
# Between 3.8.3 and 3.15.0, deterministic functions were only used to
# optimize inner loops, so for those versions we can only test if the
# sqlite machinery has factored out a call or not. From 3.15.0 and onward,
# deterministic functions were permitted in WHERE clauses of partial
# indices, which allows testing based on syntax, iso. the query optimizer.
@unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), "Requires SQLite 3.8.3 or higher")
def CheckFuncNonDeterministic(self):
mock = unittest.mock.Mock(return_value=None)
self.con.create_function("nondeterministic", 0, mock, deterministic=False)
if sqlite.sqlite_version_info < (3, 15, 0):
self.con.execute("select nondeterministic() = nondeterministic()")
self.assertEqual(mock.call_count, 2)
else:
with self.assertRaises(sqlite.OperationalError):
self.con.execute("create index t on test(t) where nondeterministic() is not null")
@unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), "Requires SQLite 3.8.3 or higher")
def CheckFuncDeterministic(self):
mock = unittest.mock.Mock(return_value=None)
self.con.create_function("deterministic", 0, mock, deterministic=True)
if sqlite.sqlite_version_info < (3, 15, 0):
self.con.execute("select deterministic() = deterministic()")
self.assertEqual(mock.call_count, 1)
else:
try:
self.con.execute("create index t on test(t) where deterministic() is not null")
except sqlite.OperationalError:
self.fail("Unexpected failure while creating partial index")
@unittest.skipIf(sqlite.sqlite_version_info >= (3, 8, 3), "SQLite < 3.8.3 needed")
def CheckFuncDeterministicNotSupported(self):
with self.assertRaises(sqlite.NotSupportedError):
self.con.create_function("deterministic", 0, int, deterministic=True)
def CheckFuncDeterministicKeywordOnly(self):
with self.assertRaises(TypeError):
self.con.create_function("deterministic", 0, int, True)
class AggregateTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
cur = self.con.cursor()
cur.execute("""
create table test(
t text,
i integer,
f float,
n,
b blob
)
""")
cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)",
("foo", 5, 3.14, None, memoryview(b"blob"),))
self.con.create_aggregate("nostep", 1, AggrNoStep)
self.con.create_aggregate("nofinalize", 1, AggrNoFinalize)
self.con.create_aggregate("excInit", 1, AggrExceptionInInit)
self.con.create_aggregate("excStep", 1, AggrExceptionInStep)
self.con.create_aggregate("excFinalize", 1, AggrExceptionInFinalize)
self.con.create_aggregate("checkType", 2, AggrCheckType)
self.con.create_aggregate("checkTypes", -1, AggrCheckTypes)
self.con.create_aggregate("mysum", 1, AggrSum)
self.con.create_aggregate("aggtxt", 1, AggrText)
def tearDown(self):
#self.cur.close()
#self.con.close()
pass
def CheckAggrErrorOnCreate(self):
with self.assertRaises(sqlite.OperationalError):
self.con.create_function("bla", -100, AggrSum)
def CheckAggrNoStep(self):
# XXX it's better to raise OperationalError in order to stop
# the query earlier.
cur = self.con.cursor()
with self.assertRaises(sqlite.OperationalError) as cm:
cur.execute("select nostep(t) from test")
self.assertEqual(str(cm.exception), "user-defined aggregate's 'step' method raised error")
def CheckAggrNoFinalize(self):
cur = self.con.cursor()
with self.assertRaises(sqlite.OperationalError) as cm:
cur.execute("select nofinalize(t) from test")
val = cur.fetchone()[0]
self.assertEqual(str(cm.exception), "user-defined aggregate's 'finalize' method raised error")
def CheckAggrExceptionInInit(self):
cur = self.con.cursor()
with self.assertRaises(sqlite.OperationalError) as cm:
cur.execute("select excInit(t) from test")
val = cur.fetchone()[0]
self.assertEqual(str(cm.exception), "user-defined aggregate's '__init__' method raised error")
def CheckAggrExceptionInStep(self):
cur = self.con.cursor()
with self.assertRaises(sqlite.OperationalError) as cm:
cur.execute("select excStep(t) from test")
val = cur.fetchone()[0]
self.assertEqual(str(cm.exception), "user-defined aggregate's 'step' method raised error")
def CheckAggrExceptionInFinalize(self):
cur = self.con.cursor()
with self.assertRaises(sqlite.OperationalError) as cm:
cur.execute("select excFinalize(t) from test")
val = cur.fetchone()[0]
self.assertEqual(str(cm.exception), "user-defined aggregate's 'finalize' method raised error")
def CheckAggrCheckParamStr(self):
cur = self.con.cursor()
cur.execute("select checkTypes('str', ?, ?)", ("foo", str()))
val = cur.fetchone()[0]
self.assertEqual(val, 2)
def CheckAggrCheckParamInt(self):
cur = self.con.cursor()
cur.execute("select checkType('int', ?)", (42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamsInt(self):
cur = self.con.cursor()
cur.execute("select checkTypes('int', ?, ?)", (42, 24))
val = cur.fetchone()[0]
self.assertEqual(val, 2)
def CheckAggrCheckParamFloat(self):
cur = self.con.cursor()
cur.execute("select checkType('float', ?)", (3.14,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamNone(self):
cur = self.con.cursor()
cur.execute("select checkType('None', ?)", (None,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamBlob(self):
cur = self.con.cursor()
cur.execute("select checkType('blob', ?)", (memoryview(b"blob"),))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckAggrSum(self):
cur = self.con.cursor()
cur.execute("delete from test")
cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
cur.execute("select mysum(i) from test")
val = cur.fetchone()[0]
self.assertEqual(val, 60)
def CheckAggrText(self):
cur = self.con.cursor()
for txt in ["foo", "1\x002"]:
with self.subTest(txt=txt):
cur.execute("select aggtxt(?) from test", (txt,))
val = cur.fetchone()[0]
self.assertEqual(val, txt)
class AuthorizerTests(unittest.TestCase):
    """A SELECT-only authorizer must make table t2 and column c2 unreadable."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # Anything other than a plain SELECT is denied outright, as is any
        # access touching table t2 or a column named c2.
        if action != sqlite.SQLITE_SELECT:
            return sqlite.SQLITE_DENY
        if arg1 == 't2' or arg2 == 'c2':
            return sqlite.SQLITE_DENY
        return sqlite.SQLITE_OK

    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.executescript("""
            create table t1 (c1, c2);
            create table t2 (c1, c2);
            insert into t1 (c1, c2) values (1, 2);
            insert into t2 (c1, c2) values (4, 5);
            """)
        # NOTE(review): this statement runs once *before* the authorizer is
        # installed — presumably so a cached statement exists for the
        # security check below; confirm.
        self.con.execute("select c2 from t2")
        self.con.set_authorizer(self.authorizer_cb)

    def tearDown(self):
        pass

    def test_table_access(self):
        with self.assertRaises(sqlite.DatabaseError) as ctx:
            self.con.execute("select * from t2")
        self.assertIn('prohibited', str(ctx.exception))

    def test_column_access(self):
        with self.assertRaises(sqlite.DatabaseError) as ctx:
            self.con.execute("select c2 from t1")
        self.assertIn('prohibited', str(ctx.exception))
class AuthorizerRaiseExceptionTests(AuthorizerTests):
    """Same access checks, but the callback raises instead of denying."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # An exception escaping the callback must be treated as a denial.
        if (action != sqlite.SQLITE_SELECT
                or arg1 == 't2' or arg2 == 'c2'):
            raise ValueError
        return sqlite.SQLITE_OK
class AuthorizerIllegalTypeTests(AuthorizerTests):
    """The callback returns a float, which is not a legal authorizer result."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # A non-int return value must be treated as a denial.
        if (action != sqlite.SQLITE_SELECT
                or arg1 == 't2' or arg2 == 'c2'):
            return 0.0
        return sqlite.SQLITE_OK
class AuthorizerLargeIntegerTests(AuthorizerTests):
    """The callback returns an int too large for 32 bits."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # An out-of-range return value must be treated as a denial.
        if (action != sqlite.SQLITE_SELECT
                or arg1 == 't2' or arg2 == 'c2'):
            return 2 ** 32
        return sqlite.SQLITE_OK
def suite():
    """Bundle every test class in this module into a single suite."""
    makeSuite = unittest.makeSuite
    return unittest.TestSuite([
        makeSuite(FunctionTests, "Check"),
        makeSuite(AggregateTests, "Check"),
        makeSuite(AuthorizerTests),
        makeSuite(AuthorizerRaiseExceptionTests),
        makeSuite(AuthorizerIllegalTypeTests),
        makeSuite(AuthorizerLargeIntegerTests),
    ])
def test():
    """Run the module's full suite through a text test runner."""
    unittest.TextTestRunner().run(suite())
# Run the suite when this file is executed as a script.
if __name__ == "__main__":
    test()
| 33.755027
| 102
| 0.620667
|
import unittest
import unittest.mock
import sqlite3 as sqlite
def func_returntext():
    """Zero-argument UDF returning a plain str."""
    return "foo"

def func_returntextwithnull():
    """Zero-argument UDF returning text with an embedded NUL character."""
    return "1\x002"

def func_returnunicode():
    """Zero-argument UDF returning a (unicode) str."""
    return "bar"

def func_returnint():
    """Zero-argument UDF returning an int."""
    return 42

def func_returnfloat():
    """Zero-argument UDF returning a float."""
    return 3.14

def func_returnnull():
    """Zero-argument UDF returning NULL."""
    return None

def func_returnblob():
    """Zero-argument UDF returning a bytes blob."""
    return b"blob"

def func_returnlonglong():
    """Zero-argument UDF returning a value that needs 64-bit storage."""
    return 1 << 31

def func_raiseexception():
    """Zero-argument UDF that always raises ZeroDivisionError."""
    return 5 / 0

def func_isstring(v):
    """True iff *v* arrived as exactly str (subclasses excluded)."""
    return type(v) is str

def func_isint(v):
    """True iff *v* arrived as exactly int (subclasses excluded)."""
    return type(v) is int

def func_isfloat(v):
    """True iff *v* arrived as exactly float (subclasses excluded)."""
    return type(v) is float

def func_isnone(v):
    """True iff *v* arrived as None."""
    return type(v) is type(None)

def func_isblob(v):
    """True iff *v* arrived as a binary blob (bytes or memoryview)."""
    return isinstance(v, (bytes, memoryview))

def func_islonglong(v):
    """True iff *v* is an int that does not fit in 32 bits."""
    return isinstance(v, int) and v >= (1 << 31)

def func(*args):
    """Variadic UDF reporting how many arguments it received."""
    return len(args)
class AggrNoStep:
    """Aggregate fixture that deliberately lacks a step() method."""

    def __init__(self):
        pass

    def finalize(self):
        return 1
class AggrNoFinalize:
    """Aggregate fixture that deliberately lacks a finalize() method."""

    def __init__(self):
        pass

    def step(self, x):
        pass
class AggrExceptionInInit:
    """Aggregate fixture whose constructor always raises ZeroDivisionError."""

    def __init__(self):
        5 / 0

    def step(self, x):
        pass

    def finalize(self):
        pass
class AggrExceptionInStep:
    """Aggregate fixture whose step() always raises ZeroDivisionError."""

    def __init__(self):
        pass

    def step(self, x):
        5 / 0

    def finalize(self):
        return 42
class AggrExceptionInFinalize:
    """Aggregate fixture whose finalize() always raises ZeroDivisionError."""

    def __init__(self):
        pass

    def step(self, x):
        pass

    def finalize(self):
        5 / 0
class AggrCheckType:
    """Aggregate recording whether its last value argument had the named type."""

    def __init__(self):
        self.val = None

    def step(self, whichType, val):
        expected = {"str": str, "int": int, "float": float,
                    "None": type(None), "blob": bytes}[whichType]
        self.val = int(expected is type(val))

    def finalize(self):
        return self.val
class AggrCheckTypes:
    """Aggregate counting how many value arguments had the named type."""

    def __init__(self):
        self.val = 0

    def step(self, whichType, *vals):
        expected = {"str": str, "int": int, "float": float,
                    "None": type(None), "blob": bytes}[whichType]
        self.val += sum(int(expected is type(v)) for v in vals)

    def finalize(self):
        return self.val
class AggrSum:
    """Aggregate fixture accumulating its inputs into a float total."""

    def __init__(self):
        self.val = 0.0

    def step(self, val):
        self.val = self.val + val

    def finalize(self):
        return self.val
class AggrText:
    """Aggregate fixture concatenating its text inputs in order."""

    def __init__(self):
        self.txt = ""

    def step(self, txt):
        self.txt += txt

    def finalize(self):
        return self.txt
class FunctionTests(unittest.TestCase):
    """Exercises scalar user-defined SQL functions (create_function).

    setUp registers one helper function per return/parameter type under
    test; each Check* method drives exactly one of them through a SELECT.
    """
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.create_function("returntext", 0, func_returntext)
        self.con.create_function("returntextwithnull", 0, func_returntextwithnull)
        self.con.create_function("returnunicode", 0, func_returnunicode)
        self.con.create_function("returnint", 0, func_returnint)
        self.con.create_function("returnfloat", 0, func_returnfloat)
        self.con.create_function("returnnull", 0, func_returnnull)
        self.con.create_function("returnblob", 0, func_returnblob)
        self.con.create_function("returnlonglong", 0, func_returnlonglong)
        self.con.create_function("raiseexception", 0, func_raiseexception)
        self.con.create_function("isstring", 1, func_isstring)
        self.con.create_function("isint", 1, func_isint)
        self.con.create_function("isfloat", 1, func_isfloat)
        self.con.create_function("isnone", 1, func_isnone)
        self.con.create_function("isblob", 1, func_isblob)
        self.con.create_function("islonglong", 1, func_islonglong)
        # narg=-1 registers a variadic function.
        self.con.create_function("spam", -1, func)
        self.con.execute("create table test(t text)")
    def tearDown(self):
        self.con.close()
    def CheckFuncErrorOnCreate(self):
        # An out-of-range argument count must be rejected at registration.
        with self.assertRaises(sqlite.OperationalError):
            self.con.create_function("bla", -100, lambda x: 2*x)
    def CheckFuncRefCount(self):
        # NOTE(review): also stores a global reference to the callable —
        # presumably to control its lifetime during the test; confirm.
        def getfunc():
            def f():
                return 1
            return f
        f = getfunc()
        globals()["foo"] = f
        self.con.create_function("reftest", 0, f)
        cur = self.con.cursor()
        cur.execute("select reftest()")
    def CheckFuncReturnText(self):
        cur = self.con.cursor()
        cur.execute("select returntext()")
        val = cur.fetchone()[0]
        self.assertEqual(type(val), str)
        self.assertEqual(val, "foo")
    def CheckFuncReturnTextWithNullChar(self):
        # An embedded NUL must survive the round trip through SQLite.
        cur = self.con.cursor()
        res = cur.execute("select returntextwithnull()").fetchone()[0]
        self.assertEqual(type(res), str)
        self.assertEqual(res, "1\x002")
    def CheckFuncReturnUnicode(self):
        cur = self.con.cursor()
        cur.execute("select returnunicode()")
        val = cur.fetchone()[0]
        self.assertEqual(type(val), str)
        self.assertEqual(val, "bar")
    def CheckFuncReturnInt(self):
        cur = self.con.cursor()
        cur.execute("select returnint()")
        val = cur.fetchone()[0]
        self.assertEqual(type(val), int)
        self.assertEqual(val, 42)
    def CheckFuncReturnFloat(self):
        cur = self.con.cursor()
        cur.execute("select returnfloat()")
        val = cur.fetchone()[0]
        self.assertEqual(type(val), float)
        # Tolerance band instead of exact float equality.
        if val < 3.139 or val > 3.141:
            self.fail("wrong value")
    def CheckFuncReturnNull(self):
        cur = self.con.cursor()
        cur.execute("select returnnull()")
        val = cur.fetchone()[0]
        self.assertEqual(type(val), type(None))
        self.assertEqual(val, None)
    def CheckFuncReturnBlob(self):
        cur = self.con.cursor()
        cur.execute("select returnblob()")
        val = cur.fetchone()[0]
        self.assertEqual(type(val), bytes)
        self.assertEqual(val, b"blob")
    def CheckFuncReturnLongLong(self):
        cur = self.con.cursor()
        cur.execute("select returnlonglong()")
        val = cur.fetchone()[0]
        self.assertEqual(val, 1<<31)
    def CheckFuncException(self):
        # A Python exception inside the UDF becomes OperationalError.
        cur = self.con.cursor()
        with self.assertRaises(sqlite.OperationalError) as cm:
            cur.execute("select raiseexception()")
            cur.fetchone()
        self.assertEqual(str(cm.exception), 'user-defined function raised exception')
    def CheckParamString(self):
        cur = self.con.cursor()
        for text in ["foo", str()]:
            with self.subTest(text=text):
                cur.execute("select isstring(?)", (text,))
                val = cur.fetchone()[0]
                self.assertEqual(val, 1)
    def CheckParamInt(self):
        cur = self.con.cursor()
        cur.execute("select isint(?)", (42,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckParamFloat(self):
        cur = self.con.cursor()
        cur.execute("select isfloat(?)", (3.14,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckParamNone(self):
        cur = self.con.cursor()
        cur.execute("select isnone(?)", (None,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckParamBlob(self):
        cur = self.con.cursor()
        cur.execute("select isblob(?)", (memoryview(b"blob"),))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckParamLongLong(self):
        cur = self.con.cursor()
        cur.execute("select islonglong(?)", (1<<42,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckAnyArguments(self):
        cur = self.con.cursor()
        cur.execute("select spam(?, ?)", (1, 2))
        val = cur.fetchone()[0]
        self.assertEqual(val, 2)
    @unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), "Requires SQLite 3.8.3 or higher")
    def CheckFuncNonDeterministic(self):
        mock = unittest.mock.Mock(return_value=None)
        self.con.create_function("nondeterministic", 0, mock, deterministic=False)
        if sqlite.sqlite_version_info < (3, 15, 0):
            # Non-deterministic functions are evaluated once per occurrence.
            self.con.execute("select nondeterministic() = nondeterministic()")
            self.assertEqual(mock.call_count, 2)
        else:
            # Newer SQLite rejects non-deterministic functions in partial
            # index expressions.
            with self.assertRaises(sqlite.OperationalError):
                self.con.execute("create index t on test(t) where nondeterministic() is not null")
    @unittest.skipIf(sqlite.sqlite_version_info < (3, 8, 3), "Requires SQLite 3.8.3 or higher")
    def CheckFuncDeterministic(self):
        mock = unittest.mock.Mock(return_value=None)
        self.con.create_function("deterministic", 0, mock, deterministic=True)
        if sqlite.sqlite_version_info < (3, 15, 0):
            # A deterministic function may be evaluated once and reused.
            self.con.execute("select deterministic() = deterministic()")
            self.assertEqual(mock.call_count, 1)
        else:
            try:
                self.con.execute("create index t on test(t) where deterministic() is not null")
            except sqlite.OperationalError:
                self.fail("Unexpected failure while creating partial index")
    @unittest.skipIf(sqlite.sqlite_version_info >= (3, 8, 3), "SQLite < 3.8.3 needed")
    def CheckFuncDeterministicNotSupported(self):
        with self.assertRaises(sqlite.NotSupportedError):
            self.con.create_function("deterministic", 0, int, deterministic=True)
    def CheckFuncDeterministicKeywordOnly(self):
        # 'deterministic' may only be passed as a keyword argument.
        with self.assertRaises(TypeError):
            self.con.create_function("deterministic", 0, int, True)
class AggregateTests(unittest.TestCase):
    """Exercises user-defined aggregates (create_aggregate) and the
    errors raised when their __init__/step/finalize hooks misbehave."""
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        cur = self.con.cursor()
        cur.execute("""
            create table test(
                t text,
                i integer,
                f float,
                n,
                b blob
            )
            """)
        cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)",
            ("foo", 5, 3.14, None, memoryview(b"blob"),))
        self.con.create_aggregate("nostep", 1, AggrNoStep)
        self.con.create_aggregate("nofinalize", 1, AggrNoFinalize)
        self.con.create_aggregate("excInit", 1, AggrExceptionInInit)
        self.con.create_aggregate("excStep", 1, AggrExceptionInStep)
        self.con.create_aggregate("excFinalize", 1, AggrExceptionInFinalize)
        self.con.create_aggregate("checkType", 2, AggrCheckType)
        self.con.create_aggregate("checkTypes", -1, AggrCheckTypes)
        self.con.create_aggregate("mysum", 1, AggrSum)
        self.con.create_aggregate("aggtxt", 1, AggrText)
    def tearDown(self):
        pass
    def CheckAggrErrorOnCreate(self):
        # An out-of-range argument count must be rejected at registration.
        with self.assertRaises(sqlite.OperationalError):
            self.con.create_function("bla", -100, AggrSum)
    def CheckAggrNoStep(self):
        # The missing step() surfaces as soon as the statement executes,
        # so unlike the tests below no fetch is needed here.
        cur = self.con.cursor()
        with self.assertRaises(sqlite.OperationalError) as cm:
            cur.execute("select nostep(t) from test")
        self.assertEqual(str(cm.exception), "user-defined aggregate's 'step' method raised error")
    def CheckAggrNoFinalize(self):
        cur = self.con.cursor()
        with self.assertRaises(sqlite.OperationalError) as cm:
            cur.execute("select nofinalize(t) from test")
            val = cur.fetchone()[0]
        self.assertEqual(str(cm.exception), "user-defined aggregate's 'finalize' method raised error")
    def CheckAggrExceptionInInit(self):
        cur = self.con.cursor()
        with self.assertRaises(sqlite.OperationalError) as cm:
            cur.execute("select excInit(t) from test")
            val = cur.fetchone()[0]
        self.assertEqual(str(cm.exception), "user-defined aggregate's '__init__' method raised error")
    def CheckAggrExceptionInStep(self):
        cur = self.con.cursor()
        with self.assertRaises(sqlite.OperationalError) as cm:
            cur.execute("select excStep(t) from test")
            val = cur.fetchone()[0]
        self.assertEqual(str(cm.exception), "user-defined aggregate's 'step' method raised error")
    def CheckAggrExceptionInFinalize(self):
        cur = self.con.cursor()
        with self.assertRaises(sqlite.OperationalError) as cm:
            cur.execute("select excFinalize(t) from test")
            val = cur.fetchone()[0]
        self.assertEqual(str(cm.exception), "user-defined aggregate's 'finalize' method raised error")
    def CheckAggrCheckParamStr(self):
        cur = self.con.cursor()
        cur.execute("select checkTypes('str', ?, ?)", ("foo", str()))
        val = cur.fetchone()[0]
        self.assertEqual(val, 2)
    def CheckAggrCheckParamInt(self):
        cur = self.con.cursor()
        cur.execute("select checkType('int', ?)", (42,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckAggrCheckParamsInt(self):
        cur = self.con.cursor()
        cur.execute("select checkTypes('int', ?, ?)", (42, 24))
        val = cur.fetchone()[0]
        self.assertEqual(val, 2)
    def CheckAggrCheckParamFloat(self):
        cur = self.con.cursor()
        cur.execute("select checkType('float', ?)", (3.14,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckAggrCheckParamNone(self):
        cur = self.con.cursor()
        cur.execute("select checkType('None', ?)", (None,))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckAggrCheckParamBlob(self):
        cur = self.con.cursor()
        cur.execute("select checkType('blob', ?)", (memoryview(b"blob"),))
        val = cur.fetchone()[0]
        self.assertEqual(val, 1)
    def CheckAggrCheckAggrSum(self):
        cur = self.con.cursor()
        cur.execute("delete from test")
        cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
        cur.execute("select mysum(i) from test")
        val = cur.fetchone()[0]
        self.assertEqual(val, 60)
    def CheckAggrText(self):
        # Text with an embedded NUL must round-trip through the aggregate.
        cur = self.con.cursor()
        for txt in ["foo", "1\x002"]:
            with self.subTest(txt=txt):
                cur.execute("select aggtxt(?) from test", (txt,))
                val = cur.fetchone()[0]
                self.assertEqual(val, txt)
class AuthorizerTests(unittest.TestCase):
    """A SELECT-only authorizer must make table t2 and column c2 unreadable."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # Deny everything except plain SELECTs, and any access touching
        # table t2 or a column named c2.
        if action != sqlite.SQLITE_SELECT:
            return sqlite.SQLITE_DENY
        if arg1 == 't2' or arg2 == 'c2':
            return sqlite.SQLITE_DENY
        return sqlite.SQLITE_OK

    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.executescript("""
            create table t1 (c1, c2);
            create table t2 (c1, c2);
            insert into t1 (c1, c2) values (1, 2);
            insert into t2 (c1, c2) values (4, 5);
            """)
        # This statement runs once before the authorizer is installed.
        self.con.execute("select c2 from t2")
        self.con.set_authorizer(self.authorizer_cb)

    def tearDown(self):
        pass

    def test_table_access(self):
        with self.assertRaises(sqlite.DatabaseError) as cm:
            self.con.execute("select * from t2")
        self.assertIn('prohibited', str(cm.exception))

    def test_column_access(self):
        with self.assertRaises(sqlite.DatabaseError) as cm:
            self.con.execute("select c2 from t1")
        self.assertIn('prohibited', str(cm.exception))
class AuthorizerRaiseExceptionTests(AuthorizerTests):
    """Same access checks, but the callback raises instead of denying."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # An exception escaping the callback must be treated as a denial.
        if (action != sqlite.SQLITE_SELECT
                or arg1 == 't2' or arg2 == 'c2'):
            raise ValueError
        return sqlite.SQLITE_OK
class AuthorizerIllegalTypeTests(AuthorizerTests):
    """The callback returns a float, which is not a legal authorizer result."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # A non-int return value must be treated as a denial.
        if (action != sqlite.SQLITE_SELECT
                or arg1 == 't2' or arg2 == 'c2'):
            return 0.0
        return sqlite.SQLITE_OK
class AuthorizerLargeIntegerTests(AuthorizerTests):
    """The callback returns an int too large for 32 bits."""

    @staticmethod
    def authorizer_cb(action, arg1, arg2, dbname, source):
        # An out-of-range return value must be treated as a denial.
        if (action != sqlite.SQLITE_SELECT
                or arg1 == 't2' or arg2 == 'c2'):
            return 2 ** 32
        return sqlite.SQLITE_OK
def suite():
    """Bundle every test class in this module into a single suite."""
    makeSuite = unittest.makeSuite
    return unittest.TestSuite([
        makeSuite(FunctionTests, "Check"),
        makeSuite(AggregateTests, "Check"),
        makeSuite(AuthorizerTests),
        makeSuite(AuthorizerRaiseExceptionTests),
        makeSuite(AuthorizerIllegalTypeTests),
        makeSuite(AuthorizerLargeIntegerTests),
    ])
def test():
    """Run the module's full suite through a text test runner."""
    unittest.TextTestRunner().run(suite())
# Run the suite when this file is executed as a script.
if __name__ == "__main__":
    test()
| true
| true
|
1c4a96188ff3db551b42c8a2d3e9be167f0e99f7
| 653
|
py
|
Python
|
server/user/migrations/0006_auto_20201019_2248.py
|
MetLee/hackergame
|
571b5407e0644169a2f9b3907a0a1d93138ba436
|
[
"MIT"
] | 48
|
2018-09-30T11:07:52.000Z
|
2021-12-07T03:32:59.000Z
|
server/user/migrations/0006_auto_20201019_2248.py
|
MetLee/hackergame
|
571b5407e0644169a2f9b3907a0a1d93138ba436
|
[
"MIT"
] | 100
|
2018-10-13T18:37:25.000Z
|
2021-11-11T12:14:45.000Z
|
server/user/migrations/0006_auto_20201019_2248.py
|
MetLee/hackergame
|
571b5407e0644169a2f9b3907a0a1d93138ba436
|
[
"MIT"
] | 11
|
2018-10-08T14:59:33.000Z
|
2022-03-02T03:21:09.000Z
|
# Generated by Django 3.1.2 on 2020-10-19 14:48
from django.db import migrations
class Migration(migrations.Migration):
    """Replace the User model's Meta options: disable default permissions
    and declare explicit per-university view permissions (auto-generated
    by Django, per the file header)."""
    dependencies = [
        ('user', '0005_auto_20191011_1842'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'default_permissions': (), 'permissions': [('full', '管理个人信息'), ('view_ustc', '查看中国科学技术大学个人信息'), ('view_zju', '查看浙江大学个人信息'), ('view_hit', '查看哈尔滨工业大学个人信息'), ('view_xjtu', '查看西安交通大学个人信息'), ('view_cqu', '查看重庆大学个人信息'), ('view_bupt', '查看北京邮电大学个人信息'), ('view_jlu', '查看吉林大学个人信息'), ('view_neu', '查看东北大学个人信息'), ('view_nuaa', '查看南京航空航天大学个人信息')]},
        ),
    ]
| 36.277778
| 356
| 0.623277
|
from django.db import migrations
class Migration(migrations.Migration):
    """Replace the User model's Meta options: disable default permissions
    and declare explicit per-university view permissions."""
    dependencies = [
        ('user', '0005_auto_20191011_1842'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'default_permissions': (), 'permissions': [('full', '管理个人信息'), ('view_ustc', '查看中国科学技术大学个人信息'), ('view_zju', '查看浙江大学个人信息'), ('view_hit', '查看哈尔滨工业大学个人信息'), ('view_xjtu', '查看西安交通大学个人信息'), ('view_cqu', '查看重庆大学个人信息'), ('view_bupt', '查看北京邮电大学个人信息'), ('view_jlu', '查看吉林大学个人信息'), ('view_neu', '查看东北大学个人信息'), ('view_nuaa', '查看南京航空航天大学个人信息')]},
        ),
    ]
| true
| true
|
1c4a96d25b3a08d0d02147c25f00e9fdc0faa420
| 1,458
|
py
|
Python
|
ca_qc_laval/people.py
|
dcycle/scrapers-ca
|
4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f
|
[
"MIT"
] | null | null | null |
ca_qc_laval/people.py
|
dcycle/scrapers-ca
|
4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f
|
[
"MIT"
] | null | null | null |
ca_qc_laval/people.py
|
dcycle/scrapers-ca
|
4c7a6cd01d603221b5b3b7a400d2e5ca0c6e916f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from utils import CSVScraper
class LavalPersonScraper(CSVScraper):
    """Scrapes Laval elected officials from the Données Québec open-data CSV."""
    # Dataset page: https://www.donneesquebec.ca/recherche/fr/dataset/liste-des-elus
    csv_url = 'https://www.donneesquebec.ca/recherche/dataset/8fe69713-fade-4751-a0b4-7e57a81886b1/resource/bb38e19e-26ab-495c-a0f7-ed6b3268b6e6/download/cusersapp.netappdatalocaltemp288c1490-df30-472a-8170-dd06728f449alistedeselus2013-2017.csv'
    # 'utf-8-sig' strips the byte order mark at the start of the file.
    encoding = 'utf-8-sig'
    locale = 'fr'
    # Laval also removes accents and cedillas from data; restore the
    # proper spelling of the affected district names.
    corrections = {
        'district name': {
            'Concorde - Bois-de-Boulogne': 'Concorde-Bois-de-Boulogne',
            "L'Abord-a-Plouffe": 'Abord-à-Plouffe',
            "L'Oree-des-bois": "L'Orée-des-Bois",
            'Laval-les-Iles': 'Laval-les-Îles',
            'Marc-Aurele-Fortin': 'Marc-Aurèle-Fortin',
            'Saint-Francois': 'Saint-François',
            'Sainte-Dorothee': 'Sainte-Dorothée',
        },
    }
    # Absurdly, Laval has decided "les en-têtes ne comportent pas de
    # caractères accentués ou d'espaces" and includes a byte order mark.
    def header_converter(self, s):
        """Map the accent-free French CSV headers to the scraper's
        canonical English field names (pass unknown headers through)."""
        s = super(LavalPersonScraper, self).header_converter(s.replace('-', ' '))
        return {
            'role': 'primary role',
            'prenom': 'first name',
            'localite': 'locality',
            'telephone': 'phone',
            'telecopieur': 'fax',
            'url photo': 'photo url',
        }.get(s, s)
| 41.657143
| 245
| 0.620027
|
from utils import CSVScraper
class LavalPersonScraper(CSVScraper):
    """Scrapes Laval elected officials from the Données Québec open-data CSV."""
    csv_url = 'https://www.donneesquebec.ca/recherche/dataset/8fe69713-fade-4751-a0b4-7e57a81886b1/resource/bb38e19e-26ab-495c-a0f7-ed6b3268b6e6/download/cusersapp.netappdatalocaltemp288c1490-df30-472a-8170-dd06728f449alistedeselus2013-2017.csv'
    # 'utf-8-sig' strips the byte order mark at the start of the file.
    encoding = 'utf-8-sig'
    locale = 'fr'
    # The published data strips accents and cedillas; restore the proper
    # spelling of the affected district names.
    corrections = {
        'district name': {
            'Concorde - Bois-de-Boulogne': 'Concorde-Bois-de-Boulogne',
            "L'Abord-a-Plouffe": 'Abord-à-Plouffe',
            "L'Oree-des-bois": "L'Orée-des-Bois",
            'Laval-les-Iles': 'Laval-les-Îles',
            'Marc-Aurele-Fortin': 'Marc-Aurèle-Fortin',
            'Saint-Francois': 'Saint-François',
            'Sainte-Dorothee': 'Sainte-Dorothée',
        },
    }
    # Absurdly, Laval has decided "les en-têtes ne comportent pas de
    # caractères accentués ou d'espaces" and includes a byte order mark.
    def header_converter(self, s):
        """Map the accent-free French CSV headers to the scraper's
        canonical English field names (pass unknown headers through)."""
        s = super(LavalPersonScraper, self).header_converter(s.replace('-', ' '))
        return {
            'role': 'primary role',
            'prenom': 'first name',
            'localite': 'locality',
            'telephone': 'phone',
            'telecopieur': 'fax',
            'url photo': 'photo url',
        }.get(s, s)
| true
| true
|
1c4a970d54f96d0345653b8eb6c51e13009e1d1c
| 414
|
py
|
Python
|
migrations/versions/384da3b88896_new_contact_email.py
|
clld/waab
|
9693da8887cf8498a47bc41250a2a048595f89f3
|
[
"Apache-2.0"
] | 2
|
2015-05-11T13:29:04.000Z
|
2017-12-23T04:15:02.000Z
|
migrations/versions/384da3b88896_new_contact_email.py
|
clld/waab
|
9693da8887cf8498a47bc41250a2a048595f89f3
|
[
"Apache-2.0"
] | null | null | null |
migrations/versions/384da3b88896_new_contact_email.py
|
clld/waab
|
9693da8887cf8498a47bc41250a2a048595f89f3
|
[
"Apache-2.0"
] | 1
|
2015-12-06T22:03:18.000Z
|
2015-12-06T22:03:18.000Z
|
"""new contact email
Revision ID: 384da3b88896
Revises: 55912b4a9d56
Create Date: 2015-12-09 11:35:03.872107
"""
# revision identifiers, used by Alembic.
revision = '384da3b88896'
down_revision = '55912b4a9d56'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Point the dataset's contact address at the new maintainer."""
    op.execute("update dataset set contact = 'f.c.seifart@uva.nl'")
def downgrade():
    """Intentionally a no-op: the previous contact value is not recorded."""
    pass
| 16.56
| 67
| 0.736715
|
revision = '384da3b88896'
down_revision = '55912b4a9d56'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Point the dataset's contact address at the new maintainer."""
    op.execute("update dataset set contact = 'f.c.seifart@uva.nl'")
def downgrade():
    """Intentionally a no-op: the previous contact value is not recorded."""
    pass
| true
| true
|
1c4a978548c2fd3c6dc15d9d6849941890ca2470
| 1,984
|
py
|
Python
|
COS498_FinalPrj/extr_pdf.py
|
melkimble/COS498
|
c09ab1ee61f46ebd7b8b9a645787e474df62c3e2
|
[
"MIT"
] | null | null | null |
COS498_FinalPrj/extr_pdf.py
|
melkimble/COS498
|
c09ab1ee61f46ebd7b8b9a645787e474df62c3e2
|
[
"MIT"
] | null | null | null |
COS498_FinalPrj/extr_pdf.py
|
melkimble/COS498
|
c09ab1ee61f46ebd7b8b9a645787e474df62c3e2
|
[
"MIT"
] | null | null | null |
'''
https://stackoverflow.com/questions/26494211/extracting-text-from-a-pdf-file-using-pdfminer-in-python
https://github.com/pdfminer/pdfminer.six
'''
import io
import os
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def convert_pdf_to_txt(path):
    """Extract the text of every page of the PDF at *path* as one string.

    :param path: filesystem path of the PDF file to read.
    :return: concatenated text of all pages, decoded with the converter's
        utf-8 codec.
    :raises: whatever pdfminer raises for unreadable/unextractable PDFs.
    """
    rsrcmgr = PDFResourceManager()
    retstr = io.StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    password = ""
    maxpages = 0
    caching = True
    pagenos = set()
    try:
        # The original left fp/device/retstr open if page processing
        # raised; 'with' + 'finally' close them on every path.
        with open(path, 'rb') as fp:
            interpreter = PDFPageInterpreter(rsrcmgr, device)
            for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
                                          password=password,
                                          caching=caching,
                                          check_extractable=True):
                interpreter.process_page(page)
        # Grab the accumulated text before the buffer is closed below.
        text = retstr.getvalue()
    finally:
        device.close()
        retstr.close()
    return text
def pdfs2Array(testDataFolder):
    """Parse every readable .pdf file directly inside *testDataFolder*.

    Returns a pair ``(texts, paths)``: the extracted text of each PDF and
    the matching file paths. Unreadable PDFs are skipped (best effort).
    """
    parsed_pdfs = []
    fname = []
    for entry in os.listdir(testDataFolder):
        if not entry.endswith(".pdf"):
            continue
        pdf_path = os.path.join(testDataFolder, entry)
        try:
            parsed = convert_pdf_to_txt(pdf_path)
        except Exception:
            # Deliberate best-effort: a PDF that cannot be read is skipped.
            continue
        parsed_pdfs.append(parsed)
        fname.append(pdf_path)
    return (parsed_pdfs, fname)
'''
text = convert_pdf_to_txt("D:/Dropbox/01_School/18SP/COS498/FinalProject/TestingData/Adam and Michelle Campbell, Pulpit Harbor Salt Pond, N. Haven.pdf")
#print(text)
'''
| 32
| 152
| 0.634073
|
import io
import os
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def convert_pdf_to_txt(path):
    """Extract the text of every page of the PDF at *path* as one string.

    :param path: filesystem path of the PDF file to read.
    :return: concatenated text of all pages, decoded with the converter's
        utf-8 codec.
    :raises: whatever pdfminer raises for unreadable/unextractable PDFs.
    """
    rsrcmgr = PDFResourceManager()
    retstr = io.StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    password = ""
    maxpages = 0
    caching = True
    pagenos = set()
    try:
        # The original left fp/device/retstr open if page processing
        # raised; 'with' + 'finally' close them on every path.
        with open(path, 'rb') as fp:
            interpreter = PDFPageInterpreter(rsrcmgr, device)
            for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages,
                                          password=password,
                                          caching=caching,
                                          check_extractable=True):
                interpreter.process_page(page)
        # Grab the accumulated text before the buffer is closed below.
        text = retstr.getvalue()
    finally:
        device.close()
        retstr.close()
    return text
def pdfs2Array(testDataFolder):
    """Parse every readable .pdf file directly inside *testDataFolder*.

    Returns a pair ``(texts, paths)``: the extracted text of each PDF and
    the matching file paths. Unreadable PDFs are skipped (best effort).
    """
    texts = []
    paths = []
    for entry in os.listdir(testDataFolder):
        if not entry.endswith(".pdf"):
            continue
        full_path = os.path.join(testDataFolder, entry)
        try:
            extracted = convert_pdf_to_txt(full_path)
        except Exception:
            # Deliberate best-effort: a PDF that cannot be read is skipped.
            continue
        texts.append(extracted)
        paths.append(full_path)
    return (texts, paths)
| true
| true
|
1c4a97869018cd998d4a5dc0c14d4c8520f52232
| 851
|
py
|
Python
|
my_boilerplate_django_admin/core/helpers/string.py
|
diegoMasin/my-boilerplate-djangoadmin
|
ef84516957b4742a8459519ace15e37107691456
|
[
"MIT"
] | null | null | null |
my_boilerplate_django_admin/core/helpers/string.py
|
diegoMasin/my-boilerplate-djangoadmin
|
ef84516957b4742a8459519ace15e37107691456
|
[
"MIT"
] | null | null | null |
my_boilerplate_django_admin/core/helpers/string.py
|
diegoMasin/my-boilerplate-djangoadmin
|
ef84516957b4742a8459519ace15e37107691456
|
[
"MIT"
] | null | null | null |
def is_cpf(cpf):
    """Validate a Brazilian CPF number, ignoring punctuation.

    Valid means: exactly 11 digits, not all identical, and both check
    digits consistent with the modulus-11 rule.
    """
    digits = [int(ch) for ch in cpf if ch.isdigit()]
    if len(digits) != 11:
        return False
    # A CPF made of one repeated digit satisfies the checksum but is invalid.
    if len(set(digits)) == 1:
        return False
    # Verify the two check digits (positions 9 and 10) in turn.
    for length in (9, 10):
        weighted = sum(d * w for d, w in
                       zip(digits[:length], range(length + 1, 1, -1)))
        if digits[length] != (weighted * 10 % 11) % 10:
            return False
    return True
| 32.730769
| 81
| 0.642773
|
def is_cpf(cpf):
    """Return True when *cpf* holds a valid Brazilian CPF.

    Punctuation is ignored; validity requires exactly 11 digits, not all
    identical, with both verification digits matching the modulus-11 rule.
    """
    nums = [int(c) for c in cpf if c.isdigit()]
    if len(nums) != 11 or min(nums) == max(nums):
        return False

    def _check_digit(prefix):
        # Weighted sum with weights len(prefix)+1 .. 2, reduced mod 11.
        acc = sum(d * w for d, w in zip(prefix, range(len(prefix) + 1, 1, -1)))
        return (acc * 10 % 11) % 10

    return (nums[9] == _check_digit(nums[:9])
            and nums[10] == _check_digit(nums[:10]))
| true
| true
|
1c4a980951b00ae54b20a33ead879e7bc50123b1
| 1,172
|
py
|
Python
|
opentech/settings/production.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/settings/production.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/settings/production.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from .base import * # noqa
# Disable debug mode in production.
DEBUG = False

# Configuration from environment variables.
env = os.environ.copy()

# Alternatively, you can set these in a local.py file on the server.
try:
    from .local import *  # noqa
except ImportError:
    pass

# Mailgun configuration — enabled only when an API key is present.
if 'MAILGUN_API_KEY' in env:
    EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
    ANYMAIL = {
        "MAILGUN_API_KEY": env['MAILGUN_API_KEY'],
        "MAILGUN_SENDER_DOMAIN": env.get('EMAIL_HOST', None),
        "WEBHOOK_SECRET": env.get('ANYMAIL_WEBHOOK_SECRET', None)
    }

# Sentry configuration — Django and Celery integrations.
if 'SENTRY_DSN' in env:
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    from sentry_sdk.integrations.celery import CeleryIntegration
    sentry_sdk.init(
        dsn=env['SENTRY_DSN'],
        environment=env.get('SENTRY_ENVIRONMENT', None),
        integrations=[DjangoIntegration(), CeleryIntegration()]
    )

# Heroku configuration.
# Set ON_HEROKU to true in Config Vars or via cli "heroku config:set ON_HEROKU=true".
if 'ON_HEROKU' in env:
    import django_heroku
    django_heroku.settings(locals())
| 27.904762
| 85
| 0.716724
|
import os
from .base import *
# Production settings: debug off, services configured from the environment.
DEBUG = False
env = os.environ.copy()
# Optional machine-local overrides.
try:
    from .local import *
except ImportError:
    pass
# Mailgun (outbound email) — enabled only when an API key is present.
if 'MAILGUN_API_KEY' in env:
    EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
    ANYMAIL = {
        "MAILGUN_API_KEY": env['MAILGUN_API_KEY'],
        "MAILGUN_SENDER_DOMAIN": env.get('EMAIL_HOST', None),
        "WEBHOOK_SECRET": env.get('ANYMAIL_WEBHOOK_SECRET', None)
    }
# Sentry error reporting with Django and Celery integrations.
if 'SENTRY_DSN' in env:
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    from sentry_sdk.integrations.celery import CeleryIntegration
    sentry_sdk.init(
        dsn=env['SENTRY_DSN'],
        environment=env.get('SENTRY_ENVIRONMENT', None),
        integrations=[DjangoIntegration(), CeleryIntegration()]
    )
# Heroku-specific settings, toggled via the ON_HEROKU config var.
if 'ON_HEROKU' in env:
    import django_heroku
    django_heroku.settings(locals())
| true
| true
|
1c4a9a3776b6c9427c9e3dbf1a0cdddedadfcf4c
| 1,811
|
py
|
Python
|
setup.py
|
mcanu/django-jstemplate
|
9af1e22681a318d077f4908b5c92d901e163772f
|
[
"BSD-3-Clause"
] | 11
|
2015-04-02T00:24:05.000Z
|
2020-08-13T01:57:57.000Z
|
setup.py
|
mcanu/django-jstemplate
|
9af1e22681a318d077f4908b5c92d901e163772f
|
[
"BSD-3-Clause"
] | 7
|
2015-03-03T09:54:34.000Z
|
2021-04-04T14:35:03.000Z
|
setup.py
|
mcanu/django-jstemplate
|
9af1e22681a318d077f4908b5c92d901e163772f
|
[
"BSD-3-Clause"
] | 8
|
2015-01-01T18:56:32.000Z
|
2019-05-07T18:23:04.000Z
|
from os.path import join, dirname, abspath
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup script.
here = dirname(abspath(__file__))
# The long description is assembled from the project's top-level docs.
long_description = (open(join(here, "README.rst")).read() + "\n\n" +
                    open(join(here, "CHANGES.rst")).read() + "\n\n" +
                    open(join(here, "TODO.rst")).read())
def get_version():
    """Return the __version__ string declared in jstemplate/__init__.py.

    Returns None if no ``__version__ =`` line is found.
    """
    # A context manager guarantees the file is closed even if reading
    # fails (the original's try/finally achieved this with more code).
    with open(join(here, "jstemplate", "__init__.py")) as fh:
        for line in fh:
            if line.startswith("__version__ ="):
                return line.split("=")[1].strip().strip('"')
# Package metadata; the version is read from jstemplate/__init__.py at
# build time via get_version().
setup(
    name="django-jstemplate",
    version=get_version(),
    description="A Django template tag for embedding Mustache.js templates -- or other JavaScript templates -- safely.",
    long_description=long_description,
    author="Mjumbe Wawatu Ukweli",
    author_email="mjumbewu@gmail.com",
    url="https://github.com/mjumbewu/django-jstemplate/",
    packages=find_packages(),
    package_data={'jstemplate': ['static/libs/*.js']},  # bundled JS assets
    install_requires=[
        'Django >= 1.3',
        'six'
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Framework :: Django",
    ],
    zip_safe=False,
    tests_require=["Django>=1.3", "mock", "six"],
    test_suite="runtests.runtests"
)
| 34.169811
| 120
| 0.595803
|
from os.path import join, dirname, abspath
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup script.
here = dirname(abspath(__file__))
# The long description is assembled from the project's top-level docs.
long_description = (open(join(here, "README.rst")).read() + "\n\n" +
                    open(join(here, "CHANGES.rst")).read() + "\n\n" +
                    open(join(here, "TODO.rst")).read())
def get_version():
fh = open(join(here, "jstemplate", "__init__.py"))
try:
for line in fh.readlines():
if line.startswith("__version__ ="):
return line.split("=")[1].strip().strip('"')
finally:
fh.close()
setup(
name="django-jstemplate",
version=get_version(),
description="A Django template tag for embedding Mustache.js templates -- or other JavaScript templates -- safely.",
long_description=long_description,
author="Mjumbe Wawatu Ukweli",
author_email="mjumbewu@gmail.com",
url="https://github.com/mjumbewu/django-jstemplate/",
packages=find_packages(),
package_data={'jstemplate': ['static/libs/*.js']},
install_requires=[
'Django >= 1.3',
'six'
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Framework :: Django",
],
zip_safe=False,
tests_require=["Django>=1.3", "mock", "six"],
test_suite="runtests.runtests"
)
| true
| true
|
1c4a9b7da7a8569e9efe0ece18d83db13ce8a2ef
| 16,231
|
py
|
Python
|
models/resnet.py
|
zyyhhxx/convNet.pytorch
|
85f65f80b6d75810077c54bd3a8c9094cc2a26f9
|
[
"MIT"
] | 2
|
2020-07-02T14:21:01.000Z
|
2021-01-04T01:52:09.000Z
|
models/resnet.py
|
zyyhhxx/convNet.pytorch
|
85f65f80b6d75810077c54bd3a8c9094cc2a26f9
|
[
"MIT"
] | null | null | null |
models/resnet.py
|
zyyhhxx/convNet.pytorch
|
85f65f80b6d75810077c54bd3a8c9094cc2a26f9
|
[
"MIT"
] | 1
|
2019-10-02T07:19:26.000Z
|
2019-10-02T07:19:26.000Z
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import math
from .modules.se import SEBlock
from .modules.checkpoint import CheckpointModule
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from utils.mixup import MixUp
__all__ = ['resnet', 'resnet_se']
def init_model(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
for m in model.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
model.fc.weight.data.normal_(0, 0.01)
model.fc.bias.data.zero_()
def weight_decay_config(value=1e-4, log=False):
return {'name': 'WeightDecay',
'value': value,
'log': log,
'filter': {'parameter_name': lambda n: not n.endswith('bias'),
'module': lambda m: not isinstance(m, nn.BatchNorm2d)}
}
def mixsize_config(sz, base_size, base_batch, base_duplicates, adapt_batch, adapt_duplicates):
assert adapt_batch or adapt_duplicates or sz == base_size
batch_size = base_batch
duplicates = base_duplicates
if adapt_batch and adapt_duplicates:
scale = base_size/sz
else:
scale = (base_size/sz)**2
if scale * duplicates < 0.5:
adapt_duplicates = False
adapt_batch = True
if adapt_batch:
batch_size = int(round(scale * base_batch))
if adapt_duplicates:
duplicates = int(round(scale * duplicates))
duplicates = max(1, duplicates)
return {
'input_size': sz,
'batch_size': batch_size,
'duplicates': duplicates
}
def ramp_up_fn(lr0, lrT, T):
rate = (lrT - lr0) / T
return "lambda t: {'lr': %s + t * %s}" % (lr0, rate)
def conv3x3(in_planes, out_planes, stride=1, groups=1, bias=False):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=bias)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, expansion=1,
downsample=None, groups=1, residual_block=None, dropout=0.):
super(BasicBlock, self).__init__()
dropout = 0 if dropout is None else dropout
self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, expansion * planes, groups=groups)
self.bn2 = nn.BatchNorm2d(expansion * planes)
self.downsample = downsample
self.residual_block = residual_block
self.stride = stride
self.expansion = expansion
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(residual)
if self.residual_block is not None:
residual = self.residual_block(residual)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1, expansion=4, downsample=None, groups=1, residual_block=None, dropout=0.):
super(Bottleneck, self).__init__()
dropout = 0 if dropout is None else dropout
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=stride, groups=groups)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * expansion)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout)
self.downsample = downsample
self.residual_block = residual_block
self.stride = stride
self.expansion = expansion
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(residual)
if self.residual_block is not None:
residual = self.residual_block(residual)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self):
super(ResNet, self).__init__()
def _make_layer(self, block, planes, blocks, expansion=1, stride=1, groups=1, residual_block=None, dropout=None, mixup=False):
downsample = None
out_planes = planes * expansion
if stride != 1 or self.inplanes != out_planes:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, out_planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * expansion),
)
if residual_block is not None:
residual_block = residual_block(out_planes)
layers = []
layers.append(block(self.inplanes, planes, stride, expansion=expansion,
downsample=downsample, groups=groups, residual_block=residual_block, dropout=dropout))
self.inplanes = planes * expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, expansion=expansion, groups=groups,
residual_block=residual_block, dropout=dropout))
if mixup:
layers.append(MixUp())
return nn.Sequential(*layers)
def features(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
return x.view(x.size(0), -1)
def forward(self, x):
x = self.features(x)
x = self.fc(x)
return x
class ResNet_imagenet(ResNet):
num_train_images = 1281167
def __init__(self, num_classes=1000, inplanes=64,
block=Bottleneck, residual_block=None, layers=[3, 4, 23, 3],
width=[64, 128, 256, 512], expansion=4, groups=[1, 1, 1, 1],
regime='normal', scale_lr=1, ramp_up_lr=True, checkpoint_segments=0, mixup=False,
base_devices=4, base_device_batch=64, base_duplicates=1, base_image_size=224, mix_size_regime='D+'):
super(ResNet_imagenet, self).__init__()
self.inplanes = inplanes
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
for i in range(len(layers)):
layer = self._make_layer(block=block, planes=width[i], blocks=layers[i], expansion=expansion,
stride=1 if i == 0 else 2, residual_block=residual_block, groups=groups[i],
mixup=mixup)
if checkpoint_segments > 0:
layer_checkpoint_segments = min(checkpoint_segments, layers[i])
layer = CheckpointModule(layer, layer_checkpoint_segments)
setattr(self, 'layer%s' % str(i + 1), layer)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(width[-1] * expansion, num_classes)
init_model(self)
batch_size = base_devices * base_device_batch
num_steps_epoch = math.floor(self.num_train_images / batch_size)
# base regime
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': scale_lr * 1e-1,
'momentum': 0.9, 'regularizer': weight_decay_config(1e-4)},
{'epoch': 30, 'lr': scale_lr * 1e-2},
{'epoch': 60, 'lr': scale_lr * 1e-3},
{'epoch': 80, 'lr': scale_lr * 1e-4}
]
if 'cutmix' in regime:
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': scale_lr * 1e-1,
'momentum': 0.9, 'regularizer': weight_decay_config(1e-4)},
{'epoch': 75, 'lr': scale_lr * 1e-2},
{'epoch': 150, 'lr': scale_lr * 1e-3},
{'epoch': 225, 'lr': scale_lr * 1e-4}
]
# Sampled regimes from "Mix & Match: training convnets with mixed image sizes for improved accuracy, speed and scale resiliency"
if 'sampled' in regime:
# add gradient smoothing
self.regime[0]['regularizer'] = [{'name': 'GradSmooth', 'momentum': 0.9, 'log': False},
weight_decay_config(1e-4)]
ramp_up_lr = False
self.data_regime = None
def size_config(size): return mixsize_config(size, base_size=base_image_size, base_batch=base_device_batch, base_duplicates=base_duplicates,
adapt_batch=mix_size_regime == 'B+', adapt_duplicates=mix_size_regime == 'D+')
increment = int(base_image_size / 7)
if '144' in regime:
self.sampled_data_regime = [
(0.1, size_config(base_image_size+increment)),
(0.1, size_config(base_image_size)),
(0.6, size_config(base_image_size - 3*increment)),
(0.2, size_config(base_image_size - 4*increment)),
]
else: # sampled-224
self.sampled_data_regime = [
(0.8/6, size_config(base_image_size - 3*increment)),
(0.8/6, size_config(base_image_size - 2*increment)),
(0.8/6, size_config(base_image_size - increment)),
(0.2, size_config(base_image_size)),
(0.8/6, size_config(base_image_size + increment)),
(0.8/6, size_config(base_image_size + 2*increment)),
(0.8/6, size_config(base_image_size + 3*increment)),
]
self.data_eval_regime = [
{'epoch': 0, 'input_size': base_image_size}
]
if ramp_up_lr and scale_lr > 1: # add learning rate ramp-up
self.regime[0]['step_lambda'] = ramp_up_fn(0.1, 0.1 * scale_lr,
num_steps_epoch * 5)
self.regime.insert(1, {'epoch': 5, 'lr': scale_lr * 1e-1})
class ResNet_cifar(ResNet):
def __init__(self, num_classes=10, inplanes=16,
block=BasicBlock, depth=18, width=[16, 32, 64],
groups=[1, 1, 1], residual_block=None, regime='normal', dropout=None, mixup=False):
super(ResNet_cifar, self).__init__()
self.inplanes = inplanes
n = int((depth - 2) / 6)
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = lambda x: x
self.layer1 = self._make_layer(block, width[0], n, groups=groups[0],
residual_block=residual_block, dropout=dropout, mixup=mixup)
self.layer2 = self._make_layer(block, width[1], n, stride=2, groups=groups[1],
residual_block=residual_block, dropout=dropout, mixup=mixup)
self.layer3 = self._make_layer(block, width[2], n, stride=2, groups=groups[2],
residual_block=residual_block, dropout=dropout, mixup=mixup)
self.layer4 = lambda x: x
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(width[-1], num_classes)
init_model(self)
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, 'momentum': 0.9,
'regularizer': weight_decay_config(1e-4)},
{'epoch': 81, 'lr': 1e-2},
{'epoch': 122, 'lr': 1e-3},
{'epoch': 164, 'lr': 1e-4}
]
if 'wide-resnet' in regime:
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, 'momentum': 0.9,
'regularizer': weight_decay_config(5e-4)},
{'epoch': 60, 'lr': 2e-2},
{'epoch': 120, 'lr': 4e-3},
{'epoch': 160, 'lr': 8e-4}
]
# Sampled regimes from "Mix & Match: training convnets with mixed image sizes for improved accuracy, speed and scale resiliency"
if 'sampled' in regime:
adapt_batch = True if 'B+' in regime else False
adapt_duplicates = True if ('D+' in regime or not adapt_batch) \
else False
def size_config(size): return mixsize_config(size, base_size=32, base_batch=64, base_duplicates=1,
adapt_batch=adapt_batch, adapt_duplicates=adapt_duplicates)
# add gradient smoothing
self.regime[0]['regularizer'] = [{'name': 'GradSmooth', 'momentum': 0.9, 'log': False},
weight_decay_config(1e-4)]
self.data_regime = None
self.sampled_data_regime = [
(0.3, size_config(32)),
(0.2, size_config(48)),
(0.3, size_config(24)),
(0.2, size_config(16)),
]
self.data_eval_regime = [
{'epoch': 0, 'input_size': 32, 'scale_size': 32}
]
def resnet(**config):
dataset = config.pop('dataset', 'imagenet')
if config.pop('quantize', False):
from .modules.quantize import QConv2d, QLinear, RangeBN
torch.nn.Linear = QLinear
torch.nn.Conv2d = QConv2d
torch.nn.BatchNorm2d = RangeBN
bn_norm = config.pop('bn_norm', None)
if bn_norm is not None:
from .modules.lp_norm import L1BatchNorm2d, TopkBatchNorm2d
if bn_norm == 'L1':
torch.nn.BatchNorm2d = L1BatchNorm2d
if bn_norm == 'TopK':
torch.nn.BatchNorm2d = TopkBatchNorm2d
if 'imagenet' in dataset:
config.setdefault('num_classes', 1000)
depth = config.pop('depth', 50)
if depth == 18:
config.update(dict(block=BasicBlock,
layers=[2, 2, 2, 2],
expansion=1))
if depth == 34:
config.update(dict(block=BasicBlock,
layers=[3, 4, 6, 3],
expansion=1))
if depth == 50:
config.update(dict(block=Bottleneck, layers=[3, 4, 6, 3]))
if depth == 101:
config.update(dict(block=Bottleneck, layers=[3, 4, 23, 3]))
if depth == 152:
config.update(dict(block=Bottleneck, layers=[3, 8, 36, 3]))
if depth == 200:
config.update(dict(block=Bottleneck, layers=[3, 24, 36, 3]))
return ResNet_imagenet(**config)
elif dataset == 'cifar10':
config.setdefault('num_classes', 10)
config.setdefault('depth', 44)
return ResNet_cifar(block=BasicBlock, **config)
elif dataset == 'cifar100':
config.setdefault('num_classes', 100)
config.setdefault('depth', 44)
return ResNet_cifar(block=BasicBlock, **config)
def resnet_se(**config):
config['residual_block'] = SEBlock
return resnet(**config)
| 38.462085
| 152
| 0.566385
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import math
from .modules.se import SEBlock
from .modules.checkpoint import CheckpointModule
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from utils.mixup import MixUp
__all__ = ['resnet', 'resnet_se']
def init_model(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
for m in model.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
model.fc.weight.data.normal_(0, 0.01)
model.fc.bias.data.zero_()
def weight_decay_config(value=1e-4, log=False):
return {'name': 'WeightDecay',
'value': value,
'log': log,
'filter': {'parameter_name': lambda n: not n.endswith('bias'),
'module': lambda m: not isinstance(m, nn.BatchNorm2d)}
}
def mixsize_config(sz, base_size, base_batch, base_duplicates, adapt_batch, adapt_duplicates):
assert adapt_batch or adapt_duplicates or sz == base_size
batch_size = base_batch
duplicates = base_duplicates
if adapt_batch and adapt_duplicates:
scale = base_size/sz
else:
scale = (base_size/sz)**2
if scale * duplicates < 0.5:
adapt_duplicates = False
adapt_batch = True
if adapt_batch:
batch_size = int(round(scale * base_batch))
if adapt_duplicates:
duplicates = int(round(scale * duplicates))
duplicates = max(1, duplicates)
return {
'input_size': sz,
'batch_size': batch_size,
'duplicates': duplicates
}
def ramp_up_fn(lr0, lrT, T):
rate = (lrT - lr0) / T
return "lambda t: {'lr': %s + t * %s}" % (lr0, rate)
def conv3x3(in_planes, out_planes, stride=1, groups=1, bias=False):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=bias)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, expansion=1,
downsample=None, groups=1, residual_block=None, dropout=0.):
super(BasicBlock, self).__init__()
dropout = 0 if dropout is None else dropout
self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, expansion * planes, groups=groups)
self.bn2 = nn.BatchNorm2d(expansion * planes)
self.downsample = downsample
self.residual_block = residual_block
self.stride = stride
self.expansion = expansion
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(residual)
if self.residual_block is not None:
residual = self.residual_block(residual)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1, expansion=4, downsample=None, groups=1, residual_block=None, dropout=0.):
super(Bottleneck, self).__init__()
dropout = 0 if dropout is None else dropout
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=stride, groups=groups)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * expansion)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout)
self.downsample = downsample
self.residual_block = residual_block
self.stride = stride
self.expansion = expansion
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(residual)
if self.residual_block is not None:
residual = self.residual_block(residual)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self):
super(ResNet, self).__init__()
def _make_layer(self, block, planes, blocks, expansion=1, stride=1, groups=1, residual_block=None, dropout=None, mixup=False):
downsample = None
out_planes = planes * expansion
if stride != 1 or self.inplanes != out_planes:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, out_planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * expansion),
)
if residual_block is not None:
residual_block = residual_block(out_planes)
layers = []
layers.append(block(self.inplanes, planes, stride, expansion=expansion,
downsample=downsample, groups=groups, residual_block=residual_block, dropout=dropout))
self.inplanes = planes * expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, expansion=expansion, groups=groups,
residual_block=residual_block, dropout=dropout))
if mixup:
layers.append(MixUp())
return nn.Sequential(*layers)
def features(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
return x.view(x.size(0), -1)
def forward(self, x):
x = self.features(x)
x = self.fc(x)
return x
class ResNet_imagenet(ResNet):
num_train_images = 1281167
def __init__(self, num_classes=1000, inplanes=64,
block=Bottleneck, residual_block=None, layers=[3, 4, 23, 3],
width=[64, 128, 256, 512], expansion=4, groups=[1, 1, 1, 1],
regime='normal', scale_lr=1, ramp_up_lr=True, checkpoint_segments=0, mixup=False,
base_devices=4, base_device_batch=64, base_duplicates=1, base_image_size=224, mix_size_regime='D+'):
super(ResNet_imagenet, self).__init__()
self.inplanes = inplanes
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
for i in range(len(layers)):
layer = self._make_layer(block=block, planes=width[i], blocks=layers[i], expansion=expansion,
stride=1 if i == 0 else 2, residual_block=residual_block, groups=groups[i],
mixup=mixup)
if checkpoint_segments > 0:
layer_checkpoint_segments = min(checkpoint_segments, layers[i])
layer = CheckpointModule(layer, layer_checkpoint_segments)
setattr(self, 'layer%s' % str(i + 1), layer)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(width[-1] * expansion, num_classes)
init_model(self)
batch_size = base_devices * base_device_batch
num_steps_epoch = math.floor(self.num_train_images / batch_size)
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': scale_lr * 1e-1,
'momentum': 0.9, 'regularizer': weight_decay_config(1e-4)},
{'epoch': 30, 'lr': scale_lr * 1e-2},
{'epoch': 60, 'lr': scale_lr * 1e-3},
{'epoch': 80, 'lr': scale_lr * 1e-4}
]
if 'cutmix' in regime:
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': scale_lr * 1e-1,
'momentum': 0.9, 'regularizer': weight_decay_config(1e-4)},
{'epoch': 75, 'lr': scale_lr * 1e-2},
{'epoch': 150, 'lr': scale_lr * 1e-3},
{'epoch': 225, 'lr': scale_lr * 1e-4}
]
if 'sampled' in regime:
self.regime[0]['regularizer'] = [{'name': 'GradSmooth', 'momentum': 0.9, 'log': False},
weight_decay_config(1e-4)]
ramp_up_lr = False
self.data_regime = None
def size_config(size): return mixsize_config(size, base_size=base_image_size, base_batch=base_device_batch, base_duplicates=base_duplicates,
adapt_batch=mix_size_regime == 'B+', adapt_duplicates=mix_size_regime == 'D+')
increment = int(base_image_size / 7)
if '144' in regime:
self.sampled_data_regime = [
(0.1, size_config(base_image_size+increment)),
(0.1, size_config(base_image_size)),
(0.6, size_config(base_image_size - 3*increment)),
(0.2, size_config(base_image_size - 4*increment)),
]
else:
self.sampled_data_regime = [
(0.8/6, size_config(base_image_size - 3*increment)),
(0.8/6, size_config(base_image_size - 2*increment)),
(0.8/6, size_config(base_image_size - increment)),
(0.2, size_config(base_image_size)),
(0.8/6, size_config(base_image_size + increment)),
(0.8/6, size_config(base_image_size + 2*increment)),
(0.8/6, size_config(base_image_size + 3*increment)),
]
self.data_eval_regime = [
{'epoch': 0, 'input_size': base_image_size}
]
if ramp_up_lr and scale_lr > 1:
self.regime[0]['step_lambda'] = ramp_up_fn(0.1, 0.1 * scale_lr,
num_steps_epoch * 5)
self.regime.insert(1, {'epoch': 5, 'lr': scale_lr * 1e-1})
class ResNet_cifar(ResNet):
def __init__(self, num_classes=10, inplanes=16,
block=BasicBlock, depth=18, width=[16, 32, 64],
groups=[1, 1, 1], residual_block=None, regime='normal', dropout=None, mixup=False):
super(ResNet_cifar, self).__init__()
self.inplanes = inplanes
n = int((depth - 2) / 6)
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = lambda x: x
self.layer1 = self._make_layer(block, width[0], n, groups=groups[0],
residual_block=residual_block, dropout=dropout, mixup=mixup)
self.layer2 = self._make_layer(block, width[1], n, stride=2, groups=groups[1],
residual_block=residual_block, dropout=dropout, mixup=mixup)
self.layer3 = self._make_layer(block, width[2], n, stride=2, groups=groups[2],
residual_block=residual_block, dropout=dropout, mixup=mixup)
self.layer4 = lambda x: x
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(width[-1], num_classes)
init_model(self)
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, 'momentum': 0.9,
'regularizer': weight_decay_config(1e-4)},
{'epoch': 81, 'lr': 1e-2},
{'epoch': 122, 'lr': 1e-3},
{'epoch': 164, 'lr': 1e-4}
]
if 'wide-resnet' in regime:
self.regime = [
{'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, 'momentum': 0.9,
'regularizer': weight_decay_config(5e-4)},
{'epoch': 60, 'lr': 2e-2},
{'epoch': 120, 'lr': 4e-3},
{'epoch': 160, 'lr': 8e-4}
]
if 'sampled' in regime:
adapt_batch = True if 'B+' in regime else False
adapt_duplicates = True if ('D+' in regime or not adapt_batch) \
else False
def size_config(size): return mixsize_config(size, base_size=32, base_batch=64, base_duplicates=1,
adapt_batch=adapt_batch, adapt_duplicates=adapt_duplicates)
self.regime[0]['regularizer'] = [{'name': 'GradSmooth', 'momentum': 0.9, 'log': False},
weight_decay_config(1e-4)]
self.data_regime = None
self.sampled_data_regime = [
(0.3, size_config(32)),
(0.2, size_config(48)),
(0.3, size_config(24)),
(0.2, size_config(16)),
]
self.data_eval_regime = [
{'epoch': 0, 'input_size': 32, 'scale_size': 32}
]
def resnet(**config):
dataset = config.pop('dataset', 'imagenet')
if config.pop('quantize', False):
from .modules.quantize import QConv2d, QLinear, RangeBN
torch.nn.Linear = QLinear
torch.nn.Conv2d = QConv2d
torch.nn.BatchNorm2d = RangeBN
bn_norm = config.pop('bn_norm', None)
if bn_norm is not None:
from .modules.lp_norm import L1BatchNorm2d, TopkBatchNorm2d
if bn_norm == 'L1':
torch.nn.BatchNorm2d = L1BatchNorm2d
if bn_norm == 'TopK':
torch.nn.BatchNorm2d = TopkBatchNorm2d
if 'imagenet' in dataset:
config.setdefault('num_classes', 1000)
depth = config.pop('depth', 50)
if depth == 18:
config.update(dict(block=BasicBlock,
layers=[2, 2, 2, 2],
expansion=1))
if depth == 34:
config.update(dict(block=BasicBlock,
layers=[3, 4, 6, 3],
expansion=1))
if depth == 50:
config.update(dict(block=Bottleneck, layers=[3, 4, 6, 3]))
if depth == 101:
config.update(dict(block=Bottleneck, layers=[3, 4, 23, 3]))
if depth == 152:
config.update(dict(block=Bottleneck, layers=[3, 8, 36, 3]))
if depth == 200:
config.update(dict(block=Bottleneck, layers=[3, 24, 36, 3]))
return ResNet_imagenet(**config)
elif dataset == 'cifar10':
config.setdefault('num_classes', 10)
config.setdefault('depth', 44)
return ResNet_cifar(block=BasicBlock, **config)
elif dataset == 'cifar100':
config.setdefault('num_classes', 100)
config.setdefault('depth', 44)
return ResNet_cifar(block=BasicBlock, **config)
def resnet_se(**config):
config['residual_block'] = SEBlock
return resnet(**config)
| true
| true
|
1c4a9d5d339e6b97fd46492825604da475aeac37
| 6,136
|
py
|
Python
|
lib/lib/Cryptodome/SelfTest/Cipher/test_Blowfish.py
|
Zhangxi-Lam/alfred-google-keep
|
9f8f891e103f33a7e749907fe5cbfcf779131c8e
|
[
"MIT"
] | 2,557
|
2016-07-19T22:20:45.000Z
|
2022-01-25T10:53:35.000Z
|
lib/lib/Cryptodome/SelfTest/Cipher/test_Blowfish.py
|
Zhangxi-Lam/alfred-google-keep
|
9f8f891e103f33a7e749907fe5cbfcf779131c8e
|
[
"MIT"
] | 1,360
|
2016-07-20T02:06:42.000Z
|
2021-07-27T12:46:40.000Z
|
lib/lib/Cryptodome/SelfTest/Cipher/test_Blowfish.py
|
Zhangxi-Lam/alfred-google-keep
|
9f8f891e103f33a7e749907fe5cbfcf779131c8e
|
[
"MIT"
] | 607
|
2016-07-20T03:34:04.000Z
|
2022-01-05T14:57:09.000Z
|
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_Blowfish.py: Self-test for the Blowfish cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Cryptodome.Cipher.Blowfish"""
import unittest
from Cryptodome.Util.py3compat import bchr
from Cryptodome.Cipher import Blowfish
# This is a list of (plaintext, ciphertext, key) tuples.
test_data = [
# Test vectors from http://www.schneier.com/code/vectors.txt
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('ffffffffffffffff', '51866fd5b85ecb8a', 'ffffffffffffffff'),
('1000000000000001', '7d856f9a613063f2', '3000000000000000'),
('1111111111111111', '2466dd878b963c9d', '1111111111111111'),
('1111111111111111', '61f9c3802281b096', '0123456789abcdef'),
('0123456789abcdef', '7d0cc630afda1ec7', '1111111111111111'),
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('0123456789abcdef', '0aceab0fc6a0a28d', 'fedcba9876543210'),
('01a1d6d039776742', '59c68245eb05282b', '7ca110454a1a6e57'),
('5cd54ca83def57da', 'b1b8cc0b250f09a0', '0131d9619dc1376e'),
('0248d43806f67172', '1730e5778bea1da4', '07a1133e4a0b2686'),
('51454b582ddf440a', 'a25e7856cf2651eb', '3849674c2602319e'),
('42fd443059577fa2', '353882b109ce8f1a', '04b915ba43feb5b6'),
('059b5e0851cf143a', '48f4d0884c379918', '0113b970fd34f2ce'),
('0756d8e0774761d2', '432193b78951fc98', '0170f175468fb5e6'),
('762514b829bf486a', '13f04154d69d1ae5', '43297fad38e373fe'),
('3bdd119049372802', '2eedda93ffd39c79', '07a7137045da2a16'),
('26955f6835af609a', 'd887e0393c2da6e3', '04689104c2fd3b2f'),
('164d5e404f275232', '5f99d04f5b163969', '37d06bb516cb7546'),
('6b056e18759f5cca', '4a057a3b24d3977b', '1f08260d1ac2465e'),
('004bd6ef09176062', '452031c1e4fada8e', '584023641aba6176'),
('480d39006ee762f2', '7555ae39f59b87bd', '025816164629b007'),
('437540c8698f3cfa', '53c55f9cb49fc019', '49793ebc79b3258f'),
('072d43a077075292', '7a8e7bfa937e89a3', '4fb05e1515ab73a7'),
('02fe55778117f12a', 'cf9c5d7a4986adb5', '49e95d6d4ca229bf'),
('1d9d5c5018f728c2', 'd1abb290658bc778', '018310dc409b26d6'),
('305532286d6f295a', '55cb3774d13ef201', '1c587f1c13924fef'),
('0123456789abcdef', 'fa34ec4847b268b2', '0101010101010101'),
('0123456789abcdef', 'a790795108ea3cae', '1f1f1f1f0e0e0e0e'),
('0123456789abcdef', 'c39e072d9fac631d', 'e0fee0fef1fef1fe'),
('ffffffffffffffff', '014933e0cdaff6e4', '0000000000000000'),
('0000000000000000', 'f21e9a77b71c49bc', 'ffffffffffffffff'),
('0000000000000000', '245946885754369a', '0123456789abcdef'),
('ffffffffffffffff', '6b5c5a9c5d9e0a5a', 'fedcba9876543210'),
#('fedcba9876543210', 'f9ad597c49db005e', 'f0'),
#('fedcba9876543210', 'e91d21c1d961a6d6', 'f0e1'),
#('fedcba9876543210', 'e9c2b70a1bc65cf3', 'f0e1d2'),
#('fedcba9876543210', 'be1e639408640f05', 'f0e1d2c3'),
('fedcba9876543210', 'b39e44481bdb1e6e', 'f0e1d2c3b4'),
('fedcba9876543210', '9457aa83b1928c0d', 'f0e1d2c3b4a5'),
('fedcba9876543210', '8bb77032f960629d', 'f0e1d2c3b4a596'),
('fedcba9876543210', 'e87a244e2cc85e82', 'f0e1d2c3b4a59687'),
('fedcba9876543210', '15750e7a4f4ec577', 'f0e1d2c3b4a5968778'),
('fedcba9876543210', '122ba70b3ab64ae0', 'f0e1d2c3b4a596877869'),
('fedcba9876543210', '3a833c9affc537f6', 'f0e1d2c3b4a5968778695a'),
('fedcba9876543210', '9409da87a90f6bf2', 'f0e1d2c3b4a5968778695a4b'),
('fedcba9876543210', '884f80625060b8b4', 'f0e1d2c3b4a5968778695a4b3c'),
('fedcba9876543210', '1f85031c19e11968', 'f0e1d2c3b4a5968778695a4b3c2d'),
('fedcba9876543210', '79d9373a714ca34f', 'f0e1d2c3b4a5968778695a4b3c2d1e'),
('fedcba9876543210', '93142887ee3be15c',
'f0e1d2c3b4a5968778695a4b3c2d1e0f'),
('fedcba9876543210', '03429e838ce2d14b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00'),
('fedcba9876543210', 'a4299e27469ff67b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011'),
('fedcba9876543210', 'afd5aed1c1bc96a8',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122'),
('fedcba9876543210', '10851c0e3858da9f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233'),
('fedcba9876543210', 'e6f51ed79b9db21f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344'),
('fedcba9876543210', '64a6e14afd36b46f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455'),
('fedcba9876543210', '80c7d7d45a5479ad',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233445566'),
('fedcba9876543210', '05044b62fa52d080',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344556677'),
]
class KeyLength(unittest.TestCase):
def runTest(self):
self.assertRaises(ValueError, Blowfish.new, bchr(0) * 4,
Blowfish.MODE_ECB)
self.assertRaises(ValueError, Blowfish.new, bchr(0) * 57,
Blowfish.MODE_ECB)
def get_tests(config={}):
from common import make_block_tests
tests = make_block_tests(Blowfish, "Blowfish", test_data)
tests.append(KeyLength())
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| 49.088
| 79
| 0.71105
|
import unittest
from Cryptodome.Util.py3compat import bchr
from Cryptodome.Cipher import Blowfish
test_data = [
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('ffffffffffffffff', '51866fd5b85ecb8a', 'ffffffffffffffff'),
('1000000000000001', '7d856f9a613063f2', '3000000000000000'),
('1111111111111111', '2466dd878b963c9d', '1111111111111111'),
('1111111111111111', '61f9c3802281b096', '0123456789abcdef'),
('0123456789abcdef', '7d0cc630afda1ec7', '1111111111111111'),
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('0123456789abcdef', '0aceab0fc6a0a28d', 'fedcba9876543210'),
('01a1d6d039776742', '59c68245eb05282b', '7ca110454a1a6e57'),
('5cd54ca83def57da', 'b1b8cc0b250f09a0', '0131d9619dc1376e'),
('0248d43806f67172', '1730e5778bea1da4', '07a1133e4a0b2686'),
('51454b582ddf440a', 'a25e7856cf2651eb', '3849674c2602319e'),
('42fd443059577fa2', '353882b109ce8f1a', '04b915ba43feb5b6'),
('059b5e0851cf143a', '48f4d0884c379918', '0113b970fd34f2ce'),
('0756d8e0774761d2', '432193b78951fc98', '0170f175468fb5e6'),
('762514b829bf486a', '13f04154d69d1ae5', '43297fad38e373fe'),
('3bdd119049372802', '2eedda93ffd39c79', '07a7137045da2a16'),
('26955f6835af609a', 'd887e0393c2da6e3', '04689104c2fd3b2f'),
('164d5e404f275232', '5f99d04f5b163969', '37d06bb516cb7546'),
('6b056e18759f5cca', '4a057a3b24d3977b', '1f08260d1ac2465e'),
('004bd6ef09176062', '452031c1e4fada8e', '584023641aba6176'),
('480d39006ee762f2', '7555ae39f59b87bd', '025816164629b007'),
('437540c8698f3cfa', '53c55f9cb49fc019', '49793ebc79b3258f'),
('072d43a077075292', '7a8e7bfa937e89a3', '4fb05e1515ab73a7'),
('02fe55778117f12a', 'cf9c5d7a4986adb5', '49e95d6d4ca229bf'),
('1d9d5c5018f728c2', 'd1abb290658bc778', '018310dc409b26d6'),
('305532286d6f295a', '55cb3774d13ef201', '1c587f1c13924fef'),
('0123456789abcdef', 'fa34ec4847b268b2', '0101010101010101'),
('0123456789abcdef', 'a790795108ea3cae', '1f1f1f1f0e0e0e0e'),
('0123456789abcdef', 'c39e072d9fac631d', 'e0fee0fef1fef1fe'),
('ffffffffffffffff', '014933e0cdaff6e4', '0000000000000000'),
('0000000000000000', 'f21e9a77b71c49bc', 'ffffffffffffffff'),
('0000000000000000', '245946885754369a', '0123456789abcdef'),
('ffffffffffffffff', '6b5c5a9c5d9e0a5a', 'fedcba9876543210'),
('fedcba9876543210', 'b39e44481bdb1e6e', 'f0e1d2c3b4'),
('fedcba9876543210', '9457aa83b1928c0d', 'f0e1d2c3b4a5'),
('fedcba9876543210', '8bb77032f960629d', 'f0e1d2c3b4a596'),
('fedcba9876543210', 'e87a244e2cc85e82', 'f0e1d2c3b4a59687'),
('fedcba9876543210', '15750e7a4f4ec577', 'f0e1d2c3b4a5968778'),
('fedcba9876543210', '122ba70b3ab64ae0', 'f0e1d2c3b4a596877869'),
('fedcba9876543210', '3a833c9affc537f6', 'f0e1d2c3b4a5968778695a'),
('fedcba9876543210', '9409da87a90f6bf2', 'f0e1d2c3b4a5968778695a4b'),
('fedcba9876543210', '884f80625060b8b4', 'f0e1d2c3b4a5968778695a4b3c'),
('fedcba9876543210', '1f85031c19e11968', 'f0e1d2c3b4a5968778695a4b3c2d'),
('fedcba9876543210', '79d9373a714ca34f', 'f0e1d2c3b4a5968778695a4b3c2d1e'),
('fedcba9876543210', '93142887ee3be15c',
'f0e1d2c3b4a5968778695a4b3c2d1e0f'),
('fedcba9876543210', '03429e838ce2d14b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00'),
('fedcba9876543210', 'a4299e27469ff67b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011'),
('fedcba9876543210', 'afd5aed1c1bc96a8',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122'),
('fedcba9876543210', '10851c0e3858da9f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233'),
('fedcba9876543210', 'e6f51ed79b9db21f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344'),
('fedcba9876543210', '64a6e14afd36b46f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455'),
('fedcba9876543210', '80c7d7d45a5479ad',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233445566'),
('fedcba9876543210', '05044b62fa52d080',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344556677'),
]
class KeyLength(unittest.TestCase):
def runTest(self):
self.assertRaises(ValueError, Blowfish.new, bchr(0) * 4,
Blowfish.MODE_ECB)
self.assertRaises(ValueError, Blowfish.new, bchr(0) * 57,
Blowfish.MODE_ECB)
def get_tests(config={}):
from common import make_block_tests
tests = make_block_tests(Blowfish, "Blowfish", test_data)
tests.append(KeyLength())
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| true
| true
|
1c4a9dab8a4dee81d5f84fa57fcef0380cf1a15a
| 5,951
|
py
|
Python
|
google/auth/crypt/_python_rsa.py
|
renovate-bot/google-auth-library-python
|
a4cf9b1bf461a3fb35432e42f4d8bc1a8ff7bc97
|
[
"Apache-2.0"
] | 4
|
2021-10-20T12:39:09.000Z
|
2022-02-26T15:02:08.000Z
|
google/auth/crypt/_python_rsa.py
|
renovate-bot/google-auth-library-python
|
a4cf9b1bf461a3fb35432e42f4d8bc1a8ff7bc97
|
[
"Apache-2.0"
] | null | null | null |
google/auth/crypt/_python_rsa.py
|
renovate-bot/google-auth-library-python
|
a4cf9b1bf461a3fb35432e42f4d8bc1a8ff7bc97
|
[
"Apache-2.0"
] | 1
|
2021-10-20T13:47:10.000Z
|
2021-10-20T13:47:10.000Z
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure-Python RSA cryptography implementation.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates. There is no support for p12 files.
"""
from __future__ import absolute_import
import io
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
from google.auth import _helpers
from google.auth.crypt import base
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----")
_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----")
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1s and 0s to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
Args:
bit_list (Sequence): Sequence of 1s and 0s.
Returns:
bytes: The decoded bytes.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in range(0, num_bits, 8):
curr_bits = bit_list[start : start + 8]
char_val = sum(val * digit for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
class RSAVerifier(base.Verifier):
"""Verifies RSA cryptographic signatures using public keys.
Args:
public_key (rsa.key.PublicKey): The public key used to verify
signatures.
"""
def __init__(self, public_key):
self._pubkey = public_key
@_helpers.copy_docstring(base.Verifier)
def verify(self, message, signature):
message = _helpers.to_bytes(message)
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, public_key):
"""Construct an Verifier instance from a public key or public
certificate string.
Args:
public_key (Union[str, bytes]): The public key in PEM format or the
x509 public key certificate.
Returns:
google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier.
Raises:
ValueError: If the public_key can't be parsed.
"""
public_key = _helpers.to_bytes(public_key)
is_x509_cert = _CERTIFICATE_MARKER in public_key
# If this is a certificate, extract the public key info.
if is_x509_cert:
der = rsa.pem.load_pem(public_key, "CERTIFICATE")
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b"":
raise ValueError("Unused bytes", remaining)
cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"]
key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER")
else:
pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM")
return cls(pubkey)
class RSASigner(base.Signer, base.FromServiceAccountMixin):
"""Signs messages with an RSA private key.
Args:
private_key (rsa.key.PrivateKey): The private key to sign with.
key_id (str): Optional key ID used to identify this private key. This
can be useful to associate the private key with its associated
public key or certificate.
"""
def __init__(self, private_key, key_id=None):
self._key = private_key
self._key_id = key_id
@property
@_helpers.copy_docstring(base.Signer)
def key_id(self):
return self._key_id
@_helpers.copy_docstring(base.Signer)
def sign(self, message):
message = _helpers.to_bytes(message)
return rsa.pkcs1.sign(message, self._key, "SHA-256")
@classmethod
def from_string(cls, key, key_id=None):
"""Construct an Signer instance from a private key in PEM format.
Args:
key (str): Private key in PEM format.
key_id (str): An optional key id used to identify the private key.
Returns:
google.auth.crypt.Signer: The constructed signer.
Raises:
ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers.from_bytes(key) # PEM expects str in Python 3
marker_id, key_bytes = pem.readPemBlocksFromFile(
io.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER
)
# Key is in pkcs1 format.
if marker_id == 0:
private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER")
# Key is in pkcs8.
elif marker_id == 1:
key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b"":
raise ValueError("Unused bytes", remaining)
private_key_info = key_info.getComponentByName("privateKey")
private_key = rsa.key.PrivateKey.load_pkcs1(
private_key_info.asOctets(), format="DER"
)
else:
raise ValueError("No key could be detected.")
return cls(private_key, key_id=key_id)
| 34.005714
| 84
| 0.65384
|
from __future__ import absolute_import
import io
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
from google.auth import _helpers
from google.auth.crypt import base
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----")
_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----")
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
num_bits = len(bit_list)
byte_vals = bytearray()
for start in range(0, num_bits, 8):
curr_bits = bit_list[start : start + 8]
char_val = sum(val * digit for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
class RSAVerifier(base.Verifier):
def __init__(self, public_key):
self._pubkey = public_key
@_helpers.copy_docstring(base.Verifier)
def verify(self, message, signature):
message = _helpers.to_bytes(message)
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, public_key):
public_key = _helpers.to_bytes(public_key)
is_x509_cert = _CERTIFICATE_MARKER in public_key
if is_x509_cert:
der = rsa.pem.load_pem(public_key, "CERTIFICATE")
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b"":
raise ValueError("Unused bytes", remaining)
cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"]
key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER")
else:
pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM")
return cls(pubkey)
class RSASigner(base.Signer, base.FromServiceAccountMixin):
def __init__(self, private_key, key_id=None):
self._key = private_key
self._key_id = key_id
@property
@_helpers.copy_docstring(base.Signer)
def key_id(self):
return self._key_id
@_helpers.copy_docstring(base.Signer)
def sign(self, message):
message = _helpers.to_bytes(message)
return rsa.pkcs1.sign(message, self._key, "SHA-256")
@classmethod
def from_string(cls, key, key_id=None):
key = _helpers.from_bytes(key)
marker_id, key_bytes = pem.readPemBlocksFromFile(
io.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER
)
if marker_id == 0:
private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER")
elif marker_id == 1:
key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b"":
raise ValueError("Unused bytes", remaining)
private_key_info = key_info.getComponentByName("privateKey")
private_key = rsa.key.PrivateKey.load_pkcs1(
private_key_info.asOctets(), format="DER"
)
else:
raise ValueError("No key could be detected.")
return cls(private_key, key_id=key_id)
| true
| true
|
1c4a9db4d968f272c0318dc71a033cc4af40f6ef
| 6,241
|
py
|
Python
|
biotrees/shape/__init__.py
|
bielr/biotrees
|
dc588888cec557d9522fc6faaa560488a95c946d
|
[
"Apache-2.0"
] | null | null | null |
biotrees/shape/__init__.py
|
bielr/biotrees
|
dc588888cec557d9522fc6faaa560488a95c946d
|
[
"Apache-2.0"
] | null | null | null |
biotrees/shape/__init__.py
|
bielr/biotrees
|
dc588888cec557d9522fc6faaa560488a95c946d
|
[
"Apache-2.0"
] | null | null | null |
from biotrees.util import iter_merge, skip_nth
"""
A `Shape` represents a topological tree. The data structure implemented here is of recursive type: a `Shape` can be
either a leaf or a list of `Shape` objects. Leaves are not distinguishable, but we know that they are leaves.
We choose a sorted shape to be the class representant of all shapes isomorphic to it.
"""
__all__ = ['Shape']
class Shape(object):
LEAF = None # filled in after class def
CHERRY = None # filled in after class def
"""
A `Shape` instance is either a leaf or a list of `Shape` instances that hang from a root.
"""
def __init__(self, children=None):
"""
Create a new `Shape` object.
The boolean is_leaf is True if the object is a leaf; it is False otherwise.
:param children: `list` instance.
:return: `Shape` instance.
"""
assert children is None or len(children) > 0
self.children = children
def is_leaf(self):
return self.children is None
def clone(self):
"""
Returns `Shape` instance which is exactly the same as self.
:return: `Shape` instance.
"""
if self.is_leaf():
return Shape.LEAF
else:
return Shape([ch.clone() for ch in self.children])
def _is_sorted(self):
if self.is_leaf():
return True
children = self.children
return all(ch._is_sorted() for ch in children) and \
all(ch1 <= ch2 for ch1, ch2 in zip(children[:-1], children[1:]))
def _sort(self):
"""
Sorts self using lexicographical order.
"""
if self.is_leaf():
return
for t in self.children:
t._sort()
self.children.sort()
def compare(self, t2):
"""
Compare self with another `Shape` object. We use lexicographical order in order to compare two `Shape` instances.
Leaves in this case are indistinguishable. It returns anint c, which is 0 if self and T2 are equal, < 0 if
self < T2, and > 0 if self > T2.
:param t2: `Shape` instance.
:return: `int` instance.
"""
if self.is_leaf() and t2.is_leaf():
return 0
elif self.is_leaf():
return -1
elif t2.is_leaf():
return 1
else:
c = len(self.children) - len(t2.children)
if c != 0:
return c
for t1, t2 in zip(self.children, t2.children):
c = t1.compare(t2)
if c != 0:
return c
return c
def __lt__(self, t2):
"""
Uses the comparing method above to decide if self is less than T2.
:param t2: the `Shape` object against which we compare self.
:return: `bool` instance.
"""
return self.compare(t2) < 0
def __le__(self, t2):
"""
Uses the comparing method above to decide if self is less or equal than T2.
:param t2: the `Shape` object against which we compare self.
:return: `bool` instance.
"""
return self.compare(t2) <= 0
def __eq__(self, t2):
"""
Uses the comparing method above to decide if self is equal to T2.
:param t2: the `Shape` object against which we compare self.
:return: `bool` instance.
"""
return self.compare(t2) == 0
def __ne__(self, t2):
"""
Uses the comparing method above to decide if self is not equal to T2.
:param t2: the `Shape` object against which we compare self.
:return: `bool` instance.
"""
return self.compare(t2) != 0
def __ge__(self, t2):
"""
Uses the comparing method above to decide if self is greater or equal than T2.
:param t2: the `Shape` object against which we compare self.
:return: `bool` instance.
"""
return self.compare(t2) >= 0
def __gt__(self, t2):
"""
Uses the comparing method above to decide if self is greater than T2.
:param t2: the `Shape` object against which we compare self.
:return: `bool` instance.
"""
return self.compare(t2) > 0
def __str__(self):
from biotrees.shape.newick import to_newick
return to_newick(self)
def __repr__(self):
return str(self)
def shape(self):
"""
Returns the `Shape` associated to self. Namely, it "forgets" the labels of the leafs.
:return: `Shape` instance.
"""
return self
Shape.LEAF = Shape()
Shape.CHERRY = Shape([Shape.LEAF, Shape.LEAF])
def is_binary(t):
"""
Returns True if t is a binary shape.
:return: `bool` instance
"""
return t.is_leaf() or \
(len(t.children) == 2 and all(is_binary(ch) for t in t.children))
def count_leaves(t):
"""
Returns the number of leaves in t.
:return: `int` instance.
"""
if t.is_leaf():
return 1
else:
return sum(count_leaves(t) for t in t.children)
def get_depth(t):
"""
Returns an integer representing the maximal depth of the shape, from the root to one
of its furthest leaves.
:return: `int` instance.
"""
if t.is_leaf():
return 0
else:
return max(get_depth(ch) for ch in t.children) + 1
def leaf_depths(t):
"""
Returns a generator of integers representing the depth of each leaf in the tree
:return: generator of integers
"""
if t.is_leaf():
yield 0
else:
for ch in t.children:
for depth in leaf_depths(ch):
yield depth+1
def get_leaf_depths(t):
"""
Returns a list of integers representing the depth of each leaf in the tree
:return: list of integers
"""
return list(leaf_depths(t))
def count_nodes_by_depth(t):
total_depth = get_depth(t)
nodes_by_depth = [0]*(total_depth+1)
def navigate(t2, d):
if not t2.is_leaf():
d1 = d+1
nodes_by_depth[d1] += len(t2.children)
for ch in t2.children:
navigate(ch, d1)
nodes_by_depth[0] += 1
navigate(t, 0)
return nodes_by_depth
| 28.239819
| 121
| 0.577792
|
from biotrees.util import iter_merge, skip_nth
__all__ = ['Shape']
class Shape(object):
LEAF = None
CHERRY = None
def __init__(self, children=None):
assert children is None or len(children) > 0
self.children = children
def is_leaf(self):
return self.children is None
def clone(self):
if self.is_leaf():
return Shape.LEAF
else:
return Shape([ch.clone() for ch in self.children])
def _is_sorted(self):
if self.is_leaf():
return True
children = self.children
return all(ch._is_sorted() for ch in children) and \
all(ch1 <= ch2 for ch1, ch2 in zip(children[:-1], children[1:]))
def _sort(self):
if self.is_leaf():
return
for t in self.children:
t._sort()
self.children.sort()
def compare(self, t2):
if self.is_leaf() and t2.is_leaf():
return 0
elif self.is_leaf():
return -1
elif t2.is_leaf():
return 1
else:
c = len(self.children) - len(t2.children)
if c != 0:
return c
for t1, t2 in zip(self.children, t2.children):
c = t1.compare(t2)
if c != 0:
return c
return c
def __lt__(self, t2):
return self.compare(t2) < 0
def __le__(self, t2):
return self.compare(t2) <= 0
def __eq__(self, t2):
return self.compare(t2) == 0
def __ne__(self, t2):
return self.compare(t2) != 0
def __ge__(self, t2):
return self.compare(t2) >= 0
def __gt__(self, t2):
return self.compare(t2) > 0
def __str__(self):
from biotrees.shape.newick import to_newick
return to_newick(self)
def __repr__(self):
return str(self)
def shape(self):
return self
Shape.LEAF = Shape()
Shape.CHERRY = Shape([Shape.LEAF, Shape.LEAF])
def is_binary(t):
return t.is_leaf() or \
(len(t.children) == 2 and all(is_binary(ch) for t in t.children))
def count_leaves(t):
if t.is_leaf():
return 1
else:
return sum(count_leaves(t) for t in t.children)
def get_depth(t):
if t.is_leaf():
return 0
else:
return max(get_depth(ch) for ch in t.children) + 1
def leaf_depths(t):
if t.is_leaf():
yield 0
else:
for ch in t.children:
for depth in leaf_depths(ch):
yield depth+1
def get_leaf_depths(t):
return list(leaf_depths(t))
def count_nodes_by_depth(t):
total_depth = get_depth(t)
nodes_by_depth = [0]*(total_depth+1)
def navigate(t2, d):
if not t2.is_leaf():
d1 = d+1
nodes_by_depth[d1] += len(t2.children)
for ch in t2.children:
navigate(ch, d1)
nodes_by_depth[0] += 1
navigate(t, 0)
return nodes_by_depth
| true
| true
|
1c4a9e04658f85d01a0faf1688239f940ae2016c
| 552
|
py
|
Python
|
scripts/subscribe_kinect.py
|
hubertbraszko/follow-marker
|
2c168c2c9705fc2076712e20c62e05a439df6cf8
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/subscribe_kinect.py
|
hubertbraszko/follow-marker
|
2c168c2c9705fc2076712e20c62e05a439df6cf8
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/subscribe_kinect.py
|
hubertbraszko/follow-marker
|
2c168c2c9705fc2076712e20c62e05a439df6cf8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
import sys
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
def process_image(msg):
bridge = CvBridge()
img = bridge.imgmsg_to_cv2(msg, "bgr8")
cv2.imshow("image",img)
cv2.waitKey(50)
if __name__ == '__main__':
while not rospy.is_shutdown():
rospy.init_node('kinect_subscriber')
rospy.loginfo('image_sub node started')
rospy.Subscriber("/base_kinect/color/image_raw", Image, process_image)
rospy.spin()
| 25.090909
| 78
| 0.706522
|
import rospy
from std_msgs.msg import String
import sys
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
def process_image(msg):
bridge = CvBridge()
img = bridge.imgmsg_to_cv2(msg, "bgr8")
cv2.imshow("image",img)
cv2.waitKey(50)
if __name__ == '__main__':
while not rospy.is_shutdown():
rospy.init_node('kinect_subscriber')
rospy.loginfo('image_sub node started')
rospy.Subscriber("/base_kinect/color/image_raw", Image, process_image)
rospy.spin()
| true
| true
|
1c4a9e3834a7eccc3c3b7c6fddcede09ac3c225d
| 4,372
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
pniwre/Sato_origin
|
5accbe690dbd2cae305cc7120a4824243f89701c
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
pniwre/Sato_origin
|
5accbe690dbd2cae305cc7120a4824243f89701c
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
pniwre/Sato_origin
|
5accbe690dbd2cae305cc7120a4824243f89701c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef RAVEN_CHAINPARAMSSEEDS_H\n')
g.write('#define RAVEN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the sato network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 8767)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 18767)
g.write('#endif // RAVEN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.453237
| 98
| 0.579597
|
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef RAVEN_CHAINPARAMSSEEDS_H\n')
g.write('#define RAVEN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the sato network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 8767)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 18767)
g.write('#endif // RAVEN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
1c4a9e6deb4f7f104b05973b61354f9580522564
| 460
|
py
|
Python
|
bin/rehex.py
|
CryptoDEX/sentinel
|
88ac417f34c4f42670298abf3f252ab8652b7887
|
[
"MIT"
] | null | null | null |
bin/rehex.py
|
CryptoDEX/sentinel
|
88ac417f34c4f42670298abf3f252ab8652b7887
|
[
"MIT"
] | null | null | null |
bin/rehex.py
|
CryptoDEX/sentinel
|
88ac417f34c4f42670298abf3f252ab8652b7887
|
[
"MIT"
] | null | null | null |
import simplejson
import binascii
import sys
import pdb
from pprint import pprint
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import cryptodexlib
# ============================================================================
usage = "%s <hex>" % sys.argv[0]
obj = None
if len(sys.argv) < 2:
print(usage)
sys.exit(1)
else:
obj = cryptodexlib.deserialise(sys.argv[1])
pdb.set_trace()
1
| 20.909091
| 84
| 0.591304
|
import simplejson
import binascii
import sys
import pdb
from pprint import pprint
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import cryptodexlib
usage = "%s <hex>" % sys.argv[0]
obj = None
if len(sys.argv) < 2:
print(usage)
sys.exit(1)
else:
obj = cryptodexlib.deserialise(sys.argv[1])
pdb.set_trace()
1
| true
| true
|
1c4a9e7ab48b06150d1646076a4a5a0117b6efcc
| 2,186
|
py
|
Python
|
utils/modify_ravdess.py
|
sahiljuneja/kaggle-ctds
|
caac226f2c5d33b6d324c5cf33a777758b9163d1
|
[
"CC-BY-4.0"
] | null | null | null |
utils/modify_ravdess.py
|
sahiljuneja/kaggle-ctds
|
caac226f2c5d33b6d324c5cf33a777758b9163d1
|
[
"CC-BY-4.0"
] | null | null | null |
utils/modify_ravdess.py
|
sahiljuneja/kaggle-ctds
|
caac226f2c5d33b6d324c5cf33a777758b9163d1
|
[
"CC-BY-4.0"
] | null | null | null |
import re
import os
import argparse
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np


def modify_data(input_path, save_path, dir_dict):
    """Convert RAVDESS .wav files into mel-spectrogram .jpeg images.

    Walks every actor folder under ``input_path``; for each audio file the
    emotion code (third numeric field of the RAVDESS file name) selects the
    output sub-directory ``save_path + emotion``. Files that were already
    converted in a previous run are skipped.

    :param input_path: root directory holding one sub-folder per actor
    :param save_path: root directory that receives per-emotion folders
    :param dir_dict: maps RAVDESS emotion codes ('01'..'08') to label names
    """
    for folder in os.listdir(input_path):
        folder = os.path.join(input_path, folder)
        for file in os.listdir(folder):
            # RAVDESS names look like 03-01-05-...: the third numeric
            # field is the emotion code. Raw string avoids the invalid
            # escape sequence warning '\d' raises on modern Python.
            num = re.findall(r'\d+', file)
            emotion = dir_dict[num[2]]
            file_save_path = save_path + str(emotion)
            if not os.path.isdir(file_save_path):
                os.makedirs(file_save_path)
            load_file_path = '{0}/{1}'.format(folder, file)
            file_name = "/{}.jpeg".format(file[:-4])
            if not os.path.isfile(file_save_path + file_name):
                y, sr = librosa.load(load_file_path)
                # Trim leading/trailing silence before the spectrogram.
                yt, _ = librosa.effects.trim(y)
                mel_spect = librosa.feature.melspectrogram(y=yt, sr=sr, n_fft=1024, hop_length=100)
                mel_spect = librosa.power_to_db(mel_spect, ref=np.max)
                librosa.display.specshow(mel_spect, y_axis='mel', fmax=20000, x_axis='time')
                plt.savefig(file_save_path + file_name)
                # Close the figure so matplotlib state does not accumulate
                # (and leak memory) across the thousands of files processed.
                plt.close()


if __name__ == "__main__":
    # sample call
    # python modify_ravdess.py -p /notebooks/storage/ravdess/ -s /notebooks/storage/ravdess_mod/
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", type=str, help="path to raw data")
    ap.add_argument("-s", "--save", type=str, help="path to save data after processing")
    args = vars(ap.parse_args())

    # RAVDESS emotion-code -> label mapping.
    dir_dict = {'01': 'neutral', '02': 'calm', '03': 'happy', '04': 'sad',
                '05': 'angry', '06': 'fearful', '07': 'disgust', '08': 'surprised'}

    ip_path = args["path"]
    save_path = args["save"]
    if not os.path.isdir(save_path):
        os.makedirs(save_path)

    modify_data(ip_path, save_path, dir_dict)
    print("Data converted from .wav to .jpeg")
| 31.681159
| 98
| 0.578225
|
import re
import os
import argparse
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
def modify_data(input_path, save_path, dir_dict):
    """Convert every RAVDESS .wav file under ``input_path`` into a
    mel-spectrogram .jpeg stored in a per-emotion folder under ``save_path``.

    :param input_path: root directory with one sub-folder per actor
    :param save_path: output root; one sub-folder per emotion is created
    :param dir_dict: maps RAVDESS emotion codes ('01'..'08') to label names
    """
    path = os.listdir(input_path)
    for folders in path:
        folders = os.path.sep.join([input_path, folders])
        for file in os.listdir(folders):
            # The third numeric field of a RAVDESS file name is the
            # emotion code (e.g. 03-01-05-... -> '05').
            num = re.findall('\d+', file)
            emotion = dir_dict[num[2]]
            file_save_path = save_path + str(emotion)
            if not os.path.isdir(file_save_path):
                os.makedirs(file_save_path)
            load_file_path = '{0}/{1}'.format(folders, file)
            file_name = "/{}.jpeg".format(file[:-4])
            # Skip files already converted in a previous run.
            if not os.path.isfile(file_save_path + file_name):
                y, sr = librosa.load(load_file_path)
                # Trim leading/trailing silence before the spectrogram.
                yt, _ = librosa.effects.trim(y)
                y = yt
                mel_spect = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024, hop_length=100)
                mel_spect = librosa.power_to_db(mel_spect, ref=np.max)
                librosa.display.specshow(mel_spect, y_axis='mel', fmax=20000, x_axis='time');
                # NOTE(review): the figure is never closed/cleared, so
                # matplotlib state accumulates across files — confirm.
                plt.savefig(file_save_path + file_name)
if __name__ == "__main__":
    # Example: python modify_ravdess.py -p <raw_dir> -s <out_dir>
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", type=str, help="path to raw data")
    ap.add_argument("-s", "--save", type=str, help="path to save data after processing")
    args = vars(ap.parse_args())
    # RAVDESS emotion-code -> label mapping.
    dir_dict = {'01' : 'neutral', '02' : 'calm', '03' : 'happy', '04' : 'sad',
                '05' : 'angry', '06' : 'fearful', '07' : 'disgust', '08' : 'surprised'}
    ip_path = args["path"]
    save_path = args["save"]
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    modify_data(ip_path, save_path, dir_dict)
    print("Data converted from .wav to .jpeg")
| true
| true
|
1c4aa08fdf2f3f6ab9a83890a7c17c9b27fbc3ac
| 6,062
|
py
|
Python
|
dyne/adjacency/coherence.py
|
akhambhati/dyne2
|
d2f050b3d14ef429fc9c52821e87f1c9a52a521d
|
[
"BSD-3-Clause"
] | 7
|
2015-01-11T03:57:19.000Z
|
2020-07-11T12:05:57.000Z
|
dyne/adjacency/coherence.py
|
akhambhati/dyne
|
d2f050b3d14ef429fc9c52821e87f1c9a52a521d
|
[
"BSD-3-Clause"
] | null | null | null |
dyne/adjacency/coherence.py
|
akhambhati/dyne
|
d2f050b3d14ef429fc9c52821e87f1c9a52a521d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Coherence pipes for quantifying signal similarity (i.e. connectivity)
Created by: Ankit Khambhati
Change Log
----------
2016/03/06 - Implemented WelchCoh and MTCoh pipes
"""
from __future__ import division
import numpy as np
from mtspec import mt_coherence, mtspec
from scipy.signal import coherence
import matplotlib.pyplot as plt
from ..errors import check_type
from ..base import AdjacencyPipe
class WelchCoh(AdjacencyPipe):
    """
    WelchCoh pipe for spectral coherence estimation using Welch's method

    Parameters
    ----------
    window: str
        Desired window to use. See Scipy get_window for a list of windows.
    secperseg: float
        Length of each segment in seconds. Recommended half of window length.
    pctoverlap: float (0<x<1)
        Percent overlap between segments. Recommended values of 50 pct.
    cf: list
        Frequency range over which to compute coherence [cf[0], cf[1]]
    """

    def __init__(self, window, secperseg, pctoverlap, cf):
        # Standard param checks
        check_type(window, str)
        check_type(secperseg, float)
        check_type(pctoverlap, float)
        check_type(cf, list)
        if not len(cf) == 2:
            raise Exception('Must give a frequency range in list of length 2')
        if (pctoverlap > 1) or (pctoverlap < 0):
            raise Exception('Percent overlap must be a positive fraction')

        # Assign to instance
        self.window = window
        self.secperseg = secperseg
        self.pctoverlap = pctoverlap
        self.cf = cf

    def _pipe_as_flow(self, signal_packet):
        """Return a packet whose 'data' is the symmetric channel-by-channel
        coherence matrix averaged over the configured frequency band.
        """
        # Unpack the single-entry packet. Fixed: next(iter(...)) replaces
        # ``.keys()[0]``, which raises TypeError on Python 3.
        hkey = next(iter(signal_packet))
        ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']
        ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']
        signal = signal_packet[hkey]['data']
        # Sampling rate from the time-axis spacing. Fixed: builtin int()
        # replaces the np.int alias removed in NumPy >= 1.24.
        fs = int(np.mean(1. / np.diff(ax_0_ix)))

        # Assume undirected connectivity: upper-triangle channel pairs.
        triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)
        # Initialize association matrix
        adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))

        # Derive signal segmenting for Welch coherence estimation.
        nperseg = int(self.secperseg * fs)
        noverlap = int(self.secperseg * fs * self.pctoverlap)

        freq, Cxy = coherence(signal[:, triu_ix],
                              signal[:, triu_iy],
                              fs=fs, window=self.window,
                              nperseg=nperseg, noverlap=noverlap,
                              axis=0)

        # Average coherence over the requested band [cf[0], cf[1]].
        cf_idx = np.flatnonzero((freq >= self.cf[0]) &
                                (freq <= self.cf[1]))

        # Store coherence in the (symmetrized) association matrix.
        adj[triu_ix, triu_iy] = np.mean(Cxy[cf_idx, :], axis=0)
        adj += adj.T

        # Repack: both axes now index channels; 'time' records the
        # window's end time.
        new_packet = {}
        new_packet[hkey] = {
            'data': adj,
            'meta': {
                'ax_0': signal_packet[hkey]['meta']['ax_1'],
                'ax_1': signal_packet[hkey]['meta']['ax_1'],
                'time': {
                    'label': 'Time (sec)',
                    # Fixed: builtin float() replaces the removed np.float alias.
                    'index': float(ax_0_ix[-1])
                }
            }
        }
        return new_packet
class MTCoh(AdjacencyPipe):
    """
    MTCoh pipe for spectral coherence estimation using
    multitaper methods

    Parameters
    ----------
    time_band: float
        The time half bandwidth resolution of the estimate [-NW, NW];
        such that resolution is 2*NW
    n_taper: int
        Number of Slepian sequences to use (Usually < 2*NW-1)
    cf: list
        Frequency range over which to compute coherence [cf[0], cf[1]]
    """

    def __init__(self, time_band, n_taper, cf):
        # Standard param checks
        check_type(time_band, float)
        check_type(n_taper, int)
        check_type(cf, list)
        if n_taper >= 2 * time_band:
            raise Exception('Number of tapers must be less than 2*time_band')
        if not len(cf) == 2:
            raise Exception('Must give a frequency range in list of length 2')

        # Assign instance parameters
        self.time_band = time_band
        self.n_taper = n_taper
        self.cf = cf

    def _pipe_as_flow(self, signal_packet):
        """Return a packet whose 'data' is the symmetric channel-pair
        multitaper coherence matrix, band-averaged over self.cf.
        """
        # Unpack the single-entry packet. Fixed: next(iter(...)) replaces
        # ``.keys()[0]``, which raises TypeError on Python 3.
        hkey = next(iter(signal_packet))
        ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']
        ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']
        signal = signal_packet[hkey]['data']
        # Sampling rate from the time-axis spacing. Fixed: builtin int()
        # replaces the np.int alias removed in NumPy >= 1.24.
        fs = int(np.mean(1. / np.diff(ax_0_ix)))

        # Assume undirected connectivity: upper-triangle channel pairs.
        triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)
        # Initialize association matrix
        adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))

        # Compute all pairwise coherences.
        for n1, n2 in zip(triu_ix, triu_iy):
            out = mt_coherence(1.0 / fs,
                               signal[:, n1],
                               signal[:, n2],
                               self.time_band,
                               self.n_taper,
                               int(len(ax_0_ix) / 2.), 0.95,
                               iadapt=1,
                               cohe=True, freq=True)

            # Average coherence over the requested band [cf[0], cf[1]].
            cf_idx = np.flatnonzero((out['freq'] >= self.cf[0]) &
                                    (out['freq'] <= self.cf[1]))
            adj[n1, n2] = np.mean(out['cohe'][cf_idx])
        adj += adj.T

        # Repack: both axes now index channels; 'time' records the
        # window's end time.
        new_packet = {}
        new_packet[hkey] = {
            'data': adj,
            'meta': {
                'ax_0': signal_packet[hkey]['meta']['ax_1'],
                'ax_1': signal_packet[hkey]['meta']['ax_1'],
                'time': {
                    'label': 'Time (sec)',
                    # Fixed: builtin float() replaces the removed np.float alias.
                    'index': float(ax_0_ix[-1])
                }
            }
        }
        return new_packet
| 32.591398
| 81
| 0.54421
|
from __future__ import division
import numpy as np
from mtspec import mt_coherence, mtspec
from scipy.signal import coherence
import matplotlib.pyplot as plt
from ..errors import check_type
from ..base import AdjacencyPipe
class WelchCoh(AdjacencyPipe):
    """Band-averaged spectral coherence between all channel pairs using
    Welch's method (scipy.signal.coherence).

    window: scipy window name; secperseg: segment length in seconds;
    pctoverlap: fractional segment overlap in (0, 1); cf: [low, high]
    frequency band to average over.
    """
    def __init__(self, window, secperseg, pctoverlap, cf):
        # Validate parameter types and ranges up front.
        check_type(window, str)
        check_type(secperseg, float)
        check_type(pctoverlap, float)
        check_type(cf, list)
        if not len(cf) == 2:
            raise Exception('Must give a frequency range in list of length 2')
        if (pctoverlap > 1) or (pctoverlap < 0):
            raise Exception('Percent overlap must be a positive fraction')
        self.window = window
        self.secperseg = secperseg
        self.pctoverlap = pctoverlap
        self.cf = cf
    def _pipe_as_flow(self, signal_packet):
        """Return a packet whose 'data' is the symmetric channel-by-channel
        coherence matrix averaged over the configured frequency band.
        """
        # Unpack the single-entry packet (time index, channel index, data).
        # NOTE(review): ``.keys()[0]`` is Python-2-only; fails on Python 3.
        hkey = signal_packet.keys()[0]
        ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']
        ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']
        signal = signal_packet[hkey]['data']
        # Sampling rate inferred from the time-axis spacing.
        fs = np.int(np.mean(1./np.diff(ax_0_ix)))
        # Upper-triangle channel pairs (undirected connectivity).
        triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)
        adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))
        # Segment length / overlap in samples for Welch averaging.
        nperseg = int(self.secperseg*fs)
        noverlap = int(self.secperseg*fs*self.pctoverlap)
        freq, Cxy = coherence(signal[:, triu_ix],
                              signal[:, triu_iy],
                              fs=fs, window=self.window,
                              nperseg=nperseg, noverlap=noverlap,
                              axis=0)
        # Average coherence across the requested band [cf[0], cf[1]].
        cf_idx = np.flatnonzero((freq >= self.cf[0]) &
                                (freq <= self.cf[1]))
        adj[triu_ix, triu_iy] = np.mean(Cxy[cf_idx, :], axis=0)
        adj += adj.T
        # Repack: both axes now index channels; 'time' keeps the window end.
        new_packet = {}
        new_packet[hkey] = {
            'data': adj,
            'meta': {
                'ax_0': signal_packet[hkey]['meta']['ax_1'],
                'ax_1': signal_packet[hkey]['meta']['ax_1'],
                'time': {
                    'label': 'Time (sec)',
                    'index': np.float(ax_0_ix[-1])
                }
            }
        }
        return new_packet
class MTCoh(AdjacencyPipe):
    """Band-averaged multitaper spectral coherence between all channel
    pairs (mtspec.mt_coherence).

    time_band: time half-bandwidth NW; n_taper: number of Slepian tapers
    (must be < 2*NW); cf: [low, high] frequency band to average over.
    """
    def __init__(self, time_band, n_taper, cf):
        # Validate parameter types and the taper/bandwidth relationship.
        check_type(time_band, float)
        check_type(n_taper, int)
        check_type(cf, list)
        if n_taper >= 2*time_band:
            raise Exception('Number of tapers must be less than 2*time_band')
        if not len(cf) == 2:
            raise Exception('Must give a frequency range in list of length 2')
        self.time_band = time_band
        self.n_taper = n_taper
        self.cf = cf
    def _pipe_as_flow(self, signal_packet):
        """Return a packet whose 'data' is the symmetric channel-pair
        multitaper coherence matrix, band-averaged over self.cf.
        """
        # Unpack the single-entry packet (time index, channel index, data).
        # NOTE(review): ``.keys()[0]`` is Python-2-only; fails on Python 3.
        hkey = signal_packet.keys()[0]
        ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']
        ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']
        signal = signal_packet[hkey]['data']
        # Sampling rate inferred from the time-axis spacing.
        fs = np.int(np.mean(1./np.diff(ax_0_ix)))
        # Upper-triangle channel pairs (undirected connectivity).
        triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)
        adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))
        # One mt_coherence call per channel pair.
        for n1, n2 in zip(triu_ix, triu_iy):
            out = mt_coherence(1.0/fs,
                               signal[:, n1],
                               signal[:, n2],
                               self.time_band,
                               self.n_taper,
                               int(len(ax_0_ix)/2.), 0.95,
                               iadapt=1,
                               cohe=True, freq=True)
            # Average coherence across the requested band [cf[0], cf[1]].
            cf_idx = np.flatnonzero((out['freq'] >= self.cf[0]) &
                                    (out['freq'] <= self.cf[1]))
            adj[n1, n2] = np.mean(out['cohe'][cf_idx])
        adj += adj.T
        # Repack: both axes now index channels; 'time' keeps the window end.
        new_packet = {}
        new_packet[hkey] = {
            'data': adj,
            'meta': {
                'ax_0': signal_packet[hkey]['meta']['ax_1'],
                'ax_1': signal_packet[hkey]['meta']['ax_1'],
                'time': {
                    'label': 'Time (sec)',
                    'index': np.float(ax_0_ix[-1])
                }
            }
        }
        return new_packet
| true
| true
|
1c4aa09a56123eb89175e65ef12cd083888a57e1
| 3,456
|
py
|
Python
|
generated-libraries/python/netapp/iscsi/iscsi_security_entry_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/iscsi/iscsi_security_entry_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/iscsi/iscsi_security_entry_info.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class IscsiSecurityEntryInfo(NetAppObject):
    """
    Information about a single authentication entry.

    Each property mirrors one field of the ONTAP
    'iscsi-security-entry-info' API element; setters validate a non-None
    value via NetAppObject.validate before storing it.
    (Fixed: comparisons to None use 'is not None' per PEP 8, not '!=';
    behavior is unchanged.)
    """
    _user_name = None
    @property
    def user_name(self):
        """
        Inbound CHAP user name, returned only if auth-type is CHAP.
        """
        return self._user_name
    @user_name.setter
    def user_name(self, val):
        if val is not None:
            self.validate('user_name', val)
            self._user_name = val
    _auth_chap_policy = None
    @property
    def auth_chap_policy(self):
        """
        CHAP authentication path. Possible values: "local",
        "radius".
        """
        return self._auth_chap_policy
    @auth_chap_policy.setter
    def auth_chap_policy(self, val):
        if val is not None:
            self.validate('auth_chap_policy', val)
            self._auth_chap_policy = val
    _initiator = None
    @property
    def initiator(self):
        """
        Name of initiator. The initiator name must conform to
        RFC 3720, for example:
        "iqn.1987-06.com.initvendor1:appsrv.sn.2346",
        or "default" if this is a default auth entry.
        """
        return self._initiator
    @initiator.setter
    def initiator(self, val):
        if val is not None:
            self.validate('initiator', val)
            self._initiator = val
    _vserver = None
    @property
    def vserver(self):
        """
        Vserver containing this authentication information.
        """
        return self._vserver
    @vserver.setter
    def vserver(self, val):
        if val is not None:
            self.validate('vserver', val)
            self._vserver = val
    _auth_type = None
    @property
    def auth_type(self):
        """
        Authentication type. Possible values: "CHAP", "none", "deny".
        """
        return self._auth_type
    @auth_type.setter
    def auth_type(self, val):
        if val is not None:
            self.validate('auth_type', val)
            self._auth_type = val
    _outbound_user_name = None
    @property
    def outbound_user_name(self):
        """
        Outbound CHAP user name, returned only if auth-type is CHAP,
        and outbound authentication is set for initiator.
        """
        return self._outbound_user_name
    @outbound_user_name.setter
    def outbound_user_name(self, val):
        if val is not None:
            self.validate('outbound_user_name', val)
            self._outbound_user_name = val
    @staticmethod
    def get_api_name():
        """ONTAP API element name for this object."""
        return "iscsi-security-entry-info"
    @staticmethod
    def get_desired_attrs():
        """Attribute names requested from the API, in wire (dashed) form."""
        return [
            'user-name',
            'auth-chap-policy',
            'initiator',
            'vserver',
            'auth-type',
            'outbound-user-name',
        ]
    def describe_properties(self):
        """Per-field schema: Python type, cardinality and requiredness."""
        return {
            'user_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'auth_chap_policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'initiator': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'vserver': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'auth_type': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'outbound_user_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| 30.052174
| 100
| 0.575521
|
from netapp.netapp_object import NetAppObject
class IscsiSecurityEntryInfo(NetAppObject):
    """Information about a single iSCSI authentication entry.

    Properties mirror the fields of the ONTAP
    'iscsi-security-entry-info' API element; each setter validates a
    non-None value via NetAppObject.validate before storing it.
    """
    # Inbound CHAP user name (only meaningful when auth_type == "CHAP").
    _user_name = None
    @property
    def user_name(self):
        return self._user_name
    @user_name.setter
    def user_name(self, val):
        if val != None:
            self.validate('user_name', val)
            self._user_name = val
    # CHAP authentication path: "local" or "radius".
    _auth_chap_policy = None
    @property
    def auth_chap_policy(self):
        return self._auth_chap_policy
    @auth_chap_policy.setter
    def auth_chap_policy(self, val):
        if val != None:
            self.validate('auth_chap_policy', val)
            self._auth_chap_policy = val
    # Initiator name (RFC 3720 IQN) or "default" for the default entry.
    _initiator = None
    @property
    def initiator(self):
        return self._initiator
    @initiator.setter
    def initiator(self, val):
        if val != None:
            self.validate('initiator', val)
            self._initiator = val
    # Vserver that owns this authentication entry.
    _vserver = None
    @property
    def vserver(self):
        return self._vserver
    @vserver.setter
    def vserver(self, val):
        if val != None:
            self.validate('vserver', val)
            self._vserver = val
    # Authentication type: "CHAP", "none" or "deny".
    _auth_type = None
    @property
    def auth_type(self):
        return self._auth_type
    @auth_type.setter
    def auth_type(self, val):
        if val != None:
            self.validate('auth_type', val)
            self._auth_type = val
    # Outbound CHAP user name (CHAP with outbound authentication only).
    _outbound_user_name = None
    @property
    def outbound_user_name(self):
        return self._outbound_user_name
    @outbound_user_name.setter
    def outbound_user_name(self, val):
        if val != None:
            self.validate('outbound_user_name', val)
            self._outbound_user_name = val
    @staticmethod
    def get_api_name():
        """ONTAP API element name for this object."""
        return "iscsi-security-entry-info"
    @staticmethod
    def get_desired_attrs():
        """Attribute names requested from the API, in wire (dashed) form."""
        return [
            'user-name',
            'auth-chap-policy',
            'initiator',
            'vserver',
            'auth-type',
            'outbound-user-name',
        ]
    def describe_properties(self):
        """Per-field schema: Python type, cardinality and requiredness."""
        return {
            'user_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'auth_chap_policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'initiator': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'vserver': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'auth_type': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'outbound_user_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| true
| true
|
1c4aa18bf302180255957ac3409b0d4e79760721
| 6,458
|
py
|
Python
|
scripts/classification/PointNet/run_pointnet_classification.py
|
amiralansary/BrainSurfaceTK
|
17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef
|
[
"MIT"
] | 7
|
2020-08-04T19:27:33.000Z
|
2022-01-28T18:00:21.000Z
|
scripts/classification/PointNet/run_pointnet_classification.py
|
amiralansary/BrainSurfaceTK
|
17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef
|
[
"MIT"
] | null | null | null |
scripts/classification/PointNet/run_pointnet_classification.py
|
amiralansary/BrainSurfaceTK
|
17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef
|
[
"MIT"
] | 3
|
2020-12-31T17:26:21.000Z
|
2021-02-11T19:24:56.000Z
|
import os.path as osp

PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..')
import sys
sys.path.append(PATH_TO_ROOT)
import os
import time
import pickle
import csv
import torch
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from models.pointnet.src.models.pointnet2_classification import Net
from models.pointnet.main.pointnet2_classification import train, test_classification
from models.pointnet.src.utils import get_data_path, data

PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..') + '/'
PATH_TO_POINTNET = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..', 'models', 'pointnet') + '/'

if __name__ == '__main__':
    # Train and evaluate a PointNet++ classifier on brain-surface meshes.
    PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..') + '/'

    num_workers = 2
    local_features = []
    global_features = []

    #################################################
    ########### EXPERIMENT DESCRIPTION ##############
    #################################################
    recording = False       # when True: TensorBoard logs, model checkpoints, CSV results
    REPROCESS = False

    data_nativeness = 'native'
    data_compression = "10k"
    data_type = 'pial'
    hemisphere = 'both'

    comment = 'comment'

    #################################################
    ############## MODEL PARAMETERS #################
    #################################################
    lr = 0.001
    batch_size = 2
    gamma = 0.9875
    scheduler_step_size = 2
    target_class = 'gender'
    task = 'classification'
    numb_epochs = 1
    number_of_points = 10000

    # Patient-split indices (Train/Val/Test) for the data loaders.
    with open(PATH_TO_POINTNET + 'src/names.pk', 'rb') as f:
        indices = pickle.load(f)

    data_folder, files_ending = get_data_path(data_nativeness, data_compression, data_type, hemisphere=hemisphere)

    train_dataset, test_dataset, validation_dataset, train_loader, test_loader, val_loader, num_labels = data(
        data_folder,
        files_ending,
        data_type,
        target_class,
        task,
        REPROCESS,
        local_features,
        global_features,
        indices,
        batch_size,
        num_workers=num_workers,  # bug fix: was hard-coded to 2, ignoring the variable above
        data_nativeness=data_nativeness,
        data_compression=data_compression,
        hemisphere=hemisphere
    )

    if len(local_features) > 0:
        numb_local_features = train_dataset[0].x.size(1)
    else:
        numb_local_features = 0
    numb_global_features = len(global_features)

    # Create the model, optimizer and LR schedule.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net(numb_local_features, numb_global_features).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=scheduler_step_size, gamma=gamma)

    print(f'number of param: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')

    #################################################
    ############# EXPERIMENT LOGGING ################
    #################################################
    writer = None
    results_folder = None
    model_dir = None  # bug fix: previously undefined when recording=False, crashing the save below
    if recording:
        # Tensorboard writer.
        writer = SummaryWriter(log_dir='runs/' + task + '/' + comment, comment=comment)

        results_folder = 'runs/' + task + '/' + comment + '/results'
        model_dir = 'runs/' + task + '/' + comment + '/models'

        if not osp.exists(results_folder):
            os.makedirs(results_folder)
        if not osp.exists(model_dir):
            os.makedirs(model_dir)

        # Persist the configuration for reproducibility.
        with open(results_folder + '/configuration.txt', 'w', newline='') as config_file:
            config_file.write('Learning rate - ' + str(lr) + '\n')
            config_file.write('Batch size - ' + str(batch_size) + '\n')
            config_file.write('Local features - ' + str(local_features) + '\n')
            config_file.write('Global feature - ' + str(global_features) + '\n')
            config_file.write('Number of points - ' + str(number_of_points) + '\n')
            config_file.write('Data res - ' + data_compression + '\n')
            config_file.write('Data type - ' + data_type + '\n')
            config_file.write('Data nativeness - ' + data_nativeness + '\n')

        with open(results_folder + '/results.csv', 'w', newline='') as results_file:
            result_writer = csv.writer(results_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            result_writer.writerow(['Patient ID', 'Session ID', 'Prediction', 'Label', 'Error'])

    #################################################
    ############### MAIN TRAINING LOOP ##############
    #################################################
    best_val_acc = 0.0
    for epoch in range(1, numb_epochs + 1):
        start = time.time()

        train(model, train_loader, epoch, device,
              optimizer, scheduler, writer)

        val_acc = test_classification(model, val_loader,
                                      indices['Val'], device,
                                      recording, results_folder,
                                      epoch=epoch)

        if recording:
            writer.add_scalar('Acc/val', val_acc, epoch)

        end = time.time()
        print('Time: ' + str(end - start))

        if val_acc > best_val_acc:
            best_val_acc = val_acc
            # Bug fix: checkpointing guarded by `recording` — model_dir and
            # writer only exist when logging is enabled.
            if recording:
                torch.save(model.state_dict(), model_dir + '/model_best.pt')
                print('Saving Model'.center(60, '-'))

        if recording:
            # Bug fix: log the epoch duration every epoch, not only on
            # epochs where a new best model appeared.
            writer.add_scalar('Time/epoch', end - start, epoch)

    test_classification(model, test_loader, indices['Test'], device, recording, results_folder, val=False)

    if recording:
        # save the last model
        torch.save(model.state_dict(), model_dir + '/model_last.pt')

        # Eval best model on test
        model.load_state_dict(torch.load(model_dir + '/model_best.pt'))
        with open(results_folder + '/results.csv', 'a', newline='') as results_file:
            result_writer = csv.writer(results_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            result_writer.writerow(['Best model!'])
        test_classification(model, test_loader, indices['Test'], device, recording, results_folder, val=False)
| 36.485876
| 114
| 0.562713
|
import os.path as osp
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..')
import sys
sys.path.append(PATH_TO_ROOT)
import os
import time
import pickle
import csv
import torch
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from models.pointnet.src.models.pointnet2_classification import Net
from models.pointnet.main.pointnet2_classification import train, test_classification
from models.pointnet.src.utils import get_data_path, data
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..') + '/'
PATH_TO_POINTNET = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..', 'models', 'pointnet') + '/'
if __name__ == '__main__':
PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..') + '/'
num_workers = 2
local_features = []
global_features = []
| true
| true
|
1c4aa2032c5c511a7b7e429659929c7288dac84f
| 360
|
py
|
Python
|
exercises/ja/solution_03_16_02.py
|
YanaPalacheva/spacy-course
|
59975f7348a601532303be91474d75d02d0540ef
|
[
"MIT"
] | 1
|
2021-12-30T06:40:11.000Z
|
2021-12-30T06:40:11.000Z
|
exercises/ja/solution_03_16_02.py
|
YanaPalacheva/spacy-course
|
59975f7348a601532303be91474d75d02d0540ef
|
[
"MIT"
] | null | null | null |
exercises/ja/solution_03_16_02.py
|
YanaPalacheva/spacy-course
|
59975f7348a601532303be91474d75d02d0540ef
|
[
"MIT"
] | 1
|
2020-06-08T13:26:06.000Z
|
2020-06-08T13:26:06.000Z
|
import spacy

nlp = spacy.load("en_core_web_sm")
text = (
    "Chick-fil-A is an American fast food restaurant chain headquartered in "
    "the city of College Park, Georgia, specializing in chicken sandwiches."
)

# Disable the tagger and parser
with nlp.disable_pipes("tagger", "parser"):
    # Process the text
    doc = nlp(text)
    # Print the doc's named entities
    print(doc.ents)
| 24
| 77
| 0.708333
|
import spacy

nlp = spacy.load("en_core_web_sm")
text = (
    "Chick-fil-A is an American fast food restaurant chain headquartered in "
    "the city of College Park, Georgia, specializing in chicken sandwiches."
)
# Disable the tagger and parser while processing this text.
with nlp.disable_pipes("tagger", "parser"):
    doc = nlp(text)
# Print the named entities found in the doc.
print(doc.ents)
| true
| true
|
1c4aa2704b6464d4b7b602bb5e3d69e965b711b1
| 3,835
|
py
|
Python
|
src/python/Utils/MemoryCache.py
|
tslazarova/WMCore
|
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
|
[
"Apache-2.0"
] | 1
|
2015-02-05T13:43:46.000Z
|
2015-02-05T13:43:46.000Z
|
src/python/Utils/MemoryCache.py
|
tslazarova/WMCore
|
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
|
[
"Apache-2.0"
] | null | null | null |
src/python/Utils/MemoryCache.py
|
tslazarova/WMCore
|
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple in-memory and non-thread safe cache.
Note that this module does not support home-made object types, since there is
an explicit data type check when adding a new item to the cache.
It raises a TypeError exception if the cache data type chagens;
or if the user tries to extend the cache with an incompatible
data type.
"""
from __future__ import (print_function, division)
from builtins import object
from time import time
class MemoryCacheException(Exception):
    """Raised by MemoryCache.getCache when the cached data has expired."""
    def __init__(self, message):
        super(MemoryCacheException, self).__init__(message)
class MemoryCache(object):
    """Simple in-memory, non-thread-safe cache with absolute expiration.

    The payload keeps the data type it was created with: ``setCache``
    rejects a value of a different type, and ``addItemToCache`` only
    extends list/set/dict caches with compatible items.
    """
    __slots__ = ["lastUpdate", "expiration", "_cache"]

    def __init__(self, expiration, initialData=None):
        """
        Initializes cache object
        :param expiration: expiration time in seconds
        :param initialData: initial value for the cache
        """
        self.lastUpdate = int(time())
        self.expiration = expiration
        self._cache = initialData

    def __contains__(self, item):
        """
        Check whether item is in the current cache
        :param item: a simple object (string, integer, etc)
        :return: True if the object can be found in the cache, False otherwise
        """
        return item in self._cache

    def isCacheExpired(self):
        """
        Evaluate whether the cache has already expired, returning
        True if it did, otherwise it returns False
        """
        return self.lastUpdate + self.expiration < int(time())

    def getCache(self):
        """
        Raises an exception if the cache has expired, otherwise returns
        its data
        """
        if self.isCacheExpired():
            expiredSince = int(time()) - (self.lastUpdate + self.expiration)
            raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
        return self._cache

    def setCache(self, inputData):
        """
        Refresh the cache with the content provided (refresh its expiration as well)
        This method enforces the user to not change the cache data type
        :param inputData: data to store in the cache
        """
        # NOTE(review): a cache created with the default initialData=None
        # only accepts None here, because type(None) never matches — confirm
        # that callers always seed initialData.
        if not isinstance(self._cache, type(inputData)):
            raise TypeError("Current cache data type: %s, while new value is: %s" %
                            (type(self._cache), type(inputData)))
        self.lastUpdate = int(time())
        self._cache = inputData

    def addItemToCache(self, inputItem):
        """
        Adds new item(s) to the cache, without resetting its expiration.
        It, of course, only works for data caches of type: list, set or dict.
        :param inputItem: additional item to be added to the current cached data
        """
        if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
            # extend another list or set into a set
            self._cache.update(inputItem)
        elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a set
            self._cache.add(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
            # extend another list or set into a list
            self._cache.extend(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a list
            self._cache.append(inputItem)
        elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
            self._cache.update(inputItem)
        else:
            # Bug fix: the format arguments were swapped, so the message
            # reported the cache type as the input type and vice versa.
            msg = "Input item type: %s cannot be added to a cache type: %s" % (type(inputItem), type(self._cache))
            raise TypeError("Cache and input item data type mismatch. %s" % msg)
| 39.132653
| 114
| 0.642503
|
from __future__ import (print_function, division)
from builtins import object
from time import time
class MemoryCacheException(Exception):
    """Raised by MemoryCache.getCache when the cached data has expired."""
    def __init__(self, message):
        super(MemoryCacheException, self).__init__(message)
class MemoryCache(object):
    """In-memory, non-thread-safe cache whose content goes stale a fixed
    number of seconds after it was last set, and whose payload type is
    fixed at creation time."""

    __slots__ = ["lastUpdate", "expiration", "_cache"]

    def __init__(self, expiration, initialData=None):
        """Create a cache expiring ``expiration`` seconds from now,
        optionally seeded with ``initialData``."""
        self.lastUpdate = int(time())
        self.expiration = expiration
        self._cache = initialData

    def __contains__(self, item):
        """Membership test, delegated to the cached container."""
        return item in self._cache

    def isCacheExpired(self):
        """Return True once the expiration deadline has passed."""
        deadline = self.lastUpdate + self.expiration
        return deadline < int(time())

    def getCache(self):
        """Return the cached data; raise MemoryCacheException if stale."""
        if not self.isCacheExpired():
            return self._cache
        expiredSince = int(time()) - (self.lastUpdate + self.expiration)
        raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)

    def setCache(self, inputData):
        """Replace the cached data (same type only) and reset expiration."""
        if isinstance(self._cache, type(inputData)):
            self.lastUpdate = int(time())
            self._cache = inputData
        else:
            raise TypeError("Current cache data type: %s, while new value is: %s" %
                            (type(self._cache), type(inputData)))

    def addItemToCache(self, inputItem):
        """Merge ``inputItem`` into a cached list/set/dict without touching
        the expiration timestamp; raise TypeError on incompatible types."""
        container = self._cache
        scalar = (int, float, str)
        if isinstance(container, set):
            if isinstance(inputItem, (list, set)):
                container.update(inputItem)
                return
            if isinstance(inputItem, scalar):
                container.add(inputItem)
                return
        elif isinstance(container, list):
            if isinstance(inputItem, (list, set)):
                container.extend(inputItem)
                return
            if isinstance(inputItem, scalar):
                container.append(inputItem)
                return
        elif isinstance(container, dict) and isinstance(inputItem, dict):
            container.update(inputItem)
            return
        msg = "Input item type: %s cannot be added to a cache type: %s" % (type(self._cache), type(inputItem))
        raise TypeError("Cache and input item data type mismatch. %s" % msg)
| true
| true
|
1c4aa339091f326094a1c407dad313c0bd77f65a
| 52,647
|
py
|
Python
|
ryu/services/protocols/bgp/bgpspeaker.py
|
starlingx-staging/stx-ryu
|
e73cdeab37d4890138f9c48bf41cf08974ab2146
|
[
"Apache-2.0"
] | null | null | null |
ryu/services/protocols/bgp/bgpspeaker.py
|
starlingx-staging/stx-ryu
|
e73cdeab37d4890138f9c48bf41cf08974ab2146
|
[
"Apache-2.0"
] | null | null | null |
ryu/services/protocols/bgp/bgpspeaker.py
|
starlingx-staging/stx-ryu
|
e73cdeab37d4890138f9c48bf41cf08974ab2146
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module offers a class to enable your code to speak BGP protocol.
"""
import netaddr
from ryu.lib import hub
from ryu.lib.packet.bgp import (
BGPFlowSpecTrafficActionCommunity,
BGPFlowSpecVlanActionCommunity,
BGPFlowSpecTPIDActionCommunity,
)
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
from ryu.services.protocols.bgp.api.base import call
from ryu.services.protocols.bgp.api.base import PREFIX
from ryu.services.protocols.bgp.api.base import EVPN_ROUTE_TYPE
from ryu.services.protocols.bgp.api.base import EVPN_ESI
from ryu.services.protocols.bgp.api.base import EVPN_ETHERNET_TAG_ID
from ryu.services.protocols.bgp.api.base import REDUNDANCY_MODE
from ryu.services.protocols.bgp.api.base import IP_ADDR
from ryu.services.protocols.bgp.api.base import MAC_ADDR
from ryu.services.protocols.bgp.api.base import NEXT_HOP
from ryu.services.protocols.bgp.api.base import IP_PREFIX
from ryu.services.protocols.bgp.api.base import GW_IP_ADDR
from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
from ryu.services.protocols.bgp.api.base import ROUTE_FAMILY
from ryu.services.protocols.bgp.api.base import EVPN_VNI
from ryu.services.protocols.bgp.api.base import TUNNEL_TYPE
from ryu.services.protocols.bgp.api.base import PMSI_TUNNEL_TYPE
from ryu.services.protocols.bgp.api.prefix import EVPN_MAX_ET
from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_LACP
from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_L2_BRIDGE
from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_MAC_BASED
from ryu.services.protocols.bgp.api.prefix import EVPN_ETH_AUTO_DISCOVERY
from ryu.services.protocols.bgp.api.prefix import EVPN_MAC_IP_ADV_ROUTE
from ryu.services.protocols.bgp.api.prefix import EVPN_MULTICAST_ETAG_ROUTE
from ryu.services.protocols.bgp.api.prefix import EVPN_ETH_SEGMENT
from ryu.services.protocols.bgp.api.prefix import EVPN_IP_PREFIX_ROUTE
from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_ALL_ACTIVE
from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_SINGLE_ACTIVE
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_VXLAN
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_NVGRE
from ryu.services.protocols.bgp.api.prefix import (
PMSI_TYPE_NO_TUNNEL_INFO,
PMSI_TYPE_INGRESS_REP)
from ryu.services.protocols.bgp.api.prefix import (
FLOWSPEC_FAMILY,
FLOWSPEC_FAMILY_IPV4,
FLOWSPEC_FAMILY_VPNV4,
FLOWSPEC_FAMILY_IPV6,
FLOWSPEC_FAMILY_VPNV6,
FLOWSPEC_FAMILY_L2VPN,
FLOWSPEC_RULES,
FLOWSPEC_ACTIONS)
from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
from ryu.services.protocols.bgp.rtconf.common import CLUSTER_ID
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_HOSTS
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_HOSTS
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import (
DEFAULT_REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_STALEPATH_TIME)
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import ALLOW_LOCAL_AS_IN_COUNT
from ryu.services.protocols.bgp.rtconf.common import LOCAL_PREF
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LOCAL_PREF
from ryu.services.protocols.bgp.rtconf import neighbors
from ryu.services.protocols.bgp.rtconf import vrfs
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_EVPN
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_L2VPNFS
from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_FOUR_OCTET_AS_NUMBER
from ryu.services.protocols.bgp.rtconf.base import HOLD_TIME
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_CAP_MBGP_IPV4,
DEFAULT_CAP_MBGP_IPV6,
DEFAULT_CAP_MBGP_VPNV4,
DEFAULT_CAP_MBGP_VPNV6,
DEFAULT_CAP_MBGP_EVPN,
DEFAULT_CAP_MBGP_IPV4FS,
DEFAULT_CAP_MBGP_IPV6FS,
DEFAULT_CAP_MBGP_VPNV4FS,
DEFAULT_CAP_MBGP_VPNV6FS,
DEFAULT_CAP_MBGP_L2VPNFS,
DEFAULT_HOLD_TIME,
)
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_CAP_ENHANCED_REFRESH, DEFAULT_CAP_FOUR_OCTET_AS_NUMBER)
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import PEER_NEXT_HOP
from ryu.services.protocols.bgp.rtconf.neighbors import PASSWORD
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_IS_ROUTE_SERVER_CLIENT, IS_ROUTE_SERVER_CLIENT)
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, IS_ROUTE_REFLECTOR_CLIENT)
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_IS_NEXT_HOP_SELF, IS_NEXT_HOP_SELF)
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_ADDRESS
from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_PORT
from ryu.services.protocols.bgp.rtconf.vrfs import SUPPORTED_VRF_RF
from ryu.services.protocols.bgp.info_base.base import Filter
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
# Alias kept for callers that still use the old configuration-key name.
NEIGHBOR_CONF_MED = MULTI_EXIT_DISC  # for backward compatibility
# Route family constants accepted by vrf_add()/vrfs_get() and friends.
RF_VPN_V4 = vrfs.VRF_RF_IPV4
RF_VPN_V6 = vrfs.VRF_RF_IPV6
RF_L2_EVPN = vrfs.VRF_RF_L2_EVPN
RF_VPNV4_FLOWSPEC = vrfs.VRF_RF_IPV4_FLOWSPEC
RF_VPNV6_FLOWSPEC = vrfs.VRF_RF_IPV6_FLOWSPEC
RF_L2VPN_FLOWSPEC = vrfs.VRF_RF_L2VPN_FLOWSPEC
# Constants for the Traffic Filtering Actions of Flow Specification.
FLOWSPEC_TA_SAMPLE = BGPFlowSpecTrafficActionCommunity.SAMPLE
FLOWSPEC_TA_TERMINAL = BGPFlowSpecTrafficActionCommunity.TERMINAL
# Constants for the VLAN Actions of Flow Specification.
FLOWSPEC_VLAN_POP = BGPFlowSpecVlanActionCommunity.POP
FLOWSPEC_VLAN_PUSH = BGPFlowSpecVlanActionCommunity.PUSH
FLOWSPEC_VLAN_SWAP = BGPFlowSpecVlanActionCommunity.SWAP
FLOWSPEC_VLAN_RW_INNER = BGPFlowSpecVlanActionCommunity.REWRITE_INNER
FLOWSPEC_VLAN_RW_OUTER = BGPFlowSpecVlanActionCommunity.REWRITE_OUTER
# Constants for the TPID Actions of Flow Specification.
FLOWSPEC_TPID_TI = BGPFlowSpecTPIDActionCommunity.TI
FLOWSPEC_TPID_TO = BGPFlowSpecTPIDActionCommunity.TO
class EventPrefix(object):
    """
    Used to pass an update on any best remote path to
    best_path_change_handler.

    ================ ======================================================
    Attribute        Description
    ================ ======================================================
    remote_as        The AS number of a peer that caused this change
    route_dist       None in the case of IPv4 or IPv6 family
    prefix           A prefix was changed
    nexthop          The nexthop of the changed prefix
    label            MPLS label for VPNv4, VPNv6 or EVPN prefix
    path             An instance of ``info_base.base.Path`` subclass
    is_withdraw      True if this prefix has gone otherwise False
    ================ ======================================================
    """

    def __init__(self, path, is_withdraw):
        self.path = path
        self.is_withdraw = is_withdraw

    @property
    def remote_as(self):
        """AS number of the peer this path was learned from."""
        return self.path.source.remote_as

    @property
    def route_dist(self):
        """Route distinguisher of VPN paths; None for plain IPv4/IPv6."""
        # Single isinstance() with a tuple replaces the chained
        # isinstance(...) or isinstance(...) checks.
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return self.path.nlri.route_dist
        return None

    @property
    def prefix(self):
        """String representation of the changed prefix, or None for an
        unsupported path type."""
        if isinstance(self.path, (Ipv4Path, Ipv6Path)):
            return self.path.nlri.addr + '/' + str(self.path.nlri.length)
        elif isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return self.path.nlri.prefix
        return None

    @property
    def nexthop(self):
        """Next hop address of the changed prefix."""
        return self.path.nexthop

    @property
    def label(self):
        """MPLS label list of VPN paths; None for plain IPv4/IPv6 or
        when the NLRI carries no label."""
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return getattr(self.path.nlri, 'label_list', None)
        return None
class BGPSpeaker(object):
    def __init__(self, as_number, router_id,
                 bgp_server_hosts=DEFAULT_BGP_SERVER_HOSTS,
                 bgp_server_port=DEFAULT_BGP_SERVER_PORT,
                 refresh_stalepath_time=DEFAULT_REFRESH_STALEPATH_TIME,
                 refresh_max_eor_time=DEFAULT_REFRESH_MAX_EOR_TIME,
                 best_path_change_handler=None,
                 peer_down_handler=None,
                 peer_up_handler=None,
                 ssh_console=False,
                 ssh_port=None, ssh_host=None, ssh_host_key=None,
                 label_range=DEFAULT_LABEL_RANGE,
                 allow_local_as_in_count=0,
                 cluster_id=None,
                 local_pref=DEFAULT_LOCAL_PREF):
        """Create a new BGPSpeaker object with as_number and router_id to
        listen on bgp_server_port.

        ``as_number`` specifies an Autonomous Number. It must be an integer
        between 1 and 65535.

        ``router_id`` specifies BGP router identifier. It must be the
        string representation of an IPv4 address (e.g. 10.0.0.1).

        ``bgp_server_hosts`` specifies a list of TCP listen host addresses.

        ``bgp_server_port`` specifies TCP listen port number. 179 is
        used if not specified.

        ``refresh_stalepath_time`` causes the BGP speaker to remove
        stale routes from the BGP table after the timer expires, even
        if the speaker does not receive a Router-Refresh End-of-RIB
        message. This feature is disabled (not implemented yet).

        ``refresh_max_eor_time`` causes the BGP speaker to generate a
        Route-Refresh End-of-RIB message if it was not able to
        generate one due to route flapping. This feature is disabled
        (not implemented yet).

        ``best_path_change_handler``, if specified, is called when any
        best remote path is changed due to an update message or remote
        peer down. The handler is supposed to take one argument, the
        instance of an EventPrefix class instance.

        ``peer_down_handler``, if specified, is called when BGP peering
        session goes down.

        ``peer_up_handler``, if specified, is called when BGP peering
        session goes up.

        ``ssh_console`` specifies whether or not SSH CLI need to be started.

        ``ssh_port`` specifies the port number for SSH CLI server.
        The default is bgp.operator.ssh.DEFAULT_SSH_PORT.

        ``ssh_host`` specifies the IP address for SSH CLI server.
        The default is bgp.operator.ssh.DEFAULT_SSH_HOST.

        ``ssh_host_key`` specifies the path to the host key added to
        the keys list used by SSH CLI server.
        The default is bgp.operator.ssh.DEFAULT_SSH_HOST_KEY.

        ``label_range`` specifies the range of MPLS labels generated
        automatically.

        ``allow_local_as_in_count`` maximum number of local AS number
        occurrences in AS_PATH. This option is useful for e.g. auto RD/RT
        configurations in leaf/spine architecture with shared AS numbers.
        The default is 0 and means "local AS number is not allowed in
        AS_PATH". To allow local AS, 3 is recommended (Cisco's default).

        ``cluster_id`` specifies the cluster identifier for Route Reflector.
        It must be the string representation of an IPv4 address.
        If omitted, "router_id" is used for this field.

        ``local_pref`` specifies the default local preference. It must be an
        integer.
        """
        super(BGPSpeaker, self).__init__()
        # Start the core service first; _init_signal_listeners() below
        # needs the core service to be available.
        settings = {
            LOCAL_AS: as_number,
            ROUTER_ID: router_id,
            BGP_SERVER_HOSTS: bgp_server_hosts,
            BGP_SERVER_PORT: bgp_server_port,
            REFRESH_STALEPATH_TIME: refresh_stalepath_time,
            REFRESH_MAX_EOR_TIME: refresh_max_eor_time,
            LABEL_RANGE: label_range,
            ALLOW_LOCAL_AS_IN_COUNT: allow_local_as_in_count,
            CLUSTER_ID: cluster_id,
            LOCAL_PREF: local_pref,
        }
        self._core_start(settings)
        self._init_signal_listeners()
        self._best_path_change_handler = best_path_change_handler
        self._peer_down_handler = peer_down_handler
        self._peer_up_handler = peer_up_handler
        if ssh_console:
            # Note: paramiko used in bgp.operator.ssh is the optional
            # requirements, imports bgp.operator.ssh here.
            from ryu.services.protocols.bgp.operator import ssh
            ssh_settings = {
                ssh.SSH_PORT: ssh_port or ssh.DEFAULT_SSH_PORT,
                ssh.SSH_HOST: ssh_host or ssh.DEFAULT_SSH_HOST,
                ssh.SSH_HOST_KEY: ssh_host_key or ssh.DEFAULT_SSH_HOST_KEY,
            }
            # Run the SSH CLI server in its own greenthread.
            hub.spawn(ssh.SSH_CLI_CONTROLLER.start, **ssh_settings)
def _notify_peer_down(self, peer):
remote_ip = peer.ip_address
remote_as = peer.remote_as
if self._peer_down_handler:
self._peer_down_handler(remote_ip, remote_as)
def _notify_peer_up(self, peer):
remote_ip = peer.ip_address
remote_as = peer.remote_as
if self._peer_up_handler:
self._peer_up_handler(remote_ip, remote_as)
def _notify_best_path_changed(self, path, is_withdraw):
if (not path.source
or not isinstance(path, (Ipv4Path, Ipv6Path,
Vpnv4Path, Vpnv6Path, EvpnPath))):
return
ev = EventPrefix(path, is_withdraw)
if self._best_path_change_handler:
self._best_path_change_handler(ev)
def _init_signal_listeners(self):
CORE_MANAGER.get_core_service()._signal_bus.register_listener(
BgpSignalBus.BGP_BEST_PATH_CHANGED,
lambda _, info:
self._notify_best_path_changed(info['path'],
info['is_withdraw'])
)
CORE_MANAGER.get_core_service()._signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_DOWN,
lambda _, info:
self._notify_peer_down(info['peer'])
)
CORE_MANAGER.get_core_service()._signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_UP,
lambda _, info:
self._notify_peer_up(info['peer'])
)
def _core_start(self, settings):
waiter = hub.Event()
call('core.start', waiter=waiter, **settings)
waiter.wait()
def _serve_forever(self):
pass
def shutdown(self):
""" Shutdown BGP speaker
"""
call('core.stop')
def neighbor_add(self, address, remote_as,
enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
enable_evpn=DEFAULT_CAP_MBGP_EVPN,
enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
next_hop=None, password=None, multi_exit_disc=None,
site_of_origins=None,
is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
local_address=None,
local_port=None, local_as=None,
connect_mode=DEFAULT_CONNECT_MODE,
hold_time=DEFAULT_HOLD_TIME):
""" This method registers a new neighbor. The BGP speaker tries to
establish a bgp session with the peer (accepts a connection
from the peer and also tries to connect to it).
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address. Only IPv4 is
supported now.
``remote_as`` specifies the AS number of the peer. It must be
an integer between 1 and 65535.
``enable_ipv4`` enables IPv4 address family for this
neighbor.
``enable_ipv6`` enables IPv6 address family for this
neighbor.
``enable_vpnv4`` enables VPNv4 address family for this
neighbor.
``enable_vpnv6`` enables VPNv6 address family for this
neighbor.
``enable_evpn`` enables Ethernet VPN address family for this
neighbor.
``enable_ipv4fs`` enables IPv4 Flow Specification address family
for this neighbor.
``enable_ipv6fs`` enables IPv6 Flow Specification address family
for this neighbor.
``enable_vpnv4fs`` enables VPNv4 Flow Specification address family
for this neighbor.
``enable_vpnv6fs`` enables VPNv6 Flow Specification address family
for this neighbor.
``enable_l2vpnfs`` enables L2VPN Flow Specification address family
for this neighbor.
``enable_enhanced_refresh`` enables Enhanced Route Refresh for this
neighbor.
``enable_four_octet_as_number`` enables Four-Octet AS Number
capability for this neighbor.
``next_hop`` specifies the next hop IP address. If not
specified, host's ip address to access to a peer is used.
``password`` is used for the MD5 authentication if it's
specified. By default, the MD5 authentication is disabled.
``multi_exit_disc`` specifies multi exit discriminator (MED) value
as an int type value.
If omitted, MED is not sent to the neighbor.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``is_route_server_client`` specifies whether this neighbor is a
router server's client or not.
``is_route_reflector_client`` specifies whether this neighbor is a
router reflector's client or not.
``is_next_hop_self`` specifies whether the BGP speaker announces
its own ip address to iBGP neighbor or not as path's next_hop address.
``local_address`` specifies Loopback interface address for
iBGP peering.
``local_port`` specifies source TCP port for iBGP peering.
``local_as`` specifies local AS number per-peer.
If omitted, the AS number of BGPSpeaker instance is used.
``connect_mode`` specifies how to connect to this neighbor.
This parameter must be one of the following.
- CONNECT_MODE_ACTIVE = 'active'
- CONNECT_MODE_PASSIVE = 'passive'
- CONNECT_MODE_BOTH (default) = 'both'
``hold_time`` specifies the time after which a peer is considered
down if no update or keepalive has been received.
"""
bgp_neighbor = {
neighbors.IP_ADDRESS: address,
neighbors.REMOTE_AS: remote_as,
PEER_NEXT_HOP: next_hop,
PASSWORD: password,
IS_ROUTE_SERVER_CLIENT: is_route_server_client,
IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
IS_NEXT_HOP_SELF: is_next_hop_self,
CONNECT_MODE: connect_mode,
CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
CAP_MBGP_IPV4: enable_ipv4,
CAP_MBGP_IPV6: enable_ipv6,
CAP_MBGP_VPNV4: enable_vpnv4,
CAP_MBGP_VPNV6: enable_vpnv6,
CAP_MBGP_EVPN: enable_evpn,
CAP_MBGP_IPV4FS: enable_ipv4fs,
CAP_MBGP_IPV6FS: enable_ipv6fs,
CAP_MBGP_VPNV4FS: enable_vpnv4fs,
CAP_MBGP_VPNV6FS: enable_vpnv6fs,
CAP_MBGP_L2VPNFS: enable_l2vpnfs,
HOLD_TIME: hold_time,
}
if multi_exit_disc:
bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
if site_of_origins:
bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
if local_address:
bgp_neighbor[LOCAL_ADDRESS] = local_address
if local_port:
bgp_neighbor[LOCAL_PORT] = local_port
if local_as:
bgp_neighbor[LOCAL_AS] = local_as
call('neighbor.create', **bgp_neighbor)
def neighbor_del(self, address):
""" This method unregister the registered neighbor. If a session with
the peer exists, the session will be closed.
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address.
"""
bgp_neighbor = {
neighbors.IP_ADDRESS: address,
}
call('neighbor.delete', **bgp_neighbor)
def neighbor_reset(self, address):
""" This method reset the registered neighbor.
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address.
"""
bgp_neighbor = {
neighbors.IP_ADDRESS: address,
}
call('core.reset_neighbor', **bgp_neighbor)
def neighbor_update(self, address, conf_type, conf_value):
""" This method changes the neighbor configuration.
``address`` specifies the IP address of the peer.
``conf_type`` specifies configuration type which you want to change.
Currently ryu.services.protocols.bgp.bgpspeaker.MULTI_EXIT_DISC
can be specified.
``conf_value`` specifies value for the configuration type.
"""
assert conf_type == MULTI_EXIT_DISC or conf_type == CONNECT_MODE
func_name = 'neighbor.update'
attribute_param = {}
if conf_type == MULTI_EXIT_DISC:
attribute_param = {neighbors.MULTI_EXIT_DISC: conf_value}
elif conf_type == CONNECT_MODE:
attribute_param = {neighbors.CONNECT_MODE: conf_value}
param = {neighbors.IP_ADDRESS: address,
neighbors.CHANGES: attribute_param}
call(func_name, **param)
def neighbor_state_get(self, address=None, format='json'):
""" This method returns the state of peer(s) in a json
format.
``address`` specifies the address of a peer. If not given, the
state of all the peers return.
``format`` specifies the format of the response.
This parameter must be one of the following.
- 'json' (default)
- 'cli'
"""
show = {
'params': ['neighbor', 'summary'],
'format': format,
}
if address:
show['params'].append(address)
return call('operator.show', **show)
def prefix_add(self, prefix, next_hop=None, route_dist=None):
""" This method adds a new prefix to be advertised.
``prefix`` must be the string representation of an IP network
(e.g., 10.1.1.0/24).
``next_hop`` specifies the next hop address for this
prefix. This parameter is necessary for only VPNv4 and VPNv6
address families.
``route_dist`` specifies a route distinguisher value. This
parameter is necessary for only VPNv4 and VPNv6 address
families.
"""
func_name = 'network.add'
networks = {
PREFIX: prefix,
}
if next_hop:
networks[NEXT_HOP] = next_hop
if route_dist:
func_name = 'prefix.add_local'
networks[ROUTE_DISTINGUISHER] = route_dist
rf, p = self._check_rf_and_normalize(prefix)
networks[ROUTE_FAMILY] = rf
networks[PREFIX] = p
if rf == vrfs.VRF_RF_IPV6 and netaddr.valid_ipv4(next_hop):
# convert the next_hop to IPv4-Mapped IPv6 Address
networks[NEXT_HOP] = \
str(netaddr.IPAddress(next_hop).ipv6())
return call(func_name, **networks)
def prefix_del(self, prefix, route_dist=None):
""" This method deletes a advertised prefix.
``prefix`` must be the string representation of an IP network.
``route_dist`` specifies a route distinguisher value.
"""
func_name = 'network.del'
networks = {
PREFIX: prefix,
}
if route_dist:
func_name = 'prefix.delete_local'
networks[ROUTE_DISTINGUISHER] = route_dist
rf, p = self._check_rf_and_normalize(prefix)
networks[ROUTE_FAMILY] = rf
networks[PREFIX] = p
call(func_name, **networks)
    def evpn_prefix_add(self, route_type, route_dist, esi=0,
                        ethernet_tag_id=None, mac_addr=None, ip_addr=None,
                        ip_prefix=None, gw_ip_addr=None, vni=None,
                        next_hop=None, tunnel_type=None, pmsi_tunnel_type=None,
                        redundancy_mode=None):
        """ This method adds a new EVPN route to be advertised.

        ``route_type`` specifies one of the EVPN route type name.
        This parameter must be one of the following.

        - EVPN_ETH_AUTO_DISCOVERY = 'eth_ad'
        - EVPN_MAC_IP_ADV_ROUTE = 'mac_ip_adv'
        - EVPN_MULTICAST_ETAG_ROUTE = 'multicast_etag'
        - EVPN_ETH_SEGMENT = 'eth_seg'
        - EVPN_IP_PREFIX_ROUTE = 'ip_prefix'

        ``route_dist`` specifies a route distinguisher value.

        ``esi`` is an value to specify the Ethernet Segment Identifier.
        0 is the default and denotes a single-homed site.
        If you want to advertise esi other than 0,
        it must be set as dictionary type.
        If esi is dictionary type, 'type' key must be set
        and specifies ESI type.
        For the supported ESI type, see :py:mod:`ryu.lib.packet.bgp.EvpnEsi`.
        The remaining arguments are the same as that for
        the corresponding class.

        ``ethernet_tag_id`` specifies the Ethernet Tag ID.

        ``mac_addr`` specifies a MAC address to advertise.

        ``ip_addr`` specifies an IPv4 or IPv6 address to advertise.

        ``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise.

        ``gw_ip_addr`` specifies an IPv4 or IPv6 address of
        gateway to advertise.

        ``vni`` specifies an Virtual Network Identifier for VXLAN
        or Virtual Subnet Identifier for NVGRE.
        If tunnel_type is not TUNNEL_TYPE_VXLAN or TUNNEL_TYPE_NVGRE,
        this field is ignored.

        ``next_hop`` specifies the next hop address for this prefix.

        ``tunnel_type`` specifies the data plane encapsulation type
        to advertise. By the default, this attribute is not advertised.
        The supported encapsulation types are following.

        - TUNNEL_TYPE_VXLAN = 'vxlan'
        - TUNNEL_TYPE_NVGRE = 'nvgre'

        ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute
        used to encode the multicast tunnel identifier.
        This attribute is advertised only if route_type is
        EVPN_MULTICAST_ETAG_ROUTE and not advertised by the default.
        This attribute can also carry vni if tunnel_type is specified.
        The supported PMSI tunnel types are following.

        - PMSI_TYPE_NO_TUNNEL_INFO = 0
        - PMSI_TYPE_INGRESS_REP = 6

        ``redundancy_mode`` specifies a redundancy mode type.
        This attribute is advertised only if route_type is
        EVPN_ETH_AUTO_DISCOVERY and not advertised by the default.
        The supported redundancy mode types are following.

        - REDUNDANCY_MODE_ALL_ACTIVE = 'all_active'
        - REDUNDANCY_MODE_SINGLE_ACTIVE = 'single_active'

        Raises ValueError for an unsupported ``route_type``,
        ``tunnel_type``, ``pmsi_tunnel_type`` or ``redundancy_mode``.
        """
        func_name = 'evpn_prefix.add_local'
        # Check the default values
        # The unspecified address stands in when the caller gave no
        # next hop.
        if not next_hop:
            next_hop = '0.0.0.0'
        # Set required arguments
        kwargs = {EVPN_ROUTE_TYPE: route_type,
                  ROUTE_DISTINGUISHER: route_dist,
                  NEXT_HOP: next_hop}
        # Set optional arguments
        # Validate tunnel_type before branching on route_type so an
        # invalid encapsulation is rejected for every route type.
        if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
            kwargs[TUNNEL_TYPE] = tunnel_type
        elif tunnel_type is not None:
            raise ValueError('Unsupported tunnel type: %s' % tunnel_type)
        # Set route type specific arguments
        if route_type == EVPN_ETH_AUTO_DISCOVERY:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            })
            if vni is not None:
                kwargs[EVPN_VNI] = vni
            # Set Redundancy Mode Attribute arguments
            if redundancy_mode in [
                    REDUNDANCY_MODE_ALL_ACTIVE,
                    REDUNDANCY_MODE_SINGLE_ACTIVE]:
                kwargs[REDUNDANCY_MODE] = redundancy_mode
            elif redundancy_mode is not None:
                raise ValueError('Unsupported Redundancy Mode: %s' %
                                 redundancy_mode)
        elif route_type == EVPN_MAC_IP_ADV_ROUTE:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                MAC_ADDR: mac_addr,
                IP_ADDR: ip_addr,
            })
            # Set tunnel type specific arguments
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
        elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_ADDR: ip_addr,
            })
            # Set tunnel type specific arguments
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
            # Set PMSI Tunnel Attribute arguments
            if pmsi_tunnel_type in [
                    PMSI_TYPE_NO_TUNNEL_INFO,
                    PMSI_TYPE_INGRESS_REP]:
                kwargs[PMSI_TUNNEL_TYPE] = pmsi_tunnel_type
            elif pmsi_tunnel_type is not None:
                raise ValueError('Unsupported PMSI tunnel type: %s' %
                                 pmsi_tunnel_type)
        elif route_type == EVPN_ETH_SEGMENT:
            kwargs.update({
                EVPN_ESI: esi,
                IP_ADDR: ip_addr,
            })
        elif route_type == EVPN_IP_PREFIX_ROUTE:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_PREFIX: ip_prefix,
                GW_IP_ADDR: gw_ip_addr,
            })
            # Set tunnel type specific arguments
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
        else:
            raise ValueError('Unsupported EVPN route type: %s' % route_type)
        call(func_name, **kwargs)
def evpn_prefix_del(self, route_type, route_dist, esi=0,
ethernet_tag_id=None, mac_addr=None, ip_addr=None,
ip_prefix=None):
""" This method deletes an advertised EVPN route.
``route_type`` specifies one of the EVPN route type name.
``route_dist`` specifies a route distinguisher value.
``esi`` is an value to specify the Ethernet Segment Identifier.
``ethernet_tag_id`` specifies the Ethernet Tag ID.
``mac_addr`` specifies a MAC address to advertise.
``ip_addr`` specifies an IPv4 or IPv6 address to advertise.
``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise.
"""
func_name = 'evpn_prefix.delete_local'
# Set required arguments
kwargs = {EVPN_ROUTE_TYPE: route_type,
ROUTE_DISTINGUISHER: route_dist}
# Set route type specific arguments
if route_type == EVPN_ETH_AUTO_DISCOVERY:
kwargs.update({
EVPN_ESI: esi,
EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
})
elif route_type == EVPN_MAC_IP_ADV_ROUTE:
kwargs.update({
EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
MAC_ADDR: mac_addr,
IP_ADDR: ip_addr,
})
elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
kwargs.update({
EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
IP_ADDR: ip_addr,
})
elif route_type == EVPN_ETH_SEGMENT:
kwargs.update({
EVPN_ESI: esi,
IP_ADDR: ip_addr,
})
elif route_type == EVPN_IP_PREFIX_ROUTE:
kwargs.update({
EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
IP_PREFIX: ip_prefix,
})
else:
raise ValueError('Unsupported EVPN route type: %s' % route_type)
call(func_name, **kwargs)
def flowspec_prefix_add(self, flowspec_family, rules, route_dist=None,
actions=None):
""" This method adds a new Flow Specification prefix to be advertised.
``flowspec_family`` specifies one of the flowspec family name.
This parameter must be one of the following.
- FLOWSPEC_FAMILY_IPV4 = 'ipv4fs'
- FLOWSPEC_FAMILY_IPV6 = 'ipv6fs'
- FLOWSPEC_FAMILY_VPNV4 = 'vpnv4fs'
- FLOWSPEC_FAMILY_VPNV6 = 'vpnv6fs'
- FLOWSPEC_FAMILY_L2VPN = 'l2vpnfs'
``rules`` specifies NLRIs of Flow Specification as
a dictionary type value.
For the supported NLRI types and arguments,
see `from_user()` method of the following classes.
- :py:mod:`ryu.lib.packet.bgp.FlowSpecIPv4NLRI`
- :py:mod:`ryu.lib.packet.bgp.FlowSpecIPv6NLRI`
- :py:mod:`ryu.lib.packet.bgp.FlowSpecVPNv4NLRI`
- :py:mod:`ryu.lib.packet.bgp.FlowSpecVPNv6NLRI`
- :py:mod:`ryu.lib.packet.bgp.FlowSpecL2VPNNLRI`
``route_dist`` specifies a route distinguisher value.
This parameter is required only if flowspec_family is one of the
following address family.
- FLOWSPEC_FAMILY_VPNV4 = 'vpnv4fs'
- FLOWSPEC_FAMILY_VPNV6 = 'vpnv6fs'
- FLOWSPEC_FAMILY_L2VPN = 'l2vpnfs'
``actions`` specifies Traffic Filtering Actions of
Flow Specification as a dictionary type value.
The keys are "ACTION_NAME" for each action class and
values are used for the arguments to that class.
For the supported "ACTION_NAME" and arguments,
see the following table.
=============== ===============================================================
ACTION_NAME Action Class
=============== ===============================================================
traffic_rate :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTrafficRateCommunity`
traffic_action :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTrafficActionCommunity`
redirect :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecRedirectCommunity`
traffic_marking :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTrafficMarkingCommunity`
vlan_action :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecVlanActionCommunity`
tpid_action :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTPIDActionCommunity`
=============== ===============================================================
Example(IPv4)::
>>> speaker = BGPSpeaker(as_number=65001, router_id='172.17.0.1')
>>> speaker.neighbor_add(address='172.17.0.2',
... remote_as=65002,
... enable_ipv4fs=True)
>>> speaker.flowspec_prefix_add(
... flowspec_family=FLOWSPEC_FAMILY_IPV4,
... rules={
... 'dst_prefix': '10.60.1.0/24'
... },
... actions={
... 'traffic_marking': {
... 'dscp': 24
... }
... }
... )
Example(VPNv4)::
>>> speaker = BGPSpeaker(as_number=65001, router_id='172.17.0.1')
>>> speaker.neighbor_add(address='172.17.0.2',
... remote_as=65002,
... enable_vpnv4fs=True)
>>> speaker.vrf_add(route_dist='65001:100',
... import_rts=['65001:100'],
... export_rts=['65001:100'],
... route_family=RF_VPNV4_FLOWSPEC)
>>> speaker.flowspec_prefix_add(
... flowspec_family=FLOWSPEC_FAMILY_VPNV4,
... route_dist='65000:100',
... rules={
... 'dst_prefix': '10.60.1.0/24'
... },
... actions={
... 'traffic_marking': {
... 'dscp': 24
... }
... }
... )
"""
func_name = 'flowspec.add'
# Set required arguments
kwargs = {
FLOWSPEC_FAMILY: flowspec_family,
FLOWSPEC_RULES: rules,
FLOWSPEC_ACTIONS: actions or {},
}
if flowspec_family in [FLOWSPEC_FAMILY_VPNV4, FLOWSPEC_FAMILY_VPNV6,
FLOWSPEC_FAMILY_L2VPN]:
func_name = 'flowspec.add_local'
kwargs.update({ROUTE_DISTINGUISHER: route_dist})
call(func_name, **kwargs)
def flowspec_prefix_del(self, flowspec_family, rules, route_dist=None):
""" This method deletes an advertised Flow Specification route.
``flowspec_family`` specifies one of the flowspec family name.
``rules`` specifies NLRIs of Flow Specification as
a dictionary type value.
``route_dist`` specifies a route distinguisher value.
"""
func_name = 'flowspec.del'
# Set required arguments
kwargs = {
FLOWSPEC_FAMILY: flowspec_family,
FLOWSPEC_RULES: rules,
}
if flowspec_family in [FLOWSPEC_FAMILY_VPNV4, FLOWSPEC_FAMILY_VPNV6,
FLOWSPEC_FAMILY_L2VPN]:
func_name = 'flowspec.del_local'
kwargs.update({ROUTE_DISTINGUISHER: route_dist})
call(func_name, **kwargs)
def vrf_add(self, route_dist, import_rts, export_rts, site_of_origins=None,
route_family=RF_VPN_V4, multi_exit_disc=None):
""" This method adds a new vrf used for VPN.
``route_dist`` specifies a route distinguisher value.
``import_rts`` specifies a list of route targets to be imported.
``export_rts`` specifies a list of route targets to be exported.
``site_of_origins`` specifies site_of_origin values.
This parameter must be a list of string.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
- RF_L2_EVPN = 'evpn'
- RF_VPNV4_FLOWSPEC = 'ipv4fs'
- RF_VPNV6_FLOWSPEC = 'ipv6fs'
- RF_L2VPN_FLOWSPEC = 'l2vpnfs'
``multi_exit_disc`` specifies multi exit discriminator (MED) value.
It must be an integer.
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family)
vrf = {
vrfs.ROUTE_DISTINGUISHER: route_dist,
vrfs.IMPORT_RTS: import_rts,
vrfs.EXPORT_RTS: export_rts,
vrfs.SITE_OF_ORIGINS: site_of_origins,
vrfs.VRF_RF: route_family,
vrfs.MULTI_EXIT_DISC: multi_exit_disc,
}
call('vrf.create', **vrf)
def vrf_del(self, route_dist):
""" This method deletes the existing vrf.
``route_dist`` specifies a route distinguisher value.
"""
vrf = {vrfs.ROUTE_DISTINGUISHER: route_dist}
call('vrf.delete', **vrf)
def vrfs_get(self, subcommand='routes', route_dist=None,
route_family='all', format='json'):
""" This method returns the existing vrfs.
``subcommand`` specifies one of the following.
- 'routes': shows routes present for vrf
- 'summary': shows configuration and summary of vrf
``route_dist`` specifies a route distinguisher value.
If route_family is not 'all', this value must be specified.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 = 'ipv4'
- RF_VPN_V6 = 'ipv6'
- RF_L2_EVPN = 'evpn'
- 'all' (default)
``format`` specifies the format of the response.
This parameter must be one of the following.
- 'json' (default)
- 'cli'
"""
show = {
'format': format,
}
if route_family in SUPPORTED_VRF_RF:
assert route_dist is not None
show['params'] = ['vrf', subcommand, route_dist, route_family]
else:
show['params'] = ['vrf', subcommand, 'all']
return call('operator.show', **show)
def rib_get(self, family='all', format='json'):
""" This method returns the BGP routing information in a json
format. This will be improved soon.
``family`` specifies the address family of the RIB (e.g. 'ipv4').
``format`` specifies the format of the response.
This parameter must be one of the following.
- 'json' (default)
- 'cli'
"""
show = {
'params': ['rib', family],
'format': format
}
return call('operator.show', **show)
def neighbor_get(self, route_type, address, format='json'):
""" This method returns the BGP adj-RIB-in/adj-RIB-out information
in a json format.
``route_type`` This parameter is necessary for only received-routes
and sent-routes.
- received-routes : paths received and not withdrawn by given peer
- sent-routes : paths sent and not withdrawn to given peer
``address`` specifies the IP address of the peer. It must be
the string representation of an IP address.
``format`` specifies the format of the response.
This parameter must be one of the following.
- 'json' (default)
- 'cli'
"""
show = {
'format': format,
}
if route_type == 'sent-routes' or route_type == 'received-routes':
show['params'] = ['neighbor', route_type, address, 'all']
else:
show['params'] = ['neighbor', 'received-routes', address, 'all']
return call('operator.show', **show)
def neighbors_get(self, format='json'):
""" This method returns a list of the BGP neighbors.
``format`` specifies the format of the response.
This parameter must be one of the following.
- 'json' (default)
- 'cli'
"""
show = {
'params': ['neighbor'],
'format': format,
}
return call('operator.show', **show)
def _set_filter(self, filter_type, address, filters):
assert filter_type in ('in', 'out'), (
"filter type must be 'in' or 'out'")
assert all(isinstance(f, Filter) for f in filters), (
'all the items in filters must be an instance of Filter sub-class')
if filters is None:
filters = []
func_name = 'neighbor.' + filter_type + '_filter.set'
param = {
neighbors.IP_ADDRESS: address,
}
if filter_type == 'in':
param[neighbors.IN_FILTER] = filters
else:
param[neighbors.OUT_FILTER] = filters
call(func_name, **param)
def out_filter_set(self, address, filters):
""" This method sets out-filter to neighbor.
``address`` specifies the IP address of the peer.
``filters`` specifies a filter list to filter the path advertisement.
The contents must be an instance of Filter sub-class
If you want to define out-filter that send only a particular
prefix to neighbor, filters can be created as follows::
p = PrefixFilter('10.5.111.0/24',
policy=PrefixFilter.POLICY_PERMIT)
all = PrefixFilter('0.0.0.0/0',
policy=PrefixFilter.POLICY_DENY)
pList = [p, all]
self.bgpspeaker.out_filter_set(neighbor_address, pList)
.. Note::
out-filter evaluates paths in the order of Filter in the pList.
"""
self._set_filter('out', address, filters)
def out_filter_get(self, address):
""" This method gets out-filter setting from the specified neighbor.
``address`` specifies the IP address of the peer.
Returns a list object containing an instance of Filter sub-class
"""
func_name = 'neighbor.out_filter.get'
param = {
neighbors.IP_ADDRESS: address,
}
return call(func_name, **param)
def in_filter_set(self, address, filters):
"""This method sets in-bound filters to a neighbor.
``address`` specifies the IP address of the neighbor
``filters`` specifies filter list applied before advertised paths are
imported to the global rib. All the items in the list must be an
instance of Filter sub-class.
"""
self._set_filter('in', address, filters)
def in_filter_get(self, address):
"""This method gets in-bound filters of the specified neighbor.
``address`` specifies the IP address of the neighbor.
Returns a list object containing an instance of Filter sub-class
"""
func_name = 'neighbor.in_filter.get'
param = {
neighbors.IP_ADDRESS: address,
}
return call(func_name, **param)
def bmp_server_add(self, address, port):
"""This method registers a new BMP (BGP monitoring Protocol)
server. The BGP speaker starts to send BMP messages to the
server. Currently, only one BMP server can be registered.
``address`` specifies the IP address of a BMP server.
``port`` specifies the listen port number of a BMP server.
"""
func_name = 'bmp.start'
param = {
'host': address,
'port': port,
}
call(func_name, **param)
def bmp_server_del(self, address, port):
""" This method unregister the registered BMP server.
``address`` specifies the IP address of a BMP server.
``port`` specifies the listen port number of a BMP server.
"""
func_name = 'bmp.stop'
param = {
'host': address,
'port': port,
}
call(func_name, **param)
def attribute_map_set(self, address, attribute_maps,
route_dist=None, route_family=RF_VPN_V4):
"""This method sets attribute mapping to a neighbor.
attribute mapping can be used when you want to apply
attribute to BGPUpdate under specific conditions.
``address`` specifies the IP address of the neighbor
``attribute_maps`` specifies attribute_map list that are used
before paths are advertised. All the items in the list must
be an instance of AttributeMap class
``route_dist`` specifies route dist in which attribute_maps
are added.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
We can set AttributeMap to a neighbor as follows::
pref_filter = PrefixFilter('192.168.103.0/30',
PrefixFilter.POLICY_PERMIT)
attribute_map = AttributeMap([pref_filter],
AttributeMap.ATTR_LOCAL_PREF, 250)
speaker.attribute_map_set('192.168.50.102', [attribute_map])
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family)
func_name = 'neighbor.attribute_map.set'
param = {
neighbors.IP_ADDRESS: address,
neighbors.ATTRIBUTE_MAP: attribute_maps,
}
if route_dist is not None:
param[vrfs.ROUTE_DISTINGUISHER] = route_dist
param[vrfs.VRF_RF] = route_family
call(func_name, **param)
def attribute_map_get(self, address, route_dist=None,
route_family=RF_VPN_V4):
"""This method gets in-bound filters of the specified neighbor.
``address`` specifies the IP address of the neighbor.
``route_dist`` specifies route distinguisher that has attribute_maps.
``route_family`` specifies route family of the VRF.
This parameter must be one of the following.
- RF_VPN_V4 (default) = 'ipv4'
- RF_VPN_V6 = 'ipv6'
Returns a list object containing an instance of AttributeMap
"""
if route_family not in SUPPORTED_VRF_RF:
raise ValueError('Unsupported route_family: %s' % route_family)
func_name = 'neighbor.attribute_map.get'
param = {
neighbors.IP_ADDRESS: address,
}
if route_dist is not None:
param[vrfs.ROUTE_DISTINGUISHER] = route_dist
param[vrfs.VRF_RF] = route_family
return call(func_name, **param)
@staticmethod
def _check_rf_and_normalize(prefix):
""" check prefix's route_family and if the address is
IPv6 address, return IPv6 route_family and normalized IPv6 address.
If the address is IPv4 address, return IPv4 route_family
and the prefix itself.
"""
ip, masklen = prefix.split('/')
if netaddr.valid_ipv6(ip):
# normalize IPv6 address
ipv6_prefix = str(netaddr.IPAddress(ip)) + '/' + masklen
return vrfs.VRF_RF_IPV6, ipv6_prefix
else:
return vrfs.VRF_RF_IPV4, prefix
| 38.428467
| 87
| 0.637757
|
import netaddr
from ryu.lib import hub
from ryu.lib.packet.bgp import (
BGPFlowSpecTrafficActionCommunity,
BGPFlowSpecVlanActionCommunity,
BGPFlowSpecTPIDActionCommunity,
)
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
from ryu.services.protocols.bgp.api.base import call
from ryu.services.protocols.bgp.api.base import PREFIX
from ryu.services.protocols.bgp.api.base import EVPN_ROUTE_TYPE
from ryu.services.protocols.bgp.api.base import EVPN_ESI
from ryu.services.protocols.bgp.api.base import EVPN_ETHERNET_TAG_ID
from ryu.services.protocols.bgp.api.base import REDUNDANCY_MODE
from ryu.services.protocols.bgp.api.base import IP_ADDR
from ryu.services.protocols.bgp.api.base import MAC_ADDR
from ryu.services.protocols.bgp.api.base import NEXT_HOP
from ryu.services.protocols.bgp.api.base import IP_PREFIX
from ryu.services.protocols.bgp.api.base import GW_IP_ADDR
from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
from ryu.services.protocols.bgp.api.base import ROUTE_FAMILY
from ryu.services.protocols.bgp.api.base import EVPN_VNI
from ryu.services.protocols.bgp.api.base import TUNNEL_TYPE
from ryu.services.protocols.bgp.api.base import PMSI_TUNNEL_TYPE
from ryu.services.protocols.bgp.api.prefix import EVPN_MAX_ET
from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_LACP
from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_L2_BRIDGE
from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_MAC_BASED
from ryu.services.protocols.bgp.api.prefix import EVPN_ETH_AUTO_DISCOVERY
from ryu.services.protocols.bgp.api.prefix import EVPN_MAC_IP_ADV_ROUTE
from ryu.services.protocols.bgp.api.prefix import EVPN_MULTICAST_ETAG_ROUTE
from ryu.services.protocols.bgp.api.prefix import EVPN_ETH_SEGMENT
from ryu.services.protocols.bgp.api.prefix import EVPN_IP_PREFIX_ROUTE
from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_ALL_ACTIVE
from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_SINGLE_ACTIVE
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_VXLAN
from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_NVGRE
from ryu.services.protocols.bgp.api.prefix import (
PMSI_TYPE_NO_TUNNEL_INFO,
PMSI_TYPE_INGRESS_REP)
from ryu.services.protocols.bgp.api.prefix import (
FLOWSPEC_FAMILY,
FLOWSPEC_FAMILY_IPV4,
FLOWSPEC_FAMILY_VPNV4,
FLOWSPEC_FAMILY_IPV6,
FLOWSPEC_FAMILY_VPNV6,
FLOWSPEC_FAMILY_L2VPN,
FLOWSPEC_RULES,
FLOWSPEC_ACTIONS)
from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
from ryu.services.protocols.bgp.rtconf.common import CLUSTER_ID
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_HOSTS
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_HOSTS
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import (
DEFAULT_REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_STALEPATH_TIME)
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import ALLOW_LOCAL_AS_IN_COUNT
from ryu.services.protocols.bgp.rtconf.common import LOCAL_PREF
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LOCAL_PREF
from ryu.services.protocols.bgp.rtconf import neighbors
from ryu.services.protocols.bgp.rtconf import vrfs
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_EVPN
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6FS
from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_L2VPNFS
from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH
from ryu.services.protocols.bgp.rtconf.base import CAP_FOUR_OCTET_AS_NUMBER
from ryu.services.protocols.bgp.rtconf.base import HOLD_TIME
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_CAP_MBGP_IPV4,
DEFAULT_CAP_MBGP_IPV6,
DEFAULT_CAP_MBGP_VPNV4,
DEFAULT_CAP_MBGP_VPNV6,
DEFAULT_CAP_MBGP_EVPN,
DEFAULT_CAP_MBGP_IPV4FS,
DEFAULT_CAP_MBGP_IPV6FS,
DEFAULT_CAP_MBGP_VPNV4FS,
DEFAULT_CAP_MBGP_VPNV6FS,
DEFAULT_CAP_MBGP_L2VPNFS,
DEFAULT_HOLD_TIME,
)
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_CAP_ENHANCED_REFRESH, DEFAULT_CAP_FOUR_OCTET_AS_NUMBER)
from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import PEER_NEXT_HOP
from ryu.services.protocols.bgp.rtconf.neighbors import PASSWORD
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_IS_ROUTE_SERVER_CLIENT, IS_ROUTE_SERVER_CLIENT)
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, IS_ROUTE_REFLECTOR_CLIENT)
from ryu.services.protocols.bgp.rtconf.neighbors import (
DEFAULT_IS_NEXT_HOP_SELF, IS_NEXT_HOP_SELF)
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE
from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_ADDRESS
from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_PORT
from ryu.services.protocols.bgp.rtconf.vrfs import SUPPORTED_VRF_RF
from ryu.services.protocols.bgp.info_base.base import Filter
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
from ryu.services.protocols.bgp.info_base.evpn import EvpnPath
# Public aliases for neighbor/VRF configuration constants, re-exported
# under the names users of this module are expected to import.
NEIGHBOR_CONF_MED = MULTI_EXIT_DISC
# VRF route-family identifiers.
RF_VPN_V4 = vrfs.VRF_RF_IPV4
RF_VPN_V6 = vrfs.VRF_RF_IPV6
RF_L2_EVPN = vrfs.VRF_RF_L2_EVPN
RF_VPNV4_FLOWSPEC = vrfs.VRF_RF_IPV4_FLOWSPEC
RF_VPNV6_FLOWSPEC = vrfs.VRF_RF_IPV6_FLOWSPEC
RF_L2VPN_FLOWSPEC = vrfs.VRF_RF_L2VPN_FLOWSPEC
# Flow Specification traffic-action community values.
FLOWSPEC_TA_SAMPLE = BGPFlowSpecTrafficActionCommunity.SAMPLE
FLOWSPEC_TA_TERMINAL = BGPFlowSpecTrafficActionCommunity.TERMINAL
# Flow Specification VLAN-action community values.
FLOWSPEC_VLAN_POP = BGPFlowSpecVlanActionCommunity.POP
FLOWSPEC_VLAN_PUSH = BGPFlowSpecVlanActionCommunity.PUSH
FLOWSPEC_VLAN_SWAP = BGPFlowSpecVlanActionCommunity.SWAP
FLOWSPEC_VLAN_RW_INNER = BGPFlowSpecVlanActionCommunity.REWRITE_INNER
FLOWSPEC_VLAN_RW_OUTER = BGPFlowSpecVlanActionCommunity.REWRITE_OUTER
# Flow Specification TPID-action community values.
FLOWSPEC_TPID_TI = BGPFlowSpecTPIDActionCommunity.TI
FLOWSPEC_TPID_TO = BGPFlowSpecTPIDActionCommunity.TO
class EventPrefix(object):
    """Best-path change event handed to the user callback.

    Wraps a path object together with a withdrawal flag and exposes a
    few convenience accessors over the underlying NLRI.
    """

    def __init__(self, path, is_withdraw):
        self.path = path
        self.is_withdraw = is_withdraw

    @property
    def remote_as(self):
        """AS number of the peer the path was learned from."""
        return self.path.source.remote_as

    @property
    def route_dist(self):
        """Route distinguisher, or None for non-VPN paths."""
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return self.path.nlri.route_dist
        return None

    @property
    def prefix(self):
        """Prefix string of the path, or None for unknown path types."""
        if isinstance(self.path, (Ipv4Path, Ipv6Path)):
            return self.path.nlri.addr + '/' + str(self.path.nlri.length)
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return self.path.nlri.prefix
        return None

    @property
    def nexthop(self):
        """Next hop of the path."""
        return self.path.nexthop

    @property
    def label(self):
        """MPLS label list for VPN paths, or None otherwise."""
        if isinstance(self.path, (Vpnv4Path, Vpnv6Path, EvpnPath)):
            return getattr(self.path.nlri, 'label_list', None)
        return None
class BGPSpeaker(object):
    """A BGP protocol speaker driven through the project's ``call()`` RPC
    layer.

    NOTE(review): this definition duplicates the documented copy of the
    same class earlier in this dump, with all docstrings stripped; the
    code below is kept byte-identical and only documentation is restored.
    """

    def __init__(self, as_number, router_id,
                 bgp_server_hosts=DEFAULT_BGP_SERVER_HOSTS,
                 bgp_server_port=DEFAULT_BGP_SERVER_PORT,
                 refresh_stalepath_time=DEFAULT_REFRESH_STALEPATH_TIME,
                 refresh_max_eor_time=DEFAULT_REFRESH_MAX_EOR_TIME,
                 best_path_change_handler=None,
                 peer_down_handler=None,
                 peer_up_handler=None,
                 ssh_console=False,
                 ssh_port=None, ssh_host=None, ssh_host_key=None,
                 label_range=DEFAULT_LABEL_RANGE,
                 allow_local_as_in_count=0,
                 cluster_id=None,
                 local_pref=DEFAULT_LOCAL_PREF):
        """Start the BGP core service and optionally an SSH console.

        ``as_number``/``router_id`` identify the local speaker; the
        optional handlers are invoked on best-path change and on peer
        up/down transitions.
        """
        super(BGPSpeaker, self).__init__()
        settings = {
            LOCAL_AS: as_number,
            ROUTER_ID: router_id,
            BGP_SERVER_HOSTS: bgp_server_hosts,
            BGP_SERVER_PORT: bgp_server_port,
            REFRESH_STALEPATH_TIME: refresh_stalepath_time,
            REFRESH_MAX_EOR_TIME: refresh_max_eor_time,
            LABEL_RANGE: label_range,
            ALLOW_LOCAL_AS_IN_COUNT: allow_local_as_in_count,
            CLUSTER_ID: cluster_id,
            LOCAL_PREF: local_pref,
        }
        self._core_start(settings)
        self._init_signal_listeners()
        self._best_path_change_handler = best_path_change_handler
        self._peer_down_handler = peer_down_handler
        self._peer_up_handler = peer_up_handler
        if ssh_console:
            # Import lazily so the paramiko dependency is only needed
            # when the SSH console is actually enabled.
            from ryu.services.protocols.bgp.operator import ssh
            ssh_settings = {
                ssh.SSH_PORT: ssh_port or ssh.DEFAULT_SSH_PORT,
                ssh.SSH_HOST: ssh_host or ssh.DEFAULT_SSH_HOST,
                ssh.SSH_HOST_KEY: ssh_host_key or ssh.DEFAULT_SSH_HOST_KEY,
            }
            hub.spawn(ssh.SSH_CLI_CONTROLLER.start, **ssh_settings)

    def _notify_peer_down(self, peer):
        """Forward a peer-down signal to the user handler, if any."""
        remote_ip = peer.ip_address
        remote_as = peer.remote_as
        if self._peer_down_handler:
            self._peer_down_handler(remote_ip, remote_as)

    def _notify_peer_up(self, peer):
        """Forward a peer-up signal to the user handler, if any."""
        remote_ip = peer.ip_address
        remote_as = peer.remote_as
        if self._peer_up_handler:
            self._peer_up_handler(remote_ip, remote_as)

    def _notify_best_path_changed(self, path, is_withdraw):
        """Wrap a best-path change in an EventPrefix and dispatch it.

        Paths without a source (locally originated) or of unsupported
        types are ignored.
        """
        if (not path.source
                or not isinstance(path, (Ipv4Path, Ipv6Path,
                                         Vpnv4Path, Vpnv6Path, EvpnPath))):
            return

        ev = EventPrefix(path, is_withdraw)

        if self._best_path_change_handler:
            self._best_path_change_handler(ev)

    def _init_signal_listeners(self):
        """Subscribe the notifier callbacks to the core signal bus."""
        CORE_MANAGER.get_core_service()._signal_bus.register_listener(
            BgpSignalBus.BGP_BEST_PATH_CHANGED,
            lambda _, info:
            self._notify_best_path_changed(info['path'],
                                           info['is_withdraw'])
        )
        CORE_MANAGER.get_core_service()._signal_bus.register_listener(
            BgpSignalBus.BGP_ADJ_DOWN,
            lambda _, info:
            self._notify_peer_down(info['peer'])
        )
        CORE_MANAGER.get_core_service()._signal_bus.register_listener(
            BgpSignalBus.BGP_ADJ_UP,
            lambda _, info:
            self._notify_peer_up(info['peer'])
        )

    def _core_start(self, settings):
        """Start the BGP core service and block until it is up."""
        waiter = hub.Event()
        call('core.start', waiter=waiter, **settings)
        waiter.wait()

    def _serve_forever(self):
        # Intentionally a no-op; the core service runs in its own hub
        # greenlets.
        pass

    def shutdown(self):
        """Shut down the BGP core service."""
        call('core.stop')

    def neighbor_add(self, address, remote_as,
                     enable_ipv4=DEFAULT_CAP_MBGP_IPV4,
                     enable_ipv6=DEFAULT_CAP_MBGP_IPV6,
                     enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4,
                     enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6,
                     enable_evpn=DEFAULT_CAP_MBGP_EVPN,
                     enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS,
                     enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS,
                     enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS,
                     enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS,
                     enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS,
                     enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH,
                     enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER,
                     next_hop=None, password=None, multi_exit_disc=None,
                     site_of_origins=None,
                     is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT,
                     is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT,
                     is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF,
                     local_address=None,
                     local_port=None, local_as=None,
                     connect_mode=DEFAULT_CONNECT_MODE,
                     hold_time=DEFAULT_HOLD_TIME):
        """Register a new BGP neighbor.

        ``address``/``remote_as`` identify the peer; the ``enable_*``
        flags select the multiprotocol capabilities advertised to it.
        The remaining optional parameters are only included in the
        neighbor configuration when explicitly given.
        """
        bgp_neighbor = {
            neighbors.IP_ADDRESS: address,
            neighbors.REMOTE_AS: remote_as,
            PEER_NEXT_HOP: next_hop,
            PASSWORD: password,
            IS_ROUTE_SERVER_CLIENT: is_route_server_client,
            IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client,
            IS_NEXT_HOP_SELF: is_next_hop_self,
            CONNECT_MODE: connect_mode,
            CAP_ENHANCED_REFRESH: enable_enhanced_refresh,
            CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number,
            CAP_MBGP_IPV4: enable_ipv4,
            CAP_MBGP_IPV6: enable_ipv6,
            CAP_MBGP_VPNV4: enable_vpnv4,
            CAP_MBGP_VPNV6: enable_vpnv6,
            CAP_MBGP_EVPN: enable_evpn,
            CAP_MBGP_IPV4FS: enable_ipv4fs,
            CAP_MBGP_IPV6FS: enable_ipv6fs,
            CAP_MBGP_VPNV4FS: enable_vpnv4fs,
            CAP_MBGP_VPNV6FS: enable_vpnv6fs,
            CAP_MBGP_L2VPNFS: enable_l2vpnfs,
            HOLD_TIME: hold_time,
        }
        if multi_exit_disc:
            bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc
        if site_of_origins:
            bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins
        if local_address:
            bgp_neighbor[LOCAL_ADDRESS] = local_address
        if local_port:
            bgp_neighbor[LOCAL_PORT] = local_port
        if local_as:
            bgp_neighbor[LOCAL_AS] = local_as
        call('neighbor.create', **bgp_neighbor)

    def neighbor_del(self, address):
        """Unregister the BGP neighbor with the given IP address."""
        bgp_neighbor = {
            neighbors.IP_ADDRESS: address,
        }
        call('neighbor.delete', **bgp_neighbor)

    def neighbor_reset(self, address):
        """Reset the session with the BGP neighbor at ``address``."""
        bgp_neighbor = {
            neighbors.IP_ADDRESS: address,
        }
        call('core.reset_neighbor', **bgp_neighbor)

    def neighbor_update(self, address, conf_type, conf_value):
        """Change one configuration value of an existing neighbor.

        ``conf_type`` must be MULTI_EXIT_DISC or CONNECT_MODE.
        """
        assert conf_type == MULTI_EXIT_DISC or conf_type == CONNECT_MODE

        func_name = 'neighbor.update'
        attribute_param = {}
        if conf_type == MULTI_EXIT_DISC:
            attribute_param = {neighbors.MULTI_EXIT_DISC: conf_value}
        elif conf_type == CONNECT_MODE:
            attribute_param = {neighbors.CONNECT_MODE: conf_value}

        param = {neighbors.IP_ADDRESS: address,
                 neighbors.CHANGES: attribute_param}
        call(func_name, **param)

    def neighbor_state_get(self, address=None, format='json'):
        """Return the summary state of one neighbor, or of all when
        ``address`` is None.
        """
        show = {
            'params': ['neighbor', 'summary'],
            'format': format,
        }
        if address:
            show['params'].append(address)
        return call('operator.show', **show)

    def prefix_add(self, prefix, next_hop=None, route_dist=None):
        """Advertise a new prefix, globally or inside a VRF when
        ``route_dist`` is given.
        """
        func_name = 'network.add'
        networks = {
            PREFIX: prefix,
        }
        if next_hop:
            networks[NEXT_HOP] = next_hop
        if route_dist:
            func_name = 'prefix.add_local'
            networks[ROUTE_DISTINGUISHER] = route_dist

            rf, p = self._check_rf_and_normalize(prefix)
            networks[ROUTE_FAMILY] = rf
            networks[PREFIX] = p

            if rf == vrfs.VRF_RF_IPV6 and netaddr.valid_ipv4(next_hop):
                # Map an IPv4 next hop into IPv6 notation for an IPv6
                # prefix advertised into a VRF.
                networks[NEXT_HOP] = \
                    str(netaddr.IPAddress(next_hop).ipv6())
        return call(func_name, **networks)

    def prefix_del(self, prefix, route_dist=None):
        """Withdraw an advertised prefix, globally or from a VRF."""
        func_name = 'network.del'
        networks = {
            PREFIX: prefix,
        }
        if route_dist:
            func_name = 'prefix.delete_local'
            networks[ROUTE_DISTINGUISHER] = route_dist

            rf, p = self._check_rf_and_normalize(prefix)
            networks[ROUTE_FAMILY] = rf
            networks[PREFIX] = p
        call(func_name, **networks)

    def evpn_prefix_add(self, route_type, route_dist, esi=0,
                        ethernet_tag_id=None, mac_addr=None, ip_addr=None,
                        ip_prefix=None, gw_ip_addr=None, vni=None,
                        next_hop=None, tunnel_type=None, pmsi_tunnel_type=None,
                        redundancy_mode=None):
        """Advertise an EVPN route of the given ``route_type``.

        Which of the optional arguments are used depends on the route
        type; unsupported route types raise ValueError.
        """
        func_name = 'evpn_prefix.add_local'

        # Set the default next-hop to "0.0.0.0" (self) when unspecified.
        if not next_hop:
            next_hop = '0.0.0.0'

        kwargs = {EVPN_ROUTE_TYPE: route_type,
                  ROUTE_DISTINGUISHER: route_dist,
                  NEXT_HOP: next_hop}

        if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
            kwargs[TUNNEL_TYPE] = tunnel_type
        elif tunnel_type is not None:
            raise ValueError('Unsupported tunnel type: %s' % tunnel_type)

        if route_type == EVPN_ETH_AUTO_DISCOVERY:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            })
            if vni is not None:
                kwargs[EVPN_VNI] = vni
            if redundancy_mode in [
                    REDUNDANCY_MODE_ALL_ACTIVE,
                    REDUNDANCY_MODE_SINGLE_ACTIVE]:
                kwargs[REDUNDANCY_MODE] = redundancy_mode
            elif redundancy_mode is not None:
                raise ValueError('Unsupported Redundancy Mode: %s' %
                                 redundancy_mode)
        elif route_type == EVPN_MAC_IP_ADV_ROUTE:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                MAC_ADDR: mac_addr,
                IP_ADDR: ip_addr,
            })
            # VNI is only meaningful for VXLAN/NVGRE encapsulation.
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
        elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_ADDR: ip_addr,
            })
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
            if pmsi_tunnel_type in [
                    PMSI_TYPE_NO_TUNNEL_INFO,
                    PMSI_TYPE_INGRESS_REP]:
                kwargs[PMSI_TUNNEL_TYPE] = pmsi_tunnel_type
            elif pmsi_tunnel_type is not None:
                raise ValueError('Unsupported PMSI tunnel type: %s' %
                                 pmsi_tunnel_type)
        elif route_type == EVPN_ETH_SEGMENT:
            kwargs.update({
                EVPN_ESI: esi,
                IP_ADDR: ip_addr,
            })
        elif route_type == EVPN_IP_PREFIX_ROUTE:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_PREFIX: ip_prefix,
                GW_IP_ADDR: gw_ip_addr,
            })
            if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]:
                kwargs[EVPN_VNI] = vni
        else:
            raise ValueError('Unsupported EVPN route type: %s' % route_type)

        call(func_name, **kwargs)

    def evpn_prefix_del(self, route_type, route_dist, esi=0,
                        ethernet_tag_id=None, mac_addr=None, ip_addr=None,
                        ip_prefix=None):
        """Withdraw an advertised EVPN route of the given
        ``route_type``; unsupported route types raise ValueError.
        """
        func_name = 'evpn_prefix.delete_local'

        kwargs = {EVPN_ROUTE_TYPE: route_type,
                  ROUTE_DISTINGUISHER: route_dist}

        if route_type == EVPN_ETH_AUTO_DISCOVERY:
            kwargs.update({
                EVPN_ESI: esi,
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
            })
        elif route_type == EVPN_MAC_IP_ADV_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                MAC_ADDR: mac_addr,
                IP_ADDR: ip_addr,
            })
        elif route_type == EVPN_MULTICAST_ETAG_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_ADDR: ip_addr,
            })
        elif route_type == EVPN_ETH_SEGMENT:
            kwargs.update({
                EVPN_ESI: esi,
                IP_ADDR: ip_addr,
            })
        elif route_type == EVPN_IP_PREFIX_ROUTE:
            kwargs.update({
                EVPN_ETHERNET_TAG_ID: ethernet_tag_id,
                IP_PREFIX: ip_prefix,
            })
        else:
            raise ValueError('Unsupported EVPN route type: %s' % route_type)

        call(func_name, **kwargs)

    def flowspec_prefix_add(self, flowspec_family, rules, route_dist=None,
                            actions=None):
        """Advertise a Flow Specification route; VPN flowspec families
        are added to the local VRF table using ``route_dist``.
        """
        func_name = 'flowspec.add'

        # Set required arguments
        kwargs = {
            FLOWSPEC_FAMILY: flowspec_family,
            FLOWSPEC_RULES: rules,
            FLOWSPEC_ACTIONS: actions or {},
        }

        if flowspec_family in [FLOWSPEC_FAMILY_VPNV4, FLOWSPEC_FAMILY_VPNV6,
                               FLOWSPEC_FAMILY_L2VPN]:
            func_name = 'flowspec.add_local'
            kwargs.update({ROUTE_DISTINGUISHER: route_dist})

        call(func_name, **kwargs)

    def flowspec_prefix_del(self, flowspec_family, rules, route_dist=None):
        """Withdraw an advertised Flow Specification route."""
        func_name = 'flowspec.del'

        # Set required arguments
        kwargs = {
            FLOWSPEC_FAMILY: flowspec_family,
            FLOWSPEC_RULES: rules,
        }

        if flowspec_family in [FLOWSPEC_FAMILY_VPNV4, FLOWSPEC_FAMILY_VPNV6,
                               FLOWSPEC_FAMILY_L2VPN]:
            func_name = 'flowspec.del_local'
            kwargs.update({ROUTE_DISTINGUISHER: route_dist})

        call(func_name, **kwargs)

    def vrf_add(self, route_dist, import_rts, export_rts, site_of_origins=None,
                route_family=RF_VPN_V4, multi_exit_disc=None):
        """Create a new VRF used for VPN; ``route_family`` must be one
        of SUPPORTED_VRF_RF or ValueError is raised.
        """
        if route_family not in SUPPORTED_VRF_RF:
            raise ValueError('Unsupported route_family: %s' % route_family)

        vrf = {
            vrfs.ROUTE_DISTINGUISHER: route_dist,
            vrfs.IMPORT_RTS: import_rts,
            vrfs.EXPORT_RTS: export_rts,
            vrfs.SITE_OF_ORIGINS: site_of_origins,
            vrfs.VRF_RF: route_family,
            vrfs.MULTI_EXIT_DISC: multi_exit_disc,
        }

        call('vrf.create', **vrf)

    def vrf_del(self, route_dist):
        """Delete the VRF identified by ``route_dist``."""
        vrf = {vrfs.ROUTE_DISTINGUISHER: route_dist}

        call('vrf.delete', **vrf)

    def vrfs_get(self, subcommand='routes', route_dist=None,
                 route_family='all', format='json'):
        """Return existing VRFs ('routes' or 'summary'); a concrete
        ``route_family`` requires ``route_dist``.
        """
        show = {
            'format': format,
        }
        if route_family in SUPPORTED_VRF_RF:
            assert route_dist is not None
            show['params'] = ['vrf', subcommand, route_dist, route_family]
        else:
            show['params'] = ['vrf', subcommand, 'all']

        return call('operator.show', **show)

    def rib_get(self, family='all', format='json'):
        """Return the BGP routing information for ``family``."""
        show = {
            'params': ['rib', family],
            'format': format
        }

        return call('operator.show', **show)

    def neighbor_get(self, route_type, address, format='json'):
        """Return adj-RIB-in/out for a peer; ``route_type`` other than
        'sent-routes'/'received-routes' defaults to received routes.
        """
        show = {
            'format': format,
        }
        if route_type == 'sent-routes' or route_type == 'received-routes':
            show['params'] = ['neighbor', route_type, address, 'all']
        else:
            show['params'] = ['neighbor', 'received-routes', address, 'all']

        return call('operator.show', **show)

    def neighbors_get(self, format='json'):
        """Return the list of configured BGP neighbors."""
        show = {
            'params': ['neighbor'],
            'format': format,
        }

        return call('operator.show', **show)

    def _set_filter(self, filter_type, address, filters):
        """Apply an in- or out-bound filter list to a neighbor.

        NOTE(review): passing ``filters=None`` raises TypeError here,
        because the isinstance() check iterates ``filters`` before the
        None guard below — the guard is effectively dead code.
        """
        assert filter_type in ('in', 'out'), (
            "filter type must be 'in' or 'out'")
        assert all(isinstance(f, Filter) for f in filters), (
            'all the items in filters must be an instance of Filter sub-class')

        if filters is None:
            filters = []

        func_name = 'neighbor.' + filter_type + '_filter.set'
        param = {
            neighbors.IP_ADDRESS: address,
        }
        if filter_type == 'in':
            param[neighbors.IN_FILTER] = filters
        else:
            param[neighbors.OUT_FILTER] = filters

        call(func_name, **param)

    def out_filter_set(self, address, filters):
        """Set out-bound filters on the neighbor at ``address``."""
        self._set_filter('out', address, filters)

    def out_filter_get(self, address):
        """Return the out-filter list of the neighbor at ``address``."""
        func_name = 'neighbor.out_filter.get'
        param = {
            neighbors.IP_ADDRESS: address,
        }

        return call(func_name, **param)

    def in_filter_set(self, address, filters):
        """Set in-bound filters on the neighbor at ``address``."""
        self._set_filter('in', address, filters)

    def in_filter_get(self, address):
        """Return the in-filter list of the neighbor at ``address``."""
        func_name = 'neighbor.in_filter.get'
        param = {
            neighbors.IP_ADDRESS: address,
        }

        return call(func_name, **param)

    def bmp_server_add(self, address, port):
        """Register a BMP server; the speaker starts sending BMP
        messages to it.
        """
        func_name = 'bmp.start'
        param = {
            'host': address,
            'port': port,
        }

        call(func_name, **param)

    def bmp_server_del(self, address, port):
        """Unregister a previously registered BMP server."""
        func_name = 'bmp.stop'
        param = {
            'host': address,
            'port': port,
        }

        call(func_name, **param)

    def attribute_map_set(self, address, attribute_maps,
                          route_dist=None, route_family=RF_VPN_V4):
        """Set attribute mappings on a neighbor, optionally scoped to a
        VRF via ``route_dist``.
        """
        if route_family not in SUPPORTED_VRF_RF:
            raise ValueError('Unsupported route_family: %s' % route_family)

        func_name = 'neighbor.attribute_map.set'
        param = {
            neighbors.IP_ADDRESS: address,
            neighbors.ATTRIBUTE_MAP: attribute_maps,
        }
        if route_dist is not None:
            param[vrfs.ROUTE_DISTINGUISHER] = route_dist
            param[vrfs.VRF_RF] = route_family
        call(func_name, **param)

    def attribute_map_get(self, address, route_dist=None,
                          route_family=RF_VPN_V4):
        """Return the attribute mappings of a neighbor, optionally
        scoped to a VRF via ``route_dist``.
        """
        if route_family not in SUPPORTED_VRF_RF:
            raise ValueError('Unsupported route_family: %s' % route_family)

        func_name = 'neighbor.attribute_map.get'
        param = {
            neighbors.IP_ADDRESS: address,
        }
        if route_dist is not None:
            param[vrfs.ROUTE_DISTINGUISHER] = route_dist
            param[vrfs.VRF_RF] = route_family

        return call(func_name, **param)

    @staticmethod
    def _check_rf_and_normalize(prefix):
        """Return (route_family, prefix), normalizing IPv6 notation."""
        ip, masklen = prefix.split('/')
        if netaddr.valid_ipv6(ip):
            # normalize IPv6 address
            ipv6_prefix = str(netaddr.IPAddress(ip)) + '/' + masklen
            return vrfs.VRF_RF_IPV6, ipv6_prefix
        else:
            return vrfs.VRF_RF_IPV4, prefix
| true
| true
|
1c4aa49b54346a99f2e75e366d65a02354ae6854
| 24,529
|
py
|
Python
|
neutron/agent/linux/ip_lib.py
|
insequent/neutron
|
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
|
[
"Apache-2.0"
] | null | null | null |
neutron/agent/linux/ip_lib.py
|
insequent/neutron
|
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
|
[
"Apache-2.0"
] | null | null | null |
neutron/agent/linux/ip_lib.py
|
insequent/neutron
|
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import netaddr
import os
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.i18n import _LE
# Module-level logger for this agent library.
LOG = logging.getLogger(__name__)

# Config options this module exposes; callers that need them must
# register OPTS with their own config object.
OPTS = [
    cfg.BoolOpt('ip_lib_force_root',
                default=False,
                help=_('Force ip_lib calls to use the root helper')),
]

# Name of the loopback device.
LOOPBACK_DEVNAME = 'lo'
# sysfs directory listing the network devices visible in a namespace.
SYS_NET_PATH = '/sys/class/net'
class SubProcessBase(object):
    """Base class for wrappers that shell out to the ``ip`` command,
    optionally inside a network namespace and/or via the root helper.
    """

    def __init__(self, namespace=None,
                 log_fail_as_error=True):
        """``namespace`` is the network namespace to run in (None for
        the root namespace); ``log_fail_as_error`` controls whether
        command failures are logged at error level.
        """
        self.namespace = namespace
        self.log_fail_as_error = log_fail_as_error
        try:
            self.force_root = cfg.CONF.ip_lib_force_root
        except cfg.NoSuchOptError:
            # Only callers that need to force use of the root helper
            # need to register the option.
            self.force_root = False

    def _run(self, options, command, args):
        """Run an ``ip`` subcommand, escalating to root when a
        namespace or the force_root option requires it.
        """
        if self.namespace:
            # Entering a namespace always requires root privileges.
            return self._as_root(options, command, args)
        elif self.force_root:
            # Force use of the root helper to ensure that commands
            # will execute in dom0 when running under XenServer/XCP.
            return self._execute(options, command, args, run_as_root=True,
                                 log_fail_as_error=self.log_fail_as_error)
        else:
            return self._execute(options, command, args,
                                 log_fail_as_error=self.log_fail_as_error)

    def _as_root(self, options, command, args, use_root_namespace=False):
        """Run an ``ip`` subcommand as root, in self.namespace unless
        ``use_root_namespace`` forces the root namespace.
        """
        namespace = self.namespace if not use_root_namespace else None

        return self._execute(options, command, args, run_as_root=True,
                             namespace=namespace,
                             log_fail_as_error=self.log_fail_as_error)

    @classmethod
    def _execute(cls, options, command, args, run_as_root=False,
                 namespace=None, log_fail_as_error=True):
        """Build and execute the full ``ip`` command line.

        ``options`` are single-letter flags (each rendered as '-X');
        ``command`` is the ip object (e.g. 'link'); ``args`` are the
        remaining arguments.
        """
        opt_list = ['-%s' % o for o in options]
        ip_cmd = add_namespace_to_cmd(['ip'], namespace)
        cmd = ip_cmd + opt_list + [command] + list(args)
        return utils.execute(cmd, run_as_root=run_as_root,
                             log_fail_as_error=log_fail_as_error)

    def set_log_fail_as_error(self, fail_with_error):
        # Toggle error-level logging of command failures after creation.
        self.log_fail_as_error = fail_with_error
class IPWrapper(SubProcessBase):
    """Namespace-level 'ip' wrapper.

    Creates devices, veth pairs, vxlan interfaces and namespaces, and
    exposes the 'ip netns' sub-command through ``self.netns``.
    """
    def __init__(self, namespace=None):
        super(IPWrapper, self).__init__(namespace=namespace)
        self.netns = IpNetnsCommand(self)
    def device(self, name):
        """Return an IPDevice handle for *name* in this namespace."""
        return IPDevice(name, namespace=self.namespace)
    def get_devices(self, exclude_loopback=False):
        """Return IPDevice objects for every interface in the namespace."""
        retval = []
        if self.namespace:
            # we call out manually because in order to avoid screen scraping
            # iproute2 we use find to see what is in the sysfs directory, as
            # suggested by Stephen Hemminger (iproute2 dev).
            output = utils.execute(['ip', 'netns', 'exec', self.namespace,
                                    'find', SYS_NET_PATH, '-maxdepth', '1',
                                    '-type', 'l', '-printf', '%f '],
                                   run_as_root=True,
                                   log_fail_as_error=self.log_fail_as_error
                                   ).split()
        else:
            # Outside a namespace sysfs can be read directly; each
            # interface appears as a symlink under /sys/class/net.
            output = (
                i for i in os.listdir(SYS_NET_PATH)
                if os.path.islink(os.path.join(SYS_NET_PATH, i))
            )
        for name in output:
            if exclude_loopback and name == LOOPBACK_DEVNAME:
                continue
            retval.append(IPDevice(name, namespace=self.namespace))
        return retval
    def add_tuntap(self, name, mode='tap'):
        """Create a tun/tap device and return its IPDevice handle."""
        self._as_root([], 'tuntap', ('add', name, 'mode', mode))
        return IPDevice(name, namespace=self.namespace)
    def add_veth(self, name1, name2, namespace2=None):
        """Create a veth pair; the peer may be placed in another namespace."""
        args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
        if namespace2 is None:
            namespace2 = self.namespace
        else:
            self.ensure_namespace(namespace2)
            args += ['netns', namespace2]
        self._as_root([], 'link', tuple(args))
        return (IPDevice(name1, namespace=self.namespace),
                IPDevice(name2, namespace=namespace2))
    def del_veth(self, name):
        """Delete a virtual interface between two namespaces."""
        self._as_root([], 'link', ('del', name))
    def ensure_namespace(self, name):
        """Create namespace *name* if missing; return an IPWrapper for it."""
        if not self.netns.exists(name):
            ip = self.netns.add(name)
            # Bring loopback up so the namespace is immediately usable.
            lo = ip.device(LOOPBACK_DEVNAME)
            lo.link.set_up()
        else:
            ip = IPWrapper(namespace=name)
        return ip
    def namespace_is_empty(self):
        """Return True when only the loopback device remains."""
        return not self.get_devices(exclude_loopback=True)
    def garbage_collect_namespace(self):
        """Conditionally destroy the namespace if it is empty."""
        if self.namespace and self.netns.exists(self.namespace):
            if self.namespace_is_empty():
                self.netns.delete(self.namespace)
                return True
        return False
    def add_device_to_namespace(self, device):
        """Move *device* into this wrapper's namespace (no-op if none)."""
        if self.namespace:
            device.link.set_netns(self.namespace)
    def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
                  local=None, port=None, proxy=False):
        """Create a vxlan interface; *port* is an optional (min, max) pair."""
        cmd = ['add', name, 'type', 'vxlan', 'id', vni]
        if group:
            cmd.extend(['group', group])
        if dev:
            cmd.extend(['dev', dev])
        if ttl:
            cmd.extend(['ttl', ttl])
        if tos:
            cmd.extend(['tos', tos])
        if local:
            cmd.extend(['local', local])
        if proxy:
            cmd.append('proxy')
        # tuple: min,max
        if port and len(port) == 2:
            cmd.extend(['port', port[0], port[1]])
        elif port:
            # A port value of the wrong shape is a caller error.
            raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
        self._as_root([], 'link', cmd)
        return (IPDevice(name, namespace=self.namespace))
    @classmethod
    def get_namespaces(cls):
        """Return the names of all network namespaces on the host."""
        output = cls._execute([], 'netns', ('list',))
        return [l.strip() for l in output.split('\n')]
class IPDevice(SubProcessBase):
    """A single network device, identified by name and namespace.

    Exposes the per-device 'ip link/addr/route/neigh' sub-commands
    through the corresponding attributes.
    """

    def __init__(self, name, namespace=None):
        super(IPDevice, self).__init__(namespace=namespace)
        self.name = name
        self.link = IpLinkCommand(self)
        self.addr = IpAddrCommand(self)
        self.route = IpRouteCommand(self)
        self.neigh = IpNeighCommand(self)

    def __eq__(self, other):
        return (other is not None and self.name == other.name
                and self.namespace == other.namespace)

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive __ne__ from __eq__, so
        # without this, 'a != b' silently fell back to identity
        # comparison and disagreed with __eq__.
        return not self.__eq__(other)

    def __str__(self):
        return self.name
class IpCommandBase(object):
    """Base for 'ip <COMMAND>' helpers; execution is delegated to a parent
    SubProcessBase, which owns namespace and privilege handling.
    """

    COMMAND = ''

    def __init__(self, parent):
        self._parent = parent

    def _run(self, options, args):
        """Run this sub-command without forcing root."""
        return self._parent._run(options, self.COMMAND, args)

    def _as_root(self, options, args, use_root_namespace=False):
        """Run this sub-command as root, optionally in the root namespace."""
        parent = self._parent
        return parent._as_root(options, self.COMMAND, args,
                               use_root_namespace=use_root_namespace)
class IPRule(SubProcessBase):
    """Wrapper exposing the 'ip rule' (policy routing) command family."""
    def __init__(self, namespace=None):
        super(IPRule, self).__init__(namespace=namespace)
        self.rule = IpRuleCommand(self)
class IpRuleCommand(IpCommandBase):
    """Wrapper around 'ip rule' for managing policy-routing rules."""

    COMMAND = 'rule'

    def _exists(self, ip, ip_version, table, rule_pr):
        """Return True if an identical rule is already installed.

        A typical line from 'ip rule show' looks like::

            4030201: from 1.2.3.4/24 lookup 10203040
        """
        wanted_pr = '%s:' % rule_pr
        wanted_ip = str(ip)
        wanted_table = str(table)
        for line in self._as_root([ip_version], ['show']).splitlines():
            parts = line.split()
            if not parts:
                continue
            if (parts[0] == wanted_pr and parts[2] == wanted_ip
                    and parts[-1] == wanted_table):
                return True
        return False

    def add(self, ip, table, rule_pr):
        """Add a 'from <ip> lookup <table>' rule unless it already exists."""
        ip_version = get_ip_version(ip)
        if self._exists(ip, ip_version, table, rule_pr):
            return
        self._as_root([ip_version],
                      ('add', 'from', ip, 'table', table,
                       'priority', rule_pr))

    def delete(self, ip, table, rule_pr):
        """Delete the rule for *table* at *rule_pr* (the ip is implicit)."""
        ip_version = get_ip_version(ip)
        self._as_root([ip_version],
                      ('del', 'table', table, 'priority', rule_pr))
class IpDeviceCommandBase(IpCommandBase):
    """Base for per-device 'ip' sub-commands.

    The device name is always read live from the owning IPDevice, so
    renames performed through the parent are reflected here.
    """

    @property
    def name(self):
        return self._parent.name
class IpLinkCommand(IpDeviceCommandBase):
    """Wrapper around 'ip link' scoped to the parent device."""
    COMMAND = 'link'
    def set_address(self, mac_address):
        """Set the device's MAC address."""
        self._as_root([], ('set', self.name, 'address', mac_address))
    def set_mtu(self, mtu_size):
        self._as_root([], ('set', self.name, 'mtu', mtu_size))
    def set_up(self):
        self._as_root([], ('set', self.name, 'up'))
    def set_down(self):
        self._as_root([], ('set', self.name, 'down'))
    def set_netns(self, namespace):
        """Move the device into *namespace* and retarget the parent handle."""
        self._as_root([], ('set', self.name, 'netns', namespace))
        self._parent.namespace = namespace
    def set_name(self, name):
        """Rename the device, keeping the parent handle in sync."""
        self._as_root([], ('set', self.name, 'name', name))
        self._parent.name = name
    def set_alias(self, alias_name):
        self._as_root([], ('set', self.name, 'alias', alias_name))
    def delete(self):
        self._as_root([], ('delete', self.name))
    @property
    def address(self):
        # MAC address as reported by 'ip -o link show'.
        return self.attributes.get('link/ether')
    @property
    def state(self):
        return self.attributes.get('state')
    @property
    def mtu(self):
        return self.attributes.get('mtu')
    @property
    def qdisc(self):
        return self.attributes.get('qdisc')
    @property
    def qlen(self):
        return self.attributes.get('qlen')
    @property
    def alias(self):
        return self.attributes.get('alias')
    @property
    def attributes(self):
        """Parse 'ip -o link show <dev>' output into a key/value dict."""
        return self._parse_line(self._run(['o'], ('show', self.name)))
    def _parse_line(self, value):
        """Turn one-line 'ip -o' output into a {key: value} dict.

        Everything after the '<FLAGS>' block alternates key/value tokens;
        purely numeric values are converted to int.
        """
        if not value:
            return {}
        device_name, settings = value.replace("\\", '').split('>', 1)
        tokens = settings.split()
        keys = tokens[::2]
        values = [int(v) if v.isdigit() else v for v in tokens[1::2]]
        retval = dict(zip(keys, values))
        return retval
class IpAddrCommand(IpDeviceCommandBase):
    """Wrapper around 'ip addr' scoped to the parent device."""
    COMMAND = 'addr'
    def add(self, cidr, scope='global'):
        """Add *cidr* to the device; IPv4 also gets its broadcast address."""
        net = netaddr.IPNetwork(cidr)
        args = ['add', cidr,
                'scope', scope,
                'dev', self.name]
        if net.version == 4:
            args += ['brd', str(net.broadcast)]
        self._as_root([net.version], tuple(args))
    def delete(self, cidr):
        """Remove *cidr* from the device."""
        ip_version = get_ip_version(cidr)
        self._as_root([ip_version],
                      ('del', cidr,
                       'dev', self.name))
    def flush(self, ip_version):
        """Remove all addresses of the given IP version from the device."""
        self._as_root([ip_version], ('flush', self.name))
    def list(self, scope=None, to=None, filters=None, ip_version=None):
        """Return the device's addresses as dicts of cidr/scope/dynamic.

        Parsing relies on the fixed column layout of 'ip addr show':
        the scope token shifts by two columns when an IPv4 line carries
        a 'brd' broadcast field.
        """
        options = [ip_version] if ip_version else []
        args = ['show', self.name]
        if filters:
            args += filters
        retval = []
        if scope:
            args += ['scope', scope]
        if to:
            args += ['to', to]
        for line in self._run(options, tuple(args)).split('\n'):
            line = line.strip()
            if not line.startswith('inet'):
                continue
            parts = line.split()
            if parts[0] == 'inet6':
                scope = parts[3]
            else:
                if parts[2] == 'brd':
                    scope = parts[5]
                else:
                    scope = parts[3]
            retval.append(dict(cidr=parts[1],
                               scope=scope,
                               dynamic=('dynamic' == parts[-1])))
        return retval
class IpRouteCommand(IpDeviceCommandBase):
    """Wrapper around 'ip route' scoped to the parent device."""

    COMMAND = 'route'

    def add_gateway(self, gateway, metric=None, table=None):
        """Install (or replace) the default route through *gateway*."""
        ip_version = get_ip_version(gateway)
        args = ['replace', 'default', 'via', gateway]
        if metric:
            args += ['metric', metric]
        args += ['dev', self.name]
        if table:
            args += ['table', table]
        self._as_root([ip_version], tuple(args))

    def delete_gateway(self, gateway, table=None):
        """Remove the default route through *gateway* on this device."""
        ip_version = get_ip_version(gateway)
        args = ['del', 'default',
                'via', gateway,
                'dev', self.name]
        if table:
            args += ['table', table]
        self._as_root([ip_version], tuple(args))

    def list_onlink_routes(self, ip_version):
        """Return link-scoped routes on the device that carry no 'src'."""
        def iterate_routes():
            output = self._run([ip_version],
                               ('list',
                                'dev', self.name,
                                'scope', 'link'))
            for line in output.split('\n'):
                line = line.strip()
                # Lines with 'src' are the kernel's connected routes;
                # only report the extra on-link routes.
                if line and not line.count('src'):
                    yield line
        return [x for x in iterate_routes()]

    def add_onlink_route(self, cidr):
        ip_version = get_ip_version(cidr)
        self._as_root([ip_version],
                      ('replace', cidr,
                       'dev', self.name,
                       'scope', 'link'))

    def delete_onlink_route(self, cidr):
        ip_version = get_ip_version(cidr)
        self._as_root([ip_version],
                      ('del', cidr,
                       'dev', self.name,
                       'scope', 'link'))

    def get_gateway(self, scope=None, filters=None, ip_version=None):
        """Return {'gateway': ip[, 'metric': n]} for the default route,
        or None when the device has no default route.
        """
        options = [ip_version] if ip_version else []
        args = ['list', 'dev', self.name]
        if filters:
            args += filters
        retval = None
        if scope:
            args += ['scope', scope]
        route_list_lines = self._run(options, tuple(args)).split('\n')
        default_route_line = next((x.strip() for x in
                                   route_list_lines if
                                   x.strip().startswith('default')), None)
        if default_route_line:
            # 'default via <gw> ...': the gateway is the third token.
            gateway_index = 2
            parts = default_route_line.split()
            retval = dict(gateway=parts[gateway_index])
            if 'metric' in parts:
                metric_index = parts.index('metric') + 1
                retval.update(metric=int(parts[metric_index]))
        return retval

    def pullup_route(self, interface_name):
        """Ensures that the route entry for the interface is before all
        others on the same subnet.
        """
        device_list = []
        device_route_list_lines = self._run([],
                                            ('list',
                                             'proto', 'kernel',
                                             'dev', interface_name)
                                            ).split('\n')
        for device_route_line in device_route_list_lines:
            try:
                subnet = device_route_line.split()[0]
            except Exception:
                continue
            subnet_route_list_lines = self._run([],
                                                ('list',
                                                 'proto', 'kernel',
                                                 'match', subnet)
                                                ).split('\n')
            for subnet_route_line in subnet_route_list_lines:
                i = iter(subnet_route_line.split())
                # Bug fix: use the builtin next() instead of the Python 2
                # only iterator method .next(), which does not exist on
                # Python 3 iterators.
                while(next(i) != 'dev'):
                    pass
                device = next(i)
                try:
                    while(next(i) != 'src'):
                        pass
                    src = next(i)
                except Exception:
                    # No 'src' field on this route line.
                    src = ''
                if device != interface_name:
                    device_list.append((device, src))
                else:
                    # Our interface's route is already first; stop here.
                    break
            # Delete and re-append every competing route so the
            # interface's own route ends up first for this subnet.
            for (device, src) in device_list:
                self._as_root([], ('del', subnet, 'dev', device))
                if (src != ''):
                    self._as_root([],
                                  ('append', subnet,
                                   'proto', 'kernel',
                                   'src', src,
                                   'dev', device))
                else:
                    self._as_root([],
                                  ('append', subnet,
                                   'proto', 'kernel',
                                   'dev', device))

    def add_route(self, cidr, ip, table=None):
        """Install (or replace) a route to *cidr* via *ip* on this device."""
        ip_version = get_ip_version(cidr)
        args = ['replace', cidr, 'via', ip, 'dev', self.name]
        if table:
            args += ['table', table]
        self._as_root([ip_version], tuple(args))

    def delete_route(self, cidr, ip, table=None):
        """Delete the route to *cidr* via *ip* on this device."""
        ip_version = get_ip_version(cidr)
        args = ['del', cidr, 'via', ip, 'dev', self.name]
        if table:
            args += ['table', table]
        self._as_root([ip_version], tuple(args))
class IpNeighCommand(IpDeviceCommandBase):
    """Wrapper around 'ip neigh' for static ARP/NDP entries."""

    COMMAND = 'neigh'

    def add(self, ip_address, mac_address):
        """Install a permanent neighbour entry on the device."""
        version = get_ip_version(ip_address)
        args = ('replace', ip_address,
                'lladdr', mac_address,
                'nud', 'permanent',
                'dev', self.name)
        self._as_root([version], args)

    def delete(self, ip_address, mac_address):
        """Remove a neighbour entry from the device."""
        version = get_ip_version(ip_address)
        args = ('del', ip_address,
                'lladdr', mac_address,
                'dev', self.name)
        self._as_root([version], args)
class IpNetnsCommand(IpCommandBase):
    """Wrapper around 'ip netns' for namespace management."""
    COMMAND = 'netns'
    def add(self, name):
        """Create namespace *name* and return an IPWrapper bound to it."""
        self._as_root([], ('add', name), use_root_namespace=True)
        wrapper = IPWrapper(namespace=name)
        # Promote secondary IPs so removing a primary address does not
        # silently drop the secondaries with it.
        wrapper.netns.execute(['sysctl', '-w',
                               'net.ipv4.conf.all.promote_secondaries=1'])
        return wrapper
    def delete(self, name):
        """Delete namespace *name* (always from the root namespace)."""
        self._as_root([], ('delete', name), use_root_namespace=True)
    def execute(self, cmds, addl_env=None, check_exit_code=True,
                extra_ok_codes=None):
        """Run *cmds* inside the namespace; root is required when namespaced.

        *addl_env* is a dict of extra environment variables passed via
        'env VAR=value ...' in front of the command.
        """
        ns_params = []
        kwargs = {}
        if self._parent.namespace:
            kwargs['run_as_root'] = True
            ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
        env_params = []
        if addl_env:
            env_params = (['env'] +
                          ['%s=%s' % pair for pair in addl_env.items()])
        cmd = ns_params + env_params + list(cmds)
        return utils.execute(cmd, check_exit_code=check_exit_code,
                             extra_ok_codes=extra_ok_codes, **kwargs)
    def exists(self, name):
        """Return True if namespace *name* appears in 'ip netns list'."""
        output = self._parent._execute(
            ['o'], 'netns', ['list'],
            run_as_root=cfg.CONF.AGENT.use_helper_for_ns_read)
        for line in output.split('\n'):
            if name == line.strip():
                return True
        return False
def device_exists(device_name, namespace=None):
    """Return True if the device exists in the namespace."""
    try:
        dev = IPDevice(device_name, namespace=namespace)
        dev.set_log_fail_as_error(False)
        # Reading the MAC raises RuntimeError when the device is missing.
        return bool(dev.link.address)
    except RuntimeError:
        return False
def device_exists_with_ip_mac(device_name, ip_cidr, mac, namespace=None):
    """Return True if a device with the given IP and MAC addresses
    exists in the namespace.
    """
    try:
        device = IPDevice(device_name, namespace=namespace)
        if device.link.address != mac:
            return False
        configured_cidrs = (ip['cidr'] for ip in device.addr.list())
        return ip_cidr in configured_cidrs
    except RuntimeError:
        # Querying a nonexistent device raises RuntimeError.
        return False
def get_routing_table(namespace=None):
    """Return a list of dictionaries, each representing a route.

    The dictionary format is: {'destination': cidr,
                               'nexthop': ip,
                               'device': device_name}
    """
    ip_wrapper = IPWrapper(namespace=namespace)
    table = ip_wrapper.netns.execute(['ip', 'route'], check_exit_code=True)
    # Example output:
    #   default via 192.168.3.120 dev wlp3s0 proto static metric 1024
    #   10.0.0.0/8 dev tun0 proto static scope link metric 1024
    routes = []
    for line in table.split('\n'):
        fields = line.split()
        if not fields:
            continue
        # First token is the destination; the remainder alternates
        # key/value pairs (e.g. 'dev': 'tun0').
        attrs = dict(fields[i:i + 2] for i in range(1, len(fields), 2))
        routes.append({'destination': fields[0],
                       'nexthop': attrs.get('via'),
                       'device': attrs.get('dev')})
    return routes
def ensure_device_is_ready(device_name, namespace=None):
    """Bring the device up; return False when it does not exist."""
    dev = IPDevice(device_name, namespace=namespace)
    dev.set_log_fail_as_error(False)
    try:
        # Setting an already-up device up is harmless; a missing
        # device makes 'ip link set' fail with RuntimeError.
        dev.link.set_up()
        return True
    except RuntimeError:
        return False
def iproute_arg_supported(command, arg):
    """Return True if iproute2 accepts *arg* for the given command.

    '<command> help' prints its usage text to stderr, so the supported
    arguments are scraped from there.
    """
    # Bug fix: build a new list rather than 'command += [...]', which
    # mutated the caller's list in place.
    help_cmd = command + ['help']
    stdout, stderr = utils.execute(help_cmd, check_exit_code=False,
                                   return_stderr=True)
    return any(arg in line for line in stderr.split('\n'))
def _arping(ns_name, iface_name, address, count):
    """Send *count* gratuitous ARPs for *address* on *iface_name*.

    Failures are logged, never raised: the interface may legitimately
    disappear while arping is running.
    """
    # Pass -w to set timeout to ensure exit if interface removed while running
    arping_cmd = ['arping', '-A', '-I', iface_name, '-c', count,
                  '-w', 1.5 * count, address]
    try:
        ip_wrapper = IPWrapper(namespace=ns_name)
        ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
    except Exception:
        msg = _LE("Failed sending gratuitous ARP "
                  "to %(addr)s on %(iface)s in namespace %(ns)s")
        LOG.exception(msg, {'addr': address,
                            'iface': iface_name,
                            'ns': ns_name})
def send_gratuitous_arp(ns_name, iface_name, address, count):
    """Send a gratuitous arp using given namespace, interface, and address."""
    if count <= 0:
        return

    def arping():
        _arping(ns_name, iface_name, address, count)

    # Fire-and-forget: arping runs in a green thread.
    eventlet.spawn_n(arping)
def send_garp_for_proxyarp(ns_name, iface_name, address, count):
    """
    Send a gratuitous arp using given namespace, interface, and address

    This version should be used when proxy arp is in use since the interface
    won't actually have the address configured. We actually need to configure
    the address on the interface and then remove it when the proxy arp has been
    sent.
    """
    def arping_with_temporary_address():
        # Configure the address on the interface
        device = IPDevice(iface_name, namespace=ns_name)
        net = netaddr.IPNetwork(str(address))
        device.addr.add(str(net))
        _arping(ns_name, iface_name, address, count)
        # Delete the address from the interface
        device.addr.delete(str(net))
    if count > 0:
        # Runs asynchronously in a green thread.
        eventlet.spawn_n(arping_with_temporary_address)
def add_namespace_to_cmd(cmd, namespace=None):
    """Prefix *cmd* with 'ip netns exec <namespace>' when one is given."""
    if namespace:
        return ['ip', 'netns', 'exec', namespace] + cmd
    return cmd
def get_ip_version(ip_or_cidr):
    """Return the IP version (4 or 6) of an address or CIDR string."""
    network = netaddr.IPNetwork(ip_or_cidr)
    return network.version
| 34.068056
| 79
| 0.555669
|
import eventlet
import netaddr
import os
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
OPTS = [
cfg.BoolOpt('ip_lib_force_root',
default=False,
help=_('Force ip_lib calls to use the root helper')),
]
LOOPBACK_DEVNAME = 'lo'
SYS_NET_PATH = '/sys/class/net'
class SubProcessBase(object):
def __init__(self, namespace=None,
log_fail_as_error=True):
self.namespace = namespace
self.log_fail_as_error = log_fail_as_error
try:
self.force_root = cfg.CONF.ip_lib_force_root
except cfg.NoSuchOptError:
self.force_root = False
def _run(self, options, command, args):
if self.namespace:
return self._as_root(options, command, args)
elif self.force_root:
return self._execute(options, command, args, run_as_root=True,
log_fail_as_error=self.log_fail_as_error)
else:
return self._execute(options, command, args,
log_fail_as_error=self.log_fail_as_error)
def _as_root(self, options, command, args, use_root_namespace=False):
namespace = self.namespace if not use_root_namespace else None
return self._execute(options, command, args, run_as_root=True,
namespace=namespace,
log_fail_as_error=self.log_fail_as_error)
@classmethod
def _execute(cls, options, command, args, run_as_root=False,
namespace=None, log_fail_as_error=True):
opt_list = ['-%s' % o for o in options]
ip_cmd = add_namespace_to_cmd(['ip'], namespace)
cmd = ip_cmd + opt_list + [command] + list(args)
return utils.execute(cmd, run_as_root=run_as_root,
log_fail_as_error=log_fail_as_error)
def set_log_fail_as_error(self, fail_with_error):
self.log_fail_as_error = fail_with_error
class IPWrapper(SubProcessBase):
def __init__(self, namespace=None):
super(IPWrapper, self).__init__(namespace=namespace)
self.netns = IpNetnsCommand(self)
def device(self, name):
return IPDevice(name, namespace=self.namespace)
def get_devices(self, exclude_loopback=False):
retval = []
if self.namespace:
output = utils.execute(['ip', 'netns', 'exec', self.namespace,
'find', SYS_NET_PATH, '-maxdepth', '1',
'-type', 'l', '-printf', '%f '],
run_as_root=True,
log_fail_as_error=self.log_fail_as_error
).split()
else:
output = (
i for i in os.listdir(SYS_NET_PATH)
if os.path.islink(os.path.join(SYS_NET_PATH, i))
)
for name in output:
if exclude_loopback and name == LOOPBACK_DEVNAME:
continue
retval.append(IPDevice(name, namespace=self.namespace))
return retval
def add_tuntap(self, name, mode='tap'):
self._as_root([], 'tuntap', ('add', name, 'mode', mode))
return IPDevice(name, namespace=self.namespace)
def add_veth(self, name1, name2, namespace2=None):
args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
if namespace2 is None:
namespace2 = self.namespace
else:
self.ensure_namespace(namespace2)
args += ['netns', namespace2]
self._as_root([], 'link', tuple(args))
return (IPDevice(name1, namespace=self.namespace),
IPDevice(name2, namespace=namespace2))
def del_veth(self, name):
self._as_root([], 'link', ('del', name))
def ensure_namespace(self, name):
if not self.netns.exists(name):
ip = self.netns.add(name)
lo = ip.device(LOOPBACK_DEVNAME)
lo.link.set_up()
else:
ip = IPWrapper(namespace=name)
return ip
def namespace_is_empty(self):
return not self.get_devices(exclude_loopback=True)
def garbage_collect_namespace(self):
if self.namespace and self.netns.exists(self.namespace):
if self.namespace_is_empty():
self.netns.delete(self.namespace)
return True
return False
def add_device_to_namespace(self, device):
if self.namespace:
device.link.set_netns(self.namespace)
def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
local=None, port=None, proxy=False):
cmd = ['add', name, 'type', 'vxlan', 'id', vni]
if group:
cmd.extend(['group', group])
if dev:
cmd.extend(['dev', dev])
if ttl:
cmd.extend(['ttl', ttl])
if tos:
cmd.extend(['tos', tos])
if local:
cmd.extend(['local', local])
if proxy:
cmd.append('proxy')
if port and len(port) == 2:
cmd.extend(['port', port[0], port[1]])
elif port:
raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
self._as_root([], 'link', cmd)
return (IPDevice(name, namespace=self.namespace))
@classmethod
def get_namespaces(cls):
output = cls._execute([], 'netns', ('list',))
return [l.strip() for l in output.split('\n')]
class IPDevice(SubProcessBase):
def __init__(self, name, namespace=None):
super(IPDevice, self).__init__(namespace=namespace)
self.name = name
self.link = IpLinkCommand(self)
self.addr = IpAddrCommand(self)
self.route = IpRouteCommand(self)
self.neigh = IpNeighCommand(self)
def __eq__(self, other):
return (other is not None and self.name == other.name
and self.namespace == other.namespace)
def __str__(self):
return self.name
class IpCommandBase(object):
COMMAND = ''
def __init__(self, parent):
self._parent = parent
def _run(self, options, args):
return self._parent._run(options, self.COMMAND, args)
def _as_root(self, options, args, use_root_namespace=False):
return self._parent._as_root(options,
self.COMMAND,
args,
use_root_namespace=use_root_namespace)
class IPRule(SubProcessBase):
def __init__(self, namespace=None):
super(IPRule, self).__init__(namespace=namespace)
self.rule = IpRuleCommand(self)
class IpRuleCommand(IpCommandBase):
COMMAND = 'rule'
def _exists(self, ip, ip_version, table, rule_pr):
rule_pr = str(rule_pr) + ":"
for line in self._as_root([ip_version], ['show']).splitlines():
parts = line.split()
if parts and (parts[0] == rule_pr and
parts[2] == str(ip) and
parts[-1] == str(table)):
return True
return False
def add(self, ip, table, rule_pr):
ip_version = get_ip_version(ip)
if not self._exists(ip, ip_version, table, rule_pr):
args = ['add', 'from', ip, 'table', table, 'priority', rule_pr]
self._as_root([ip_version], tuple(args))
def delete(self, ip, table, rule_pr):
ip_version = get_ip_version(ip)
args = ['del', 'table', table, 'priority', rule_pr]
self._as_root([ip_version], tuple(args))
class IpDeviceCommandBase(IpCommandBase):
@property
def name(self):
return self._parent.name
class IpLinkCommand(IpDeviceCommandBase):
COMMAND = 'link'
def set_address(self, mac_address):
self._as_root([], ('set', self.name, 'address', mac_address))
def set_mtu(self, mtu_size):
self._as_root([], ('set', self.name, 'mtu', mtu_size))
def set_up(self):
self._as_root([], ('set', self.name, 'up'))
def set_down(self):
self._as_root([], ('set', self.name, 'down'))
def set_netns(self, namespace):
self._as_root([], ('set', self.name, 'netns', namespace))
self._parent.namespace = namespace
def set_name(self, name):
self._as_root([], ('set', self.name, 'name', name))
self._parent.name = name
def set_alias(self, alias_name):
self._as_root([], ('set', self.name, 'alias', alias_name))
def delete(self):
self._as_root([], ('delete', self.name))
@property
def address(self):
return self.attributes.get('link/ether')
@property
def state(self):
return self.attributes.get('state')
@property
def mtu(self):
return self.attributes.get('mtu')
@property
def qdisc(self):
return self.attributes.get('qdisc')
@property
def qlen(self):
return self.attributes.get('qlen')
@property
def alias(self):
return self.attributes.get('alias')
@property
def attributes(self):
return self._parse_line(self._run(['o'], ('show', self.name)))
def _parse_line(self, value):
if not value:
return {}
device_name, settings = value.replace("\\", '').split('>', 1)
tokens = settings.split()
keys = tokens[::2]
values = [int(v) if v.isdigit() else v for v in tokens[1::2]]
retval = dict(zip(keys, values))
return retval
class IpAddrCommand(IpDeviceCommandBase):
COMMAND = 'addr'
def add(self, cidr, scope='global'):
net = netaddr.IPNetwork(cidr)
args = ['add', cidr,
'scope', scope,
'dev', self.name]
if net.version == 4:
args += ['brd', str(net.broadcast)]
self._as_root([net.version], tuple(args))
def delete(self, cidr):
ip_version = get_ip_version(cidr)
self._as_root([ip_version],
('del', cidr,
'dev', self.name))
def flush(self, ip_version):
self._as_root([ip_version], ('flush', self.name))
def list(self, scope=None, to=None, filters=None, ip_version=None):
options = [ip_version] if ip_version else []
args = ['show', self.name]
if filters:
args += filters
retval = []
if scope:
args += ['scope', scope]
if to:
args += ['to', to]
for line in self._run(options, tuple(args)).split('\n'):
line = line.strip()
if not line.startswith('inet'):
continue
parts = line.split()
if parts[0] == 'inet6':
scope = parts[3]
else:
if parts[2] == 'brd':
scope = parts[5]
else:
scope = parts[3]
retval.append(dict(cidr=parts[1],
scope=scope,
dynamic=('dynamic' == parts[-1])))
return retval
class IpRouteCommand(IpDeviceCommandBase):
COMMAND = 'route'
def add_gateway(self, gateway, metric=None, table=None):
ip_version = get_ip_version(gateway)
args = ['replace', 'default', 'via', gateway]
if metric:
args += ['metric', metric]
args += ['dev', self.name]
if table:
args += ['table', table]
self._as_root([ip_version], tuple(args))
def delete_gateway(self, gateway, table=None):
ip_version = get_ip_version(gateway)
args = ['del', 'default',
'via', gateway,
'dev', self.name]
if table:
args += ['table', table]
self._as_root([ip_version], tuple(args))
def list_onlink_routes(self, ip_version):
def iterate_routes():
output = self._run([ip_version],
('list',
'dev', self.name,
'scope', 'link'))
for line in output.split('\n'):
line = line.strip()
if line and not line.count('src'):
yield line
return [x for x in iterate_routes()]
def add_onlink_route(self, cidr):
ip_version = get_ip_version(cidr)
self._as_root([ip_version],
('replace', cidr,
'dev', self.name,
'scope', 'link'))
def delete_onlink_route(self, cidr):
ip_version = get_ip_version(cidr)
self._as_root([ip_version],
('del', cidr,
'dev', self.name,
'scope', 'link'))
def get_gateway(self, scope=None, filters=None, ip_version=None):
options = [ip_version] if ip_version else []
args = ['list', 'dev', self.name]
if filters:
args += filters
retval = None
if scope:
args += ['scope', scope]
route_list_lines = self._run(options, tuple(args)).split('\n')
default_route_line = next((x.strip() for x in
route_list_lines if
x.strip().startswith('default')), None)
if default_route_line:
gateway_index = 2
parts = default_route_line.split()
retval = dict(gateway=parts[gateway_index])
if 'metric' in parts:
metric_index = parts.index('metric') + 1
retval.update(metric=int(parts[metric_index]))
return retval
def pullup_route(self, interface_name):
device_list = []
device_route_list_lines = self._run([],
('list',
'proto', 'kernel',
'dev', interface_name)
).split('\n')
for device_route_line in device_route_list_lines:
try:
subnet = device_route_line.split()[0]
except Exception:
continue
subnet_route_list_lines = self._run([],
('list',
'proto', 'kernel',
'match', subnet)
).split('\n')
for subnet_route_line in subnet_route_list_lines:
i = iter(subnet_route_line.split())
while(i.next() != 'dev'):
pass
device = i.next()
try:
while(i.next() != 'src'):
pass
src = i.next()
except Exception:
src = ''
if device != interface_name:
device_list.append((device, src))
else:
break
for (device, src) in device_list:
self._as_root([], ('del', subnet, 'dev', device))
if (src != ''):
self._as_root([],
('append', subnet,
'proto', 'kernel',
'src', src,
'dev', device))
else:
self._as_root([],
('append', subnet,
'proto', 'kernel',
'dev', device))
def add_route(self, cidr, ip, table=None):
ip_version = get_ip_version(cidr)
args = ['replace', cidr, 'via', ip, 'dev', self.name]
if table:
args += ['table', table]
self._as_root([ip_version], tuple(args))
def delete_route(self, cidr, ip, table=None):
ip_version = get_ip_version(cidr)
args = ['del', cidr, 'via', ip, 'dev', self.name]
if table:
args += ['table', table]
self._as_root([ip_version], tuple(args))
class IpNeighCommand(IpDeviceCommandBase):
COMMAND = 'neigh'
def add(self, ip_address, mac_address):
ip_version = get_ip_version(ip_address)
self._as_root([ip_version],
('replace', ip_address,
'lladdr', mac_address,
'nud', 'permanent',
'dev', self.name))
def delete(self, ip_address, mac_address):
ip_version = get_ip_version(ip_address)
self._as_root([ip_version],
('del', ip_address,
'lladdr', mac_address,
'dev', self.name))
class IpNetnsCommand(IpCommandBase):
COMMAND = 'netns'
def add(self, name):
self._as_root([], ('add', name), use_root_namespace=True)
wrapper = IPWrapper(namespace=name)
wrapper.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.promote_secondaries=1'])
return wrapper
def delete(self, name):
self._as_root([], ('delete', name), use_root_namespace=True)
def execute(self, cmds, addl_env=None, check_exit_code=True,
extra_ok_codes=None):
ns_params = []
kwargs = {}
if self._parent.namespace:
kwargs['run_as_root'] = True
ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
env_params = []
if addl_env:
env_params = (['env'] +
['%s=%s' % pair for pair in addl_env.items()])
cmd = ns_params + env_params + list(cmds)
return utils.execute(cmd, check_exit_code=check_exit_code,
extra_ok_codes=extra_ok_codes, **kwargs)
def exists(self, name):
output = self._parent._execute(
['o'], 'netns', ['list'],
run_as_root=cfg.CONF.AGENT.use_helper_for_ns_read)
for line in output.split('\n'):
if name == line.strip():
return True
return False
def device_exists(device_name, namespace=None):
try:
dev = IPDevice(device_name, namespace=namespace)
dev.set_log_fail_as_error(False)
address = dev.link.address
except RuntimeError:
return False
return bool(address)
def device_exists_with_ip_mac(device_name, ip_cidr, mac, namespace=None):
try:
device = IPDevice(device_name, namespace=namespace)
if mac != device.link.address:
return False
if ip_cidr not in (ip['cidr'] for ip in device.addr.list()):
return False
except RuntimeError:
return False
else:
return True
def get_routing_table(namespace=None):
ip_wrapper = IPWrapper(namespace=namespace)
table = ip_wrapper.netns.execute(['ip', 'route'], check_exit_code=True)
routes = []
route_lines = (line.split() for line in table.split('\n') if line.strip())
for route in route_lines:
network = route[0]
data = dict(route[i:i + 2] for i in range(1, len(route), 2))
routes.append({'destination': network,
'nexthop': data.get('via'),
'device': data.get('dev')})
return routes
def ensure_device_is_ready(device_name, namespace=None):
dev = IPDevice(device_name, namespace=namespace)
dev.set_log_fail_as_error(False)
try:
dev.link.set_up()
except RuntimeError:
return False
return True
def iproute_arg_supported(command, arg):
command += ['help']
stdout, stderr = utils.execute(command, check_exit_code=False,
return_stderr=True)
return any(arg in line for line in stderr.split('\n'))
def _arping(ns_name, iface_name, address, count):
    """Send *count* gratuitous ARPs for *address* from *iface_name* in *ns_name*."""
    # -w bounds total runtime so arping exits if the interface is removed
    # while the command is running.
    cmd = ['arping', '-A', '-I', iface_name, '-c', count,
           '-w', 1.5 * count, address]
    try:
        wrapper = IPWrapper(namespace=ns_name)
        wrapper.netns.execute(cmd, check_exit_code=True)
    except Exception:
        LOG.exception(_LE("Failed sending gratuitous ARP "
                          "to %(addr)s on %(iface)s in namespace %(ns)s"),
                      {'addr': address,
                       'iface': iface_name,
                       'ns': ns_name})
def send_gratuitous_arp(ns_name, iface_name, address, count):
    """Spawn a green thread that sends *count* gratuitous ARPs; no-op if count <= 0."""
    if count > 0:
        # spawn_n forwards the positional arguments to _arping directly.
        eventlet.spawn_n(_arping, ns_name, iface_name, address, count)
def send_garp_for_proxyarp(ns_name, iface_name, address, count):
    """ARP for a proxied address by configuring it temporarily on the interface."""
    def _arp_with_temp_address():
        # arping needs the address present on the interface, so add it,
        # send the ARPs, then remove it again.
        dev = IPDevice(iface_name, namespace=ns_name)
        cidr = str(netaddr.IPNetwork(str(address)))
        dev.addr.add(cidr)
        _arping(ns_name, iface_name, address, count)
        dev.addr.delete(cidr)
    if count > 0:
        eventlet.spawn_n(_arp_with_temp_address)
def add_namespace_to_cmd(cmd, namespace=None):
    """Prefix *cmd* with an ``ip netns exec`` wrapper when a namespace is given."""
    if not namespace:
        return cmd
    return ['ip', 'netns', 'exec', namespace] + cmd
def get_ip_version(ip_or_cidr):
    """Return the IP version (4 or 6) of an address or CIDR string."""
    network = netaddr.IPNetwork(ip_or_cidr)
    return network.version
| true
| true
|
1c4aa4e22c898d243a62cb37a4da258b2d74fcfd
| 1,829
|
py
|
Python
|
imageRead/aluno/models.py
|
Sou-eu-Miguel/reply-card
|
d2b8716c02e593d627c6d88c7252c4abc897532b
|
[
"Apache-2.0"
] | 1
|
2018-12-12T21:36:10.000Z
|
2018-12-12T21:36:10.000Z
|
imageRead/aluno/models.py
|
LucasLimakxy/reply-card
|
d2b8716c02e593d627c6d88c7252c4abc897532b
|
[
"Apache-2.0"
] | 6
|
2021-02-02T22:56:52.000Z
|
2022-03-12T00:43:59.000Z
|
imageRead/aluno/models.py
|
Sou-eu-Miguel/reply-card
|
d2b8716c02e593d627c6d88c7252c4abc897532b
|
[
"Apache-2.0"
] | 1
|
2018-12-13T20:58:16.000Z
|
2018-12-13T20:58:16.000Z
|
from decimal import Decimal
from django.core.validators import MinValueValidator
from django.db import models
from django.urls import reverse
from ..turma.models import Turma, Sessao
# Create your models here.
class Aluno(models.Model):
    """Student: a display name plus a unique enrollment number, linked to a class."""

    # Student's display name.
    nome = models.CharField('Nome', max_length=60)
    # Enrollment number; unique per student.
    matricula = models.CharField('matricula', max_length=11, unique=True)
    # NOTE(review): on_delete=False is not a valid deletion handler -- Django
    # expects a callable such as models.CASCADE; confirm deleting a Turma
    # actually works with this value before relying on it.
    turma = models.ForeignKey(Turma, on_delete=False, blank=True)

    # Human-readable representation: "<matricula> - <nome>".
    def __str__(self):
        return self.matricula + ' - ' + self.nome

    # Admin display names and default query ordering.
    class Meta:
        verbose_name = 'Aluno'
        verbose_name_plural = 'Alunos'
        ordering = ['nome', 'matricula']

    def get_absolute_url(self):
        # Canonical URL: the class listing view keyed by this student's pk.
        return reverse('aluno-turma-list', kwargs={'pk': self.pk})
class AlunoSessao(models.Model):
    """Join model: one student's graded result for one questionnaire session."""

    aluno = models.ForeignKey(Aluno, on_delete= models.CASCADE)
    sessao = models.ForeignKey(Sessao, on_delete=models.CASCADE)
    # Session grade; validator requires a strictly positive value (>= 0.01).
    media = models.DecimalField('Média', decimal_places=2, max_digits=12, validators=[MinValueValidator(Decimal('0.01'))])
    descricao_alterativas = models.CharField('Descrição de Alterativas', max_length=60, blank=True)
    descricao_pontuacao = models.CharField('Descrição de Pontuação', max_length=60, blank=True)

    # Human-readable representation: "<student name> - <questionnaire name>".
    def __str__(self):
        return "{} - {}".format(self.aluno.nome, self.sessao.questionario.nome)

    # Admin display names and default query ordering.
    class Meta:
        verbose_name = 'AlunoSessao'
        verbose_name_plural = 'AlunosSessao'
        ordering = ['aluno', 'sessao','media']

    def get_absolute_url(self):
        # Canonical URL: class listing for the session's class (turma).
        return reverse('aluno-turma-list', kwargs={'pk': self.sessao.turma.pk})
| 38.104167
| 140
| 0.65883
|
from decimal import Decimal
from django.core.validators import MinValueValidator
from django.db import models
from django.urls import reverse
from ..turma.models import Turma, Sessao
class Aluno(models.Model):
nome = models.CharField('Nome', max_length=60)
matricula = models.CharField('matricula', max_length=11, unique=True)
turma = models.ForeignKey(Turma, on_delete=False, blank=True)
def __str__(self):
return self.matricula + ' - ' + self.nome
class Meta:
verbose_name = 'Aluno'
verbose_name_plural = 'Alunos'
ordering = ['nome', 'matricula']
def get_absolute_url(self):
return reverse('aluno-turma-list', kwargs={'pk': self.pk})
class AlunoSessao(models.Model):
aluno = models.ForeignKey(Aluno, on_delete= models.CASCADE)
sessao = models.ForeignKey(Sessao, on_delete=models.CASCADE)
media = models.DecimalField('Média', decimal_places=2, max_digits=12, validators=[MinValueValidator(Decimal('0.01'))])
descricao_alterativas = models.CharField('Descrição de Alterativas', max_length=60, blank=True)
descricao_pontuacao = models.CharField('Descrição de Pontuação', max_length=60, blank=True)
def __str__(self):
return "{} - {}".format(self.aluno.nome, self.sessao.questionario.nome)
class Meta:
verbose_name = 'AlunoSessao'
verbose_name_plural = 'AlunosSessao'
ordering = ['aluno', 'sessao','media']
def get_absolute_url(self):
return reverse('aluno-turma-list', kwargs={'pk': self.sessao.turma.pk})
| true
| true
|
1c4aa98aa4f83548da28cc3f7672a27cd6f68e46
| 137
|
py
|
Python
|
buildscripts/condarecipe/run_test.py
|
meawoppl/numba
|
bb8df0aee99133c6d52465ae9f9df2a7996339f3
|
[
"BSD-2-Clause"
] | null | null | null |
buildscripts/condarecipe/run_test.py
|
meawoppl/numba
|
bb8df0aee99133c6d52465ae9f9df2a7996339f3
|
[
"BSD-2-Clause"
] | null | null | null |
buildscripts/condarecipe/run_test.py
|
meawoppl/numba
|
bb8df0aee99133c6d52465ae9f9df2a7996339f3
|
[
"BSD-2-Clause"
] | null | null | null |
"""Conda recipe smoke test: run the numba test suite and report the version."""
import sys

import numba

if numba.test():
    print('numba.__version__: %s' % numba.__version__)
else:
    print("Test failed")
    sys.exit(1)
| 19.571429
| 50
| 0.693431
|
import sys
import numba
if not numba.test():
print("Test failed")
sys.exit(1)
print('numba.__version__: %s' % numba.__version__)
| true
| true
|
1c4aa9eedb3b1c6e2e7a3e567eb7ad686eaa3237
| 95
|
py
|
Python
|
src/learndash/api_resources/__init__.py
|
MarkMacDon/learndash-python
|
a3fbfc45567a524b80c732d735f2ae101119f2e4
|
[
"MIT"
] | null | null | null |
src/learndash/api_resources/__init__.py
|
MarkMacDon/learndash-python
|
a3fbfc45567a524b80c732d735f2ae101119f2e4
|
[
"MIT"
] | 1
|
2021-05-06T19:01:24.000Z
|
2021-05-06T19:01:24.000Z
|
src/learndash/api_resources/__init__.py
|
MarkMacDon/learndash-python
|
a3fbfc45567a524b80c732d735f2ae101119f2e4
|
[
"MIT"
] | 2
|
2021-05-05T22:45:04.000Z
|
2021-07-24T08:47:02.000Z
|
from learndash.api_resources.course import Course
from learndash.api_resources.user import User
| 47.5
| 49
| 0.884211
|
from learndash.api_resources.course import Course
from learndash.api_resources.user import User
| true
| true
|
1c4aab8843fa67ec04a2e6b2e8f6b2aacade81f0
| 1,013
|
py
|
Python
|
test/test_allele_reads.py
|
carnivorouspeanut/isovar_comp
|
74fcc12ef52d08eb4cfa85bdcda8903970babbda
|
[
"Apache-2.0"
] | null | null | null |
test/test_allele_reads.py
|
carnivorouspeanut/isovar_comp
|
74fcc12ef52d08eb4cfa85bdcda8903970babbda
|
[
"Apache-2.0"
] | null | null | null |
test/test_allele_reads.py
|
carnivorouspeanut/isovar_comp
|
74fcc12ef52d08eb4cfa85bdcda8903970babbda
|
[
"Apache-2.0"
] | null | null | null |
from isovar.allele_reads import AlleleRead
from isovar.locus_reads import LocusRead
from nose.tools import eq_
def make_read_at_locus(prefix, alt, suffix, base_quality=30, name="dummy"):
    """Build a LocusRead whose sequence is prefix + alt + suffix.

    Reference positions are 1-based and contiguous, every base receives
    *base_quality*, and the before/after indices bracket the alt allele.
    """
    dummy_sequence = prefix + alt + suffix
    return LocusRead(
        # Bug fix: the caller-supplied *name* was ignored ("dummy" was
        # hard-coded); default behavior is unchanged.
        name=name,
        sequence=dummy_sequence,
        reference_positions=list(range(1, len(dummy_sequence) + 1)),
        quality_scores=[base_quality] * len(dummy_sequence),
        base0_read_position_before_variant=len(prefix) - 1,
        base0_read_position_after_variant=len(prefix) + len(alt),
    )
def test_allele_read_from_single_read_at_locus_trim_N_nucleotides():
    """N bases flanking the variant should be trimmed from prefix and suffix."""
    locus_read = make_read_at_locus(prefix="NCCN", alt="A", suffix="TNNA")
    result = AlleleRead.from_locus_read(locus_read, n_ref=1)
    print(result)
    eq_(result, AlleleRead(prefix="", allele="A", suffix="T", name="dummy"))
# Allow running this module directly as a standalone smoke test.
if __name__ == "__main__":
    test_allele_read_from_single_read_at_locus_trim_N_nucleotides()
| 38.961538
| 77
| 0.740375
|
from isovar.allele_reads import AlleleRead
from isovar.locus_reads import LocusRead
from nose.tools import eq_
def make_read_at_locus(prefix, alt, suffix, base_quality=30, name="dummy"):
dummy_sequence = prefix + alt + suffix
return LocusRead(
name="dummy",
sequence=dummy_sequence,
reference_positions=list(range(1, len(dummy_sequence) + 1)),
quality_scores=[base_quality] * len(dummy_sequence),
base0_read_position_before_variant=len(prefix) - 1,
base0_read_position_after_variant=len(prefix) + len(alt),
)
def test_allele_read_from_single_read_at_locus_trim_N_nucleotides():
read_at_locus = make_read_at_locus(prefix="NCCN", alt="A", suffix="TNNA")
allele_read = AlleleRead.from_locus_read(read_at_locus, n_ref=1)
print(allele_read)
expected = AlleleRead(prefix="", allele="A", suffix="T", name="dummy")
eq_(allele_read, expected)
if __name__ == "__main__":
test_allele_read_from_single_read_at_locus_trim_N_nucleotides()
| true
| true
|
1c4aad7d61342eb43c7ab5a6792d71b925c81eb8
| 4,918
|
py
|
Python
|
models/pointnet_cls_rot_transfer.py
|
OmidPoursaeed/Self_supervised_Learning_Point_Clouds
|
4f684cc761347f329eb967823f80522a8a3aedc0
|
[
"MIT"
] | 11
|
2020-12-16T16:27:36.000Z
|
2021-12-01T04:07:56.000Z
|
models/pointnet_cls_rot_transfer.py
|
OmidPoursaeed/Self_supervised_Learning_Point_Clouds
|
4f684cc761347f329eb967823f80522a8a3aedc0
|
[
"MIT"
] | 2
|
2021-02-09T11:35:01.000Z
|
2021-08-06T01:39:42.000Z
|
models/pointnet_cls_rot_transfer.py
|
OmidPoursaeed/Self_supervised_Learning_Point_Clouds
|
4f684cc761347f329eb967823f80522a8a3aedc0
|
[
"MIT"
] | 1
|
2021-08-05T14:07:51.000Z
|
2021-08-05T14:07:51.000Z
|
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
    """Create the (point cloud, label) placeholders for one training batch."""
    points = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.placeholder(tf.int32, shape=(batch_size))
    return points, labels
def get_model(point_cloud, is_training, is_training_base, bn_decay=None, use_input_trans=True, use_feature_trans=True, num_classes=40):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    # NOTE(review): is_training_base is unused in this body -- presumably kept
    # so the signature matches sibling transfer models; confirm before removing.
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    if use_input_trans:
        # Learn a 3x3 spatial transform and align the input cloud with it.
        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)
    else:
        point_cloud_transformed = point_cloud
    # Add a trailing channel axis (BxNx3x1) so conv2d can treat points as an image.
    input_image = tf.expand_dims(point_cloud_transformed, -1)
    # Per-point feature extraction; variables live under 'pointnet_cls_rotation'
    # so weights pretrained on the rotation task can be restored by scope name.
    with tf.variable_scope('pointnet_cls_rotation'):
        net = tf_util.conv2d(input_image, 64, [1,3],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv2', bn_decay=bn_decay)
    if use_feature_trans:
        # Learn a 64x64 feature-space transform; exposed via end_points so the
        # loss can regularize it.
        with tf.variable_scope('transform_net2') as sc:
            transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])
    else:
        net_transformed = net
    with tf.variable_scope('pointnet_cls_rotation'):
        net = tf_util.conv2d(net_transformed, 64, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv3', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 128, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv4', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 1024, [1,1],
                             padding='VALID', stride=[1,1],
                             bn=True, is_training=is_training,
                             scope='conv5', bn_decay=bn_decay)
        # Symmetric function: max pooling over all points -> global feature.
        net = tf_util.max_pool2d(net, [num_point,1],
                                 padding='VALID', scope='maxpool')
    net = tf.reshape(net, [batch_size, -1])
    # Retrained classifier head; 'transfer/'-scoped layers are trained from
    # scratch on the target task.
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
                                  scope='transfer/fc3', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='transfer/dp3')
    net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='transfer/fc4')
    return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
    """Mean softmax cross-entropy over the batch.

    pred: B x NUM_CLASSES logits; label: B integer class ids.
    end_points and reg_weight are kept for interface compatibility; the
    orthogonality regularizer on the feature transform is disabled here.
    """
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=pred, labels=label)
    classify_loss = tf.reduce_mean(per_example)
    tf.summary.scalar('classify loss', classify_loss)
    return classify_loss
if __name__=='__main__':
    # Smoke test: build the graph for one dummy batch and print the outputs.
    with tf.Graph().as_default():
        dummy_batch = tf.zeros((32,1024,3))
        print(get_model(dummy_batch, tf.constant(True)))
| 44.709091
| 135
| 0.614884
|
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, is_training_base, bn_decay=None, use_input_trans=True, use_feature_trans=True, num_classes=40):
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
if use_input_trans:
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
else:
point_cloud_transformed = point_cloud
input_image = tf.expand_dims(point_cloud_transformed, -1)
with tf.variable_scope('pointnet_cls_rotation'):
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
if use_feature_trans:
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
net_transformed = tf.expand_dims(net_transformed, [2])
else:
net_transformed = net
with tf.variable_scope('pointnet_cls_rotation'):
net = tf_util.conv2d(net_transformed, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp2')
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
scope='transfer/fc3', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='transfer/dp3')
net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='transfer/fc4')
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
| true
| true
|
1c4aae5426137d7eb9f5b72856c25b9d57d53549
| 5,219
|
py
|
Python
|
seaborn/algorithms.py
|
jwillis0720/seaborn
|
0dc93d01c78370e91ebdf72c888719fbbc6d1085
|
[
"BSD-3-Clause"
] | null | null | null |
seaborn/algorithms.py
|
jwillis0720/seaborn
|
0dc93d01c78370e91ebdf72c888719fbbc6d1085
|
[
"BSD-3-Clause"
] | null | null | null |
seaborn/algorithms.py
|
jwillis0720/seaborn
|
0dc93d01c78370e91ebdf72c888719fbbc6d1085
|
[
"BSD-3-Clause"
] | null | null | null |
"""Algorithms to support fitting routines in seaborn plotting functions."""
import numbers
import numpy as np
import warnings
from math import sqrt
def wls_confidence_interval(data, z=1.96):
    """Wilson score confidence interval for the success rate in *data*.

    data : 1-dimensional sequence of truthy/falsy outcomes (1's or 0's)
    z : z-score; the default 1.96 yields a 95% confidence interval
    """
    n = len(data)
    # Observed success proportion: truthy entries count as successes.
    p = sum(1 for outcome in data if outcome) / n
    z2 = z * z
    denom = 1 + z2 / n
    center = p + z2 / (2 * n)
    spread = sqrt((p * (1 - p) + z2 / (4 * n)) / n)
    return ((center - z * spread) / denom, (center + z * spread) / denom)
def bootstrap(*args, **kwargs):
    """Resample one or more equal-length arrays with replacement.

    Positional arguments are arrays resampled along the first axis and
    passed to a summary function.

    Keyword arguments:
        n_boot : int, number of resamples (default 10000)
        axis : forwarded to ``func`` as a keyword when not None
        units : sampling-unit ids; resample units, then rows within units
        func : callable or array-method name applied to each resample
               (default np.mean)
        seed : seed for the random number generator (``random_seed`` is a
               deprecated alias)

    Returns
    -------
    array of ``n_boot`` aggregated statistic values
    """
    # All inputs must be the same length along the resampled axis.
    if len(np.unique(list(map(len, args)))) > 1:
        raise ValueError("All input arrays must have the same length")
    n = len(args[0])

    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", np.mean)
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)

    # Deprecated alias kept for backwards compatibility.
    random_seed = kwargs.get("random_seed", None)
    if random_seed is not None:
        warnings.warn(
            "`random_seed` has been renamed to `seed` and will be removed")
    seed = kwargs.get("seed", random_seed)

    func_kwargs = {} if axis is None else {"axis": axis}

    rng = _handle_random_seed(seed)

    args = [np.asarray(a) for a in args]
    if units is not None:
        units = np.asarray(units)

    # ``func`` may name a method on the resampled array instead of a callable.
    if isinstance(func, str):
        method_name = func

        def f(x):
            return getattr(x, method_name)()
    else:
        f = func

    # numpy >= 1.17 generators expose ``integers``; RandomState uses ``randint``.
    try:
        integers = rng.integers
    except AttributeError:
        integers = rng.randint

    if units is not None:
        return _structured_bootstrap(args, n_boot, units, f,
                                     func_kwargs, integers)

    boot_dist = []
    for _ in range(int(n_boot)):
        idx = integers(0, n, n, dtype=np.intp)  # intp is indexing dtype
        resampled = [a.take(idx, axis=0) for a in args]
        boot_dist.append(f(*resampled, **func_kwargs))
    return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units, dtype=np.intp)
sample = [[a[i] for i in resampler] for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
"""Given a seed in one of many formats, return a random number generator.
Generalizes across the numpy 1.17 changes, preferring newer functionality.
"""
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
# General interface for seeding on numpy >= 1.17
rng = np.random.default_rng(seed)
except AttributeError:
# We are on numpy < 1.17, handle options ourselves
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
err = "{} cannot be used to seed the randomn number generator"
raise ValueError(err.format(seed))
return rng
| 34.335526
| 95
| 0.634796
|
import numbers
import numpy as np
import warnings
from math import sqrt
def wls_confidence_interval(data, z=1.96):
n = len(data)
p = len([i for i in data if i]) / n
denominator = 1 + z ** 2 / n
centre_adjusted_probability = p + z * z / (2 * n)
adjusted_standard_deviation = sqrt((p * (1 - p) + z * z / (4 * n)) / n)
lower_bound = (centre_adjusted_probability - z * adjusted_standard_deviation) / denominator
upper_bound = (centre_adjusted_probability + z * adjusted_standard_deviation) / denominator
return (lower_bound, upper_bound)
def bootstrap(*args, **kwargs):
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
random_seed = kwargs.get("random_seed", None)
if random_seed is not None:
msg = "`random_seed` has been renamed to `seed` and will be removed"
warnings.warn(msg)
seed = kwargs.get("seed", random_seed)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
rng = _handle_random_seed(seed)
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
if isinstance(func, str):
def f(x):
return getattr(x, func)()
else:
f = func
try:
integers = rng.integers
except AttributeError:
integers = rng.randint
if units is not None:
return _structured_bootstrap(args, n_boot, units, f, func_kwargs, integers)
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n, n, dtype=np.intp)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units, dtype=np.intp)
sample = [[a[i] for i in resampler] for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
rng = np.random.default_rng(seed)
except AttributeError:
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
err = "{} cannot be used to seed the randomn number generator"
raise ValueError(err.format(seed))
return rng
| true
| true
|
1c4aaf705158fbe0e91ecb297f20bdbdacfd197c
| 95
|
py
|
Python
|
siga/prospeccao/apps.py
|
JenniferAmaral/DjangoSpike
|
768237bb0f3cffe7bbdbcab38a8bae6faa78e495
|
[
"Apache-2.0"
] | null | null | null |
siga/prospeccao/apps.py
|
JenniferAmaral/DjangoSpike
|
768237bb0f3cffe7bbdbcab38a8bae6faa78e495
|
[
"Apache-2.0"
] | 2
|
2021-03-19T03:21:17.000Z
|
2021-03-30T13:24:07.000Z
|
siga/prospeccao/apps.py
|
JenniferAmaral/DjangoSpike
|
768237bb0f3cffe7bbdbcab38a8bae6faa78e495
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class ProspeccaoConfig(AppConfig):
    """Django app configuration for the ``prospeccao`` application."""

    name = 'prospeccao'
| 15.833333
| 34
| 0.768421
|
from django.apps import AppConfig
class ProspeccaoConfig(AppConfig):
name = 'prospeccao'
| true
| true
|
1c4aaf84e496d0feb65c92950f3a798d787c4fe0
| 4,776
|
py
|
Python
|
app/storage.py
|
JB-Tellez/flask-hello-world
|
7fce8dea357a847c90bce095b2bfc43036903e4d
|
[
"MIT"
] | null | null | null |
app/storage.py
|
JB-Tellez/flask-hello-world
|
7fce8dea357a847c90bce095b2bfc43036903e4d
|
[
"MIT"
] | null | null | null |
app/storage.py
|
JB-Tellez/flask-hello-world
|
7fce8dea357a847c90bce095b2bfc43036903e4d
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request
from flask_cors import CORS
from os import environ
import requests
from datetime import datetime
def get_location():
    """Geocode the ``data`` query parameter via the Google Geocoding API."""
    query = request.args.get('data')
    api_key = environ.get('GEOCODE_API_KEY')
    URL = f'https://maps.googleapis.com/maps/api/geocode/json?address={query}&key={api_key}'
    print('URL', URL)
    results = requests.get(URL).json()
    print('locations', results)
    # Use the first (best) geocoding match.
    return jsonify(Location(query, results['results'][0]).serialize())
class Location:
    """Flattened geocoding result: original query, formatted address, lat/lng."""

    def __init__(self, query, info):
        geometry = info['geometry']['location']
        self.search_query = query
        self.formatted_query = info['formatted_address']
        self.latitude = geometry['lat']
        self.longitude = geometry['lng']

    def serialize(self):
        """Return all public attributes as a dict."""
        return vars(self)
def get_weather():
    """Return daily Dark Sky forecasts for the requested lat/lng."""
    api_key = environ.get('WEATHER_API_KEY')
    latitude = request.args['data[latitude]']
    longitude = request.args['data[longitude]']
    url = f'https://api.darksky.net/forecast/{api_key}/{latitude},{longitude}'
    payload = requests.get(url).json()
    return jsonify([Forecast(day).serialize()
                    for day in payload['daily']['data']])
class Forecast:
    """One day of forecast data: summary text plus a human-formatted date."""

    def __init__(self, info):
        self.forecast = info['summary']
        # API supplies the timestamp as epoch seconds.
        self.time = datetime.utcfromtimestamp(int(info['time'])).strftime(
            "%A %B %d, %Y")

    def serialize(self):
        """Return all public attributes as a dict."""
        return vars(self)
def get_events():
    """Return Eventbrite events near the formatted address."""
    api_key = environ.get('EVENTBRITE_API_KEY')
    address = request.args['data[formatted_query]']
    url = f'https://www.eventbriteapi.com/v3/events/search?token={api_key}&location.address={address}'
    payload = requests.get(url).json()
    return jsonify([Event(item).serialize() for item in payload['events']])
class Event:
    """One Eventbrite event: link, name, formatted start date, summary."""

    def __init__(self, info):
        self.link = info['url']
        self.name = info['name']['text']
        # 'local' start times arrive as ISO-8601 strings.
        start = datetime.fromisoformat(info['start']['local'])
        self.event_date = start.strftime("%A %B %d, %Y")
        self.summary = info['summary']

    def serialize(self):
        """Return all public attributes as a dict."""
        return vars(self)
def get_yelps():
    """Return Yelp businesses near the search location."""
    auth_token = environ.get('YELP_API_KEY')
    location = request.args['data[search_query]']
    url = f'https://api.yelp.com/v3/businesses/search?location={location}'
    headers = {'Authorization': 'Bearer ' + auth_token}
    payload = requests.get(url, headers=headers).json()
    return jsonify([Yelp(b).serialize() for b in payload['businesses']])
class Yelp:
    """One Yelp business: name, image URL, rating, and page URL."""

    def __init__(self, info):
        # Copy the fields of interest straight off the API payload.
        for key in ('name', 'image_url', 'rating', 'url'):
            setattr(self, key, info[key])

    def serialize(self):
        """Return all public attributes as a dict."""
        return vars(self)
def get_movies():
    """Return TMDB movies matching the search query."""
    api_key = environ.get('MOVIE_API_KEY')
    query = request.args['data[search_query]']
    url = f'https://api.themoviedb.org/3/search/movie/?api_key={api_key}&language=en-US&page=1&query={query}'
    payload = requests.get(url).json()
    return jsonify([Movie(m).serialize() for m in payload['results']])
class Movie:
    """One TMDB movie result with a fully-qualified poster image URL."""

    def __init__(self, info):
        self.title = info['title']
        self.overview = info['overview']
        self.average_votes = info['vote_average']
        self.total_votes = info['vote_count']
        self.popularity = info['popularity']
        self.released_on = info['release_date']
        # poster_path may be None; fall back to an empty suffix.
        poster = info['poster_path'] or ''
        self.image_url = 'https://image.tmdb.org/t/p/w500' + poster

    def serialize(self):
        """Return all public attributes as a dict."""
        return vars(self)
def get_trails():
    """Return Hiking Project trails within 200 miles of the given lat/lng."""
    api_key = environ.get('TRAIL_API_KEY')
    latitude = request.args['data[latitude]']
    longitude = request.args['data[longitude]']
    url = f'https://www.hikingproject.com/data/get-trails?lat={latitude}&lon={longitude}&maxDistance=200&key={api_key}'
    payload = requests.get(url).json()
    return jsonify([Trail(t).serialize() for t in payload['trails']])
class Trail:
    """One Hiking Project trail; splits conditionDate into date/time parts."""

    def __init__(self, info):
        self.name = info['name']
        self.location = info['location']
        self.length = info['length']
        self.stars = info['stars']
        self.star_votes = info['starVotes']
        self.summary = info['summary']
        self.trail_url = info['url']
        self.conditions = info['conditionDetails']
        stamp = info['conditionDate']
        # NOTE(review): slicing [12:] skips index 11, dropping the first hour
        # digit of "YYYY-MM-DD hh:mm:ss" -- looks like an off-by-one, but the
        # original behavior is preserved here.
        self.condition_date = stamp[0:10]
        self.condition_time = stamp[12:]

    def serialize(self):
        """Return all public attributes as a dict."""
        return vars(self)
| 24.618557
| 119
| 0.629606
|
from flask import Flask, jsonify, request
from flask_cors import CORS
from os import environ
import requests
from datetime import datetime
def get_location():
query = request.args.get('data')
api_key = environ.get('GEOCODE_API_KEY')
URL = f'https://maps.googleapis.com/maps/api/geocode/json?address={query}&key={api_key}'
print('URL', URL)
locations = requests.get(URL).json()
print('locations', locations)
location = Location(query, locations['results'][0])
return jsonify(location.serialize())
class Location:
def __init__(self, query, info):
self.search_query = query
self.formatted_query = info['formatted_address']
self.latitude = info['geometry']['location']['lat']
self.longitude = info['geometry']['location']['lng']
def serialize(self):
return vars(self)
def get_weather():
api_key = environ.get('WEATHER_API_KEY')
latitude = request.args['data[latitude]']
longitude = request.args['data[longitude]']
url = f'https://api.darksky.net/forecast/{api_key}/{latitude},{longitude}'
forecasts = requests.get(url).json()
dailies = [Forecast(daily).serialize()
for daily in forecasts['daily']['data']]
return jsonify(dailies)
class Forecast:
def __init__(self, info):
self.forecast = info['summary']
epoch_seconds = int(info['time'])
self.time = datetime.utcfromtimestamp(
epoch_seconds).strftime("%A %B %d, %Y")
def serialize(self):
return vars(self)
def get_events():
api_key = environ.get('EVENTBRITE_API_KEY')
address = request.args['data[formatted_query]']
url = f'https://www.eventbriteapi.com/v3/events/search?token={api_key}&location.address={address}'
event_data = requests.get(url).json()
events = [Event(eventInfo).serialize()
for eventInfo in event_data['events']]
return jsonify(events)
class Event:
def __init__(self, info):
self.link = info['url']
self.name = info['name']['text']
self.event_date = datetime.fromisoformat(
info['start']['local']).strftime("%A %B %d, %Y")
self.summary = info['summary']
def serialize(self):
return vars(self)
def get_yelps():
    """Fetch Yelp businesses for the searched location."""
    bearer = environ.get('YELP_API_KEY')
    place = request.args['data[search_query]']
    search_url = f'https://api.yelp.com/v3/businesses/search?location={place}'
    headers = {'Authorization': 'Bearer ' + bearer}
    payload = requests.get(search_url, headers=headers).json()
    return jsonify([Yelp(biz).serialize() for biz in payload['businesses']])
class Yelp:
    """A Yelp business listing relevant to the searched location."""

    def __init__(self, info):
        # Copy the fields we expose verbatim from the API payload.
        for field in ('name', 'image_url', 'rating', 'url'):
            setattr(self, field, info[field])

    def serialize(self):
        return vars(self)
def get_movies():
    """Search TMDB for movies matching the location query."""
    key = environ.get('MOVIE_API_KEY')
    term = request.args['data[search_query]']
    search_url = ('https://api.themoviedb.org/3/search/movie/'
                  f'?api_key={key}&language=en-US&page=1&query={term}')
    payload = requests.get(search_url).json()
    return jsonify([Movie(hit).serialize() for hit in payload['results']])
class Movie:
    """A TMDB movie search result."""

    def __init__(self, info):
        # (attribute name, TMDB payload key)
        for attr, key in (('title', 'title'), ('overview', 'overview'),
                          ('average_votes', 'vote_average'),
                          ('total_votes', 'vote_count'),
                          ('popularity', 'popularity'),
                          ('released_on', 'release_date')):
            setattr(self, attr, info[key])
        # poster_path may be None; prefix the TMDB image CDN base.
        self.image_url = ('https://image.tmdb.org/t/p/w500'
                          + (info['poster_path'] or ''))

    def serialize(self):
        return vars(self)
def get_trails():
    """Fetch hiking trails within 200 miles of the given coordinates."""
    key = environ.get('TRAIL_API_KEY')
    lat = request.args['data[latitude]']
    lng = request.args['data[longitude]']
    search_url = ('https://www.hikingproject.com/data/get-trails'
                  f'?lat={lat}&lon={lng}&maxDistance=200&key={key}')
    payload = requests.get(search_url).json()
    return jsonify([Trail(info).serialize() for info in payload['trails']])
class Trail:
    """A hiking trail near the searched location (Hiking Project API)."""

    def __init__(self, info):
        # (attribute name, API payload key)
        for attr, key in (('name', 'name'), ('location', 'location'),
                          ('length', 'length'), ('stars', 'stars'),
                          ('star_votes', 'starVotes'),
                          ('summary', 'summary'), ('trail_url', 'url'),
                          ('conditions', 'conditionDetails')):
            setattr(self, attr, info[key])
        stamp = info['conditionDate']
        self.condition_date = stamp[0:10]
        # NOTE(review): [12:] also drops index 11; if the stamp is
        # "YYYY-MM-DD HH:MM:SS" this loses the hour's first digit — confirm
        # the intended slice against real conditionDate values.
        self.condition_time = stamp[12:]

    def serialize(self):
        return vars(self)
| true
| true
|
1c4ab0a1b89ee3ce8f7c20af7b3a0cf0e50ea511
| 1,530
|
py
|
Python
|
server/models/portfolio/risk.py
|
lluo5779/Robo-Adviser
|
43aa4b73bfc96e55ed664328330a930975596124
|
[
"MIT"
] | null | null | null |
server/models/portfolio/risk.py
|
lluo5779/Robo-Adviser
|
43aa4b73bfc96e55ed664328330a930975596124
|
[
"MIT"
] | 3
|
2021-03-31T19:24:03.000Z
|
2021-12-13T20:26:39.000Z
|
server/models/portfolio/risk.py
|
lluo5779/Robo-Adviser
|
43aa4b73bfc96e55ed664328330a930975596124
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
def risk_prefs(horizon, aversion, cardinal, return_target, l, mu_bl1, mu_bl2, cov_bl1):
    """Translate investor inputs into optimizer risk preferences.

    Parameters
    ----------
    horizon : numeric or None
        Investment horizon in years; ``None`` defaults to 10.
    aversion : int
        Risk-aversion bucket; 1 ranks assets by expected return, 2 by
        return-to-variance, anything else by lowest variance with the last
        10 assets excluded (apparent convention — confirm with callers).
    cardinal : int
        Number of assets to select in the cardinality mask.
    return_target : float
        Investor's return target; targets above the "safe" average halve
        the risk multiplier.
    l : float
        Base risk-penalty multiplier.
    mu_bl1, mu_bl2 : pandas.DataFrame
        Single-column expected-return estimates for the two periods.
    cov_bl1 : pandas.DataFrame
        First-period covariance matrix.

    Returns
    -------
    tuple
        ``((alpha, alpha * 1.02), (risk_mul, turn_mul), exposures,
        cardinality)`` where ``cardinality`` is a 0/1 selection mask.
    """
    if horizon is None:
        horizon = 10
    alpha = 0.05
    # "Safe" benchmark: mean expected return averaged over both periods.
    safe_target = float(((mu_bl1 + mu_bl2) / 2).mean())
    # Per-asset variances (diagonal of the covariance). Renamed from `vars`,
    # which shadowed the builtin of the same name.
    variances = pd.DataFrame(np.diag(cov_bl1), index=cov_bl1.index)
    risk_mul, turn_mul = l, 1
    if horizon <= 1:
        # Short horizon: penalize risk more, allow less turnover penalty.
        risk_mul *= 2
        turn_mul *= 0.25
        alpha = 0.20
    elif horizon <= 5:
        risk_mul *= 0.75
        alpha = 0.10
    else:
        risk_mul *= 0.25
        turn_mul *= 2
    print("RISK PREFERENCES\n\n\n")
    # Ambitious targets above the safe benchmark relax the risk penalty.
    if return_target > safe_target:
        risk_mul *= 0.5
    if aversion == 1:
        # Select the `cardinal` assets with the highest expected returns.
        cardinality = list(np.where(mu_bl1.rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
        exposures = (0.02, 0.30)
    elif aversion == 2:
        # Select by highest return-to-variance ratio.
        cardinality = list(np.where(pd.DataFrame(np.divide(mu_bl1.values, variances.values).ravel()).rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
        exposures = (0.04, 0.20)
    else:
        # Select the lowest-variance assets, excluding the last 10 assets
        # entirely (their mask entries are forced to 0).
        variances = pd.DataFrame(np.diag(cov_bl1.iloc[:-10, :-10]), index=mu_bl1[:-10].index)
        cardinality = list(np.where(variances.rank(ascending=True) > (len(mu_bl1[:-10]) - cardinal), 1, 0).ravel()) + [0] * 10
        exposures = (0.05, 0.15)
    risk_mul *= aversion
    return (alpha, alpha * 1.02), (risk_mul, turn_mul), exposures, cardinality
| 28.333333
| 143
| 0.594118
|
import numpy as np
import pandas as pd
def risk_prefs(horizon, aversion, cardinal, return_target, l, mu_bl1, mu_bl2, cov_bl1):
    """Translate investor inputs (horizon, aversion, target) into optimizer
    risk preferences.

    Returns ``((alpha, alpha * 1.02), (risk_mul, turn_mul), exposures,
    cardinality)`` where ``cardinality`` is a 0/1 asset-selection mask.
    """
    if horizon is None:
        horizon = 10
    alpha = 0.05
    # "Safe" benchmark: mean expected return averaged over both periods.
    safe_target = float(((mu_bl1 + mu_bl2) / 2).mean())
    # Per-asset variances (diagonal of the covariance).
    # NOTE: the name `vars` shadows the builtin of the same name.
    vars = pd.DataFrame(np.diag(cov_bl1), index=cov_bl1.index)
    risk_mul, turn_mul = l, 1
    # Shorter horizons penalize risk more heavily and turnover less.
    if horizon <= 1:
        risk_mul *= 2
        turn_mul *= 0.25
        alpha = 0.20
    elif horizon <= 5:
        risk_mul *= 0.75
        turn_mul *= 1
        alpha = 0.10
    else:
        risk_mul *= 0.25
        turn_mul *= 2
    print("RISK PREFERENCES\n\n\n")
    # Targets above the safe benchmark relax the risk penalty.
    if return_target > safe_target:
        risk_mul *= 0.5
    if aversion == 1:
        # Pick the `cardinal` assets with the highest expected returns.
        cardinality = list(np.where(mu_bl1.rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
        exposures = (0.02, 0.30)
    elif aversion == 2:
        # Pick by highest return-to-variance ratio.
        cardinality = list(np.where(pd.DataFrame(np.divide(mu_bl1.values, vars.values).ravel()).rank() > len(mu_bl1) - cardinal, 1, 0).ravel())
        exposures = (0.04, 0.20)
    else:
        # Pick lowest-variance assets; the last 10 assets are excluded
        # (their mask entries are forced to 0).
        vars = pd.DataFrame(np.diag(cov_bl1.iloc[:-10, :-10]), index=mu_bl1[:-10].index)
        cardinality = list(np.where(vars.rank(ascending=True) > (len(mu_bl1[:-10])- cardinal), 1, 0).ravel()) + [0]*10
        exposures = (0.05, 0.15)
    risk_mul *= aversion
    return (alpha, alpha*1.02), (risk_mul, turn_mul), exposures, cardinality
| true
| true
|
1c4ab187d643a0593fe5cdf9d597191769ebd87f
| 2,243
|
py
|
Python
|
scripts/recalc_afacts.py
|
EvictionLab/eviction-lab-etl
|
d94a7e52de8890c9371518b5020d1a6aa3a5fc2e
|
[
"MIT"
] | 9
|
2018-04-07T17:52:49.000Z
|
2020-07-06T01:52:21.000Z
|
scripts/recalc_afacts.py
|
EvictionLab/eviction-lab-etl
|
d94a7e52de8890c9371518b5020d1a6aa3a5fc2e
|
[
"MIT"
] | 56
|
2017-09-11T21:19:13.000Z
|
2020-01-06T18:57:23.000Z
|
scripts/recalc_afacts.py
|
EvictionLab/eviction-lab-etl
|
d94a7e52de8890c9371518b5020d1a6aa3a5fc2e
|
[
"MIT"
] | 1
|
2019-11-04T18:56:45.000Z
|
2019-11-04T18:56:45.000Z
|
"""
Recalculates allocation factors for a given geography level and geographic
correspondence file.
Arguments
----------
argv[1] : str
The geography level to create weights for (block-groups or tracts)
argv[2] : str
The file path to the geography correspondence file
generated from http://mcdc.missouri.edu/applications/geocorr2000.html
Outputs
-------
str
a string of CSV data containing the weights
Output has header: GEOID00,pop2k,afact
"""
import sys
import pandas as pd
if __name__ == '__main__':
    # Block-level 2000 geographic correspondence data; string dtypes keep
    # leading zeros in FIPS codes.
    geocorr_df = pd.read_csv(
        sys.argv[2],
        dtype={
            'county': 'object',
            'tract': 'object',
            'bg': 'object',
            'block': 'object',
            'pop2k': 'float64'
        })
    # Block-level GEOID: county + tract (dots removed) + block.
    # regex=False: the dot must be removed literally. Under pandas versions
    # where str.replace defaults to regex=True, '.' would match every
    # character and wipe out the tract code entirely.
    geocorr_df['GEOID00'] = (
        geocorr_df['county'] + geocorr_df['tract'].str.replace(
            '.', '', regex=False) + geocorr_df['block'])
    # GEOID at the requested geography level (tracts or block groups).
    if sys.argv[1] == 'tracts':
        geocorr_df['GEOID'] = (
            geocorr_df['county'] + geocorr_df['tract'].str.replace(
                '.', '', regex=False))
        # Slice the last 4 characters off of a block GEOID to get its tract.
        geoid_slice = -4
    elif sys.argv[1] == 'block-groups':
        geocorr_df['GEOID'] = (
            geocorr_df['county'] + geocorr_df['tract'].str.replace(
                '.', '', regex=False) + geocorr_df['bg'])
        # Slice the last 3 characters off of a block GEOID to get its group.
        geoid_slice = -3
    else:
        raise ValueError('Invalid geography string supplied')
    # Allocation factor = block pop2k / parent geography pop2k total.
    pop2k_totals = pd.DataFrame(geocorr_df.groupby('GEOID')['pop2k'].sum()).reset_index()
    pop2k_totals.rename(columns={'pop2k': 'total_pop_00'}, inplace=True)
    geocorr_df = geocorr_df.merge(pop2k_totals, on='GEOID', how='left')
    del pop2k_totals
    geocorr_df['afact'] = geocorr_df['pop2k'] / geocorr_df['total_pop_00']
    # Emit GEOID00,pop2k,afact as CSV on stdout.
    output_df = geocorr_df[['GEOID00', 'pop2k', 'afact']].copy()
    output_df.to_csv(sys.stdout, index=False)
| 32.042857
| 89
| 0.628622
|
import sys
import pandas as pd
if __name__ == '__main__':
    # Block-level 2000 geographic correspondence data; string dtypes keep
    # leading zeros in FIPS codes.
    geocorr_df = pd.read_csv(
        sys.argv[2],
        dtype={
            'county': 'object',
            'tract': 'object',
            'bg': 'object',
            'block': 'object',
            'pop2k': 'float64'
        })
    # Block-level GEOID: county + tract (dots removed) + block.
    # NOTE(review): str.replace('.', '') relies on the pandas default for
    # `regex`; where that default is True, '.' matches every character and
    # would empty the tract code — confirm pandas version or pass
    # regex=False explicitly.
    geocorr_df['GEOID00'] = (
        geocorr_df['county'] + geocorr_df['tract'].str.replace(
            '.', '') + geocorr_df['block'])
    # GEOID at the requested geography level (tracts or block groups).
    if sys.argv[1] == 'tracts':
        geocorr_df['GEOID'] = (
            geocorr_df['county'] + geocorr_df['tract'].str.replace(
                '.', ''))
        # Slice the last 4 characters off of a block GEOID to get its tract.
        geoid_slice = -4
    elif sys.argv[1] == 'block-groups':
        geocorr_df['GEOID'] = (
            geocorr_df['county'] + geocorr_df['tract'].str.replace(
                '.', '') + geocorr_df['bg'])
        # Slice the last 3 characters off of a block GEOID to get its group.
        geoid_slice = -3
    else:
        raise ValueError('Invalid geography string supplied')
    # Allocation factor = block pop2k / parent geography pop2k total.
    pop2k_totals = pd.DataFrame(geocorr_df.groupby('GEOID')['pop2k'].sum()).reset_index()
    pop2k_totals.rename(columns={'pop2k': 'total_pop_00'}, inplace=True)
    geocorr_df = geocorr_df.merge(pop2k_totals, on='GEOID', how='left')
    del pop2k_totals
    geocorr_df['afact'] = geocorr_df['pop2k'] / geocorr_df['total_pop_00']
    # Emit GEOID00,pop2k,afact as CSV on stdout.
    output_df = geocorr_df[['GEOID00', 'pop2k', 'afact']].copy()
    output_df.to_csv(sys.stdout, index=False)
| true
| true
|
1c4ab1e64362cf00e8647c93dc60b6ca75d9cbb0
| 4,371
|
py
|
Python
|
ui/prefs.py
|
bfrobin446/openfrontier
|
bde74dc82be858cd0b0bc64ddfe76020d1179a9c
|
[
"MIT"
] | null | null | null |
ui/prefs.py
|
bfrobin446/openfrontier
|
bde74dc82be858cd0b0bc64ddfe76020d1179a9c
|
[
"MIT"
] | null | null | null |
ui/prefs.py
|
bfrobin446/openfrontier
|
bde74dc82be858cd0b0bc64ddfe76020d1179a9c
|
[
"MIT"
] | null | null | null |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import itertools
from . import colors
from . import keys
from .colorbutton import ColorButton
class KeyPicker(QLineEdit):
    """Read-only line edit that captures a single key press.

    Emits ``keyChanged`` whenever the stored key code changes.
    """

    keyChanged = pyqtSignal(Qt.Key)

    def __init__(self, key=None, parent=None, flags=Qt.Widget, **kwargs):
        QLineEdit.__init__(self, parent, **kwargs)
        self.setWindowFlags(flags)
        self.setReadOnly(True)
        self.key = key
        self.setText(self.textForKey(self.key))

    @staticmethod
    def textForKey(k):
        """Return the Qt constant name (e.g. 'Key_A') for key code *k*, or ''."""
        matches = [name for name, value in Qt.__dict__.items()
                   if name.startswith('Key_') and k == value]
        return matches[0] if matches else ''

    def focusInEvent(self, evt):
        # Prompt the user; the next key press becomes the new value.
        self.setText("<press a key>")

    def keyPressEvent(self, evt):
        # Delegate to setKey so the display updates and the signal fires.
        self.setKey(evt.key())

    def setKey(self, key):
        """Programmatically set the key; updates the display and emits."""
        self.key = key
        self.setText(self.textForKey(self.key))
        self.keyChanged.emit(self.key)
class PrefsDialog(QDialog):
    """Tabbed preferences window hosting the color and key panes."""

    def __init__(self, parent=None, flags=Qt.Widget):
        QDialog.__init__(self, parent, flags)
        self.settings = QSettings()
        self.setLayout(QVBoxLayout(self))
        self.tabs = QTabWidget(self)
        self.layout().addWidget(self.tabs)
        self.buttons = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.RestoreDefaults,
            accepted=self.close)
        self.layout().addWidget(self.buttons)
        restore = self.buttons.button(self.buttons.RestoreDefaults)
        restore.clicked.connect(self.defaults)
        self.addTab(ColorPrefPane(), "Colors")
        self.addTab(KeyPrefPane(), "Keys")

    def addTab(self, widget, title):
        """Wrap *widget* in a vertical-only scroll area and add it as a tab."""
        scroller = QScrollArea()
        scroller.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroller.setWidget(widget)
        # Reserve the widget's preferred width plus two scroll-bar widths so
        # a horizontal bar is never required.
        bar_width = qApp.style().pixelMetric(
            QStyle.PM_ScrollBarExtent, None, scroller)
        scroller.setMinimumWidth(widget.sizeHint().width() + bar_width * 2)
        widget.resize(widget.sizeHint())
        self.tabs.addTab(scroller, title)

    def defaults(self):
        """Reset the currently visible pane to its default values."""
        self.tabs.currentWidget().widget().defaults()
class ColorPrefPane(QWidget):
    """Color pickers for every configurable color, grouped by category."""

    def __init__(self, parent=None, flags=Qt.Widget):
        QWidget.__init__(self, parent, flags)
        self.setLayout(QVBoxLayout(self))
        for category, title in zip(colors.categories, colors.catnames):
            self.layout().addWidget(
                QLabel('<b><big>%s</big></b>' % title, self))
            grid = QGridLayout()
            self.layout().addLayout(grid)
            for row, color in enumerate(getattr(colors, category).values()):
                grid.addWidget(
                    ColorButton(color.current, self, colorChanged=color.update),
                    row, 0)
                grid.addWidget(QLabel(color.displayname, self), row, 1)

    def defaults(self):
        """Restore every color picker to its default color."""
        all_colors = itertools.chain.from_iterable(
            getattr(colors, category).values()
            for category in colors.categories)
        pickers = (child for child in self.children()
                   if isinstance(child, ColorButton))
        for color, picker in zip(all_colors, pickers):
            picker.setColor(color.default)
class KeyPrefPane(QWidget):
    """Key pickers for every configurable key binding, grouped by category."""

    def __init__(self, parent=None, flags=Qt.Widget):
        QWidget.__init__(self, parent, flags)
        self.setLayout(QVBoxLayout(self))
        for category, title in zip(keys.categories, keys.catnames):
            self.layout().addWidget(
                QLabel('<b><big>%s</big></b>' % title, self))
            grid = QGridLayout()
            self.layout().addLayout(grid)
            for row, binding in enumerate(getattr(keys, category).values()):
                grid.addWidget(
                    KeyPicker(binding.current, self, keyChanged=binding.update),
                    row, 0)
                grid.addWidget(QLabel(binding.displayname, self), row, 1)

    def defaults(self):
        """Restore every key picker to its default binding."""
        bindings = itertools.chain.from_iterable(
            getattr(keys, category).values() for category in keys.categories)
        pickers = (child for child in self.children()
                   if isinstance(child, KeyPicker))
        for binding, picker in zip(bindings, pickers):
            picker.setKey(binding.default)
| 35.25
| 80
| 0.590254
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import itertools
from . import colors
from . import keys
from .colorbutton import ColorButton
class KeyPicker(QLineEdit):
    """Read-only line edit that captures a single key press and stores its
    Qt key code."""
    # Emitted whenever the stored key changes.
    keyChanged = pyqtSignal(Qt.Key)
    def __init__(self, key=None, parent=None, flags=Qt.Widget, **kwargs):
        QLineEdit.__init__(self, parent, **kwargs)
        self.setWindowFlags(flags)
        self.setReadOnly(True)
        self.key = key
        self.setText(self.textForKey(self.key))
    @staticmethod
    def textForKey(k):
        """Return the Qt constant name (e.g. 'Key_A') for key code *k*, or ''."""
        for name, value in Qt.__dict__.items():
            if name[0:4] == 'Key_':
                if k == value:
                    return name
        return ''
    def focusInEvent(self, evt):
        # Prompt the user; the next key press becomes the new value.
        self.setText("<press a key>")
    def keyPressEvent(self, evt):
        # Store the pressed key, refresh the display, and notify listeners.
        self.key = evt.key()
        self.setText(self.textForKey(self.key))
        self.keyChanged.emit(self.key)
    def setKey(self, key):
        """Programmatically set the key; updates the display and emits."""
        self.key = key
        self.setText(self.textForKey(self.key))
        self.keyChanged.emit(self.key)
class PrefsDialog(QDialog):
    """Tabbed preferences window hosting the color and key panes."""
    def __init__(self, parent=None, flags=Qt.Widget):
        QDialog.__init__(self, parent, flags)
        self.settings = QSettings()
        self.setLayout(QVBoxLayout(self))
        self.tabs = QTabWidget(self)
        self.layout().addWidget(self.tabs)
        self.buttons = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.RestoreDefaults,
            accepted = self.close
        )
        self.layout().addWidget(self.buttons)
        self.buttons.button(
            self.buttons.RestoreDefaults).clicked.connect(self.defaults)
        self.addTab(ColorPrefPane(), "Colors")
        self.addTab(KeyPrefPane(), "Keys")
    def addTab(self, widget, title):
        """Wrap *widget* in a vertical-only scroll area and add it as a tab."""
        scroller = QScrollArea()
        scroller.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroller.setWidget(widget)
        # Reserve the widget's preferred width plus two scroll-bar widths so
        # a horizontal bar is never required.
        scroller.setMinimumWidth(
            widget.sizeHint().width()
            + qApp.style().pixelMetric(
                QStyle.PM_ScrollBarExtent, None, scroller)
            * 2)
        widget.resize(widget.sizeHint())
        self.tabs.addTab(scroller, title)
    def defaults(self):
        """Reset the currently visible pane to its default values."""
        self.tabs.currentWidget().widget().defaults()
class ColorPrefPane(QWidget):
    """Color pickers for every configurable color, grouped by category."""
    def __init__(self, parent=None, flags=Qt.Widget):
        QWidget.__init__(self, parent, flags)
        self.setLayout(QVBoxLayout(self))
        for cat, catname in zip(colors.categories, colors.catnames):
            # Bold category header followed by a label/picker grid.
            self.layout().addWidget(
                QLabel(''.join(('<b><big>', catname, '</big></b>')), self))
            catLayout = QGridLayout()
            self.layout().addLayout(catLayout)
            for i, c in enumerate(getattr(colors, cat).values()):
                catLayout.addWidget(QLabel(c.displayname, self), i, 1)
                picker = ColorButton(c.current, self, colorChanged = c.update)
                catLayout.addWidget(picker, i, 0)
    def defaults(self):
        """Restore every color picker to its default color.

        Relies on child ColorButtons appearing in the same order the colors
        were iterated during __init__.
        """
        for color, picker in zip(
            itertools.chain.from_iterable(
                getattr(colors, cat).values() for cat in colors.categories),
            (obj for obj in self.children() if isinstance(obj, ColorButton))
        ):
            picker.setColor(color.default)
class KeyPrefPane(QWidget):
    """Key pickers for every configurable key binding, grouped by category."""
    def __init__(self, parent=None, flags=Qt.Widget):
        QWidget.__init__(self, parent, flags)
        self.setLayout(QVBoxLayout(self))
        for cat, catname in zip(keys.categories, keys.catnames):
            # Bold category header followed by a label/picker grid.
            self.layout().addWidget(
                QLabel(''.join(('<b><big>', catname, '</big></b>')), self))
            catLayout = QGridLayout()
            self.layout().addLayout(catLayout)
            for i, c in enumerate(getattr(keys, cat).values()):
                catLayout.addWidget(QLabel(c.displayname, self), i, 1)
                picker = KeyPicker(c.current, self, keyChanged = c.update)
                catLayout.addWidget(picker, i, 0)
    def defaults(self):
        """Restore every key picker to its default binding.

        Relies on child KeyPickers appearing in the same order the keys
        were iterated during __init__.
        """
        for key, picker in zip(
            itertools.chain.from_iterable(
                getattr(keys, cat).values() for cat in keys.categories),
            (obj for obj in self.children() if isinstance(obj, KeyPicker))
        ):
            picker.setKey(key.default)
| true
| true
|
1c4ab1eafd7a0741e2d75a3e980b2a4775179a92
| 417
|
py
|
Python
|
backend/tester1000_dev_23525/wsgi.py
|
crowdbotics-dev/tester1000-dev-23525
|
46c650bdac998a4df3ee19917a09571ec58c0c68
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/tester1000_dev_23525/wsgi.py
|
crowdbotics-dev/tester1000-dev-23525
|
46c650bdac998a4df3ee19917a09571ec58c0c68
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/tester1000_dev_23525/wsgi.py
|
crowdbotics-dev/tester1000-dev-23525
|
46c650bdac998a4df3ee19917a09571ec58c0c68
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for tester1000_dev_23525 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tester1000_dev_23525.settings')
application = get_wsgi_application()
| 24.529412
| 80
| 0.798561
|
# WSGI entry point: exposes the Django app as the module-level `application`.
import os
from django.core.wsgi import get_wsgi_application
# Use the project settings module unless DJANGO_SETTINGS_MODULE is already set.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tester1000_dev_23525.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi) look up.
application = get_wsgi_application()
| true
| true
|
1c4ab3589fb7396833431a0bdd7613a3be9d614a
| 5,648
|
py
|
Python
|
openstack/tests/unit/cloud/test_qos_rule_type.py
|
catalinpopc/openstacksdk
|
adaf758076b0c74cf4bb55e88fdee7072764f5f3
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/cloud/test_qos_rule_type.py
|
catalinpopc/openstacksdk
|
adaf758076b0c74cf4bb55e88fdee7072764f5f3
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/cloud/test_qos_rule_type.py
|
catalinpopc/openstacksdk
|
adaf758076b0c74cf4bb55e88fdee7072764f5f3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack.cloud import exc
from openstack.tests.unit import base
class TestQosRuleType(base.TestCase):
    """Tests for listing QoS rule types and fetching rule-type details
    against mocked network API responses."""
    # Rule type queried by the detail tests.
    rule_type_name = "bandwidth_limit"
    # Mocked /v2.0/extensions entry advertising base QoS support.
    qos_extension = {
        "updated": "2015-06-08T10:00:00-00:00",
        "name": "Quality of Service",
        "links": [],
        "alias": "qos",
        "description": "The Quality of Service extension."
    }
    # Mocked extension entry advertising the rule-type details API.
    qos_rule_type_details_extension = {
        "updated": "2017-06-22T10:00:00-00:00",
        "name": "Details of QoS rule types",
        "links": [],
        "alias": "qos-rule-type-details",
        "description": ("Expose details about QoS rule types supported by "
                        "loaded backend drivers")
    }
    # Mocked rule-type listing payload.
    mock_rule_type_bandwidth_limit = {
        'type': 'bandwidth_limit'
    }
    mock_rule_type_dscp_marking = {
        'type': 'dscp_marking'
    }
    mock_rule_types = [
        mock_rule_type_bandwidth_limit, mock_rule_type_dscp_marking]
    # Mocked per-driver parameter details for the bandwidth_limit rule type.
    mock_rule_type_details = {
        'drivers': [{
            'name': 'linuxbridge',
            'supported_parameters': [{
                'parameter_values': {'start': 0, 'end': 2147483647},
                'parameter_type': 'range',
                'parameter_name': u'max_kbps'
            }, {
                'parameter_values': ['ingress', 'egress'],
                'parameter_type': 'choices',
                'parameter_name': u'direction'
            }, {
                'parameter_values': {'start': 0, 'end': 2147483647},
                'parameter_type': 'range',
                'parameter_name': 'max_burst_kbps'
            }]
        }],
        'type': rule_type_name
    }
    def test_list_qos_rule_types(self):
        """Rule types are listed when the qos extension is enabled."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [self.qos_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'qos', 'rule-types.json']),
                 json={'rule_types': self.mock_rule_types})
        ])
        rule_types = self.cloud.list_qos_rule_types()
        self.assertEqual(self.mock_rule_types, rule_types)
        self.assert_calls()
    def test_list_qos_rule_types_no_qos_extension(self):
        """Listing raises when the qos extension is absent."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': []})
        ])
        self.assertRaises(exc.OpenStackCloudException,
                          self.cloud.list_qos_rule_types)
        self.assert_calls()
    def test_get_qos_rule_type_details(self):
        """Details are returned when both qos extensions are enabled."""
        # The extensions endpoint is consulted twice (once per capability
        # check) before the rule-type details request.
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [
                     self.qos_extension,
                     self.qos_rule_type_details_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [
                     self.qos_extension,
                     self.qos_rule_type_details_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'qos', 'rule-types',
                             '%s.json' % self.rule_type_name]),
                 json={'rule_type': self.mock_rule_type_details})
        ])
        self.assertEqual(
            self.mock_rule_type_details,
            self.cloud.get_qos_rule_type_details(self.rule_type_name)
        )
        self.assert_calls()
    def test_get_qos_rule_type_details_no_qos_extension(self):
        """Detail lookup raises when the qos extension is absent."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': []})
        ])
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.get_qos_rule_type_details, self.rule_type_name)
        self.assert_calls()
    def test_get_qos_rule_type_details_no_qos_details_extension(self):
        """Detail lookup raises when only base qos (no details ext) exists."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [self.qos_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [self.qos_extension]})
        ])
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.get_qos_rule_type_details, self.rule_type_name)
        self.assert_calls()
| 37.653333
| 78
| 0.554887
|
from openstack.cloud import exc
from openstack.tests.unit import base
class TestQosRuleType(base.TestCase):
    """Tests for listing QoS rule types and fetching rule-type details
    against mocked network API responses."""
    # Rule type queried by the detail tests.
    rule_type_name = "bandwidth_limit"
    # Mocked /v2.0/extensions entry advertising base QoS support.
    qos_extension = {
        "updated": "2015-06-08T10:00:00-00:00",
        "name": "Quality of Service",
        "links": [],
        "alias": "qos",
        "description": "The Quality of Service extension."
    }
    # Mocked extension entry advertising the rule-type details API.
    qos_rule_type_details_extension = {
        "updated": "2017-06-22T10:00:00-00:00",
        "name": "Details of QoS rule types",
        "links": [],
        "alias": "qos-rule-type-details",
        "description": ("Expose details about QoS rule types supported by "
                        "loaded backend drivers")
    }
    # Mocked rule-type listing payload.
    mock_rule_type_bandwidth_limit = {
        'type': 'bandwidth_limit'
    }
    mock_rule_type_dscp_marking = {
        'type': 'dscp_marking'
    }
    mock_rule_types = [
        mock_rule_type_bandwidth_limit, mock_rule_type_dscp_marking]
    # Mocked per-driver parameter details for the bandwidth_limit rule type.
    mock_rule_type_details = {
        'drivers': [{
            'name': 'linuxbridge',
            'supported_parameters': [{
                'parameter_values': {'start': 0, 'end': 2147483647},
                'parameter_type': 'range',
                'parameter_name': u'max_kbps'
            }, {
                'parameter_values': ['ingress', 'egress'],
                'parameter_type': 'choices',
                'parameter_name': u'direction'
            }, {
                'parameter_values': {'start': 0, 'end': 2147483647},
                'parameter_type': 'range',
                'parameter_name': 'max_burst_kbps'
            }]
        }],
        'type': rule_type_name
    }
    def test_list_qos_rule_types(self):
        """Rule types are listed when the qos extension is enabled."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [self.qos_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'qos', 'rule-types.json']),
                 json={'rule_types': self.mock_rule_types})
        ])
        rule_types = self.cloud.list_qos_rule_types()
        self.assertEqual(self.mock_rule_types, rule_types)
        self.assert_calls()
    def test_list_qos_rule_types_no_qos_extension(self):
        """Listing raises when the qos extension is absent."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': []})
        ])
        self.assertRaises(exc.OpenStackCloudException,
                          self.cloud.list_qos_rule_types)
        self.assert_calls()
    def test_get_qos_rule_type_details(self):
        """Details are returned when both qos extensions are enabled."""
        # The extensions endpoint is consulted twice (once per capability
        # check) before the rule-type details request.
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [
                     self.qos_extension,
                     self.qos_rule_type_details_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [
                     self.qos_extension,
                     self.qos_rule_type_details_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'qos', 'rule-types',
                             '%s.json' % self.rule_type_name]),
                 json={'rule_type': self.mock_rule_type_details})
        ])
        self.assertEqual(
            self.mock_rule_type_details,
            self.cloud.get_qos_rule_type_details(self.rule_type_name)
        )
        self.assert_calls()
    def test_get_qos_rule_type_details_no_qos_extension(self):
        """Detail lookup raises when the qos extension is absent."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': []})
        ])
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.get_qos_rule_type_details, self.rule_type_name)
        self.assert_calls()
    def test_get_qos_rule_type_details_no_qos_details_extension(self):
        """Detail lookup raises when only base qos (no details ext) exists."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [self.qos_extension]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'extensions.json']),
                 json={'extensions': [self.qos_extension]})
        ])
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.get_qos_rule_type_details, self.rule_type_name)
        self.assert_calls()
| true
| true
|
1c4ab3b577beb0365924ba95af395d8155ef537a
| 1,359
|
py
|
Python
|
src/niweb/apps/userprofile/views.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2018-12-21T09:35:27.000Z
|
2019-07-31T18:51:58.000Z
|
src/niweb/apps/userprofile/views.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 6
|
2019-07-25T07:10:23.000Z
|
2021-02-08T09:58:57.000Z
|
src/niweb/apps/userprofile/views.py
|
emjemj/ni
|
a78e6d97d1e4610aad7698c4f0f459221c680b4f
|
[
"BSD-2-Clause-FreeBSD"
] | 5
|
2019-02-06T12:00:26.000Z
|
2021-11-19T14:48:06.000Z
|
from apps.userprofile.models import UserProfile
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from actstream.models import actor_stream
@login_required
def list_userprofiles(request):
    """Render a listing of every user profile (login required)."""
    context = {'profile_list': UserProfile.objects.all()}
    return render(request, 'userprofile/list_userprofiles.html', context)
@login_required
def userprofile_detail(request, userprofile_id):
    """Show one profile with its activity stream, paginated 50 per page."""
    profile = get_object_or_404(UserProfile, pk=userprofile_id)
    stream = actor_stream(profile.user)
    paginator = Paginator(stream, 50, allow_empty_first_page=True)
    requested = request.GET.get('page')
    try:
        activities = paginator.page(requested)
    except PageNotAnInteger:
        # Non-numeric page parameter -> first page.
        activities = paginator.page(1)
    except EmptyPage:
        # Out-of-range page (e.g. 9999) -> last page.
        activities = paginator.page(paginator.num_pages)
    context = {
        'profile': profile,
        'activities': activities,
        # Thousands-separated total for display.
        'total_activities': '{:,}'.format(activities.paginator.count),
    }
    return render(request, 'userprofile/userprofile_detail.html', context)
| 42.46875
| 101
| 0.743194
|
from apps.userprofile.models import UserProfile
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from actstream.models import actor_stream
@login_required
def list_userprofiles(request):
    """Render a listing of every user profile (login required)."""
    profile_list = UserProfile.objects.all()
    return render(request, 'userprofile/list_userprofiles.html',
                  {'profile_list': profile_list})
@login_required
def userprofile_detail(request, userprofile_id):
    """Show one profile with its activity stream, paginated 50 per page."""
    profile = get_object_or_404(UserProfile, pk=userprofile_id)
    activities = actor_stream(profile.user)
    paginator = Paginator(activities, 50, allow_empty_first_page=True)
    page = request.GET.get('page')
    try:
        activities = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter -> first page.
        activities = paginator.page(1)
    except EmptyPage:
        # Out-of-range page (e.g. 9999) -> last page.
        activities = paginator.page(paginator.num_pages)
    # Thousands-separated total for display.
    total_activities = '{:,}'.format(activities.paginator.count)
    return render(request, 'userprofile/userprofile_detail.html',
                  {'profile': profile, 'activities': activities, 'total_activities': total_activities})
| true
| true
|
1c4ab45c50dc2c8013cb457044e00259c45ba137
| 1,102
|
py
|
Python
|
app/__init__.py
|
Shindler7/libpraks
|
c7d09ef7c485d98e4c1d368ae4ebaf70ef77e410
|
[
"BSD-3-Clause"
] | 2
|
2020-04-02T13:35:57.000Z
|
2020-08-28T09:21:33.000Z
|
app/__init__.py
|
Shindler7/libpraks
|
c7d09ef7c485d98e4c1d368ae4ebaf70ef77e410
|
[
"BSD-3-Clause"
] | 4
|
2020-04-12T17:37:25.000Z
|
2022-01-13T02:49:04.000Z
|
app/__init__.py
|
Shindler7/libpraks
|
c7d09ef7c485d98e4c1d368ae4ebaf70ef77e410
|
[
"BSD-3-Clause"
] | 4
|
2020-04-01T14:11:50.000Z
|
2020-05-10T19:20:03.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Flask application bootstrap: creates the app object, wires up extensions,
# then imports the view modules that register routes on it.
import logging
from flask import Flask
from flask_images import Images
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy as SQLA
from flask_sslify import SSLify
from flask_wtf.csrf import CSRFProtect
from flask_cors import CORS
from config import Config
# Debug-level logging appended to a file.
logging.basicConfig(
    level=logging.DEBUG,
    filename='app/logs/get_screen.log',
    filemode='a'
)
# Flask application instance, configured from config.Config.
application = Flask(__name__)
application.config.from_object(Config)
# SSL enforcement (redirects HTTP requests to HTTPS).
sslify = SSLify(application)
# Cross-origin resource sharing.
CORS(application)
# SQLAlchemy + Migrate
db_lib = SQLA(application)
migrate = Migrate(application, db_lib)
# Login manager; unauthenticated users are redirected to the 'login' view.
login_manager = LoginManager(application)
login_manager.login_view = 'login'
# CSRF protection for forms.
csrf = CSRFProtect(application)
# Flask image handling.
images = Images(application)
# View/route modules are imported only after `application` exists —
# presumably they import it back (Flask circular-import convention; confirm).
from app import views  # noqa
from app import viewsfuture  # noqa
from app import admin  # noqa
from .api import ver_one  # noqa
if __name__ == "__main__":
    application.run(host='0.0.0.0', port=5000)
| 20.407407
| 47
| 0.768603
|
# Flask application bootstrap: creates the app object, wires up extensions,
# then imports the view modules that register routes on it.
import logging
from flask import Flask
from flask_images import Images
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy as SQLA
from flask_sslify import SSLify
from flask_wtf.csrf import CSRFProtect
from flask_cors import CORS
from config import Config
# Debug-level logging appended to a file.
logging.basicConfig(
    level=logging.DEBUG,
    filename='app/logs/get_screen.log',
    filemode='a'
)
# Flask application instance, configured from config.Config.
application = Flask(__name__)
application.config.from_object(Config)
# SSL enforcement (redirects HTTP requests to HTTPS).
sslify = SSLify(application)
# Cross-origin resource sharing.
CORS(application)
# SQLAlchemy + Migrate.
db_lib = SQLA(application)
migrate = Migrate(application, db_lib)
# Login manager; unauthenticated users are redirected to the 'login' view.
login_manager = LoginManager(application)
login_manager.login_view = 'login'
# CSRF protection for forms.
csrf = CSRFProtect(application)
# Flask image handling.
images = Images(application)
# View/route modules are imported only after `application` exists —
# presumably they import it back (Flask circular-import convention; confirm).
from app import views
from app import viewsfuture
from app import admin
from .api import ver_one
if __name__ == "__main__":
    application.run(host='0.0.0.0', port=5000)
| true
| true
|
1c4ab5d0b3ed0c4de6caa36c5588fd3dd0ac2b72
| 8,052
|
py
|
Python
|
e2e/test_e2e.py
|
bentobox-dev/bento-box
|
3e10c62f586c1251529e059b6af515d4d03c60e9
|
[
"MIT"
] | 1
|
2021-01-02T02:50:15.000Z
|
2021-01-02T02:50:15.000Z
|
e2e/test_e2e.py
|
joeltio/bento-box
|
3e10c62f586c1251529e059b6af515d4d03c60e9
|
[
"MIT"
] | 48
|
2020-10-21T07:42:30.000Z
|
2021-02-15T19:34:55.000Z
|
e2e/test_e2e.py
|
joeltio/bento-box
|
3e10c62f586c1251529e059b6af515d4d03c60e9
|
[
"MIT"
] | null | null | null |
#
# bento-box
# E2E Test
#
import pytest
from git import Repo
from math import cos, sin
from bento import types
from bento.sim import Simulation
from bento.utils import to_yaml_proto
from bento.graph.plotter import Plotter
from bento.spec.ecs import EntityDef, ComponentDef
from bento.example.specs import Velocity, Position
# define test components
Meta = ComponentDef(
name="meta",
schema={
"name": types.string,
"id": types.int64,
"version": types.int32,
},
)
Movement = ComponentDef(
name="movement",
schema={
"rotation": types.float32,
"speed": types.float64,
},
)
Keyboard = ComponentDef(
name="keyboard",
schema={
"up": types.boolean,
"down": types.boolean,
"left": types.boolean,
"right": types.boolean,
},
)
@pytest.fixture
def sim(client):
"""Applies the test Simulation to the Engine"""
sim = Simulation(
name="driving_sim",
components=[Keyboard, Movement, Velocity, Position, Meta],
entities=[
EntityDef(components=[Keyboard]),
EntityDef(components=[Movement, Velocity, Position, Meta]),
],
client=client,
)
@sim.init
def init_sim(g: Plotter):
controls = g.entity(components=[Keyboard])
controls[Keyboard].left = False
controls[Keyboard].right = False
controls[Keyboard].up = False
controls[Keyboard].down = False
car = g.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "beetle"
car[Meta].id = 512
car[Meta].version = 2
car[Movement].speed = 0.0
car[Movement].rotation = 90.0
car[Velocity].x = 0.0
car[Velocity].y = 0.0
car[Position].x = 0.0
car[Position].y = 0.0
@sim.system
def control_sys(g: Plotter):
controls = g.entity(components=[Keyboard])
car = g.entity(components=[Movement, Velocity, Position, Meta])
acceleration, max_speed, steer_rate = 5.0, 18.0, 10.0
# steer car
if controls[Keyboard].left:
car[Movement].rotation -= steer_rate
controls[Keyboard].left = False
elif controls[Keyboard].right:
car[Movement].rotation += steer_rate
controls[Keyboard].right = False
# accelerate/slow down car
if controls[Keyboard].up:
car[Movement].speed = g.min(car[Movement].speed + acceleration, max_speed)
controls[Keyboard].up = False
elif controls[Keyboard].down:
car[Movement].speed = g.max(car[Movement].speed - acceleration, 0.0)
controls[Keyboard].down = False
@sim.system
def physics_sys(g: Plotter):
# compute velocity from car's rotation and speed
car = g.entity(components=[Movement, Velocity, Position, Meta])
# rotation
heading_x, heading_y = g.cos(car[Movement].rotation), -g.sin(
car[Movement].rotation
)
# speed
car[Velocity].x = car[Movement].speed * heading_x
car[Velocity].y = car[Movement].speed * heading_y
# update car position based on current velocity
car[Position].x += car[Velocity].x
car[Position].y += car[Velocity].y
sim.start()
return sim
def test_e2e_sim_get_version(client):
# e2e test that we can obtain sim/engine's version via SDK
repo = Repo(search_parent_directories=True)
assert client.get_version() == repo.head.object.hexsha
def test_e2e_sim_apply_sim(sim):
# check the sim's entities have populated ids
assert len([e.id for e in sim.entities if e.id != 0]) == len(sim.entities)
def test_e2e_sim_list_sims(sim, client):
# check that sim is listed
assert client.list_sims()[0] == sim.name
def test_e2e_sim_get_sim(sim, client):
# check that sim's can be retrieved by name
applied_proto = client.get_sim(sim.name)
assert to_yaml_proto(applied_proto) == to_yaml_proto(sim.build())
# test error handling when getting nonexistent sim
has_error = False
try:
client.get_sim("not_found")
except LookupError:
has_error = True
assert has_error
def test_e2e_sim_remove(sim, client):
# test removing simulations
client.remove_sim(sim.name)
assert len(client.list_sims()) == 0
def test_e2e_sim_get_set_attr(sim, client):
# test setting/setting attributes for every primitive data type
controls = sim.entity(components=[Keyboard])
controls[Keyboard].left = True
assert controls[Keyboard].left == True
car = sim.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "sedan"
assert car[Meta].name == "sedan"
car[Meta].version = 10
assert car[Meta].version == 10
car[Movement].rotation = -134.2
# rounding required due to loss of precision when using float32
assert round(car[Movement].rotation, 4) == -134.2
car[Movement].speed = 23.5
assert car[Movement].speed == 23.5
def test_e2e_engine_implict_type_convert(sim, client):
# test implicit type conversion
car = sim.entity(components=[Movement, Velocity, Position, Meta])
controls = sim.entity(components=[Keyboard])
# setup test values to attributes
car[Meta].id = 1
car[Meta].version = 1
car[Movement].speed = 1.0
car[Movement].rotation = 1.0
# test implicit type conversion with combinations of numeric data types
# numeric data type => lambda to , get attribute) with that data type
dtype_attrs = {
"types.int64": (lambda: car[Meta].id),
"types.int32": (lambda: car[Meta].version),
"types.float64": (lambda: car[Movement].speed),
"types.float32": (lambda: car[Movement].rotation),
}
for dtype in dtype_attrs.keys():
other_dtypes = [t for t in dtype_attrs.keys() if t != dtype]
for other_dtype in other_dtypes:
value_attr = dtype_attrs[other_dtype]
if dtype == "types.int64":
car[Meta].id = value_attr()
elif dtype == "types.int32":
car[Meta].version = value_attr()
elif dtype == "types.float64":
car[Movement].speed = value_attr()
elif dtype == "types.float32":
car[Movement].rotation = value_attr()
else:
raise ValueError(f"Data type case not handled: {dtype}")
actual_attr = dtype_attrs[dtype]
assert actual_attr() == 1
def test_e2e_sim_step(sim, client):
# once https://github.com/joeltio/bento-box/issues/34 is fixed.
# test init
sim.step()
# check that values are set correctly by init graph
controls = sim.entity(components=[Keyboard])
assert controls[Keyboard].left == False
assert controls[Keyboard].right == False
assert controls[Keyboard].up == False
assert controls[Keyboard].left == False
car = sim.entity(components=[Movement, Velocity, Position, Meta])
assert car[Meta].name == "beetle"
assert car[Meta].version == 2
assert car[Meta].id == 512
assert car[Movement].speed == 0.0
assert car[Movement].rotation == 90.0
assert car[Velocity].x == 0.0
assert car[Velocity].y == 0.0
assert car[Position].x == 0.0
assert car[Position].y == 0.0
# test running simulation for one step
controls[Keyboard].up = True
controls[Keyboard].left = True
sim.step()
# test attributes have been updated by system
assert controls[Keyboard].left == False
assert controls[Keyboard].up == False
assert car[Movement].speed == 5
assert car[Movement].rotation == 80
# test running the simulation for one more step to exercise other conditional branch
controls[Keyboard].down = True
controls[Keyboard].right = True
sim.step()
# test attributes have been updated by system
assert controls[Keyboard].down == False
assert controls[Keyboard].right == False
assert car[Movement].speed == 0
assert car[Movement].rotation == 90
| 31.453125
| 88
| 0.638847
|
import pytest
from git import Repo
from math import cos, sin
from bento import types
from bento.sim import Simulation
from bento.utils import to_yaml_proto
from bento.graph.plotter import Plotter
from bento.spec.ecs import EntityDef, ComponentDef
from bento.example.specs import Velocity, Position
Meta = ComponentDef(
name="meta",
schema={
"name": types.string,
"id": types.int64,
"version": types.int32,
},
)
Movement = ComponentDef(
name="movement",
schema={
"rotation": types.float32,
"speed": types.float64,
},
)
Keyboard = ComponentDef(
name="keyboard",
schema={
"up": types.boolean,
"down": types.boolean,
"left": types.boolean,
"right": types.boolean,
},
)
@pytest.fixture
def sim(client):
sim = Simulation(
name="driving_sim",
components=[Keyboard, Movement, Velocity, Position, Meta],
entities=[
EntityDef(components=[Keyboard]),
EntityDef(components=[Movement, Velocity, Position, Meta]),
],
client=client,
)
@sim.init
def init_sim(g: Plotter):
controls = g.entity(components=[Keyboard])
controls[Keyboard].left = False
controls[Keyboard].right = False
controls[Keyboard].up = False
controls[Keyboard].down = False
car = g.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "beetle"
car[Meta].id = 512
car[Meta].version = 2
car[Movement].speed = 0.0
car[Movement].rotation = 90.0
car[Velocity].x = 0.0
car[Velocity].y = 0.0
car[Position].x = 0.0
car[Position].y = 0.0
@sim.system
def control_sys(g: Plotter):
controls = g.entity(components=[Keyboard])
car = g.entity(components=[Movement, Velocity, Position, Meta])
acceleration, max_speed, steer_rate = 5.0, 18.0, 10.0
if controls[Keyboard].left:
car[Movement].rotation -= steer_rate
controls[Keyboard].left = False
elif controls[Keyboard].right:
car[Movement].rotation += steer_rate
controls[Keyboard].right = False
if controls[Keyboard].up:
car[Movement].speed = g.min(car[Movement].speed + acceleration, max_speed)
controls[Keyboard].up = False
elif controls[Keyboard].down:
car[Movement].speed = g.max(car[Movement].speed - acceleration, 0.0)
controls[Keyboard].down = False
@sim.system
def physics_sys(g: Plotter):
car = g.entity(components=[Movement, Velocity, Position, Meta])
# rotation
heading_x, heading_y = g.cos(car[Movement].rotation), -g.sin(
car[Movement].rotation
)
# speed
car[Velocity].x = car[Movement].speed * heading_x
car[Velocity].y = car[Movement].speed * heading_y
# update car position based on current velocity
car[Position].x += car[Velocity].x
car[Position].y += car[Velocity].y
sim.start()
return sim
def test_e2e_sim_get_version(client):
# e2e test that we can obtain sim/engine's version via SDK
repo = Repo(search_parent_directories=True)
assert client.get_version() == repo.head.object.hexsha
def test_e2e_sim_apply_sim(sim):
assert len([e.id for e in sim.entities if e.id != 0]) == len(sim.entities)
def test_e2e_sim_list_sims(sim, client):
# check that sim is listed
assert client.list_sims()[0] == sim.name
def test_e2e_sim_get_sim(sim, client):
# check that sim's can be retrieved by name
applied_proto = client.get_sim(sim.name)
assert to_yaml_proto(applied_proto) == to_yaml_proto(sim.build())
has_error = False
try:
client.get_sim("not_found")
except LookupError:
has_error = True
assert has_error
def test_e2e_sim_remove(sim, client):
client.remove_sim(sim.name)
assert len(client.list_sims()) == 0
def test_e2e_sim_get_set_attr(sim, client):
controls = sim.entity(components=[Keyboard])
controls[Keyboard].left = True
assert controls[Keyboard].left == True
car = sim.entity(components=[Movement, Velocity, Position, Meta])
car[Meta].name = "sedan"
assert car[Meta].name == "sedan"
car[Meta].version = 10
assert car[Meta].version == 10
car[Movement].rotation = -134.2
assert round(car[Movement].rotation, 4) == -134.2
car[Movement].speed = 23.5
assert car[Movement].speed == 23.5
def test_e2e_engine_implict_type_convert(sim, client):
car = sim.entity(components=[Movement, Velocity, Position, Meta])
controls = sim.entity(components=[Keyboard])
car[Meta].id = 1
car[Meta].version = 1
car[Movement].speed = 1.0
car[Movement].rotation = 1.0
dtype_attrs = {
"types.int64": (lambda: car[Meta].id),
"types.int32": (lambda: car[Meta].version),
"types.float64": (lambda: car[Movement].speed),
"types.float32": (lambda: car[Movement].rotation),
}
for dtype in dtype_attrs.keys():
other_dtypes = [t for t in dtype_attrs.keys() if t != dtype]
for other_dtype in other_dtypes:
value_attr = dtype_attrs[other_dtype]
if dtype == "types.int64":
car[Meta].id = value_attr()
elif dtype == "types.int32":
car[Meta].version = value_attr()
elif dtype == "types.float64":
car[Movement].speed = value_attr()
elif dtype == "types.float32":
car[Movement].rotation = value_attr()
else:
raise ValueError(f"Data type case not handled: {dtype}")
actual_attr = dtype_attrs[dtype]
assert actual_attr() == 1
def test_e2e_sim_step(sim, client):
sim.step()
controls = sim.entity(components=[Keyboard])
assert controls[Keyboard].left == False
assert controls[Keyboard].right == False
assert controls[Keyboard].up == False
assert controls[Keyboard].left == False
car = sim.entity(components=[Movement, Velocity, Position, Meta])
assert car[Meta].name == "beetle"
assert car[Meta].version == 2
assert car[Meta].id == 512
assert car[Movement].speed == 0.0
assert car[Movement].rotation == 90.0
assert car[Velocity].x == 0.0
assert car[Velocity].y == 0.0
assert car[Position].x == 0.0
assert car[Position].y == 0.0
controls[Keyboard].up = True
controls[Keyboard].left = True
sim.step()
assert controls[Keyboard].left == False
assert controls[Keyboard].up == False
assert car[Movement].speed == 5
assert car[Movement].rotation == 80
controls[Keyboard].down = True
controls[Keyboard].right = True
sim.step()
assert controls[Keyboard].down == False
assert controls[Keyboard].right == False
assert car[Movement].speed == 0
assert car[Movement].rotation == 90
| true
| true
|
1c4ab5ee42184750c6098b8611c188c2f7b936ac
| 7,817
|
py
|
Python
|
lib/python/treadmill/cli/show.py
|
drienyov/treadmill
|
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
|
[
"Apache-2.0"
] | 2
|
2017-10-31T18:48:20.000Z
|
2018-03-04T20:35:20.000Z
|
lib/python/treadmill/cli/show.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/cli/show.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
"""Manage Treadmill app manifest.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from six.moves import urllib_parse
from treadmill import cli
from treadmill import restclient
from treadmill import context
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
_FINISHED_STATES = ['finished', 'aborted', 'killed', 'terminated']
_STATE_FORMATTER = cli.make_formatter('instance-state')
_FINISHED_STATE_FORMATTER = cli.make_formatter('instance-finished-state')
_ENDPOINT_FORMATTER = cli.make_formatter('endpoint')
_APP_FORMATTER = cli.make_formatter('app')
def _get_state(apis, match=None, finished=False, partition=None):
"""Get cell state."""
url = '/state/'
query = {}
if match:
query['match'] = match
if finished:
query['finished'] = 'true'
if partition:
query['partition'] = partition
if query:
url += '?' + urllib_parse.urlencode(query)
response = restclient.get(apis, url)
return response.json()
def _show_state(apis, match=None, finished=False, partition=None):
"""Show cell state."""
state = _get_state(apis, match, finished, partition)
cli.out(_STATE_FORMATTER(state))
def _show_finished(apis, match=None, partition=None):
state = _get_state(apis, match=match, finished=True, partition=partition)
result = []
for item in state:
if item['state'] not in _FINISHED_STATES:
continue
details = None
if item.get('exitcode') is not None:
details = 'return code: {}'.format(item['exitcode'])
if item.get('signal') is not None:
details = 'signal: {}'.format(utils.signal2name(item['signal']))
if item.get('aborted_reason'):
details = 'reason: {}'.format(item['aborted_reason'])
if item.get('terminated_reason'):
details = 'reason: {}'.format(item['terminated_reason'])
if item.get('oom'):
details = 'out of memory'
result.append({
'name': item['name'],
'state': item['state'],
'host': item['host'],
'when': utils.strftime_utc(item['when']),
'details': details,
})
cli.out(_FINISHED_STATE_FORMATTER(result))
def _show_list(apis, match, states, finished=False, partition=None):
"""Show list of instnces in given state."""
state = _get_state(apis, match, finished, partition)
names = [item['name'] for item in state if item['state'] in states]
for name in names:
cli.out(name)
def _show_endpoints(apis, pattern, endpoint, proto):
"""Show cell endpoints."""
url = '/endpoint/%s' % urllib_parse.quote(pattern)
if endpoint:
if proto:
url += '/' + proto
else:
url += '/*'
url += '/' + endpoint
response = restclient.get(apis, url)
endpoints = [{
'name': end['name'],
'proto': end['proto'],
'endpoint': end['endpoint'],
'hostport': '{0}:{1}'.format(end['host'], end['port']),
'state': end.get('state')
} for end in response.json()]
cli.out(_ENDPOINT_FORMATTER(endpoints))
def _show_instance(apis, instance_id):
"""Show instance manifest."""
url = '/instance/%s' % urllib_parse.quote(instance_id)
response = restclient.get(apis, url)
cli.out(_APP_FORMATTER(response.json()))
def init():
"""Return top level command handler."""
ctx = {}
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='API url to use.',
metavar='URL',
envvar='TREADMILL_STATEAPI')
def show(api):
"""Show state of scheduled applications."""
ctx['api'] = api
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--finished', is_flag=True, default=False,
help='Show finished instances.')
@click.option('--partition', help='Filter apps by partition')
def state(match, finished, partition):
"""Show state of Treadmill scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_state(apis, match, finished, partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def pending(match, partition):
"""Show pending instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def running(match, partition):
"""Show running instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
@click.option('--details', is_flag=True, default=False,
help='Show details.')
def finished(match, partition, details):
"""Show finished instances."""
apis = context.GLOBAL.state_api(ctx['api'])
if details:
return _show_finished(apis, match, partition)
return _show_list(
apis, match, _FINISHED_STATES, finished=True, partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def scheduled(match, partition):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis, match, ['running', 'scheduled'], partition=partition
)
@show.command(name='all')
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def _all(match, partition):
"""Show scheduled instances."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis,
match,
['pending', 'running', 'scheduled'],
partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('pattern')
@click.argument('endpoint', required=False)
@click.argument('proto', required=False)
def endpoints(pattern, endpoint, proto):
"""Show application endpoints."""
apis = context.GLOBAL.state_api(ctx['api'])
return _show_endpoints(apis, pattern, endpoint, proto)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('instance_id')
def instance(instance_id):
"""Show scheduled instance manifest."""
apis = context.GLOBAL.cell_api(ctx['api'])
return _show_instance(apis, instance_id)
del _all
del running
del scheduled
del pending
del finished
del instance
del state
del endpoints
return show
| 32.301653
| 77
| 0.63541
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from six.moves import urllib_parse
from treadmill import cli
from treadmill import restclient
from treadmill import context
from treadmill import utils
_LOGGER = logging.getLogger(__name__)
_FINISHED_STATES = ['finished', 'aborted', 'killed', 'terminated']
_STATE_FORMATTER = cli.make_formatter('instance-state')
_FINISHED_STATE_FORMATTER = cli.make_formatter('instance-finished-state')
_ENDPOINT_FORMATTER = cli.make_formatter('endpoint')
_APP_FORMATTER = cli.make_formatter('app')
def _get_state(apis, match=None, finished=False, partition=None):
url = '/state/'
query = {}
if match:
query['match'] = match
if finished:
query['finished'] = 'true'
if partition:
query['partition'] = partition
if query:
url += '?' + urllib_parse.urlencode(query)
response = restclient.get(apis, url)
return response.json()
def _show_state(apis, match=None, finished=False, partition=None):
state = _get_state(apis, match, finished, partition)
cli.out(_STATE_FORMATTER(state))
def _show_finished(apis, match=None, partition=None):
state = _get_state(apis, match=match, finished=True, partition=partition)
result = []
for item in state:
if item['state'] not in _FINISHED_STATES:
continue
details = None
if item.get('exitcode') is not None:
details = 'return code: {}'.format(item['exitcode'])
if item.get('signal') is not None:
details = 'signal: {}'.format(utils.signal2name(item['signal']))
if item.get('aborted_reason'):
details = 'reason: {}'.format(item['aborted_reason'])
if item.get('terminated_reason'):
details = 'reason: {}'.format(item['terminated_reason'])
if item.get('oom'):
details = 'out of memory'
result.append({
'name': item['name'],
'state': item['state'],
'host': item['host'],
'when': utils.strftime_utc(item['when']),
'details': details,
})
cli.out(_FINISHED_STATE_FORMATTER(result))
def _show_list(apis, match, states, finished=False, partition=None):
state = _get_state(apis, match, finished, partition)
names = [item['name'] for item in state if item['state'] in states]
for name in names:
cli.out(name)
def _show_endpoints(apis, pattern, endpoint, proto):
url = '/endpoint/%s' % urllib_parse.quote(pattern)
if endpoint:
if proto:
url += '/' + proto
else:
url += '/*'
url += '/' + endpoint
response = restclient.get(apis, url)
endpoints = [{
'name': end['name'],
'proto': end['proto'],
'endpoint': end['endpoint'],
'hostport': '{0}:{1}'.format(end['host'], end['port']),
'state': end.get('state')
} for end in response.json()]
cli.out(_ENDPOINT_FORMATTER(endpoints))
def _show_instance(apis, instance_id):
url = '/instance/%s' % urllib_parse.quote(instance_id)
response = restclient.get(apis, url)
cli.out(_APP_FORMATTER(response.json()))
def init():
ctx = {}
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='API url to use.',
metavar='URL',
envvar='TREADMILL_STATEAPI')
def show(api):
ctx['api'] = api
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--finished', is_flag=True, default=False,
help='Show finished instances.')
@click.option('--partition', help='Filter apps by partition')
def state(match, finished, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_state(apis, match, finished, partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def pending(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['pending'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def running(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(apis, match, ['running'], partition=partition)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
@click.option('--details', is_flag=True, default=False,
help='Show details.')
def finished(match, partition, details):
apis = context.GLOBAL.state_api(ctx['api'])
if details:
return _show_finished(apis, match, partition)
return _show_list(
apis, match, _FINISHED_STATES, finished=True, partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def scheduled(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis, match, ['running', 'scheduled'], partition=partition
)
@show.command(name='all')
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Application name pattern match')
@click.option('--partition', help='Filter apps by partition')
def _all(match, partition):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_list(
apis,
match,
['pending', 'running', 'scheduled'],
partition=partition
)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('pattern')
@click.argument('endpoint', required=False)
@click.argument('proto', required=False)
def endpoints(pattern, endpoint, proto):
apis = context.GLOBAL.state_api(ctx['api'])
return _show_endpoints(apis, pattern, endpoint, proto)
@show.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.argument('instance_id')
def instance(instance_id):
apis = context.GLOBAL.cell_api(ctx['api'])
return _show_instance(apis, instance_id)
del _all
del running
del scheduled
del pending
del finished
del instance
del state
del endpoints
return show
| true
| true
|
1c4ab644d7926e6cc00e6416eef269b9cd3f9640
| 7,055
|
py
|
Python
|
docs/source/conf.py
|
dsjoerg/sc2reader
|
adeb6e3da80e57974b1a29b20e80a02411e693e2
|
[
"MIT"
] | 2
|
2016-05-31T14:50:47.000Z
|
2021-11-04T20:03:19.000Z
|
docs/source/conf.py
|
dsjoerg/sc2reader
|
adeb6e3da80e57974b1a29b20e80a02411e693e2
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
dsjoerg/sc2reader
|
adeb6e3da80e57974b1a29b20e80a02411e693e2
|
[
"MIT"
] | 2
|
2017-01-28T09:09:47.000Z
|
2017-09-14T14:29:20.000Z
|
# -*- coding: utf-8 -*-
#
# sc2reader documentation build configuration file, created by
# sphinx-quickstart on Sun May 01 12:39:48 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sc2reader'
copyright = u'2011'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sc2readerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sc2reader.tex', u'sc2reader Documentation',
u'Graylin Kim, Bas Peschier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sc2reader', u'sc2reader Documentation',
[u'Graylin Kim, Bas Peschier'], 1)
]
| 32.511521
| 80
| 0.71949
|
import sys, os
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'sc2reader'
copyright = u'2011'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sc2readerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sc2reader.tex', u'sc2reader Documentation',
u'Graylin Kim, Bas Peschier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sc2reader', u'sc2reader Documentation',
[u'Graylin Kim, Bas Peschier'], 1)
]
| true
| true
|
1c4ab7312d9e6256798662f4b5f774925bdfd988
| 563
|
py
|
Python
|
HostsTool/gui/__init__.py
|
zte-lhg/chromium_org
|
6174180179b3c6b71c2d93df68e734cadf6d8d49
|
[
"Apache-2.0"
] | null | null | null |
HostsTool/gui/__init__.py
|
zte-lhg/chromium_org
|
6174180179b3c6b71c2d93df68e734cadf6d8d49
|
[
"Apache-2.0"
] | null | null | null |
HostsTool/gui/__init__.py
|
zte-lhg/chromium_org
|
6174180179b3c6b71c2d93df68e734cadf6d8d49
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py : Declare modules to be called in gui module.
#
# Copyleft (C) 2014 - huhamhire <me@huhamhire.com>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# =====================================================================
from hostsutil import HostsUtil
__all__ = ["HostsUtil"]
| 35.1875
| 71
| 0.53286
|
from hostsutil import HostsUtil
__all__ = ["HostsUtil"]
| true
| true
|
1c4ab74f30c876baed75a6ac1163521f349e3b56
| 3,383
|
py
|
Python
|
train.py
|
haziq9978/PythonChatbot
|
8eb77140b32a4c6770dab20d4e26be03504ac5ee
|
[
"MIT"
] | 2
|
2021-01-04T16:23:07.000Z
|
2021-01-05T03:25:19.000Z
|
train.py
|
haziq9978/PythonChatbot
|
8eb77140b32a4c6770dab20d4e26be03504ac5ee
|
[
"MIT"
] | null | null | null |
train.py
|
haziq9978/PythonChatbot
|
8eb77140b32a4c6770dab20d4e26be03504ac5ee
|
[
"MIT"
] | 1
|
2021-01-04T16:28:57.000Z
|
2021-01-04T16:28:57.000Z
|
import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
with open('dataCombine.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
# loop through each sentence in our intents patterns
for intent in intents['intents']:
tag = intent['tag']
# add to tag list
tags.append(tag)
for pattern in intent['patterns']:
# tokenize each word in the sentence
w = tokenize(pattern)
# add to our words list
all_words.extend(w)
# add to xy pair
xy.append((w, tag))
# stem and lower each word
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
# remove duplicates and sort
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
# X: bag of words for each pattern_sentence
bag = bag_of_words(pattern_sentence, all_words)
X_train.append(bag)
# y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
# support indexing such that dataset[i] can be used to get i-th sample
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
# we can call len(dataset) to return the size
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
# Forward pass
outputs = model(words)
# if y would be one-hot, we must apply
# labels = torch.max(labels, 1)[1]
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 10 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.7f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| 26.023077
| 74
| 0.657996
|
import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
with open('dataCombine.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
for intent in intents['intents']:
tag = intent['tag']
tags.append(tag)
for pattern in intent['patterns']:
w = tokenize(pattern)
all_words.extend(w)
xy.append((w, tag))
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
bag = bag_of_words(pattern_sentence, all_words)
X_train.append(bag)
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
outputs = model(words)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 10 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.7f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| true
| true
|
1c4ab7a54084ae0c5afba16f8fe6659ad81d8e17
| 3,924
|
py
|
Python
|
tools/similarity.py
|
whxf/nlp_api
|
a63b67287e9a90381cac14bb1c5b723ccbeb14a3
|
[
"MIT"
] | 13
|
2019-11-06T02:37:28.000Z
|
2022-03-21T06:16:18.000Z
|
tools/similarity.py
|
whxf/nlp_api
|
a63b67287e9a90381cac14bb1c5b723ccbeb14a3
|
[
"MIT"
] | null | null | null |
tools/similarity.py
|
whxf/nlp_api
|
a63b67287e9a90381cac14bb1c5b723ccbeb14a3
|
[
"MIT"
] | 4
|
2020-05-07T10:49:32.000Z
|
2021-12-31T04:03:19.000Z
|
"""
@author: Li Xi
@file: similarity.py
@time: 2019/10/30 15:37
@desc:
计算文本相似度:
1. WordMoverDistance 基于词移距离的文本相似度计算 【比较文档的相似度】
2. WordVectorSimilarity word-vector的句子相似度计算 【比较句子的相似度】
注意事项:
* 两种方法都需要输入句子分词之后的结果,类型需要时list
* 为提升效率/效果,可对分词结果进行处理,如去除停用词等
* 具体使用方法见文件的最下
* 可自定义加载词向量文件
"""
import os
import gensim
import numpy as np
from tools.segment import LtpSegment
class WordMoverDistance(object):
"""词移距离 Word Mover's Distance"""
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
word2vec_model.init_sims(replace=True) # normalizes vectors
def distance(self, tokens1, tokens2):
"""
计算词移距离
!!!: 这里需要输入句子的分词后结果
:param tokens1: [list]
:param tokens2: [list]
:return: score 值
"""
distance = self.word2vec_model.wmdistance(tokens1, tokens2)
return distance
class WordVectorSimilarity(object):
"""
基于word-vector的句子相似度计算(余弦相似度)
!!!: 不仅可以使用词向量也可使用字向量
"""
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
def __init__(self, vector_dim=300):
"""
:param vector_dim: 词向量的维度
"""
self.vector_dim = vector_dim
def get_word_vector(self, word):
"""
获取词的词向量,如果没有找到,返回全零的embedding
:param word:
:return:
"""
try:
return self.word2vec_model[word]
except:
return np.zeros(self.vector_dim)
def similarity_cosine(self, tokens1, tokens2):
"""
计算句子的余弦相似度,其中句子向量等于字符向量求平均
!!!: 这里需要输入句子的分词后结果
:param tokens1:
:param tokens2:
:return:
"""
# 求 sentence1 的向量表示
sentence1 = np.zeros(self.vector_dim)
for _token in tokens1:
sentence1 += self.get_word_vector(_token)
sentence1 = sentence1 / len(tokens1)
# 求 sentence2 的向量表示
sentence2 = np.zeros(self.vector_dim)
for _token in tokens2:
sentence2 += self.get_word_vector(_token)
sentence2 = sentence2 / len(tokens2)
# 余弦相似度计算公式 sim = sum(a*b) / { sum[ sqrt(a^2) ] * sum[ sqrt(b^2) ] }
cos1 = np.sum(sentence1 * sentence2)
cos21 = np.sqrt(sum(sentence1 ** 2))
cos22 = np.sqrt(sum(sentence2 ** 2))
similarity = cos1 / float(cos21 * cos22)
return similarity
def distance(self, tokens1, tokens2):
"""
计算 WordVectorSimilarity
!!!: 这里需要输入句子的分词后结果
:param tokens1:
:param tokens2:
:return:
"""
return self.similarity_cosine(tokens1, tokens2)
if __name__ == "__main__":
# -------- Begin WordMoverDistance Test --------
# 初始化 WordMoverDistance
sim = WordMoverDistance()
# 初始化 LTP 用于分词
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国") # 分词结果为list
str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
# 相似度:0.5040331478972442
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
# 相似度:0.8857186341563674
# -------- End WordMoverDistance Test --------
# -------- Begin WordVectorSimilarity Test --------
# 初始化 WordVectorSimilarity
sim = WordVectorSimilarity()
# 初始化 LTP 用于分词
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国") # 分词结果为list
str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
# 相似度:0.9048935250581785
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
# 相似度:0.812708497722071
# -------- End WordVectorSimilarity Test --------
| 28.230216
| 83
| 0.622579
|
import os
import gensim
import numpy as np
from tools.segment import LtpSegment
class WordMoverDistance(object):
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
word2vec_model.init_sims(replace=True)
def distance(self, tokens1, tokens2):
distance = self.word2vec_model.wmdistance(tokens1, tokens2)
return distance
class WordVectorSimilarity(object):
__vector_path = os.path.join("source", "sgns.renmin.word.bz2")
word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)
def __init__(self, vector_dim=300):
self.vector_dim = vector_dim
def get_word_vector(self, word):
try:
return self.word2vec_model[word]
except:
return np.zeros(self.vector_dim)
def similarity_cosine(self, tokens1, tokens2):
sentence1 = np.zeros(self.vector_dim)
for _token in tokens1:
sentence1 += self.get_word_vector(_token)
sentence1 = sentence1 / len(tokens1)
sentence2 = np.zeros(self.vector_dim)
for _token in tokens2:
sentence2 += self.get_word_vector(_token)
sentence2 = sentence2 / len(tokens2)
cos1 = np.sum(sentence1 * sentence2)
cos21 = np.sqrt(sum(sentence1 ** 2))
cos22 = np.sqrt(sum(sentence2 ** 2))
similarity = cos1 / float(cos21 * cos22)
return similarity
def distance(self, tokens1, tokens2):
return self.similarity_cosine(tokens1, tokens2)
if __name__ == "__main__":
sim = WordMoverDistance()
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国")
str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
sim = WordVectorSimilarity()
ltp = LtpSegment()
str1 = ltp.segment("我是中国人,我深爱着我的祖国")
str2 = ltp.segment("中国是我的母亲,我热爱她")
print("相似度:{}".format(sim.distance(str1, str2)))
str1 = ltp.segment("小勇硕士毕业于北京语言大学,目前在中科院软件所工作")
str2 = ltp.segment("大方博士就读于首都师范大学,未来不知道会在哪里上班")
print("相似度:{}".format(sim.distance(str1, str2)))
| true
| true
|
1c4ab877b9f249b4301cd5d7ac6137a0a46850c9
| 1,339
|
py
|
Python
|
tests/integration_tests/ring_managers_tests/test_horizontals_at_top_scanbeam.py
|
synapticarbors/wagyu
|
b98354611dceda8888f2951e9704f843a4e88c27
|
[
"MIT"
] | 1
|
2021-01-20T05:49:13.000Z
|
2021-01-20T05:49:13.000Z
|
tests/integration_tests/ring_managers_tests/test_horizontals_at_top_scanbeam.py
|
synapticarbors/wagyu
|
b98354611dceda8888f2951e9704f843a4e88c27
|
[
"MIT"
] | 1
|
2020-11-20T18:21:24.000Z
|
2020-11-20T18:21:37.000Z
|
tests/integration_tests/ring_managers_tests/test_horizontals_at_top_scanbeam.py
|
synapticarbors/wagyu
|
b98354611dceda8888f2951e9704f843a4e88c27
|
[
"MIT"
] | 2
|
2020-11-20T18:17:31.000Z
|
2021-01-20T14:58:22.000Z
|
from typing import Tuple
from hypothesis import given
from tests.integration_tests.utils import (
BoundPortedBoundsListsPair,
BoundPortedRingManagersPair,
are_bound_ported_bounds_lists_equal,
are_bound_ported_ring_managers_equal)
from tests.utils import equivalence
from wagyu.hints import Coordinate
from . import strategies
@given(strategies.ring_managers_pairs,
strategies.coordinates,
strategies.non_empty_initialized_bounds_lists_pairs_indices)
def test_basic(pair: BoundPortedRingManagersPair,
top_y: Coordinate,
active_bounds_pair_index: Tuple[BoundPortedBoundsListsPair, int]
) -> None:
bound, ported = pair
(bound_active_bounds,
ported_active_bounds), index = active_bounds_pair_index
(bound_active_bounds, bound_index,
bound_result) = bound.horizontals_at_top_scanbeam(
top_y, bound_active_bounds, index)
ported_index, ported_result = ported.horizontals_at_top_scanbeam(
top_y, ported_active_bounds, index)
assert equivalence(bound_result, ported_result)
assert bound_index == ported_index
assert are_bound_ported_bounds_lists_equal(bound_active_bounds,
ported_active_bounds)
assert are_bound_ported_ring_managers_equal(bound, ported)
| 36.189189
| 79
| 0.746826
|
from typing import Tuple
from hypothesis import given
from tests.integration_tests.utils import (
BoundPortedBoundsListsPair,
BoundPortedRingManagersPair,
are_bound_ported_bounds_lists_equal,
are_bound_ported_ring_managers_equal)
from tests.utils import equivalence
from wagyu.hints import Coordinate
from . import strategies
@given(strategies.ring_managers_pairs,
strategies.coordinates,
strategies.non_empty_initialized_bounds_lists_pairs_indices)
def test_basic(pair: BoundPortedRingManagersPair,
top_y: Coordinate,
active_bounds_pair_index: Tuple[BoundPortedBoundsListsPair, int]
) -> None:
bound, ported = pair
(bound_active_bounds,
ported_active_bounds), index = active_bounds_pair_index
(bound_active_bounds, bound_index,
bound_result) = bound.horizontals_at_top_scanbeam(
top_y, bound_active_bounds, index)
ported_index, ported_result = ported.horizontals_at_top_scanbeam(
top_y, ported_active_bounds, index)
assert equivalence(bound_result, ported_result)
assert bound_index == ported_index
assert are_bound_ported_bounds_lists_equal(bound_active_bounds,
ported_active_bounds)
assert are_bound_ported_ring_managers_equal(bound, ported)
| true
| true
|
1c4ab8eaeea9b0696f105daca4407d3d104a98ea
| 7,518
|
py
|
Python
|
train_InfoGAN1.py
|
AnonymousExplorer/Conditional-GANs-Pytorch
|
6c15ec67217156d6f041e34efe29ab62f9ef7c7d
|
[
"MIT"
] | 40
|
2018-12-11T02:14:19.000Z
|
2022-03-19T06:16:26.000Z
|
train_InfoGAN1.py
|
AnonymousExplorer/Conditional-GANs-Pytorch
|
6c15ec67217156d6f041e34efe29ab62f9ef7c7d
|
[
"MIT"
] | null | null | null |
train_InfoGAN1.py
|
AnonymousExplorer/Conditional-GANs-Pytorch
|
6c15ec67217156d6f041e34efe29ab62f9ef7c7d
|
[
"MIT"
] | 19
|
2019-03-21T19:11:14.000Z
|
2022-01-17T05:54:13.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import model
import numpy as np
import pylib
import PIL.Image as Image
import tensorboardX
import torch
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as tforms
import torchlib
# ==============================================================================
# = param =
# ==============================================================================
# command line arguments
parser = argparse.ArgumentParser()
# model
parser.add_argument('--z_dim', dest='z_dim', type=int, default=100)
# training
parser.add_argument('--epoch', dest='epoch', type=int, default=50)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
parser.add_argument('--d_learning_rate', dest='d_learning_rate', type=float, default=0.0002)
parser.add_argument('--g_learning_rate', dest='g_learning_rate', type=float, default=0.001)
parser.add_argument('--n_d', dest='n_d', type=int, help='# of d updates per g update', default=1)
parser.add_argument('--loss_mode', dest='loss_mode', choices=['gan', 'lsgan', 'wgan', 'hinge_v1', 'hinge_v2'], default='hinge_v2')
parser.add_argument('--gp_mode', dest='gp_mode', choices=['none', 'dragan', 'wgan-gp'], default='none')
parser.add_argument('--gp_coef', dest='gp_coef', type=float, default=1.0)
parser.add_argument('--norm', dest='norm', choices=['none', 'batch_norm', 'instance_norm'], default='none')
parser.add_argument('--weight_norm', dest='weight_norm', choices=['none', 'spectral_norm', 'weight_norm'], default='spectral_norm')
# others
parser.add_argument('--experiment_name', dest='experiment_name', default='InfoGAN1_default')
# parse arguments
args = parser.parse_args()
# model
z_dim = args.z_dim
# training
epoch = args.epoch
batch_size = args.batch_size
d_learning_rate = args.d_learning_rate
g_learning_rate = args.g_learning_rate
n_d = args.n_d
loss_mode = args.loss_mode
gp_mode = args.gp_mode
gp_coef = args.gp_coef
norm = args.norm
weight_norm = args.weight_norm
# ohters
experiment_name = args.experiment_name
# save settings
pylib.mkdir('./output/%s' % experiment_name)
with open('./output/%s/setting.txt' % experiment_name, 'w') as f:
f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))
# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
c_dim = 10
# ==============================================================================
# = setting =
# ==============================================================================
# data
transform = tforms.Compose(
[tforms.Scale(size=(32, 32), interpolation=Image.BICUBIC),
tforms.ToTensor(),
tforms.Lambda(lambda x: torch.cat((x, x, x), dim=0)),
tforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
)
train_loader = torch.utils.data.DataLoader(
dataset=dsets.FashionMNIST('data/FashionMNIST', train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=use_gpu,
drop_last=True
)
# model
D = model.DiscriminatorInfoGAN1(x_dim=3, c_dim=c_dim, norm=norm, weight_norm=weight_norm).to(device)
G = model.GeneratorInfoGAN1(z_dim=z_dim, c_dim=c_dim).to(device)
# gan loss function
d_loss_fn, g_loss_fn = model.get_losses_fn(loss_mode)
# optimizer
d_optimizer = torch.optim.Adam(D.parameters(), lr=d_learning_rate, betas=(0.5, 0.999))
g_optimizer = torch.optim.Adam(G.parameters(), lr=g_learning_rate, betas=(0.5, 0.999))
# ==============================================================================
# = train =
# ==============================================================================
# load checkpoint
ckpt_dir = './output/%s/checkpoints' % experiment_name
pylib.mkdir(ckpt_dir)
try:
ckpt = torchlib.load_checkpoint(ckpt_dir)
start_ep = ckpt['epoch']
D.load_state_dict(ckpt['D'])
G.load_state_dict(ckpt['G'])
d_optimizer.load_state_dict(ckpt['d_optimizer'])
g_optimizer.load_state_dict(ckpt['g_optimizer'])
except:
print(' [*] No checkpoint!')
start_ep = 0
# writer
writer = tensorboardX.SummaryWriter('./output/%s/summaries' % experiment_name)
# run
z_sample = torch.randn(c_dim * 10, z_dim).to(device)
c_sample = torch.tensor(np.concatenate([np.eye(c_dim)] * 10), dtype=z_sample.dtype).to(device)
for ep in range(start_ep, epoch):
for i, (x, _) in enumerate(train_loader):
step = ep * len(train_loader) + i + 1
D.train()
G.train()
# train D and Q
x = x.to(device)
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
z = torch.randn(batch_size, z_dim).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
x_f = G(z, c).detach()
x_gan_logit, _ = D(x)
x_f_gan_logit, x_f_c_logit = D(x_f)
d_x_gan_loss, d_x_f_gan_loss = d_loss_fn(x_gan_logit, x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
gp = model.gradient_penalty(D, x, x_f, mode=gp_mode)
d_loss = d_x_gan_loss + d_x_f_gan_loss + gp * gp_coef + d_x_f_c_logit
D.zero_grad()
d_loss.backward()
d_optimizer.step()
writer.add_scalar('D/d_gan_loss', (d_x_gan_loss + d_x_f_gan_loss).data.cpu().numpy(), global_step=step)
writer.add_scalar('D/d_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
writer.add_scalar('D/gp', gp.data.cpu().numpy(), global_step=step)
# train G
if step % n_d == 0:
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
z = torch.randn(batch_size, z_dim).to(device)
x_f = G(z, c)
x_f_gan_logit, x_f_c_logit = D(x_f)
g_gan_loss = g_loss_fn(x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
g_loss = g_gan_loss + d_x_f_c_logit
G.zero_grad()
g_loss.backward()
g_optimizer.step()
writer.add_scalar('G/g_gan_loss', g_gan_loss.data.cpu().numpy(), global_step=step)
writer.add_scalar('G/g_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
# display
if step % 1 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (ep, i + 1, len(train_loader)))
# sample
if step % 100 == 0:
G.eval()
x_f_sample = (G(z_sample, c_sample) + 1) / 2.0
save_dir = './output/%s/sample_training' % experiment_name
pylib.mkdir(save_dir)
torchvision.utils.save_image(x_f_sample, '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, ep, i + 1, len(train_loader)), nrow=10)
torchlib.save_checkpoint({'epoch': ep + 1,
'D': D.state_dict(),
'G': G.state_dict(),
'd_optimizer': d_optimizer.state_dict(),
'g_optimizer': g_optimizer.state_dict()},
'%s/Epoch_(%d).ckpt' % (ckpt_dir, ep + 1),
max_keep=2)
| 38.357143
| 134
| 0.600958
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import model
import numpy as np
import pylib
import PIL.Image as Image
import tensorboardX
import torch
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as tforms
import torchlib
parser = argparse.ArgumentParser()
parser.add_argument('--z_dim', dest='z_dim', type=int, default=100)
parser.add_argument('--epoch', dest='epoch', type=int, default=50)
parser.add_argument('--batch_size', dest='batch_size', type=int, default=64)
parser.add_argument('--d_learning_rate', dest='d_learning_rate', type=float, default=0.0002)
parser.add_argument('--g_learning_rate', dest='g_learning_rate', type=float, default=0.001)
parser.add_argument('--n_d', dest='n_d', type=int, help='# of d updates per g update', default=1)
parser.add_argument('--loss_mode', dest='loss_mode', choices=['gan', 'lsgan', 'wgan', 'hinge_v1', 'hinge_v2'], default='hinge_v2')
parser.add_argument('--gp_mode', dest='gp_mode', choices=['none', 'dragan', 'wgan-gp'], default='none')
parser.add_argument('--gp_coef', dest='gp_coef', type=float, default=1.0)
parser.add_argument('--norm', dest='norm', choices=['none', 'batch_norm', 'instance_norm'], default='none')
parser.add_argument('--weight_norm', dest='weight_norm', choices=['none', 'spectral_norm', 'weight_norm'], default='spectral_norm')
parser.add_argument('--experiment_name', dest='experiment_name', default='InfoGAN1_default')
args = parser.parse_args()
z_dim = args.z_dim
epoch = args.epoch
batch_size = args.batch_size
d_learning_rate = args.d_learning_rate
g_learning_rate = args.g_learning_rate
n_d = args.n_d
loss_mode = args.loss_mode
gp_mode = args.gp_mode
gp_coef = args.gp_coef
norm = args.norm
weight_norm = args.weight_norm
experiment_name = args.experiment_name
pylib.mkdir('./output/%s' % experiment_name)
with open('./output/%s/setting.txt' % experiment_name, 'w') as f:
f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
c_dim = 10
transform = tforms.Compose(
[tforms.Scale(size=(32, 32), interpolation=Image.BICUBIC),
tforms.ToTensor(),
tforms.Lambda(lambda x: torch.cat((x, x, x), dim=0)),
tforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
)
train_loader = torch.utils.data.DataLoader(
dataset=dsets.FashionMNIST('data/FashionMNIST', train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=use_gpu,
drop_last=True
)
D = model.DiscriminatorInfoGAN1(x_dim=3, c_dim=c_dim, norm=norm, weight_norm=weight_norm).to(device)
G = model.GeneratorInfoGAN1(z_dim=z_dim, c_dim=c_dim).to(device)
d_loss_fn, g_loss_fn = model.get_losses_fn(loss_mode)
d_optimizer = torch.optim.Adam(D.parameters(), lr=d_learning_rate, betas=(0.5, 0.999))
g_optimizer = torch.optim.Adam(G.parameters(), lr=g_learning_rate, betas=(0.5, 0.999))
ckpt_dir = './output/%s/checkpoints' % experiment_name
pylib.mkdir(ckpt_dir)
try:
ckpt = torchlib.load_checkpoint(ckpt_dir)
start_ep = ckpt['epoch']
D.load_state_dict(ckpt['D'])
G.load_state_dict(ckpt['G'])
d_optimizer.load_state_dict(ckpt['d_optimizer'])
g_optimizer.load_state_dict(ckpt['g_optimizer'])
except:
print(' [*] No checkpoint!')
start_ep = 0
writer = tensorboardX.SummaryWriter('./output/%s/summaries' % experiment_name)
z_sample = torch.randn(c_dim * 10, z_dim).to(device)
c_sample = torch.tensor(np.concatenate([np.eye(c_dim)] * 10), dtype=z_sample.dtype).to(device)
for ep in range(start_ep, epoch):
for i, (x, _) in enumerate(train_loader):
step = ep * len(train_loader) + i + 1
D.train()
G.train()
x = x.to(device)
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
z = torch.randn(batch_size, z_dim).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
x_f = G(z, c).detach()
x_gan_logit, _ = D(x)
x_f_gan_logit, x_f_c_logit = D(x_f)
d_x_gan_loss, d_x_f_gan_loss = d_loss_fn(x_gan_logit, x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
gp = model.gradient_penalty(D, x, x_f, mode=gp_mode)
d_loss = d_x_gan_loss + d_x_f_gan_loss + gp * gp_coef + d_x_f_c_logit
D.zero_grad()
d_loss.backward()
d_optimizer.step()
writer.add_scalar('D/d_gan_loss', (d_x_gan_loss + d_x_f_gan_loss).data.cpu().numpy(), global_step=step)
writer.add_scalar('D/d_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
writer.add_scalar('D/gp', gp.data.cpu().numpy(), global_step=step)
if step % n_d == 0:
c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)
c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)
z = torch.randn(batch_size, z_dim).to(device)
x_f = G(z, c)
x_f_gan_logit, x_f_c_logit = D(x_f)
g_gan_loss = g_loss_fn(x_f_gan_logit)
d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)
g_loss = g_gan_loss + d_x_f_c_logit
G.zero_grad()
g_loss.backward()
g_optimizer.step()
writer.add_scalar('G/g_gan_loss', g_gan_loss.data.cpu().numpy(), global_step=step)
writer.add_scalar('G/g_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)
if step % 1 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (ep, i + 1, len(train_loader)))
if step % 100 == 0:
G.eval()
x_f_sample = (G(z_sample, c_sample) + 1) / 2.0
save_dir = './output/%s/sample_training' % experiment_name
pylib.mkdir(save_dir)
torchvision.utils.save_image(x_f_sample, '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, ep, i + 1, len(train_loader)), nrow=10)
torchlib.save_checkpoint({'epoch': ep + 1,
'D': D.state_dict(),
'G': G.state_dict(),
'd_optimizer': d_optimizer.state_dict(),
'g_optimizer': g_optimizer.state_dict()},
'%s/Epoch_(%d).ckpt' % (ckpt_dir, ep + 1),
max_keep=2)
| true
| true
|
1c4abab21c61cff4175293662effd5d6d19d1025
| 715
|
py
|
Python
|
test.py
|
Yvictor/simdjson
|
2e43ea714a75def3b55f0d6033acb36e31c6497b
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Yvictor/simdjson
|
2e43ea714a75def3b55f0d6033acb36e31c6497b
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
Yvictor/simdjson
|
2e43ea714a75def3b55f0d6033acb36e31c6497b
|
[
"Apache-2.0"
] | null | null | null |
import sjson
import json
import pytest

# Fixture corpus: every entry is a JSON document that sjson.loads() must
# parse identically to the stdlib reference parser (scalars, booleans,
# null, strings, arrays, and nested objects).
test_case = [
    '{"a": 1}',
    '{"a": 1.1}',
    '{"a": null}',
    '{"a": "string test"}',
    '{"a": true}',
    '{"a": false}',
    '{"a": 1, "b": 2}',
    '{"a": 1, "b": 2, "c": 3}',
    '{"a": 1, "b": 2, "c": 3, "d": 1.1}',
    '{"a": [1, 1.1], "b": 2}',
    '{"a": [1, 1.1], "b": {"nest": "a"} }',
    '{"a": [1, 1.1], "b": {"nest": [1, 3, 5]} }',
    '{"a": [1, 1.1], "b": {"nest": {"d": 1} } }',
    '{"a": [1, 1.1], "b": {"nest": {"d": [1, 3, 2.1]} } }',
]


@pytest.mark.parametrize('json_string', test_case)
def test_json_loads(json_string):
    """sjson must agree with the stdlib json parser on every fixture."""
    expected = json.loads(json_string)
    assert sjson.loads(json_string) == expected


if __name__ == "__main__":
    pytest.main([__file__])
| 24.655172
| 62
| 0.418182
|
import sjson
import json
import pytest
test_case = [
'{"a": 1}',
'{"a": 1.1}',
'{"a": null}',
'{"a": "string test"}',
'{"a": true}',
'{"a": false}',
'{"a": 1, "b": 2}',
'{"a": 1, "b": 2, "c": 3}',
'{"a": 1, "b": 2, "c": 3, "d": 1.1}',
'{"a": [1, 1.1], "b": 2}',
'{"a": [1, 1.1], "b": {"nest": "a"} }',
'{"a": [1, 1.1], "b": {"nest": [1, 3, 5]} }',
'{"a": [1, 1.1], "b": {"nest": {"d": 1} } }',
'{"a": [1, 1.1], "b": {"nest": {"d": [1, 3, 2.1]} } }',
]
@pytest.mark.parametrize('json_string', test_case)
def test_json_loads(json_string):
assert sjson.loads(json_string) == json.loads(json_string)
if __name__ == "__main__":
pytest.main([__file__])
| true
| true
|
1c4abbc45219aa0b02fb8ac79f287143752f95fa
| 2,004
|
py
|
Python
|
netket/hilbert/random/particle.py
|
VolodyaCO/netket
|
629e885212d981d7748d155310abca4a1f9d5481
|
[
"Apache-2.0"
] | null | null | null |
netket/hilbert/random/particle.py
|
VolodyaCO/netket
|
629e885212d981d7748d155310abca4a1f9d5481
|
[
"Apache-2.0"
] | 26
|
2021-08-06T15:27:57.000Z
|
2022-03-30T16:55:18.000Z
|
netket/hilbert/random/particle.py
|
VolodyaCO/netket
|
629e885212d981d7748d155310abca4a1f9d5481
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
from jax import numpy as jnp
from netket.hilbert import Particle
from netket.utils.dispatch import dispatch
@dispatch
def random_state(hilb: Particle, key, batches: int, *, dtype):
    """Draw a batch of random particle configurations.

    Along non-periodic dimensions, positions come from a standard normal.
    Along periodic dimensions, particles are spread evenly over
    [0, min(L)) and perturbed with small Gaussian noise, then wrapped
    modulo the box length so they stay inside the box.
    """
    per_dof_pbc = jnp.array(hilb.n_particles * hilb.pbc)
    batched_pbc = jnp.tile(per_dof_pbc, (batches, 1))
    box_lengths = jnp.array(hilb.n_particles * hilb.extent)
    # Non-periodic dimensions get an infinite "box" so the modulo is a no-op.
    modulus = jnp.where(jnp.equal(per_dof_pbc, False), jnp.inf, box_lengths)
    gaussian = jax.random.normal(key, shape=(batches, hilb.size))
    # Noise width = a quarter of the even spacing min(L) / N, which keeps
    # neighbouring particles from landing on coincident positions.
    width = jnp.min(modulus) / (4.0 * hilb.n_particles)
    noise = gaussian * width
    uniform = jnp.tile(jnp.linspace(0.0, jnp.min(modulus), hilb.size), (batches, 1))
    rs = jnp.where(jnp.equal(batched_pbc, False), gaussian, (uniform + noise) % modulus)
    return jnp.asarray(rs, dtype=dtype)
| 41.75
| 85
| 0.722056
|
import jax
from jax import numpy as jnp
from netket.hilbert import Particle
from netket.utils.dispatch import dispatch
@dispatch
def random_state(hilb: Particle, key, batches: int, *, dtype):
pbc = jnp.array(hilb.n_particles * hilb.pbc)
boundary = jnp.tile(pbc, (batches, 1))
Ls = jnp.array(hilb.n_particles * hilb.extent)
modulus = jnp.where(jnp.equal(pbc, False), jnp.inf, Ls)
gaussian = jax.random.normal(key, shape=(batches, hilb.size))
width = jnp.min(modulus) / (4.0 * hilb.n_particles)
noise = gaussian * width
uniform = jnp.tile(jnp.linspace(0.0, jnp.min(modulus), hilb.size), (batches, 1))
rs = jnp.where(jnp.equal(boundary, False), gaussian, (uniform + noise) % modulus)
return jnp.asarray(rs, dtype=dtype)
| true
| true
|
1c4abfe636b358eb142c79f641327426a2e082d1
| 1,273
|
py
|
Python
|
pychron/furnace/firmware/__init__.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/furnace/firmware/__init__.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/furnace/firmware/__init__.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
# Firmware protocol version string.
__version__ = "1.0"
# Maps a readable parameter name to the 3-digit register address used when
# talking to the furnace firmware.
# NOTE(review): addresses presumably mirror the firmware's register map --
# confirm against the device firmware before changing any of them.
PARAMETER_REGISTRY = {
    "furnace_env_humidity": "001",
    "furnace_env_temperature": "002",
    "furnace_setpoint": "003",
    "furnace_process_value": "004",
    "feeder_position": "021",
    "funnel_position": "031",
    "switch_status": "041",
}
# ============= EOF =============================================
| 38.575758
| 81
| 0.533386
|
__version__ = "1.0"
PARAMETER_REGISTRY = {
"furnace_env_humidity": "001",
"furnace_env_temperature": "002",
"furnace_setpoint": "003",
"furnace_process_value": "004",
"feeder_position": "021",
"funnel_position": "031",
"switch_status": "041",
}
| true
| true
|
1c4ac00f27d955404c36f607e418e6eb54dace2d
| 199
|
py
|
Python
|
ui/mock_keyboard.py
|
amirhertz/SPAGHETTI
|
660c4a565846090f73c3cadc3619255bca50d14f
|
[
"MIT"
] | 10
|
2022-02-03T06:19:13.000Z
|
2022-03-29T12:32:19.000Z
|
ui/mock_keyboard.py
|
amirhertz/SPAGHETTI
|
660c4a565846090f73c3cadc3619255bca50d14f
|
[
"MIT"
] | null | null | null |
ui/mock_keyboard.py
|
amirhertz/SPAGHETTI
|
660c4a565846090f73c3cadc3619255bca50d14f
|
[
"MIT"
] | null | null | null |
class Key:
    # Stand-in for pynput.keyboard.Key; only the left-Control key name
    # is exposed here.
    ctrl_l = 'control_l'
class Controller:
    """No-op stand-in for pynput.keyboard.Controller.

    Both methods simply echo the key argument back instead of emitting
    real keyboard events.
    """

    @staticmethod
    def press(key: str) -> str:
        """Pretend to press *key*; returns it unchanged."""
        return key

    @staticmethod
    def release(key: str) -> str:
        """Pretend to release *key*; returns it unchanged."""
        return key
| 13.266667
| 33
| 0.582915
|
class Key:
ctrl_l = 'control_l'
class Controller:
@staticmethod
def press(key: str) -> str:
return key
@staticmethod
def release(key: str) -> str:
return key
| true
| true
|
1c4ac0d7a55d4170cae7b7ec8b9808ffa64edcae
| 910
|
py
|
Python
|
dsvfile/Models/FactorySystem/InserterComponent.py
|
phoenixx-666/dsvread
|
8a073c12343b2f0d34f9b728282dfefe10999f24
|
[
"MIT"
] | 2
|
2021-03-01T19:57:20.000Z
|
2021-08-02T20:54:48.000Z
|
dsvfile/Models/FactorySystem/InserterComponent.py
|
phoenixx-666/dsvread
|
8a073c12343b2f0d34f9b728282dfefe10999f24
|
[
"MIT"
] | null | null | null |
dsvfile/Models/FactorySystem/InserterComponent.py
|
phoenixx-666/dsvread
|
8a073c12343b2f0d34f9b728282dfefe10999f24
|
[
"MIT"
] | null | null | null |
from ...Fields import Int16Field, FloatField, BoolField
from ...Fields.Enums import EInserterStage, EItem
from . import Model, Int32Field
class InserterComponent(Model):
    """Serialized state of one inserter (sorter) entity in a save file.

    NOTE(review): declaration order presumably mirrors the on-disk binary
    layout consumed by the Field descriptors -- do not reorder fields.
    Field meanings are inferred from their names; confirm against the
    save-format documentation.
    """
    version = Int32Field()
    id = Int32Field()
    entityId = Int32Field()
    pcId = Int32Field()
    stage = EInserterStage()
    speed = Int32Field()
    time = Int32Field()
    stt = Int32Field()
    delay = Int32Field()
    # Ids of the entities the inserter picks from / inserts into.
    pickTarget = Int32Field()
    insertTarget = Int32Field()
    careNeeds = BoolField()
    canStack = BoolField()
    pickOffset = Int16Field()
    insertOffset = Int16Field()
    filter = Int32Field()
    # Currently carried item and stacking bookkeeping.
    itemId = EItem()
    stackCount = Int32Field()
    stackSize = Int32Field()
    # Position vector (pos2_*) and orientation quaternion (rot2_*).
    pos2_x = FloatField()
    pos2_y = FloatField()
    pos2_z = FloatField()
    rot2_x = FloatField()
    rot2_y = FloatField()
    rot2_z = FloatField()
    rot2_w = FloatField()
    t1 = Int16Field()
    t2 = Int16Field()
| 26
| 55
| 0.653846
|
from ...Fields import Int16Field, FloatField, BoolField
from ...Fields.Enums import EInserterStage, EItem
from . import Model, Int32Field
class InserterComponent(Model):
version = Int32Field()
id = Int32Field()
entityId = Int32Field()
pcId = Int32Field()
stage = EInserterStage()
speed = Int32Field()
time = Int32Field()
stt = Int32Field()
delay = Int32Field()
pickTarget = Int32Field()
insertTarget = Int32Field()
careNeeds = BoolField()
canStack = BoolField()
pickOffset = Int16Field()
insertOffset = Int16Field()
filter = Int32Field()
itemId = EItem()
stackCount = Int32Field()
stackSize = Int32Field()
pos2_x = FloatField()
pos2_y = FloatField()
pos2_z = FloatField()
rot2_x = FloatField()
rot2_y = FloatField()
rot2_z = FloatField()
rot2_w = FloatField()
t1 = Int16Field()
t2 = Int16Field()
| true
| true
|
1c4ac1528db9a11fa760116b25f5da776d7843b1
| 7,422
|
py
|
Python
|
ncappzoo/apps/object-detector/object-detector.py
|
yockgen/movidius
|
cc32f1951a4d00d2250bb0d2b9000c5f2435b41a
|
[
"MIT"
] | null | null | null |
ncappzoo/apps/object-detector/object-detector.py
|
yockgen/movidius
|
cc32f1951a4d00d2250bb0d2b9000c5f2435b41a
|
[
"MIT"
] | null | null | null |
ncappzoo/apps/object-detector/object-detector.py
|
yockgen/movidius
|
cc32f1951a4d00d2250bb0d2b9000c5f2435b41a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# ****************************************************************************
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# ****************************************************************************
# How to run Single Shot Multibox Detectors (SSD)
# on Intel® Movidius™ Neural Compute Stick (NCS)
import os
import sys
import numpy as np
import ntpath
import argparse
import skimage.io
import skimage.transform
import mvnc.mvncapi as mvnc
from utils import visualize_output
from utils import deserialize_output
# Detection threshold: Minimum confidance to tag as valid detection
CONFIDANCE_THRESHOLD = 0.60 # 60% confidant
# Variable to store commandline arguments
ARGS = None
# ---- Step 1: Open the enumerated device and get a handle to it -------------
def open_ncs_device():
    """Enumerate NCS sticks and return an opened handle to the first one.

    Exits the program when no stick is attached.
    """
    ncs_sticks = mvnc.EnumerateDevices()
    if not ncs_sticks:
        print( "No devices found" )
        quit()
    device = mvnc.Device( ncs_sticks[0] )
    device.OpenDevice()
    return device
# ---- Step 2: Load a graph file onto the NCS device -------------------------
def load_graph( device ):
    """Read the compiled graph file (ARGS.graph) and allocate it on *device*."""
    with open( ARGS.graph, mode='rb' ) as graph_file:
        graph_blob = graph_file.read()
    return device.AllocateGraph( graph_blob )
# ---- Step 3: Pre-process the images ----------------------------------------
def pre_process_image( img_draw ):
    """Resize, channel-reorder and mean/scale-normalize one image.

    Target dimensions, color order, mean and scale all come from ARGS.
    """
    resized = skimage.transform.resize( img_draw, ARGS.dim, preserve_range=True )
    # Caffe-trained networks typically expect BGR channel order.
    if ARGS.colormode == "bgr":
        resized = resized[:, :, ::-1]
    # Mean subtraction and scaling in the half-precision dtype the NCS expects.
    normalized = ( resized.astype( np.float16 ) - np.float16( ARGS.mean ) ) * ARGS.scale
    return normalized
# ---- Step 4: Read & print inference results from the NCS -------------------
def infer_image( graph, img ):
    """Run inference on pre-processed `img`, print each detection, and draw
    bounding boxes on the original image (shown when a display is available).

    Relies on module globals: ARGS, CONFIDANCE_THRESHOLD and labels.
    """
    # Read original image, so we can perform visualization ops on it
    img_draw = skimage.io.imread( ARGS.image )
    # The first inference takes an additional ~20ms due to memory
    # initializations, so we make a 'dummy forward pass'.
    graph.LoadTensor( img, 'user object' )
    output, userobj = graph.GetResult()
    # Load the image as a half-precision floating point array
    graph.LoadTensor( img, 'user object' )
    # Get the results from NCS
    output, userobj = graph.GetResult()
    # Get execution time
    inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )
    # Deserialize the output into a python dictionary
    if ARGS.network == 'SSD':
        output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
    elif ARGS.network == 'TinyYolo':
        output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )
    # Print the results
    print( "\n==============================================================" )
    print( "I found these objects in", ntpath.basename( ARGS.image ) )
    print( "Execution time: " + str( np.sum( inference_time ) ) + "ms" )
    print( "--------------------------------------------------------------" )
    for i in range( 0, output_dict['num_detections'] ):
        print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
               + labels[ int(output_dict['detection_classes_' + str(i)]) ]
               + ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
               + " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )
        # Draw bounding boxes around valid detections
        (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
        (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
        # Prep string to overlay on the image
        display_str = (
            labels[output_dict.get('detection_classes_' + str(i))]
            + ": "
            + str( output_dict.get('detection_scores_' + str(i) ) )
            + "%" )
        img_draw = visualize_output.draw_bounding_box(
            y1, x1, y2, x2,
            img_draw,
            thickness=4,
            color=(255, 255, 0),
            display_str=display_str )
    print( "==============================================================\n" )
    # If a display is available, show the image on which inference was performed
    if 'DISPLAY' in os.environ:
        skimage.io.imshow( img_draw )
        skimage.io.show()
# ---- Step 5: Unload the graph and close the device -------------------------
def close_ncs_device( device, graph ):
    """Free the graph memory on the stick, then release the device handle."""
    graph.DeallocateGraph()
    device.CloseDevice()
# ---- Main function (entry point for this script ) --------------------------
def main():
    """Full pipeline: open device, load graph, pre-process, infer, clean up."""
    device = open_ncs_device()
    graph = load_graph( device )
    original_image = skimage.io.imread( ARGS.image )
    preprocessed = pre_process_image( original_image )
    infer_image( graph, preprocessed )
    close_ncs_device( device, graph )
# ---- Define 'main' function as the entry point for this script -------------
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
                         description="Object detection using SSD on \
                            Intel® Movidius™ Neural Compute Stick." )
    parser.add_argument( '-n', '--network', type=str,
                         default='SSD',
                         help="network name: SSD or TinyYolo." )
    parser.add_argument( '-g', '--graph', type=str,
                         default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/graph',
                         help="Absolute path to the neural network graph file." )
    parser.add_argument( '-i', '--image', type=str,
                         default='../../data/images/nps_chair.png',
                         help="Absolute path to the image that needs to be inferred." )
    parser.add_argument( '-l', '--labels', type=str,
                         default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/labels.txt',
                         help="Absolute path to labels file." )
    parser.add_argument( '-M', '--mean', type=float,
                         nargs='+',
                         default=[127.5, 127.5, 127.5],
                         help="',' delimited floating point values for image mean." )
    # Fix: the original help text here was copy-pasted from --labels.
    parser.add_argument( '-S', '--scale', type=float,
                         default=0.00789,
                         help="Scale factor applied to the image after mean subtraction." )
    parser.add_argument( '-D', '--dim', type=int,
                         nargs='+',
                         default=[300, 300],
                         help="Image dimensions. ex. -D 224 224" )
    parser.add_argument( '-c', '--colormode', type=str,
                         default="bgr",
                         help="RGB vs BGR color sequence. This is network dependent." )
    ARGS = parser.parse_args()
    # Load the class labels, skipping the leading 'classes' header line.
    labels =[ line.rstrip('\n') for line in
              open( ARGS.labels ) if line != 'classes\n']
    main()
| 35.511962
| 97
| 0.551603
|
import os
import sys
import numpy as np
import ntpath
import argparse
import skimage.io
import skimage.transform
import mvnc.mvncapi as mvnc
from utils import visualize_output
from utils import deserialize_output
CONFIDANCE_THRESHOLD = 0.60
ARGS = None
def open_ncs_device():
devices = mvnc.EnumerateDevices()
if len( devices ) == 0:
print( "No devices found" )
quit()
device = mvnc.Device( devices[0] )
device.OpenDevice()
return device
def load_graph( device ):
with open( ARGS.graph, mode='rb' ) as f:
blob = f.read()
graph = device.AllocateGraph( blob )
return graph
def pre_process_image( img_draw ):
img = skimage.transform.resize( img_draw, ARGS.dim, preserve_range=True )
if( ARGS.colormode == "bgr" ):
img = img[:, :, ::-1]
img = img.astype( np.float16 )
img = ( img - np.float16( ARGS.mean ) ) * ARGS.scale
return img
def infer_image( graph, img ):
img_draw = skimage.io.imread( ARGS.image )
graph.LoadTensor( img, 'user object' )
output, userobj = graph.GetResult()
graph.LoadTensor( img, 'user object' )
output, userobj = graph.GetResult()
inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )
if ARGS.network == 'SSD':
output_dict = deserialize_output.ssd( output, CONFIDANCE_THRESHOLD, img_draw.shape )
elif ARGS.network == 'TinyYolo':
output_dict = deserialize_output.tinyyolo( output, CONFIDANCE_THRESHOLD, img_draw.shape )
print( "\n==============================================================" )
print( "I found these objects in", ntpath.basename( ARGS.image ) )
print( "Execution time: " + str( np.sum( inference_time ) ) + "ms" )
print( "--------------------------------------------------------------" )
for i in range( 0, output_dict['num_detections'] ):
print( "%3.1f%%\t" % output_dict['detection_scores_' + str(i)]
+ labels[ int(output_dict['detection_classes_' + str(i)]) ]
+ ": Top Left: " + str( output_dict['detection_boxes_' + str(i)][0] )
+ " Bottom Right: " + str( output_dict['detection_boxes_' + str(i)][1] ) )
(y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]
(y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]
display_str = (
labels[output_dict.get('detection_classes_' + str(i))]
+ ": "
+ str( output_dict.get('detection_scores_' + str(i) ) )
+ "%" )
img_draw = visualize_output.draw_bounding_box(
y1, x1, y2, x2,
img_draw,
thickness=4,
color=(255, 255, 0),
display_str=display_str )
print( "==============================================================\n" )
if 'DISPLAY' in os.environ:
skimage.io.imshow( img_draw )
skimage.io.show()
def close_ncs_device( device, graph ):
graph.DeallocateGraph()
device.CloseDevice()
def main():
device = open_ncs_device()
graph = load_graph( device )
img_draw = skimage.io.imread( ARGS.image )
img = pre_process_image( img_draw )
infer_image( graph, img )
close_ncs_device( device, graph )
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Object detection using SSD on \
Intel® Movidius™ Neural Compute Stick." )
parser.add_argument( '-n', '--network', type=str,
default='SSD',
help="network name: SSD or TinyYolo." )
parser.add_argument( '-g', '--graph', type=str,
default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/graph',
help="Absolute path to the neural network graph file." )
parser.add_argument( '-i', '--image', type=str,
default='../../data/images/nps_chair.png',
help="Absolute path to the image that needs to be inferred." )
parser.add_argument( '-l', '--labels', type=str,
default='/home/pi/movidius/ncappzoo/caffe/SSD_MobileNet/labels.txt',
help="Absolute path to labels file." )
parser.add_argument( '-M', '--mean', type=float,
nargs='+',
default=[127.5, 127.5, 127.5],
help="',' delimited floating point values for image mean." )
parser.add_argument( '-S', '--scale', type=float,
default=0.00789,
help="Absolute path to labels file." )
parser.add_argument( '-D', '--dim', type=int,
nargs='+',
default=[300, 300],
help="Image dimensions. ex. -D 224 224" )
parser.add_argument( '-c', '--colormode', type=str,
default="bgr",
help="RGB vs BGR color sequence. This is network dependent." )
ARGS = parser.parse_args()
labels =[ line.rstrip('\n') for line in
open( ARGS.labels ) if line != 'classes\n']
main()
| true
| true
|
1c4ac1901b03408ddb92516d19c818932cbc8832
| 2,161
|
py
|
Python
|
app/auth/forms.py
|
pointerboy/ModHub
|
5b2bdf31bdf409c677e1009f879794f91e636a7b
|
[
"MIT"
] | null | null | null |
app/auth/forms.py
|
pointerboy/ModHub
|
5b2bdf31bdf409c677e1009f879794f91e636a7b
|
[
"MIT"
] | 1
|
2022-01-13T02:52:49.000Z
|
2022-01-13T02:52:49.000Z
|
app/auth/forms.py
|
pointerboy/ModHub
|
5b2bdf31bdf409c677e1009f879794f91e636a7b
|
[
"MIT"
] | null | null | null |
from flask_babel import _, lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from wtforms_validators import AlphaNumeric
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: username/password with a remember-me toggle."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    remember_me = BooleanField(_l('Remember Me'))
    # render_kw passes Bootstrap classes straight through to the <input>.
    submit = SubmitField(_l('Sign In'), render_kw={'class': "btn btn-lg btn-primary btn-block btn-login text-uppercase font-weight-bold mb-2"})
class RegistrationForm(FlaskForm):
    """Account sign-up form with uniqueness checks against the User table."""
    username = StringField(_l('Username'), validators=[Length(min=4), DataRequired(),
                                                       AlphaNumeric()])
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    password = PasswordField(_l('Password'), validators=[Length(min=4), DataRequired()])
    password2 = PasswordField(
        _l('Repeat Password'), validators=[DataRequired(),
                                           EqualTo('password')])
    submit = SubmitField(_l('Make an account!'))

    def validate_username(self, username):
        """WTForms inline validator: reject usernames already taken."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError(_('Please use a different username.'))

    def validate_email(self, email):
        """WTForms inline validator: reject email addresses already registered."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
    """Step 1 of password reset: collect the account's email address."""
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
    """Step 2 of password reset: collect and confirm the new password."""
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password2 = PasswordField(
        _l('Repeat Password'), validators=[DataRequired(),
                                           EqualTo('password')])
    submit = SubmitField(_l('Request Password Reset'))
| 43.22
| 143
| 0.672837
|
from flask_babel import _, lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from wtforms_validators import AlphaNumeric
from app.models import User
class LoginForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'), render_kw={'class': "btn btn-lg btn-primary btn-block btn-login text-uppercase font-weight-bold mb-2"})
class RegistrationForm(FlaskForm):
username = StringField(_l('Username'), validators=[Length(min=4), DataRequired(),
AlphaNumeric()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[Length(min=4), DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Make an account!'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username.'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
password = PasswordField(_l('Password'), validators=[DataRequired()])
password2 = PasswordField(
_l('Repeat Password'), validators=[DataRequired(),
EqualTo('password')])
submit = SubmitField(_l('Request Password Reset'))
| true
| true
|
1c4ac1e423458e0a0a187f8698a9e64231b6a196
| 3,067
|
py
|
Python
|
tube/tests/test_tflAPI.py
|
adamgilman/tube-python
|
3d94e79f7d367eed95ed68b53d0ab13a36cc3219
|
[
"BSD-3-Clause"
] | 5
|
2017-01-26T00:06:08.000Z
|
2020-06-03T16:07:09.000Z
|
tube/tests/test_tflAPI.py
|
adamgilman/tube-python
|
3d94e79f7d367eed95ed68b53d0ab13a36cc3219
|
[
"BSD-3-Clause"
] | null | null | null |
tube/tests/test_tflAPI.py
|
adamgilman/tube-python
|
3d94e79f7d367eed95ed68b53d0ab13a36cc3219
|
[
"BSD-3-Clause"
] | 1
|
2021-11-22T16:23:14.000Z
|
2021-11-22T16:23:14.000Z
|
import unittest
from tube.tflAPI import TFLapi
import vcr
# Record-once cassettes: the first run hits the live API and stores the JSON
# response under fixtures/cassettes; subsequent runs replay it offline.
my_vcr = vcr.VCR(
	serializer = 'json',
	cassette_library_dir = 'tube/tests/fixtures/cassettes',
	record_mode = 'once',
	match_on = ['uri', 'method'],
)
import logging
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from vcrpy
# Silence vcr's own chatter; only errors are interesting during test runs.
vcr_log = logging.getLogger("vcr")
vcr_log.setLevel(logging.ERROR)
class TestTFLapiByURL(unittest.TestCase):
	"""Tests for TFLapi.getDetailed, served from recorded VCR cassettes."""
	def setUp(self):
		# Fresh API client per test; all HTTP goes through my_vcr cassettes.
		self.api = TFLapi()
	def test_VerifyCorrectURLFetched(self):
		# Station/line echoed on the parsed detail confirm the request was
		# routed to the Oxford Circus / Bakerloo endpoint in the cassette.
		with my_vcr.use_cassette('Detail-OXC-B.json'):
			detail = self.api.getDetailed(station="OXC", line="B")
			self.assertEqual(detail.station, "OXC")
			self.assertEqual(detail.line, "B")
	def test_VerifyPlatformsQuantities(self):
		# Camden Town has 4 Northern line platforms.
		with my_vcr.use_cassette('Detail-CTN-N.json'):
			detail = self.api.getDetailed(station="CTN", line="N")
			self.assertEqual(detail.station, "CTN")
			self.assertIsInstance(detail.platforms, list)
			self.assertEqual( len(detail.platforms), 4)
		# Oxford Circus has 2 Bakerloo line platforms.
		with my_vcr.use_cassette('Detail-OXC-B.json'):
			detail = self.api.getDetailed(station="OXC", line="B")
			self.assertEqual(detail.station, "OXC")
			self.assertIsInstance(detail.platforms, list)
			self.assertEqual( len(detail.platforms), 2)
def test_VerifyPlatformsIdentified(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.platforms[0].name, "Northbound - Platform 1")
self.assertEqual(detail.platforms[1].name, "Southbound - Platform 2")
self.assertEqual(detail.platforms[2].name, "Northbound - Platform 3")
self.assertEqual(detail.platforms[3].name, "Southbound - Platform 4")
def test_VerifyTrainsOnPlatforms(self):
#need testcase for no trains on platforms
with my_vcr.use_cassette('Detail-OXC-B(TrainCode).json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertIsInstance(detail.platforms[0].trains, list)
self.assertEqual(detail.platforms[0].trains[0].leadingcar_id, "1031576")
self.assertEqual(detail.platforms[0].trains[0].set_number, "236")
self.assertEqual(detail.platforms[0].trains[0].trip_number, "12")
self.assertEqual(detail.platforms[0].trains[0].arrival_seconds, "24")
self.assertEqual(detail.platforms[0].trains[0].arrival_time, "0:30")
self.assertEqual(detail.platforms[0].trains[0].current_location, "Between Regents Park and Oxford Circus")
self.assertEqual(detail.platforms[0].trains[0].destination, "Elephant and Castle")
self.assertEqual(detail.platforms[0].trains[0].destination_code, "154")
self.assertEqual(detail.platforms[0].trains[0].platform_departure_time, "15:28:23")
self.assertEqual(detail.platforms[0].trains[0].interval_between_previous_train, "24")
self.assertEqual(detail.platforms[0].trains[0].departed_current_station, "0")
self.assertEqual(detail.platforms[0].trains[0].direction, "0")
self.assertEqual(detail.platforms[0].trains[0].track_code, "TB391B")
| 44.449275
| 109
| 0.750245
|
import unittest
from tube.tflAPI import TFLapi
import vcr
my_vcr = vcr.VCR(
serializer = 'json',
cassette_library_dir = 'tube/tests/fixtures/cassettes',
record_mode = 'once',
match_on = ['uri', 'method'],
)
import logging
logging.basicConfig()
vcr_log = logging.getLogger("vcr")
vcr_log.setLevel(logging.ERROR)
class TestTFLapiByURL(unittest.TestCase):
def setUp(self):
self.api = TFLapi()
def test_VerifyCorrectURLFetched(self):
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertEqual(detail.line, "B")
def test_VerifyPlatformsQuantities(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.station, "CTN")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 4)
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 2)
def test_VerifyPlatformsIdentified(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.platforms[0].name, "Northbound - Platform 1")
self.assertEqual(detail.platforms[1].name, "Southbound - Platform 2")
self.assertEqual(detail.platforms[2].name, "Northbound - Platform 3")
self.assertEqual(detail.platforms[3].name, "Southbound - Platform 4")
def test_VerifyTrainsOnPlatforms(self):
with my_vcr.use_cassette('Detail-OXC-B(TrainCode).json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertIsInstance(detail.platforms[0].trains, list)
self.assertEqual(detail.platforms[0].trains[0].leadingcar_id, "1031576")
self.assertEqual(detail.platforms[0].trains[0].set_number, "236")
self.assertEqual(detail.platforms[0].trains[0].trip_number, "12")
self.assertEqual(detail.platforms[0].trains[0].arrival_seconds, "24")
self.assertEqual(detail.platforms[0].trains[0].arrival_time, "0:30")
self.assertEqual(detail.platforms[0].trains[0].current_location, "Between Regents Park and Oxford Circus")
self.assertEqual(detail.platforms[0].trains[0].destination, "Elephant and Castle")
self.assertEqual(detail.platforms[0].trains[0].destination_code, "154")
self.assertEqual(detail.platforms[0].trains[0].platform_departure_time, "15:28:23")
self.assertEqual(detail.platforms[0].trains[0].interval_between_previous_train, "24")
self.assertEqual(detail.platforms[0].trains[0].departed_current_station, "0")
self.assertEqual(detail.platforms[0].trains[0].direction, "0")
self.assertEqual(detail.platforms[0].trains[0].track_code, "TB391B")
| true
| true
|
1c4ac264208d85dfc358c39ac2b842a93a43d268
| 2,008
|
py
|
Python
|
nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | 1
|
2020-07-24T14:14:13.000Z
|
2020-07-24T14:14:13.000Z
|
nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | 1
|
2020-07-24T10:40:59.000Z
|
2020-07-24T10:40:59.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime, BigInteger
from sqlalchemy import MetaData, Integer, String, Table
from nova import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
    """Add a nullable ``uuid`` column to bw_usage_cache.

    Existing rows are purged first: they predate the column and would
    otherwise linger with no uuid value.
    """
    metadata = MetaData()
    metadata.bind = migrate_engine
    table = Table('bw_usage_cache', metadata, autoload=True)
    # Clear the cache so no entry is left without a uuid.
    migrate_engine.execute(table.delete())
    table.create_column(Column('uuid', String(36)))
def downgrade(migrate_engine):
    """Drop the ``uuid`` column from bw_usage_cache again."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    # Redeclare the table explicitly (instead of autoloading) so the drop
    # works against the known post-upgrade schema.
    columns = [
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('mac', String(255)),
        Column('uuid', String(36)),
        Column('start_period', DateTime(timezone=False), nullable=False),
        Column('last_refreshed', DateTime(timezone=False)),
        Column('bw_in', BigInteger()),
        Column('bw_out', BigInteger()),
    ]
    table = Table('bw_usage_cache', metadata, *columns, useexisting=True)
    table.drop_column('uuid')
| 34.033898
| 78
| 0.697709
|
from sqlalchemy import Boolean, Column, DateTime, BigInteger
from sqlalchemy import MetaData, Integer, String, Table
from nova import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
uuid = Column('uuid', String(36))
migrate_engine.execute(bw_usage_cache.delete())
bw_usage_cache.create_column(uuid)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('mac', String(255)),
Column('uuid', String(36)),
Column('start_period', DateTime(timezone=False), nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()),
useexisting=True)
bw_usage_cache.drop_column('uuid')
| true
| true
|
1c4ac32fd1e2599ccd47b8d66ea9daef42b250e7
| 2,232
|
py
|
Python
|
tests/test_weighting.py
|
DimitrisAlivas/StarQE
|
c17676e5f1e3f19c0c4c117a50abe2ce22ffef28
|
[
"MIT"
] | 11
|
2021-06-17T15:01:36.000Z
|
2022-02-04T16:48:27.000Z
|
tests/test_weighting.py
|
DimitrisAlivas/StarQE
|
c17676e5f1e3f19c0c4c117a50abe2ce22ffef28
|
[
"MIT"
] | null | null | null |
tests/test_weighting.py
|
DimitrisAlivas/StarQE
|
c17676e5f1e3f19c0c4c117a50abe2ce22ffef28
|
[
"MIT"
] | 1
|
2022-03-28T03:55:33.000Z
|
2022-03-28T03:55:33.000Z
|
"""Tests for weighting."""
from typing import Any, MutableMapping
import torch
import unittest_templates
from mphrqe.layer.weighting import AttentionMessageWeighting, MessageWeighting, SymmetricMessageWeighting
class MessageWeightingTests(unittest_templates.GenericTestCase[MessageWeighting]):
    """Tests for message weighting."""

    # Sizes of the synthetic graph used by the forward-pass test.
    num_entities: int = 33
    num_edges: int = 101
    dim: int = 3

    def test_forward(self):
        """Run one forward pass and check output types, shapes and gradients."""
        entity_repr = torch.rand(self.num_entities, self.dim)
        edges = torch.randint(self.num_entities, size=(2, self.num_edges))
        messages = torch.rand(self.num_edges, self.dim, requires_grad=True)
        result = self.instance(
            edge_index=edges,
            message=messages,
            x_e=entity_repr,
        )
        # The layer must return a (message, weight) pair of tensors.
        assert isinstance(result, tuple)
        assert len(result) == 2
        out_message, out_weight = result
        for tensor in (out_message, out_weight):
            assert torch.is_tensor(tensor)
            assert tensor.shape[0] == self.num_edges
        # Weighted messages must stay differentiable end-to-end.
        weighted = out_message * out_weight.unsqueeze(dim=-1)
        weighted.mean().backward()
class SymmetricMessageWeightingTests(MessageWeightingTests):
    """Tests for static symmetric message weighting."""
    # Class under test; all behavior comes from MessageWeightingTests.
    cls = SymmetricMessageWeighting
class AttentionMessageWeightingTests(MessageWeightingTests):
    """Tests for message weighting by attention."""

    cls = AttentionMessageWeighting
    # The dimension must be divisible by the number of attention heads.
    dim = 8
    num_heads = 2

    def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]:  # noqa: D102
        kwargs = super()._pre_instantiation_hook(kwargs=kwargs)
        # Force an output dimension the attention heads divide evenly.
        kwargs.update(num_heads=self.num_heads, output_dim=self.dim)
        return kwargs
class MessageWeightingMetaTest(unittest_templates.MetaTestCase[MessageWeighting]):
    """Test for tests for message weightings."""
    # Checks that every MessageWeighting subclass has a matching test case.
    base_cls = MessageWeighting
    base_test = MessageWeightingTests
| 30.162162
| 114
| 0.68862
|
from typing import Any, MutableMapping
import torch
import unittest_templates
from mphrqe.layer.weighting import AttentionMessageWeighting, MessageWeighting, SymmetricMessageWeighting
class MessageWeightingTests(unittest_templates.GenericTestCase[MessageWeighting]):
num_entities: int = 33
num_edges: int = 101
dim: int = 3
def test_forward(self):
x_e = torch.rand(self.num_entities, self.dim)
edge_index = torch.randint(self.num_entities, size=(2, self.num_edges))
message = torch.rand(self.num_edges, self.dim, requires_grad=True)
out = self.instance(
edge_index=edge_index,
message=message,
x_e=x_e,
)
assert isinstance(out, tuple)
assert len(out) == 2
message_, weight_ = out
assert torch.is_tensor(message_)
assert torch.is_tensor(weight_)
assert message_.shape[0] == self.num_edges
assert weight_.shape[0] == self.num_edges
weighted_message = message_ * weight_.unsqueeze(dim=-1)
weighted_message.mean().backward()
class SymmetricMessageWeightingTests(MessageWeightingTests):
cls = SymmetricMessageWeighting
class AttentionMessageWeightingTests(MessageWeightingTests):
cls = AttentionMessageWeighting
dim = 8
num_heads = 2
def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
kwargs = super()._pre_instantiation_hook(kwargs=kwargs)
kwargs["num_heads"] = self.num_heads
kwargs["output_dim"] = self.dim
return kwargs
class MessageWeightingMetaTest(unittest_templates.MetaTestCase[MessageWeighting]):
base_cls = MessageWeighting
base_test = MessageWeightingTests
| true
| true
|
1c4ac39c33fc74a087b76922f9853b80be409055
| 1,078
|
py
|
Python
|
seedorf/sports/migrations/0003_add_sport_category_table_tennis.py
|
SportySpots/seedorf
|
3f09c720ea8df0d1171022b68b494c2758f75d44
|
[
"MIT"
] | 3
|
2018-04-22T10:11:01.000Z
|
2018-11-16T22:00:34.000Z
|
seedorf/sports/migrations/0003_add_sport_category_table_tennis.py
|
SportySpots/seedorf
|
3f09c720ea8df0d1171022b68b494c2758f75d44
|
[
"MIT"
] | 87
|
2018-03-14T13:42:55.000Z
|
2022-03-21T21:15:16.000Z
|
seedorf/sports/migrations/0003_add_sport_category_table_tennis.py
|
SportySpots/seedorf
|
3f09c720ea8df0d1171022b68b494c2758f75d44
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.2 on 2019-03-29 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``table_tennis`` choice to ``Sport.category``."""
    dependencies = [("sports", "0002_auto_20180602_2110")]
    operations = [
        migrations.AlterField(
            model_name="sport",
            name="category",
            # Re-declares the full choice list; only "table_tennis" is new.
            field=models.CharField(
                choices=[
                    ("basketball", "Basketball"),
                    ("beach_volleyball", "Beach Volleyball"),
                    ("bootcamp", "Bootcamp"),
                    ("boules", "Boules"),
                    ("fitness", "Fitness"),
                    ("others", "Others"),
                    ("skating", "Skating"),
                    ("soccer", "Soccer"),
                    ("tennis", "Tennis"),
                    ("table_tennis", "Table Tennis"),
                ],
                default="others",
                help_text="Name of the main category of the sport (e.g. Soccer).",
                max_length=50,
                verbose_name="Sport Category",
            ),
        )
    ]
| 31.705882
| 82
| 0.454545
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("sports", "0002_auto_20180602_2110")]
operations = [
migrations.AlterField(
model_name="sport",
name="category",
field=models.CharField(
choices=[
("basketball", "Basketball"),
("beach_volleyball", "Beach Volleyball"),
("bootcamp", "Bootcamp"),
("boules", "Boules"),
("fitness", "Fitness"),
("others", "Others"),
("skating", "Skating"),
("soccer", "Soccer"),
("tennis", "Tennis"),
("table_tennis", "Table Tennis"),
],
default="others",
help_text="Name of the main category of the sport (e.g. Soccer).",
max_length=50,
verbose_name="Sport Category",
),
)
]
| true
| true
|
1c4ac3f3a8c481f3e1541b4748ebe97e017ea4e8
| 1,239
|
py
|
Python
|
BaseAdapter/EUAOSSHClient.py
|
leonevo/euao
|
ff7a2c9fa76c4eed297856ef82ac3d2baa8976c1
|
[
"Apache-2.0"
] | 2
|
2015-01-16T07:36:19.000Z
|
2017-03-10T06:11:55.000Z
|
BaseAdapter/EUAOSSHClient.py
|
leonevo/euao
|
ff7a2c9fa76c4eed297856ef82ac3d2baa8976c1
|
[
"Apache-2.0"
] | null | null | null |
BaseAdapter/EUAOSSHClient.py
|
leonevo/euao
|
ff7a2c9fa76c4eed297856ef82ac3d2baa8976c1
|
[
"Apache-2.0"
] | null | null | null |
import paramiko
class EUAOSSHClient(paramiko.SSHClient):
    """SSHClient variant whose exec_command applies a channel timeout."""

    def exec_command(self, command, bufsize=-1, timeout=None):
        """Run *command* remotely and return (stdin, stdout, stderr) files.

        Unlike the stock implementation, ``timeout`` is set on the session
        channel before the command is started.
        """
        channel = self._transport.open_session()
        channel.settimeout(timeout)
        channel.exec_command(command)
        return (
            channel.makefile('wb', bufsize),
            channel.makefile('rb', bufsize),
            channel.makefile_stderr('rb', bufsize),
        )
if __name__ == '__main__':
    # Ad-hoc manual test: a sample HMC command for creating an LPAR.
    cmd=r'mksyscfg -r lpar -m Server-9117-MMA-SN06D6D82 -i "name=testEUAOclient,profile_name=default,lpar_env=aixlinux,min_mem=1024,desired_mem=2048,max_mem=32768,proc_mode=shared,min_procs=1,desired_procs=2,max_procs=16,min_proc_units=0.1,desired_proc_units=0.5,max_proc_units=16,sharing_mode=uncap,uncap_weight=128,auto_start=1,boot_mode=norm,max_virtual_slots=1000,\"virtual_eth_adapters=22/0/1///1,23/0/2///1\",\"virtual_scsi_adapters=20/client//VIOserver1/23/1,21/client//VIOserver2/23/1\""'
    #ExecuteSimpleCMDviaSSH2('182.247.251.247','hscroot','abc1234',cmd)
    #ExecuteCMDviaSSH2('182.247.251.247','hscroot','abc1234',cmd,connect_timeout=5,command_timeout=20,cmd_prompt='hscroot@localhost:~>')
    # NOTE(review): only instantiates the client; no command is actually sent.
    sc=EUAOSSHClient()
| 72.882353
| 497
| 0.727199
|
import paramiko
class EUAOSSHClient(paramiko.SSHClient):
    def exec_command(self, command, bufsize=-1, timeout=None):
chan = self._transport.open_session()
chan.settimeout(timeout)
chan.exec_command(command)
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
return stdin, stdout, stderr
if __name__ == '__main__':
cmd=r'mksyscfg -r lpar -m Server-9117-MMA-SN06D6D82 -i "name=testEUAOclient,profile_name=default,lpar_env=aixlinux,min_mem=1024,desired_mem=2048,max_mem=32768,proc_mode=shared,min_procs=1,desired_procs=2,max_procs=16,min_proc_units=0.1,desired_proc_units=0.5,max_proc_units=16,sharing_mode=uncap,uncap_weight=128,auto_start=1,boot_mode=norm,max_virtual_slots=1000,\"virtual_eth_adapters=22/0/1///1,23/0/2///1\",\"virtual_scsi_adapters=20/client//VIOserver1/23/1,21/client//VIOserver2/23/1\""'
sc=EUAOSSHClient()
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.