Dataset schema (one row per source file; ⌀ marks columns that contain nulls):

| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 2–1.02M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4–245 |
| max_stars_repo_name | stringlengths | 6–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–40 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count ⌀ | int64 | 1–191k |
| max_stars_repo_stars_event_min_datetime ⌀ | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime ⌀ | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 4–245 |
| max_issues_repo_name | stringlengths | 6–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–40 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count ⌀ | int64 | 1–67k |
| max_issues_repo_issues_event_min_datetime ⌀ | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime ⌀ | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 4–245 |
| max_forks_repo_name | stringlengths | 6–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–40 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count ⌀ | int64 | 1–105k |
| max_forks_repo_forks_event_min_datetime ⌀ | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime ⌀ | stringlengths | 24–24 |
| content | stringlengths | 2–1.02M |
| avg_line_length | float64 | 1–417k |
| max_line_length | int64 | 1–987k |
| alphanum_fraction | float64 | 0–1 |
| content_no_comment | stringlengths | 0–1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
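For orientation, a minimal sketch of streaming records with this schema via the Hugging Face `datasets` library; the dataset path `"user/the-stack-like-dump"` is a hypothetical placeholder, since the actual dataset name is not given in this extract.

```python
# Editor's sketch -- the dataset path below is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/the-stack-like-dump", split="train", streaming=True)
for record in ds:
    # Each record carries the 32 columns listed in the schema table above.
    print(record["hexsha"], record["max_stars_repo_name"], record["size"])
    break
```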
**Row 1: `solids/nvd/utils/schema.py`**
- hexsha: `f702aa947d0946923d43e7498a61a680b76392a9` · size: 663 · ext: py · lang: Python
- repo: `d3vzer0/vulnerabilities-pipeline` @ `a6df7a233eaf66a8cb7c81aed69b377274ca3cf7` · licenses: ["MIT"] (path, repo, head and licenses are identical across the stars/issues/forks column groups)
- max_stars_count: 1 (2021-11-22T09:57:20.000Z → 2021-11-22T09:57:20.000Z)
- max_issues_count: 1 (2021-08-03T21:56:03.000Z → 2021-08-09T15:05:40.000Z)
- max_forks_count: null

content:

```python
from datetime import datetime
from typing import List, Dict, Optional
from pydantic import BaseModel, validator, root_validator
class ItemModel(BaseModel):
cve: Dict
configurations: Optional[Dict]
impact: Optional[Dict]
publishedDate: datetime
lastModifiedDate: datetime
class ResultModel(BaseModel):
CVE_data_timestamp: datetime
CVE_data_type: str
CVE_Items: List[ItemModel]
@validator('CVE_data_type')
def fixed_type(cls, v):
assert v == 'CVE', 'Must be of type CVE'
return v
class ResponseModel(BaseModel):
resultsPerPage: int
startIndex: int
totalResults: int
result: ResultModel
```

avg_line_length: 22.1 · max_line_length: 57 · alphanum_fraction: 0.717949

content_no_comment:

```python
from datetime import datetime
from typing import List, Dict, Optional
from pydantic import BaseModel, validator, root_validator
class ItemModel(BaseModel):
cve: Dict
configurations: Optional[Dict]
impact: Optional[Dict]
publishedDate: datetime
lastModifiedDate: datetime
class ResultModel(BaseModel):
CVE_data_timestamp: datetime
CVE_data_type: str
CVE_Items: List[ItemModel]
@validator('CVE_data_type')
def fixed_type(cls, v):
assert v == 'CVE', 'Must be of type CVE'
return v
class ResponseModel(BaseModel):
resultsPerPage: int
startIndex: int
totalResults: int
result: ResultModel
```

is_comment_constant_removed: true · is_sharp_comment_removed: true
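A quick sanity check of how this schema is meant to be used: the sketch below feeds `ResponseModel` a stripped-down NVD-style payload. It assumes the pydantic v1 API (which the file's `validator` import implies); the classes are copied from the record, and the payload values are illustrative, not real NVD data.

```python
# Editor's sketch (pydantic v1); model classes copied from the record above.
from datetime import datetime
from typing import List, Dict, Optional
from pydantic import BaseModel, validator

class ItemModel(BaseModel):
    cve: Dict
    configurations: Optional[Dict]
    impact: Optional[Dict]
    publishedDate: datetime
    lastModifiedDate: datetime

class ResultModel(BaseModel):
    CVE_data_timestamp: datetime
    CVE_data_type: str
    CVE_Items: List[ItemModel]

    @validator('CVE_data_type')
    def fixed_type(cls, v):
        assert v == 'CVE', 'Must be of type CVE'
        return v

class ResponseModel(BaseModel):
    resultsPerPage: int
    startIndex: int
    totalResults: int
    result: ResultModel

payload = {
    "resultsPerPage": 1, "startIndex": 0, "totalResults": 1,
    "result": {
        "CVE_data_timestamp": "2021-11-22T09:57:20Z",
        "CVE_data_type": "CVE",  # anything else fails the fixed_type validator
        "CVE_Items": [{
            "cve": {"CVE_data_meta": {"ID": "CVE-2021-00000"}},  # illustrative ID
            "publishedDate": "2021-11-20T10:00:00Z",
            "lastModifiedDate": "2021-11-21T10:00:00Z",
        }],
    },
}
response = ResponseModel(**payload)  # raises ValidationError on malformed input
print(response.result.CVE_Items[0].publishedDate)
```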
**Row 2: `Ar_Script/past/eg_用户注册.py`** (用户注册 ≈ "user registration")
- hexsha: `f702ab8c70e4e0252724db9b6bec22b2fcca74e7` · size: 203 · ext: py · lang: Python
- repo: `archerckk/PyTest` @ `610dd89df8d70c096f4670ca11ed2f0ca3196ca5` · licenses: ["MIT"]
- max_stars_count: null
- max_issues_count: 1 (2020-01-19T01:19:57.000Z → 2020-01-19T01:19:57.000Z)
- max_forks_count: null

content:

```python
import easygui as g
#Multi-field sign-up dialog -- title '账号中心' ("Account Center"); the msg flags
#username (用户名), real name (真实姓名), mobile number (手机号码) and E-mail as required (必填项)
user_info=g.multenterbox(title='账号中心',msg='【*用户名】为必填项\t【*真实姓名】为必填项\t【*手机号码】为必填项\t【*E-mail】为必填项',
              fields=['*用户名','*真实姓名','固定电话','*手机号码','QQ','*E-mail']
              )
```

avg_line_length: 40.6 · max_line_length: 96 · alphanum_fraction: 0.55665

content_no_comment:

```python
import easygui as g
user_info=g.multenterbox(title='账号中心',msg='【*用户名】为必填项\t【*真实姓名】为必填项\t【*手机号码】为必填项\t【*E-mail】为必填项',
fields=['*用户名','*真实姓名','固定电话','*手机号码','QQ','*E-mail']
)
```

is_comment_constant_removed: true · is_sharp_comment_removed: true
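The starred fields in this form are the required ones, and `multenterbox` returns either the entered values in field order or `None` on cancel. Below is a minimal GUI-free sketch of the follow-up validation the form implies; `missing_required` and the simulated input are the editor's, not the repo's.

```python
# Editor's sketch: validate multenterbox-style results without a GUI.
fields = ['*用户名', '*真实姓名', '固定电话', '*手机号码', 'QQ', '*E-mail']
user_info = ['alice', 'Alice Li', '', '13800000000', '', 'alice@example.com']  # simulated dialog result

def missing_required(fields, values):
    """Return names of required ('*'-prefixed) fields left empty; all of them if cancelled."""
    if values is None:  # easygui returns None when the user cancels
        return [f for f in fields if f.startswith('*')]
    return [f for f, v in zip(fields, values) if f.startswith('*') and not v.strip()]

print(missing_required(fields, user_info))  # [] -> every required field is filled
```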
**Row 3: `api/__init__.py`**
- hexsha: `f702ad147472072f25a6a2b2c6f88dcb2a58ea04` · size: 2,652 · ext: py · lang: Python
- repo: `aslanvaroqua/espa-api` @ `7ea02c0a0e9abb75db97f0989c6bdd22222fb3e6` · licenses: ["Unlicense"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null

content:

```python
import re
import os
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
""" Holds all the custom exceptions raised by the api """
class OrderNotFound(StandardError):
"""Error raised when an order is not found"""
def __init__(self, orderid):
"""Create new OrderNotFound
Args:
orderid (str): The orderid that was not found
"""
super(OrderNotFound, self).__init__(orderid)
class ItemNotFound(StandardError):
"""Error raised when an item is not found"""
def __init__(self, orderid, itemid):
"""Create new ItemNotFound
Args:
orderid (str): The orderid of the item
itemid (str): The id of the item that was not found
"""
super(ItemNotFound, self).__init__(orderid, itemid)
class ProductNotImplemented(NotImplementedError):
"""Exception to be thrown when trying to instantiate an unsupported
product"""
def __init__(self, product_id):
"""Constructor for the product not implemented
Keyword args:
product_id -- The product id of that is not implemented
Return:
None
"""
self.product_id = product_id
super(ProductNotImplemented, self).__init__(product_id)
class ValidationException(Exception):
"""Exceptions when there is an error with validating an order
example:
"3 validation errors": [
"Value u'' for field '<obj>.tm5.products[0]' cannot be blank'",
"Value u'' for field '<obj>.tm5.products[0]' is not in the enumeration: ['source_metadata', 'l1', 'toa', 'bt', 'cloud', 'sr', 'lst', 'swe', 'sr_ndvi', 'sr_evi', 'sr_savi', 'sr_msavi', 'sr_ndmi', 'sr_nbr', 'sr_nbr2', 'stats']",
"Value [u''] for field '<obj>.tm5.products' Requested products are not available"
]
"""
def __init__(self, msg):
err_ls = msg.split('\n')
err_key = err_ls[0].replace(':', '')
self.response = {err_key: []}
for err in err_ls[1:]:
if err:
err = re.sub(r'<obj>.', '', err)
self.response[err_key].append(err)
super(ValidationException, self).__init__(str(self.response))
class InventoryException(Exception):
"""Exception for handling problems with inventory handling"""
def __init__(self, msg):
super(InventoryException, self).__init__(msg)
self.response = {'Inputs Not Available': msg}
class InventoryConnectionException(Exception):
"""Exception handling if input data pool is down"""
def __init__(self, msg):
super(InventoryConnectionException, self).__init__(msg)
```

avg_line_length: 30.482759 · max_line_length: 232 · alphanum_fraction: 0.634992

content_no_comment:

```python
import re
import os
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
class OrderNotFound(StandardError):
def __init__(self, orderid):
super(OrderNotFound, self).__init__(orderid)
class ItemNotFound(StandardError):
def __init__(self, orderid, itemid):
super(ItemNotFound, self).__init__(orderid, itemid)
class ProductNotImplemented(NotImplementedError):
def __init__(self, product_id):
self.product_id = product_id
super(ProductNotImplemented, self).__init__(product_id)
class ValidationException(Exception):
def __init__(self, msg):
err_ls = msg.split('\n')
err_key = err_ls[0].replace(':', '')
self.response = {err_key: []}
for err in err_ls[1:]:
if err:
err = re.sub(r'<obj>.', '', err)
self.response[err_key].append(err)
super(ValidationException, self).__init__(str(self.response))
class InventoryException(Exception):
def __init__(self, msg):
super(InventoryException, self).__init__(msg)
self.response = {'Inputs Not Available': msg}
class InventoryConnectionException(Exception):
def __init__(self, msg):
super(InventoryConnectionException, self).__init__(msg)
```

is_comment_constant_removed: true · is_sharp_comment_removed: true
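Note that this module is Python 2 code: `StandardError` was removed in Python 3, so `OrderNotFound` and `ItemNotFound` would need to inherit from `Exception` there. The sketch below re-runs the `ValidationException` parsing logic under Python 3; the class body is copied from the record, and the sample message mirrors the docstring example.

```python
# Editor's sketch: ValidationException message parsing, Python 3 version.
import re

class ValidationException(Exception):
    def __init__(self, msg):
        err_ls = msg.split('\n')
        err_key = err_ls[0].replace(':', '')
        self.response = {err_key: []}
        for err in err_ls[1:]:
            if err:
                err = re.sub(r'<obj>.', '', err)  # strips the '<obj>.' prefix
                self.response[err_key].append(err)
        super().__init__(str(self.response))

msg = ("2 validation errors:\n"
       "Value u'' for field '<obj>.tm5.products[0]' cannot be blank'\n"
       "Value [u''] for field '<obj>.tm5.products' Requested products are not available")
try:
    raise ValidationException(msg)
except ValidationException as exc:
    print(exc.response)
    # {'2 validation errors': ["Value u'' for field 'tm5.products[0]' cannot be blank'", ...]}
```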
**Row 4: `Gomoku_minimax.py`**
- hexsha: `f702af20dda49c4762d59b396d266f40cb1b5d33` · size: 48,802 · ext: py · lang: Python
- repo: `thelazyant164/Gomoku` @ `7d87761ed5a72032ca3bebaf5cbfadc8977fb1f6` · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null

content:

```python
#Import modules and libraries
from random import randint
from string import ascii_uppercase, ascii_lowercase
from itertools import permutations
from copy import deepcopy
from tail_recursion import tail_recursive, recurse
#Define board mapping function
def mapBoard(col, row, value):
board = [[value for x in range(col)] for y in range(row)]
return board
#Define metaboard mapping function
def mapMetaBoard(col, row):
metaboard = [[[[0, 0, 0, 0], [0, 0, 0, 0]] for x in range(col)] for y in range(row)]
return metaboard
#Define view board function
def viewBoard(board):
alphabet = ascii_uppercase
col = len(board[0])
row = len(board)
border = ""
topBorder = "#||"
for i in range(col):
border += "_" * 2
topBorder += alphabet[i]
topBorder += " "
border += "___"
print(topBorder)
print(border)
for i in range(row):
print(alphabet[i] + "||" + " ".join(board[i]) + "|")
#Define mark function
def mark(board, signature):
alphabet = ascii_uppercase
alphabet1 = ascii_lowercase
dimensionY = len(board)
dimensionX = len(board[0])
valid = False
while (not valid):
print("\n\nWhere do you want to mark?\n\n")
x = input(f"Column (A - {alphabet[dimensionX - 1]})? ")
y = input(f"Row (A - {alphabet[dimensionY - 1]})? ")
try:
x = alphabet.index(x)
except ValueError:
x = alphabet1.index(x)
try:
y = alphabet.index(y)
        except ValueError:
y = alphabet1.index(y)
if (board[y][x] == ' '):
valid = True
else:
print('That position has already been marked. Please try again.\n')
board[y][x] = signature
print('\n')
viewBoard(board)
#Define function to find all occurrences of 'X'
#Value is [opponentSignature]
#Return [[col1, row1], [col2, row2], ...]
def locate(value, board):
dimensionY = len(board)
dimensionX = len(board[0])
returnList = []
for row in range(dimensionY):
for col in range(dimensionX):
if (board[row][col] in value): returnList.append([col, row])
return returnList
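#Editor's illustration: on the 2x2 board [[' ', 'X'], ['X', ' ']],
#locate(['X'], board) returns [[1, 0], [0, 1]] -- [col, row] pairs in row-major scan order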
#Define computer's turn -- recursive
@tail_recursive
def play(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, first = True):
#AI
#Each of metaboard's position is a list [danger, opportunity]
#Define function to update metaboard
#TODO: refine to improve efficiency at detecting risks and opportunities of non-continuous streak & multi-directional streaks
#REQUIREMENTS 1: resonant effect on a tile immediately next to a continuous winCond - 1 streak == risk/opportunity factor of interrupted resonance on a tile conjoining 2 aligning sub-streaks whose sum >= winCond - 1
#REQUIREMENTS 2: implement weighted resonance system on a tile conjoining multiple directional streaks > resonance system for linear streaks
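    #Editor's illustration (hypothetical values): a free tile's metaboard cell might read
    #    metaboard[3][4] == [[0.5, 0, 2.0, 0], [0, 1.0, 0, 0]]
    #i.e. the tile at col 4, row 3 carries horizontal and NW-SE danger plus vertical opportunity;
    #each 4-slot list is indexed by direction (0: horizontal, 1: vertical, 2: NW-SE, 3: NE-SW)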
def meta(board, opponentSignature, selfSignature, winCond, difficulty):
#Define function to sweep perimeter of a position's coordinates and add attributes to them
#coord = [col, row]
def sweep(metaboard, coord, keyword, opponentSignature, selfSignature, winCond):
if (keyword == 'danger'):
type = 0
otherType = 1
signature = opponentSignature
else:
type = 1
otherType = 0
signature = selfSignature
coordVars = list(permutations([-1, 0, 1], 2))
coordVars.extend(((-1, -1), (1, 1)))
for coordVar in coordVars:
try:
if (coordVar in [(-1, -1), (1, 1)]):
pos = 2
elif (coordVar in [(0, -1), (0, 1)]):
pos = 0
elif (coordVar in [(-1, 0), (1, 0)]):
pos = 1
else:
pos = 3
row = coord[1] + coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col = coord[0] + coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
#Ripple effect
if (not isinstance(metaboard[row][col], str)):
for i in range(winCond - 1):
if (not isinstance(metaboard[row][col], str)):
metaboard[row][col][type][pos] += (1 - i/(winCond - 1))
metaboard[row][col][otherType][pos] -= (1 - i/(winCond - 1))
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
elif (metaboard[row][col] == signature):
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
else:
raise IndexError
#alphabet = ascii_uppercase
#print(f'Metaboard at column {alphabet[col]} and row {alphabet[row]} has a {keyword} level of {metaboard[row][col][type]}.')
#Resonance effect
if (metaboard[row][col] == signature):
alignment = 0
while (metaboard[row][col] == signature):
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
alignment += 1
if (isinstance(metaboard[row][col], list)):
metaboard[row][col][type][pos] += alignment
except IndexError: pass
#Define function to screen entire metaboard for invalidation
def screen(metaboard, selfSignature, opponentSignature, winCond):
            #Define function to rotate the board 90 degrees counter-clockwise, keeping the original board intact
def rotate(board):
                #Define function to invert the board vertically
def invertY(board):
invertYBoard = []
dimensionY = len(board)
for row in range(dimensionY):
invertYBoard.append(board[dimensionY - row - 1])
return invertYBoard
rotateBoard = []
dimensionY = len(board)
dimensionX = len(board[0])
for col in range(dimensionX):
column = [board[row][col] for row in range(dimensionY)]
rotateBoard.append(column)
return invertY(rotateBoard)
#Define function to screen the top left corner of the board
def screenTopLeftCorner(metaboard, winCond, pos, name):
for row in range(winCond - 1):
for col in range(winCond - 1 - row):
if (isinstance(metaboard[row][col], list)):
#print(f'nullify {row}:{col}\'s danger and potential in the {name} diagonal')
metaboard[row][col][0][pos] = 0
metaboard[row][col][1][pos] = 0
            #Define function to screen metaboard to invalidate 'type' from signature (e.g., invalidate dangers between two blocked self) horizontally
def screenHorizontal(metaboard, signature, type, winCond, pos):
dimensionX = len(metaboard[0])
if type == 'danger': type = 0
else: type = 1
#Format all selfSignature's coords found in each row
#sus = [susRow1, susRow3, ...]
#susRow1 = [[col1, row], [col3, row], ...]
sus = []
for row in metaboard:
susEachRow = []
for col in row:
if (col == signature): susEachRow.append([row.index(col), metaboard.index(row)])
sus.append(susEachRow)
sus = [susEachRow for susEachRow in sus if len(susEachRow) != 0]
#Filter out all invalid segments between two blocked self horizontally
for susEachRow in sus:
for i in range(len(susEachRow) - 1):
if (2 <= susEachRow[i + 1][0] - susEachRow[i][0] <= winCond):
for k in range(0, susEachRow[i + 1][0] - susEachRow[i][0]):
if (isinstance(metaboard[susEachRow[i][1]][susEachRow[i][0] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {susEachRow[i][0]}:{susEachRow[i][1]} and {susEachRow[i + 1][0]}:{susEachRow[i + 1][1]}, the position with the coordinates {susEachRow[i][1]}:{susEachRow[i][0] + k} has been nullified of its {type}\'s {pos}.')
metaboard[susEachRow[i][1]][susEachRow[i][0] + k][type][pos] = 0
#Filter out all invalid segments between self and border
for susEachRow in sus:
start = susEachRow[0]
end = susEachRow[-1]
if (1 <= start[0] < winCond):
for k in range(0, start[0]):
if (isinstance(metaboard[start[1]][k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the border, the position with the coordinates {start[1]}:{k} has been nullified of its {type}\'s {pos}.')
metaboard[start[1]][k][type][pos] = 0
if (1 <= dimensionX - end[0] - 1 < winCond):
for k in range(0, dimensionX - end[0] - 1):
if (isinstance(metaboard[end[1]][end[0] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the border, the position with the coordinates {end[1]}:{end[0] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[1]][end[0] + k][type][pos] = 0
return metaboard
            #Define function to screen metaboard to invalidate 'type' from signature (e.g., invalidate dangers between two blocked self) diagonally
def screenDiagonal(metaboard, signature, type, winCond, pos):
dimensionY = len(metaboard)
dimensionX = len(metaboard[0])
if type == 'danger': type = 0
else: type = 1
#Format all selfSignature's coords found in each diagonal
#susDiagDown, Up, sus = [susDiag1, susDiag3, ...]
#susDiag1 = [[col1, row1], [col3, row3], ...]
sus = []
susDiagDown = []
lenSusDiagDown = []
susDiagUp = []
lenSusDiagUp = []
susDuplicate = []
for i in range(dimensionY):
susEachDiagDown = []
originalDiagLen = 0
for j in range(dimensionY):
try:
if (metaboard[i + j][j] == signature): susEachDiagDown.append([i + j, j])
originalDiagLen += 1
except IndexError:
pass
susDiagDown.append(susEachDiagDown)
if (len(susEachDiagDown) != 0):
lenSusDiagDown.append(originalDiagLen)
else: lenSusDiagDown.append(0)
for i in range(dimensionX):
susEachDiagUp = []
originalDiagLen = 0
for j in range(dimensionX):
try:
if (metaboard[j][i + j] == signature): susEachDiagUp.append([j, i + j])
originalDiagLen += 1
except IndexError: pass
susDiagUp.append(susEachDiagUp)
if (len(susEachDiagUp) != 0):
lenSusDiagUp.append(originalDiagLen)
else: lenSusDiagUp.append(0)
sus.extend(susDiagDown)
sus.extend(susDiagUp)
for i in range(min(dimensionX, dimensionY)):
if (metaboard[i][i] == signature): susDuplicate.append([i, i])
sus.remove(susDuplicate)
susDiagUp = [susEachDiag for susEachDiag in susDiagUp if len(susEachDiag) != 0]
lenSusDiagUp = [eachLen for eachLen in lenSusDiagUp if eachLen != 0]
susDiagDown = [susEachDiag for susEachDiag in susDiagDown if len(susEachDiag) != 0]
lenSusDiagDown = [eachLen for eachLen in lenSusDiagDown if eachLen != 0]
                #Filter out all invalid segments between two blocked self diagonally
for susEachDiag in sus:
for i in range(len(susEachDiag) - 1):
if (2 <= susEachDiag[i + 1][0] - susEachDiag[i][0] <= winCond):
for k in range(0, susEachDiag[i + 1][0] - susEachDiag[i][0]):
if (isinstance(metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {susEachDiag[i][0]}:{susEachDiag[i][1]} and {susEachDiag[i + 1][0]}:{susEachDiag[i + 1][1]}, the position with the coordinates {susEachDiag[i][0] + k}:{susEachDiag[i][1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k][type][pos] = 0
#Filter out all invalid segments between self and border for susDiagUp
for susEachDiag in susDiagUp:
start = susEachDiag[0]
end = susEachDiag[-1]
if (1 <= min(start[0], start[1]) < winCond):
for k in range(0, min(start[0], start[1]) + 1):
if (isinstance(metaboard[start[0] - k][start[1] - k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the corner, the position with the coordinates {start[0] + k}:{start[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[start[0] - k][start[1] - k][type][pos] = 0
if (1 <= lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1]) <= winCond):
for k in range(0, lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1])):
if (isinstance(metaboard[end[0] + k][end[1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[0] + k][end[1] + k][type][pos] = 0
#Filter out all invalid segments between self and border for susDiagDown
for susEachDiag in susDiagDown:
start = susEachDiag[0]
end = susEachDiag[-1]
if (1 <= min(start[0], start[1]) < winCond):
for k in range(0, min(start[0], start[1]) + 1):
if (isinstance(metaboard[start[0] - k][start[1] - k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {start[0]}:{start[1]} and the corner, the position with the coordinates {start[0] + k}:{start[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[start[0] - k][start[1] - k][type][pos] = 0
if (1 <= lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1]) <= winCond):
for k in range(0, lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1])):
if (isinstance(metaboard[end[0] + k][end[1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[0] + k][end[1] + k][type][pos] = 0
return metaboard
#pos: index of relevant value (0: horizontal, 1: vertical, 2: NW - SE, 3: NE - SW)
#Screen top left corner
screenTopLeftCorner(metaboard, winCond, 3, 'top left')
metaboard = rotate(metaboard)
#Screen top right corner
screenTopLeftCorner(metaboard, winCond, 2, 'top right')
metaboard = rotate(metaboard)
#Screen bottom right corner
screenTopLeftCorner(metaboard, winCond, 3, 'bottom right')
metaboard = rotate(metaboard)
#Screen bottom left corner
screenTopLeftCorner(metaboard, winCond, 2, 'bottom left')
metaboard = rotate(metaboard)
#Screen horizontally
screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 0)
screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 0)
metaboard = rotate(metaboard)
#Screen vertically
screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 1)
screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 1)
for i in range(3): metaboard = rotate(metaboard)
#Screen NW-SE diagonally
screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 2)
screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 2)
metaboard = rotate(metaboard)
#Screen NE-SW diagonally
screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 3)
screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 3)
for i in range(3): metaboard = rotate(metaboard)
metaboard = mapMetaBoard(len(board[0]), len(board))
dangerCoords = locate([opponentSignature], board)
opportunityCoords = locate([selfSignature], board)
for coord in dangerCoords:
metaboard[coord[1]][coord[0]] = opponentSignature
for coord in opportunityCoords:
metaboard[coord[1]][coord[0]] = selfSignature
for coord in dangerCoords:
sweep(metaboard, coord, 'danger', opponentSignature, selfSignature, winCond)
for coord in opportunityCoords:
sweep(metaboard, coord, 'opportunity', opponentSignature, selfSignature, winCond)
#Screening applies for difficulty 2 and up
if (difficulty >= 2):
screen(metaboard, selfSignature, opponentSignature, winCond)
return metaboard
    #Define function to choose between aggressive or defensive
def stance(metaboard, difficulty):
dangerList = []
opportunityList = []
for row in metaboard:
for col in row:
if (isinstance(col, list)):
dangerList.append(max(col[0]))
opportunityList.append(max(col[1]))
pressingDanger = max(dangerList)
pressingOpportunity = max(opportunityList)
#print(f'Highest danger is {pressingDanger}, whilst highest opportunity is {pressingOpportunity}.')
#'Tactical' playstyle applies only for difficulty 3
if (difficulty >= 3):
if (pressingOpportunity > pressingDanger):
return 'aggressive', pressingOpportunity
elif (pressingOpportunity == pressingDanger):
return 'tactical', pressingOpportunity
else:
return 'defensive', pressingDanger
else:
if (pressingOpportunity >= pressingDanger):
return 'aggressive', pressingOpportunity
else:
return 'defensive', pressingDanger
#Define function to make a play
@tail_recursive
def decide(forecasted, checked, style, value, metaboard, difficulty):
if style == 'aggressive': type = 1
elif style == 'defensive': type = 0
else: type = 2
if (style in ['aggressive', 'defensive']):
for row in metaboard:
for col in row:
if (isinstance(col, list)):
if max(col[type]) == value:
#print(col[type].index(value))
x, y = row.index(col), metaboard.index(row)
else:
returnList = []
maxTracker = []
for row in range(len(metaboard)):
for col in range(len(metaboard[0])):
if (isinstance(metaboard[row][col], list)):
if (max(metaboard[row][col][0]) == value) or (max(metaboard[row][col][1]) == value):
#print(col[type].index(value))
returnList.append([col, row])
maxTracker.append(sum(metaboard[row][col][0]) + sum(metaboard[row][col][1]))
x, y = returnList[maxTracker.index(max(maxTracker))][0], returnList[maxTracker.index(max(maxTracker))][1]
if [*forecasted, [x, y]] not in checked:
return x, y
else:
#For a checked position, set metaboard value to negative
metaboardTemp = deepcopy(metaboard)
metaboardTemp[y][x] = [[-1, -1, -1, -1], [-1, -1, -1, -1]]
style, newValue = stance(metaboardTemp, difficulty)
#When all potential positions have been checked, all potential metaboard values will have been set to negative => depleted
if newValue != value: raise ValueError
return recurse(forecasted, checked, style, newValue, metaboardTemp, difficulty)
#Define function to swap self signature and opponent signature
def swap(selfSignature, opponentSignature):
temp = selfSignature
selfSignature = opponentSignature
opponentSignature = temp
return selfSignature, opponentSignature
#Define function to determine if terminal node has been reached
def reachedTerminal(forecasted):
if len(forecasted) >= 1:
last = forecasted[-1][0]
return isinstance(last, bool) or isinstance(last, float)
return False
#Define function to evaluate value of self node
def evalSelf(selfPlaying: bool, possibilities, iteration):
def countExact(values, countItem):
counted = 0
for value in values:
if value is countItem: counted += 1
return counted
#Define function to collapse all forecasted paths with same iteration count
def collapse(selfPlaying: bool, possibilities, iteration):
def contains(values, comparisonItem):
for value in values:
if value is comparisonItem: return True
return False
#Extract all forecasted paths with same iteration count
#print("All possibilities at this stage are: ", possibilities)
extracted = deepcopy([possibility for possibility in possibilities if possibility[-1][1] == iteration])
#if selfPlaying: print("Node layer ", iteration, " and maximizer is playing.")
#else: print("Node layer ", iteration, " and minimizer is playing.")
#print("Before collapse, all values at node layer ", iteration, " is ", extracted)
tempPossibilities = deepcopy([possibility for possibility in possibilities if possibility not in extracted])
#Heuristics: if only 1 or less forecasted at current node, skip collapse
if len(extracted) == 1:
#print("Taking shortcut to skip collapse because only 1 forecasted detected at layer ", iteration, ": ", extracted[0])
tempPossibilities.append(extracted[0])
return tempPossibilities
elif len(extracted) == 0:
#print("Taking shortcut to skip collapse because no forecasted detected at layer ", iteration)
return tempPossibilities
values = [extraction[-1][0] for extraction in extracted]
#print("Performing collapse on ", values)
tieLimiter = False
for value in values:
if isinstance(value, float): tieLimiter = True
#Prioritize boolean: if True exists, all positive possibilities can be pruned
if contains(values, True) and selfPlaying:
values = [value for value in values if not (isinstance(value, float) and value > 0)]
if contains(values, False) and not selfPlaying:
values = [value for value in values if not (isinstance(value, float) and value < 0)]
#When both True and False exists, eliminate any in-between
if contains(values, True) and contains(values, False):
values = [value for value in values if not isinstance(value, float)]
#print("Preliminary sifting is done. Now performing collapse on ", values)
if selfPlaying:
#Due to Python's max([False, 0.0]) -> False, must remove all False if 0.0 exists in maximizer's turn
if tieLimiter and contains(values, False):
values = [value for value in values if value is not False]
returnValue = max(values)
else:
#Due to Python's min([0.0, False]) -> 0.0, must remove all float if False exists in minimizer's turn
if contains(values, False):
returnValue = False
else:
returnValue = min(values)
#print("Collapse done, ", returnValue)
#Deeper eval performed when multiple returnValue in values; choose longest steps for min; shortest steps for max
#Heuristics: when multiple combinations of moves result in same state, keep only 1
if countExact(values, returnValue) > 1:
#print("Multiple forecasted evaluating to the same value detected. Comparing steps for each.")
extractedShortlisted = [forecasted for forecasted in extracted if forecasted[-1][0] is returnValue]
lenList = [len(forecasted) for forecasted in extractedShortlisted]
if selfPlaying:
fullReturnValue = extractedShortlisted[lenList.index(min(lenList))]
else:
fullReturnValue = extractedShortlisted[lenList.index(max(lenList))]
#print("From ", extractedShortlisted, " choose ", fullReturnValue)
else:
#Reconstruct full format of possibility holding returnValue and add back to possibilities
fullReturnValue = [possibility for possibility in extracted if possibility[-1][0] is returnValue][0]
#print("After collapse, all values at node layer ", iteration, " is ", fullReturnValue)
tempPossibilities.append(fullReturnValue)
return tempPossibilities
#Define function to decrement all forecasted paths (should be 1) with iteration count matching current (bubble-up)
def passUp(possibilities, iteration):
for possibility in possibilities:
if possibility[-1][1] == iteration: possibility[-1][1] -= 1
#Identify if a duplicated iteration count exists in possibilities, then collapse all those forecasted depending on self nature
iterationList = [possibility[-1][1] for possibility in possibilities]
#print(iterationList)
for iterationItem in iterationList:
if countExact(iterationList, iterationItem) > 1:
possibilities = collapse(selfPlaying, possibilities, iteration)
#print(iteration)
if (iteration > 0):
passUp(possibilities, iteration)
return possibilities
#Even iteration = machine plays; odd = human
    #maxDepthSearch = number of node layers the AI forecasts ahead -- CAREFUL! time complexity is O(b ** m), with m being maxDepthSearch and b being the branching factor = (boardDimensionX * boardDimensionY - claimed tiles)
#For 3x3 board, set to 10 for full coverage
if len(board) == len(board[0]) and len(board) == 3:
maxDepthSearch = 10
    #If game is in its developing phase (i.e., number of placed marks <= 1/2 win condition)
elif max(len(locate(selfSignature, board)), len(locate(opponentSignature, board))) <= winCond/2:
maxDepthSearch = 2
else:
maxDepthSearch = 3
#possibilities = [forecasted1, forecasted2, ...]
    #forecasted = [[x1, y1], [x2, y2], [x3, y3]..., [True, iteration]] containing moves of both players until the end & a boolean win state (True when self is the winner, False otherwise)
    #forecasted = [[x1, y1], [x2, y2], [x3, y3]..., [score: float, iteration]] containing moves of both players until maxDepthSearch is reached; score is evaluated for the board state (0 when tied, +highestTacticalValue when it's self's turn, negative otherwise)
#Evaluate value of self node depending on min/max nature, run when all child nodes to maxDepthSearch are explored/ when terminal node is detected
#evalSelf only sifts through forecasteds and collapses those having the same iteration value (vying to value same node)
#When bubble up 1 node, take all forecasteds in possibilities with matching current iteration (if everything is right this should already be collapsed to only 1) and decrement that (to imply this value is passed upwards to parent node and is now parent node's originating value)
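    #Editor's illustration (hypothetical path on a 3x3 board):
    #    forecasted == [[1, 1], [0, 0], [2, 2], [True, 3]]
    #reads: computer plays (1,1), human replies (0,0), computer plays (2,2), and the trailing
    #entry records a forecasted computer win detected at node layer 3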
if reachedTerminal(forecasted):
selfPlaying = (iteration % 2 == 0)
forecastedCopy = deepcopy(forecasted)
possibilities.append(forecastedCopy)
possibilities = evalSelf(selfPlaying, possibilities, iteration)
iteration -= 1
#Reset back 1 node higher
forecasted.pop(-1)
forecasted.pop(-1)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#Terminal node: winCond is met/maxDepthSearch reached/no possible moves left
if win(board, winCond, selfSignature, opponentSignature) or win(board, winCond, opponentSignature, selfSignature) or len(locate(' ', board)) == 0 or iteration == maxDepthSearch:
if forecasted not in checked:
checked.append(deepcopy(forecasted))
#If self/other is winner, document move
if win(board, winCond, selfSignature, opponentSignature):
#If it's computer's turn, and computer wins
if (iteration % 2 == 0):
forecasted.append([True, iteration])
#print("Forecasted a possible win if moves are as followed: ", forecasted)
#viewBoard(board)
else:
forecasted.append([False, iteration])
#print("Forecasted a possible loss if moves are as followed: ", forecasted)
#viewBoard(board)
elif win(board, winCond, opponentSignature, selfSignature):
#If it's computer's turn, and computer's opponent wins
if (iteration % 2 == 0):
forecasted.append([False, iteration])
#print("Forecasted a possible loss if moves are as followed: ", forecasted)
#viewBoard(board)
else:
forecasted.append([True, iteration])
#print("Forecasted a possible win if moves are as followed: ", forecasted)
#viewBoard(board)
elif iteration == maxDepthSearch:
metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty)
try:
style, value = stance(metaboard, difficulty)
#If self's turn
if (iteration % 2 == 0):
forecasted.append([float(value), iteration])
#print("Max search depth reached: ", forecasted)
#viewBoard(board)
else:
forecasted.append([float(-value), iteration])
#print("Max search depth reached: ", forecasted)
#viewBoard(board)
#When maxDepthSearch is reached, but game is also tied
except ValueError:
forecasted.append([0.0, iteration])
#print("Forecasted a possible tie at max depth search if moves are as followed: ", forecasted)
#viewBoard(board)
#When tie is reached through tiles depletion, score is set to 0.0
else:
forecasted.append([0.0, iteration])
#print("Forecasted a possible tie if moves are as followed: ", forecasted)
#viewBoard(board)
#Reset back 1 node higher
boardHistory.pop(-1)
board = deepcopy(boardHistory[-1])
#print("Breakpoint 2: Reset board back to ")
#viewBoard(board)
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#At each node layer, make a decision and "forecast" board and metaboard, then switch position with opponent and do the same
#Normal case: when self node is not terminal, and all children are not depleted yet/maxDepthSearch is not reached yet
#dimension = len(board)
metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty)
#Heuristics: if there is only one available move left, take that move
if (len(locate(' ', board)) == 1):
x = locate(' ', board)[0][0]
y = locate(' ', board)[0][1]
#For actual move; only apply when not projecting self as opponent
if (len(checked) == 0 and iteration == 0):
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
#For a forecasted move
elif [*forecasted, [x, y]] not in checked:
forecasted.append([x, y])
checked.append(deepcopy(forecasted))
board[y][x] = selfSignature
boardHistory.append(deepcopy(board))
iteration += 1
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
style, value = stance(metaboard, difficulty)
try:
#For first move only
if len(locate(selfSignature, board)) == 0 and len(locate(opponentSignature, board)) == 0:
#For symmetrical board or customized board dimension smaller than twice win condition
if len(board) == len(board[0]) or (len(board) < winCond * 2) or (len(board[0]) < winCond * 2):
move = [int(len(board[0])/2), int(len(board)/2)]
#For customized board dimension larger than twice win condition
else:
move = [randint(winCond, len(board[0]) - 1 - winCond), randint(winCond, len(board) - 1 - winCond)]
x = move[0]
y = move[1]
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
else:
x, y = decide(forecasted, checked, style, value, metaboard, difficulty)
except ValueError:
depleted = True
    #All child nodes have been depleted (i.e., checked has been populated with all possible forecasted combinations)
if depleted:
depleted = False
selfPlaying = (iteration % 2 == 0)
possibilities = evalSelf(selfPlaying, possibilities, iteration)
iteration -= 1
    #If the base case has been evaluated, the root has been given a value, and iteration is negative => make a move
    #All child branches have been depleted
if iteration < 0:
#print(possibilities)
move = possibilities[0][0]
x = move[0]
y = move[1]
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
forecasted.pop(-1)
boardHistory.pop(-1)
board = deepcopy(boardHistory[-1])
#print("Breakpoint 1: Reset board back to ")
#viewBoard(board)
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
forecasted.append([x, y])
checked.append(deepcopy(forecasted))
board[y][x] = selfSignature
#print(selfSignature, " took the move ", [x, y])
#viewBoard(board)
boardHistory.append(deepcopy(board))
#print(f'Assessing risk and opportunity, taking {style} move this turn at col {x}, row {y}.')
# valid = False
# while (not valid):
# x = randint(0, dimension - 1)
# y = randint(0, dimension - 1)
# if board[y][x] == ' ': valid = True
iteration += 1
#Swap player each turn
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#Define winning
def win(board, winCond, signature, opponentSignature):
#Define function to determine box containing played area
def box(board):
        #Define function to find the first occurrence of 'X' or 'O', row-wise; if none is found, return 0
#Value is [signature, opponentSignature]
def locate(value, board):
dimensionY = len(board)
dimensionX = len(board[0])
for row in range(dimensionY):
for col in range(dimensionX):
if (board[row][col] in value):
return row
return 0
        #Define function to invert the board vertically
def invertY(board):
invertYBoard = []
dimensionY = len(board)
for row in range(dimensionY):
invertYBoard.append(board[dimensionY - row - 1])
return invertYBoard
        #Define function to rotate the board 90 degrees
def rotate(board):
rotateBoard = []
dimensionY = len(board)
dimensionX = len(board[0])
for col in range(dimensionX):
column = [board[row][col] for row in range(dimensionY)]
rotateBoard.append(column)
return rotateBoard
dimensionY = len(board)
dimensionX = len(board[0])
boundaryN = locate([signature, opponentSignature], board)
boundaryS = dimensionY - locate([signature, opponentSignature], invertY(board)) - 1
boundaryW = locate([signature, opponentSignature], rotate(board))
boundaryE = dimensionX - locate([signature, opponentSignature], invertY(rotate(board))) - 1
box = []
for row in range(boundaryN, boundaryS + 1):
boxRow = [board[row][col] for col in range(boundaryW, boundaryE + 1)]
box.append(boxRow)
return box
#Create as many winCond x winCond grids as needed to cover the entire played area
def grid(box, winCond):
dimensionY = len(box)
dimensionX = len(box[0])
gridY = dimensionY - winCond + 1
if (gridY < 1): gridY = 1
gridX = dimensionX - winCond + 1
if (gridX < 1): gridX = 1
#List of grids
grids = []
for offsetX in range(gridX):
for offsetY in range(gridY):
grid = []
for row in range(offsetY, offsetY + winCond):
rowY = []
for col in range(offsetX, offsetX + winCond):
try:
rowY.append(box[row][col])
except IndexError: pass
grid.append(rowY)
grids.append(grid)
return grids
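    #Editor's illustration: a played box of 3 rows x 4 columns with winCond = 3 gives
    #gridY = 1 and gridX = 2, i.e. two overlapping 3x3 windows covering the whole box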
for board in grid(box(board), winCond):
#Within each grid:
dimensionY = len(board)
dimensionX = len(board[0])
#Count 'O's in a row
for row in range(dimensionY):
if (board[row].count(signature) >= winCond):
return True
#Count 'O's in a column
columns = []
for col in range(dimensionX):
try:
columns.append([row[col] for row in board])
except IndexError: pass
for col in columns:
if (col.count(signature) >= winCond):
return True
#Count 'O's in a diagonal line
dimension = min(dimensionX, dimensionY)
diagonalsNW = []
diagonalsNE = []
for i in range(dimension):
diagonalNW = []
diagonalNE = []
for j in range(dimension):
try:
diagonalNW.append(board[j][j])
except IndexError: pass
try:
diagonalNE.append(board[j][dimension - j - 1])
except IndexError: pass
diagonalsNW.append(diagonalNW)
diagonalsNE.append(diagonalNE)
for diagonalNW in diagonalsNW:
if (diagonalNW.count(signature) >= winCond):
return True
for diagonalNE in diagonalsNE:
if (diagonalNE.count(signature) >= winCond):
                return True
    return False
#Game loop
print('Welcome to a game of Tic-tac-toe!\nThe rule is simple: block your opponent before they can get a long enough streak in a continuous row, column or diagonal to win.\n')
mode = True
while (mode):
gamemode = input('Before we start, there are two gamemodes: custom and preset. Which one would you prefer?\n(c) for custom, (p) for preset. ')
if (gamemode not in ['c', 'p']):
print('Unrecognized input command. Please read the instructions carefully and try again.\n')
else:
mode = False
print('\n\n')
#Configuration settings for custom gamemode
configure = True
while (configure):
#Set custom dimension
invalid = True
while (invalid and gamemode == 'c'):
try:
dimensionX, dimensionY = input('Input dimension for game initialization:\n(width x length): ').split('x')
dimensionX = int(dimensionX)
dimensionY = int(dimensionY)
invalid = False
except:
print('Invalid input detected. Please try again.\n')
#Preset dimension
if (gamemode == 'p'):
print('Default grid set to 26x26.')
dimensionX = 26
dimensionY = 26
#Set win condition
valid = False
while (not valid and gamemode == 'c'):
try:
winCond = input('Input streak size to count as win: ')
winCond = int(winCond)
            if (winCond < 3 or winCond > min(dimensionX, dimensionY)): raise TypeError
valid = True
except:
print('Invalid input detected. Please try again.\n')
#Preset win condition
if (gamemode == 'p'):
print('Default win streak set to 5.')
winCond = 5
#Set difficulty
chose = False
while (not chose and gamemode == 'c'):
try:
difficulty = int(input('Choose difficulty (easiest: 1 - hardest: 3): '))
if (3 < difficulty or difficulty < 1): raise ValueError
chose = True
except:
print('Invalid input detected. Please try again.\n')
#Preset difficulty
if (gamemode == 'p'):
print('Default difficulty set to 3.')
difficulty = 3
#Set player's marker
proper = False
while (not proper and gamemode == 'c'):
marker = input('Choose your prefered marker:\n(o) for \'O\', (x) for \'X\': ')
if (marker not in ['x', 'o']):
print('Invalid input detected. Please try again.\n')
else:
proper = True
if (marker == 'o'):
opponentSignature = 'O'
selfSignature = 'X'
else:
opponentSignature = 'X'
selfSignature = 'O'
#Preset marker
if (gamemode == 'p'):
print('Default player marker set to \'X\'.')
opponentSignature = 'X'
selfSignature = 'O'
#Choose who goes first
ok = False
while (not ok and gamemode == 'c'):
playerGoesFirst = input('Do you want to go first?\n(y) for yes, (n) for no: ')
if (playerGoesFirst not in ['y', 'n']):
print('Invalid input detected. Please try again.\n')
else:
ok = True
playerGoesFirst = (playerGoesFirst == 'y')
#Preset first play
if (gamemode == 'p'):
print('Default: computer goes first.')
playerGoesFirst = False
#Replay loop
replay = True
while (replay):
print('\n\n')
board = mapBoard(int(dimensionX), int(dimensionY), ' ')
viewBoard(board)
while (True):
try:
locate([' '], board)[0]
except IndexError:
print('\nIt\'s a tie!')
break
#Player plays
if (playerGoesFirst):
mark(board, opponentSignature)
if (win(board, winCond, opponentSignature, selfSignature)):
print('Congratulations, you won!')
break
playerGoesFirst = True
try:
locate([' '], board)[0]
except IndexError:
print('\nIt\'s a tie!')
break
print('\n\nComputer is calculating...')
#Computer plays
board = play([deepcopy(board)], False, [], 0, winCond, [], [], board, selfSignature, opponentSignature, difficulty)
if (win(board, winCond, selfSignature, opponentSignature)):
print('Sorry, you lost!')
break
#Replay choice
makingChoice = True
while makingChoice:
choice = input('\n\nDo you want to replay?\n(y) to replay with current configurations, (n) to quit, (p) to play with recommended configurations, or (c) to replay with different configurations.\n')
if (choice == 'y'):
replay = True
configure = False
print('\n\n')
makingChoice = False
elif (choice == 'n'):
replay = False
configure = False
makingChoice = False
elif (choice == 'p'):
replay = False
configure = True
gamemode = 'p'
print('\n\n')
makingChoice = False
elif (choice == 'c'):
replay = False
configure = True
gamemode = 'c'
print('\n\n')
makingChoice = False
else:
print('Invalid input detected. Please try again.\n')
input('\nPress ENTER to quit.')
```

avg_line_length: 50.053333 · max_line_length: 328 · alphanum_fraction: 0.565346

content_no_comment:

```python
from random import randint
from string import ascii_uppercase, ascii_lowercase
from itertools import permutations
from copy import deepcopy
from tail_recursion import tail_recursive, recurse
def mapBoard(col, row, value):
board = [[value for x in range(col)] for y in range(row)]
return board
def mapMetaBoard(col, row):
metaboard = [[[[0, 0, 0, 0], [0, 0, 0, 0]] for x in range(col)] for y in range(row)]
return metaboard
def viewBoard(board):
alphabet = ascii_uppercase
col = len(board[0])
row = len(board)
border = ""
topBorder = "#||"
for i in range(col):
border += "_" * 2
topBorder += alphabet[i]
topBorder += " "
border += "___"
print(topBorder)
print(border)
for i in range(row):
print(alphabet[i] + "||" + " ".join(board[i]) + "|")
def mark(board, signature):
alphabet = ascii_uppercase
alphabet1 = ascii_lowercase
dimensionY = len(board)
dimensionX = len(board[0])
valid = False
while (not valid):
print("\n\nWhere do you want to mark?\n\n")
x = input(f"Column (A - {alphabet[dimensionX - 1]})? ")
y = input(f"Row (A - {alphabet[dimensionY - 1]})? ")
try:
x = alphabet.index(x)
except ValueError:
x = alphabet1.index(x)
try:
y = alphabet.index(y)
        except ValueError:
y = alphabet1.index(y)
if (board[y][x] == ' '):
valid = True
else:
print('That position has already been marked. Please try again.\n')
board[y][x] = signature
print('\n')
viewBoard(board)
def locate(value, board):
dimensionY = len(board)
dimensionX = len(board[0])
returnList = []
for row in range(dimensionY):
for col in range(dimensionX):
if (board[row][col] in value): returnList.append([col, row])
return returnList
@tail_recursive
def play(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, first = True):
#AI
#Each of metaboard's position is a list [danger, opportunity]
def meta(board, opponentSignature, selfSignature, winCond, difficulty):
#coord = [col, row]
def sweep(metaboard, coord, keyword, opponentSignature, selfSignature, winCond):
if (keyword == 'danger'):
type = 0
otherType = 1
signature = opponentSignature
else:
type = 1
otherType = 0
signature = selfSignature
coordVars = list(permutations([-1, 0, 1], 2))
coordVars.extend(((-1, -1), (1, 1)))
for coordVar in coordVars:
try:
if (coordVar in [(-1, -1), (1, 1)]):
pos = 2
elif (coordVar in [(0, -1), (0, 1)]):
pos = 0
elif (coordVar in [(-1, 0), (1, 0)]):
pos = 1
else:
pos = 3
row = coord[1] + coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col = coord[0] + coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
#Ripple effect
if (not isinstance(metaboard[row][col], str)):
for i in range(winCond - 1):
if (not isinstance(metaboard[row][col], str)):
metaboard[row][col][type][pos] += (1 - i/(winCond - 1))
metaboard[row][col][otherType][pos] -= (1 - i/(winCond - 1))
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
elif (metaboard[row][col] == signature):
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
else:
raise IndexError
#alphabet = ascii_uppercase
#print(f'Metaboard at column {alphabet[col]} and row {alphabet[row]} has a {keyword} level of {metaboard[row][col][type]}.')
#Resonance effect
if (metaboard[row][col] == signature):
alignment = 0
while (metaboard[row][col] == signature):
row += coordVar[0]
if (row < 0 or row > len(metaboard)): raise IndexError
col += coordVar[1]
if (col < 0 or col > len(metaboard[0])): raise IndexError
alignment += 1
if (isinstance(metaboard[row][col], list)):
metaboard[row][col][type][pos] += alignment
except IndexError: pass
#Define function to screen entire metaboard for invalidation
def screen(metaboard, selfSignature, opponentSignature, winCond):
            #Define function to rotate the board 90 degrees counter-clockwise, keeping the original board intact
def rotate(board):
                #Define function to invert the board vertically
def invertY(board):
invertYBoard = []
dimensionY = len(board)
for row in range(dimensionY):
invertYBoard.append(board[dimensionY - row - 1])
return invertYBoard
rotateBoard = []
dimensionY = len(board)
dimensionX = len(board[0])
for col in range(dimensionX):
column = [board[row][col] for row in range(dimensionY)]
rotateBoard.append(column)
return invertY(rotateBoard)
#Define function to screen the top left corner of the board
def screenTopLeftCorner(metaboard, winCond, pos, name):
for row in range(winCond - 1):
for col in range(winCond - 1 - row):
if (isinstance(metaboard[row][col], list)):
#print(f'nullify {row}:{col}\'s danger and potential in the {name} diagonal')
metaboard[row][col][0][pos] = 0
metaboard[row][col][1][pos] = 0
def screenHorizontal(metaboard, signature, type, winCond, pos):
dimensionX = len(metaboard[0])
if type == 'danger': type = 0
else: type = 1
#sus = [susRow1, susRow3, ...]
#susRow1 = [[col1, row], [col3, row], ...]
sus = []
for row in metaboard:
susEachRow = []
for col in row:
if (col == signature): susEachRow.append([row.index(col), metaboard.index(row)])
sus.append(susEachRow)
sus = [susEachRow for susEachRow in sus if len(susEachRow) != 0]
#Filter out all invalid segments between two blocked self horizontally
for susEachRow in sus:
for i in range(len(susEachRow) - 1):
if (2 <= susEachRow[i + 1][0] - susEachRow[i][0] <= winCond):
for k in range(0, susEachRow[i + 1][0] - susEachRow[i][0]):
if (isinstance(metaboard[susEachRow[i][1]][susEachRow[i][0] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {susEachRow[i][0]}:{susEachRow[i][1]} and {susEachRow[i + 1][0]}:{susEachRow[i + 1][1]}, the position with the coordinates {susEachRow[i][1]}:{susEachRow[i][0] + k} has been nullified of its {type}\'s {pos}.')
metaboard[susEachRow[i][1]][susEachRow[i][0] + k][type][pos] = 0
for susEachRow in sus:
start = susEachRow[0]
end = susEachRow[-1]
if (1 <= start[0] < winCond):
for k in range(0, start[0]):
if (isinstance(metaboard[start[1]][k], list)):
metaboard[start[1]][k][type][pos] = 0
if (1 <= dimensionX - end[0] - 1 < winCond):
for k in range(0, dimensionX - end[0] - 1):
if (isinstance(metaboard[end[1]][end[0] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the border, the position with the coordinates {end[1]}:{end[0] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[1]][end[0] + k][type][pos] = 0
return metaboard
def screenDiagonal(metaboard, signature, type, winCond, pos):
dimensionY = len(metaboard)
dimensionX = len(metaboard[0])
if type == 'danger': type = 0
else: type = 1
#susDiagDown, Up, sus = [susDiag1, susDiag3, ...]
#susDiag1 = [[col1, row1], [col3, row3], ...]
sus = []
susDiagDown = []
lenSusDiagDown = []
susDiagUp = []
lenSusDiagUp = []
susDuplicate = []
for i in range(dimensionY):
susEachDiagDown = []
originalDiagLen = 0
for j in range(dimensionY):
try:
if (metaboard[i + j][j] == signature): susEachDiagDown.append([i + j, j])
originalDiagLen += 1
except IndexError:
pass
susDiagDown.append(susEachDiagDown)
if (len(susEachDiagDown) != 0):
lenSusDiagDown.append(originalDiagLen)
else: lenSusDiagDown.append(0)
for i in range(dimensionX):
susEachDiagUp = []
originalDiagLen = 0
for j in range(dimensionX):
try:
if (metaboard[j][i + j] == signature): susEachDiagUp.append([j, i + j])
originalDiagLen += 1
except IndexError: pass
susDiagUp.append(susEachDiagUp)
if (len(susEachDiagUp) != 0):
lenSusDiagUp.append(originalDiagLen)
else: lenSusDiagUp.append(0)
sus.extend(susDiagDown)
sus.extend(susDiagUp)
for i in range(min(dimensionX, dimensionY)):
if (metaboard[i][i] == signature): susDuplicate.append([i, i])
sus.remove(susDuplicate)
susDiagUp = [susEachDiag for susEachDiag in susDiagUp if len(susEachDiag) != 0]
lenSusDiagUp = [eachLen for eachLen in lenSusDiagUp if eachLen != 0]
susDiagDown = [susEachDiag for susEachDiag in susDiagDown if len(susEachDiag) != 0]
lenSusDiagDown = [eachLen for eachLen in lenSusDiagDown if eachLen != 0]
                #Filter out all invalid segments between two blocked self diagonally
for susEachDiag in sus:
for i in range(len(susEachDiag) - 1):
if (2 <= susEachDiag[i + 1][0] - susEachDiag[i][0] <= winCond):
for k in range(0, susEachDiag[i + 1][0] - susEachDiag[i][0]):
if (isinstance(metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {susEachDiag[i][0]}:{susEachDiag[i][1]} and {susEachDiag[i + 1][0]}:{susEachDiag[i + 1][1]}, the position with the coordinates {susEachDiag[i][0] + k}:{susEachDiag[i][1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[susEachDiag[i][0] + k][susEachDiag[i][1] + k][type][pos] = 0
for susEachDiag in susDiagUp:
start = susEachDiag[0]
end = susEachDiag[-1]
if (1 <= min(start[0], start[1]) < winCond):
for k in range(0, min(start[0], start[1]) + 1):
if (isinstance(metaboard[start[0] - k][start[1] - k], list)):
metaboard[start[0] - k][start[1] - k][type][pos] = 0
if (1 <= lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1]) <= winCond):
for k in range(0, lenSusDiagUp[susDiagUp.index(susEachDiag)] - min(end[0], end[1])):
if (isinstance(metaboard[end[0] + k][end[1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[0] + k][end[1] + k][type][pos] = 0
for susEachDiag in susDiagDown:
start = susEachDiag[0]
end = susEachDiag[-1]
if (1 <= min(start[0], start[1]) < winCond):
for k in range(0, min(start[0], start[1]) + 1):
if (isinstance(metaboard[start[0] - k][start[1] - k], list)):
metaboard[start[0] - k][start[1] - k][type][pos] = 0
if (1 <= lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1]) <= winCond):
for k in range(0, lenSusDiagDown[susDiagDown.index(susEachDiag)] - min(end[0], end[1])):
if (isinstance(metaboard[end[0] + k][end[1] + k], list)):
#print(f'Due to being blocked on both ends by {signature} at coordinates {end[0]}:{end[1]} and the corner, the position with the coordinates {end[0] + k}:{end[1] + k} has been nullified of its {type}\'s {pos}.')
metaboard[end[0] + k][end[1] + k][type][pos] = 0
return metaboard
screenTopLeftCorner(metaboard, winCond, 3, 'top left')
metaboard = rotate(metaboard)
screenTopLeftCorner(metaboard, winCond, 2, 'top right')
metaboard = rotate(metaboard)
screenTopLeftCorner(metaboard, winCond, 3, 'bottom right')
metaboard = rotate(metaboard)
screenTopLeftCorner(metaboard, winCond, 2, 'bottom left')
metaboard = rotate(metaboard)
screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 0)
screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 0)
metaboard = rotate(metaboard)
screenHorizontal(metaboard, selfSignature, 'danger' , winCond, 1)
screenHorizontal(metaboard, opponentSignature, 'opportunity' , winCond, 1)
for i in range(3): metaboard = rotate(metaboard)
screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 2)
screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 2)
metaboard = rotate(metaboard)
screenDiagonal(metaboard, selfSignature, 'danger' , winCond, 3)
screenDiagonal(metaboard, opponentSignature, 'opportunity' , winCond, 3)
for i in range(3): metaboard = rotate(metaboard)
metaboard = mapMetaBoard(len(board[0]), len(board))
dangerCoords = locate([opponentSignature], board)
opportunityCoords = locate([selfSignature], board)
for coord in dangerCoords:
metaboard[coord[1]][coord[0]] = opponentSignature
for coord in opportunityCoords:
metaboard[coord[1]][coord[0]] = selfSignature
for coord in dangerCoords:
sweep(metaboard, coord, 'danger', opponentSignature, selfSignature, winCond)
for coord in opportunityCoords:
sweep(metaboard, coord, 'opportunity', opponentSignature, selfSignature, winCond)
if (difficulty >= 2):
screen(metaboard, selfSignature, opponentSignature, winCond)
return metaboard
def stance(metaboard, difficulty):
dangerList = []
opportunityList = []
for row in metaboard:
for col in row:
if (isinstance(col, list)):
dangerList.append(max(col[0]))
opportunityList.append(max(col[1]))
pressingDanger = max(dangerList)
pressingOpportunity = max(opportunityList)
if (difficulty >= 3):
if (pressingOpportunity > pressingDanger):
return 'aggressive', pressingOpportunity
elif (pressingOpportunity == pressingDanger):
return 'tactical', pressingOpportunity
else:
return 'defensive', pressingDanger
else:
if (pressingOpportunity >= pressingDanger):
return 'aggressive', pressingOpportunity
else:
return 'defensive', pressingDanger
@tail_recursive
def decide(forecasted, checked, style, value, metaboard, difficulty):
if style == 'aggressive': type = 1
elif style == 'defensive': type = 0
else: type = 2
if (style in ['aggressive', 'defensive']):
for row in metaboard:
for col in row:
if (isinstance(col, list)):
if max(col[type]) == value:
x, y = row.index(col), metaboard.index(row)
else:
returnList = []
maxTracker = []
for row in range(len(metaboard)):
for col in range(len(metaboard[0])):
if (isinstance(metaboard[row][col], list)):
if (max(metaboard[row][col][0]) == value) or (max(metaboard[row][col][1]) == value):
returnList.append([col, row])
maxTracker.append(sum(metaboard[row][col][0]) + sum(metaboard[row][col][1]))
x, y = returnList[maxTracker.index(max(maxTracker))][0], returnList[maxTracker.index(max(maxTracker))][1]
if [*forecasted, [x, y]] not in checked:
return x, y
else:
metaboardTemp = deepcopy(metaboard)
metaboardTemp[y][x] = [[-1, -1, -1, -1], [-1, -1, -1, -1]]
style, newValue = stance(metaboardTemp, difficulty)
if newValue != value: raise ValueError
return recurse(forecasted, checked, style, newValue, metaboardTemp, difficulty)
def swap(selfSignature, opponentSignature):
temp = selfSignature
selfSignature = opponentSignature
opponentSignature = temp
return selfSignature, opponentSignature
def reachedTerminal(forecasted):
if len(forecasted) >= 1:
last = forecasted[-1][0]
return isinstance(last, bool) or isinstance(last, float)
return False
def evalSelf(selfPlaying: bool, possibilities, iteration):
def countExact(values, countItem):
counted = 0
for value in values:
if value is countItem: counted += 1
return counted
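    #Note: identity ('is') comparison keeps the bool markers True/False distinct
    #from float scores (True == 1.0 in Python); CPython interns small ints, so
    #identity also works for the iteration counters passed through here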
def collapse(selfPlaying: bool, possibilities, iteration):
def contains(values, comparisonItem):
for value in values:
if value is comparisonItem: return True
return False
extracted = deepcopy([possibility for possibility in possibilities if possibility[-1][1] == iteration])
tempPossibilities = deepcopy([possibility for possibility in possibilities if possibility not in extracted])
if len(extracted) == 1:
tempPossibilities.append(extracted[0])
return tempPossibilities
elif len(extracted) == 0:
return tempPossibilities
values = [extraction[-1][0] for extraction in extracted]
tieLimiter = False
for value in values:
if isinstance(value, float): tieLimiter = True
if contains(values, True) and selfPlaying:
values = [value for value in values if not (isinstance(value, float) and value > 0)]
if contains(values, False) and not selfPlaying:
values = [value for value in values if not (isinstance(value, float) and value < 0)]
if contains(values, True) and contains(values, False):
values = [value for value in values if not isinstance(value, float)]
if selfPlaying:
if tieLimiter and contains(values, False):
values = [value for value in values if value is not False]
returnValue = max(values)
else:
if contains(values, False):
returnValue = False
else:
returnValue = min(values)
if countExact(values, returnValue) > 1:
extractedShortlisted = [forecasted for forecasted in extracted if forecasted[-1][0] is returnValue]
lenList = [len(forecasted) for forecasted in extractedShortlisted]
if selfPlaying:
fullReturnValue = extractedShortlisted[lenList.index(min(lenList))]
else:
fullReturnValue = extractedShortlisted[lenList.index(max(lenList))]
else:
fullReturnValue = [possibility for possibility in extracted if possibility[-1][0] is returnValue][0]
tempPossibilities.append(fullReturnValue)
return tempPossibilities
def passUp(possibilities, iteration):
for possibility in possibilities:
if possibility[-1][1] == iteration: possibility[-1][1] -= 1
iterationList = [possibility[-1][1] for possibility in possibilities]
for iterationItem in iterationList:
if countExact(iterationList, iterationItem) > 1:
possibilities = collapse(selfPlaying, possibilities, iteration)
if (iteration > 0):
passUp(possibilities, iteration)
return possibilities
if len(board) == len(board[0]) and len(board) == 3:
maxDepthSearch = 10
elif max(len(locate(selfSignature, board)), len(locate(opponentSignature, board))) <= winCond/2:
maxDepthSearch = 2
else:
maxDepthSearch = 3
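    #Depth heuristic: a full 10-ply search is only tractable on a 3x3 board;
    #sparse (early-game) boards get 2 plies and everything else gets 3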
if reachedTerminal(forecasted):
selfPlaying = (iteration % 2 == 0)
forecastedCopy = deepcopy(forecasted)
possibilities.append(forecastedCopy)
possibilities = evalSelf(selfPlaying, possibilities, iteration)
iteration -= 1
#Reset back 1 node higher
forecasted.pop(-1)
forecasted.pop(-1)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#Terminal node: winCond is met/maxDepthSearch reached/no possible moves left
if win(board, winCond, selfSignature, opponentSignature) or win(board, winCond, opponentSignature, selfSignature) or len(locate(' ', board)) == 0 or iteration == maxDepthSearch:
if forecasted not in checked:
checked.append(deepcopy(forecasted))
#If self/other is winner, document move
if win(board, winCond, selfSignature, opponentSignature):
#If it's computer's turn, and computer wins
if (iteration % 2 == 0):
forecasted.append([True, iteration])
#print("Forecasted a possible win if moves are as followed: ", forecasted)
#viewBoard(board)
else:
forecasted.append([False, iteration])
#print("Forecasted a possible loss if moves are as followed: ", forecasted)
#viewBoard(board)
elif win(board, winCond, opponentSignature, selfSignature):
#If it's computer's turn, and computer's opponent wins
if (iteration % 2 == 0):
forecasted.append([False, iteration])
else:
forecasted.append([True, iteration])
elif iteration == maxDepthSearch:
metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty)
try:
style, value = stance(metaboard, difficulty)
if (iteration % 2 == 0):
forecasted.append([float(value), iteration])
#print("Max search depth reached: ", forecasted)
#viewBoard(board)
else:
forecasted.append([float(-value), iteration])
#print("Max search depth reached: ", forecasted)
#viewBoard(board)
#When maxDepthSearch is reached, but game is also tied
except ValueError:
forecasted.append([0.0, iteration])
#print("Forecasted a possible tie at max depth search if moves are as followed: ", forecasted)
#viewBoard(board)
#When tie is reached through tiles depletion, score is set to 0.0
else:
forecasted.append([0.0, iteration])
#print("Forecasted a possible tie if moves are as followed: ", forecasted)
#viewBoard(board)
#Reset back 1 node higher
boardHistory.pop(-1)
board = deepcopy(boardHistory[-1])
#print("Breakpoint 2: Reset board back to ")
#viewBoard(board)
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#At each node layer, make a decision and "forecast" board and metaboard, then switch position with opponent and do the same
#Normal case: when self node is not terminal, and all children are not depleted yet/maxDepthSearch is not reached yet
#dimension = len(board)
metaboard = meta(board, opponentSignature, selfSignature, winCond, difficulty)
#Heuristics: if there is only one available move left, take that move
if (len(locate(' ', board)) == 1):
x = locate(' ', board)[0][0]
y = locate(' ', board)[0][1]
#For actual move; only apply when not projecting self as opponent
if (len(checked) == 0 and iteration == 0):
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
#For a forecasted move
elif [*forecasted, [x, y]] not in checked:
forecasted.append([x, y])
checked.append(deepcopy(forecasted))
board[y][x] = selfSignature
boardHistory.append(deepcopy(board))
iteration += 1
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
style, value = stance(metaboard, difficulty)
try:
#For first move only
if len(locate(selfSignature, board)) == 0 and len(locate(opponentSignature, board)) == 0:
#For symmetrical board or customized board dimension smaller than twice win condition
if len(board) == len(board[0]) or (len(board) < winCond * 2) or (len(board[0]) < winCond * 2):
move = [int(len(board[0])/2), int(len(board)/2)]
#For customized board dimension larger than twice win condition
else:
move = [randint(winCond, len(board[0]) - 1 - winCond), randint(winCond, len(board) - 1 - winCond)]
x = move[0]
y = move[1]
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
else:
x, y = decide(forecasted, checked, style, value, metaboard, difficulty)
except ValueError:
depleted = True
#All child nodes had been depleted (i.e, checked has been populated with all possible forecasted combinations)
if depleted:
depleted = False
selfPlaying = (iteration % 2 == 0)
possibilities = evalSelf(selfPlaying, possibilities, iteration)
iteration -= 1
#If base case had been evaluated; root has been given value; iteration is negative => make a move
#All child branches had been depleted
if iteration < 0:
#print(possibilities)
move = possibilities[0][0]
x = move[0]
y = move[1]
alphabet = ascii_uppercase
print(f'Computer has decided to play at column {alphabet[x]} and row {alphabet[y]}.\n\n')
board = boardHistory[0]
board[y][x] = selfSignature
viewBoard(board)
return board
forecasted.pop(-1)
boardHistory.pop(-1)
board = deepcopy(boardHistory[-1])
#print("Breakpoint 1: Reset board back to ")
#viewBoard(board)
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
forecasted.append([x, y])
checked.append(deepcopy(forecasted))
board[y][x] = selfSignature
#print(selfSignature, " took the move ", [x, y])
#viewBoard(board)
boardHistory.append(deepcopy(board))
#print(f'Assessing risk and opportunity, taking {style} move this turn at col {x}, row {y}.')
# valid = False
# while (not valid):
# x = randint(0, dimension - 1)
# y = randint(0, dimension - 1)
# if board[y][x] == ' ': valid = True
iteration += 1
#Swap player each turn
selfSignature, opponentSignature = swap(selfSignature, opponentSignature)
return recurse(boardHistory, depleted, checked, iteration, winCond, forecasted, possibilities, board, selfSignature, opponentSignature, difficulty, False)
#Define winning
def win(board, winCond, signature, opponentSignature):
#Define function to determine box containing played area
def box(board):
#Define function to find first occurence of 'X' or 'O', row-wise; if none is found, return 0
#Value is [signature, opponentSignature]
def locate(value, board):
dimensionY = len(board)
dimensionX = len(board[0])
for row in range(dimensionY):
for col in range(dimensionX):
if (board[row][col] in value):
return row
return 0
#Define function to inverse board vertically
def invertY(board):
invertYBoard = []
dimensionY = len(board)
for row in range(dimensionY):
invertYBoard.append(board[dimensionY - row - 1])
return invertYBoard
#Define function to rotate board 90 degree
def rotate(board):
rotateBoard = []
dimensionY = len(board)
dimensionX = len(board[0])
for col in range(dimensionX):
column = [board[row][col] for row in range(dimensionY)]
rotateBoard.append(column)
return rotateBoard
dimensionY = len(board)
dimensionX = len(board[0])
boundaryN = locate([signature, opponentSignature], board)
boundaryS = dimensionY - locate([signature, opponentSignature], invertY(board)) - 1
boundaryW = locate([signature, opponentSignature], rotate(board))
boundaryE = dimensionX - locate([signature, opponentSignature], invertY(rotate(board))) - 1
box = []
for row in range(boundaryN, boundaryS + 1):
boxRow = [board[row][col] for col in range(boundaryW, boundaryE + 1)]
box.append(boxRow)
return box
#Create as many winCond x winCond grids as needed to cover the entire played area
def grid(box, winCond):
dimensionY = len(box)
dimensionX = len(box[0])
gridY = dimensionY - winCond + 1
if (gridY < 1): gridY = 1
gridX = dimensionX - winCond + 1
if (gridX < 1): gridX = 1
#List of grids
grids = []
for offsetX in range(gridX):
for offsetY in range(gridY):
grid = []
for row in range(offsetY, offsetY + winCond):
rowY = []
for col in range(offsetX, offsetX + winCond):
try:
rowY.append(box[row][col])
except IndexError: pass
grid.append(rowY)
grids.append(grid)
return grids
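    #e.g. a 4x5 box with winCond 3 yields gridY * gridX = 2 * 3 = 6 overlapping grids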
for board in grid(box(board), winCond):
#Within each grid:
dimensionY = len(board)
dimensionX = len(board[0])
#Count 'O's in a row
for row in range(dimensionY):
if (board[row].count(signature) >= winCond):
return True
#Count 'O's in a column
columns = []
for col in range(dimensionX):
try:
columns.append([row[col] for row in board])
except IndexError: pass
for col in columns:
if (col.count(signature) >= winCond):
return True
#Count 'O's in a diagonal line
dimension = min(dimensionX, dimensionY)
diagonalsNW = []
diagonalsNE = []
for i in range(dimension):
diagonalNW = []
diagonalNE = []
for j in range(dimension):
try:
diagonalNW.append(board[j][j])
except IndexError: pass
try:
diagonalNE.append(board[j][dimension - j - 1])
except IndexError: pass
diagonalsNW.append(diagonalNW)
diagonalsNE.append(diagonalNE)
for diagonalNW in diagonalsNW:
if (diagonalNW.count(signature) >= winCond):
return True
for diagonalNE in diagonalsNE:
if (diagonalNE.count(signature) >= winCond):
return True
#Game loop
print('Welcome to a game of Tic-tac-toe!\nThe rule is simple: block your opponent before they can get a long enough streak in a continuous row, column or diagonal to win.\n')
mode = True
while (mode):
gamemode = input('Before we start, there are two gamemodes: custom and preset. Which one would you prefer?\n(c) for custom, (p) for preset. ')
if (gamemode not in ['c', 'p']):
print('Unrecognized input command. Please read the instructions carefully and try again.\n')
else:
mode = False
print('\n\n')
#Configuration settings for custom gamemode
configure = True
while (configure):
#Set custom dimension
invalid = True
while (invalid and gamemode == 'c'):
try:
dimensionX, dimensionY = input('Input dimension for game initialization:\n(width x length): ').split('x')
dimensionX = int(dimensionX)
dimensionY = int(dimensionY)
invalid = False
except:
print('Invalid input detected. Please try again.\n')
#Preset dimension
if (gamemode == 'p'):
print('Default grid set to 26x26.')
dimensionX = 26
dimensionY = 26
#Set win condition
valid = False
while (not valid and gamemode == 'c'):
try:
winCond = input('Input streak size to count as win: ')
winCond = int(winCond)
            if (winCond < 3 or winCond > min(dimensionX, dimensionY)): raise ValueError
valid = True
except:
print('Invalid input detected. Please try again.\n')
#Preset win condition
if (gamemode == 'p'):
print('Default win streak set to 5.')
winCond = 5
#Set difficulty
chose = False
while (not chose and gamemode == 'c'):
try:
difficulty = int(input('Choose difficulty (easiest: 1 - hardest: 3): '))
if (3 < difficulty or difficulty < 1): raise ValueError
chose = True
except:
print('Invalid input detected. Please try again.\n')
#Preset difficulty
if (gamemode == 'p'):
print('Default difficulty set to 3.')
difficulty = 3
#Set player's marker
proper = False
while (not proper and gamemode == 'c'):
        marker = input('Choose your preferred marker:\n(o) for \'O\', (x) for \'X\': ')
if (marker not in ['x', 'o']):
print('Invalid input detected. Please try again.\n')
else:
proper = True
if (marker == 'o'):
opponentSignature = 'O'
selfSignature = 'X'
else:
opponentSignature = 'X'
selfSignature = 'O'
if (gamemode == 'p'):
print('Default player marker set to \'X\'.')
opponentSignature = 'X'
selfSignature = 'O'
ok = False
while (not ok and gamemode == 'c'):
playerGoesFirst = input('Do you want to go first?\n(y) for yes, (n) for no: ')
if (playerGoesFirst not in ['y', 'n']):
print('Invalid input detected. Please try again.\n')
else:
ok = True
playerGoesFirst = (playerGoesFirst == 'y')
if (gamemode == 'p'):
print('Default: computer goes first.')
playerGoesFirst = False
replay = True
while (replay):
print('\n\n')
board = mapBoard(int(dimensionX), int(dimensionY), ' ')
viewBoard(board)
while (True):
try:
locate([' '], board)[0]
except IndexError:
print('\nIt\'s a tie!')
break
#Player plays
if (playerGoesFirst):
mark(board, opponentSignature)
if (win(board, winCond, opponentSignature, selfSignature)):
print('Congratulations, you won!')
break
playerGoesFirst = True
try:
locate([' '], board)[0]
except IndexError:
print('\nIt\'s a tie!')
break
print('\n\nComputer is calculating...')
board = play([deepcopy(board)], False, [], 0, winCond, [], [], board, selfSignature, opponentSignature, difficulty)
if (win(board, winCond, selfSignature, opponentSignature)):
print('Sorry, you lost!')
break
makingChoice = True
while makingChoice:
choice = input('\n\nDo you want to replay?\n(y) to replay with current configurations, (n) to quit, (p) to play with recommended configurations, or (c) to replay with different configurations.\n')
if (choice == 'y'):
replay = True
configure = False
print('\n\n')
makingChoice = False
elif (choice == 'n'):
replay = False
configure = False
makingChoice = False
elif (choice == 'p'):
replay = False
configure = True
gamemode = 'p'
print('\n\n')
makingChoice = False
elif (choice == 'c'):
replay = False
configure = True
gamemode = 'c'
print('\n\n')
makingChoice = False
else:
print('Invalid input detected. Please try again.\n')
input('\nPress ENTER to quit.')
|
f702b0c9185d321ac1b98814edddd6bd103a696b
| 2,803
|
py
|
Python
|
nmma/em/create_injection_slurm.py
|
DavidIbarr/nmma
|
109fdd57add52cfea3553df8346981d6a117a7e7
|
[
"MIT"
] | 1
|
2022-02-12T18:06:50.000Z
|
2022-02-12T18:06:50.000Z
|
nmma/em/create_injection_slurm.py
|
DavidIbarr/nmma
|
109fdd57add52cfea3553df8346981d6a117a7e7
|
[
"MIT"
] | 10
|
2022-02-08T18:18:22.000Z
|
2022-03-10T13:11:03.000Z
|
nmma/em/create_injection_slurm.py
|
DavidIbarr/nmma
|
109fdd57add52cfea3553df8346981d6a117a7e7
|
[
"MIT"
] | 12
|
2022-02-07T21:15:16.000Z
|
2022-03-31T18:26:06.000Z
|
import os
import argparse
import json
import pandas as pd
import bilby
from bilby_pipe.create_injections import InjectionCreator
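# Example invocation (file names are illustrative, flags are defined below):
#   python create_injection_slurm.py --prior-file GW170817.prior \
#       --injection-file injection.json --analysis-file analysis.sh -o outdir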
def main():
parser = argparse.ArgumentParser(description="Slurm files from nmma injection file")
parser.add_argument(
"--prior-file",
type=str,
required=True,
help="The prior file from which to generate injections",
)
parser.add_argument(
"--injection-file",
type=str,
required=True,
help="The bilby injection json file to be used",
)
parser.add_argument(
"--analysis-file",
type=str,
required=True,
help="The analysis bash script to be replicated",
)
parser.add_argument("-o", "--outdir", type=str, default="outdir")
args = parser.parse_args()
# load the injection json file
if args.injection_file:
if args.injection_file.endswith(".json"):
with open(args.injection_file, "rb") as f:
injection_data = json.load(f)
datadict = injection_data["injections"]["content"]
dataframe_from_inj = pd.DataFrame.from_dict(datadict)
else:
print("Only json supported.")
exit(1)
if len(dataframe_from_inj) > 0:
args.n_injection = len(dataframe_from_inj)
# create the injection dataframe from the prior_file
injection_creator = InjectionCreator(
prior_file=args.prior_file,
prior_dict=None,
n_injection=args.n_injection,
default_prior="PriorDict",
gps_file=None,
trigger_time=0,
generation_seed=0,
)
dataframe_from_prior = injection_creator.get_injection_dataframe()
    # combine the dataframes: an outer join on the shared integer index pairs
    # each injection row with the prior-drawn row of the same index
dataframe = pd.DataFrame.merge(
dataframe_from_inj,
dataframe_from_prior,
how="outer",
left_index=True,
right_index=True,
)
for index, row in dataframe.iterrows():
with open(args.analysis_file, "r") as file:
analysis = file.read()
outdir = os.path.join(args.outdir, str(index))
if not os.path.isdir(outdir):
os.makedirs(outdir)
priors = bilby.gw.prior.PriorDict(args.prior_file)
priors.to_file(outdir, label="injection")
priorfile = os.path.join(outdir, "injection.prior")
injfile = os.path.join(outdir, "lc.csv")
analysis = analysis.replace("PRIOR", priorfile)
analysis = analysis.replace("OUTDIR", outdir)
analysis = analysis.replace("INJOUT", injfile)
analysis = analysis.replace("INJNUM", str(index))
analysis_file = os.path.join(outdir, "inference.sh")
        with open(analysis_file, "w") as fid:
            fid.write(analysis)
if __name__ == "__main__":
main()
| 29.505263
| 88
| 0.625401
|
f702b0d886a22f71a467ec815515b251b1e19d71
| 3,125
|
py
|
Python
|
testing/test_cases/object_formatting_test_cases.py
|
roym899/flake8-annotations
|
8b28fe6d3d00fc601d0f6e151588056d231a2186
|
[
"MIT"
] | 42
|
2020-09-02T22:45:19.000Z
|
2022-03-23T20:09:20.000Z
|
testing/test_cases/object_formatting_test_cases.py
|
roym899/flake8-annotations
|
8b28fe6d3d00fc601d0f6e151588056d231a2186
|
[
"MIT"
] | 23
|
2020-09-03T12:17:49.000Z
|
2022-03-31T15:07:45.000Z
|
testing/test_cases/object_formatting_test_cases.py
|
roym899/flake8-annotations
|
8b28fe6d3d00fc601d0f6e151588056d231a2186
|
[
"MIT"
] | 4
|
2021-03-30T13:40:52.000Z
|
2022-02-14T14:19:51.000Z
|
from functools import partial
from typing import NamedTuple, Union
from flake8_annotations import Argument, Function
from flake8_annotations.enums import AnnotationType
class FormatTestCase(NamedTuple):
"""Named tuple for representing our test cases."""
test_object: Union[Argument, Function]
str_output: str
repr_output: str
# Define partial functions to simplify object creation
arg = partial(Argument, lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS)
func = partial(Function, name="test_func", lineno=0, col_offset=0, decorator_list=[])
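# e.g. arg(argname="foo") expands to:
#   Argument(argname="foo", lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS)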
formatting_test_cases = {
"arg": FormatTestCase(
test_object=arg(argname="test_arg"),
str_output="<Argument: test_arg, Annotated: False>",
repr_output=(
"Argument("
"argname='test_arg', "
"lineno=0, "
"col_offset=0, "
"annotation_type=AnnotationType.ARGS, "
"has_type_annotation=False, "
"has_3107_annotation=False, "
"has_type_comment=False"
")"
),
),
"func_no_args": FormatTestCase(
test_object=func(args=[arg(argname="return")]),
str_output="<Function: test_func, Args: [<Argument: return, Annotated: False>]>",
repr_output=(
"Function("
"name='test_func', "
"lineno=0, "
"col_offset=0, "
"function_type=FunctionType.PUBLIC, "
"is_class_method=False, "
"class_decorator_type=None, "
"is_return_annotated=False, "
"has_type_comment=False, "
"has_only_none_returns=True, "
"is_nested=False, "
"decorator_list=[], "
"args=[Argument(argname='return', lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS, " # noqa: E501
"has_type_annotation=False, has_3107_annotation=False, has_type_comment=False)]"
")"
),
),
"func_has_arg": FormatTestCase(
test_object=func(args=[arg(argname="foo"), arg(argname="return")]),
str_output="<Function: test_func, Args: [<Argument: foo, Annotated: False>, <Argument: return, Annotated: False>]>", # noqa: E501
repr_output=(
"Function("
"name='test_func', "
"lineno=0, "
"col_offset=0, "
"function_type=FunctionType.PUBLIC, "
"is_class_method=False, "
"class_decorator_type=None, "
"is_return_annotated=False, "
"has_type_comment=False, "
"has_only_none_returns=True, "
"is_nested=False, "
"decorator_list=[], "
"args=[Argument(argname='foo', lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS, " # noqa: E501
"has_type_annotation=False, has_3107_annotation=False, has_type_comment=False), "
"Argument(argname='return', lineno=0, col_offset=0, annotation_type=AnnotationType.ARGS, " # noqa: E501
"has_type_annotation=False, has_3107_annotation=False, has_type_comment=False)]"
")"
),
),
}
| 38.580247
| 138
| 0.60704
|
f702b11bd8a930c1afc521fe51421d52dde23c1f
| 1,650
|
py
|
Python
|
demos/matdb/demo_srim_compounddb_to_suzu.py
|
takaakiaoki/suzu
|
431975a5345d9683f0a9453275764693e9e2064e
|
[
"MIT"
] | 6
|
2018-05-05T10:13:11.000Z
|
2021-06-21T02:11:44.000Z
|
demos/matdb/demo_srim_compounddb_to_suzu.py
|
takaakiaoki/suzu
|
431975a5345d9683f0a9453275764693e9e2064e
|
[
"MIT"
] | null | null | null |
demos/matdb/demo_srim_compounddb_to_suzu.py
|
takaakiaoki/suzu
|
431975a5345d9683f0a9453275764693e9e2064e
|
[
"MIT"
] | 5
|
2018-05-05T10:13:56.000Z
|
2020-06-15T14:32:45.000Z
|
# coding: utf-8
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__),'../..'))
import suzu.matdb.srim_compounddb as compounddb
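# As used below, Compound.elems is a list of (atomic number, fraction) pairs;
# mass_percentage selects the interpretation: mass fractions (air) vs stoichiometry (water)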
air = compounddb.Compound()
air.desc = 'Air, Dry near sea level (ICRU-104) 0.00120484 O-23.2, N-75.5, Ar-1.3'
air.name = '%Air, Dry (ICRU-104)'
air.density = 0.00120484
air.mass_percentage = True
air.elems = [(6, 0.000124), (8, 0.231781), (7, 0.755267), (18, 0.012827)]
air.bonding = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
air.comment = """corrected by H. Paul, Sept. 2004
"""
air.fulltext = """*Air, Dry near sea level (ICRU-104) 0.00120484 O-23.2, N-75.5, Ar-1.3
"%Air, Dry (ICRU-104)", .00120484, 4, 6, .000124, 8, .231781, 7, .755267, 18, .012827
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
$ corrected by H. Paul, Sept. 2004
$"""
water = compounddb.Compound()
water.desc = 'Water (liquid) 1.00 H-2, O-1'
water.name = 'Water_Liquid (ICRU-276)'
water.density = 1.0
water.mass_percentage = False
water.elems = [(1, 2.0), (8, 1.0)]
water.bonding = [0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
water.comment = b"""Chemical Formula: H \u00c4\u00c4 O \u00c4\u00c4 H
There is about an 8% increase in the peak of the stopping power
for ions in water vapour relative to the liquid. (The peak of the
stopping occurs at an energy of about 100 keV/amu times the 2/3
power of the ion's atomic number.) Above the peak the phase
difference begins to disappear. This calculation is for the
LIQUID phase. """.decode('cp437')
print(water.to_suzu())
print(air.to_suzu())
| 37.5
| 106
| 0.638182
|
f702b1385043c102875d88217ec2b9871be5ba26
| 3,885
|
py
|
Python
|
tool/metric.py
|
dkswxd/unetpp_pytorch_qiu
|
df439b07d13c5d8c87975f0cca4dd7a5ff19f8c2
|
[
"Apache-2.0"
] | 1
|
2022-01-13T07:11:18.000Z
|
2022-01-13T07:11:18.000Z
|
code/utils/metric.py
|
DKJJ/SSL4MIS
|
7f139d0c71110052399f0a93b55a39ba85897561
|
[
"MIT"
] | null | null | null |
code/utils/metric.py
|
DKJJ/SSL4MIS
|
7f139d0c71110052399f0a93b55a39ba85897561
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn import metrics
from PIL import Image
def get_metrics(pred, logits, gt):
if isinstance(logits, list):
logits = logits[-1]
result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0]),
'auc': roc(gt, logits)}
return result
def get_metrics_without_roc(pred, gt):
result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0])}
return result
def show_metrics(metrics):
con_mat = np.zeros((2,2))
auc = 0.0
for m in metrics:
con_mat += m['confusion_matrix']
auc += m['auc']
auc /= len(metrics)
result = {'confusion_matrix': con_mat.tolist(),
'accuracy': accuracy(con_mat),
'kappa': kappa(con_mat),
'precision': precision(con_mat),
'sensitivity': sensitivity(con_mat),
'specificity': specificity(con_mat),
'auc': auc,
}
return result
def show_metrics_without_roc(metrics):
con_mat = np.zeros((2,2))
for m in metrics:
con_mat += m['confusion_matrix']
result = {'confusion_matrix': con_mat,
'accuracy': accuracy(con_mat),
'kappa': kappa(con_mat),
'precision': precision(con_mat),
'sensitivity': sensitivity(con_mat),
'specificity': specificity(con_mat),
}
return result
def show_metrics_from_save_image(data):
pred = data[:,:,0] // 255
gt = data[:,:,1] // 255
metrics = [get_metrics_without_roc(pred, gt)]
return show_metrics_without_roc(metrics)
def kappa(matrix):
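    # Cohen's kappa: (po - pe) / (1 - pe), where po is the observed agreement
    # (diagonal / total) and pe is the chance agreement from row*column marginals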
matrix = np.array(matrix)
n = np.sum(matrix)
sum_po = 0
sum_pe = 0
for i in range(len(matrix[0])):
sum_po += matrix[i][i]
row = np.sum(matrix[i, :])
col = np.sum(matrix[:, i])
sum_pe += row * col
po = sum_po / n
pe = sum_pe / (n * n)
# print(po, pe)
return (po - pe) / (1 - pe)
def sensitivity(matrix):
return matrix[0][0]/(matrix[0][0]+matrix[1][0])
def specificity(matrix):
return matrix[1][1]/(matrix[1][1]+matrix[0][1])
def precision(matrix):
return matrix[0][0]/(matrix[0][0]+matrix[0][1])
def roc(gt, logits):
gtlist = gt.flatten()
predlist = logits.detach().cpu().numpy()[0, 1, ...].flatten()
fpr, tpr, thresholds = metrics.roc_curve(gtlist, predlist, pos_label=1)
    roc_auc = metrics.auc(fpr, tpr)  # AUC: area under the ROC curve
return roc_auc
def accuracy(matrix):
return (matrix[0][0]+matrix[1][1])/(matrix[0][0]+matrix[0][1]+matrix[1][0]+matrix[1][1])
def error_rate(predictions, labels):
"""
Return the error rate based on dense predictions and 1-hot labels.
"""
return 100.0 - (
100.0 *
np.sum(np.argmin(predictions, 3) == np.argmin(labels, 3)) /
(predictions.shape[0] * predictions.shape[1] * predictions.shape[2]))
def save_predict(filename, data, gt, pred):
pred = pred * 255
gt = gt[0, 1, :, :]
gt = np.where(gt > 0.5, 255, 0)
differ = np.stack([np.zeros_like(pred), gt, pred], -1)
pred = np.stack([pred, pred, pred], -1)
gt = np.stack([gt, gt, gt], -1)
data = np.transpose(data, (0, 2, 3, 1))[0,...]
if data.shape[2] == 60:
data = data[:, :, 10:40:10]
elif data.shape[2] == 1:
data = np.concatenate([data, data, data], -1)
elif data.shape[2] == 15:
data = data[:, :, 0:15:5]
data -= np.min(data, axis=(0,1))
data /= (np.max(data, axis=(0,1))/255)
data = data.astype(np.uint8)
img = Image.fromarray(np.concatenate([data, pred, gt, differ], axis=1).astype(np.uint8))
img.save(filename)
def save_logits(filename, pred):
pred = pred * 255
pred = np.stack([pred, pred, pred], -1)
img = Image.fromarray(pred.astype(np.uint8))
img.save(filename)
| 31.08
| 104
| 0.583269
|
f702b1cdf62d8a65d17e50a4ab858928e5956b21
| 2,601
|
py
|
Python
|
tests/GAPDemo_cor_002/run.py
|
sagscmi/GAPDemo2019
|
37ca1a9587a029194469cb084d309ccc2ea4be43
|
[
"Apache-2.0"
] | null | null | null |
tests/GAPDemo_cor_002/run.py
|
sagscmi/GAPDemo2019
|
37ca1a9587a029194469cb084d309ccc2ea4be43
|
[
"Apache-2.0"
] | null | null | null |
tests/GAPDemo_cor_002/run.py
|
sagscmi/GAPDemo2019
|
37ca1a9587a029194469cb084d309ccc2ea4be43
|
[
"Apache-2.0"
] | null | null | null |
from pysys.constants import *
from apama.basetest import ApamaBaseTest
from apama.correlator import CorrelatorHelper
from GAPDemoConnected import GAPDemoConnectedHelper
class PySysTest(ApamaBaseTest):
def __init__(self, descriptor, outsubdir, runner):
super(PySysTest, self).__init__(descriptor, outsubdir, runner)
self.helper = GAPDemoConnectedHelper(self, PROJECT)
def execute(self):
# Start application
correlator = self.helper.startApplication()
# Find a phone device
(phoneId, phoneName) = self.helper.getDeviceDetails()
self.log.info(f'Found c8y_SensorPhone device with name "{phoneName}" and id "{phoneId}"')
# Wait for application to subscribe to measurements from the phone
self.helper.waitForSubscription()
# Set baseline acceleration
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 1.23)
# Wait for all events to be processed
self.helper.waitForBaseline()
# Get current active alarm counts
flipUpBefore = self.helper.countActiveAlarms("FlipUp")
self.log.info(f'Found {flipUpBefore} active "FlipUp" alarms before sending measurements')
flipDownBefore = self.helper.countActiveAlarms("FlipDown")
self.log.info(f'Found {flipDownBefore} active "FlipDown" alarms before sending measurements')
# Send acceleration measurements
self.log.info('Sending measurements...')
self.helper.sendAcceleration(phoneId, 0.0, 0.0, -0.9) # Up
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.9) # Down
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.4)
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.0)
self.helper.sendAcceleration(phoneId, 0.0, 0.0, -0.4)
self.helper.sendAcceleration(phoneId, 0.0, 0.0, -0.9) # Up
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.8)
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.9)
self.helper.sendAcceleration(phoneId, 0.0, 0.0, 0.85) # Down
# wait for all events to be processed
self.helper.waitForMeasurements()
# Get latest active alarm counts and calculate delta
flipUpAfter = self.helper.countActiveAlarms("FlipUp")
self.log.info(f'Found {flipUpAfter} active "FlipUp" alarms after sending measurements')
flipDownAfter = self.helper.countActiveAlarms("FlipDown")
self.log.info(f'Found {flipDownAfter} active "FlipDown" alarms after sending measurements')
self.flipUpDelta = flipUpAfter - flipUpBefore
self.flipDownDelta = flipDownAfter - flipDownBefore
def validate(self):
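        # The measurement sequence in execute() contains two Up flips and two
        # Down flips, so each active-alarm count should increase by exactly 2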
self.assertEval("self.flipUpDelta=={expected}", expected=2)
self.assertEval("self.flipDownDelta=={expected}", expected=2)
| 42.639344
| 96
| 0.738562
|
f702b3026db98722fecc5c517e03dac41d42da66
| 802
|
py
|
Python
|
youtubesearch/urls.py
|
shankarj67/Django-youtubesearch
|
7a96592fa9c65ab44cce8724b0872675467a0863
|
[
"MIT"
] | null | null | null |
youtubesearch/urls.py
|
shankarj67/Django-youtubesearch
|
7a96592fa9c65ab44cce8724b0872675467a0863
|
[
"MIT"
] | 6
|
2020-06-05T20:50:34.000Z
|
2021-06-10T18:27:49.000Z
|
youtubesearch/urls.py
|
shankarj67/Django-youtubesearch
|
7a96592fa9c65ab44cce8724b0872675467a0863
|
[
"MIT"
] | null | null | null |
"""youtubesearch URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('search.urls')),
]
| 34.869565
| 77
| 0.704489
|
f702b307d609b7a41be38ee79231f650669a6ccf
| 90,700
|
py
|
Python
|
selfdrive/car/hyundai/values.py
|
yayiblue/op_v0814_crwusiz
|
8c047a54cd950af875239eefc80f3558693cb4f8
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/values.py
|
yayiblue/op_v0814_crwusiz
|
8c047a54cd950af875239eefc80f3558693cb4f8
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/values.py
|
yayiblue/op_v0814_crwusiz
|
8c047a54cd950af875239eefc80f3558693cb4f8
|
[
"MIT"
] | null | null | null |
from cereal import car
from selfdrive.car import dbc_dict
Ecu = car.CarParams.Ecu
class CarControllerParams:
ACCEL_MAX = 2.0
ACCEL_MIN = -3.7
STEER_MAX = 384 # 409 is the max, 255 is stock
STEER_DELTA_UP = 3
STEER_DELTA_DOWN = 7
STEER_DRIVER_ALLOWANCE = 50
STEER_DRIVER_MULTIPLIER = 2
STEER_DRIVER_FACTOR = 1
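  # STEER_DELTA_UP/DOWN bound how fast the torque command may ramp per frame;
  # the STEER_DRIVER_* terms relax the limits when the driver applies torque
  # (consumed by openpilot's shared standard steer-torque-limit helper)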
class CAR:
# Hyundai
ELANTRA_I30 = "HYUNDAI AVANTE,I30 2017~2020 (AD,PD)"
ELANTRA21 = "HYUNDAI AVANTE 2021 (CN7)"
ELANTRA21_HEV = "HYUNDAI AVANTE HEV 2021 (CN7)"
SONATA = "HYUNDAI SONATA 2020 (DN8)"
SONATA_HEV = "HYUNDAI SONATA HEV 2020 (DN8)"
SONATA_LF = "HYUNDAI SONATA 2016~2019 (LF)"
SONATA_LF_HEV = "HYUNDAI SONATA 2018 HEV (LF)"
KONA = "HYUNDAI KONA 2019 (OS)"
KONA_EV = "HYUNDAI KONA EV 2019 (OS)"
KONA_HEV = "HYUNDAI KONA HEV 2019 (OS)"
IONIQ_EV = "HYUNDAI IONIQ EV 2019~2020 (AE)"
IONIQ_HEV = "HYUNDAI IONIQ HEV 2017 (AE)"
SANTA_FE = "HYUNDAI SANTA FE 2019~2021 (TM)"
SANTA_FE_HEV = "HYUNDAI SANTA FE 2021~2022 (TM)"
PALISADE = "HYUNDAI PALISADE 2020 (LX2)"
VELOSTER = "HYUNDAI VELOSTER 2019 (JS)"
GRANDEUR = "GRANDEUR 2017~2019 (IG)"
GRANDEUR_HEV = "GRANDEUR HEV 2018~2019 (IG)"
GRANDEUR20 = "GRANDEUR 2020 (IG)"
GRANDEUR20_HEV = "GRANDEUR HEV 2020 (IG)"
NEXO = "HYUNDAI NEXO (FE)"
# Kia
FORTE = "KIA K3 2018 (BD)"
K5 = "KIA K5 2016~2020 (JF)"
K5_HEV = "KIA K5 HEV 2016~2020 (JF)"
K5_DL3 = "KIA K5 2021 (DL3)"
K5_DL3_HEV = "KIA K5 HEV 2021 (DL3)"
K7 = "KIA K7 2016-2019 (YG)"
K7_HEV = "KIA K7 HEV 2017-2019 (YG)"
K9 = "KIA K9 2019-2021 (RJ)"
SPORTAGE = "KIA SPORTAGE 2016~2020 (QL)"
SORENTO = "KIA SORENTO 2017~2020 (UM)"
MOHAVE = "KIA MOHAVE 2020 (HM)"
STINGER = "KIA STINGER 2018~2021 (CK)"
NIRO_EV = "KIA NIRO EV 2020 (DE)"
NIRO_HEV = "KIA NIRO HEV 2018 (DE)"
SOUL_EV = "KIA SOUL EV 2019 (SK3)"
SELTOS = "KIA SELTOS 2019 (SP2)"
# Genesis
GENESIS = "GENESIS 2014-2016 (DH)"
GENESIS_G70 = "GENESIS G70 2018~ (IK)"
GENESIS_G80 = "GENESIS G80 2018~ (DH)"
GENESIS_G90 = "GENESIS G90,EQ900 2016~2019 (HI)"
# ---------------------------------------------------------------------------------------
# E-CAN Signal CAR
# hyundai - G80 2020(RG3), GV70 2021(JK1), GV80 2020(JX1), TUCSON 2021(NX4), STARIA 2021(US4), IONIQ5 2021(NE)
# kia - CARNIVAL 2021(KA4), SORENTO 2020(MQ4), K8 2021(GL3)
# ---------------------------------------------------------------------------------------
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
GAP_DIST = 3
CANCEL = 4
FINGERPRINTS = {
# Hyundai
CAR.ELANTRA_I30: [{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 546: 8, 547: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 838: 8, 844: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1087: 8, 1151: 6, 1155: 8, 1164: 8, 1168: 7, 1170: 8, 1191: 2, 1193: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1485: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1792: 8, 1872: 8, 1937: 8, 1952: 8, 1953: 8, 1960: 8, 1968: 8, 1988: 8, 1990: 8, 1998: 8, 2000: 8, 2001: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.ELANTRA21: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 524: 8, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1069: 8, 1078: 4, 1102: 8, 1107: 5, 1108: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1339: 8, 1342: 8, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
}],
CAR.ELANTRA21_HEV: [{
}],
CAR.SONATA: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 545: 8, 546: 8, 547: 8, 548: 8, 549: 8, 550: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1089: 5, 1096: 8, 1107: 5, 1108: 8, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1460: 8, 1470: 8, 1485: 8, 1504: 3, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.SONATA_HEV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 548: 8, 576: 8, 593: 8, 688: 6, 757: 2, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1102: 8, 1108: 8, 1114: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1446: 8, 1448: 8, 1456: 4, 1460: 8, 1470: 8, 1476: 8, 1535: 8
}],
CAR.SONATA_LF: [{
66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1397: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.SONATA_LF_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1186: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.KONA: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1265: 4,1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1996: 8, 1998: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8
}],
CAR.KONA_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.KONA_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1138: 4, 1151: 6, 1155: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.IONIQ_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 7, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8, 2015: 8
}],
CAR.IONIQ_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1473: 8, 1476: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.SANTA_FE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8, 1990: 8, 1998: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.SANTA_FE_HEV: [{
}],
CAR.PALISADE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 2000: 8, 2005: 8, 2008: 8
}],
CAR.VELOSTER: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1181: 5, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1378: 4, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1872: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.GRANDEUR: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
}],
CAR.GRANDEUR_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1185: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.GRANDEUR20: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 516: 8, 524: 8, 528: 8, 532: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8
}],
CAR.GRANDEUR20_HEV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 764: 8, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.NEXO: [{
127: 8, 145: 8, 146: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 512: 6, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1174: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1192: 8, 1193: 8, 1210: 8, 1219: 8, 1220: 8, 1222: 6, 1223: 8, 1224: 8, 1227: 8, 1230: 6, 1231: 6, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1297: 8, 1298: 8, 1305: 8, 1312: 8, 1315: 8, 1316: 8, 1322: 8, 1324: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1437: 8, 1456: 4, 1460: 8, 1470: 8, 1484: 8, 1507: 8, 1520: 8, 1535: 8
}],
# Kia
CAR.FORTE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1191: 2, 1225: 8, 1265: 4, 1280: 4, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1427: 6, 1456: 4, 1470: 8
}],
CAR.K5: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 625: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1236: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8, 1532: 5, 1905: 8, 1913: 8, 1952: 8, 1960: 8, 1988: 8, 1996: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.K5_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1236: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.K5_DL3: [{
}],
CAR.K5_DL3_HEV: [{
}],
CAR.SPORTAGE: [{
67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1078: 4, 1170: 8, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8
}],
CAR.SORENTO: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1
}],
CAR.MOHAVE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8
}],
CAR.STINGER: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 2015: 8
}],
CAR.NIRO_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1990: 8, 1996: 8, 1998: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.NIRO_HEV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8
}],
CAR.SOUL_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 548: 8, 549: 8, 593: 8, 688: 6, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
}],
CAR.SELTOS: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 910: 5, 911: 5, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1911: 8
}],
CAR.K7: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8
}],
CAR.K7_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1096: 8, 1102: 8, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.K9: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8
}],
# Genesis
CAR.GENESIS: [{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1342: 6, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4
}],
CAR.GENESIS_G70: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.GENESIS_G80: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1024: 2, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4, 1470: 8
}],
CAR.GENESIS_G90: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2011: 8, 2012: 8, 2013: 8, 2015: 8
}],
}
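# A minimal sketch of how a live fingerprint (CAN message id -> payload length)
# might be matched against one of the candidate dicts above; `matches` is a
# hypothetical helper for illustration and is not defined in this module:
#
#   def matches(live, candidate):
#       # every message seen live must appear in the candidate with the same length
#       return all(candidate.get(msg) == length for msg, length in live.items())

# Messages assumed to be broadcast only by the listed ECU; seeing them on a bus
# is used to infer that the ECU (here the forward camera) is present.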
ECU_FINGERPRINT = {
Ecu.fwdCamera: [832, 1156, 1191, 1342],  # 832: lkas11, 1156: hda11_mfc, 1191: mfc_4a7, 1342: lkas12
}
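# Keys are (Ecu, CAN rx address, sub-address) tuples; each value lists the
# firmware version strings known for that ECU across the platform's variants.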
FW_VERSIONS = {
# fwdRadar, fwdCamera, eps, esp, engine, transmission
# Hyundai
CAR.ELANTRA_I30: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00PD__ SCC F-CUP 1.00 1.01 99110-G3100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00PDP LKAS AT AUS RHD 1.00 1.01 99211-G4000 v60',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00PDu MDPS C 1.00 1.01 56310/G3690 4PDUC101',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00PD ESC \x11 100 \a\x03 58910-G3AC0',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TPD-1A506F000H00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2VA051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VA051\x00\x00DPD0H16US0\x00\x00\x00\x00',
],
},
CAR.ELANTRA21: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CN7_ SCC F-CUP 1.00 1.01 99110-AA000 ',
b'\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ',
b'\xf1\x8799110AA000\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.00 99210-AB000 200819',
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.03 99210-AA000 200819',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00CN7 MDPS C 1.00 1.06 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4CNDC106',
b'\xf1\x8756310/AA070\xf1\x00CN7 MDPS C 1.00 1.06 56310/AA070 4CNDC106',
b'\xf1\x8756310AA050\x00\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800',
b'\xf1\x8758910-AA800\xf1\x00CN ESC \t 104 \x08\x03 58910-AA800',
b'\xf1\x8758910-AB800\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x82CNCWD0AMFCXCSFFA',
b'\xf1\x82CNCWD0AMFCXCSFFB',
b'\xf1\x82CNCVD0AMFCXCSFFB',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\xe8\xba\xce\xfa',
b'\xf1\x87CXMQFM2135005JB2E\xb9\x89\x98W\xa9y\x97h\xa9\x98\x99wxvwh\x87\177\xffx\xff\xff\xff,,\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00',
b'\xf1\x87CXMQFM1916035JB2\x88vvgg\x87Wuwgev\xa9\x98\x88\x98h\x99\x9f\xffh\xff\xff\xff\xa5\xee\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00',
b'\xf1\x87CXLQF40189012JL2f\x88\x86\x88\x88vUex\xb8\x88\x88\x88\x87\x88\x89fh?\xffz\xff\xff\xff\x08z\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00',
],
},
CAR.ELANTRA21_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CNhe SCC FHCUP 1.00 1.01 99110-BY000 ',
b'\xf1\x8799110BY000\xf1\x00CNhe SCC FHCUP 1.00 1.01 99110-BY000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CN7HMFC AT USA LHD 1.00 1.03 99210-AA000 200819',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x8756310/BY050\xf1\x00CN7 MDPS C 1.00 1.02 56310/BY050 4CNHC102',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x006U3L0_C2\x00\x006U3K3051\x00\x00HCN0G16NS0\xb9?A\xaa',
b'\xf1\x006U3L0_C2\x00\x006U3K3051\x00\x00HCN0G16NS0\x00\x00\x00\x00',
b'\xf1\x816U3K3051\x00\x00\xf1\x006U3L0_C2\x00\x006U3K3051\x00\x00HCN0G16NS0\xb9?A\xaa',
b'\xf1\x816U3K3051\x00\x00\xf1\x006U3L0_C2\x00\x006U3K3051\x00\x00HCN0G16NS0\x00\x00\x00\x00',
],
},
CAR.SONATA: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ',
b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00DN8_ SCC F-CU- 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.02 99110-L1000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.01 99110-L1000 ',
b'\xf1\x00DN89110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ',
b'\xf1\x8799110L0000\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ',
b'\xf1\x8799110L0000\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DN8 MFC AT KOR LHD 1.00 1.02 99211-L1000 190422',
b'\xf1\x00DN8 MFC AT RUS LHD 1.00 1.03 99211-L1000 190705',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.00 99211-L0000 190716',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.01 99211-L0000 191016',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.03 99211-L0000 210603',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.05 99211-L1000 201109',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.06 99211-L1000 210325',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101',
b'\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101',
b'\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101',
b'\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100',
b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101',
b'\xf1\x8756310-L0010\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101',
b'\xf1\x8756310-L0210\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0210 4DNAC101',
b'\xf1\x8756310-L1010\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1010 4DNDC103',
b'\xf1\x8756310-L1030\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1030 4DNDC103',
b'\xf1\x8756310L0010\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101',
b'\xf1\x8756310L0210\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0210\x00 4DNAC101',
b'\xf1\x8757700-L0000\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00DN ESC \a 106 \a\x01 58910-L0100',
b'\xf1\x00DN ESC \x01 102\x19\x04\x13 58910-L1300',
b'\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300',
b'\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100',
b'\xf1\x00DN ESC \x08 103\x19\x06\x01 58910-L1300',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \a 106 \a\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0300\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300',
b'\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81HM6M1_0a0_F00',
b'\xf1\x82DNBVN5GMCCXXXDCA',
b'\xf1\x82DNBVN5GMCCXXXG2F',
b'\xf1\x82DNBWN5TMDCXXXG2E',
b'\xf1\x82DNCVN5GMCCXXXF0A',
b'\xf1\x82DNCVN5GMCCXXXG2B',
b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x82DNDWN5TMDCXXXJ1A',
b'\xf1\x87391162M003',
b'\xf1\x87391162M013',
b'\xf1\x87391162M023',
b'HM6M1_0a0_F00',
b'HM6M1_0a0_G20',
b'HM6M2_0a0_BD0',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1',
b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x00HT6TA260BLHT6TA800A1TDN8C20KS4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6TA260BLHT6TA810A1TDN8M25GS0\x00\x00\x00\x00\x00\x00\xaa\x8c\xd9p',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x96\xa1\xf1\x92',
b'\xf1\x00HT6WA280BLHT6WAD10A1SDN8G25NB2\x00\x00\x00\x00\x00\x00\x08\xc9O:',
b'\xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5',
b'\xf1\x87954A02N060\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5',
b'\xf1\x87SAKFBA2926554GJ2VefVww\x87xwwwww\x88\x87xww\x87wTo\xfb\xffvUo\xff\x8d\x16\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SAKFBA3030524GJ2UVugww\x97yx\x88\x87\x88vw\x87gww\x87wto\xf9\xfffUo\xff\xa2\x0c\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SAKFBA3356084GJ2\x86fvgUUuWgw\x86www\x87wffvf\xb6\xcf\xfc\xffeUO\xff\x12\x19\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SAKFBA3474944GJ2ffvgwwwwg\x88\x86x\x88\x88\x98\x88ffvfeo\xfa\xff\x86fo\xff\t\xae\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SAKFBA3475714GJ2Vfvgvg\x96yx\x88\x97\x88ww\x87ww\x88\x87xs_\xfb\xffvUO\xff\x0f\xff\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALDBA3510954GJ3ww\x87xUUuWx\x88\x87\x88\x87w\x88wvfwfc_\xf9\xff\x98wO\xffl\xe0\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3573534GJ3\x89\x98\x89\x88EUuWgwvwwwwww\x88\x87xTo\xfa\xff\x86f\x7f\xffo\x0e\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3601464GJ3\x88\x88\x88\x88ffvggwvwvw\x87gww\x87wvo\xfb\xff\x98\x88\x7f\xffjJ\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3753044GJ3UUeVff\x86hwwwwvwwgvfgfvo\xf9\xfffU_\xffC\xae\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3862294GJ3vfvgvefVxw\x87\x87w\x88\x87xwwwwc_\xf9\xff\x87w\x9f\xff\xd5\xdc\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3873834GJ3fefVwuwWx\x88\x97\x88w\x88\x97xww\x87wU_\xfb\xff\x86f\x8f\xffN\x04\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4525334GJ3\x89\x99\x99\x99fevWh\x88\x86\x88fwvgw\x88\x87xfo\xfa\xffuDo\xff\xd1>\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4626804GJ3wwww\x88\x87\x88xx\x88\x87\x88wwgw\x88\x88\x98\x88\x95_\xf9\xffuDo\xff|\xe7\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4803224GJ3wwwwwvwg\x88\x88\x98\x88wwww\x87\x88\x88xu\x9f\xfc\xff\x87f\x8f\xff\xea\xea\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6212564GJ3\x87wwwUTuGg\x88\x86xx\x88\x87\x88\x87\x88\x98xu?\xf9\xff\x97f\x7f\xff\xb8\n\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6347404GJ3wwwwff\x86hx\x88\x97\x88\x88\x88\x88\x88vfgf\x88?\xfc\xff\x86Uo\xff\xec/\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6901634GJ3UUuWVeVUww\x87wwwwwvUge\x86/\xfb\xff\xbb\x99\x7f\xff]2\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA7077724GJ3\x98\x88\x88\x88ww\x97ygwvwww\x87ww\x88\x87x\x87_\xfd\xff\xba\x99o\xff\x99\x01\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALFBA3525114GJ2wvwgvfvggw\x86wffvffw\x86g\x85_\xf9\xff\xa8wo\xffv\xcd\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA3624024GJ2\x88\x88\x88\x88wv\x87hx\x88\x97\x88x\x88\x97\x88ww\x87w\x86o\xfa\xffvU\x7f\xff\xd1\xec\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA3960824GJ2wwwwff\x86hffvfffffvfwfg_\xf9\xff\xa9\x88\x8f\xffb\x99\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA4011074GJ2fgvwwv\x87hw\x88\x87xww\x87wwfgvu_\xfa\xffefo\xff\x87\xc0\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA4121304GJ2x\x87xwff\x86hwwwwww\x87wwwww\x84_\xfc\xff\x98\x88\x9f\xffi\xa6\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA4195874GJ2EVugvf\x86hgwvwww\x87wgw\x86wc_\xfb\xff\x98\x88\x8f\xff\xe23\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA4625294GJ2eVefeUeVx\x88\x97\x88wwwwwwww\xa7o\xfb\xffvw\x9f\xff\xee.\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA4728774GJ2vfvg\x87vwgww\x87ww\x88\x97xww\x87w\x86_\xfb\xffeD?\xffk0\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA5129064GJ2vfvgwv\x87hx\x88\x87\x88ww\x87www\x87wd_\xfa\xffvfo\xff\x1d\x00\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA5454914GJ2\x98\x88\x88\x88\x87vwgx\x88\x87\x88xww\x87ffvf\xa7\x7f\xf9\xff\xa8w\x7f\xff\x1b\x90\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA5987784GJ2UVugDDtGx\x88\x87\x88w\x88\x87xwwwwd/\xfb\xff\x97fO\xff\xb0h\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA5987864GJ2fgvwUUuWgwvw\x87wxwwwww\x84/\xfc\xff\x97w\x7f\xff\xdf\x1d\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA6337644GJ2vgvwwv\x87hgffvwwwwwwww\x85O\xfa\xff\xa7w\x7f\xff\xc5\xfc\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA6802004GJ2UUuWUUuWgw\x86www\x87www\x87w\x96?\xf9\xff\xa9\x88\x7f\xff\x9fK\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA6892284GJ233S5\x87w\x87xx\x88\x87\x88vwwgww\x87w\x84?\xfb\xff\x98\x88\x8f\xff*\x9e\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SALFBA7005534GJ2eUuWfg\x86xxww\x87x\x88\x87\x88\x88w\x88\x87\x87O\xfc\xffuUO\xff\xa3k\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1',
b'\xf1\x87SALFBA7152454GJ2gvwgFf\x86hx\x88\x87\x88vfWfffffd?\xfa\xff\xba\x88o\xff,\xcf\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1',
b'\xf1\x87SALFBA7485034GJ2ww\x87xww\x87xfwvgwwwwvfgf\xa5/\xfc\xff\xa9w_\xff40\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x87SAMDBA7743924GJ3wwwwww\x87xgwvw\x88\x88\x88\x88wwww\x85_\xfa\xff\x86f\x7f\xff0\x9d\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SAMDBA7817334GJ3Vgvwvfvgww\x87wwwwwwfgv\x97O\xfd\xff\x88\x88o\xff\x8e\xeb\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SAMDBA8054504GJ3gw\x87xffvgffffwwwweUVUf?\xfc\xffvU_\xff\xddl\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SAMFB41553621GC7ww\x87xUU\x85Xvwwg\x88\x88\x88\x88wwgw\x86\xaf\xfb\xffuDo\xff\xaa\x8f\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x87SAMFB42555421GC7\x88\x88\x88\x88wvwgx\x88\x87\x88wwgw\x87wxw3\x8f\xfc\xff\x98f\x8f\xffga\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x87SAMFBA7978674GJ2gw\x87xgw\x97ywwwwvUGeUUeU\x87O\xfb\xff\x98w\x8f\xfffF\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x87SAMFBA9283024GJ2wwwwEUuWwwgwwwwwwwww\x87/\xfb\xff\x98w\x8f\xff<\xd3\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x87SAMFBA9708354GJ2wwwwVf\x86h\x88wx\x87xww\x87\x88\x88\x88\x88w/\xfa\xff\x97w\x8f\xff\x86\xa0\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
],
},
CAR.SONATA_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DNhe SCC FHCUP 1.00 1.02 99110-L5000 ',
b'\xf1\x8799110L5000\xf1\x00DNhe SCC FHCUP 1.00 1.02 99110-L5000 ',
b'\xf1\x00DNhe SCC F-CUP 1.00 1.02 99110-L5000 ',
b'\xf1\x8799110L5000\xf1\x00DNhe SCC F-CUP 1.00 1.02 99110-L5000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DN8HMFC AT USA LHD 1.00 1.04 99211-L1000 191016',
b'\xf1\x00DN8HMFC AT USA LHD 1.00 1.05 99211-L1000 201109',
b'\xf1\x00DN8HMFC AT USA LHD 1.00 1.06 99211-L1000 210325',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x8756310-L5500\xf1\x00DN8 MDPS C 1.00 1.02 56310-L5500 4DNHC102',
b'\xf1\x8756310-L5450\xf1\x00DN8 MDPS C 1.00 1.02 56310-L5450 4DNHC102',
b'\xf1\x8756310-L5450\xf1\x00DN8 MDPS C 1.00 1.03 56310-L5450 4DNHC103',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100\xf1\xa01.04',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x87391062J002\xf1\xa0000P',
b'\xf1\x87391162J012',
b'\xf1\x87391162J013',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TDN2H20SA6N\xc2\xeeW',
b'\xf1\x87959102T250\x00\x00\x00\x00\x00\xf1\x81E09\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2323 E09\x00\x00\x00\x00\x00\x00\x00TDN2H20SA5\x97R\x88\x9e',
b'\xf1\x00PSBG2323 E09\x00\x00\x00\x00\x00\x00\x00TDN2H20SA5\x97R\x88\x9e',
b'\xf1\x00PSBG2333 E16\x00\x00\x00\x00\x00\x00\x00TDN2H20SA7\x1a3\xf9\xab',
b'\xf1\x87PCU\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x81E16\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2333 E16\x00\x00\x00\x00\x00\x00\x00TDN2H20SA7\x1a3\xf9\xab',
b'\xf1\x87959102T250\x00\x00\x00\x00\x00\xf1\x81E14\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TDN2H20SA6N\xc2\xeeW',
],
},
CAR.SONATA_LF: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LF__ SCC F-CUP 1.00 1.00 96401-C2200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LFF LKAS AT USA LHD 1.00 1.01 95740-C1000 E51',
b'\xf1\x00LFF LKAS AT USA LHD 1.01 1.02 95740-C1000 E52',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LF ESC \f 11 \x17\x01\x13 58920-C2610',
b'\xf1\x00LF ESC \t 11 \x17\x01\x13 58920-C2610',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606D5051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606D5K51\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\x00\x00\x00\x00',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24SL2n\x8d\xbe\xd8',
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2\x00\x00\x00\x00',
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2H\r\xbdm',
],
},
CAR.KONA: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00OS__ SCC F-CUP 1.00 1.00 95655-J9200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00OS9 LKAS AT USA LHD 1.00 1.00 95740-J9300 g21',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00OS MDPS C 1.00 1.05 56310J9030\x00 4OSDC105',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x816V5RAK00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'"\x01TOS-0NU06F301J02',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2VE051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VE051\x00\x00DOS4T16NS3\x00\x00\x00\x00',
],
},
CAR.KONA_EV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4000 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4100 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ',
b'\xf1\x00OSev SCC FNCUP 1.00 1.01 99110-K4000 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ',
b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ',
b'\xf1\x8799110Q4500\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4500 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4100 200706',
b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT EUR RHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT KOR LHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OE2 LKAS AT EUR LHD 1.00 1.00 95740-K4200 200',
b'\xf1\x00OSE LKAS AT USA LHD 1.00 1.00 95740-K4300 W50',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00OS MDPS C 1.00 1.03 56310/K4550 4OEDC103',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4000\x00 4OEDC104',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000',
b'\xf1\x00OS IEB \x01 212 \x11\x13 58520-K4000',
b'\xf1\x00OS IEB \x02 212 \x11\x13 58520-K4000',
b'\xf1\x00OS IEB \x03 210 \x02\x14 58520-K4000',
b'\xf1\x00OS IEB \x03 212 \x11\x13 58520-K4000',
],
},
CAR.KONA_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00OShe SCC FNCUP 1.00 1.01 99110-CM000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00OSH LKAS AT KOR LHD 1.00 1.01 95740-CM000 l31',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00OS MDPS C 1.00 1.00 56310CM030\x00 4OHDC100',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00OS IEB \x01 104 \x11 58520-CM000',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HOS0G16DS1\x16\xc7\xb0\xd9',
],
},
CAR.IONIQ_EV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7100 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.01 99110-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 99110-G7200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G7200 160418',
b'\xf1\x00AEE MFC AT USA LHD 1.00 1.00 95740-G2400 180222',
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.03 95740-G2500 190516',
b'\xf1\x00AEE MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.02 56310G7300\x00 4AEEC102',
b'\xf1\x00AE MDPS C 1.00 1.04 56310/G7501 4AEEC104',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7310 4APEC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7560 4APEC101',
],
},
CAR.IONIQ_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEhe SCC F-CUP 1.00 1.02 99110-G2100 ',
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2200 ',
b'\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ',
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2600 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.00 95740-G2400 180222',
b'\xf1\x00AEP MFC AT USA LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEH MFC AT USA LHD 1.00 1.00 95740-G2700 201027',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.07 56310/G2301 4AEHC107',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G2310 4APHC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G2510 4APHC101',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x816H6F2051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J8051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J8051\x00\x00HAE0G16UL0Nd\xed:',
b'\xf1\x816U3H1051\x00\x00\xf1\x006U3H0_C2\x00\x006U3H1051\x00\x00HAE0G16US2\x95\xa2^$',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00PAE0G16NL0\x82zT\xd2',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HAE0G16NL2\x00\x00\x00\x00',
],
},
CAR.SANTA_FE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.01 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.02 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.03 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
b'\xf1\x8799110S1500\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TM MFC AT USA LHD 1.00 1.00 99211-S2000 180409',
b'\xf1\x00TMA MFC AT MEX LHD 1.00 1.01 99211-S2500 210205',
b'\xf1\x00TMA MFC AT USA LHD 1.00 1.00 99211-S2500 200720',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8A12',
b'\xf1\x00TM MDPS C 1.00 1.01 56340-S2000 9129',
b'\xf1\x00TM MDPS C 1.00 1.02 56370-S2AA0 0B19',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \r 100\x18\x031 58910-S2650',
b'\xf1\x00TM ESC \r 103\x18\x11\x08 58910-S2650',
b'\xf1\x00TM ESC \r 104\x19\a\b 58910-S2650',
b'\xf1\x00TM ESC \x02 100\x18\x030 58910-S2600',
b'\xf1\x00TM ESC \x02 102\x18\x07\x01 58910-S2600',
b'\xf1\x00TM ESC \x02 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x02 104\x19\x07\x07 58910-S2600',
b'\xf1\x00TM ESC \x03 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x0c 103\x18\x11\x08 58910-S2650',
b'\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
b'\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2DA0\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2GA0\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606EA051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G3051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x82TMBZN5TMD3XXXG2E',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LBJSGA7082574HG0\x87www\x98\x88\x88\x88\x99\xaa\xb9\x9afw\x86gx\x99\xa7\x89co\xf8\xffvU_\xffR\xaf\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\xa6\xe0\x91',
b'\xf1\x87LBKSGA0458404HG0vfvg\x87www\x89\x99\xa8\x99y\xaa\xa7\x9ax\x88\xa7\x88t_\xf9\xff\x86w\x8f\xff\x15x\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\x00\x00\x00',
b'\xf1\x87LDJUEA6010814HG1\x87w\x87x\x86gvw\x88\x88\x98\x88gw\x86wx\x88\x97\x88\x85o\xf8\xff\x86f_\xff\xd37\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDJUEA6458264HG1ww\x87x\x97x\x87\x88\x88\x99\x98\x89g\x88\x86xw\x88\x97x\x86o\xf7\xffvw\x8f\xff3\x9a\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDKUEA2045844HG1wwww\x98\x88x\x87\x88\x88\xa8\x88x\x99\x97\x89x\x88\xa7\x88U\x7f\xf8\xffvfO\xffC\x1e\xf1\x816W3E0051\x00\x00\xf1\x006W351_C2\x00\x006W3E0051\x00\x00TTM4T20NS3\x00\x00\x00\x00',
b'\xf1\x87LDKUEA9993304HG1\x87www\x97x\x87\x88\x99\x99\xa9\x99x\x99\xa7\x89w\x88\x97x\x86_\xf7\xffwwO\xffl#\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS1R\x7f\x90\n',
b'\xf1\x87LDLUEA6061564HG1\xa9\x99\x89\x98\x87wwwx\x88\x97\x88x\x99\xa7\x89x\x99\xa7\x89sO\xf9\xffvU_\xff<\xde\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6159884HG1\x88\x87hv\x99\x99y\x97\x89\xaa\xb8\x9ax\x99\x87\x89y\x99\xb7\x99\xa7?\xf7\xff\x97wo\xff\xf3\x05\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6852664HG1\x97wWu\x97www\x89\xaa\xc8\x9ax\x99\x97\x89x\x99\xa7\x89SO\xf7\xff\xa8\x88\x7f\xff\x03z\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87SBJWAA5842214GG0\x88\x87\x88xww\x87x\x89\x99\xa8\x99\x88\x99\x98\x89w\x88\x87xw_\xfa\xfffU_\xff\xd1\x8d\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA5890864GG0\xa9\x99\x89\x98\x98\x87\x98y\x89\x99\xa8\x99w\x88\x87xww\x87wvo\xfb\xffuD_\xff\x9f\xb5\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x00\x00\x00\x00',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA7780564GG0wvwgUUeVwwwwx\x88\x87\x88wwwwd_\xfc\xff\x86f\x7f\xff\xd7*\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBJWAA8278284GG0ffvgUU\x85Xx\x88\x87\x88x\x88w\x88ww\x87w\x96o\xfd\xff\xa7U_\xff\xf2\xa0\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6\x00\x00\x00\x00',
b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6x0\x17\xfe',
b'\xf1\x87SBLWAA4899564GG0VfvgUU\x85Xx\x88\x87\x88vfgf\x87wxwvO\xfb\xff\x97f\xb1\xffSB\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7\x00\x00\x00\x00',
b'\xf1\x87SBLWAA6622844GG0wwwwff\x86hwwwwx\x88\x87\x88\x88\x88\x88\x88\x98?\xfd\xff\xa9\x88\x7f\xffn\xe5\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7u\x1e{\x1c',
b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2\x00\x00\x00\x00',
b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2K\xdaV0',
b'\xf1\x87SDKXAA2443414GG1vfvgwv\x87h\x88\x88\x88\x88ww\x87wwwww\x99_\xfc\xffvD?\xffl\xd2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4G24NS6\x00\x00\x00\x00',
b'\xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7',
b'\xf1\x87SDMXCA8653204GN1EVugEUuWwwwwww\x87wwwwwv/\xfb\xff\xa8\x88\x9f\xff\xa5\x9c\xf1\x89HT6WAD00A1\xf1\x82STM4G25NH1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87954A02N250\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7',
],
},
CAR.SANTA_FE_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x8799110CL500\xf1\x00TMhe SCC FHCUP 1.00 1.00 99110-CL500 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TMH MFC AT USA LHD 1.00 1.03 99211-S1500 210224',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.02 56310-CLAC0 4TSHC102',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x87391312MTC1',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87959102T250\x00\x00\x00\x00\x00\xf1\x81E14\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TTM2H16SA2\x80\xd7l\xb2',
],
},
CAR.PALISADE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LX2_ SCC F-CUP 1.00 1.05 99110-S8100 ',
b'\xf1\x00LX2 SCC FHCUP 1.00 1.04 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCU- 1.00 1.05 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.00 99110-S8110 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.04 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.05 99110-S8100 ',
b'\xf1\x00ON__ FCA FHCUP 1.00 1.02 99110-S9100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.03 99211-S8100 190125',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.05 99211-S8100 190909',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.07 99211-S8100 200422',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.08 99211-S8100 200903',
b'\xf1\x00ON MFC AT USA LHD 1.00 1.01 99211-S9100 181105',
b'\xf1\x00ON MFC AT USA LHD 1.00 1.03 99211-S9100 200720',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00LX2 MDPS C 1,00 1,03 56310-S8020 4LXDC103', # modified firmware
b'\xf1\x00LX2 MDPS C 1.00 1.03 56310-S8020 4LXDC103',
b'\xf1\x00LX2 MDPS C 1.00 1.04 56310-S8020 4LXDC104',
b'\xf1\x00ON MDPS C 1.00 1.00 56340-S9000 8B13',
b'\xf1\x00ON MDPS C 1.00 1.01 56340-S9000 9201',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LX ESC \x01 103\x19\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x01 103\x31\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x0b 101\x19\x03\x17 58910-S8330',
b'\xf1\x00LX ESC \x0b 102\x19\x05\x07 58910-S8330',
b'\xf1\x00LX ESC \x0b 103\x19\t\x07 58910-S8330',
b'\xf1\x00LX ESC \x0b 103\x19\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x0b 104 \x10\x16 58910-S8360',
b'\xf1\x00ON ESC \x0b 100\x18\x12\x18 58910-S9360',
b'\xf1\x00ON ESC \x0b 101\x19\t\x08 58910-S9360',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640K0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640S1051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LBLUFN591307KF25vgvw\x97wwwy\x99\xa7\x99\x99\xaa\xa9\x9af\x88\x96h\x95o\xf7\xff\x99f/\xff\xe4c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB2\xd7\xc1/\xd1',
b'\xf1\x87LBLUFN650868KF36\xa9\x98\x89\x88\xa8\x88\x88\x88h\x99\xa6\x89fw\x86gw\x88\x97x\xaa\x7f\xf6\xff\xbb\xbb\x8f\xff+\x82\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN655162KF36\x98\x88\x88\x88\x98\x88\x88\x88x\x99\xa7\x89x\x99\xa7\x89x\x99\x97\x89g\x7f\xf7\xffwU_\xff\xe9!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN731381KF36\xb9\x99\x89\x98\x98\x88\x88\x88\x89\x99\xa8\x99\x88\x99\xa8\x89\x88\x88\x98\x88V\x7f\xf6\xff\x99w\x8f\xff\xad\xd8\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LDKVAA0028604HH1\xa8\x88x\x87vgvw\x88\x99\xa8\x89gw\x86ww\x88\x97x\x97o\xf9\xff\x97w\x7f\xffo\x02\xf1\x81U872\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x87LDKVAA3068374HH1wwww\x87xw\x87y\x99\xa7\x99w\x88\x87xw\x88\x97x\x85\xaf\xfa\xffvU/\xffU\xdc\xf1\x81U872\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x87LDKVBN382172KF26\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\xa7\x89\x87\x88\x98x\x98\x99\xa9\x89\xa5_\xf6\xffDDO\xff\xcd\x16\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDKVBN424201KF26\xba\xaa\x9a\xa9\x99\x99\x89\x98\x89\x99\xa8\x99\x88\x99\x98\x89\x88\x99\xa8\x89v\x7f\xf7\xffwf_\xffq\xa6\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDKVBN540766KF37\x87wgv\x87w\x87xx\x99\x97\x89v\x88\x97h\x88\x88\x88\x88x\x7f\xf6\xffvUo\xff\xd3\x01\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVAA4225634HH1\x98\x88\x88\x88eUeVx\x88\x87\x88g\x88\x86xx\x88\x87\x88\x86o\xf9\xff\x87w\x7f\xff\xf2\xf7\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVAA4777834HH1\x98\x88x\x87\x87wwwx\x88\x87\x88x\x99\x97\x89x\x88\x97\x88\x86o\xfa\xff\x86fO\xff\x1d9\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVAA5194534HH1ffvguUUUx\x88\xa7\x88h\x99\x96\x89x\x88\x97\x88ro\xf9\xff\x98wo\xff\xaaM\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVAA5949924HH1\xa9\x99y\x97\x87wwwx\x99\x97\x89x\x99\xa7\x89x\x99\xa7\x89\x87_\xfa\xffeD?\xff\xf1\xfd\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVBN560098KF26\x86fff\x87vgfg\x88\x96xfw\x86gfw\x86g\x95\xf6\xffeU_\xff\x92c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVBN602045KF26\xb9\x99\x89\x98\x97vwgy\xaa\xb7\x9af\x88\x96hw\x99\xa7y\xa9\x7f\xf5\xff\x99w\x7f\xff,\xd3\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN628911KF26\xa9\x99\x89\x98\x98\x88\x88\x88y\x99\xa7\x99fw\x86gw\x88\x87x\x83\x7f\xf6\xff\x98wo\xff2\xda\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN645817KF37\x87www\x98\x87xwx\x99\x97\x89\x99\x99\x99\x99g\x88\x96x\xb6_\xf7\xff\x98fo\xff\xe2\x86\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN662115KF37\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\x97\x89x\x99\xa7\x89\x88\x99\xa8\x89\x88\x7f\xf7\xfffD_\xff\xdc\x84\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN667933KF37\xb9\x99\x89\x98\xb9\x99\x99\x99x\x88\x87\x88w\x88\x87x\x88\x88\x98\x88\xcbo\xf7\xffe3/\xffQ!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN673087KF37\x97www\x86fvgx\x99\x97\x89\x99\xaa\xa9\x9ag\x88\x86x\xe9_\xf8\xff\x98w\x7f\xff"\xad\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN673841KF37\x98\x88x\x87\x86g\x86xy\x99\xa7\x99\x88\x99\xa8\x89w\x88\x97xdo\xf5\xff\x98\x88\x8f\xffT\xec\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN681363KF37\x98\x88\x88\x88\x97x\x87\x88y\xaa\xa7\x9a\x88\x88\x98\x88\x88\x88\x88\x88vo\xf6\xffvD\x7f\xff%v\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN713782KF37\x99\x99y\x97\x98\x88\x88\x88x\x88\x97\x88\x88\x99\x98\x89\x88\x99\xa8\x89\x87o\xf7\xffeU?\xff7,\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN713890KF26\xb9\x99\x89\x98\xa9\x99\x99\x99x\x99\x97\x89\x88\x99\xa8\x89\x88\x99\xb8\x89Do\xf7\xff\xa9\x88o\xffs\r\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN733215KF37\x99\x98y\x87\x97wwwi\x99\xa6\x99x\x99\xa7\x89V\x88\x95h\x86o\xf7\xffeDO\xff\x12\xe7\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN750044KF37\xca\xa9\x8a\x98\xa7wwwy\xaa\xb7\x9ag\x88\x96x\x88\x99\xa8\x89\xb9\x7f\xf6\xff\xa8w\x7f\xff\xbe\xde\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN752612KF37\xba\xaa\x8a\xa8\x87w\x87xy\xaa\xa7\x9a\x88\x99\x98\x89x\x88\x97\x88\x96o\xf6\xffvU_\xffh\x1b\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN755553KF37\x87xw\x87\x97w\x87xy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95gwo\xf6\xffwUO\xff\xb5T\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN757883KF37\x98\x87xw\x98\x87\x88xy\xaa\xb7\x9ag\x88\x96x\x89\x99\xa8\x99e\x7f\xf6\xff\xa9\x88o\xff5\x15\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN778156KF37\x87vWe\xa9\x99\x99\x99y\x99\xb7\x99\x99\x99\x99\x99x\x99\x97\x89\xa8\x7f\xf8\xffwf\x7f\xff\x82_\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN780576KF37\x98\x87hv\x97x\x97\x89x\x99\xa7\x89\x88\x99\x98\x89w\x88\x97x\x98\x7f\xf7\xff\xba\x88\x8f\xff\x1e0\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN783485KF37\x87www\x87vwgy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95g\x89_\xf6\xff\xa9w_\xff\xc5\xd6\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN811844KF37\x87vwgvfffx\x99\xa7\x89Vw\x95gg\x88\xa6xe\x8f\xf6\xff\x97wO\xff\t\x80\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN830601KF37\xa7www\xa8\x87xwx\x99\xa7\x89Uw\x85Ww\x88\x97x\x88o\xf6\xff\x8a\xaa\x7f\xff\xe2:\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN848789KF37\x87w\x87x\x87w\x87xy\x99\xb7\x99\x87\x88\x98x\x88\x99\xa8\x89\x87\x7f\xf6\xfffUo\xff\xe3!\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN851595KF37\x97wgvvfffx\x99\xb7\x89\x88\x99\x98\x89\x87\x88\x98x\x99\x7f\xf7\xff\x97w\x7f\xff@\xf3\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN873175KF26\xa8\x88\x88\x88vfVex\x99\xb7\x89\x88\x99\x98\x89x\x88\x97\x88f\x7f\xf7\xff\xbb\xaa\x8f\xff,\x04\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN879401KF26veVU\xa8\x88\x88\x88g\x88\xa6xVw\x95gx\x88\xa7\x88v\x8f\xf9\xff\xdd\xbb\xbf\xff\xb3\x99\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN881314KF37\xa8\x88h\x86\x97www\x89\x99\xa8\x99w\x88\x97xx\x99\xa7\x89\xca\x7f\xf8\xff\xba\x99\x8f\xff\xd8v\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN888651KF37\xa9\x99\x89\x98vfff\x88\x99\x98\x89w\x99\xa7y\x88\x88\x98\x88D\x8f\xf9\xff\xcb\x99\x8f\xff\xa5\x1e\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN889419KF37\xa9\x99y\x97\x87w\x87xx\x88\x97\x88w\x88\x97x\x88\x99\x98\x89e\x9f\xf9\xffeUo\xff\x901\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN895969KF37vefV\x87vgfx\x99\xa7\x89\x99\x99\xb9\x99f\x88\x96he_\xf7\xffxwo\xff\x14\xf9\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN899222KF37\xa8\x88x\x87\x97www\x98\x99\x99\x89\x88\x99\x98\x89f\x88\x96hdo\xf7\xff\xbb\xaa\x9f\xff\xe2U\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b"\xf1\x87LBLUFN622950KF36\xa8\x88\x88\x88\x87w\x87xh\x99\x96\x89\x88\x99\x98\x89\x88\x99\x98\x89\x87o\xf6\xff\x98\x88o\xffx'\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8",
],
},
CAR.VELOSTER: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00JS__ SCC H-CUP 1.00 1.02 95650-J3200 ',
b'\xf1\x00JS__ SCC HNCUP 1.00 1.02 95650-J3100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00JS LKAS AT USA LHD 1.00 1.02 95740-J3000 K32',
b'\xf1\x00JS LKAS AT KOR LHD 1.00 1.03 95740-J3000 K33',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00JSL MDPS C 1.00 1.03 56340-J3000 8308',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TJS-JNU06F200H0A',
b'\x01TJS-JDK06F200H0A',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\xba\x02\xb8\x80',
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\x00\x00\x00\x00',
      b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16KS2\x0e\xba\x1e\xa2',
],
},
  # Kia
CAR.FORTE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00BD__ SCC H-CUP 1.00 1.02 99110-M6000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00BD LKAS AT USA LHD 1.00 1.04 95740-M6000 J33',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00BD MDPS C 1.00 1.02 56310-XX000 4BD2C102',
b'\xf1\x00BD MDPS C 1.00 1.08 56310/M6300 4BDDC108',
b'\xf1\x00BD MDPS C 1.00 1.08 56310M6300\x00 4BDDC108',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x816VGRAH00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TBDM1NU06F200H01',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\x00\x00\x00\x00',
b"\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\xcf\x1e'\xc3", ],
},
CAR.K5: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00JF__ SCC F-CUP 1.00 1.00 96400-D4110 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.02 95895-D5000 h31',
b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.00 95895-D5001 h32',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00JF ESC \v 11 \x18\x030 58920-D5180',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TJFAJNU06F201H03',
b'\xf1\x89F1JF600AISEIU702\xf1\x82F1JF600AISEIU702',
],
(Ecu.transmission, 0x7e1, None): [
      b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJF0T16NL0\t\xd2GW',
    ],
},
CAR.K5_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00DE MDPS C 1.00 1.09 56310G5301\x00 4DEHC109',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F4051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b"\xf1\x816U3J2051\x00\x00\xf1\x006U3H0_C2\x00\x006U3J2051\x00\x00PDE0G16NS2\xf4'\\\x91", ],
},
CAR.K5_DL3: {
(Ecu.fwdRadar, 0x7D0, None): [
      b'\xf1\x00DL3_ SCC FHCUP      1.00 1.03 99110-L2000         ',
      b'\xf1\x8799110L2000\xf1\x00DL3_ SCC FHCUP      1.00 1.03 99110-L2000         ',
b'\xf1\x8799110L2100\xf1\x00DL3_ SCC F-CUP 1.00 1.03 99110-L2100 ',
b'\xf1\x8799110L2100\xf1\x00DL3_ SCC FHCUP 1.00 1.03 99110-L2100 ',
],
(Ecu.fwdCamera, 0x7C4, None): [
      b'\xf1\x00DL3 MFC  AT USA LHD 1.00 1.03 99210-L3000 200915',
b'\xf1\x00DL3 MFC AT USA LHD 1.00 1.04 99210-L3000 210208',
],
(Ecu.eps, 0x7D4, None): [
      b'\xf1\x8756310-L3110\xf1\x00DL3 MDPS C 1.00 1.01 56310-L3110 4DLAC101',
b'\xf1\x8756310-L3220\xf1\x00DL3 MDPS C 1.00 1.01 56310-L3220 4DLAC101',
b'\xf1\x8757700-L3000\xf1\x00DL3 MDPS R 1.00 1.02 57700-L3000 4DLAP102',
],
(Ecu.esp, 0x7D1, None): [
      b'\xf1\x00DL ESC \x06 101 \x04\x02 58910-L3200',
      b'\xf1\x8758910-L3200\xf1\x00DL ESC \x06 101 \x04\x02 58910-L3200',
b'\xf1\x8758910-L3800\xf1\x00DL ESC \t 101 \x07\x02 58910-L3800',
b'\xf1\x8758910-L3600\xf1\x00DL ESC \x03 100 \x08\x02 58910-L3600',
],
(Ecu.engine, 0x7E0, None): [
b'\xf1\x87391212MKT0',
b'\xf1\x87391212MKV0',
b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x82DLDWN5TMDCXXXJ1B',
],
(Ecu.transmission, 0x7E1, None): [
      b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL2T16NB1ia\v\xb8',
      b'\xf1\x87SALFEA5652514GK2UUeV\x88\x87\x88xxwg\x87ww\x87wwfwvd/\xfb\xffvU_\xff\x93\xd3\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL2T16NB1ia\v\xb8',
b'\xf1\x87SALFEA6046104GK2wvwgeTeFg\x88\x96xwwwwffvfe?\xfd\xff\x86fo\xff\x97A\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL2T16NB1ia\x0b\xb8',
b'\xf1\x87SCMSAA8572454GK1\x87x\x87\x88Vf\x86hgwvwvwwgvwwgT?\xfb\xff\x97fo\xffH\xb8\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL4T16NB05\x94t\x18',
b'\xf1\x87954A02N300\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 WDL3T25XXX730NS2b\x1f\xb8%',
],
},
CAR.STINGER: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CK__ SCC F_CUP 1.00 1.01 96400-J5100 ',
b'\xf1\x00CK__ SCC F_CUP 1.00 1.03 96400-J5100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CK MFC AT USA LHD 1.00 1.03 95740-J5000 170822',
b'\xf1\x00CK MFC AT USA LHD 1.00 1.04 95740-J5000 180504',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5200 4C2CL104',
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5220 4C2VL104',
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5420 4C4VL104',
b'\xf1\x00CK MDPS R 1.00 1.06 57700-J5420 4C4VL106',
b'\xf1\x00CK MDPS R 1.00 1.07 57700-J5420 4C4VL107',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606DE051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640E0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640L0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x82CKJN3TMSDE0B\x00\x00\x00\x00',
b'\xf1\x82CKKN3TMD_H0A\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87VCJLE17622572DK0vd6D\x99\x98y\x97vwVffUfvfC%CuT&Dx\x87o\xff{\x1c\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x89E21\x00\x00\x00\x00\x00\x00\x00\xf1\x82SCK0T33NB0',
b'\xf1\x87VDHLG17034412DK2vD6DfVvVTD$D\x99w\x88\x98EDEDeT6DgfO\xff\xc3=\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17118862DK2\x8awWwgu\x96wVfUVwv\x97xWvfvUTGTx\x87o\xff\xc9\xed\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDJLG18425192DK2xeGewfgf\x86eFeweWv\x88eVeuTGT\x89vo\xff\tJ\xf1\x81E24\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E24\x00\x00\x00\x00\x00\x00\x00SCK0T33NB1\x8a\xdcM\x90',
b'\xf1\x87VDKLJ18675252DK6\x89vhgwwwwveVU\x88w\x87w\x99vgf\x97vXfgw_\xff\xc2\xfb\xf1\x89E25\x00\x00\x00\x00\x00\x00\x00\xf1\x82TCK0T33NB2',
b'\xf1\x87WAJTE17552812CH4vfFffvfVeT5DwvvVVdFeegeg\x88\x88o\xff\x1a]\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00TCK2T20NB1\x19\xd2\x00\x94',
],
},
CAR.NIRO_EV: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.02 96400-Q4100 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ',
b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ',
b'\xf1\x8799110Q4500\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4500 ',
b'\xf1\x8799110Q4600\xf1\x00DEev SCC FNCUP 1.00 1.00 99110-Q4600 ',
b'\xf1\x8799110Q4600\xf1\x00DEev SCC FHCUP 1.00 1.00 99110-Q4600 ',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211',
      b'\xf1\x00DEE MFC  AT EUR LHD 1.00 1.00 99211-Q4100 200706',
b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\x00OS IEB \r 212 \x11\x13 58520-K4000',
],
},
CAR.NIRO_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ',
b'\xf1\x00DEhe SCC FHCUP 1.00 1.00 99110-G5600 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424',
b'\xf1\x00DEH MFC AT USA LHD 1.00 1.07 99211-G5000 201221',
],
(Ecu.eps, 0x7d4, None): [
      b'\xf1\x00DE MDPS C 1.00 1.09 56310G5301\x00 4DEHC109',
b'\xf1\x00DE MDPS C 1.00 1.01 56310G5520\x00 4DEPC101',
],
(Ecu.engine, 0x7e0, None): [
      b'\xf1\x816H6F4051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b"\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\xf4\'\\\x91",
b'\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\000\000\000\000',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\x00\x00\x00\x00',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\xb9\xd3\xfaW',
],
},
CAR.SELTOS: {
(Ecu.fwdRadar, 0x7d0, None): [
      b'\xf1\x8799110Q5100\xf1\x00SP2_ SCC FHCUP      1.01 1.05 99110-Q5100         ',
],
(Ecu.fwdCamera, 0x7c4, None): [
      b'\xf1\x00SP2 MFC  AT USA LHD 1.00 1.04 99210-Q5000 191114',
      b'\xf1\x00SP2 MFC  AT USA LHD 1.00 1.05 99210-Q5000 201012',
],
(Ecu.eps, 0x7d4, None): [
      b'\xf1\x00SP2 MDPS C 1.00 1.04 56300Q5200          ',
      b'\xf1\x00SP2 MDPS C 1.01 1.05 56300Q5200          ',
],
(Ecu.esp, 0x7d1, None): [
      b'\xf1\x8758910-Q5450\xf1\x00SP ESC \a 101\x19\t\x05 58910-Q5450',
      b'\xf1\x8758910-Q5450\xf1\x00SP ESC \t 101\x19\t\x05 58910-Q5450',
],
(Ecu.engine, 0x7e0, None): [
      b'\xf1\x81616D2051\x00\x00\x00\x00\x00\x00\x00\x00',
      b'\xf1\x81616D5051\x00\x00\x00\x00\x00\x00\x00\x00',
      b'\x01TSP2KNL06F100J0K',
      b'\x01TSP2KNL06F200J0K',
],
(Ecu.transmission, 0x7e1, None): [
      b'\xf1\x87CZLUB49370612JF7h\xa8y\x87\x99\xa7hv\x99\x97fv\x88\x87x\x89x\x96O\xff\x88\xff\xff\xff.@\xf1\x816V2C2051\x00\x00\xf1\x006V2B0_C2\x00\x006V2C2051\x00\x00CSP4N20NS3\x00\x00\x00\x00',
      b'\xf1\x87954A22D200\xf1\x81T01950A1  \xf1\x00T0190XBL T01950A1 DSP2T16X4X950NS6\xd30\xa5\xb9',
      b'\xf1\x87954A22D200\xf1\x81T01950A1  \xf1\x00T0190XBL T01950A1 DSP2T16X4X950NS8\r\xfe\x9c\x8b',
],
},
  CAR.K7: {
    (Ecu.eps, 0x7d4, None): [
      b'\xf1\x00YG MDPS C 1.00 1.01 56310F6350\x00 4YG7C101',
    ],
  },
# Genesis
CAR.GENESIS_G70: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 ',
b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 \xf1\xa01.02',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00IK MFC AT USA LHD 1.00 1.01 95740-G9000 170920',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00IK MDPS R 1.00 1.06 57700-G9420 4I4VL106',
b'\xf1\x00IK MDPS R 1.00 1.07 57700-G9220 4I2VL107',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81640F0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87VDJLT17895112DN4\x88fVf\x99\x88\x88\x88\x87fVe\x88vhwwUFU\x97eFex\x99\xff\xb7\x82\xf1\x81E25\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB2\x11\x1am\xda',
      b'\xf1\x87VCJLP18407832DN3\x88vXfvUVT\x97eFU\x87d7v\x88eVeveFU\x89\x98\x7f\xff\xb2\xb0\xf1\x81E25\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB4\xecE\xefL',
],
},
}
CHECKSUM = {
"crc8": [CAR.SONATA, CAR.SANTA_FE, CAR.PALISADE, CAR.SELTOS, CAR.ELANTRA21, CAR.K5_DL3,
CAR.SONATA_HEV, CAR.SANTA_FE_HEV, CAR.SOUL_EV, CAR.ELANTRA21_HEV, CAR.K5_DL3_HEV],
"6B": [CAR.SORENTO, CAR.GENESIS],
}
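# The CHECKSUM table above selects how the LKAS11 checksum is computed per
# platform. A hedged sketch of that dispatch, paraphrasing upstream openpilot's
# hyundaican.py (lkas11_checksum_sketch is illustrative only; verify the exact
# byte slicing and CRC parameters against this fork's hyundaican.py):
def lkas11_checksum_sketch(car, dat: bytes) -> int:
  if car in CHECKSUM["crc8"]:
    import crcmod  # local import keeps this optional helper self-contained
    # CRC-8 (poly 0x1D, init 0xFD, xor-out 0xDF) over bytes 0-5 and byte 7
    return crcmod.mkCrcFun(0x11D, initCrc=0xFD, rev=False, xorOut=0xdf)(dat[:6] + dat[7:8])
  elif car in CHECKSUM["6B"]:
    return sum(dat[:6]) % 256  # sum of the first 6 bytes
  return (sum(dat[:6]) + dat[7]) % 256  # first 6 bytes plus the last byte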
FEATURES = {
"use_cluster_gears": # Use Cluster for Gear Selection, rather than Transmission [ CLU15 ]
{CAR.ELANTRA_I30, CAR.KONA, CAR.GRANDEUR, CAR.MOHAVE, CAR.NIRO_HEV, CAR.K7},
"use_tcu_gears": # Use TCU Message for Gear Selection [ TCU12 ]
{CAR.SONATA_LF, CAR.VELOSTER, CAR.K5},
"use_elect_gears": # Use Elect GEAR Message for Gear Selection [ ELECT_GEAR ]
{CAR.KONA_EV, CAR.IONIQ_EV, CAR.NEXO, CAR.NIRO_EV, CAR.SOUL_EV, CAR.KONA_HEV, CAR.IONIQ_HEV, CAR.NIRO_HEV,
CAR.SONATA_HEV, CAR.SONATA_LF_HEV, CAR.GRANDEUR_HEV, CAR.GRANDEUR20_HEV,
CAR.K5_HEV, CAR.K5_DL3_HEV, CAR.K7_HEV},
  # all other cars read the gear position from [ LVR12 ]; see the gear-source sketch below this dict
  # these cars use the [ FCA11 ] message for the AEB and FCW signals; all others use [ SCC12 ]
  # "use_fca": {},  # superseded: the AEB/FCW source is selected via the carstate aeb_fcw flag and the Qt UI AEB-select toggle
  # "has_scc13": {},
  # "has_scc14": {},
  # newer LFA cars: carcontroller / hyundaican emit LFA MFC frames, enabled via the Qt UI MFC-select toggle
}
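# A hedged sketch of how carstate consumes FEATURES to choose the gear-position
# source; the message names come from the comments above, and gear_message_for
# is a hypothetical helper, not this fork's actual carstate code:
def gear_message_for(car) -> str:
  if car in FEATURES["use_cluster_gears"]:
    return "CLU15"       # gear read from the instrument cluster
  elif car in FEATURES["use_tcu_gears"]:
    return "TCU12"       # gear read from the transmission control unit
  elif car in FEATURES["use_elect_gears"]:
    return "ELECT_GEAR"  # gear read from the EV/HEV gear message
  return "LVR12"         # default gear lever message for everything else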
EV_CAR = {CAR.KONA_EV, CAR.IONIQ_EV, CAR.NIRO_EV, CAR.SOUL_EV, CAR.NEXO}
HYBRID_CAR = {CAR.KONA_HEV, CAR.IONIQ_HEV, CAR.NIRO_HEV, CAR.SANTA_FE_HEV,
CAR.ELANTRA21_HEV, CAR.SONATA_HEV, CAR.SONATA_LF_HEV, CAR.GRANDEUR_HEV, CAR.GRANDEUR20_HEV,
CAR.K5_HEV, CAR.K5_DL3_HEV, CAR.K7_HEV}
EV_HYBRID_CAR = EV_CAR | HYBRID_CAR
DBC = {
# Hyundai
CAR.ELANTRA_I30: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA21: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA21_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SONATA_LF: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_LF_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SANTA_FE_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR20: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR20_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NEXO: dbc_dict('hyundai_kia_generic_nexo', None),
# Kia
CAR.FORTE: dbc_dict('hyundai_kia_generic', None),
CAR.K5: dbc_dict('hyundai_kia_generic', None),
CAR.K5_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K5_DL3: dbc_dict('hyundai_kia_generic', None),
CAR.K5_DL3_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SPORTAGE: dbc_dict('hyundai_kia_generic', None),
CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
CAR.MOHAVE: dbc_dict('hyundai_kia_generic', None),
CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SOUL_EV: dbc_dict('hyundai_kia_generic', None),
CAR.SELTOS: dbc_dict('hyundai_kia_generic', None),
CAR.K7: dbc_dict('hyundai_kia_generic', None),
CAR.K7_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K9: dbc_dict('hyundai_kia_generic', None),
# Genesis
CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
}
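# Each DBC entry pairs a powertrain database with an optional radar database
# via dbc_dict (keys 'pt' and 'radar' in selfdrive/car). A hedged usage sketch;
# make_can_parser_sketch is illustrative, and the empty signal/check lists are
# placeholders for the real (signal, message) configuration:
def make_can_parser_sketch(car):
  from opendbc.can.parser import CANParser  # assumption: opendbc is importable here
  signals, checks = [], []  # real code fills these with the messages it needs
  return CANParser(DBC[car]['pt'], signals, checks, 0)  # powertrain CAN on bus 0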
STEER_THRESHOLD = 150  # abs driver steering torque above this is treated as a driver override
def main():
for member, value in vars(CAR).items():
if not member.startswith("_"):
print(value)
if __name__ == "__main__":
main()
],
},
CAR.SONATA_LF: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LF__ SCC F-CUP 1.00 1.00 96401-C2200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LFF LKAS AT USA LHD 1.00 1.01 95740-C1000 E51',
b'\xf1\x00LFF LKAS AT USA LHD 1.01 1.02 95740-C1000 E52',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LF ESC \f 11 \x17\x01\x13 58920-C2610',
b'\xf1\x00LF ESC \t 11 \x17\x01\x13 58920-C2610',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606D5051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606D5K51\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\x00\x00\x00\x00',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24SL2n\x8d\xbe\xd8',
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2\x00\x00\x00\x00',
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2H\r\xbdm',
],
},
CAR.KONA: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00OS__ SCC F-CUP 1.00 1.00 95655-J9200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00OS9 LKAS AT USA LHD 1.00 1.00 95740-J9300 g21',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00OS MDPS C 1.00 1.05 56310J9030\x00 4OSDC105',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x816V5RAK00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'"\x01TOS-0NU06F301J02',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2VE051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VE051\x00\x00DOS4T16NS3\x00\x00\x00\x00',
],
},
CAR.KONA_EV: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4000 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4100 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ',
b'\xf1\x00OSev SCC FNCUP 1.00 1.01 99110-K4000 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ',
b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ',
b'\xf1\x8799110Q4500\xf1\000DEev SCC F-CUP 1.00 1.00 99110-Q4500 ',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\000DEE MFC AT EUR LHD 1.00 1.00 99211-Q4100 200706',
b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT EUR RHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT KOR LHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OE2 LKAS AT EUR LHD 1.00 1.00 95740-K4200 200',
b'\xf1\x00OSE LKAS AT USA LHD 1.00 1.00 95740-K4300 W50',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00OS MDPS C 1.00 1.03 56310/K4550 4OEDC103',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4000\x00 4OEDC104',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000',
b'\xf1\x00OS IEB \x01 212 \x11\x13 58520-K4000',
b'\xf1\x00OS IEB \x02 212 \x11\x13 58520-K4000',
b'\xf1\x00OS IEB \x03 210 \x02\x14 58520-K4000',
b'\xf1\x00OS IEB \x03 212 \x11\x13 58520-K4000',
],
},
CAR.KONA_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00OShe SCC FNCUP 1.00 1.01 99110-CM000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00OSH LKAS AT KOR LHD 1.00 1.01 95740-CM000 l31',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00OS MDPS C 1.00 1.00 56310CM030\x00 4OHDC100',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00OS IEB \x01 104 \x11 58520-CM000',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HOS0G16DS1\x16\xc7\xb0\xd9',
],
},
CAR.IONIQ_EV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7100 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.01 99110-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 99110-G7200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G7200 160418',
b'\xf1\x00AEE MFC AT USA LHD 1.00 1.00 95740-G2400 180222',
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.03 95740-G2500 190516',
b'\xf1\x00AEE MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.02 56310G7300\x00 4AEEC102',
b'\xf1\x00AE MDPS C 1.00 1.04 56310/G7501 4AEEC104',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7310 4APEC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7560 4APEC101',
],
},
CAR.IONIQ_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\000AEhe SCC F-CUP 1.00 1.02 99110-G2100 ',
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2200 ',
b'\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ',
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2600 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.00 95740-G2400 180222',
b'\xf1\000AEP MFC AT USA LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEH MFC AT USA LHD 1.00 1.00 95740-G2700 201027',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00AE MDPS C 1.00 1.07 56310/G2301 4AEHC107',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G2310 4APHC101',
b'\xf1\000AE MDPS C 1.00 1.01 56310/G2510 4APHC101',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x816H6F2051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J8051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J8051\x00\x00HAE0G16UL0Nd\xed:',
b'\xf1\x816U3H1051\x00\x00\xf1\x006U3H0_C2\x00\x006U3H1051\x00\x00HAE0G16US2\x95\xa2^$',
      b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00PAE0G16NL0\x82zT\xd2',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HAE0G16NL2\x00\x00\x00\x00',
],
},
CAR.SANTA_FE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.01 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.02 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.03 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
b'\xf1\x8799110S1500\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TM MFC AT USA LHD 1.00 1.00 99211-S2000 180409',
b'\xf1\x00TMA MFC AT MEX LHD 1.00 1.01 99211-S2500 210205',
b'\xf1\x00TMA MFC AT USA LHD 1.00 1.00 99211-S2500 200720',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8A12',
b'\xf1\x00TM MDPS C 1.00 1.01 56340-S2000 9129',
b'\xf1\x00TM MDPS C 1.00 1.02 56370-S2AA0 0B19',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \r 100\x18\x031 58910-S2650',
b'\xf1\x00TM ESC \r 103\x18\x11\x08 58910-S2650',
b'\xf1\x00TM ESC \r 104\x19\a\b 58910-S2650',
b'\xf1\x00TM ESC \x02 100\x18\x030 58910-S2600',
b'\xf1\x00TM ESC \x02 102\x18\x07\x01 58910-S2600',
b'\xf1\x00TM ESC \x02 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x02 104\x19\x07\x07 58910-S2600',
b'\xf1\x00TM ESC \x03 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x0c 103\x18\x11\x08 58910-S2650',
b'\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
b'\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2DA0\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2GA0\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606EA051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G3051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x82TMBZN5TMD3XXXG2E',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LBJSGA7082574HG0\x87www\x98\x88\x88\x88\x99\xaa\xb9\x9afw\x86gx\x99\xa7\x89co\xf8\xffvU_\xffR\xaf\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\xa6\xe0\x91',
b'\xf1\x87LBKSGA0458404HG0vfvg\x87www\x89\x99\xa8\x99y\xaa\xa7\x9ax\x88\xa7\x88t_\xf9\xff\x86w\x8f\xff\x15x\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\x00\x00\x00',
b'\xf1\x87LDJUEA6010814HG1\x87w\x87x\x86gvw\x88\x88\x98\x88gw\x86wx\x88\x97\x88\x85o\xf8\xff\x86f_\xff\xd37\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDJUEA6458264HG1ww\x87x\x97x\x87\x88\x88\x99\x98\x89g\x88\x86xw\x88\x97x\x86o\xf7\xffvw\x8f\xff3\x9a\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDKUEA2045844HG1wwww\x98\x88x\x87\x88\x88\xa8\x88x\x99\x97\x89x\x88\xa7\x88U\x7f\xf8\xffvfO\xffC\x1e\xf1\x816W3E0051\x00\x00\xf1\x006W351_C2\x00\x006W3E0051\x00\x00TTM4T20NS3\x00\x00\x00\x00',
b'\xf1\x87LDKUEA9993304HG1\x87www\x97x\x87\x88\x99\x99\xa9\x99x\x99\xa7\x89w\x88\x97x\x86_\xf7\xffwwO\xffl#\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS1R\x7f\x90\n',
b'\xf1\x87LDLUEA6061564HG1\xa9\x99\x89\x98\x87wwwx\x88\x97\x88x\x99\xa7\x89x\x99\xa7\x89sO\xf9\xffvU_\xff<\xde\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6159884HG1\x88\x87hv\x99\x99y\x97\x89\xaa\xb8\x9ax\x99\x87\x89y\x99\xb7\x99\xa7?\xf7\xff\x97wo\xff\xf3\x05\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6852664HG1\x97wWu\x97www\x89\xaa\xc8\x9ax\x99\x97\x89x\x99\xa7\x89SO\xf7\xff\xa8\x88\x7f\xff\x03z\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87SBJWAA5842214GG0\x88\x87\x88xww\x87x\x89\x99\xa8\x99\x88\x99\x98\x89w\x88\x87xw_\xfa\xfffU_\xff\xd1\x8d\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA5890864GG0\xa9\x99\x89\x98\x98\x87\x98y\x89\x99\xa8\x99w\x88\x87xww\x87wvo\xfb\xffuD_\xff\x9f\xb5\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x00\x00\x00\x00',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA7780564GG0wvwgUUeVwwwwx\x88\x87\x88wwwwd_\xfc\xff\x86f\x7f\xff\xd7*\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBJWAA8278284GG0ffvgUU\x85Xx\x88\x87\x88x\x88w\x88ww\x87w\x96o\xfd\xff\xa7U_\xff\xf2\xa0\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6\x00\x00\x00\x00',
b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6x0\x17\xfe',
b'\xf1\x87SBLWAA4899564GG0VfvgUU\x85Xx\x88\x87\x88vfgf\x87wxwvO\xfb\xff\x97f\xb1\xffSB\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7\x00\x00\x00\x00',
b'\xf1\x87SBLWAA6622844GG0wwwwff\x86hwwwwx\x88\x87\x88\x88\x88\x88\x88\x98?\xfd\xff\xa9\x88\x7f\xffn\xe5\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7u\x1e{\x1c',
b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2\x00\x00\x00\x00',
b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2K\xdaV0',
b'\xf1\x87SDKXAA2443414GG1vfvgwv\x87h\x88\x88\x88\x88ww\x87wwwww\x99_\xfc\xffvD?\xffl\xd2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4G24NS6\x00\x00\x00\x00',
b'\xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7',
b'\xf1\x87SDMXCA8653204GN1EVugEUuWwwwwww\x87wwwwwv/\xfb\xff\xa8\x88\x9f\xff\xa5\x9c\xf1\x89HT6WAD00A1\xf1\x82STM4G25NH1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87954A02N250\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7',
],
},
CAR.SANTA_FE_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x8799110CL500\xf1\x00TMhe SCC FHCUP 1.00 1.00 99110-CL500 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TMH MFC AT USA LHD 1.00 1.03 99211-S1500 210224',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.02 56310-CLAC0 4TSHC102',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x87391312MTC1',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87959102T250\x00\x00\x00\x00\x00\xf1\x81E14\x00\x00\x00\x00\x00\x00\x00\xf1\x00PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TTM2H16SA2\x80\xd7l\xb2',
],
},
CAR.PALISADE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\000LX2_ SCC F-CUP 1.00 1.05 99110-S8100 ',
b'\xf1\x00LX2 SCC FHCUP 1.00 1.04 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCU- 1.00 1.05 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.00 99110-S8110 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.04 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.05 99110-S8100 ',
b'\xf1\x00ON__ FCA FHCUP 1.00 1.02 99110-S9100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.03 99211-S8100 190125',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.05 99211-S8100 190909',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.07 99211-S8100 200422',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.08 99211-S8100 200903',
b'\xf1\x00ON MFC AT USA LHD 1.00 1.01 99211-S9100 181105',
b'\xf1\x00ON MFC AT USA LHD 1.00 1.03 99211-S9100 200720',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00LX2 MDPS C 1,00 1,03 56310-S8020 4LXDC103', # modified firmware
b'\xf1\x00LX2 MDPS C 1.00 1.03 56310-S8020 4LXDC103',
b'\xf1\x00LX2 MDPS C 1.00 1.04 56310-S8020 4LXDC104',
b'\xf1\x00ON MDPS C 1.00 1.00 56340-S9000 8B13',
b'\xf1\x00ON MDPS C 1.00 1.01 56340-S9000 9201',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LX ESC \x01 103\x19\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x01 103\x31\t\020 58910-S8360',
b'\xf1\x00LX ESC \x0b 101\x19\x03\x17 58910-S8330',
b'\xf1\x00LX ESC \x0b 102\x19\x05\x07 58910-S8330',
b'\xf1\x00LX ESC \x0b 103\x19\t\x07 58910-S8330',
b'\xf1\x00LX ESC \x0b 103\x19\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x0b 104 \x10\x16 58910-S8360',
b'\xf1\x00ON ESC \x0b 100\x18\x12\x18 58910-S9360',
b'\xf1\x00ON ESC \x0b 101\x19\t\x08 58910-S9360',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640K0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640S1051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LBLUFN591307KF25vgvw\x97wwwy\x99\xa7\x99\x99\xaa\xa9\x9af\x88\x96h\x95o\xf7\xff\x99f/\xff\xe4c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB2\xd7\xc1/\xd1',
b'\xf1\x87LBLUFN650868KF36\xa9\x98\x89\x88\xa8\x88\x88\x88h\x99\xa6\x89fw\x86gw\x88\x97x\xaa\x7f\xf6\xff\xbb\xbb\x8f\xff+\x82\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN655162KF36\x98\x88\x88\x88\x98\x88\x88\x88x\x99\xa7\x89x\x99\xa7\x89x\x99\x97\x89g\x7f\xf7\xffwU_\xff\xe9!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN731381KF36\xb9\x99\x89\x98\x98\x88\x88\x88\x89\x99\xa8\x99\x88\x99\xa8\x89\x88\x88\x98\x88V\177\xf6\xff\x99w\x8f\xff\xad\xd8\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\000bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LDKVAA0028604HH1\xa8\x88x\x87vgvw\x88\x99\xa8\x89gw\x86ww\x88\x97x\x97o\xf9\xff\x97w\x7f\xffo\x02\xf1\x81U872\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x87LDKVAA3068374HH1wwww\x87xw\x87y\x99\xa7\x99w\x88\x87xw\x88\x97x\x85\xaf\xfa\xffvU/\xffU\xdc\xf1\x81U872\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x87LDKVBN382172KF26\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\xa7\x89\x87\x88\x98x\x98\x99\xa9\x89\xa5_\xf6\xffDDO\xff\xcd\x16\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDKVBN424201KF26\xba\xaa\x9a\xa9\x99\x99\x89\x98\x89\x99\xa8\x99\x88\x99\x98\x89\x88\x99\xa8\x89v\x7f\xf7\xffwf_\xffq\xa6\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDKVBN540766KF37\x87wgv\x87w\x87xx\x99\x97\x89v\x88\x97h\x88\x88\x88\x88x\x7f\xf6\xffvUo\xff\xd3\x01\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVAA4225634HH1\x98\x88\x88\x88eUeVx\x88\x87\x88g\x88\x86xx\x88\x87\x88\x86o\xf9\xff\x87w\x7f\xff\xf2\xf7\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVAA4777834HH1\x98\x88x\x87\x87wwwx\x88\x87\x88x\x99\x97\x89x\x88\x97\x88\x86o\xfa\xff\x86fO\xff\x1d9\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVAA5194534HH1ffvguUUUx\x88\xa7\x88h\x99\x96\x89x\x88\x97\x88ro\xf9\xff\x98wo\xff\xaaM\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVAA5949924HH1\xa9\x99y\x97\x87wwwx\x99\x97\x89x\x99\xa7\x89x\x99\xa7\x89\x87_\xfa\xffeD?\xff\xf1\xfd\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LDLVBN560098KF26\x86fff\x87vgfg\x88\x96xfw\x86gfw\x86g\x95\xf6\xffeU_\xff\x92c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVBN602045KF26\xb9\x99\x89\x98\x97vwgy\xaa\xb7\x9af\x88\x96hw\x99\xa7y\xa9\x7f\xf5\xff\x99w\x7f\xff,\xd3\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN628911KF26\xa9\x99\x89\x98\x98\x88\x88\x88y\x99\xa7\x99fw\x86gw\x88\x87x\x83\x7f\xf6\xff\x98wo\xff2\xda\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN645817KF37\x87www\x98\x87xwx\x99\x97\x89\x99\x99\x99\x99g\x88\x96x\xb6_\xf7\xff\x98fo\xff\xe2\x86\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN662115KF37\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\x97\x89x\x99\xa7\x89\x88\x99\xa8\x89\x88\x7f\xf7\xfffD_\xff\xdc\x84\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN667933KF37\xb9\x99\x89\x98\xb9\x99\x99\x99x\x88\x87\x88w\x88\x87x\x88\x88\x98\x88\xcbo\xf7\xffe3/\xffQ!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN673087KF37\x97www\x86fvgx\x99\x97\x89\x99\xaa\xa9\x9ag\x88\x86x\xe9_\xf8\xff\x98w\x7f\xff"\xad\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN673841KF37\x98\x88x\x87\x86g\x86xy\x99\xa7\x99\x88\x99\xa8\x89w\x88\x97xdo\xf5\xff\x98\x88\x8f\xffT\xec\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN681363KF37\x98\x88\x88\x88\x97x\x87\x88y\xaa\xa7\x9a\x88\x88\x98\x88\x88\x88\x88\x88vo\xf6\xffvD\x7f\xff%v\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN713782KF37\x99\x99y\x97\x98\x88\x88\x88x\x88\x97\x88\x88\x99\x98\x89\x88\x99\xa8\x89\x87o\xf7\xffeU?\xff7,\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN713890KF26\xb9\x99\x89\x98\xa9\x99\x99\x99x\x99\x97\x89\x88\x99\xa8\x89\x88\x99\xb8\x89Do\xf7\xff\xa9\x88o\xffs\r\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN733215KF37\x99\x98y\x87\x97wwwi\x99\xa6\x99x\x99\xa7\x89V\x88\x95h\x86o\xf7\xffeDO\xff\x12\xe7\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN750044KF37\xca\xa9\x8a\x98\xa7wwwy\xaa\xb7\x9ag\x88\x96x\x88\x99\xa8\x89\xb9\x7f\xf6\xff\xa8w\x7f\xff\xbe\xde\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN752612KF37\xba\xaa\x8a\xa8\x87w\x87xy\xaa\xa7\x9a\x88\x99\x98\x89x\x88\x97\x88\x96o\xf6\xffvU_\xffh\x1b\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN755553KF37\x87xw\x87\x97w\x87xy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95gwo\xf6\xffwUO\xff\xb5T\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN757883KF37\x98\x87xw\x98\x87\x88xy\xaa\xb7\x9ag\x88\x96x\x89\x99\xa8\x99e\x7f\xf6\xff\xa9\x88o\xff5\x15\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN778156KF37\x87vWe\xa9\x99\x99\x99y\x99\xb7\x99\x99\x99\x99\x99x\x99\x97\x89\xa8\x7f\xf8\xffwf\x7f\xff\x82_\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN780576KF37\x98\x87hv\x97x\x97\x89x\x99\xa7\x89\x88\x99\x98\x89w\x88\x97x\x98\x7f\xf7\xff\xba\x88\x8f\xff\x1e0\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN783485KF37\x87www\x87vwgy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95g\x89_\xf6\xff\xa9w_\xff\xc5\xd6\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN811844KF37\x87vwgvfffx\x99\xa7\x89Vw\x95gg\x88\xa6xe\x8f\xf6\xff\x97wO\xff\t\x80\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN830601KF37\xa7www\xa8\x87xwx\x99\xa7\x89Uw\x85Ww\x88\x97x\x88o\xf6\xff\x8a\xaa\x7f\xff\xe2:\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN848789KF37\x87w\x87x\x87w\x87xy\x99\xb7\x99\x87\x88\x98x\x88\x99\xa8\x89\x87\x7f\xf6\xfffUo\xff\xe3!\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN851595KF37\x97wgvvfffx\x99\xb7\x89\x88\x99\x98\x89\x87\x88\x98x\x99\x7f\xf7\xff\x97w\x7f\xff@\xf3\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN873175KF26\xa8\x88\x88\x88vfVex\x99\xb7\x89\x88\x99\x98\x89x\x88\x97\x88f\x7f\xf7\xff\xbb\xaa\x8f\xff,\x04\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN879401KF26veVU\xa8\x88\x88\x88g\x88\xa6xVw\x95gx\x88\xa7\x88v\x8f\xf9\xff\xdd\xbb\xbf\xff\xb3\x99\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN881314KF37\xa8\x88h\x86\x97www\x89\x99\xa8\x99w\x88\x97xx\x99\xa7\x89\xca\x7f\xf8\xff\xba\x99\x8f\xff\xd8v\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN888651KF37\xa9\x99\x89\x98vfff\x88\x99\x98\x89w\x99\xa7y\x88\x88\x98\x88D\x8f\xf9\xff\xcb\x99\x8f\xff\xa5\x1e\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN889419KF37\xa9\x99y\x97\x87w\x87xx\x88\x97\x88w\x88\x97x\x88\x99\x98\x89e\x9f\xf9\xffeUo\xff\x901\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN895969KF37vefV\x87vgfx\x99\xa7\x89\x99\x99\xb9\x99f\x88\x96he_\xf7\xffxwo\xff\x14\xf9\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b'\xf1\x87LDMVBN899222KF37\xa8\x88x\x87\x97www\x98\x99\x99\x89\x88\x99\x98\x89f\x88\x96hdo\xf7\xff\xbb\xaa\x9f\xff\xe2U\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB5\xb9\x94\xe8\x89',
b"\xf1\x87LBLUFN622950KF36\xa8\x88\x88\x88\x87w\x87xh\x99\x96\x89\x88\x99\x98\x89\x88\x99\x98\x89\x87o\xf6\xff\x98\x88o\xffx'\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8",
],
},
CAR.VELOSTER: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00JS__ SCC H-CUP 1.00 1.02 95650-J3200 ',
b'\xf1\x00JS__ SCC HNCUP 1.00 1.02 95650-J3100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00JS LKAS AT USA LHD 1.00 1.02 95740-J3000 K32',
b'\xf1\x00JS LKAS AT KOR LHD 1.00 1.03 95740-J3000 K33',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00JSL MDPS C 1.00 1.03 56340-J3000 8308',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TJS-JNU06F200H0A',
b'\x01TJS-JDK06F200H0A',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\xba\x02\xb8\x80',
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\x00\x00\x00\x00',
      b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16KS2\x0e\xba\x1e\xa2',
],
},
# kia
CAR.FORTE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00BD__ SCC H-CUP 1.00 1.02 99110-M6000 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00BD LKAS AT USA LHD 1.00 1.04 95740-M6000 J33',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00BD MDPS C 1.00 1.02 56310-XX000 4BD2C102',
b'\xf1\x00BD MDPS C 1.00 1.08 56310/M6300 4BDDC108',
b'\xf1\x00BD MDPS C 1.00 1.08 56310M6300\x00 4BDDC108',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x816VGRAH00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TBDM1NU06F200H01',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\x00\x00\x00\x00',
b"\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\xcf\x1e'\xc3", ],
},
CAR.K5: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00JF__ SCC F-CUP 1.00 1.00 96400-D4110 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.02 95895-D5000 h31',
b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.00 95895-D5001 h32',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00JF ESC \v 11 \x18\x030 58920-D5180',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TJFAJNU06F201H03',
b'\xf1\x89F1JF600AISEIU702\xf1\x82F1JF600AISEIU702',
],
(Ecu.transmission, 0x7e1, None): [
      b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJF0T16NL0\t\xd2GW',
    ],
},
CAR.K5_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00DE MDPS C 1.00 1.09 56310G5301\x00 4DEHC109',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F4051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b"\xf1\x816U3J2051\x00\x00\xf1\x006U3H0_C2\x00\x006U3J2051\x00\x00PDE0G16NS2\xf4'\\\x91", ],
},
CAR.K5_DL3: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\000DL3_ SCC FHCUP 1.00 1.03 99110-L2000 ',
b'\xf1\x8799110L2000\xf1\000DL3_ SCC FHCUP 1.00 1.03 99110-L2000 ',
b'\xf1\x8799110L2100\xf1\x00DL3_ SCC F-CUP 1.00 1.03 99110-L2100 ',
b'\xf1\x8799110L2100\xf1\x00DL3_ SCC FHCUP 1.00 1.03 99110-L2100 ',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\000DL3 MFC AT USA LHD 1.00 1.03 99210-L3000 200915',
b'\xf1\x00DL3 MFC AT USA LHD 1.00 1.04 99210-L3000 210208',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x8756310-L3110\xf1\000DL3 MDPS C 1.00 1.01 56310-L3110 4DLAC101',
b'\xf1\x8756310-L3220\xf1\x00DL3 MDPS C 1.00 1.01 56310-L3220 4DLAC101',
b'\xf1\x8757700-L3000\xf1\x00DL3 MDPS R 1.00 1.02 57700-L3000 4DLAP102',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\000DL ESC \006 101 \004\002 58910-L3200',
b'\xf1\x8758910-L3200\xf1\000DL ESC \006 101 \004\002 58910-L3200',
b'\xf1\x8758910-L3800\xf1\x00DL ESC \t 101 \x07\x02 58910-L3800',
b'\xf1\x8758910-L3600\xf1\x00DL ESC \x03 100 \x08\x02 58910-L3600',
],
(Ecu.engine, 0x7E0, None): [
b'\xf1\x87391212MKT0',
b'\xf1\x87391212MKV0',
b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x82DLDWN5TMDCXXXJ1B',
],
(Ecu.transmission, 0x7E1, None): [
b'\xf1\000bcsh8p54 U913\000\000\000\000\000\000TDL2T16NB1ia\v\xb8',
b'\xf1\x87SALFEA5652514GK2UUeV\x88\x87\x88xxwg\x87ww\x87wwfwvd/\xfb\xffvU_\xff\x93\xd3\xf1\x81U913\000\000\000\000\000\000\xf1\000bcsh8p54 U913\000\000\000\000\000\000TDL2T16NB1ia\v\xb8',
b'\xf1\x87SALFEA6046104GK2wvwgeTeFg\x88\x96xwwwwffvfe?\xfd\xff\x86fo\xff\x97A\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL2T16NB1ia\x0b\xb8',
b'\xf1\x87SCMSAA8572454GK1\x87x\x87\x88Vf\x86hgwvwvwwgvwwgT?\xfb\xff\x97fo\xffH\xb8\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00TDL4T16NB05\x94t\x18',
b'\xf1\x87954A02N300\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 WDL3T25XXX730NS2b\x1f\xb8%',
],
},
CAR.STINGER: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CK__ SCC F_CUP 1.00 1.01 96400-J5100 ',
b'\xf1\x00CK__ SCC F_CUP 1.00 1.03 96400-J5100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CK MFC AT USA LHD 1.00 1.03 95740-J5000 170822',
b'\xf1\x00CK MFC AT USA LHD 1.00 1.04 95740-J5000 180504',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5200 4C2CL104',
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5220 4C2VL104',
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5420 4C4VL104',
b'\xf1\x00CK MDPS R 1.00 1.06 57700-J5420 4C4VL106',
b'\xf1\x00CK MDPS R 1.00 1.07 57700-J5420 4C4VL107',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606DE051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640E0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640L0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x82CKJN3TMSDE0B\x00\x00\x00\x00',
b'\xf1\x82CKKN3TMD_H0A\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87VCJLE17622572DK0vd6D\x99\x98y\x97vwVffUfvfC%CuT&Dx\x87o\xff{\x1c\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x89E21\x00\x00\x00\x00\x00\x00\x00\xf1\x82SCK0T33NB0',
b'\xf1\x87VDHLG17034412DK2vD6DfVvVTD$D\x99w\x88\x98EDEDeT6DgfO\xff\xc3=\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17118862DK2\x8awWwgu\x96wVfUVwv\x97xWvfvUTGTx\x87o\xff\xc9\xed\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDJLG18425192DK2xeGewfgf\x86eFeweWv\x88eVeuTGT\x89vo\xff\tJ\xf1\x81E24\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E24\x00\x00\x00\x00\x00\x00\x00SCK0T33NB1\x8a\xdcM\x90',
b'\xf1\x87VDKLJ18675252DK6\x89vhgwwwwveVU\x88w\x87w\x99vgf\x97vXfgw_\xff\xc2\xfb\xf1\x89E25\x00\x00\x00\x00\x00\x00\x00\xf1\x82TCK0T33NB2',
b'\xf1\x87WAJTE17552812CH4vfFffvfVeT5DwvvVVdFeegeg\x88\x88o\xff\x1a]\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00TCK2T20NB1\x19\xd2\x00\x94',
],
},
CAR.NIRO_EV: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.02 96400-Q4100 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ',
b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ',
b'\xf1\x8799110Q4500\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4500 ',
b'\xf1\x8799110Q4600\xf1\x00DEev SCC FNCUP 1.00 1.00 99110-Q4600 ',
b'\xf1\x8799110Q4600\xf1\x00DEev SCC FHCUP 1.00 1.00 99110-Q4600 ',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\000DEE MFC AT EUR LHD 1.00 1.00 99211-Q4100 200706',
b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\x00OS IEB \r 212 \x11\x13 58520-K4000',
],
},
CAR.NIRO_HEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DEhe SCC H-CUP 1.01 1.02 96400-G5100 ',
b'\xf1\x00DEhe SCC FHCUP 1.00 1.00 99110-G5600 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424',
b'\xf1\x00DEH MFC AT USA LHD 1.00 1.07 99211-G5000 201221',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\000DE MDPS C 1.00 1.09 56310G5301\000 4DEHC109',
b'\xf1\x00DE MDPS C 1.00 1.01 56310G5520\x00 4DEPC101',
],
(Ecu.engine, 0x7e0, None): [
      b'\xf1\x816H6F4051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b"\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\xf4\'\\\x91",
b'\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\000\000\000\000',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\x00\x00\x00\x00',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\xb9\xd3\xfaW',
],
},
CAR.SELTOS: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x8799110Q5100\xf1\000SP2_ SCC FHCUP 1.01 1.05 99110-Q5100 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\000SP2 MFC AT USA LHD 1.00 1.04 99210-Q5000 191114',
b'\xf1\000SP2 MFC AT USA LHD 1.00 1.05 99210-Q5000 201012',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\000SP2 MDPS C 1.00 1.04 56300Q5200 ',
b'\xf1\000SP2 MDPS C 1.01 1.05 56300Q5200 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x8758910-Q5450\xf1\000SP ESC \a 101\031\t\005 58910-Q5450',
b'\xf1\x8758910-Q5450\xf1\000SP ESC \t 101\031\t\005 58910-Q5450',
],
(Ecu.engine, 0x7e0, None): [
      b'\xf1\x81616D2051\x00\x00\x00\x00\x00\x00\x00\x00',
      b'\xf1\x81616D5051\x00\x00\x00\x00\x00\x00\x00\x00',
      b'\x01TSP2KNL06F100J0K',
      b'\x01TSP2KNL06F200J0K',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87CZLUB49370612JF7h\xa8y\x87\x99\xa7hv\x99\x97fv\x88\x87x\x89x\x96O\xff\x88\xff\xff\xff.@\xf1\x816V2C2051\000\000\xf1\0006V2B0_C2\000\0006V2C2051\000\000CSP4N20NS3\000\000\000\000',
b'\xf1\x87954A22D200\xf1\x81T01950A1 \xf1\000T0190XBL T01950A1 DSP2T16X4X950NS6\xd30\xa5\xb9',
b'\xf1\x87954A22D200\xf1\x81T01950A1 \xf1\000T0190XBL T01950A1 DSP2T16X4X950NS8\r\xfe\x9c\x8b',
],
},
CAR.K7: {
(Ecu.eps, 0x7d4, None): [b'\xf1\000YG MDPS C 1.00 1.01 56310F6350\000 4YG7C101',],
},
CAR.GENESIS_G70: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 ',
b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 \xf1\xa01.02',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00IK MFC AT USA LHD 1.00 1.01 95740-G9000 170920',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00IK MDPS R 1.00 1.06 57700-G9420 4I4VL106',
b'\xf1\x00IK MDPS R 1.00 1.07 57700-G9220 4I2VL107',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81640F0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87VDJLT17895112DN4\x88fVf\x99\x88\x88\x88\x87fVe\x88vhwwUFU\x97eFex\x99\xff\xb7\x82\xf1\x81E25\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB2\x11\x1am\xda',
b'\xf1\x87VCJLP18407832DN3\x88vXfvUVT\x97eFU\x87d7v\x88eVeveFU\x89\x98\x7f\xff\xb2\xb0\xf1\x81E25\x00\x00\x00'
b'\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB4\xecE\xefL',
],
},
}
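# Checksum algorithm applied to each platform's CAN messages. "crc8" is a CRC-8
# over the payload on newer models; "6B" appears to denote the older variant
# that sums the first 6 payload bytes. Platforms not listed use the default.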
CHECKSUM = {
"crc8": [CAR.SONATA, CAR.SANTA_FE, CAR.PALISADE, CAR.SELTOS, CAR.ELANTRA21, CAR.K5_DL3,
CAR.SONATA_HEV, CAR.SANTA_FE_HEV, CAR.SOUL_EV, CAR.ELANTRA21_HEV, CAR.K5_DL3_HEV],
"6B": [CAR.SORENTO, CAR.GENESIS],
}
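# Where each platform reports the selected gear: the instrument cluster
# message, the transmission control unit (TCU) message, or the dedicated
# electric/hybrid gear message.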
FEATURES = {
"use_cluster_gears": {CAR.ELANTRA_I30, CAR.KONA, CAR.GRANDEUR, CAR.MOHAVE, CAR.NIRO_HEV, CAR.K7},
"use_tcu_gears": {CAR.SONATA_LF, CAR.VELOSTER, CAR.K5},
"use_elect_gears": {CAR.KONA_EV, CAR.IONIQ_EV, CAR.NEXO, CAR.NIRO_EV, CAR.SOUL_EV, CAR.KONA_HEV, CAR.IONIQ_HEV, CAR.NIRO_HEV,
CAR.SONATA_HEV, CAR.SONATA_LF_HEV, CAR.GRANDEUR_HEV, CAR.GRANDEUR20_HEV,
CAR.K5_HEV, CAR.K5_DL3_HEV, CAR.K7_HEV},
}
EV_CAR = {CAR.KONA_EV, CAR.IONIQ_EV, CAR.NIRO_EV, CAR.SOUL_EV, CAR.NEXO}
HYBRID_CAR = {CAR.KONA_HEV, CAR.IONIQ_HEV, CAR.NIRO_HEV, CAR.SANTA_FE_HEV,
CAR.ELANTRA21_HEV, CAR.SONATA_HEV, CAR.SONATA_LF_HEV, CAR.GRANDEUR_HEV, CAR.GRANDEUR20_HEV,
CAR.K5_HEV, CAR.K5_DL3_HEV, CAR.K7_HEV}
EV_HYBRID_CAR = EV_CAR | HYBRID_CAR
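# Per-platform CAN database selection: dbc_dict pairs the powertrain ('pt') DBC
# with an optional radar DBC; None means the front radar is not parsed.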
DBC = {
CAR.ELANTRA_I30: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA21: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA21_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SONATA_LF: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_LF_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.IONIQ_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SANTA_FE_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR20: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR20_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NEXO: dbc_dict('hyundai_kia_generic_nexo', None),
CAR.FORTE: dbc_dict('hyundai_kia_generic', None),
CAR.K5: dbc_dict('hyundai_kia_generic', None),
CAR.K5_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K5_DL3: dbc_dict('hyundai_kia_generic', None),
CAR.K5_DL3_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SPORTAGE: dbc_dict('hyundai_kia_generic', None),
CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
CAR.MOHAVE: dbc_dict('hyundai_kia_generic', None),
CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.SOUL_EV: dbc_dict('hyundai_kia_generic', None),
CAR.SELTOS: dbc_dict('hyundai_kia_generic', None),
CAR.K7: dbc_dict('hyundai_kia_generic', None),
CAR.K7_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K9: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
}
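# Driver-applied steering torque magnitude (in CAN units) above which the
# driver is considered to be overriding the steering.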
STEER_THRESHOLD = 150
def main():
for member, value in vars(CAR).items():
if not member.startswith("_"):
print(value)
if __name__ == "__main__":
main()
| true
| true
|
f702b3137c31de742296618ee6e83a233c38cd21
| 7384
|
py
|
Python
|
src/primaires/communication/editeurs/messagerie/edt_envoi.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/communication/editeurs/messagerie/edt_envoi.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/communication/editeurs/messagerie/edt_envoi.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte éditeur EdtBoiteEnvoi"""
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from primaires.communication.editeurs.medit import EdtMedit
from primaires.communication.mudmail import ENVOYE
from primaires.format.fonctions import couper_phrase
class EdtBoiteEnvoi(Editeur):
"""Classe définissant le contexte-éditeur 'boîte d'envoi'.
Ce contexte liste les messages envoyés et propose des options d'édition.
"""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur"""
Editeur.__init__(self, pere, objet, attribut)
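        # Register the one-letter editor options: l = lire (read),
        # c = copier (copy), s = supprimer (delete).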
self.ajouter_option("l", self.opt_lire)
self.ajouter_option("c", self.opt_copier)
self.ajouter_option("s", self.opt_supprimer)
def accueil(self):
"""Méthode d'accueil"""
joueur = self.pere.joueur
mails = type(self).importeur.communication.mails.get_mails_pour(
joueur, ENVOYE)
msg = "||tit| " + "Messages envoyés".ljust(76) + "|ff||\n"
msg += self.opts.separateur + "\n"
msg += self.aide_courte + "\n\n"
if not mails:
msg += "|att|Vous n'avez envoyé aucun message.|ff|"
else:
taille = 0
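            # Compute the subject column width: the longest (truncated)
            # subject, with a minimum of 5 characters.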
for mail in mails:
t_sujet = len(couper_phrase(mail.sujet, 33))
if t_sujet > taille:
taille = t_sujet
            taille = max(taille, 5)
msg += "+" + "-".ljust(taille + 41, "-") + "+\n"
msg += "| |tit|N°|ff| | |tit|" + "Sujet".ljust(taille)
msg += "|ff| | |tit|Destinataire|ff| | |tit|" + "Date".ljust(16)
msg += "|ff| |\n"
i = 1
for mail in mails:
msg += "| |rg|" + str(i).rjust(2) + "|ff| | "
msg += "|vr|" + couper_phrase(mail.sujet, 33).ljust( \
taille) + "|ff| | |blc|"
                msg += couper_phrase(mail.aff_dest, 12).ljust(12) + "|ff| | "
msg += "|jn|" + mail.date.isoformat(" ")[:16] + "|ff| |\n"
i += 1
msg += "+" + "-".ljust(taille + 41, "-") + "+"
return msg
def opt_lire(self, arguments):
"""Option lire"""
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, ENVOYE)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
l_mail = None
for mail in mails:
if num == i:
l_mail = mail
break
i += 1
if l_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
self.pere.joueur << l_mail.afficher()
def opt_copier(self, arguments):
"""Option copier"""
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, ENVOYE)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
c_mail = None
for mail in mails:
if num == i:
c_mail = mail
break
i += 1
if c_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
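            # Duplicate the selected mail into a fresh draft and open it
            # in the mail editor (EdtMedit).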
mail = type(self).importeur.communication.mails.creer_mail(
self.pere.joueur)
mail.sujet = "CC:" + c_mail.sujet
mail.liste_dest = c_mail.liste_dest
mail.contenu.ajouter_paragraphe(str(c_mail.contenu))
enveloppe = EnveloppeObjet(EdtMedit, mail, None)
enveloppe.parent = self
contexte = enveloppe.construire(self.pere.joueur)
self.pere.joueur.contextes.ajouter(contexte)
contexte.actualiser()
def opt_supprimer(self, arguments):
"""Option supprimer"""
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, ENVOYE)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
s_mail = None
for mail in mails:
if num == i:
s_mail = mail
break
i += 1
if s_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
del type(self).importeur.communication.mails[s_mail.id]
self.pere.joueur << "|att|Ce message a bien été supprimé.|ff|"
| 41.717514
| 80
| 0.566766
|
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from primaires.communication.editeurs.medit import EdtMedit
from primaires.communication.mudmail import ENVOYE
from primaires.format.fonctions import couper_phrase
class EdtBoiteEnvoi(Editeur):
def __init__(self, pere, objet=None, attribut=None):
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("l", self.opt_lire)
self.ajouter_option("c", self.opt_copier)
self.ajouter_option("s", self.opt_supprimer)
def accueil(self):
joueur = self.pere.joueur
mails = type(self).importeur.communication.mails.get_mails_pour(
joueur, ENVOYE)
msg = "||tit| " + "Messages envoyés".ljust(76) + "|ff||\n"
msg += self.opts.separateur + "\n"
msg += self.aide_courte + "\n\n"
if not mails:
msg += "|att|Vous n'avez envoyé aucun message.|ff|"
else:
taille = 0
for mail in mails:
t_sujet = len(couper_phrase(mail.sujet, 33))
if t_sujet > taille:
taille = t_sujet
            taille = max(taille, 5)
msg += "+" + "-".ljust(taille + 41, "-") + "+\n"
msg += "| |tit|N°|ff| | |tit|" + "Sujet".ljust(taille)
msg += "|ff| | |tit|Destinataire|ff| | |tit|" + "Date".ljust(16)
msg += "|ff| |\n"
i = 1
for mail in mails:
msg += "| |rg|" + str(i).rjust(2) + "|ff| | "
msg += "|vr|" + couper_phrase(mail.sujet, 33).ljust( \
taille) + "|ff| | |blc|"
                msg += couper_phrase(mail.aff_dest, 12).ljust(12) + "|ff| | "
msg += "|jn|" + mail.date.isoformat(" ")[:16] + "|ff| |\n"
i += 1
msg += "+" + "-".ljust(taille + 41, "-") + "+"
return msg
def opt_lire(self, arguments):
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, ENVOYE)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
l_mail = None
for mail in mails:
if num == i:
l_mail = mail
break
i += 1
if l_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
self.pere.joueur << l_mail.afficher()
def opt_copier(self, arguments):
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, ENVOYE)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
c_mail = None
for mail in mails:
if num == i:
c_mail = mail
break
i += 1
if c_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
mail = type(self).importeur.communication.mails.creer_mail(
self.pere.joueur)
mail.sujet = "CC:" + c_mail.sujet
mail.liste_dest = c_mail.liste_dest
mail.contenu.ajouter_paragraphe(str(c_mail.contenu))
enveloppe = EnveloppeObjet(EdtMedit, mail, None)
enveloppe.parent = self
contexte = enveloppe.construire(self.pere.joueur)
self.pere.joueur.contextes.ajouter(contexte)
contexte.actualiser()
def opt_supprimer(self, arguments):
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez préciser le numéro d'un " \
"message.|ff|"
return
mails = type(self).importeur.communication.mails.get_mails_pour(
self.pere.joueur, ENVOYE)
try:
num = int(arguments.split(" ")[0])
except ValueError:
self.pere.joueur << "|err|Vous devez spécifier un nombre entier " \
"valide.|ff|"
else:
i = 1
s_mail = None
for mail in mails:
if num == i:
s_mail = mail
break
i += 1
if s_mail is None:
self.pere.joueur << "|err|Le numéro spécifié ne correspond à " \
"aucun message.|ff|"
return
del type(self).importeur.communication.mails[s_mail.id]
self.pere.joueur << "|att|Ce message a bien été supprimé.|ff|"
| true
| true
|
f702b3e836707390409c7ac1aa8b29e284bbca51
| 8,633
|
py
|
Python
|
pyro/infer/trace_elbo.py
|
cnheider/pyro
|
60bcab73ada30c2b3f05d525690c9664ff6fc22e
|
[
"MIT"
] | null | null | null |
pyro/infer/trace_elbo.py
|
cnheider/pyro
|
60bcab73ada30c2b3f05d525690c9664ff6fc22e
|
[
"MIT"
] | null | null | null |
pyro/infer/trace_elbo.py
|
cnheider/pyro
|
60bcab73ada30c2b3f05d525690c9664ff6fc22e
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import numbers
import warnings
import torch
from torch.autograd import Variable
import pyro
import pyro.poutine as poutine
from pyro.distributions.util import is_identically_zero
from pyro.infer.elbo import ELBO
from pyro.infer.enum import iter_discrete_traces
from pyro.infer.util import torch_backward, torch_data_sum, torch_sum
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_model_guide_match, is_nan
def check_enum_discrete_can_run(model_trace, guide_trace):
"""
Checks whether `enum_discrete` is supported for the given (model, guide) pair.
    :param Trace model_trace: A model trace.
    :param Trace guide_trace: A guide trace.
:raises: NotImplementedError
"""
# Check that all batch_log_pdf shapes are the same,
# since we currently do not correctly handle broadcasting.
model_trace.compute_batch_log_pdf()
guide_trace.compute_batch_log_pdf()
shapes = {}
for source, trace in [("model", model_trace), ("guide", guide_trace)]:
for name, site in trace.nodes.items():
if site["type"] == "sample":
shapes[site["batch_log_pdf"].size()] = (source, name)
if len(shapes) > 1:
raise NotImplementedError(
"enum_discrete does not support mixture of batched and un-batched variables. "
"Try rewriting your model to avoid batching or running with enum_discrete=False. "
"Found the following variables of different batch shapes:\n{}".format(
"\n".join(["{} {}: shape = {}".format(source, name, tuple(shape))
for shape, (source, name) in sorted(shapes.items())])))
class Trace_ELBO(ELBO):
"""
A trace implementation of ELBO-based SVI
"""
def _get_traces(self, model, guide, *args, **kwargs):
"""
runs the guide and runs the model against the guide with
the result packaged as a trace generator
"""
for i in range(self.num_particles):
if self.enum_discrete:
# This iterates over a bag of traces, for each particle.
for scale, guide_trace in iter_discrete_traces("flat", guide, *args, **kwargs):
model_trace = poutine.trace(poutine.replay(model, guide_trace),
graph_type="flat").get_trace(*args, **kwargs)
check_model_guide_match(model_trace, guide_trace)
guide_trace = prune_subsample_sites(guide_trace)
model_trace = prune_subsample_sites(model_trace)
check_enum_discrete_can_run(model_trace, guide_trace)
guide_trace.compute_score_parts()
log_r = model_trace.batch_log_pdf() - guide_trace.batch_log_pdf()
weight = scale / self.num_particles
yield weight, model_trace, guide_trace, log_r
continue
guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)
model_trace = poutine.trace(poutine.replay(model, guide_trace)).get_trace(*args, **kwargs)
check_model_guide_match(model_trace, guide_trace)
guide_trace = prune_subsample_sites(guide_trace)
model_trace = prune_subsample_sites(model_trace)
guide_trace.compute_score_parts()
log_r = model_trace.log_pdf() - guide_trace.log_pdf()
weight = 1.0 / self.num_particles
yield weight, model_trace, guide_trace, log_r
def _is_batched(self, weight):
return self.enum_discrete and \
isinstance(weight, Variable) and \
weight.dim() > 0 and \
weight.size(0) > 1
def loss(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
        Evaluates the ELBO with an estimator that uses ``num_particles`` samples (particles).
"""
elbo = 0.0
for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):
elbo_particle = weight * 0
if self._is_batched(weight):
log_pdf = "batch_log_pdf"
else:
log_pdf = "log_pdf"
for name in model_trace.nodes.keys():
if model_trace.nodes[name]["type"] == "sample":
if model_trace.nodes[name]["is_observed"]:
elbo_particle += model_trace.nodes[name][log_pdf]
else:
elbo_particle += model_trace.nodes[name][log_pdf]
elbo_particle -= guide_trace.nodes[name][log_pdf]
# drop terms of weight zero to avoid nans
if isinstance(weight, numbers.Number):
if weight == 0.0:
elbo_particle = torch.zeros_like(elbo_particle)
else:
elbo_particle[weight == 0] = 0.0
elbo += torch_data_sum(weight * elbo_particle)
loss = -elbo
if is_nan(loss):
warnings.warn('Encountered NAN loss')
return loss
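    # --- Illustrative note (not part of the original module) ----------------
    # The loop above computes the standard Monte Carlo ELBO estimate,
    #   ELBO ~= sum_k w_k * (log p(x, z_k) - log q(z_k)),   z_k ~ q,
    # with w_k = 1 / num_particles (or the enumeration weight). A stand-alone
    # sketch of one such term for a single Gaussian site, under made-up
    # distributions p and q, would be:
    #   q = torch.distributions.Normal(0., 1.)
    #   p = torch.distributions.Normal(0.5, 1.)
    #   z = q.sample((100,))
    #   elbo_term = (p.log_prob(z) - q.log_prob(z)).mean()
    # This is only a sketch under the stated assumptions, not Pyro's API.
    # -------------------------------------------------------------------------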
def loss_and_grads(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator.
        Performs backward on the latter. ``num_particles`` samples are used to form the estimators.
"""
elbo = 0.0
# grab a trace from the generator
for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):
elbo_particle = weight * 0
surrogate_elbo_particle = weight * 0
batched = self._is_batched(weight)
# compute elbo and surrogate elbo
if batched:
log_pdf = "batch_log_pdf"
else:
log_pdf = "log_pdf"
for name, model_site in model_trace.nodes.items():
if model_site["type"] == "sample":
model_log_pdf = model_site[log_pdf]
if model_site["is_observed"]:
elbo_particle += model_log_pdf
surrogate_elbo_particle += model_log_pdf
else:
guide_site = guide_trace.nodes[name]
guide_log_pdf, score_function_term, entropy_term = guide_site["score_parts"]
if not batched:
guide_log_pdf = guide_log_pdf.sum()
elbo_particle += model_log_pdf - guide_log_pdf
surrogate_elbo_particle += model_log_pdf
if not is_identically_zero(entropy_term):
if not batched:
entropy_term = entropy_term.sum()
surrogate_elbo_particle -= entropy_term
if not is_identically_zero(score_function_term):
if not batched:
score_function_term = score_function_term.sum()
surrogate_elbo_particle += log_r.detach() * score_function_term
# drop terms of weight zero to avoid nans
if isinstance(weight, numbers.Number):
if weight == 0.0:
elbo_particle = torch.zeros_like(elbo_particle)
surrogate_elbo_particle = torch.zeros_like(surrogate_elbo_particle)
else:
weight_eq_zero = (weight == 0)
elbo_particle[weight_eq_zero] = 0.0
surrogate_elbo_particle[weight_eq_zero] = 0.0
elbo += torch_data_sum(weight * elbo_particle)
surrogate_elbo_particle = torch_sum(weight * surrogate_elbo_particle)
# collect parameters to train from model and guide
trainable_params = set(site["value"]
for trace in (model_trace, guide_trace)
for site in trace.nodes.values()
if site["type"] == "param")
if trainable_params:
surrogate_loss_particle = -surrogate_elbo_particle
torch_backward(surrogate_loss_particle)
pyro.get_param_store().mark_params_active(trainable_params)
loss = -elbo
if is_nan(loss):
warnings.warn('Encountered NAN loss')
return loss
| 42.950249
| 103
| 0.586355
|
from __future__ import absolute_import, division, print_function
import numbers
import warnings
import torch
from torch.autograd import Variable
import pyro
import pyro.poutine as poutine
from pyro.distributions.util import is_identically_zero
from pyro.infer.elbo import ELBO
from pyro.infer.enum import iter_discrete_traces
from pyro.infer.util import torch_backward, torch_data_sum, torch_sum
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_model_guide_match, is_nan
def check_enum_discrete_can_run(model_trace, guide_trace):
model_trace.compute_batch_log_pdf()
guide_trace.compute_batch_log_pdf()
shapes = {}
for source, trace in [("model", model_trace), ("guide", guide_trace)]:
for name, site in trace.nodes.items():
if site["type"] == "sample":
shapes[site["batch_log_pdf"].size()] = (source, name)
if len(shapes) > 1:
raise NotImplementedError(
"enum_discrete does not support mixture of batched and un-batched variables. "
"Try rewriting your model to avoid batching or running with enum_discrete=False. "
"Found the following variables of different batch shapes:\n{}".format(
"\n".join(["{} {}: shape = {}".format(source, name, tuple(shape))
for shape, (source, name) in sorted(shapes.items())])))
class Trace_ELBO(ELBO):
def _get_traces(self, model, guide, *args, **kwargs):
for i in range(self.num_particles):
if self.enum_discrete:
for scale, guide_trace in iter_discrete_traces("flat", guide, *args, **kwargs):
model_trace = poutine.trace(poutine.replay(model, guide_trace),
graph_type="flat").get_trace(*args, **kwargs)
check_model_guide_match(model_trace, guide_trace)
guide_trace = prune_subsample_sites(guide_trace)
model_trace = prune_subsample_sites(model_trace)
check_enum_discrete_can_run(model_trace, guide_trace)
guide_trace.compute_score_parts()
log_r = model_trace.batch_log_pdf() - guide_trace.batch_log_pdf()
weight = scale / self.num_particles
yield weight, model_trace, guide_trace, log_r
continue
guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)
model_trace = poutine.trace(poutine.replay(model, guide_trace)).get_trace(*args, **kwargs)
check_model_guide_match(model_trace, guide_trace)
guide_trace = prune_subsample_sites(guide_trace)
model_trace = prune_subsample_sites(model_trace)
guide_trace.compute_score_parts()
log_r = model_trace.log_pdf() - guide_trace.log_pdf()
weight = 1.0 / self.num_particles
yield weight, model_trace, guide_trace, log_r
def _is_batched(self, weight):
return self.enum_discrete and \
isinstance(weight, Variable) and \
weight.dim() > 0 and \
weight.size(0) > 1
def loss(self, model, guide, *args, **kwargs):
elbo = 0.0
for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):
elbo_particle = weight * 0
if self._is_batched(weight):
log_pdf = "batch_log_pdf"
else:
log_pdf = "log_pdf"
for name in model_trace.nodes.keys():
if model_trace.nodes[name]["type"] == "sample":
if model_trace.nodes[name]["is_observed"]:
elbo_particle += model_trace.nodes[name][log_pdf]
else:
elbo_particle += model_trace.nodes[name][log_pdf]
elbo_particle -= guide_trace.nodes[name][log_pdf]
if isinstance(weight, numbers.Number):
if weight == 0.0:
elbo_particle = torch.zeros_like(elbo_particle)
else:
elbo_particle[weight == 0] = 0.0
elbo += torch_data_sum(weight * elbo_particle)
loss = -elbo
if is_nan(loss):
warnings.warn('Encountered NAN loss')
return loss
def loss_and_grads(self, model, guide, *args, **kwargs):
elbo = 0.0
for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):
elbo_particle = weight * 0
surrogate_elbo_particle = weight * 0
batched = self._is_batched(weight)
if batched:
log_pdf = "batch_log_pdf"
else:
log_pdf = "log_pdf"
for name, model_site in model_trace.nodes.items():
if model_site["type"] == "sample":
model_log_pdf = model_site[log_pdf]
if model_site["is_observed"]:
elbo_particle += model_log_pdf
surrogate_elbo_particle += model_log_pdf
else:
guide_site = guide_trace.nodes[name]
guide_log_pdf, score_function_term, entropy_term = guide_site["score_parts"]
if not batched:
guide_log_pdf = guide_log_pdf.sum()
elbo_particle += model_log_pdf - guide_log_pdf
surrogate_elbo_particle += model_log_pdf
if not is_identically_zero(entropy_term):
if not batched:
entropy_term = entropy_term.sum()
surrogate_elbo_particle -= entropy_term
if not is_identically_zero(score_function_term):
if not batched:
score_function_term = score_function_term.sum()
surrogate_elbo_particle += log_r.detach() * score_function_term
if isinstance(weight, numbers.Number):
if weight == 0.0:
elbo_particle = torch.zeros_like(elbo_particle)
surrogate_elbo_particle = torch.zeros_like(surrogate_elbo_particle)
else:
weight_eq_zero = (weight == 0)
elbo_particle[weight_eq_zero] = 0.0
surrogate_elbo_particle[weight_eq_zero] = 0.0
elbo += torch_data_sum(weight * elbo_particle)
surrogate_elbo_particle = torch_sum(weight * surrogate_elbo_particle)
trainable_params = set(site["value"]
for trace in (model_trace, guide_trace)
for site in trace.nodes.values()
if site["type"] == "param")
if trainable_params:
surrogate_loss_particle = -surrogate_elbo_particle
torch_backward(surrogate_loss_particle)
pyro.get_param_store().mark_params_active(trainable_params)
loss = -elbo
if is_nan(loss):
warnings.warn('Encountered NAN loss')
return loss
| true
| true
|
f702b5e51d59cc678d28c85bdace0ba9bb5040f9
| 120
|
py
|
Python
|
hydromt/workflows/__init__.py
|
couasnonanais/hydromt
|
6ff3bb6e76cea8247be171f1fe781c0cbb7e9c9e
|
[
"MIT"
] | null | null | null |
hydromt/workflows/__init__.py
|
couasnonanais/hydromt
|
6ff3bb6e76cea8247be171f1fe781c0cbb7e9c9e
|
[
"MIT"
] | null | null | null |
hydromt/workflows/__init__.py
|
couasnonanais/hydromt
|
6ff3bb6e76cea8247be171f1fe781c0cbb7e9c9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""HydroMT workflows"""
from .basin_mask import *
from .forcing import *
from .rivers import *
| 17.142857
| 25
| 0.658333
|
from .basin_mask import *
from .forcing import *
from .rivers import *
| true
| true
|
f702b5f83d11d2fb519cd57e45d49aaab4d30380
| 1,456
|
py
|
Python
|
ddtrace/contrib/falcon/__init__.py
|
SzySteve/dd-trace-py
|
90d1d5981c72ea312c21ac04e5be47521d0f0f2e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ddtrace/contrib/falcon/__init__.py
|
SzySteve/dd-trace-py
|
90d1d5981c72ea312c21ac04e5be47521d0f0f2e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ddtrace/contrib/falcon/__init__.py
|
SzySteve/dd-trace-py
|
90d1d5981c72ea312c21ac04e5be47521d0f0f2e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
To trace the falcon web framework, install the trace middleware::
import falcon
from ddtrace import tracer
from ddtrace.contrib.falcon import TraceMiddleware
mw = TraceMiddleware(tracer, 'my-falcon-app')
falcon.API(middleware=[mw])
You can also use the autopatching functionality::
import falcon
from ddtrace import tracer, patch
patch(falcon=True)
app = falcon.API()
To disable distributed tracing when using autopatching, set the
``DATADOG_FALCON_DISTRIBUTED_TRACING`` environment variable to ``False``.
**Supported span hooks**
The following is a list of available tracer hooks that can be used to intercept
and modify spans created by this integration.
- ``request``
- Called before the response has been finished
- ``def on_falcon_request(span, request, response)``
Example::
import falcon
from ddtrace import config, patch_all
patch_all()
app = falcon.API()
@config.falcon.hooks.on('request')
def on_falcon_request(span, request, response):
span.set_tag('my.custom', 'tag')
:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.
"""
from ...utils.importlib import require_modules
required_modules = ["falcon"]
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .middleware import TraceMiddleware
from .patch import patch
__all__ = ["TraceMiddleware", "patch"]
| 25.54386
| 80
| 0.723214
|
from ...utils.importlib import require_modules
required_modules = ["falcon"]
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .middleware import TraceMiddleware
from .patch import patch
__all__ = ["TraceMiddleware", "patch"]
| true
| true
|
f702b664d120e7b3a5ada847d1a9b2e095657822
| 12,217
|
py
|
Python
|
script/sync.py
|
gonzalezjo/tcecgui
|
30fd82a0b83c7db7335eb3e7b05487d1fad1dbb3
|
[
"Apache-2.0"
] | null | null | null |
script/sync.py
|
gonzalezjo/tcecgui
|
30fd82a0b83c7db7335eb3e7b05487d1fad1dbb3
|
[
"Apache-2.0"
] | null | null | null |
script/sync.py
|
gonzalezjo/tcecgui
|
30fd82a0b83c7db7335eb3e7b05487d1fad1dbb3
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# @author octopoulo <polluxyz@gmail.com>
# @version 2020-05-01
"""
Sync
"""
import gzip
from logging import getLogger
import os
import re
import shutil
from subprocess import run
from time import time
from typing import Any
from PIL import Image, ImageFile
from common import makedirs_safe, read_text_safe, write_text_safe
from css_minify import css_minify
# folders, might want to edit these
BASE = os.path.dirname(os.path.dirname(__file__))
COMPILER = os.path.join(BASE, 'script/closure-compiler-v20200406.jar')
CSS_FOLDER = os.path.join(BASE, 'css')
JAVA = 'java'
JS_FOLDER = os.path.join(BASE, 'js')
LOCAL = BASE
# edit these files
CSS_FILES = [
'light',
]
JS_FILES = {
'4d': [
'libs/three',
'libs/stats',
'libs/GLTFLoader',
'libs/DRACOLoader',
'libs/camera-controls',
],
'all': [
'libs/socket.io',
':common',
'libs/chess-quick',
':engine',
':global',
':3d',
':xboard',
':graph',
':game',
':temp',
':network',
':startup',
':config',
'script',
],
'chart': [
'libs/chart-quick',
],
}
NEED_GZIPS = {
'4d_.js',
'ammo.wasm.js',
'ammo.wasm.wasm',
'chart_.js',
'chart.min.js',
'dark.css',
'dark-archive.css',
'draco_decoder.js',
'draco_decoder.wasm',
'draco_wasm_wrapper.js',
'fra.json',
'index.html',
'jpn.json',
'light-archive.css',
'manifest.json',
'pieces-draco.glb',
'rus.json',
'sea.css',
'sea-archive.css',
'ukr.json',
}
# don't gzip inside those folders
SKIP_GZIPS = {
'archive',
'doc',
'image',
'model',
'node_modules',
'script',
'sound',
'test',
'theme',
}
class Sync:
"""Sync
"""
#
def __init__(self, **kwargs):
self.kwargs = kwargs
self.clean = kwargs.get('clean') # type: bool
        self.host = kwargs.get('host') or '/'               # type: str
self.no_compress = kwargs.get('no_compress') # type: bool
self.no_debug = kwargs.get('no_debug') # type: bool
self.no_process = kwargs.get('no_process') # type: bool
self.zip = kwargs.get('zip') # type: bool
self.logger = getLogger(self.__class__.__name__)
def combine_pieces(self, folder: str):
"""Combine chess pieces png files into 1 file
"""
if 'metro' in folder:
height = 160
width = 160
else:
height = 80
width = 80
combined = Image.new('RGBA', (width * 12, height), (0, 255, 0, 0))
output = f'{folder}.png'
i = 0
pieces = 'bknpqr'
for color in 'bw':
for piece in pieces:
name = f'{color}{piece}'
image = Image.open(os.path.join(folder, f'{name}.png'))
offset = (i * width, 0)
combined.paste(image, offset)
i += 1
combined.save(output, format='png')
print('a', end='')
def combine_themes(self, folder: str):
"""Combine all pieces of each theme
"""
sources = os.listdir(folder)
for source in sources:
filename = os.path.join(folder, source)
if os.path.isdir(filename):
self.combine_pieces(filename)
def compress_3d(self, data: str) -> str:
"""Compress THREE javascript
"""
data = re.sub(r'\bTHREE\b', 'T', data)
data = re.sub(r'console\.(error|warn)\(.+?\);', '', data, flags=re.S)
return data
def compress_gzip(self, filename: str):
"""Gzip compress a file
"""
output = f'{filename}.gz'
with open(filename, 'rb') as f_in:
with gzip.open(output, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# synchronise the date/time
if os.path.isfile(output):
info = os.stat(output)
os.utime(filename, (info.st_atime, info.st_mtime))
print('g', end='')
def compress_js(self, filename: str) -> str:
"""Compress javascript
"""
base, ext = os.path.splitext(filename)
output = f'{base}_{ext}'
if self.no_compress:
shutil.copy(filename, output)
return output
args = [
JAVA,
'-jar', COMPILER,
'--js', filename,
'--js_output_file', output,
'--language_in', 'ECMASCRIPT_2018',
'--language_out', 'ECMASCRIPT_2018',
]
if self.kwargs.get('advanced'):
args.extend(['--compilation_level', 'ADVANCED'])
run(args)
return output
def gzip_files(self, folder: str, depth: int, delete: bool):
"""Gzip all wanted files, recursively
"""
queues = []
sources = os.listdir(folder)
for source in sources:
if source.startswith(('.', '_')):
continue
filename = os.path.join(folder, source)
if os.path.isdir(filename):
if source not in SKIP_GZIPS:
queues.append(filename)
continue
# file
if not os.path.isfile(filename):
continue
if source not in NEED_GZIPS:
continue
output = f'{filename}.gz'
source_time = os.path.getmtime(filename)
if os.path.isfile(output):
destin_time = os.path.getmtime(output)
if delete:
os.unlink(output)
print('d', end='')
else:
destin_time = 0
if not delete and source_time != destin_time:
self.compress_gzip(filename)
print(f"{' ' * depth}{filename}")
for queue in queues:
self.gzip_files(queue, depth + 1, delete)
@staticmethod
def import_file(match: Any) -> str:
"""@import {common.js}
"""
source = match.group(1)
filename = os.path.join(JS_FOLDER, source)
data = read_text_safe(filename) or ''
if source.endswith('.js'):
data = re.sub(r'["\']use strict["\'];?', '', data)
return data
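    # --- Illustrative sketch (not part of the original script) --------------
    # import_file() is used as a re.sub callback: every `@import {name}` token
    # in script.js is replaced by the contents of js/<name>. A stand-alone
    # analogue, assuming a hypothetical in-memory file table, would be:
    #   files = {'common.js': 'let x = 1;'}
    #   src = 'before\n@import {common.js}\nafter'
    #   out = re.sub('@import {(.*?)}', lambda m: files[m.group(1)], src)
    #   # out == 'before\nlet x = 1;\nafter'
    # -------------------------------------------------------------------------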
def normalise_folders(self):
"""Add the missing / (slash) at the end of the folder
"""
global CSS_FOLDER, JS_FOLDER, LOCAL
if CSS_FOLDER[-1] != '/':
CSS_FOLDER += '/'
if JS_FOLDER[-1] != '/':
JS_FOLDER += '/'
if LOCAL[-1] != '/':
LOCAL += '/'
def create_index(self):
"""Create the new index.html
"""
base = os.path.join(LOCAL, 'index_base.html')
base_time = os.path.getmtime(base)
index = os.path.join(LOCAL, 'index.html')
index_time = os.path.getmtime(index) if os.path.isfile(index) else 0
change = 0
if base_time >= index_time:
change += 1
# 1) minimise JS
for js_output, js_files in JS_FILES.items():
all_js = os.path.join(JS_FOLDER, f'{js_output}.js')
all_min_js = os.path.join(JS_FOLDER, f'{js_output}_.js')
# common/engine changed => need to update, even though we're not using those files
js_dates = [os.path.abspath(f"{JS_FOLDER}{js_file.strip(':')}.js") for js_file in js_files]
js_names = [os.path.abspath(f'{JS_FOLDER}{js_file}.js') for js_file in js_files if js_file[0] != ':']
if js_output == 'all':
# script_js = os.path.join(JS_FOLDER, 'script.js')
extras = []
else:
extras = []
# skip?
update = True
if os.path.isfile(all_min_js) and os.path.isfile(all_js):
all_time = os.path.getmtime(all_min_js)
update = False
for js_date in js_dates + extras:
update |= os.path.isfile(js_date) and os.path.getmtime(js_date) >= all_time
if not update:
print('J', end='')
continue
datas = []
for js_name in js_names:
print(js_name)
script_data = read_text_safe(js_name)
if not script_data:
continue
# process the script.js
if js_name.endswith('script.js'):
                    script_data = re.sub('@import {(.*?)}', self.import_file, script_data)
script_data = re.sub('// BEGIN.*?// END', '', script_data, flags=re.S)
if self.no_debug:
script_data = re.sub('// <<.*?// >>', '', script_data, flags=re.S)
# use HOST
print(f'host={self.host}')
if self.host != '/':
script_data = script_data.replace("HOST = '/',", f"HOST = '{self.host}',")
datas.append(script_data)
data = '\n'.join(datas)
if '4d' in js_output:
data = self.compress_3d(data)
write_text_safe(all_js, data)
self.compress_js(all_js)
print('j', end='')
change += 1
# 2) minimise CSS
all_css = os.path.join(CSS_FOLDER, 'all.css')
all_min_css = os.path.join(CSS_FOLDER, 'all_.css')
css_names = [os.path.abspath(f'{CSS_FOLDER}{css_file}.css') for css_file in CSS_FILES]
update = True
if os.path.isfile(all_min_css) and os.path.isfile(all_css):
all_time = os.path.getmtime(all_min_css)
update = False
for css_name in css_names:
update |= os.path.isfile(css_name) and os.path.getmtime(css_name) >= all_time
if update:
datas = []
for css_name in css_names:
datas.append(read_text_safe(css_name) or '')
data = '\n'.join(datas)
write_text_safe(all_css, data)
css_data = css_minify(data)
write_text_safe(all_min_css, css_data)
print('c', end='')
change += 1
else:
css_data = read_text_safe(all_min_css) or ''
print('C', end='')
if not change:
print('X', end='')
return
# 3) remove BEGIN ... END
html = read_text_safe(base)
html = re.sub('<!-- BEGIN -->.*?<!-- END -->', '', html, flags=re.S)
html = re.sub('// BEGIN.*?// END', '', html, flags=re.S)
# use the HOST
if self.host != '/':
replaces = {
'href="/': f'href="{self.host}',
'src="/': f'src="{self.host}',
}
for key, value in replaces.items():
html = html.replace(key, value)
# 4) create the new index.html
if not self.no_process:
all_min_js = os.path.join(JS_FOLDER, 'all_.js')
js_data = read_text_safe(all_min_js) or ''
replaces = {
'<!-- {SCRIPT} -->': f'<script>{js_data}</script>',
'<!-- {STYLE} -->': f'<style>{css_data}</style>',
}
for key, value in replaces.items():
html = html.replace(key, value)
html = re.sub('<!-- .*? -->', '', html, flags=re.S)
html = re.sub(r'\n\s+', '\n', html)
filename = os.path.join(LOCAL, 'index.html')
write_text_safe(filename, html)
def synchronise(self) -> bool:
"""Synchronise the files
"""
self.normalise_folders()
self.create_index()
if self.clean:
self.gzip_files(LOCAL, 0, True)
elif self.zip:
self.gzip_files(LOCAL, 0, False)
return True
if __name__ == '__main__':
start = time()
sync = Sync()
if 0:
sync.combine_themes(os.path.join(BASE, 'theme'))
else:
sync.synchronise()
end = time()
print(f'\nELAPSED: {end-start:.3f} seconds')
| 29.438554
| 113
| 0.503397
|
import gzip
from logging import getLogger
import os
import re
import shutil
from subprocess import run
from time import time
from typing import Any
from PIL import Image, ImageFile
from common import makedirs_safe, read_text_safe, write_text_safe
from css_minify import css_minify
BASE = os.path.dirname(os.path.dirname(__file__))
COMPILER = os.path.join(BASE, 'script/closure-compiler-v20200406.jar')
CSS_FOLDER = os.path.join(BASE, 'css')
JAVA = 'java'
JS_FOLDER = os.path.join(BASE, 'js')
LOCAL = BASE
CSS_FILES = [
'light',
]
JS_FILES = {
'4d': [
'libs/three',
'libs/stats',
'libs/GLTFLoader',
'libs/DRACOLoader',
'libs/camera-controls',
],
'all': [
'libs/socket.io',
':common',
'libs/chess-quick',
':engine',
':global',
':3d',
':xboard',
':graph',
':game',
':temp',
':network',
':startup',
':config',
'script',
],
'chart': [
'libs/chart-quick',
],
}
NEED_GZIPS = {
'4d_.js',
'ammo.wasm.js',
'ammo.wasm.wasm',
'chart_.js',
'chart.min.js',
'dark.css',
'dark-archive.css',
'draco_decoder.js',
'draco_decoder.wasm',
'draco_wasm_wrapper.js',
'fra.json',
'index.html',
'jpn.json',
'light-archive.css',
'manifest.json',
'pieces-draco.glb',
'rus.json',
'sea.css',
'sea-archive.css',
'ukr.json',
}
SKIP_GZIPS = {
'archive',
'doc',
'image',
'model',
'node_modules',
'script',
'sound',
'test',
'theme',
}
class Sync:
#
def __init__(self, **kwargs):
self.kwargs = kwargs
self.clean = kwargs.get('clean') # type: bool
        self.host = kwargs.get('host') or '/' # type: str
self.no_compress = kwargs.get('no_compress') # type: bool
self.no_debug = kwargs.get('no_debug') # type: bool
self.no_process = kwargs.get('no_process') # type: bool
self.zip = kwargs.get('zip') # type: bool
self.logger = getLogger(self.__class__.__name__)
def combine_pieces(self, folder: str):
if 'metro' in folder:
height = 160
width = 160
else:
height = 80
width = 80
combined = Image.new('RGBA', (width * 12, height), (0, 255, 0, 0))
output = f'{folder}.png'
i = 0
pieces = 'bknpqr'
for color in 'bw':
for piece in pieces:
name = f'{color}{piece}'
image = Image.open(os.path.join(folder, f'{name}.png'))
offset = (i * width, 0)
combined.paste(image, offset)
i += 1
combined.save(output, format='png')
print('a', end='')
def combine_themes(self, folder: str):
sources = os.listdir(folder)
for source in sources:
filename = os.path.join(folder, source)
if os.path.isdir(filename):
self.combine_pieces(filename)
def compress_3d(self, data: str) -> str:
data = re.sub(r'\bTHREE\b', 'T', data)
data = re.sub(r'console\.(error|warn)\(.+?\);', '', data, flags=re.S)
return data
def compress_gzip(self, filename: str):
output = f'{filename}.gz'
with open(filename, 'rb') as f_in:
with gzip.open(output, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# synchronise the date/time
if os.path.isfile(output):
info = os.stat(output)
os.utime(filename, (info.st_atime, info.st_mtime))
print('g', end='')
def compress_js(self, filename: str) -> str:
base, ext = os.path.splitext(filename)
output = f'{base}_{ext}'
if self.no_compress:
shutil.copy(filename, output)
return output
args = [
JAVA,
'-jar', COMPILER,
'--js', filename,
'--js_output_file', output,
'--language_in', 'ECMASCRIPT_2018',
'--language_out', 'ECMASCRIPT_2018',
]
if self.kwargs.get('advanced'):
args.extend(['--compilation_level', 'ADVANCED'])
run(args)
return output
def gzip_files(self, folder: str, depth: int, delete: bool):
queues = []
sources = os.listdir(folder)
for source in sources:
if source.startswith(('.', '_')):
continue
filename = os.path.join(folder, source)
if os.path.isdir(filename):
if source not in SKIP_GZIPS:
queues.append(filename)
continue
# file
if not os.path.isfile(filename):
continue
if source not in NEED_GZIPS:
continue
output = f'{filename}.gz'
source_time = os.path.getmtime(filename)
if os.path.isfile(output):
destin_time = os.path.getmtime(output)
if delete:
os.unlink(output)
print('d', end='')
else:
destin_time = 0
if not delete and source_time != destin_time:
self.compress_gzip(filename)
print(f"{' ' * depth}{filename}")
for queue in queues:
self.gzip_files(queue, depth + 1, delete)
@staticmethod
def import_file(match: Any) -> str:
source = match.group(1)
filename = os.path.join(JS_FOLDER, source)
data = read_text_safe(filename) or ''
if source.endswith('.js'):
data = re.sub(r'["\']use strict["\'];?', '', data)
return data
def normalise_folders(self):
global CSS_FOLDER, JS_FOLDER, LOCAL
if CSS_FOLDER[-1] != '/':
CSS_FOLDER += '/'
if JS_FOLDER[-1] != '/':
JS_FOLDER += '/'
if LOCAL[-1] != '/':
LOCAL += '/'
def create_index(self):
base = os.path.join(LOCAL, 'index_base.html')
base_time = os.path.getmtime(base)
index = os.path.join(LOCAL, 'index.html')
index_time = os.path.getmtime(index) if os.path.isfile(index) else 0
change = 0
if base_time >= index_time:
change += 1
# 1) minimise JS
for js_output, js_files in JS_FILES.items():
all_js = os.path.join(JS_FOLDER, f'{js_output}.js')
all_min_js = os.path.join(JS_FOLDER, f'{js_output}_.js')
# common/engine changed => need to update, even though we're not using those files
js_dates = [os.path.abspath(f"{JS_FOLDER}{js_file.strip(':')}.js") for js_file in js_files]
js_names = [os.path.abspath(f'{JS_FOLDER}{js_file}.js') for js_file in js_files if js_file[0] != ':']
if js_output == 'all':
extras = []
else:
extras = []
update = True
if os.path.isfile(all_min_js) and os.path.isfile(all_js):
all_time = os.path.getmtime(all_min_js)
update = False
for js_date in js_dates + extras:
update |= os.path.isfile(js_date) and os.path.getmtime(js_date) >= all_time
if not update:
print('J', end='')
continue
datas = []
for js_name in js_names:
print(js_name)
script_data = read_text_safe(js_name)
if not script_data:
continue
if js_name.endswith('script.js'):
                    script_data = re.sub('@import {(.*?)}', self.import_file, script_data)
script_data = re.sub('// BEGIN.*?// END', '', script_data, flags=re.S)
if self.no_debug:
script_data = re.sub('// <<.*?// >>', '', script_data, flags=re.S)
print(f'host={self.host}')
if self.host != '/':
script_data = script_data.replace("HOST = '/',", f"HOST = '{self.host}',")
datas.append(script_data)
data = '\n'.join(datas)
if '4d' in js_output:
data = self.compress_3d(data)
write_text_safe(all_js, data)
self.compress_js(all_js)
print('j', end='')
change += 1
all_css = os.path.join(CSS_FOLDER, 'all.css')
all_min_css = os.path.join(CSS_FOLDER, 'all_.css')
css_names = [os.path.abspath(f'{CSS_FOLDER}{css_file}.css') for css_file in CSS_FILES]
update = True
if os.path.isfile(all_min_css) and os.path.isfile(all_css):
all_time = os.path.getmtime(all_min_css)
update = False
for css_name in css_names:
update |= os.path.isfile(css_name) and os.path.getmtime(css_name) >= all_time
if update:
datas = []
for css_name in css_names:
datas.append(read_text_safe(css_name) or '')
data = '\n'.join(datas)
write_text_safe(all_css, data)
css_data = css_minify(data)
write_text_safe(all_min_css, css_data)
print('c', end='')
change += 1
else:
css_data = read_text_safe(all_min_css) or ''
print('C', end='')
if not change:
print('X', end='')
return
html = read_text_safe(base)
html = re.sub('<!-- BEGIN -->.*?<!-- END -->', '', html, flags=re.S)
html = re.sub('// BEGIN.*?// END', '', html, flags=re.S)
if self.host != '/':
replaces = {
'href="/': f'href="{self.host}',
'src="/': f'src="{self.host}',
}
for key, value in replaces.items():
html = html.replace(key, value)
if not self.no_process:
all_min_js = os.path.join(JS_FOLDER, 'all_.js')
js_data = read_text_safe(all_min_js) or ''
replaces = {
'<!-- {SCRIPT} -->': f'<script>{js_data}</script>',
'<!-- {STYLE} -->': f'<style>{css_data}</style>',
}
for key, value in replaces.items():
html = html.replace(key, value)
html = re.sub('<!-- .*? -->', '', html, flags=re.S)
html = re.sub(r'\n\s+', '\n', html)
filename = os.path.join(LOCAL, 'index.html')
write_text_safe(filename, html)
def synchronise(self) -> bool:
self.normalise_folders()
self.create_index()
if self.clean:
self.gzip_files(LOCAL, 0, True)
elif self.zip:
self.gzip_files(LOCAL, 0, False)
return True
if __name__ == '__main__':
start = time()
sync = Sync()
if 0:
sync.combine_themes(os.path.join(BASE, 'theme'))
else:
sync.synchronise()
end = time()
print(f'\nELAPSED: {end-start:.3f} seconds')
| true
| true
|
f702b7c58a323d000bad0d9da5c5c1cb62e79373
| 1,508
|
py
|
Python
|
tests/main.py
|
viniarck/yala
|
6e5493371645a6584dd54bc1a13ff819257f45a8
|
[
"MIT"
] | 3
|
2020-05-29T05:03:01.000Z
|
2020-06-09T14:40:28.000Z
|
tests/main.py
|
viniarck/yala
|
6e5493371645a6584dd54bc1a13ff819257f45a8
|
[
"MIT"
] | 25
|
2020-05-29T05:03:15.000Z
|
2021-11-15T05:21:21.000Z
|
tests/main.py
|
viniarck/yala
|
6e5493371645a6584dd54bc1a13ff819257f45a8
|
[
"MIT"
] | null | null | null |
"""Tests for the main module."""
import unittest
from unittest.mock import Mock, patch
from yala.main import LinterRunner
class TestLinterRunner(unittest.TestCase):
"""Test the LinterRunner class."""
@patch('yala.main.Config')
def test_chosen_not_found(self, mock_config):
"""Should print an error when chosen linter is not found."""
# Linter chosen by the user
name = 'my linter'
mock_config.user_linters = [name]
_, stderr = self._path_and_run(mock_config, name)
self.assertIn('Did you install', stderr[0])
@patch('yala.main.Config')
def test_not_chosen_not_found(self, mock_config):
"""Should not print an error when chosen linter is not found."""
# No linters chosen by the user
mock_config.user_linters = []
stdout, stderr = self._path_and_run(mock_config)
self.assertEqual(0, len(stdout))
self.assertEqual(0, len(stderr))
def _path_and_run(self, mock_config, name='my linter'):
cls = self._mock_linter_class(name)
mock_config.get_linter_classes.return_value = [cls]
with patch('yala.main.subprocess.run', side_effect=FileNotFoundError):
linter_cfg_tgts = cls, mock_config, []
return LinterRunner.run(linter_cfg_tgts)
@staticmethod
def _mock_linter_class(name):
linter_class = Mock()
linter = linter_class.return_value
linter.command_with_options = linter.name = name
return linter_class
| 35.904762
| 78
| 0.67374
|
import unittest
from unittest.mock import Mock, patch
from yala.main import LinterRunner
class TestLinterRunner(unittest.TestCase):
@patch('yala.main.Config')
def test_chosen_not_found(self, mock_config):
name = 'my linter'
mock_config.user_linters = [name]
_, stderr = self._path_and_run(mock_config, name)
self.assertIn('Did you install', stderr[0])
@patch('yala.main.Config')
def test_not_chosen_not_found(self, mock_config):
mock_config.user_linters = []
stdout, stderr = self._path_and_run(mock_config)
self.assertEqual(0, len(stdout))
self.assertEqual(0, len(stderr))
def _path_and_run(self, mock_config, name='my linter'):
cls = self._mock_linter_class(name)
mock_config.get_linter_classes.return_value = [cls]
with patch('yala.main.subprocess.run', side_effect=FileNotFoundError):
linter_cfg_tgts = cls, mock_config, []
return LinterRunner.run(linter_cfg_tgts)
@staticmethod
def _mock_linter_class(name):
linter_class = Mock()
linter = linter_class.return_value
linter.command_with_options = linter.name = name
return linter_class
| true
| true
|
f702b8499e7e86033ca8009acb49c904c799a1ff
| 3,359
|
py
|
Python
|
custom_components/kontomierz_sensor/sensor.py
|
pawelhulek/kontomierz-sensor
|
7e7862c259d11a3406ebc6faabe7f2c4bd9ff70b
|
[
"MIT"
] | 2
|
2022-02-15T19:41:22.000Z
|
2022-03-08T09:46:53.000Z
|
custom_components/kontomierz_sensor/sensor.py
|
pawelhulek/kontomierz-sensor
|
7e7862c259d11a3406ebc6faabe7f2c4bd9ff70b
|
[
"MIT"
] | null | null | null |
custom_components/kontomierz_sensor/sensor.py
|
pawelhulek/kontomierz-sensor
|
7e7862c259d11a3406ebc6faabe7f2c4bd9ff70b
|
[
"MIT"
] | null | null | null |
"""Platform for sensor integration."""
from __future__ import annotations
import homeassistant.helpers.config_validation as cv
import requests
import voluptuous as vol
from homeassistant.components.sensor import SensorEntity, PLATFORM_SCHEMA, SensorStateClass, SensorDeviceClass
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_API_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from requests.auth import HTTPBasicAuth
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_API_TOKEN): cv.string,
})
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None
) -> None:
"""Set up the sensor platform."""
url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + config.get(CONF_API_TOKEN)
payload = {}
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
response = requests.get(url, auth=HTTPBasicAuth(config.get(CONF_USERNAME), config.get(CONF_PASSWORD)),
headers=headers, data=payload)
response_json = response.json()
for x in response_json:
account = x.get('user_account')
add_entities(
[KontomierzSensor(hass, config, account.get('bank_name') + " - " + account.get('display_name'),
account.get('iban'))])
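# --- Illustrative note (not part of the original integration) ---------------
# The loop above assumes each element of the JSON response wraps its data in a
# 'user_account' object. A hypothetical (made-up) element consistent with the
# fields read here and in update() below would look like:
#   {"user_account": {"bank_name": "Some Bank",
#                     "display_name": "Checking",
#                     "iban": "PL00000000000000000000000000",
#                     "balance": "123.45",
#                     "currency_name": "PLN"}}
# The exact schema of the Kontomierz API is an assumption, not documented here.
# -----------------------------------------------------------------------------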
class KontomierzSensor(SensorEntity):
"""Representation of a Sensor."""
    def __init__(self, hass, config: dict, entity_name: str, iban: str) -> None:
self._attr_device_class = SensorDeviceClass.MONETARY
self._attr_state_class = SensorStateClass.MEASUREMENT
self._state = None
self.hass = hass
self.username = config.get(CONF_USERNAME)
self.password = config.get(CONF_PASSWORD)
self.apiToken = config.get(CONF_API_TOKEN)
self.entity_name = entity_name
self.iban = iban
@property
def unique_id(self) -> str | None:
return "kontomierz_sensor" + self.entity_name
@property
def name(self) -> str:
return self.entity_name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self) -> None:
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + self.apiToken
response = requests.get(url, auth=HTTPBasicAuth(self.username, self.password), headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
}, data={})
response_json = response.json()
result = 0.0
for x in response_json:
user_account = x.get('user_account')
if self.iban == user_account.get('iban'):
result = float(user_account.get('balance'))
self._attr_native_unit_of_measurement = user_account.get('currency_name')
self._state = result
| 36.912088
| 110
| 0.671033
|
from __future__ import annotations
import homeassistant.helpers.config_validation as cv
import requests
import voluptuous as vol
from homeassistant.components.sensor import SensorEntity, PLATFORM_SCHEMA, SensorStateClass, SensorDeviceClass
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_API_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from requests.auth import HTTPBasicAuth
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_API_TOKEN): cv.string,
})
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None
) -> None:
url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + config.get(CONF_API_TOKEN)
payload = {}
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
response = requests.get(url, auth=HTTPBasicAuth(config.get(CONF_USERNAME), config.get(CONF_PASSWORD)),
headers=headers, data=payload)
response_json = response.json()
for x in response_json:
account = x.get('user_account')
add_entities(
[KontomierzSensor(hass, config, account.get('bank_name') + " - " + account.get('display_name'),
account.get('iban'))])
class KontomierzSensor(SensorEntity):
    def __init__(self, hass, config: dict, entity_name: str, iban: str) -> None:
self._attr_device_class = SensorDeviceClass.MONETARY
self._attr_state_class = SensorStateClass.MEASUREMENT
self._state = None
self.hass = hass
self.username = config.get(CONF_USERNAME)
self.password = config.get(CONF_PASSWORD)
self.apiToken = config.get(CONF_API_TOKEN)
self.entity_name = entity_name
self.iban = iban
@property
def unique_id(self) -> str | None:
return "kontomierz_sensor" + self.entity_name
@property
def name(self) -> str:
return self.entity_name
@property
def state(self):
return self._state
def update(self) -> None:
url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + self.apiToken
response = requests.get(url, auth=HTTPBasicAuth(self.username, self.password), headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
}, data={})
response_json = response.json()
result = 0.0
for x in response_json:
user_account = x.get('user_account')
if self.iban == user_account.get('iban'):
result = float(user_account.get('balance'))
self._attr_native_unit_of_measurement = user_account.get('currency_name')
self._state = result
| true
| true
|
f702b8a772bb5d14f134560768bd7b16e89d9a92
| 15,785
|
py
|
Python
|
models/__init__.py
|
Sriram-Ravula/ncsnv2
|
f610b59441a34063fae1c02aa06837b7eec95c03
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Sriram-Ravula/ncsnv2
|
f610b59441a34063fae1c02aa06837b7eec95c03
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Sriram-Ravula/ncsnv2
|
f610b59441a34063fae1c02aa06837b7eec95c03
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
def get_sigmas(config):
if config.model.sigma_dist == 'geometric':
sigmas = torch.tensor(
np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),
config.model.num_classes))).float().to(config.device)
elif config.model.sigma_dist == 'uniform':
sigmas = torch.tensor(
np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)
).float().to(config.device)
else:
raise NotImplementedError('sigma distribution not supported')
return sigmas
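# --- Illustrative sketch (not part of the original module) ------------------
# For the 'geometric' schedule the sigmas are log-uniformly spaced, i.e. each
# sigma is the previous one times a constant ratio. With made-up endpoints
# sigma_begin=50, sigma_end=0.01 and num_classes=5:
#   np.exp(np.linspace(np.log(50.), np.log(0.01), 5))
#   # -> approximately [50., 5.946, 0.707, 0.0841, 0.01]  (ratio ~ 0.1189)
# -----------------------------------------------------------------------------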
@torch.no_grad()
def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True):
images = []
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
#calculate l2 norms of gradient (score) and the additive noise for logging
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
#calc snr as scaled version of [||s(x, \sigma_i)|| / ||z_t||] and mean of score for logging
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format(
c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
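# --- Illustrative note (not part of the original module) --------------------
# Each inner iteration above is a discretized Langevin step,
#   x_{t+1} = x_t + alpha * s(x_t, sigma_i) + sqrt(2 * alpha) * z_t,  z_t ~ N(0, I),
# where s(., sigma_i) = scorenet(., i) ~= grad_x log p_{sigma_i}(x) and
# alpha = step_lr * (sigma_i / sigma_L)**2 shrinks with the noise level. A toy
# sanity check with a known score (standard Gaussian, so score(x) = -x):
#   x = torch.randn(1000) * 5.0
#   for _ in range(200):
#       x = x + 0.1 * (-x) + torch.randn_like(x) * np.sqrt(2 * 0.1)
#   # x.std() ends up near 1 (about 1.03 for this step size).
# -----------------------------------------------------------------------------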
@torch.no_grad()
def langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True,
decimate_sigma=None, mode=None, true_x=None):
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one
sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list
# num_sigmas = sigmas.shape[0] // decimate_sigma
# sigmas_temp = []
# for i in range(num_sigmas):
# sigmas_temp.append(sigmas[-1])
sigmas = sigmas_temp #swap the new decimated sigma list for the main one
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=5, stop=1, num=len(sigmas))
c2 = 1
with torch.no_grad():
#outer loop over noise scales
for c, sigma in enumerate(sigmas):
#dummy target 1...T depending on iteration
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
#step_size = step_lr * (sigma / sigmas[-1]) ** 2
step_size = steps[c]
#Inner loop over T
for s in range(n_steps_each):
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels)
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
#calculate the maximum likelihood gradient - i.e. MSE gradient
#A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1]
if mode=='denoising':
Axt = x_mod
mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient
#mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2
if c == 0 and s == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#The final gradient
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True,
final_only=False, verbose=False, likelihood_every=1,
decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type="l2"):
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
if sigma_type == 'subsample': #grab equally-spaced sigma values
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist()
sigmas_temp.append(sigmas[-1])
elif sigma_type == 'last': #grab just the last sigma value multiple times
num_sigmas = sigmas.shape[0] // decimate_sigma
sigmas_temp = []
for i in range(num_sigmas):
sigmas_temp.append(sigmas[-1])
else:
sigmas_temp = sigmas
sigmas = sigmas_temp
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas))
likelihood_norm = 0
with torch.no_grad():
if sigma_type == 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099
labels = labels.long()
for c, sigma in enumerate(sigmas):
if sigma_type == 'subsample':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c
labels = labels.long()
elif sigma_type != 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = steps[c]
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels) * c1
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
if c % likelihood_every == 0:
#\grad_x log p(y | x) -- LIKELIHOOD
if mode=='denoising':
Axt = x_mod
if likelihood_type == "l2":
mle_grad = (Axt - y) * c2
elif likelihood_type == "l1":
mle_grad = torch.sign(Axt - y) * c2
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
if likelihood_type == "l2":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2
elif likelihood_type == "l1":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
if auto_c2 and c == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad
#x_mod = torch.clamp(x_mod, 0.0, 1.0)
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.cpu())
if verbose:
print("\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
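# --- Illustrative note (not part of the original module) --------------------
# With auto_c2=True, the first iteration rescales the likelihood gradient so
# the two terms start at equal magnitude:
#   c2 = ||grad_prior|| / ||grad_likelihood||,
# after which grad = c1 * s(x, sigma) - c2 * A^T (A x - y) mixes the learned
# prior and the data-fit term at comparable scales, mirroring the balancing
# done in langevin_Inverse() above.
# -----------------------------------------------------------------------------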
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
n_steps_each=100, step_lr=0.000008):
"""
Currently only good for 32x32 images. Assuming the right half is missing.
"""
images = []
#refer_image is the untainted x (?)
#right now this only works with 3-channel images
refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
x_mod = x_mod.view(-1, 3, image_size, image_size)
cols = image_size // 2
half_refer_image = refer_image[..., :cols]
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
images.append(x_mod.to('cpu'))
corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
x_mod[:, :, :, :cols] = corrupted_half_image
noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
grad = scorenet(x_mod, labels)
x_mod = x_mod + step_size * grad + noise
print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
grad.abs().max()))
return images
@torch.no_grad()
def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False):
images = []
n_rows = x_mod.shape[0]
x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)
x_mod = x_mod.reshape(-1, *x_mod.shape[2:])
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)
noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \
noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]
noise = noise.reshape(-1, *noise.shape[2:])
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print(
"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format(
c, step_size, image_norm.item(), grad_norm.item(), snr.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
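# --- Illustrative note (not part of the original module) --------------------
# The interpolation above mixes two noise draws on a quarter circle,
#   noise(theta) = cos(theta) * noise_p + sin(theta) * noise_q,  theta in [0, pi/2],
# so every interpolated perturbation keeps unit variance (cos^2 + sin^2 = 1),
# which is why the endpoints and all intermediates remain valid Langevin noise.
# -----------------------------------------------------------------------------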
| 44.092179
| 139
| 0.540703
|
import torch
import numpy as np
def get_sigmas(config):
if config.model.sigma_dist == 'geometric':
sigmas = torch.tensor(
np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),
config.model.num_classes))).float().to(config.device)
elif config.model.sigma_dist == 'uniform':
sigmas = torch.tensor(
np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)
).float().to(config.device)
else:
raise NotImplementedError('sigma distribution not supported')
return sigmas
@torch.no_grad()
def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True):
images = []
with torch.no_grad():
for c, sigma in enumerate(sigmas):
            labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
            labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format(
c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True,
decimate_sigma=None, mode=None, true_x=None):
images = []
if decimate_sigma is not None:
        sigmas_temp = sigmas[0:-1:decimate_sigma].tolist()
        sigmas_temp.append(sigmas[-1])
# num_sigmas = sigmas.shape[0] // decimate_sigma
# sigmas_temp = []
# for i in range(num_sigmas):
# sigmas_temp.append(sigmas[-1])
sigmas = sigmas_temp #swap the new decimated sigma list for the main one
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=5, stop=1, num=len(sigmas))
c2 = 1
with torch.no_grad():
#outer loop over noise scales
for c, sigma in enumerate(sigmas):
#dummy target 1...T depending on iteration
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
#step_size = step_lr * (sigma / sigmas[-1]) ** 2
step_size = steps[c]
#Inner loop over T
for s in range(n_steps_each):
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels)
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
#calculate the maximum likelihood gradient - i.e. MSE gradient
#A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1]
if mode=='denoising':
Axt = x_mod
mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient
#mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2
if c == 0 and s == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#The final gradient
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True,
final_only=False, verbose=False, likelihood_every=1,
decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type="l2"):
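    """Deterministic (noise-free) counterpart of langevin_Inverse.

    Takes a single gradient step per noise scale, combining c1 * score with
    an l1 or l2 data-fidelity gradient scaled by c2 (set automatically on
    the first iteration when auto_c2 is True, and recomputed every
    likelihood_every iterations). decimate_sigma thins the sigma schedule:
    'subsample' keeps equally spaced scales, 'last' repeats the final one.
    """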
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
if sigma_type == 'subsample': #grab equally-spaced sigma values
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist()
sigmas_temp.append(sigmas[-1])
elif sigma_type == 'last': #grab just the last sigma value multiple times
num_sigmas = sigmas.shape[0] // decimate_sigma
sigmas_temp = []
for i in range(num_sigmas):
sigmas_temp.append(sigmas[-1])
else:
sigmas_temp = sigmas
sigmas = sigmas_temp
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas))
likelihood_norm = 0
with torch.no_grad():
if sigma_type == 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099
labels = labels.long()
for c, sigma in enumerate(sigmas):
if sigma_type == 'subsample':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c
labels = labels.long()
elif sigma_type != 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = steps[c]
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels) * c1
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
if c % likelihood_every == 0:
#\grad_x log p(y | x) -- LIKELIHOOD
if mode=='denoising':
Axt = x_mod
if likelihood_type == "l2":
mle_grad = (Axt - y) * c2
elif likelihood_type == "l1":
mle_grad = torch.sign(Axt - y) * c2
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
if likelihood_type == "l2":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2
elif likelihood_type == "l1":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
if auto_c2 and c == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad
#x_mod = torch.clamp(x_mod, 0.0, 1.0)
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.cpu())
if verbose:
print("\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
n_steps_each=100, step_lr=0.000008):
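    """Annealed Langevin dynamics for inpainting.

    At every step the left half of the image (columns [:image_size // 2])
    is clamped to a noise-corrupted copy of refer_image, so sampling only
    fills in the right half. Currently expects 3-channel images.
    """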
images = []
#refer_image is the untainted x (?)
#right now this only works with 3-channel images
refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
x_mod = x_mod.view(-1, 3, image_size, image_size)
cols = image_size // 2
half_refer_image = refer_image[..., :cols]
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
images.append(x_mod.to('cpu'))
corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
x_mod[:, :, :, :cols] = corrupted_half_image
noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
grad = scorenet(x_mod, labels)
x_mod = x_mod + step_size * grad + noise
print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
grad.abs().max()))
return images
@torch.no_grad()
def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False):
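    """Annealed Langevin dynamics with spherically interpolated noise.

    Each input row is repeated n_interpolations times, and the per-step
    noise blends two independent draws noise_p and noise_q along a quarter
    circle (angles linspaced over [0, pi/2]), yielding a smooth family of
    samples per input.
    """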
images = []
n_rows = x_mod.shape[0]
x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)
x_mod = x_mod.reshape(-1, *x_mod.shape[2:])
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)
noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \
noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]
noise = noise.reshape(-1, *noise.shape[2:])
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print(
"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format(
c, step_size, image_norm.item(), grad_norm.item(), snr.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
| true
| true
|
f702b9242485f5679da54c8293d1d4239b240653
| 10,790
|
py
|
Python
|
kivymd/uix/useranimationcard.py
|
RedGui/KivyMD
|
5fc9c4c52d01816ba8885fed57f89bf923b38c15
|
[
"MIT"
] | null | null | null |
kivymd/uix/useranimationcard.py
|
RedGui/KivyMD
|
5fc9c4c52d01816ba8885fed57f89bf923b38c15
|
[
"MIT"
] | null | null | null |
kivymd/uix/useranimationcard.py
|
RedGui/KivyMD
|
5fc9c4c52d01816ba8885fed57f89bf923b38c15
|
[
"MIT"
] | null | null | null |
"""
User Animation Card
===================
Copyright (c) 2019 Ivanov Yuri
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license
as the Kivy framework.
Example
-------
from kivymd.app import MDApp
from kivy.lang import Builder
from kivy.factory import Factory
from kivymd.toast import toast
from kivymd.theming import ThemeManager
from kivymd.uix.useranimationcard import MDUserAnimationCard
from kivymd.uix.button import MDIconButton
from kivymd.uix.list import ILeftBodyTouch
# Your content for a contact card.
Builder.load_string('''
#:import get_hex_from_color kivy.utils.get_hex_from_color
<TestAnimationCard@BoxLayout>
orientation: 'vertical'
padding: dp(10)
spacing: dp(10)
size_hint_y: None
height: self.minimum_height
BoxLayout:
size_hint_y: None
height: self.minimum_height
Widget:
MDRoundFlatButton:
text: "Free call"
Widget:
MDRoundFlatButton:
text: "Free message"
Widget:
OneLineIconListItem:
text: "Video call"
IconLeftSampleWidget:
icon: 'camera-front-variant'
TwoLineIconListItem:
text: "Call Viber Out"
secondary_text: "[color=%s]Advantageous rates for calls[/color]" % get_hex_from_color(app.theme_cls.primary_color)
IconLeftSampleWidget:
icon: 'phone'
TwoLineIconListItem:
text: "Call over mobile network"
secondary_text: "[color=%s]Operator's tariffs apply[/color]" % get_hex_from_color(app.theme_cls.primary_color)
IconLeftSampleWidget:
icon: 'remote'
''')
class IconLeftSampleWidget(ILeftBodyTouch, MDIconButton):
pass
class Example(MDApp):
title = "Example Animation Card"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.user_animation_card = None
def build(self):
def main_back_callback():
toast('Close card')
if not self.user_animation_card:
self.user_animation_card = MDUserAnimationCard(
user_name="Lion Lion",
path_to_avatar="./assets/african-lion-951778_1280.jpg",
callback=main_back_callback)
self.user_animation_card.box_content.add_widget(
Factory.TestAnimationCard())
self.user_animation_card.open()
Example().run()
"""
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.metrics import dp, sp
from kivy.properties import ObjectProperty, StringProperty, ListProperty
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.modalview import ModalView
from kivymd.uix.behaviors import SpecificBackgroundColorBehavior
from kivymd.uix.button import MDIconButton
from kivymd.theming import ThemableBehavior
Builder.load_string(
"""
#:import Window kivy.core.window.Window
#:import StiffScrollEffect kivymd.stiffscroll.StiffScrollEffect
<ModifiedToolbar>
size_hint_y: None
height: root.theme_cls.standard_increment
padding: [root.theme_cls.horizontal_margins - dp(12), 0]
BoxLayout:
id: left_actions
orientation: 'horizontal'
size_hint_x: None
padding: [0, (self.height - dp(48))/2]
BoxLayout:
padding: dp(12), 0
MDLabel:
font_style: 'H6'
opposite_colors: root.opposite_colors
theme_text_color: 'Custom'
text_color: root.specific_text_color
text: root.title
shorten: True
shorten_from: 'right'
BoxLayout:
id: right_actions
orientation: 'horizontal'
size_hint_x: None
padding: [0, (self.height - dp(48))/2]
<UserAnimationCard>
canvas:
Color:
rgba:
root.theme_cls.bg_dark \
if root.theme_cls.theme_style == 'Dark' \
else root.theme_cls.bg_light
Rectangle:
size: self.size
pos: self.pos
FitImage:
id: image
source: root.path_to_avatar
size_hint: 1, None
height: Window.height * 40 // 100
y: Window.height - self.height
allow_stretch: True
keep_ratio: False
canvas.after:
Color:
rgba: root._primary_color
Rectangle:
size: self.size
pos: self.pos
MDLabel:
id: user_name
font_style: 'H4'
theme_text_color: 'Custom'
color: 1, 1, 1, 1
shorten: True
shorten_from: 'right'
text: root.user_name
size_hint_y: None
height: self.texture_size[1]
ModifiedToolbar:
id: toolbar
md_bg_color: 0, 0, 0, 0
left_action_items: [['arrow-left', lambda x: root._callback_back()]]
y: Window.height - self.height
ScrollView:
id: scroll
y: -image.height
effect_cls: StiffScrollEffect
scroll_distance: 100
GridLayout:
id: box_content
size_hint_y: None
height: self.minimum_height
cols: 1
canvas:
Color:
rgba:
root.theme_cls.bg_dark \
if root.theme_cls.theme_style == 'Dark' \
else root.theme_cls.bg_light
Rectangle:
size: self.size
pos: self.pos
"""
)
class MDUserAnimationCard(ThemableBehavior, ModalView):
user_name = StringProperty()
path_to_avatar = StringProperty()
box_content = ObjectProperty()
callback = ObjectProperty()
_anim_bottom = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._primary_color = self.theme_cls.primary_color
self._primary_color[3] = 0
self.user_animation_card = UserAnimationCard(
user_name=self.user_name,
path_to_avatar=self.path_to_avatar,
_callback_back=self._callback_back,
_primary_color=self._primary_color,
)
self.user_animation_card.ids.user_name.pos = (
dp(15),
Window.height - self.user_animation_card.ids.image.height,
)
self.box_content = self.user_animation_card.ids.box_content
self.add_widget(self.user_animation_card)
self._obj_avatar = self.user_animation_card.ids.image
self._obj_user_name = self.user_animation_card.ids.user_name
self._obj_toolbar = self.user_animation_card.ids.toolbar
self._obj_scroll = self.user_animation_card.ids.scroll
self._set_current_pos_objects()
def _callback_back(self):
self.dismiss()
if self.callback:
self.callback()
def on_open(self):
self._primary_color = self.theme_cls.primary_color
self._primary_color[3] = 0
self.user_animation_card._primary_color = self._primary_color
def _set_current_pos_objects(self):
self._avatar_y = self._obj_avatar.y
self._toolbar_y = self._obj_toolbar.y
self._user_name_y = self._obj_user_name.y
self._scroll_y = self._obj_scroll.y
def on_touch_move(self, touch):
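        # Kivy's y axis grows upward: an upward swipe (touch.y above its
        # starting point) expands the card into toolbar mode, a downward
        # swipe collapses it back.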
if touch.ud["swipe_begin"] < touch.y:
if self._anim_bottom:
self._anim_bottom = False
self.animation_to_top()
else:
if not self._anim_bottom:
self._anim_bottom = True
self.animation_to_bottom()
def on_touch_down(self, touch):
touch.ud["swipe_begin"] = touch.y
return super().on_touch_down(touch)
def on_touch_up(self, touch):
touch.ud["swipe_begin"] = 0
def animation_to_bottom(self):
Animation(y=self._scroll_y, d=0.4, t="in_out_cubic").start(
self._obj_scroll
)
Animation(y=self._user_name_y, d=0.5, x=dp(15), t="in_out_cubic").start(
self._obj_user_name
)
Animation(font_size=sp(36), d=0.3, t="in_out_cubic").start(
self._obj_user_name
)
Animation(_primary_color=[0, 0, 0, 0], d=0.3, t="in_out_cubic").start(
self.user_animation_card
)
Animation(y=self._avatar_y, d=0.4, t="in_out_cubic").start(
self._obj_avatar
)
def animation_to_top(self):
user_name_y = (
Window.height
- self._obj_toolbar.height
+ (self.theme_cls.standard_increment // 2 - dp(12))
)
user_name_x = self.theme_cls.horizontal_margins + dp(12) * 5
Animation(y=-self._obj_toolbar.height, d=0.4, t="in_out_cubic").start(
self._obj_scroll
)
Animation(y=user_name_y, d=0.3, x=user_name_x, t="in_out_cubic").start(
self._obj_user_name
)
Animation(font_size=sp(20), d=0.3, t="in_out_cubic").start(
self._obj_user_name
)
Animation(
_primary_color=self.theme_cls.primary_color, d=0.3, t="in_out_cubic"
).start(self.user_animation_card)
Animation(y=self._obj_avatar.y + 30, d=0.4, t="in_out_cubic").start(
self._obj_avatar
)
class UserAnimationCard(ThemableBehavior, FloatLayout):
user_name = StringProperty()
path_to_avatar = StringProperty()
_callback_back = ObjectProperty()
_primary_color = ListProperty()
class ModifiedToolbar(
ThemableBehavior, SpecificBackgroundColorBehavior, BoxLayout
):
left_action_items = ListProperty()
title = StringProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.bind(specific_text_color=self.update_action_bar_text_colors)
Clock.schedule_once(
lambda x: self.on_left_action_items(0, self.left_action_items)
)
def on_left_action_items(self, instance, value):
self.update_action_bar(self.ids["left_actions"], value)
def update_action_bar(self, action_bar, action_bar_items):
action_bar.clear_widgets()
new_width = 0
for item in action_bar_items:
new_width += dp(48)
action_bar.add_widget(
MDIconButton(
icon=item[0],
on_release=item[1],
opposite_colors=True,
text_color=self.specific_text_color,
theme_text_color="Custom",
)
)
action_bar.width = new_width
def update_action_bar_text_colors(self, instance, value):
for child in self.ids["left_actions"].children:
child.text_color = self.specific_text_color
| 29.642857
| 122
| 0.624838
|
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.metrics import dp, sp
from kivy.properties import ObjectProperty, StringProperty, ListProperty
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.modalview import ModalView
from kivymd.uix.behaviors import SpecificBackgroundColorBehavior
from kivymd.uix.button import MDIconButton
from kivymd.theming import ThemableBehavior
Builder.load_string(
"""
#:import Window kivy.core.window.Window
#:import StiffScrollEffect kivymd.stiffscroll.StiffScrollEffect
<ModifiedToolbar>
size_hint_y: None
height: root.theme_cls.standard_increment
padding: [root.theme_cls.horizontal_margins - dp(12), 0]
BoxLayout:
id: left_actions
orientation: 'horizontal'
size_hint_x: None
padding: [0, (self.height - dp(48))/2]
BoxLayout:
padding: dp(12), 0
MDLabel:
font_style: 'H6'
opposite_colors: root.opposite_colors
theme_text_color: 'Custom'
text_color: root.specific_text_color
text: root.title
shorten: True
shorten_from: 'right'
BoxLayout:
id: right_actions
orientation: 'horizontal'
size_hint_x: None
padding: [0, (self.height - dp(48))/2]
<UserAnimationCard>
canvas:
Color:
rgba:
root.theme_cls.bg_dark \
if root.theme_cls.theme_style == 'Dark' \
else root.theme_cls.bg_light
Rectangle:
size: self.size
pos: self.pos
FitImage:
id: image
source: root.path_to_avatar
size_hint: 1, None
height: Window.height * 40 // 100
y: Window.height - self.height
allow_stretch: True
keep_ratio: False
canvas.after:
Color:
rgba: root._primary_color
Rectangle:
size: self.size
pos: self.pos
MDLabel:
id: user_name
font_style: 'H4'
theme_text_color: 'Custom'
color: 1, 1, 1, 1
shorten: True
shorten_from: 'right'
text: root.user_name
size_hint_y: None
height: self.texture_size[1]
ModifiedToolbar:
id: toolbar
md_bg_color: 0, 0, 0, 0
left_action_items: [['arrow-left', lambda x: root._callback_back()]]
y: Window.height - self.height
ScrollView:
id: scroll
y: -image.height
effect_cls: StiffScrollEffect
scroll_distance: 100
GridLayout:
id: box_content
size_hint_y: None
height: self.minimum_height
cols: 1
canvas:
Color:
rgba:
root.theme_cls.bg_dark \
if root.theme_cls.theme_style == 'Dark' \
else root.theme_cls.bg_light
Rectangle:
size: self.size
pos: self.pos
"""
)
class MDUserAnimationCard(ThemableBehavior, ModalView):
user_name = StringProperty()
path_to_avatar = StringProperty()
box_content = ObjectProperty()
callback = ObjectProperty()
_anim_bottom = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._primary_color = self.theme_cls.primary_color
self._primary_color[3] = 0
self.user_animation_card = UserAnimationCard(
user_name=self.user_name,
path_to_avatar=self.path_to_avatar,
_callback_back=self._callback_back,
_primary_color=self._primary_color,
)
self.user_animation_card.ids.user_name.pos = (
dp(15),
Window.height - self.user_animation_card.ids.image.height,
)
self.box_content = self.user_animation_card.ids.box_content
self.add_widget(self.user_animation_card)
self._obj_avatar = self.user_animation_card.ids.image
self._obj_user_name = self.user_animation_card.ids.user_name
self._obj_toolbar = self.user_animation_card.ids.toolbar
self._obj_scroll = self.user_animation_card.ids.scroll
self._set_current_pos_objects()
def _callback_back(self):
self.dismiss()
if self.callback:
self.callback()
def on_open(self):
self._primary_color = self.theme_cls.primary_color
self._primary_color[3] = 0
self.user_animation_card._primary_color = self._primary_color
def _set_current_pos_objects(self):
self._avatar_y = self._obj_avatar.y
self._toolbar_y = self._obj_toolbar.y
self._user_name_y = self._obj_user_name.y
self._scroll_y = self._obj_scroll.y
def on_touch_move(self, touch):
if touch.ud["swipe_begin"] < touch.y:
if self._anim_bottom:
self._anim_bottom = False
self.animation_to_top()
else:
if not self._anim_bottom:
self._anim_bottom = True
self.animation_to_bottom()
def on_touch_down(self, touch):
touch.ud["swipe_begin"] = touch.y
return super().on_touch_down(touch)
def on_touch_up(self, touch):
touch.ud["swipe_begin"] = 0
def animation_to_bottom(self):
Animation(y=self._scroll_y, d=0.4, t="in_out_cubic").start(
self._obj_scroll
)
Animation(y=self._user_name_y, d=0.5, x=dp(15), t="in_out_cubic").start(
self._obj_user_name
)
Animation(font_size=sp(36), d=0.3, t="in_out_cubic").start(
self._obj_user_name
)
Animation(_primary_color=[0, 0, 0, 0], d=0.3, t="in_out_cubic").start(
self.user_animation_card
)
Animation(y=self._avatar_y, d=0.4, t="in_out_cubic").start(
self._obj_avatar
)
def animation_to_top(self):
user_name_y = (
Window.height
- self._obj_toolbar.height
+ (self.theme_cls.standard_increment // 2 - dp(12))
)
user_name_x = self.theme_cls.horizontal_margins + dp(12) * 5
Animation(y=-self._obj_toolbar.height, d=0.4, t="in_out_cubic").start(
self._obj_scroll
)
Animation(y=user_name_y, d=0.3, x=user_name_x, t="in_out_cubic").start(
self._obj_user_name
)
Animation(font_size=sp(20), d=0.3, t="in_out_cubic").start(
self._obj_user_name
)
Animation(
_primary_color=self.theme_cls.primary_color, d=0.3, t="in_out_cubic"
).start(self.user_animation_card)
Animation(y=self._obj_avatar.y + 30, d=0.4, t="in_out_cubic").start(
self._obj_avatar
)
class UserAnimationCard(ThemableBehavior, FloatLayout):
user_name = StringProperty()
path_to_avatar = StringProperty()
_callback_back = ObjectProperty()
_primary_color = ListProperty()
class ModifiedToolbar(
ThemableBehavior, SpecificBackgroundColorBehavior, BoxLayout
):
left_action_items = ListProperty()
title = StringProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.bind(specific_text_color=self.update_action_bar_text_colors)
Clock.schedule_once(
lambda x: self.on_left_action_items(0, self.left_action_items)
)
def on_left_action_items(self, instance, value):
self.update_action_bar(self.ids["left_actions"], value)
def update_action_bar(self, action_bar, action_bar_items):
action_bar.clear_widgets()
new_width = 0
for item in action_bar_items:
new_width += dp(48)
action_bar.add_widget(
MDIconButton(
icon=item[0],
on_release=item[1],
opposite_colors=True,
text_color=self.specific_text_color,
theme_text_color="Custom",
)
)
action_bar.width = new_width
def update_action_bar_text_colors(self, instance, value):
for child in self.ids["left_actions"].children:
child.text_color = self.specific_text_color
| true
| true
|
f702bcd10e528ab743515c3efc44683485009bd7
| 1,733
|
py
|
Python
|
app/api/resources/validators.py
|
eLemmings/back
|
ba5dbc5f64625b61150ce53f12a9393fba060f02
|
[
"MIT"
] | null | null | null |
app/api/resources/validators.py
|
eLemmings/back
|
ba5dbc5f64625b61150ce53f12a9393fba060f02
|
[
"MIT"
] | null | null | null |
app/api/resources/validators.py
|
eLemmings/back
|
ba5dbc5f64625b61150ce53f12a9393fba060f02
|
[
"MIT"
] | null | null | null |
# Module defining the API validators
from marshmallow import Schema, fields, validate
fields.Email.default_error_messages['required'] = 'Email jest wymagany'
fields.Email.default_error_messages['invalid'] = 'Niepoprawny adres email'
class VUser(Schema):
    # Registration validator
nick = fields.String(
required=True, validate=validate.Length(min=4, max=30, error='Login musi mieć 4 - 30 znaków'))
email = fields.Email(required=True)
password = fields.String(
required=True, validate=validate.Length(min=8, max=30, error='Hasło musi mieć 8 - 30 znakow'))
class VUserLogin(Schema):
    # Login validator
email = fields.Email(required=True)
password = fields.String(
required=True, validate=validate.Length(min=8, max=30, error='Hasło jest wymagane'))
class VEmail(Schema):
    # Email address validator
email = fields.Email(required=True)
class VUserPatch(Schema):
    # Validator for requests changing fields on a user record
field = fields.String(required=True, validate=validate.OneOf(['nick']))
value = fields.String(required=True)
class VEntry(Schema):
    # Diary entry validator
value = fields.Number(required=True)
description = fields.String()
class VDiary(Schema):
    # Diary validator
name = fields.String(required=True)
max = fields.Number(required=True)
date = fields.Number()
color = fields.String(validate=validate.Regexp("#[0-9a-fA-F]{6}"))
entries = fields.List(fields.Nested(VEntry), required=True)
class VJson(Schema):
    # JSON data validator
diaries = fields.List(fields.Nested(VDiary))
class VDiaryIndex(Schema):
    # Diary index validator
index = fields.Integer(required=True)
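# Usage sketch (illustrative, not part of the original module): marshmallow
# schemas report problems via validate(), which returns a dict of field
# errors, e.g.
#   errors = VUser().validate({"nick": "abc"})
#   # -> length message for nick plus missing email and password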
| 28.883333
| 102
| 0.713791
|
from marshmallow import Schema, fields, validate
fields.Email.default_error_messages['required'] = 'Email jest wymagany'
fields.Email.default_error_messages['invalid'] = 'Niepoprawny adres email'
class VUser(Schema):
nick = fields.String(
required=True, validate=validate.Length(min=4, max=30, error='Login musi mieć 4 - 30 znaków'))
email = fields.Email(required=True)
password = fields.String(
required=True, validate=validate.Length(min=8, max=30, error='Hasło musi mieć 8 - 30 znakow'))
class VUserLogin(Schema):
email = fields.Email(required=True)
password = fields.String(
required=True, validate=validate.Length(min=8, max=30, error='Hasło jest wymagane'))
class VEmail(Schema):
email = fields.Email(required=True)
class VUserPatch(Schema):
field = fields.String(required=True, validate=validate.OneOf(['nick']))
value = fields.String(required=True)
class VEntry(Schema):
value = fields.Number(required=True)
description = fields.String()
class VDiary(Schema):
name = fields.String(required=True)
max = fields.Number(required=True)
date = fields.Number()
color = fields.String(validate=validate.Regexp("#[0-9a-fA-F]{6}"))
entries = fields.List(fields.Nested(VEntry), required=True)
class VJson(Schema):
diaries = fields.List(fields.Nested(VDiary))
class VDiaryIndex(Schema):
index = fields.Integer(required=True)
| true
| true
|
f702bd8bccda922a4d40e754a378080f65315f49
| 2,869
|
py
|
Python
|
option.py
|
ISKU/BOJ-Solutions-Downloader
|
2277b2d00204ea47c1a086438100b6057daaa244
|
[
"MIT"
] | 2
|
2019-01-04T18:48:23.000Z
|
2019-10-27T10:48:09.000Z
|
option.py
|
ISKU/BOJ-Solutions-Downloader
|
2277b2d00204ea47c1a086438100b6057daaa244
|
[
"MIT"
] | null | null | null |
option.py
|
ISKU/BOJ-Solutions-Downloader
|
2277b2d00204ea47c1a086438100b6057daaa244
|
[
"MIT"
] | null | null | null |
class Option:
def __init__(self, option_info):
self.option_info = option_info
self.flag = option_info['flag']
def mkdir(self):
if self.flag == False:
return False
return self.option_info['mkdir']
def dir_name(self, problem):
if self.flag == False:
return ''
if not self.mkdir():
return ''
return self.replace_name(self.option_info['dir_name'], problem) + '/'
def source_name(self, problem):
if self.flag == False:
return problem['problem_id']
        return self.replace_name(self.option_info['source_name'], problem)
def replace_name(self, value, problem):
value = value.replace('[NO]', problem['problem_id'])
value = value.replace('[TITLE]', problem['problem_title'])
return value
def get_ext(self, language):
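        # Returns a (failed, value) pair: (False, extension) when the
        # language is known, (True, 'Unknown extension') otherwise.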
extensions = {
'C': '.c',
'C++': '.cpp',
'C++11': '.cpp',
'C++14': '.cpp',
'C++17': '.cpp',
'Java': '.java',
'Java (OpenJDK)': '.java',
'C11': '.c',
'Python 2': '.py',
'Python 3': '.py',
'PyPy2': '.py',
'PyPy3': '.py',
'Ruby2.5': '.rb',
'Kotlin': '.kt',
'Swift': '.swift',
'C# 6.0': '.cs',
'Text': '.txt',
            'node.js': '.js',
'Go': '.go',
'F#': '.fs',
'PHP': '.php',
'Pascal': '.pas',
'Lua': '.lua',
'Perl': '.pl',
'Objective-C': '.m',
'Objective-C++': '.mm',
'C (Clang)': '.c',
'C++11 (Clang)': '.cpp',
'C++14 (Clang)': '.cpp',
'C++17 (Clang)': '.cpp',
'Golfscript': '.gs',
'Bash': '.sh',
'Fortran': '.f95',
'Scheme': '.scm',
'Ada': '.ada',
'awk': '.awk',
'OCaml': '.ml',
'Brainfuck': '.bf',
'Whitespace': '.ws',
'Tcl': '.tcl',
            'Assembly (32bit)': '.asm',
            'Assembly (64bit)': '.asm',  # second entry was a duplicate key; 64bit assumed
'D': '.d',
'Clojure': '.clj',
'Rhino': '.js',
'Cobol': '.cob',
'SpiderMonkey': '.js',
'Pike': '.pike',
'sed': '.sed',
'Rust': '.rs',
'Boo': '.boo',
'Intercal': '.i',
'bc': '.bc',
'Nemerle': '.n',
'Cobra': '.cobra',
'Algol 68': '.a68',
'Befunge': '.bf',
'Haxe': '.hx',
'LOLCODE': '.lol',
'VB.NET 4.0': '.vb',
'아희': '.aheui'
}
        if language not in extensions:
return True, 'Unknown extension'
return False, extensions[language]
| 29.57732
| 77
| 0.385849
|
class Option:
def __init__(self, option_info):
self.option_info = option_info
self.flag = option_info['flag']
def mkdir(self):
if self.flag == False:
return False
return self.option_info['mkdir']
def dir_name(self, problem):
if self.flag == False:
return ''
if not self.mkdir():
return ''
return self.replace_name(self.option_info['dir_name'], problem) + '/'
def source_name(self, problem):
if self.flag == False:
return problem['problem_id']
        return self.replace_name(self.option_info['source_name'], problem)
def replace_name(self, value, problem):
value = value.replace('[NO]', problem['problem_id'])
value = value.replace('[TITLE]', problem['problem_title'])
return value
def get_ext(self, language):
extensions = {
'C': '.c',
'C++': '.cpp',
'C++11': '.cpp',
'C++14': '.cpp',
'C++17': '.cpp',
'Java': '.java',
'Java (OpenJDK)': '.java',
'C11': '.c',
'Python 2': '.py',
'Python 3': '.py',
'PyPy2': '.py',
'PyPy3': '.py',
'Ruby2.5': '.rb',
'Kotlin': '.kt',
'Swift': '.swift',
'C# 6.0': '.cs',
'Text': '.txt',
            'node.js': '.js',
'Go': '.go',
'F#': '.fs',
'PHP': '.php',
'Pascal': '.pas',
'Lua': '.lua',
'Perl': '.pl',
'Objective-C': '.m',
'Objective-C++': '.mm',
'C (Clang)': '.c',
'C++11 (Clang)': '.cpp',
'C++14 (Clang)': '.cpp',
'C++17 (Clang)': '.cpp',
'Golfscript': '.gs',
'Bash': '.sh',
'Fortran': '.f95',
'Scheme': '.scm',
'Ada': '.ada',
'awk': '.awk',
'OCaml': '.ml',
'Brainfuck': '.bf',
'Whitespace': '.ws',
'Tcl': '.tcl',
            'Assembly (32bit)': '.asm',
            'Assembly (64bit)': '.asm',
'D': '.d',
'Clojure': '.clj',
'Rhino': '.js',
'Cobol': '.cob',
'SpiderMonkey': '.js',
'Pike': '.pike',
'sed': '.sed',
'Rust': '.rs',
'Boo': '.boo',
'Intercal': '.i',
'bc': '.bc',
'Nemerle': '.n',
'Cobra': '.cobra',
'Algol 68': '.a68',
'Befunge': '.bf',
'Haxe': '.hx',
'LOLCODE': '.lol',
'VB.NET 4.0': '.vb',
'아희': '.aheui'
}
        if language not in extensions:
return True, 'Unknown extension'
return False, extensions[language]
| true
| true
|
f702bdf164ec4134439d875a2ad83515cbba5787
| 5,769
|
py
|
Python
|
torch/ao/quantization/fuse_modules.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 24
|
2020-11-02T21:25:12.000Z
|
2022-03-17T07:20:33.000Z
|
torch/ao/quantization/fuse_modules.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 1
|
2019-08-01T00:17:43.000Z
|
2019-09-12T01:31:53.000Z
|
torch/ao/quantization/fuse_modules.py
|
WBobby/pytorch
|
655960460ccca936fa5c06df6bbafd25b5582115
|
[
"Intel"
] | 12
|
2020-11-06T05:00:37.000Z
|
2022-01-30T19:17:36.000Z
|
import copy
import torch.nn as nn
from torch.quantization.fuser_method_mappings import get_fuser_method
# for backward compatibility
from torch.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401
from torch.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401
from typing import List, Optional
# Generalization of getattr
def _get_module(model, submodule_key):
tokens = submodule_key.split('.')
cur_mod = model
for s in tokens:
cur_mod = getattr(cur_mod, s)
return cur_mod
# Generalization of setattr
def _set_module(model, submodule_key, module):
tokens = submodule_key.split('.')
sub_tokens = tokens[:-1]
cur_mod = model
for s in sub_tokens:
cur_mod = getattr(cur_mod, s)
setattr(cur_mod, tokens[-1], module)
def fuse_known_modules(mod_list, additional_fuser_method_mapping=None):
r"""Returns a list of modules that fuses the operations specified
in the input module list.
Fuses only the following sequence of modules:
conv, bn
conv, bn, relu
conv, relu
linear, bn
linear, relu
For these sequences, the first element in the output module list performs
the fused operation. The rest of the elements are set to nn.Identity()
"""
types = tuple(type(m) for m in mod_list)
fuser_method = get_fuser_method(types, additional_fuser_method_mapping)
if fuser_method is None:
raise NotImplementedError("Cannot fuse modules: {}".format(types))
new_mod : List[Optional[nn.Module]] = [None] * len(mod_list)
fused = fuser_method(*mod_list)
# NOTE: forward hooks not processed in the two following for loops will be lost after the fusion
# Move pre forward hooks of the base module to resulting fused module
for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():
fused.register_forward_pre_hook(pre_hook_fn)
del mod_list[0]._forward_pre_hooks[handle_id]
# Move post forward hooks of the last module to resulting fused module
for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():
fused.register_forward_hook(hook_fn)
del mod_list[-1]._forward_hooks[handle_id]
new_mod[0] = fused
for i in range(1, len(mod_list)):
identity = nn.Identity()
identity.training = mod_list[0].training
new_mod[i] = identity
return new_mod
def _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
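    # Internal helper: resolves each dotted module name in modules_to_fuse,
    # fuses the resulting module list with fuser_func, and writes the fused
    # modules back onto the model via _set_module.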
if fuse_custom_config_dict is None:
fuse_custom_config_dict = {}
additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
mod_list = []
for item in modules_to_fuse:
mod_list.append(_get_module(model, item))
# Fuse list of modules
new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping)
# Replace original module list with fused module list
for i, item in enumerate(modules_to_fuse):
_set_module(model, item, new_mod_list[i])
def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
r"""Fuses a list of modules into a single module
Fuses only the following sequence of modules:
conv, bn
conv, bn, relu
conv, relu
linear, relu
bn, relu
All other sequences are left unchanged.
For these sequences, replaces the first item in the list
with the fused module, replacing the rest of the modules
with identity.
Args:
model: Model containing the modules to be fused
modules_to_fuse: list of list of module names to fuse. Can also be a list
of strings if there is only a single list of modules to fuse.
inplace: bool specifying if fusion happens in place on the model, by default
a new model is returned
fuser_func: Function that takes in a list of modules and outputs a list of fused modules
of the same length. For example,
fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]
Defaults to torch.quantization.fuse_known_modules
`fuse_custom_config_dict`: custom configuration for fusion
.. code-block:: python
# Example of fuse_custom_config_dict
fuse_custom_config_dict = {
# Additional fuser_method mapping
"additional_fuser_method_mapping": {
(torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn
},
}
Returns:
        model with fused modules. A new copy is created unless inplace=True.
Examples::
>>> m = myModel()
>>> # m is a module containing the sub-modules below
>>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]
>>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
>>> output = fused_m(input)
>>> m = myModel()
>>> # Alternately provide a single list of modules to fuse
>>> modules_to_fuse = ['conv1', 'bn1', 'relu1']
>>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
>>> output = fused_m(input)
"""
if not inplace:
model = copy.deepcopy(model)
if all(isinstance(module_element, str) for module_element in modules_to_fuse):
# Handle case of modules_to_fuse being a list
_fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict)
else:
# Handle case of modules_to_fuse being a list of lists
for module_list in modules_to_fuse:
_fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict)
return model
| 38.97973
| 117
| 0.688508
|
import copy
import torch.nn as nn
from torch.quantization.fuser_method_mappings import get_fuser_method
from torch.quantization.fuser_method_mappings import fuse_conv_bn
from torch.quantization.fuser_method_mappings import fuse_conv_bn_relu
from typing import List, Optional
def _get_module(model, submodule_key):
tokens = submodule_key.split('.')
cur_mod = model
for s in tokens:
cur_mod = getattr(cur_mod, s)
return cur_mod
def _set_module(model, submodule_key, module):
tokens = submodule_key.split('.')
sub_tokens = tokens[:-1]
cur_mod = model
for s in sub_tokens:
cur_mod = getattr(cur_mod, s)
setattr(cur_mod, tokens[-1], module)
def fuse_known_modules(mod_list, additional_fuser_method_mapping=None):
types = tuple(type(m) for m in mod_list)
fuser_method = get_fuser_method(types, additional_fuser_method_mapping)
if fuser_method is None:
raise NotImplementedError("Cannot fuse modules: {}".format(types))
new_mod : List[Optional[nn.Module]] = [None] * len(mod_list)
fused = fuser_method(*mod_list)
for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():
fused.register_forward_pre_hook(pre_hook_fn)
del mod_list[0]._forward_pre_hooks[handle_id]
for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():
fused.register_forward_hook(hook_fn)
del mod_list[-1]._forward_hooks[handle_id]
new_mod[0] = fused
for i in range(1, len(mod_list)):
identity = nn.Identity()
identity.training = mod_list[0].training
new_mod[i] = identity
return new_mod
def _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
if fuse_custom_config_dict is None:
fuse_custom_config_dict = {}
additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
mod_list = []
for item in modules_to_fuse:
mod_list.append(_get_module(model, item))
new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping)
for i, item in enumerate(modules_to_fuse):
_set_module(model, item, new_mod_list[i])
def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
if not inplace:
model = copy.deepcopy(model)
if all(isinstance(module_element, str) for module_element in modules_to_fuse):
_fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict)
else:
for module_list in modules_to_fuse:
_fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict)
return model
| true
| true
|
f702c178fa0468bda62c777ae3343f3ff32258d0
| 2,947
|
py
|
Python
|
idaes/apps/ripe/__init__.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | null | null | null |
idaes/apps/ripe/__init__.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | null | null | null |
idaes/apps/ripe/__init__.py
|
OOAmusat/idaes-pse
|
ae7d3bb8e372bc32822dcdcb75e9fd96b78da539
|
[
"RSA-MD"
] | 1
|
2022-03-17T11:08:43.000Z
|
2022-03-17T11:08:43.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes".
"""
__all__ = [
"ripemodel",
"ems",
"rspace",
"sharedata",
"debug",
"powerlawp5",
"powerlaw2",
"powerlaw3",
"powerlaw4",
"avrami2",
"avrami3",
"avrami4",
"avrami5",
"randomnuc",
"ptompkins",
"jander",
"antijander",
"valensi",
"parabolic",
"gb3d",
"zlt",
"grain",
# PYLINT-TODO-FIX: this seems to be a genuine error since "massact" is not imported from .mechs
"massact", # pylint: disable=undefined-all-variable
"massactm",
"getmechs",
]
from .main import ripemodel, ripewrite, print_results # noqa: F401
from .shared import rspace, sharedata, debug # noqa: F401
from .atermconstruct import (
makeaterm,
formatinputs,
checkargs,
normalizefeatures,
) # noqa: F401
from .kinforms import lin, linjac, arr, arrjac, refarr, refarrjac # noqa: F401
from .mechs import (
powerlawp5,
powerlaw2,
powerlaw3,
powerlaw4,
avrami2,
avrami3,
avrami4,
avrami5,
randomnuc,
ptompkins,
jander,
antijander,
valensi,
parabolic,
gb3d,
zlt,
grain,
getmechs,
massactm,
) # noqa: F401
from .genpyomo import ripeomo # noqa: F401
from .targets import (
doalamo,
dopwalamo,
gentargets,
sstargets,
dynamictargets,
) # noqa: F401
from .confinv import confinv # noqa: F401
from .emsampling import constructmodel, ems # noqa: F401
from .checkoptions import checkoptions # noqa: F401
from .bounds import stoich_cons, count_neg, get_bounds # noqa: F401
| 30.697917
| 99
| 0.663386
|
__all__ = [
"ripemodel",
"ems",
"rspace",
"sharedata",
"debug",
"powerlawp5",
"powerlaw2",
"powerlaw3",
"powerlaw4",
"avrami2",
"avrami3",
"avrami4",
"avrami5",
"randomnuc",
"ptompkins",
"jander",
"antijander",
"valensi",
"parabolic",
"gb3d",
"zlt",
"grain",
"massact", "massactm",
"getmechs",
]
from .main import ripemodel, ripewrite, print_results
from .shared import rspace, sharedata, debug
from .atermconstruct import (
makeaterm,
formatinputs,
checkargs,
normalizefeatures,
)
from .kinforms import lin, linjac, arr, arrjac, refarr, refarrjac
from .mechs import (
powerlawp5,
powerlaw2,
powerlaw3,
powerlaw4,
avrami2,
avrami3,
avrami4,
avrami5,
randomnuc,
ptompkins,
jander,
antijander,
valensi,
parabolic,
gb3d,
zlt,
grain,
getmechs,
massactm,
)
from .genpyomo import ripeomo
from .targets import (
doalamo,
dopwalamo,
gentargets,
sstargets,
dynamictargets,
)
from .confinv import confinv
from .emsampling import constructmodel, ems
from .checkoptions import checkoptions
from .bounds import stoich_cons, count_neg, get_bounds
| true
| true
|
f702c1a5dc7274750e530ef4de6a21cb1e73cad8
| 1,465
|
py
|
Python
|
tests/helpers/test_init.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
tests/helpers/test_init.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 125
|
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
tests/helpers/test_init.py
|
dauden1184/home-assistant
|
f4c6d389b77d0efa86644e76604eaea5d21abdb5
|
[
"Apache-2.0"
] | 21
|
2017-07-26T17:09:40.000Z
|
2022-03-27T22:37:22.000Z
|
"""Test component helpers."""
# pylint: disable=protected-access
from collections import OrderedDict
import unittest
from homeassistant import helpers
from tests.common import get_test_home_assistant
class TestHelpers(unittest.TestCase):
"""Tests homeassistant.helpers module."""
# pylint: disable=invalid-name
def setUp(self):
"""Init needed objects."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_extract_domain_configs(self):
"""Test the extraction of domain configuration."""
config = {
'zone': None,
'zoner': None,
'zone ': None,
'zone Hallo': None,
'zone 100': None,
}
self.assertEqual(set(['zone', 'zone Hallo', 'zone 100']),
set(helpers.extract_domain_configs(config, 'zone')))
def test_config_per_platform(self):
"""Test config per platform method."""
config = OrderedDict([
('zone', {'platform': 'hello'}),
('zoner', None),
('zone Hallo', [1, {'platform': 'hello 2'}]),
('zone 100', None),
])
assert [
('hello', config['zone']),
(None, 1),
('hello 2', config['zone Hallo'][1]),
] == list(helpers.config_per_platform(config, 'zone'))
| 28.72549
| 77
| 0.56314
|
from collections import OrderedDict
import unittest
from homeassistant import helpers
from tests.common import get_test_home_assistant
class TestHelpers(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
def tearDown(self):
self.hass.stop()
def test_extract_domain_configs(self):
config = {
'zone': None,
'zoner': None,
'zone ': None,
'zone Hallo': None,
'zone 100': None,
}
self.assertEqual(set(['zone', 'zone Hallo', 'zone 100']),
set(helpers.extract_domain_configs(config, 'zone')))
def test_config_per_platform(self):
config = OrderedDict([
('zone', {'platform': 'hello'}),
('zoner', None),
('zone Hallo', [1, {'platform': 'hello 2'}]),
('zone 100', None),
])
assert [
('hello', config['zone']),
(None, 1),
('hello 2', config['zone Hallo'][1]),
] == list(helpers.config_per_platform(config, 'zone'))
| true
| true
|
f702c27587a9d8e0dceffc00ca6aaa56ec63ef6f
| 3,699
|
py
|
Python
|
tools/processing.py
|
SmolakK/HuMobi
|
67b40f839a843123093582935e89f91e16bc4374
|
[
"BSD-3-Clause"
] | null | null | null |
tools/processing.py
|
SmolakK/HuMobi
|
67b40f839a843123093582935e89f91e16bc4374
|
[
"BSD-3-Clause"
] | null | null | null |
tools/processing.py
|
SmolakK/HuMobi
|
67b40f839a843123093582935e89f91e16bc4374
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
import numpy as np
def top_time(ind=None, gs=None):
"""
Selects the location (by coordinates) which was visited for the longest period during given time interval
:param ind: user id
:param gs: GeoDataFrame from groupby execution containing all the data in the given time interval
:return: user id (if given) and the data for the longest visited location
"""
aggregated = []
    for tstamp, g in gs:  # for each time-interval group of the GeoDataFrame
if len(g) > 1: # if there is more than one record
diff_places = (g['geometry'].shift(-1) != g['geometry']).iloc[:-1] # checks when coordinates change
if diff_places.any(): # if there is change in locations
                g_res = g.reset_index()  # move the datetime index into a regular column
diffs = g_res.shift(-1)['datetime'] - g_res['datetime'] # find time differences (spent in location)
joined_dfs = g_res.join(diffs, rsuffix='a') # add them to locations
joined_dfs['geometry'] = g_res['geometry'].astype(str) # copy geometry as string
point_max = joined_dfs.groupby('geometry')['datetimea'].sum().idxmax() # grouping locations find the longest time sum
selected = g[g['geometry'].astype(str) == point_max] # select the location with the highest total time
else:
selected = g # if one location visited - copy GeoDataFrame
else:
selected = g
aggregated.append(selected)
if ind is None:
return pd.concat(aggregated)
else:
return ind, pd.concat(aggregated)
def mode_geoseries(ind, gs):
"""
Calculates mode for GeoSeries
:param ind: identifier
:param gs: GeoSeries
:return: identifier and a mode for GeoSeries
"""
aggregated = []
for g in gs:
if g[1].empty:
aggregated.append(None)
else:
selected = g[1].mode()
selected = selected.set_index(g[1].index)
aggregated.append(selected)
return ind, pd.concat(aggregated)
def rowwise_average(gs, row_count=None):
"""
Calculates an average for each row in each group - rowwise.
:param gs: GeoSeries
    :param row_count: defines how many rows should be considered
:return: averaged GeoSeries rowwise
"""
if row_count is None:
row_count = gs.groupby(level=0).size().max()
return pd.Series([gs.groupby(level=0).nth(n).mean() for n in range(row_count)])
def groupwise_average(gs):
"""
Calculates an average from each group of GeoSeries
:param gs: GeoSeries
:return: averaged GeoSeries
"""
return gs.groupby(level=0).mean()
def groupwise_normalise(gs):
"""
Normalises each group of GeoSeries
:param gs: GeoSeries
:return: normalised GeoSeries
"""
return gs.groupby(level=0).apply(lambda x: x / x.sum())
def groupwise_expansion(gs):
"""
Calculates expanding mean for each group of GeoSeries
:param gs: GeoSeries
:return: averaged GeoSeries
"""
return gs.groupby(level=0).expanding().mean()
def total_normalise(gs):
"""
Performs complete normalisation of GeoSeries
:param gs: GeoSeries
:return: normalised GeoSeries
"""
return gs / gs.sum()
def start_end(trajectories_frame):
"""
Compresses stops in TrajectoriesFrame by adding start and end of visits in locations
:param trajectories_frame: TrajectoriesFrame object class
:return: compressed TrajectoriesFrame
"""
to_concat = []
if 'date' not in trajectories_frame.columns:
trajectories_frame['date'] = trajectories_frame.index.get_level_values(1)
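    # For each user, keep only the rows where the location changes and stamp
    # each kept row with the start/end time of the visit it opens;
    # zero-length visits (start == end) are dropped.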
for gs in trajectories_frame.groupby(level=0):
firsts = gs[1][gs[1]['geometry'].shift() != gs[1]['geometry']]
lasts = gs[1][gs[1]['geometry'].shift(-1) != gs[1]['geometry']]
firsts.loc[:, 'start'] = firsts['date']
lasts = lasts.set_index(firsts.index)
firsts.loc[:, 'end'] = lasts['date']
firsts = firsts[firsts['start'] != firsts['end']]
to_concat.append(firsts)
return pd.concat(to_concat)
| 31.347458
| 122
| 0.712625
|
import pandas as pd
import numpy as np
def top_time(ind=None, gs=None):
aggregated = []
    for tstamp, g in gs:
        if len(g) > 1:
            diff_places = (g['geometry'].shift(-1) != g['geometry']).iloc[:-1]
            if diff_places.any():
                g_res = g.reset_index()
                diffs = g_res.shift(-1)['datetime'] - g_res['datetime']
                joined_dfs = g_res.join(diffs, rsuffix='a')
                joined_dfs['geometry'] = g_res['geometry'].astype(str)
                point_max = joined_dfs.groupby('geometry')['datetimea'].sum().idxmax()
                selected = g[g['geometry'].astype(str) == point_max]
            else:
                selected = g
        else:
            selected = g
aggregated.append(selected)
if ind is None:
return pd.concat(aggregated)
else:
return ind, pd.concat(aggregated)
def mode_geoseries(ind, gs):
aggregated = []
for g in gs:
if g[1].empty:
aggregated.append(None)
else:
selected = g[1].mode()
selected = selected.set_index(g[1].index)
aggregated.append(selected)
return ind, pd.concat(aggregated)
def rowwise_average(gs, row_count=None):
if row_count is None:
row_count = gs.groupby(level=0).size().max()
return pd.Series([gs.groupby(level=0).nth(n).mean() for n in range(row_count)])
def groupwise_average(gs):
return gs.groupby(level=0).mean()
def groupwise_normalise(gs):
return gs.groupby(level=0).apply(lambda x: x / x.sum())
def groupwise_expansion(gs):
return gs.groupby(level=0).expanding().mean()
def total_normalise(gs):
return gs / gs.sum()
def start_end(trajectories_frame):
to_concat = []
if 'date' not in trajectories_frame.columns:
trajectories_frame['date'] = trajectories_frame.index.get_level_values(1)
for gs in trajectories_frame.groupby(level=0):
firsts = gs[1][gs[1]['geometry'].shift() != gs[1]['geometry']]
lasts = gs[1][gs[1]['geometry'].shift(-1) != gs[1]['geometry']]
firsts.loc[:, 'start'] = firsts['date']
lasts = lasts.set_index(firsts.index)
firsts.loc[:, 'end'] = lasts['date']
firsts = firsts[firsts['start'] != firsts['end']]
to_concat.append(firsts)
return pd.concat(to_concat)
| true
| true
|
f702c2aa854a720c28f7a9cad75cc8ce2656eab8
| 36
|
py
|
Python
|
show_config.py
|
temper8/MatBench
|
1ea24d18af35b57ef2d61148709eb6d49835fe97
|
[
"MIT"
] | null | null | null |
show_config.py
|
temper8/MatBench
|
1ea24d18af35b57ef2d61148709eb6d49835fe97
|
[
"MIT"
] | null | null | null |
show_config.py
|
temper8/MatBench
|
1ea24d18af35b57ef2d61148709eb6d49835fe97
|
[
"MIT"
] | null | null | null |
import numpy as np
np.show_config()
| 18
| 19
| 0.777778
|
import numpy as np
np.show_config()
| true
| true
|
f702c2d50f706742af9223282c2024342b6a82c5
| 897
|
py
|
Python
|
Output.py
|
itsayeshanaeem/WCPSAccess
|
12b7a2f28a0f849a42336357723a57b6cb5905c9
|
[
"CNRI-Python"
] | null | null | null |
Output.py
|
itsayeshanaeem/WCPSAccess
|
12b7a2f28a0f849a42336357723a57b6cb5905c9
|
[
"CNRI-Python"
] | null | null | null |
Output.py
|
itsayeshanaeem/WCPSAccess
|
12b7a2f28a0f849a42336357723a57b6cb5905c9
|
[
"CNRI-Python"
] | null | null | null |
from PIL import Image as im
import numpy as np
from io import BytesIO
import csv
class outputResponse():
    def __init__(self, response):
        self.response = response
def retrieveResult(response, returntype):
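        # Dispatch on the requested return type: images are decoded with PIL
        # and shown, text/csv is split on commas and written to x.csv, the
        # 1/0 flags print the raw response bytes, anything else prints the
        # decoded text.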
if (returntype == "image/png" or returntype == "image/jpeg"):
img_arr = np.array(im.open(BytesIO(response.content)))
data = im.fromarray(img_arr)
data.show()
elif (returntype == "text/csv"):
response = response.content.decode('utf-8')
my_list = response.split (",")
with open ('x.csv', 'w') as file:
writer = csv.writer(file, delimiter = ',')
writer.writerow(my_list)
elif (returntype == 1 or returntype == 0):
print(response.content)
else:
response = response.content.decode('utf-8')
print (response)
| 30.931034
| 69
| 0.571906
|
from PIL import Image as im
import numpy as np
from io import BytesIO
import csv
class outputResponse():
    def __init__(self, response):
        self.response = response
def retrieveResult(response, returntype):
if (returntype == "image/png" or returntype == "image/jpeg"):
img_arr = np.array(im.open(BytesIO(response.content)))
data = im.fromarray(img_arr)
data.show()
elif (returntype == "text/csv"):
response = response.content.decode('utf-8')
my_list = response.split (",")
with open ('x.csv', 'w') as file:
writer = csv.writer(file, delimiter = ',')
writer.writerow(my_list)
elif (returntype == 1 or returntype == 0):
print(response.content)
else:
response = response.content.decode('utf-8')
print (response)
| true
| true
|
f702c446130e944a036310a43721bc766a7a5bdb
| 766
|
py
|
Python
|
tests/test_types_file.py
|
betasewer/machaon
|
63ccb4405ac693f14f9d25f6a706466a917dddbf
|
[
"MIT"
] | 2
|
2020-07-05T08:39:12.000Z
|
2022-01-19T22:08:21.000Z
|
tests/test_types_file.py
|
betasewer/machaon
|
63ccb4405ac693f14f9d25f6a706466a917dddbf
|
[
"MIT"
] | 23
|
2020-06-23T16:18:17.000Z
|
2021-12-29T09:56:48.000Z
|
tests/test_types_file.py
|
betasewer/machaon
|
63ccb4405ac693f14f9d25f6a706466a917dddbf
|
[
"MIT"
] | null | null | null |
import pytest
import os
from machaon.types.file import TextFile
from machaon.types.shell import Path
from machaon.core.invocation import instant_return_test, instant_context
def test_construct(tmp_path):
FILEPATH = Path(__file__)
context = instant_context()
context.define_type(TextFile)
f = instant_return_test(context, FILEPATH, "TextFile").value
assert isinstance(f, TextFile)
assert isinstance(f.path(), Path)
assert f.pathstr == FILEPATH.get()
p = Path(tmp_path) / "hello.txt"
f = instant_return_test(context, p, "TextFile").value
f.set_encoding("utf-8")
assert f.encoding() == "utf-8"
with f.open("w"):
f.stream.write("HELLO\n")
f.stream.write("WORLD")
assert f.text() == "HELLO\nWORLD"
| 28.37037
| 72
| 0.693211
|
import pytest
import os
from machaon.types.file import TextFile
from machaon.types.shell import Path
from machaon.core.invocation import instant_return_test, instant_context
def test_construct(tmp_path):
FILEPATH = Path(__file__)
context = instant_context()
context.define_type(TextFile)
f = instant_return_test(context, FILEPATH, "TextFile").value
assert isinstance(f, TextFile)
assert isinstance(f.path(), Path)
assert f.pathstr == FILEPATH.get()
p = Path(tmp_path) / "hello.txt"
f = instant_return_test(context, p, "TextFile").value
f.set_encoding("utf-8")
assert f.encoding() == "utf-8"
with f.open("w"):
f.stream.write("HELLO\n")
f.stream.write("WORLD")
assert f.text() == "HELLO\nWORLD"
| true
| true
|
f702c5549f1e9d10bb50fdc16097c0795dafbdde
| 692
|
py
|
Python
|
practice/Python3/regular_expressions/regular_expressions.py
|
21-guns/algo
|
b2a0665d7520cca1bd8a9a4fceed0ba09618eadd
|
[
"MIT"
] | null | null | null |
practice/Python3/regular_expressions/regular_expressions.py
|
21-guns/algo
|
b2a0665d7520cca1bd8a9a4fceed0ba09618eadd
|
[
"MIT"
] | null | null | null |
practice/Python3/regular_expressions/regular_expressions.py
|
21-guns/algo
|
b2a0665d7520cca1bd8a9a4fceed0ba09618eadd
|
[
"MIT"
] | 1
|
2018-01-10T13:39:47.000Z
|
2018-01-10T13:39:47.000Z
|
import re
# match whole string
data1 = "aaab"
data2 = "aaaba"
pattern = r"\Aa+b\Z"
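# \A and \Z anchor at the very start and end of the whole string, so the
# match below succeeds for "aaab" but fails for "aaaba"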
match1 = re.match(pattern, data1)
print(match1)
match2 = re.match(pattern, data2)
print(match2)
# regular expression options
data = "AaaA\n\raaaA"
pattern = r"^(a+)$"
match = re.match(pattern, data, re.I | re.M)
print(match)
print(match.group())
# search all matches
data = "Pi = 3.14, exponent = 2.718"
pattern = r"(\d+\.\d+)"
matches = re.findall(pattern, data)
print(matches)
# replacement of the match (with capture group)
data = re.sub(pattern, r'<f>\1</f>', data)
print(data)
# search for a match
match = re.search(pattern, data)
if match:
print(match.group())
print(float(match.group()))
| 18.210526
| 44
| 0.669075
|
import re
data1 = "aaab"
data2 = "aaaba"
pattern = r"\Aa+b\Z"
match1 = re.match(pattern, data1)
print(match1)
match2 = re.match(pattern, data2)
print(match2)
data = "AaaA\n\raaaA"
pattern = r"^(a+)$"
match = re.match(pattern, data, re.I | re.M)
print(match)
print(match.group())
data = "Pi = 3.14, exponent = 2.718"
pattern = r"(\d+\.\d+)"
matches = re.findall(pattern, data)
print(matches)
data = re.sub(pattern, r'<f>\1</f>', data)
print(data)
match = re.search(pattern, data)
if match:
print(match.group())
print(float(match.group()))
| true
| true
|
f702c5f926fe8566850d15108b97b51680b44657
| 1,043
|
py
|
Python
|
django_bnr/migrations/0001_initial.py
|
presslabs/django-bnr
|
07ed65ba8e153197862baa8a4428e068ade99c9e
|
[
"MIT"
] | null | null | null |
django_bnr/migrations/0001_initial.py
|
presslabs/django-bnr
|
07ed65ba8e153197862baa8a4428e068ade99c9e
|
[
"MIT"
] | null | null | null |
django_bnr/migrations/0001_initial.py
|
presslabs/django-bnr
|
07ed65ba8e153197862baa8a4428e068ade99c9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Rate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rate', models.DecimalField(null=True, verbose_name=b'Exchange rate', max_digits=8, decimal_places=4, blank=True)),
('date', models.DateField(db_index=True)),
('currency', models.CharField(default=b'USD', max_length=3, db_index=True, choices=[(b'CHF', b'CHF'), (b'EUR', b'EUR'), (b'GBP', b'GBP'), (b'USD', b'USD')])),
],
options={
'ordering': ['-date', 'currency'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='rate',
unique_together=set([('date', 'currency')]),
),
]
| 33.645161
| 174
| 0.549377
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Rate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rate', models.DecimalField(null=True, verbose_name=b'Exchange rate', max_digits=8, decimal_places=4, blank=True)),
('date', models.DateField(db_index=True)),
('currency', models.CharField(default=b'USD', max_length=3, db_index=True, choices=[(b'CHF', b'CHF'), (b'EUR', b'EUR'), (b'GBP', b'GBP'), (b'USD', b'USD')])),
],
options={
'ordering': ['-date', 'currency'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='rate',
unique_together=set([('date', 'currency')]),
),
]
| true
| true
|
f702c66ec9d5bae5d0c6a271922042b43ed38eb8
| 38
|
py
|
Python
|
{{cookiecutter.project_slug}}/base/settings_local.py
|
claysllanxavier/django-cookiecutter
|
97de7ff4ed3dc94c32bf756a57aee0664a888cbc
|
[
"BSD-3-Clause"
] | 8
|
2021-08-13T17:48:27.000Z
|
2022-02-22T02:34:15.000Z
|
{{cookiecutter.project_slug}}/base/settings_local.py
|
claysllanxavier/django-cookiecutter
|
97de7ff4ed3dc94c32bf756a57aee0664a888cbc
|
[
"BSD-3-Clause"
] | 2
|
2022-03-24T20:39:00.000Z
|
2022-03-24T20:39:48.000Z
|
{{cookiecutter.project_slug}}/base/settings_local.py
|
claysllanxavier/django-cookiecutter
|
97de7ff4ed3dc94c32bf756a57aee0664a888cbc
|
[
"BSD-3-Clause"
] | 2
|
2021-09-21T00:05:27.000Z
|
2022-01-03T10:50:05.000Z
|
DEBUG = True
ALLOWED_HOSTS = ['*', ]
| 9.5
| 23
| 0.578947
|
DEBUG = True
ALLOWED_HOSTS = ['*', ]
| true
| true
|
f702c70a99f711f50de59372d5545d6e6a043b23
| 41,365
|
py
|
Python
|
main.py
|
yukkerike/vklml
|
2efb6fa506a71f8dec8286c833b92985e70dc164
|
[
"MIT"
] | 6
|
2020-10-14T20:11:16.000Z
|
2022-02-08T16:12:46.000Z
|
main.py
|
yukkerike/vklml
|
2efb6fa506a71f8dec8286c833b92985e70dc164
|
[
"MIT"
] | null | null | null |
main.py
|
yukkerike/vklml
|
2efb6fa506a71f8dec8286c833b92985e70dc164
|
[
"MIT"
] | null | null | null |
import logging
import logging.handlers
import sys
import os
import json
import sqlite3
import signal
import threading
import time
import difflib
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
import requests.exceptions
cwd = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
stream=sys.stdout,
level=logging.WARNING
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
os.path.join(cwd, 'log.txt'),
maxBytes=102400
)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.info("Запуск...")
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, requests.exceptions.RequestException):
return
elif issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Непойманное исключение.", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
defaultConfig = {
"ACCESS_TOKEN": "",
"createIndex": False,
"maxCacheAge": 86400,
"preloadMessages": False,
"customActions": False,
"disableMessagesLogging": False,
'enableFlaskWebServer': False,
'useAuth': False,
'users': {
'admin':'password'
},
'port': 8080,
'https': False,
'httpsPort': 8443,
'cert': [
os.path.join(cwd, "cert.pem"),
os.path.join(cwd, "key.pem")
]
}
def grab_token_from_args():
if len(sys.argv) > 1:
defaultConfig['ACCESS_TOKEN'] = sys.argv[1]
elif defaultConfig['ACCESS_TOKEN'] == "":
raise Exception("Не задан ACCESS_TOKEN")
if not os.path.exists(os.path.join(cwd, "config.json")):
with open(os.path.join(cwd, "config.json"), 'w') as conf:
grab_token_from_args()
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
else:
with open(os.path.join(cwd, "config.json"), 'r') as conf:
config = json.load(conf)
for i in config:
if i in defaultConfig:
defaultConfig[i] = config[i]
grab_token_from_args()
if len(set(config)) - len(set(defaultConfig)) != 0:
with open(os.path.join(cwd, "config.json"), 'w') as conf:
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
stop_mutex = threading.Lock()
def run_flask_server():
port = config['httpsPort'] if config['https'] else config['port']
import socket
ip = socket.gethostbyname(socket.gethostname())
del socket
while True:
try:
if config['https']:
logger.info("Trying to run on https://%s:%s/", ip, port)
app.run(
host='0.0.0.0',
port=port,
ssl_context=(
config['cert'][0],
config['cert'][1]
)
)
else:
logger.info("Trying to run on http://%s:%s/", ip, port)
app.run(host='0.0.0.0', port=port)
except OSError:
port += 1
if config['enableFlaskWebServer']:
from flaskWebServer import app
threading.Thread(target=run_flask_server).start()
if config['createIndex']:
from updateIndex import indexUpdater
indexUpdater()
def tryAgainIfFailed(func, *args, maxRetries=5, **kwargs):
c = maxRetries
delay = 1
while True:
try:
return func(*args, **kwargs)
except vk_api.exceptions.ApiError:
if str(sys.exc_info()[1]).find("User authorization failed") != -1:
logger.warning("Токен недействителен.")
interrupt_handler(0, None)
raise Warning
except requests.exceptions.RequestException:
if delay < 32:
delay*=2
time.sleep(delay)
continue
except BaseException:
if maxRetries == 0:
logger.exception("После %s попыток %s(%s%s) завершился с ошибкой.", c, func.__name__, args, kwargs)
raise Warning
logger.warning("Перезапуск %s(%s%s) через %s секунд...", func.__name__, args, kwargs, delay)
if delay < 32:
delay*=2
time.sleep(delay)
if maxRetries > 0:
maxRetries -= 1
continue
vk_session = vk_api.VkApi(token=config['ACCESS_TOKEN'],api_version='5.130')
longpoll = VkLongPoll(vk_session, wait=60, mode=2)
vk = vk_session.get_api()
account_id = tryAgainIfFailed(vk.users.get)[0]['id']
if not config['disableMessagesLogging']:
if not os.path.exists(
os.path.join(
cwd,
"mesAct"
)
):
os.makedirs(
os.path.join(
cwd,
"mesAct"
)
)
f = open(
os.path.join(
cwd,
"mesAct",
"vkGetVideoLink.html"
),
'w',
encoding='utf-8'
)
f.write("""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
html,body,iframe{
width: 100%;
height: 100%;
}
</style>
</head>
<body>
<p>Если видео не проигрывается, прямую ссылку можно получить через api:</p>
<script>
function embedLink(id) {
var link = document.createElement('a');
            link.href = "https://vk.com/dev/video.get?params[videos]=0_0," + id + "&params[count]=1&params[offset]=1";
link.innerText = id;
link.setAttribute('target', '_blank')
document.getElementsByTagName("body")[0].appendChild(link);
}
function embedPlayer(link) {
var frame = document.createElement('iframe');
frame.src = link;
frame.style = "width:100%;height:100%;";
frame.setAttribute('allowFullScreen', '')
document.getElementsByTagName("body")[0].appendChild(frame);
}
function splitArgs(){
var args = document.location.search;
var lastAmpersand = args.lastIndexOf('&');
return [args.slice(1, lastAmpersand), args.slice(lastAmpersand + 1)];
}
var args = splitArgs();
embedLink(args[1]);
embedPlayer(args[0]);
</script>
</body>
</html>""")
f.close()
if not os.path.exists(
os.path.join(
cwd,
"messages.db"
)
):
conn = sqlite3.connect(
os.path.join(
cwd,
"messages.db"
),
check_same_thread=False,
isolation_level=None,
timeout=15.0
)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE "messages" (
"peer_id" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL,
"message_id" INTEGER NOT NULL UNIQUE,
"message" TEXT,
"attachments" TEXT,
"timestamp" INTEGER NOT NULL,
"fwd_messages" TEXT
)""")
cursor.execute("""CREATE TABLE "chats_cache" (
"chat_id" INTEGER NOT NULL UNIQUE,
"chat_name" TEXT NOT NULL
)""")
cursor.execute("""CREATE TABLE "users_cache" (
"user_id" INTEGER NOT NULL UNIQUE,
"user_name" TEXT NOT NULL
)""")
account_name = tryAgainIfFailed(
vk.users.get,
user_id=account_id
)[0]
account_name = f"{account_name['first_name']} {account_name['last_name']}"
cursor.execute(
"""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""",
(account_id, account_name,)
)
conn.commit()
else:
conn = sqlite3.connect(
os.path.join(cwd, "messages.db"),
check_same_thread=False,
timeout=15.0
)
cursor = conn.cursor()
if not os.path.exists(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
)
):
f = open(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
),
'w',
encoding='utf-8'
)
f.write(':root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}dl,ol,ul{margin-top:0;margin-bottom:1rem}b,strong{font-weight:bolder}a{color:#007bff;text-decoration:none;background-color:transparent}img{vertical-align:middle;border-style:none}table{border-collapse:collapse}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item+.list-group-item{border-top-width:0}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.mes{word-break:break-all}img,a,audio{display:block}img{max-width:100%}')
f.close()
if config['customActions']:
from customActions import customActions
cust = customActions(vk, conn, cursor)
def bgWatcher():
while True:
maxCacheAge = config['maxCacheAge']
with stop_mutex:
logger.info("Обслуживание БД...")
try:
showMessagesWithDeletedAttachments()
except BaseException:
logger.exception("Ошибка при поиске удаленных фото")
try:
if maxCacheAge != -1:
cursor.execute(
"""DELETE FROM messages WHERE timestamp < ?""",
(time.time() - maxCacheAge,)
)
conn.commit()
cursor.execute("VACUUM")
else:
maxCacheAge = 86400
except BaseException:
logger.exception("Ошибка при очистке базы данных")
logger.info("Обслуживание БД завершено.")
time.sleep(maxCacheAge)
def interrupt_handler(signum, frame):
conn.commit()
cursor.close()
try:
tableWatcher.cancel()
except AttributeError:
pass
logger.info("Завершение...")
os._exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
signal.signal(signal.SIGTERM, interrupt_handler)
def eventWorker_predefinedDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
if len(events) == 0:
flag.clear()
def eventWorker_customDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
def eventWorker():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
def predefinedActions(event):
try:
if event.type == VkEventType.MESSAGE_NEW:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_EDIT:
if event.message_data[0]:
activityReport(event.message_id, event.peer_id, event.user_id, event.timestamp, True, event.message_data[1], event.message_data[2], event.text)
cursor.execute(
"""INSERT or REPLACE INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_FLAGS_SET:
try:
activityReport(event.message_id)
cursor.execute(
"""DELETE FROM messages WHERE message_id = ?""",
(event.message_id,)
)
conn.commit()
except TypeError:
logger.info("Удаление невозможно, сообщение отсутствует в БД.")
except sqlite3.IntegrityError:
logger.warning("Запущено несколько копий программы, завершение...")
interrupt_handler(0, None)
except Warning:
pass
except BaseException:
logger.exception("Ошибка при сохранении сообщения. \n %s", vars(event))
def main():
logger.info("Запущен основной цикл.")
global events
for event in longpoll.listen():
try:
if event.raw[0] == 4 or event.raw[0] == 5:
if event.attachments != {}:
event.message_data = getAttachments(event)
else:
event.message_data = True, None, None
if event.from_user and event.raw[2] & 2:
event.user_id = account_id
elif event.from_group:
if event.from_me:
event.user_id = account_id
else:
event.user_id = event.peer_id
if not event.message:
event.message = None
events.append(event)
flag.set()
elif event.raw[0] == 2 and (event.raw[2] & 131072 or event.raw[2] & 128):
events.append(event)
flag.set()
except Warning:
pass
except BaseException:
logger.exception("Ошибка при добавлении события в очередь. \n %s", vars(event))
def showMessagesWithDeletedAttachments():
cursor.execute("""SELECT message_id, attachments FROM messages WHERE attachments IS NOT NULL""")
fetch_attachments = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
cursor.execute("""SELECT message_id, fwd_messages FROM messages WHERE fwd_messages IS NOT NULL""")
fetch_fwd = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
c = 0
for i in range(len(fetch_attachments)):
for j in fetch_attachments[i - c][1]:
if j['type'] == 'photo' or j['type'] == 'video' or j['type'] == 'doc':
break
else:
del fetch_attachments[i - c]
c += 1
messages_attachments = []
messages_fwd = []
for i in [[j[0] for j in fetch_attachments[i:i + 100]] for i in range(0, len(fetch_attachments), 100)]:
messages_attachments.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
for i in [[j[0] for j in fetch_fwd[i:i + 100]] for i in range(0, len(fetch_fwd), 100)]:
messages_fwd.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
c = 0
for i in range(len(fetch_attachments)):
if compareAttachments(messages_attachments[i - c]['attachments'], fetch_attachments[i - c][1]):
del fetch_attachments[i - c]
del messages_attachments[i - c]
c += 1
for i in range(len(fetch_attachments)):
activityReport(fetch_attachments[i][0])
if messages_attachments[i]['attachments'] == []:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(None, fetch_attachments[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(
json.dumps(messages_attachments[i]['attachments']),
fetch_attachments[i][0],
)
)
c = 0
for i in range(len(fetch_fwd)):
if compareFwd(
messages_fwd[i - c],
{
'fwd_messages': fetch_fwd[i - c][1]
}
):
del fetch_fwd[i - c]
del messages_fwd[i - c]
c += 1
for i in range(len(fetch_fwd)):
activityReport(fetch_fwd[i][0])
if messages_fwd[i]['fwd_messages'] == []:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(None, fetch_fwd[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(
json.dumps(messages_fwd[i]['fwd_messages']),
fetch_fwd[i][0],
)
)
conn.commit()
def compareFwd(new, old):
if 'reply_message' in new:
new['fwd_messages'] = [new['reply_message']]
if 'reply_message' in old:
old['fwd_messages'] = [old['reply_message']]
for i in range(len(old['fwd_messages'])):
if 'fwd_messages' in old['fwd_messages'][i] and 'fwd_messages' in new['fwd_messages'][i]:
if not compareFwd(
new['fwd_messages'][i],
old['fwd_messages'][i]
):
return False
if not compareAttachments(
new['fwd_messages'][i]['attachments'],
old['fwd_messages'][i]['attachments']
):
return False
return True
def compareAttachments(new, old):
if len(new) < len(old):
return False
return True
def attachmentsParse(urls):
if urls is None:
return ""
html = """<div>
"""
for i in urls:
urlSplit = i.split(',')
if i.find('vk.com/sticker/') != -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.jpg') != -1 and i.find(',') == -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.mp3') != -1:
html += """ <audio src="{}" controls></audio>
""".format(i)
elif i.find('https://vk.com/audio') != -1:
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i, i[23:-11].replace('%20', ' '))
elif i.find('@') != -1:
i = i.rsplit('@', 1)
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i[1], i[0])
elif len(urlSplit) == 3:
html += """ <a href="{}" target="_blank">
Видео
<img src="{}"/>
</a>
""".format(f"./vkGetVideoLink.html?{urlSplit[1]}&{urlSplit[2]}", urlSplit[0])
else:
html += """ <a href="{0}" target="_blank">
{0}
</a>
""".format(i)
html += """</div>"""
return html
def getAttachments(event):
message_id = event.message_id
fullLoadUnNeeded = not (event.raw[0] == 5 or 'fwd' in event.attachments)
count = 0
if fullLoadUnNeeded:
for i in range(1,11):
if f'attach{i}_type' in event.attachments:
if event.attachments[f'attach{i}_type'] not in ('sticker', 'link'):
fullLoadUnNeeded = False
else:
count = i
break
if fullLoadUnNeeded:
attachments = []
for i in range(1,count):
if event.attachments[f'attach{i}_type'] == 'sticker':
attachments.append({'type':'sticker','sticker':{'images':[{'height':64,'url':f'https://vk.com/sticker/1-{event.attachments[f"attach{i}"]}-64'}]}})
else:
if f'attach{i}_title' in event.attachments:
title = event.attachments[f'attach{i}_title']
else:
title = event.attachments[f'attach{i}_url']
attachments.append({'type':'link','link':{'title':title,'url':event.attachments[f'attach{i}_url']}})
return False, json.dumps(attachments, ensure_ascii=False,), None
mes = tryAgainIfFailed(
vk.messages.getById,
message_ids=message_id
)['items']
if not len(mes):
logger.info("Не удалось запросить вложения для сообщения, message_id = %i.", event.message_id)
return False, "[]", "[]"
else:
mes = mes[0]
hasUpdateTime = 'update_time' in mes
fwd_messages = None
if 'reply_message' in mes:
fwd_messages = json.dumps([mes['reply_message']], ensure_ascii=False,)
elif mes['fwd_messages'] != []:
fwd_messages = json.dumps(mes['fwd_messages'], ensure_ascii=False,)
if mes['attachments'] == []:
attachments = None
else:
attachments = json.dumps(mes['attachments'], ensure_ascii=False,)
return hasUpdateTime, attachments, fwd_messages
def parseUrls(attachments):
urls = []
for i in attachments:
if i['type'] == 'photo':
maxHeight = 0
maxUrl = ""
for j in i['photo']['sizes']:
if j['height'] > maxHeight:
maxHeight = j['height']
maxUrl = j['url']
urls.append(maxUrl)
elif i['type'] == 'audio_message':
urls.append(i['audio_message']['link_mp3'])
elif i['type'] == 'sticker':
urls.append(i['sticker']['images'][0]['url'])
elif i['type'] == 'gift':
urls.append(i['gift']['thumb_48'])
elif i['type'] == 'link':
urls.append(f"Ссылка: {i['link']['title']}@{i['link']['url']}")
elif i['type'] == 'video':
urls.append(f"{i['video']['image'][0]['url']},{i['video']['player']},{i['video']['owner_id']}_{i['video']['id']}_{i['video']['access_key']}")
elif i['type'] == 'wall':
urls.append(f"Пост: {i['wall']['text'][:25]}@https://vk.com/wall{i['wall']['from_id']}_{i['wall']['id']}")
elif i['type'] == 'wall_reply':
urls.append(f"Комментарий: {i['wall_reply']['text'][:25]}@https://vk.com/wall{i['wall_reply']['owner_id']}_{i['wall_reply']['post_id']}?reply={i['wall_reply']['id']}")
elif i['type'] == 'audio':
urls.append(f"https://vk.com/audio?q={i['audio']['artist'].replace(' ', '%20')}%20-%20{i['audio']['title'].replace(' ', '%20')}&tab=global")
elif i['type'] == 'audio_playlist':
urls.append(f"Плейлист: {i['audio_playlist']['title']}@https://vk.com/music?z=audio_playlist{i['audio_playlist']['owner_id']}_{i['audio_playlist']['id']}/{i['audio_playlist']['access_key']}")
elif i['type'] == 'market':
urls.append(f"https://vk.com/market?w=product{i['market']['owner_id']}_{i['market']['id']}")
elif i['type'] == 'poll':
urls.append(f"Голосование: {i['poll']['question'][:25]}@https://vk.com/poll{i['poll']['owner_id']}_{i['poll']['id']}")
elif i['type'] == 'doc':
urls.append(f"Документ: {i['doc']['title']}@{i['doc']['url']}")
else:
if 'url' in i[i['type']]:
urls.append(i[i['type']]['url'])
if urls == []:
return None
return urls
def getPeerName(id):
if id > 2000000000:
cursor.execute("""SELECT chat_name FROM chats_cache WHERE chat_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
try:
name = tryAgainIfFailed(
vk.messages.getChat,
chat_id=id-2000000000
)['title']
cursor.execute("""INSERT INTO chats_cache (chat_id,chat_name) VALUES (?,?)""", (id, name,))
conn.commit()
except Warning:
name = "Секретный чат, используйте токен другого приложения"
else:
name = fetch[0]
elif id < 0:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.groups.getById,
group_id=-id
)[0]['name']
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
else:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.users.get,
user_id=id
)[0]
name = f"{name['first_name']} {name['last_name']}"
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
return name
def fwdParse(fwd):
html = """<table class="table table-sm table-bordered">
"""
for i in fwd:
user_name = getPeerName(i['from_id'])
if i['from_id'] < 0:
html += """ <tr>
<td>
<a href='https://vk.com/public{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(-i['from_id'], user_name)
else:
html += """ <tr>
<td>
<a href='https://vk.com/id{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(i['from_id'], user_name)
if i['text'] != "":
html += """ <tr>
<td>
<div class='mes'>
{}
</div>
""".format(xssFilter(i['text']))
else:
html += """ <tr>
<td>
"""
if i['attachments'] != []:
html += attachmentsParse(parseUrls(i['attachments']))
if 'fwd_messages' in i:
html += fwdParse(i['fwd_messages'])
elif 'reply_message' in i:
html += fwdParse([i['reply_message']])
html += """ </td>
</tr>
<tr>
<td>
{}
</td>
</tr>
""".format(time.strftime('%H:%M:%S %d.%m.%y', time.localtime(i['date'])))
html += "</table>"
return html
def xssFilter(s):
return s\
        .replace('<', '&lt;')\
        .replace('>', '&gt;')\
.replace('\n', '<br />')
def compareStrings(a, b):
aCounter = 0
bCounter = 0
for i in difflib.SequenceMatcher(None, a, b).get_opcodes():
if i[0] == 'insert':
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
bCounter += 11
elif i[0] == 'delete':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
aCounter += 11
elif i[0] == 'replace':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
aCounter += 11
bCounter += 11
return a, b
def activityReport(message_id, peer_id=None, user_id=None, timestamp=None, isEdited=False, attachments=None, fwd=None, message=None):
try:
peer_name = user_name = oldMessage = oldAttachments = date = oldFwd = None
cursor.execute("""SELECT * FROM messages WHERE message_id = ?""", (message_id,))
fetch = cursor.fetchone()
if attachments is not None:
attachments = parseUrls(json.loads(attachments))
if fwd is not None:
fwd = json.loads(fwd)
if fetch is None:
if isEdited:
logger.info("Изменение сообщения, отсутствующего в БД, message_id = %i.", message_id)
fetch = [0]*7
peer_name = getPeerName(peer_id)
user_name = getPeerName(user_id)
oldMessage = f"⚠️ {message}"
oldAttachments = attachments
oldFwd = fwd
date = f"<b>Доб:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime(timestamp))}<br /><b>Изм:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime())}"
else:
raise TypeError
else:
if fetch[3] is not None:
oldMessage = str(fetch[3])
if fetch[4] is not None:
oldAttachments = parseUrls(json.loads(fetch[4]))
if fetch[6] is not None:
oldFwd = json.loads(fetch[6])
peer_name = getPeerName(fetch[0])
user_name = getPeerName(fetch[1])
date = f"<b>Доб:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime(fetch[5]))}<br /><b>Изм:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime())}"
peer_id = fetch[0]
user_id = fetch[1]
del fetch
row = """ <tr><!-- {} -->
<td>{}
</td>
<td>{}
</td>
{}
<td>
{}
</td>
</tr>
"""
messageBlock = """
<div class='mes'>
{}
</div>"""
attachmentsBlock = """
<div>
<b>Вложения</b><br />
{}
</div>"""
fwdBlock = """
<div>
<b>Пересланное</b><br />
{}
</div>"""
if peer_id > 2000000000:
peer_id = """
<a href='https://vk.com/im?sel=c{}' target='_blank'>
{}
</a>""".format(str(peer_id-2000000000), peer_name)
elif peer_id < 0:
peer_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-peer_id), peer_name)
else:
peer_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(peer_id), peer_name)
if user_id < 0:
user_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-user_id), user_name)
else:
user_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(user_id), user_name)
if isEdited:
if not (oldMessage is None or message is None):
message = xssFilter(message)
oldMessage = xssFilter(oldMessage)
message, oldMessage = compareStrings(message, oldMessage)
oldMessage = messageBlock.format(oldMessage)
message = messageBlock.format(message)
elif oldMessage is None:
oldMessage = ""
message = messageBlock.format(xssFilter(message))
else:
oldMessage = messageBlock.format(xssFilter(oldMessage))
message = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
if attachments is not None:
attachments = attachmentsBlock.format(attachmentsParse(attachments))
else:
attachments = ""
if fwd is not None:
fwd = fwdBlock.format(fwdParse(fwd))
else:
fwd = ""
messageBlock = """<td width='50%'>
<b>Старое</b><br />{}
</td>
<td width='50%'>
<b>Новое</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd, message+attachments+fwd)
else:
if oldMessage is not None:
oldMessage = messageBlock.format(xssFilter(oldMessage))
else:
oldMessage = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
messageBlock = """<td width='100%' colspan='2'>
<b>Удалено</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd)
row = row.format(message_id, peer_id, user_id, messageBlock, date)
if os.path.exists(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y', time.localtime())}.html"
)
):
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'r',
encoding='utf-8'
)
messagesDump = messagesActivities.read()
messagesActivities.close()
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'w',
encoding='utf-8'
)
else:
messagesDump = template
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'w',
encoding='utf-8'
)
messagesDump = messagesDump[:offset]+row+messagesDump[offset:]
messagesActivities.write(messagesDump)
messagesActivities.close()
except TypeError:
raise TypeError
except BaseException:
logger.exception("Ошибка при логгировании изменений.")
if not config['disableMessagesLogging']:
tableWatcher = threading.Thread(target=bgWatcher)
tableWatcher.start()
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="./bootstrap.css">
</head>
<body>
<table class="table table-sm">
</table>
</body>
</html>"""
offset = template.index(""" </table>""")
events = []
flag = threading.Event()
def preloadMessages():
logger.info("Предзагрузка сообщений...")
offset = 0
peer_ids = []
messages = []
shouldContinue = True
try:
while shouldContinue:
shouldContinue = False
dialogs = tryAgainIfFailed(vk.messages.getConversations, offset=offset, count=20)
for i in range(0,len(dialogs['items'])):
if dialogs['items'][i]['last_message']['date'] >= time.time() - config['maxCacheAge']:
peer_ids.append(dialogs['items'][i]['conversation']['peer']['id'])
if i == len(dialogs['items']) - 1:
shouldContinue = True
offset+=20
for i in peer_ids:
offset = 0
if i > 2000000000:
count = 200
else:
count = 50
shouldContinue = True
while shouldContinue:
shouldContinue = False
mes = vk.messages.getHistory(offset=offset, count=count, peer_id=i)['items']
if mes[-1]['date']>= time.time() - config['maxCacheAge']:
shouldContinue = True
offset+=count
for j in mes:
if j['date'] >= time.time() - config['maxCacheAge']:
messages.append(j)
for i in messages:
message_id = i['id']
with stop_mutex:
cursor.execute("""SELECT message_id FROM messages WHERE message_id = ?""", (message_id,))
if cursor.fetchone() is not None:
continue
peer_id = i['peer_id']
user_id = i['from_id']
message = i['text']
timestamp = i['date']
fwd_messages = None
if 'reply_message' in i:
fwd_messages = json.dumps([i['reply_message']], ensure_ascii=False,)
elif i['fwd_messages'] != []:
fwd_messages = json.dumps(i['fwd_messages'], ensure_ascii=False,)
if i['attachments'] == []:
attachments = None
else:
attachments = json.dumps(i['attachments'], ensure_ascii=False,)
with stop_mutex:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(peer_id, user_id, message_id, message, attachments, timestamp, fwd_messages,)
)
conn.commit()
except BaseException:
logger.exception("Ошибка во время предзагрузки сообщений")
logger.info("Предзагрузка сообщений завершена.")
if config['customActions'] and config['disableMessagesLogging']:
threading.Thread(target=eventWorker_predefinedDisabled).start()
elif not config['disableMessagesLogging'] and not config['customActions']:
threading.Thread(target=eventWorker_customDisabled).start()
else:
threading.Thread(target=eventWorker).start()
if config['preloadMessages']:
threading.Thread(target=preloadMessages).start()
try:
tryAgainIfFailed(
main,
maxRetries=-1
)
except Warning:
pass
| 39.47042
| 2,443
| 0.512656
|
import logging
import logging.handlers
import sys
import os
import json
import sqlite3
import signal
import threading
import time
import difflib
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
import requests.exceptions
cwd = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
stream=sys.stdout,
level=logging.WARNING
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
os.path.join(cwd, 'log.txt'),
maxBytes=102400
)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.info("Запуск...")
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, requests.exceptions.RequestException):
return
elif issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Непойманное исключение.", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
defaultConfig = {
"ACCESS_TOKEN": "",
"createIndex": False,
"maxCacheAge": 86400,
"preloadMessages": False,
"customActions": False,
"disableMessagesLogging": False,
'enableFlaskWebServer': False,
'useAuth': False,
'users': {
'admin':'password'
},
'port': 8080,
'https': False,
'httpsPort': 8443,
'cert': [
os.path.join(cwd, "cert.pem"),
os.path.join(cwd, "key.pem")
]
}
def grab_token_from_args():
if len(sys.argv) > 1:
defaultConfig['ACCESS_TOKEN'] = sys.argv[1]
elif defaultConfig['ACCESS_TOKEN'] == "":
raise Exception("Не задан ACCESS_TOKEN")
if not os.path.exists(os.path.join(cwd, "config.json")):
with open(os.path.join(cwd, "config.json"), 'w') as conf:
grab_token_from_args()
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
else:
with open(os.path.join(cwd, "config.json"), 'r') as conf:
config = json.load(conf)
for i in config:
if i in defaultConfig:
defaultConfig[i] = config[i]
grab_token_from_args()
if len(set(config)) - len(set(defaultConfig)) != 0:
with open(os.path.join(cwd, "config.json"), 'w') as conf:
json.dump(defaultConfig, conf, indent=4)
config = defaultConfig
del defaultConfig
stop_mutex = threading.Lock()
def run_flask_server():
port = config['httpsPort'] if config['https'] else config['port']
import socket
ip = socket.gethostbyname(socket.gethostname())
del socket
while True:
try:
if config['https']:
logger.info("Trying to run on https://%s:%s/", ip, port)
app.run(
host='0.0.0.0',
port=port,
ssl_context=(
config['cert'][0],
config['cert'][1]
)
)
else:
logger.info("Trying to run on http://%s:%s/", ip, port)
app.run(host='0.0.0.0', port=port)
except OSError:
port += 1
if config['enableFlaskWebServer']:
from flaskWebServer import app
threading.Thread(target=run_flask_server).start()
if config['createIndex']:
from updateIndex import indexUpdater
indexUpdater()
def tryAgainIfFailed(func, *args, maxRetries=5, **kwargs):
c = maxRetries
delay = 1
while True:
try:
return func(*args, **kwargs)
except vk_api.exceptions.ApiError:
if str(sys.exc_info()[1]).find("User authorization failed") != -1:
logger.warning("Токен недействителен.")
interrupt_handler(0, None)
raise Warning
except requests.exceptions.RequestException:
if delay < 32:
delay*=2
time.sleep(delay)
continue
except BaseException:
if maxRetries == 0:
logger.exception("После %s попыток %s(%s%s) завершился с ошибкой.", c, func.__name__, args, kwargs)
raise Warning
logger.warning("Перезапуск %s(%s%s) через %s секунд...", func.__name__, args, kwargs, delay)
if delay < 32:
delay*=2
time.sleep(delay)
if maxRetries > 0:
maxRetries -= 1
continue
vk_session = vk_api.VkApi(token=config['ACCESS_TOKEN'],api_version='5.130')
longpoll = VkLongPoll(vk_session, wait=60, mode=2)
vk = vk_session.get_api()
account_id = tryAgainIfFailed(vk.users.get)[0]['id']
if not config['disableMessagesLogging']:
if not os.path.exists(
os.path.join(
cwd,
"mesAct"
)
):
os.makedirs(
os.path.join(
cwd,
"mesAct"
)
)
f = open(
os.path.join(
cwd,
"mesAct",
"vkGetVideoLink.html"
),
'w',
encoding='utf-8'
)
f.write("""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
html,body,iframe{
width: 100%;
height: 100%;
}
</style>
</head>
<body>
<p>Если видео не проигрывается, прямую ссылку можно получить через api:</p>
<script>
function embedLink(id) {
var link = document.createElement('a');
            link.href = "https://vk.com/dev/video.get?params[videos]=0_0," + id + "&params[count]=1&params[offset]=1";
link.innerText = id;
link.setAttribute('target', '_blank')
document.getElementsByTagName("body")[0].appendChild(link);
}
function embedPlayer(link) {
var frame = document.createElement('iframe');
frame.src = link;
frame.style = "width:100%;height:100%;";
frame.setAttribute('allowFullScreen', '')
document.getElementsByTagName("body")[0].appendChild(frame);
}
function splitArgs(){
var args = document.location.search;
var lastAmpersand = args.lastIndexOf('&');
return [args.slice(1, lastAmpersand), args.slice(lastAmpersand + 1)];
}
var args = splitArgs();
embedLink(args[1]);
embedPlayer(args[0]);
</script>
</body>
</html>""")
f.close()
if not os.path.exists(
os.path.join(
cwd,
"messages.db"
)
):
conn = sqlite3.connect(
os.path.join(
cwd,
"messages.db"
),
check_same_thread=False,
isolation_level=None,
timeout=15.0
)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE "messages" (
"peer_id" INTEGER NOT NULL,
"user_id" INTEGER NOT NULL,
"message_id" INTEGER NOT NULL UNIQUE,
"message" TEXT,
"attachments" TEXT,
"timestamp" INTEGER NOT NULL,
"fwd_messages" TEXT
)""")
cursor.execute("""CREATE TABLE "chats_cache" (
"chat_id" INTEGER NOT NULL UNIQUE,
"chat_name" TEXT NOT NULL
)""")
cursor.execute("""CREATE TABLE "users_cache" (
"user_id" INTEGER NOT NULL UNIQUE,
"user_name" TEXT NOT NULL
)""")
account_name = tryAgainIfFailed(
vk.users.get,
user_id=account_id
)[0]
account_name = f"{account_name['first_name']} {account_name['last_name']}"
cursor.execute(
"""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""",
(account_id, account_name,)
)
conn.commit()
else:
conn = sqlite3.connect(
os.path.join(cwd, "messages.db"),
check_same_thread=False,
timeout=15.0
)
cursor = conn.cursor()
if not os.path.exists(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
)
):
f = open(
os.path.join(
cwd,
"mesAct",
"bootstrap.css"
),
'w',
encoding='utf-8'
)
f.write(':root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}dl,ol,ul{margin-top:0;margin-bottom:1rem}b,strong{font-weight:bolder}a{color:#007bff;text-decoration:none;background-color:transparent}img{vertical-align:middle;border-style:none}table{border-collapse:collapse}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.list-group{display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item+.list-group-item{border-top-width:0}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:rgba(0,0,0,0)}.mes{word-break:break-all}img,a,audio{display:block}img{max-width:100%}')
f.close()
if config['customActions']:
from customActions import customActions
cust = customActions(vk, conn, cursor)
def bgWatcher():
while True:
maxCacheAge = config['maxCacheAge']
with stop_mutex:
logger.info("Обслуживание БД...")
try:
showMessagesWithDeletedAttachments()
except BaseException:
logger.exception("Ошибка при поиске удаленных фото")
try:
if maxCacheAge != -1:
cursor.execute(
"""DELETE FROM messages WHERE timestamp < ?""",
(time.time() - maxCacheAge,)
)
conn.commit()
cursor.execute("VACUUM")
else:
maxCacheAge = 86400
except BaseException:
logger.exception("Ошибка при очистке базы данных")
logger.info("Обслуживание БД завершено.")
time.sleep(maxCacheAge)
def interrupt_handler(signum, frame):
conn.commit()
cursor.close()
try:
tableWatcher.cancel()
except AttributeError:
pass
logger.info("Завершение...")
os._exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
signal.signal(signal.SIGTERM, interrupt_handler)
def eventWorker_predefinedDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
if len(events) == 0:
flag.clear()
def eventWorker_customDisabled():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
def eventWorker():
global events
while True:
flag.wait()
event = events.pop(0)
with stop_mutex:
try:
cust.act(event)
except BaseException:
logger.exception("Ошибка в customActions. \n %s", vars(event))
predefinedActions(event)
if len(events) == 0:
flag.clear()
conn.commit()
def predefinedActions(event):
try:
if event.type == VkEventType.MESSAGE_NEW:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_EDIT:
if event.message_data[0]:
activityReport(event.message_id, event.peer_id, event.user_id, event.timestamp, True, event.message_data[1], event.message_data[2], event.text)
cursor.execute(
"""INSERT or REPLACE INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(event.peer_id, event.user_id, event.message_id, event.message, event.message_data[1], event.timestamp, event.message_data[2],)
)
conn.commit()
elif event.type == VkEventType.MESSAGE_FLAGS_SET:
try:
activityReport(event.message_id)
cursor.execute(
"""DELETE FROM messages WHERE message_id = ?""",
(event.message_id,)
)
conn.commit()
except TypeError:
logger.info("Удаление невозможно, сообщение отсутствует в БД.")
except sqlite3.IntegrityError:
logger.warning("Запущено несколько копий программы, завершение...")
interrupt_handler(0, None)
except Warning:
pass
except BaseException:
logger.exception("Ошибка при сохранении сообщения. \n %s", vars(event))
def main():
logger.info("Запущен основной цикл.")
global events
for event in longpoll.listen():
try:
if event.raw[0] == 4 or event.raw[0] == 5:
if event.attachments != {}:
event.message_data = getAttachments(event)
else:
event.message_data = True, None, None
if event.from_user and event.raw[2] & 2:
event.user_id = account_id
elif event.from_group:
if event.from_me:
event.user_id = account_id
else:
event.user_id = event.peer_id
if not event.message:
event.message = None
events.append(event)
flag.set()
elif event.raw[0] == 2 and (event.raw[2] & 131072 or event.raw[2] & 128):
events.append(event)
flag.set()
except Warning:
pass
except BaseException:
logger.exception("Ошибка при добавлении события в очередь. \n %s", vars(event))
def showMessagesWithDeletedAttachments():
cursor.execute("""SELECT message_id, attachments FROM messages WHERE attachments IS NOT NULL""")
fetch_attachments = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
cursor.execute("""SELECT message_id, fwd_messages FROM messages WHERE fwd_messages IS NOT NULL""")
fetch_fwd = [[str(i[0]), json.loads(i[1])] for i in cursor.fetchall()]
c = 0
for i in range(len(fetch_attachments)):
for j in fetch_attachments[i - c][1]:
if j['type'] == 'photo' or j['type'] == 'video' or j['type'] == 'doc':
break
else:
del fetch_attachments[i - c]
c += 1
messages_attachments = []
messages_fwd = []
for i in [[j[0] for j in fetch_attachments[i:i + 100]] for i in range(0, len(fetch_attachments), 100)]:
messages_attachments.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
for i in [[j[0] for j in fetch_fwd[i:i + 100]] for i in range(0, len(fetch_fwd), 100)]:
messages_fwd.extend(tryAgainIfFailed(
vk.messages.getById,
message_ids=','.join(i))['items']
)
c = 0
for i in range(len(fetch_attachments)):
if compareAttachments(messages_attachments[i - c]['attachments'], fetch_attachments[i - c][1]):
del fetch_attachments[i - c]
del messages_attachments[i - c]
c += 1
for i in range(len(fetch_attachments)):
activityReport(fetch_attachments[i][0])
if messages_attachments[i]['attachments'] == []:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(None, fetch_attachments[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET attachments = ? WHERE message_id = ?""",
(
json.dumps(messages_attachments[i]['attachments']),
fetch_attachments[i][0],
)
)
c = 0
for i in range(len(fetch_fwd)):
if compareFwd(
messages_fwd[i - c],
{
'fwd_messages': fetch_fwd[i - c][1]
}
):
del fetch_fwd[i - c]
del messages_fwd[i - c]
c += 1
for i in range(len(fetch_fwd)):
activityReport(fetch_fwd[i][0])
if messages_fwd[i]['fwd_messages'] == []:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(None, fetch_fwd[i][0],)
)
else:
cursor.execute(
"""UPDATE messages SET fwd_messages = ? WHERE message_id = ?""",
(
json.dumps(messages_fwd[i]['fwd_messages']),
fetch_fwd[i][0],
)
)
conn.commit()
def compareFwd(new, old):
if 'reply_message' in new:
new['fwd_messages'] = [new['reply_message']]
if 'reply_message' in old:
old['fwd_messages'] = [old['reply_message']]
for i in range(len(old['fwd_messages'])):
if 'fwd_messages' in old['fwd_messages'][i] and 'fwd_messages' in new['fwd_messages'][i]:
if not compareFwd(
new['fwd_messages'][i],
old['fwd_messages'][i]
):
return False
if not compareAttachments(
new['fwd_messages'][i]['attachments'],
old['fwd_messages'][i]['attachments']
):
return False
return True
def compareAttachments(new, old):
if len(new) < len(old):
return False
return True
def attachmentsParse(urls):
if urls is None:
return ""
html = """<div>
"""
for i in urls:
urlSplit = i.split(',')
if i.find('vk.com/sticker/') != -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.jpg') != -1 and i.find(',') == -1:
html += """ <img src="{}" />
""".format(i)
elif i.find('.mp3') != -1:
html += """ <audio src="{}" controls></audio>
""".format(i)
elif i.find('https://vk.com/audio') != -1:
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i, i[23:-11].replace('%20', ' '))
elif i.find('@') != -1:
i = i.rsplit('@', 1)
html += """ <a href="{}" target="_blank">
{}
</a>
""".format(i[1], i[0])
elif len(urlSplit) == 3:
html += """ <a href="{}" target="_blank">
Видео
<img src="{}"/>
</a>
""".format(f"./vkGetVideoLink.html?{urlSplit[1]}&{urlSplit[2]}", urlSplit[0])
else:
html += """ <a href="{0}" target="_blank">
{0}
</a>
""".format(i)
html += """</div>"""
return html
def getAttachments(event):
message_id = event.message_id
fullLoadUnNeeded = not (event.raw[0] == 5 or 'fwd' in event.attachments)
count = 0
if fullLoadUnNeeded:
for i in range(1,11):
if f'attach{i}_type' in event.attachments:
if event.attachments[f'attach{i}_type'] not in ('sticker', 'link'):
fullLoadUnNeeded = False
else:
count = i
break
if fullLoadUnNeeded:
attachments = []
for i in range(1,count):
if event.attachments[f'attach{i}_type'] == 'sticker':
attachments.append({'type':'sticker','sticker':{'images':[{'height':64,'url':f'https://vk.com/sticker/1-{event.attachments[f"attach{i}"]}-64'}]}})
else:
if f'attach{i}_title' in event.attachments:
title = event.attachments[f'attach{i}_title']
else:
title = event.attachments[f'attach{i}_url']
attachments.append({'type':'link','link':{'title':title,'url':event.attachments[f'attach{i}_url']}})
return False, json.dumps(attachments, ensure_ascii=False,), None
mes = tryAgainIfFailed(
vk.messages.getById,
message_ids=message_id
)['items']
if not len(mes):
logger.info("Не удалось запросить вложения для сообщения, message_id = %i.", event.message_id)
return False, "[]", "[]"
else:
mes = mes[0]
hasUpdateTime = 'update_time' in mes
fwd_messages = None
if 'reply_message' in mes:
fwd_messages = json.dumps([mes['reply_message']], ensure_ascii=False,)
elif mes['fwd_messages'] != []:
fwd_messages = json.dumps(mes['fwd_messages'], ensure_ascii=False,)
if mes['attachments'] == []:
attachments = None
else:
attachments = json.dumps(mes['attachments'], ensure_ascii=False,)
return hasUpdateTime, attachments, fwd_messages
def parseUrls(attachments):
urls = []
for i in attachments:
if i['type'] == 'photo':
maxHeight = 0
maxUrl = ""
for j in i['photo']['sizes']:
if j['height'] > maxHeight:
maxHeight = j['height']
maxUrl = j['url']
urls.append(maxUrl)
elif i['type'] == 'audio_message':
urls.append(i['audio_message']['link_mp3'])
elif i['type'] == 'sticker':
urls.append(i['sticker']['images'][0]['url'])
elif i['type'] == 'gift':
urls.append(i['gift']['thumb_48'])
elif i['type'] == 'link':
urls.append(f"Ссылка: {i['link']['title']}@{i['link']['url']}")
elif i['type'] == 'video':
urls.append(f"{i['video']['image'][0]['url']},{i['video']['player']},{i['video']['owner_id']}_{i['video']['id']}_{i['video']['access_key']}")
elif i['type'] == 'wall':
urls.append(f"Пост: {i['wall']['text'][:25]}@https://vk.com/wall{i['wall']['from_id']}_{i['wall']['id']}")
elif i['type'] == 'wall_reply':
urls.append(f"Комментарий: {i['wall_reply']['text'][:25]}@https://vk.com/wall{i['wall_reply']['owner_id']}_{i['wall_reply']['post_id']}?reply={i['wall_reply']['id']}")
elif i['type'] == 'audio':
urls.append(f"https://vk.com/audio?q={i['audio']['artist'].replace(' ', '%20')}%20-%20{i['audio']['title'].replace(' ', '%20')}&tab=global")
elif i['type'] == 'audio_playlist':
urls.append(f"Плейлист: {i['audio_playlist']['title']}@https://vk.com/music?z=audio_playlist{i['audio_playlist']['owner_id']}_{i['audio_playlist']['id']}/{i['audio_playlist']['access_key']}")
elif i['type'] == 'market':
urls.append(f"https://vk.com/market?w=product{i['market']['owner_id']}_{i['market']['id']}")
elif i['type'] == 'poll':
urls.append(f"Голосование: {i['poll']['question'][:25]}@https://vk.com/poll{i['poll']['owner_id']}_{i['poll']['id']}")
elif i['type'] == 'doc':
urls.append(f"Документ: {i['doc']['title']}@{i['doc']['url']}")
else:
if 'url' in i[i['type']]:
urls.append(i[i['type']]['url'])
if urls == []:
return None
return urls
def getPeerName(id):
if id > 2000000000:
cursor.execute("""SELECT chat_name FROM chats_cache WHERE chat_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
try:
name = tryAgainIfFailed(
vk.messages.getChat,
chat_id=id-2000000000
)['title']
cursor.execute("""INSERT INTO chats_cache (chat_id,chat_name) VALUES (?,?)""", (id, name,))
conn.commit()
except Warning:
name = "Секретный чат, используйте токен другого приложения"
else:
name = fetch[0]
elif id < 0:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.groups.getById,
group_id=-id
)[0]['name']
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
else:
cursor.execute("""SELECT user_name FROM users_cache WHERE user_id = ?""", (id,))
fetch = cursor.fetchone()
if fetch is None:
name = tryAgainIfFailed(
vk.users.get,
user_id=id
)[0]
name = f"{name['first_name']} {name['last_name']}"
cursor.execute("""INSERT INTO users_cache (user_id,user_name) VALUES (?,?)""", (id, name,))
conn.commit()
else:
name = fetch[0]
return name
def fwdParse(fwd):
html = """<table class="table table-sm table-bordered">
"""
for i in fwd:
user_name = getPeerName(i['from_id'])
if i['from_id'] < 0:
html += """ <tr>
<td>
<a href='https://vk.com/public{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(-i['from_id'], user_name)
else:
html += """ <tr>
<td>
<a href='https://vk.com/id{}' target="_blank">
{}
</a>
</td>
</tr>
""".format(i['from_id'], user_name)
if i['text'] != "":
html += """ <tr>
<td>
<div class='mes'>
{}
</div>
""".format(xssFilter(i['text']))
else:
html += """ <tr>
<td>
"""
if i['attachments'] != []:
html += attachmentsParse(parseUrls(i['attachments']))
if 'fwd_messages' in i:
html += fwdParse(i['fwd_messages'])
elif 'reply_message' in i:
html += fwdParse([i['reply_message']])
html += """ </td>
</tr>
<tr>
<td>
{}
</td>
</tr>
""".format(time.strftime('%H:%M:%S %d.%m.%y', time.localtime(i['date'])))
html += "</table>"
return html
def xssFilter(s):
return s\
        .replace('<', '&lt;')\
        .replace('>', '&gt;')\
.replace('\n', '<br />')
def compareStrings(a, b):
aCounter = 0
bCounter = 0
for i in difflib.SequenceMatcher(None, a, b).get_opcodes():
if i[0] == 'insert':
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
bCounter += 11
elif i[0] == 'delete':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
aCounter += 11
elif i[0] == 'replace':
a = f"{a[: i[1]+aCounter]}<ins>{a[i[1]+aCounter : i[2]+aCounter]}</ins>{a[i[2]+aCounter:]}"
b = f"{b[: i[3]+bCounter]}<ins>{b[i[3]+bCounter : i[4]+bCounter]}</ins>{b[i[4]+bCounter:]}"
aCounter += 11
bCounter += 11
return a, b
def activityReport(message_id, peer_id=None, user_id=None, timestamp=None, isEdited=False, attachments=None, fwd=None, message=None):
try:
peer_name = user_name = oldMessage = oldAttachments = date = oldFwd = None
cursor.execute("""SELECT * FROM messages WHERE message_id = ?""", (message_id,))
fetch = cursor.fetchone()
if attachments is not None:
attachments = parseUrls(json.loads(attachments))
if fwd is not None:
fwd = json.loads(fwd)
if fetch is None:
if isEdited:
logger.info("Изменение сообщения, отсутствующего в БД, message_id = %i.", message_id)
fetch = [0]*7
peer_name = getPeerName(peer_id)
user_name = getPeerName(user_id)
oldMessage = f"⚠️ {message}"
oldAttachments = attachments
oldFwd = fwd
date = f"<b>Доб:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime(timestamp))}<br /><b>Изм:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime())}"
else:
raise TypeError
else:
if fetch[3] is not None:
oldMessage = str(fetch[3])
if fetch[4] is not None:
oldAttachments = parseUrls(json.loads(fetch[4]))
if fetch[6] is not None:
oldFwd = json.loads(fetch[6])
peer_name = getPeerName(fetch[0])
user_name = getPeerName(fetch[1])
date = f"<b>Доб:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime(fetch[5]))}<br /><b>Изм:</b> {time.strftime('%H:%M:%S %d.%m', time.localtime())}"
peer_id = fetch[0]
user_id = fetch[1]
del fetch
row = """ <tr><!-- {} -->
<td>{}
</td>
<td>{}
</td>
{}
<td>
{}
</td>
</tr>
"""
messageBlock = """
<div class='mes'>
{}
</div>"""
attachmentsBlock = """
<div>
<b>Вложения</b><br />
{}
</div>"""
fwdBlock = """
<div>
<b>Пересланное</b><br />
{}
</div>"""
if peer_id > 2000000000:
peer_id = """
<a href='https://vk.com/im?sel=c{}' target='_blank'>
{}
</a>""".format(str(peer_id-2000000000), peer_name)
elif peer_id < 0:
peer_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-peer_id), peer_name)
else:
peer_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(peer_id), peer_name)
if user_id < 0:
user_id = """
<a href='https://vk.com/public{}' target='_blank'>
{}
</a>""".format(str(-user_id), user_name)
else:
user_id = """
<a href='https://vk.com/id{}' target='_blank'>
{}
</a>""".format(str(user_id), user_name)
if isEdited:
if not (oldMessage is None or message is None):
message = xssFilter(message)
oldMessage = xssFilter(oldMessage)
message, oldMessage = compareStrings(message, oldMessage)
oldMessage = messageBlock.format(oldMessage)
message = messageBlock.format(message)
elif oldMessage is None:
oldMessage = ""
message = messageBlock.format(xssFilter(message))
else:
oldMessage = messageBlock.format(xssFilter(oldMessage))
message = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
if attachments is not None:
attachments = attachmentsBlock.format(attachmentsParse(attachments))
else:
attachments = ""
if fwd is not None:
fwd = fwdBlock.format(fwdParse(fwd))
else:
fwd = ""
messageBlock = """<td width='50%'>
                            <b>Old</b><br />{}
</td>
<td width='50%'>
                            <b>New</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd, message+attachments+fwd)
else:
if oldMessage is not None:
oldMessage = messageBlock.format(xssFilter(oldMessage))
else:
oldMessage = ""
if oldAttachments is not None:
oldAttachments = attachmentsBlock.format(attachmentsParse(oldAttachments))
else:
oldAttachments = ""
if oldFwd is not None:
oldFwd = fwdBlock.format(fwdParse(oldFwd))
else:
oldFwd = ""
messageBlock = """<td width='100%' colspan='2'>
                            <b>Deleted</b><br />{}
</td>""".format(oldMessage+oldAttachments+oldFwd)
row = row.format(message_id, peer_id, user_id, messageBlock, date)
if os.path.exists(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y', time.localtime())}.html"
)
):
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'r',
encoding='utf-8'
)
messagesDump = messagesActivities.read()
messagesActivities.close()
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'w',
encoding='utf-8'
)
else:
messagesDump = template
messagesActivities = open(
os.path.join(
cwd,
"mesAct",
f"messages_{time.strftime('%d%m%y',time.localtime())}.html"
),
'w',
encoding='utf-8'
)
messagesDump = messagesDump[:offset]+row+messagesDump[offset:]
messagesActivities.write(messagesDump)
messagesActivities.close()
    except TypeError:
        raise
except BaseException:
logger.exception("Ошибка при логгировании изменений.")
if not config['disableMessagesLogging']:
tableWatcher = threading.Thread(target=bgWatcher)
tableWatcher.start()
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="./bootstrap.css">
</head>
<body>
<table class="table table-sm">
</table>
</body>
</html>"""
offset = template.index(""" </table>""")
events = []
flag = threading.Event()
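# Warm the local cache: pull recent dialogs and their message history from the VK API
# into the messages table, skipping anything older than maxCacheAge.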
def preloadMessages():
logger.info("Предзагрузка сообщений...")
offset = 0
peer_ids = []
messages = []
shouldContinue = True
try:
while shouldContinue:
shouldContinue = False
dialogs = tryAgainIfFailed(vk.messages.getConversations, offset=offset, count=20)
            for i in range(0, len(dialogs['items'])):
if dialogs['items'][i]['last_message']['date'] >= time.time() - config['maxCacheAge']:
peer_ids.append(dialogs['items'][i]['conversation']['peer']['id'])
if i == len(dialogs['items']) - 1:
shouldContinue = True
            offset += 20
for i in peer_ids:
offset = 0
if i > 2000000000:
count = 200
else:
count = 50
shouldContinue = True
while shouldContinue:
shouldContinue = False
                mes = vk.messages.getHistory(offset=offset, count=count, peer_id=i)['items']
                if mes and mes[-1]['date'] >= time.time() - config['maxCacheAge']:
                    shouldContinue = True
                    offset += count
for j in mes:
if j['date'] >= time.time() - config['maxCacheAge']:
messages.append(j)
for i in messages:
message_id = i['id']
with stop_mutex:
cursor.execute("""SELECT message_id FROM messages WHERE message_id = ?""", (message_id,))
if cursor.fetchone() is not None:
continue
peer_id = i['peer_id']
user_id = i['from_id']
message = i['text']
timestamp = i['date']
fwd_messages = None
if 'reply_message' in i:
fwd_messages = json.dumps([i['reply_message']], ensure_ascii=False,)
elif i['fwd_messages'] != []:
fwd_messages = json.dumps(i['fwd_messages'], ensure_ascii=False,)
if i['attachments'] == []:
attachments = None
else:
attachments = json.dumps(i['attachments'], ensure_ascii=False,)
with stop_mutex:
cursor.execute(
"""INSERT INTO messages(peer_id,user_id,message_id,message,attachments,timestamp,fwd_messages) VALUES (?,?,?,?,?,?,?)""",
(peer_id, user_id, message_id, message, attachments, timestamp, fwd_messages,)
)
conn.commit()
except BaseException:
logger.exception("Ошибка во время предзагрузки сообщений")
logger.info("Предзагрузка сообщений завершена.")
if config['customActions'] and config['disableMessagesLogging']:
threading.Thread(target=eventWorker_predefinedDisabled).start()
elif not config['disableMessagesLogging'] and not config['customActions']:
threading.Thread(target=eventWorker_customDisabled).start()
else:
threading.Thread(target=eventWorker).start()
if config['preloadMessages']:
threading.Thread(target=preloadMessages).start()
try:
tryAgainIfFailed(
main,
maxRetries=-1
)
except Warning:
pass
| true
| true
|
f702c7b2633322629d2c55aa4ffcdb1946ff6acb
| 946
|
py
|
Python
|
events.py
|
tilakchandlo/swing
|
f4aa10dd2858dfe85dc1d5c7077c883d2cf19d8d
|
[
"Apache-2.0"
] | 1
|
2021-07-05T10:18:30.000Z
|
2021-07-05T10:18:30.000Z
|
events.py
|
tilakchandlo/swing
|
f4aa10dd2858dfe85dc1d5c7077c883d2cf19d8d
|
[
"Apache-2.0"
] | null | null | null |
events.py
|
tilakchandlo/swing
|
f4aa10dd2858dfe85dc1d5c7077c883d2cf19d8d
|
[
"Apache-2.0"
] | 1
|
2021-04-29T11:08:59.000Z
|
2021-04-29T11:08:59.000Z
|
"""
Definition of events.
"""
from abc import ABC
EVENT_LOG = 'eLog' #Log Event
EVENT_MARKETDATA = 'eMarketData' #Pushing MarketData Event
EVENT_TRADE = 'eTrade' #Trade Event
EVENT_BUY = 'eBuy' #Buy Event
EVENT_SELL = 'eSell' #Sell Event
EVENT_CANCEL = 'eCancel' #Cancel Event
EVENT_POSITION = 'ePosition' #Position Query Event
EVENT_STATUS = 'eStatus' #Order Status Event
EVENT_ACCOUNT = 'eAccount' #Account Query Event
EVENT_PROFIT_CHANGED = 'eProfitChanged' #Profit Event
class StrategyEvent:
def __init__(self, type_=None, even_param_=None):
self.type_ = type_
self.even_param_ = even_param_
def clear(self):
"""
Delete unreferenced source.
"""
self.even_param_.clear()
class EventEngine(ABC):
pass
| 27.028571
| 68
| 0.571882
|
from abc import ABC
EVENT_LOG = 'eLog'
EVENT_MARKETDATA = 'eMarketData'
EVENT_TRADE = 'eTrade'
EVENT_BUY = 'eBuy'
EVENT_SELL = 'eSell'
EVENT_CANCEL = 'eCancel'
EVENT_POSITION = 'ePosition'
EVENT_STATUS = 'eStatus'
EVENT_ACCOUNT = 'eAccount'
EVENT_PROFIT_CHANGED = 'eProfitChanged'
class StrategyEvent:
def __init__(self, type_=None, even_param_=None):
self.type_ = type_
self.even_param_ = even_param_
def clear(self):
self.even_param_.clear()
class EventEngine(ABC):
pass
| true
| true
|
f702ca819293ff5b6e420a06411eaf1637cfb437
| 5,235
|
py
|
Python
|
web/addons/sale_stock/res_config.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/sale_stock/res_config.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/sale_stock/res_config.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders',
implied_group='sale_stock.group_invoice_deli_orders',
help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."),
'task_work': fields.boolean("Prepare invoices based on task's activities",
help='Lets you transfer the entries under tasks defined for Project Management to '
'the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways '
'and to automatically creates project tasks from procurement lines.\n'
'-This installs the modules project_timesheet and sale_service.'),
'default_order_policy': fields.selection(
[('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')],
'The default invoicing method is', default_model='sale.order',
help="You can generate invoices based on sales orders or based on shippings."),
'module_delivery': fields.boolean('Allow adding shipping costs',
help='Allows you to add delivery methods in sales orders and delivery orders.\n'
'You can define your own carrier and delivery grids for prices.\n'
'-This installs the module delivery.'),
        'default_picking_policy': fields.boolean("Deliver all at once when all products are available.",
            help="Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."),
'group_mrp_properties': fields.boolean('Product properties on order lines',
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties."),
'module_project_timesheet': fields.boolean("Project Timesheet"),
'module_sale_service': fields.boolean("Sale Service"),
'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... on sales order lines',
implied_group='sale_stock.group_route_so_lines',
help="Allows you to choose a delivery route on sales order lines"),
}
_defaults = {
'default_order_policy': 'manual',
}
def default_get(self, cr, uid, fields, context=None):
res = super(sale_configuration, self).default_get(cr, uid, fields, context)
# task_work, time_unit depend on other fields
res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet')
return res
def get_default_sale_config(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy')
return {
'default_picking_policy': default_picking_policy == 'one',
}
def set_sale_defaults(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
wizard = self.browse(cr, uid, ids)[0]
default_picking_policy = 'one' if wizard.default_picking_policy else 'direct'
ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy)
res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context)
return res
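    # Keep the two invoicing options mutually consistent by switching the default order policy.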
def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None):
if not group_invoice_deli_orders:
return {'value': {'default_order_policy': 'manual'}}
if not group_invoice_so_lines:
return {'value': {'default_order_policy': 'picking'}}
return {}
| 56.290323
| 200
| 0.670487
|
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders',
implied_group='sale_stock.group_invoice_deli_orders',
help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."),
'task_work': fields.boolean("Prepare invoices based on task's activities",
help='Lets you transfer the entries under tasks defined for Project Management to '
'the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways '
'and to automatically creates project tasks from procurement lines.\n'
'-This installs the modules project_timesheet and sale_service.'),
'default_order_policy': fields.selection(
[('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')],
'The default invoicing method is', default_model='sale.order',
help="You can generate invoices based on sales orders or based on shippings."),
'module_delivery': fields.boolean('Allow adding shipping costs',
help='Allows you to add delivery methods in sales orders and delivery orders.\n'
'You can define your own carrier and delivery grids for prices.\n'
'-This installs the module delivery.'),
        'default_picking_policy': fields.boolean("Deliver all at once when all products are available.",
            help="Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."),
'group_mrp_properties': fields.boolean('Product properties on order lines',
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties."),
'module_project_timesheet': fields.boolean("Project Timesheet"),
'module_sale_service': fields.boolean("Sale Service"),
'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... on sales order lines',
implied_group='sale_stock.group_route_so_lines',
help="Allows you to choose a delivery route on sales order lines"),
}
_defaults = {
'default_order_policy': 'manual',
}
def default_get(self, cr, uid, fields, context=None):
res = super(sale_configuration, self).default_get(cr, uid, fields, context)
# task_work, time_unit depend on other fields
res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet')
return res
def get_default_sale_config(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy')
return {
'default_picking_policy': default_picking_policy == 'one',
}
def set_sale_defaults(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
wizard = self.browse(cr, uid, ids)[0]
default_picking_policy = 'one' if wizard.default_picking_policy else 'direct'
ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy)
res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context)
return res
def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None):
if not group_invoice_deli_orders:
return {'value': {'default_order_policy': 'manual'}}
if not group_invoice_so_lines:
return {'value': {'default_order_policy': 'picking'}}
return {}
| true
| true
|
f702caa371c248da66937b8521efe91e4540f538
| 3,164
|
py
|
Python
|
tests/test_preprocessing.py
|
liuyigh/CITE-seq-Count
|
5d03e382468fb28187dc15ee1d612dacaac52246
|
[
"MIT"
] | null | null | null |
tests/test_preprocessing.py
|
liuyigh/CITE-seq-Count
|
5d03e382468fb28187dc15ee1d612dacaac52246
|
[
"MIT"
] | null | null | null |
tests/test_preprocessing.py
|
liuyigh/CITE-seq-Count
|
5d03e382468fb28187dc15ee1d612dacaac52246
|
[
"MIT"
] | null | null | null |
import pytest
import io
from cite_seq_count import preprocessing
@pytest.fixture
def data():
from collections import OrderedDict
from itertools import islice
# Test file paths
pytest.correct_whitelist_path = 'tests/test_data/whitelists/correct.csv'
pytest.correct_tags_path = 'tests/test_data/tags/correct.csv'
pytest.correct_R1_path = 'tests/test_data/fastq/correct_R1.fastq.gz'
pytest.correct_R2_path = 'tests/test_data/fastq/correct_R2.fastq.gz'
pytest.corrupt_R1_path = 'tests/test_data/fastq/corrupted_R1.fastq.gz'
pytest.corrupt_R2_path = 'tests/test_data/fastq/corrupted_R2.fastq.gz'
# Create some variables to compare to
pytest.correct_whitelist = set(['ACTGTTTTATTGGCCT','TTCATAAGGTAGGGAT'])
pytest.correct_tags = {
'AGGACCATCCAA':'CITE_LEN_12_1',
'ACATGTTACCGT':'CITE_LEN_12_2',
'AGCTTACTATCC':'CITE_LEN_12_3',
'TCGATAATGCGAGTACAA':'CITE_LEN_18_1',
'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2',
'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3',
'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1',
'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2',
'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3'}
pytest.correct_ordered_tags = OrderedDict({
'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1-TGTGACGTATTGCTAGCTAG',
'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2-ACTGTCTAACGGGTCAGTGC',
'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3-TATCACATCGGTGGATCCAT',
'TCGATAATGCGAGTACAA':'CITE_LEN_18_1-TCGATAATGCGAGTACAA',
'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2-GAGGCTGAGCTAGCTAGT',
'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3-GGCTGATGCTGACTGCTA',
'AGGACCATCCAA':'CITE_LEN_12_1-AGGACCATCCAA',
'ACATGTTACCGT':'CITE_LEN_12_2-ACATGTTACCGT',
'AGCTTACTATCC':'CITE_LEN_12_3-AGCTTACTATCC'})
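    # Expected read geometry: a 16 bp cell barcode (positions 0-15) followed by a 10 bp UMI (16-25).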
pytest.barcode_slice = slice(0, 16)
pytest.umi_slice = slice(16, 26)
pytest.barcode_umi_length = 26
@pytest.mark.dependency()
def test_parse_whitelist_csv(data):
assert preprocessing.parse_whitelist_csv(pytest.correct_whitelist_path, 16, 1) == (pytest.correct_whitelist,1)
@pytest.mark.dependency()
def test_parse_tags_csv(data):
assert preprocessing.parse_tags_csv(pytest.correct_tags_path) == pytest.correct_tags
@pytest.mark.dependency(depends=['test_parse_tags_csv'])
def test_check_tags(data):
assert preprocessing.check_tags(pytest.correct_tags, 5) == pytest.correct_ordered_tags
@pytest.mark.dependency(depends=['test_check_tags'])
def test_check_distance_too_big_between_tags(data):
with pytest.raises(SystemExit):
preprocessing.check_tags(pytest.correct_tags, 8)
@pytest.mark.dependency(depends=['test_parse_whitelist_csv'])
def test_check_barcodes_lengths(data):
assert preprocessing.check_barcodes_lengths(26, 1, 16, 17, 26) == (pytest.barcode_slice, pytest.umi_slice, pytest.barcode_umi_length)
@pytest.mark.dependency()
def test_get_n_lines(data):
assert preprocessing.get_n_lines(pytest.correct_R1_path) == (200 * 4)
@pytest.mark.dependency(depends=['test_get_n_lines'])
def test_get_n_lines_not_multiple_of_4(data):
with pytest.raises(SystemExit):
preprocessing.get_n_lines(pytest.corrupt_R1_path)
| 43.342466
| 137
| 0.760114
|
import pytest
import io
from cite_seq_count import preprocessing
@pytest.fixture
def data():
from collections import OrderedDict
from itertools import islice
pytest.correct_whitelist_path = 'tests/test_data/whitelists/correct.csv'
pytest.correct_tags_path = 'tests/test_data/tags/correct.csv'
pytest.correct_R1_path = 'tests/test_data/fastq/correct_R1.fastq.gz'
pytest.correct_R2_path = 'tests/test_data/fastq/correct_R2.fastq.gz'
pytest.corrupt_R1_path = 'tests/test_data/fastq/corrupted_R1.fastq.gz'
pytest.corrupt_R2_path = 'tests/test_data/fastq/corrupted_R2.fastq.gz'
pytest.correct_whitelist = set(['ACTGTTTTATTGGCCT','TTCATAAGGTAGGGAT'])
pytest.correct_tags = {
'AGGACCATCCAA':'CITE_LEN_12_1',
'ACATGTTACCGT':'CITE_LEN_12_2',
'AGCTTACTATCC':'CITE_LEN_12_3',
'TCGATAATGCGAGTACAA':'CITE_LEN_18_1',
'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2',
'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3',
'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1',
'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2',
'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3'}
pytest.correct_ordered_tags = OrderedDict({
'TGTGACGTATTGCTAGCTAG':'CITE_LEN_20_1-TGTGACGTATTGCTAGCTAG',
'ACTGTCTAACGGGTCAGTGC':'CITE_LEN_20_2-ACTGTCTAACGGGTCAGTGC',
'TATCACATCGGTGGATCCAT':'CITE_LEN_20_3-TATCACATCGGTGGATCCAT',
'TCGATAATGCGAGTACAA':'CITE_LEN_18_1-TCGATAATGCGAGTACAA',
'GAGGCTGAGCTAGCTAGT':'CITE_LEN_18_2-GAGGCTGAGCTAGCTAGT',
'GGCTGATGCTGACTGCTA':'CITE_LEN_18_3-GGCTGATGCTGACTGCTA',
'AGGACCATCCAA':'CITE_LEN_12_1-AGGACCATCCAA',
'ACATGTTACCGT':'CITE_LEN_12_2-ACATGTTACCGT',
'AGCTTACTATCC':'CITE_LEN_12_3-AGCTTACTATCC'})
pytest.barcode_slice = slice(0, 16)
pytest.umi_slice = slice(16, 26)
pytest.barcode_umi_length = 26
@pytest.mark.dependency()
def test_parse_whitelist_csv(data):
assert preprocessing.parse_whitelist_csv(pytest.correct_whitelist_path, 16, 1) == (pytest.correct_whitelist,1)
@pytest.mark.dependency()
def test_parse_tags_csv(data):
assert preprocessing.parse_tags_csv(pytest.correct_tags_path) == pytest.correct_tags
@pytest.mark.dependency(depends=['test_parse_tags_csv'])
def test_check_tags(data):
assert preprocessing.check_tags(pytest.correct_tags, 5) == pytest.correct_ordered_tags
@pytest.mark.dependency(depends=['test_check_tags'])
def test_check_distance_too_big_between_tags(data):
with pytest.raises(SystemExit):
preprocessing.check_tags(pytest.correct_tags, 8)
@pytest.mark.dependency(depends=['test_parse_whitelist_csv'])
def test_check_barcodes_lengths(data):
assert preprocessing.check_barcodes_lengths(26, 1, 16, 17, 26) == (pytest.barcode_slice, pytest.umi_slice, pytest.barcode_umi_length)
@pytest.mark.dependency()
def test_get_n_lines(data):
assert preprocessing.get_n_lines(pytest.correct_R1_path) == (200 * 4)
@pytest.mark.dependency(depends=['test_get_n_lines'])
def test_get_n_lines_not_multiple_of_4(data):
with pytest.raises(SystemExit):
preprocessing.get_n_lines(pytest.corrupt_R1_path)
| true
| true
|
f702cc5e6ea08c3e34d01f04c4c75d3ab18e6e75
| 4,384
|
py
|
Python
|
chess/python/chess_server.py
|
MrXisOnline/C-Program
|
9b95802a2d62f46f28039b5dae306d30296ecab0
|
[
"MIT"
] | null | null | null |
chess/python/chess_server.py
|
MrXisOnline/C-Program
|
9b95802a2d62f46f28039b5dae306d30296ecab0
|
[
"MIT"
] | null | null | null |
chess/python/chess_server.py
|
MrXisOnline/C-Program
|
9b95802a2d62f46f28039b5dae306d30296ecab0
|
[
"MIT"
] | null | null | null |
from game_data import *
from hosting import ServerHandler, ClientHandler
import json
board = [
["R", "K", "B", "Q", "E", "B", "K", "R"],
["P", "P", "P", "P", "P", "P", "P", "P"],
[" ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " "],
["P", "P", "P", "P", "P", "P", "P", "P"],
["R", "K", "B", "Q", "E", "B", "K", "R"]
]
pieces = Initiator()
pos_handler = PositionHandler(pieces[0]+pieces[1])
p1 = Player("white", pieces[0])
p2 = Player("black", pieces[1])
player_handler = PlayerHandler(p1, p2)
end = False
win_team = None
checkmate = False
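# Main game loop: white plays locally on the server console, black plays remotely over the socket.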
try:
try:
net = eval(input("Enter Server IP, Port to Host: "))
except KeyboardInterrupt:
exit()
if type(net[0]) == str and net[1] > 5000 and net[1] < 65000:
server = ServerHandler(*net)
DisplayBoard(board)
while True:
error_msg = ""
if player_handler.current.team == "white":
if checkmate:
error_msg = "You're in Checkmate"
print(player_handler.current.give_pieces_position())
try:
piece_pos = eval(input("Position of Piece: "))
piece_to_go = eval(input("Position To Go: "))
except KeyboardInterrupt:
break
if PositionChecks(piece_pos) and PositionChecks(piece_to_go):
piece = pos_handler.get_piece(piece_pos)
if piece == False or piece.team != player_handler.current.team:
error_msg = "Piece Position is Incorrect"
else:
check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler)
if check:
board = n_board
if piece != " ":
pieces[2].append(piece)
player_handler.remove_piece(piece)
pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces)
end, lose_player = player_handler.game_end()
checkmate = player_handler.checkmate(board, pos_handler)
player_handler.change_player()
else:
error_msg = "Bad Position"
else:
error_msg = "Bad Position"
clear_screen()
DisplayBoard(board)
print(error_msg)
                if end:
                    win_team = "white" if lose_player.team == "black" else "black"
                    break
else:
if checkmate:
server.send_state(server.encode_state("", "", "You're in Checkmate"))
server.send_state(server.encode_state(board, player_handler.current.give_pieces_position(), ""))
server.send_state("input")
pos_data = server.recv_inputs()
try:
pos_data = json.loads(pos_data)
print(pos_data)
piece_pos = tuple(pos_data["piece_pos"])
piece_to_go = tuple(pos_data["piece_to_go"])
if PositionChecks(piece_pos) and PositionChecks(piece_to_go):
piece = pos_handler.get_piece(piece_pos)
print(piece)
if piece == False or piece.team != player_handler.current.team:
server.send_state(server.encode_state("", "", "Piece Position is Incorrect"))
else:
check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler)
if check:
board = n_board
if piece != " ":
pieces[2].append(piece)
player_handler.remove_piece(piece)
pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces)
end, lose_player = player_handler.game_end()
checkmate = player_handler.checkmate(board, pos_handler)
player_handler.change_player()
server.send_state(server.encode_state(board, "", ""))
else:
server.send_state(server.encode_state("", "", "Bad Position"))
else:
server.send_state(server.encode_state("", "", "Bad Position"))
# clear_screen()
if end:
win_team = "white" if lose_player.team == "black" else "black"
break
clear_screen()
DisplayBoard(board)
except json.decoder.JSONDecodeError:
pass
server.send_state(server.encode_state("", "", f"{win_team} Won The Match"))
server.close_conn("end")
else:
print("[-] IP/Port is not Correctly Specified as rules.")
print("[-] Ip should be like \"127.0.0.1\" and Port Should be Between 5000 and 65000")
print("[-] Enter both like this \"127.0.0.1\", 9999")
print("[-] Do It Correctly Next Time Bitch :]")
except ConnectionResetError:
print("Client Disconnected")
except SyntaxError:
server.close_conn("end")
print("Syntax Error")
| 36.840336
| 100
| 0.627053
|
from game_data import *
from hosting import ServerHandler, ClientHandler
import json
board = [
["R", "K", "B", "Q", "E", "B", "K", "R"],
["P", "P", "P", "P", "P", "P", "P", "P"],
[" ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " "],
["P", "P", "P", "P", "P", "P", "P", "P"],
["R", "K", "B", "Q", "E", "B", "K", "R"]
]
pieces = Initiator()
pos_handler = PositionHandler(pieces[0]+pieces[1])
p1 = Player("white", pieces[0])
p2 = Player("black", pieces[1])
player_handler = PlayerHandler(p1, p2)
end = False
win_team = None
checkmate = False
try:
try:
net = eval(input("Enter Server IP, Port to Host: "))
except KeyboardInterrupt:
exit()
if type(net[0]) == str and net[1] > 5000 and net[1] < 65000:
server = ServerHandler(*net)
DisplayBoard(board)
while True:
error_msg = ""
if player_handler.current.team == "white":
if checkmate:
error_msg = "You're in Checkmate"
print(player_handler.current.give_pieces_position())
try:
piece_pos = eval(input("Position of Piece: "))
piece_to_go = eval(input("Position To Go: "))
except KeyboardInterrupt:
break
if PositionChecks(piece_pos) and PositionChecks(piece_to_go):
piece = pos_handler.get_piece(piece_pos)
if piece == False or piece.team != player_handler.current.team:
error_msg = "Piece Position is Incorrect"
else:
check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler)
if check:
board = n_board
if piece != " ":
pieces[2].append(piece)
player_handler.remove_piece(piece)
pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces)
end, lose_player = player_handler.game_end()
checkmate = player_handler.checkmate(board, pos_handler)
player_handler.change_player()
else:
error_msg = "Bad Position"
else:
error_msg = "Bad Position"
clear_screen()
DisplayBoard(board)
print(error_msg)
                if end:
                    win_team = "white" if lose_player.team == "black" else "black"
                    break
else:
if checkmate:
server.send_state(server.encode_state("", "", "You're in Checkmate"))
server.send_state(server.encode_state(board, player_handler.current.give_pieces_position(), ""))
server.send_state("input")
pos_data = server.recv_inputs()
try:
pos_data = json.loads(pos_data)
print(pos_data)
piece_pos = tuple(pos_data["piece_pos"])
piece_to_go = tuple(pos_data["piece_to_go"])
if PositionChecks(piece_pos) and PositionChecks(piece_to_go):
piece = pos_handler.get_piece(piece_pos)
print(piece)
if piece == False or piece.team != player_handler.current.team:
server.send_state(server.encode_state("", "", "Piece Position is Incorrect"))
else:
check, piece, n_board = player_handler.play_piece(piece, piece_to_go, board, pos_handler)
if check:
board = n_board
if piece != " ":
pieces[2].append(piece)
player_handler.remove_piece(piece)
pos_handler = PositionHandler(player_handler.player1.pieces + player_handler.player2.pieces)
end, lose_player = player_handler.game_end()
checkmate = player_handler.checkmate(board, pos_handler)
player_handler.change_player()
server.send_state(server.encode_state(board, "", ""))
else:
server.send_state(server.encode_state("", "", "Bad Position"))
else:
server.send_state(server.encode_state("", "", "Bad Position"))
if end:
win_team = "white" if lose_player.team == "black" else "black"
break
clear_screen()
DisplayBoard(board)
except json.decoder.JSONDecodeError:
pass
server.send_state(server.encode_state("", "", f"{win_team} Won The Match"))
server.close_conn("end")
else:
print("[-] IP/Port is not Correctly Specified as rules.")
print("[-] Ip should be like \"127.0.0.1\" and Port Should be Between 5000 and 65000")
print("[-] Enter both like this \"127.0.0.1\", 9999")
print("[-] Do It Correctly Next Time Bitch :]")
except ConnectionResetError:
print("Client Disconnected")
except SyntaxError:
server.close_conn("end")
print("Syntax Error")
| true
| true
|
f702ccf3a56618e39a544845aed829d512ad3ede
| 6,100
|
py
|
Python
|
pydifact/segments.py
|
mj0nez/pydifact
|
3833060d30a3ac5601ec14902d844655ca9b0fc4
|
[
"MIT"
] | null | null | null |
pydifact/segments.py
|
mj0nez/pydifact
|
3833060d30a3ac5601ec14902d844655ca9b0fc4
|
[
"MIT"
] | null | null | null |
pydifact/segments.py
|
mj0nez/pydifact
|
3833060d30a3ac5601ec14902d844655ca9b0fc4
|
[
"MIT"
] | null | null | null |
# Pydifact - a python edifact library
#
# Copyright (c) 2019 Christian González
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from typing import Union, List
from pydifact.api import EDISyntaxError, PluginMount
from pydifact.control import Characters
class SegmentProvider(metaclass=PluginMount):
"""This is a plugin mount point for Segment plugins which represent a certain EDIFACT Segment.
    Classes implementing this PluginMount should provide a ``tag`` attribute and implement ``__str__`` and ``validate``.
"""
def __str__(self):
"""Returns the user readable text representation of this segment."""
def validate(self) -> bool:
"""Validates the Segment."""
class Segment(SegmentProvider):
"""Represents a low-level segment of an EDI interchange.
    This class is used internally. Real-world specialized implementations should subclass Segment
    and provide the `tag` and `validate` attributes.
"""
# tag is not a class attribute in this case, as each Segment instance could have another tag.
__omitted__ = True
def __init__(self, tag: str, *elements: Union[str, List[str]]):
"""Create a new Segment instance.
:param str tag: The code/tag of the segment. Must not be empty.
:param list elements: The data elements for this segment, as (possibly empty) list.
"""
self.tag = tag
# The data elements for this segment.
# this is converted to a list (due to the fact that python creates a tuple
# when passing a variable arguments list to a method)
self.elements = list(elements)
def __str__(self) -> str:
"""Returns the Segment in Python list printout"""
return "'{tag}' EDI segment: {elements}".format(
tag=self.tag, elements=str(self.elements)
)
def __repr__(self) -> str:
return "{} segment: {}".format(self.tag, str(self.elements))
def __eq__(self, other) -> bool:
# FIXME the other way round too? isinstance(other, type(self))?
return (
isinstance(self, type(other))
and self.tag == other.tag
and list(self.elements) == list(other.elements)
)
def __getitem__(self, key):
return self.elements[key]
def __setitem__(self, key, value):
self.elements[key] = value
def validate(self) -> bool:
"""
Segment validation.
The Segment class is part of the lower level interfaces of pydifact.
So it assumes that the given parameters are correct, there is no validation done here.
However, in segments derived from this class, there should be validation.
:return: bool True if given tag and elements are a valid EDIFACT segment, False if not.
"""
# FIXME: there should be a way of returning an error message - WHICH kind of validation failed.
if not self.tag:
return False
return True
class EDIenergySegment(Segment):
def __init__(self, tag: str, *elements: Union[str, List[str]]):
super().__init__(tag, *elements)
def validate(self) -> bool:
if not super().validate():
return False
        # TODO add validation method for EDI@Energy
        return True
class SegmentFactory:
"""Factory for producing segments."""
characters = None
@staticmethod
def create_segment(
name: str, *elements: Union[str, List[str]], validate: bool = True
) -> Segment:
"""Create a new instance of the relevant class type.
:param name: The name of the segment
:param elements: The data elements for this segment
:param validate: bool if True, the created segment is validated before return
"""
if not SegmentFactory.characters:
SegmentFactory.characters = Characters()
# Basic segment type validation is done here.
# The more special validation must be done in the corresponding Segment
if not name:
raise EDISyntaxError("The tag of a segment must not be empty.")
if type(name) != str:
raise EDISyntaxError(
"The tag name of a segment must be a str, but is a {}: {}".format(
type(name), name
)
)
if not name.isalnum():
raise EDISyntaxError(
"Tag '{}': A tag name must only contain alphanumeric characters.".format(
name
)
)
for Plugin in SegmentProvider.plugins:
if getattr(Plugin, "tag", "") == name:
s = Plugin(name, *elements)
break
else:
# we don't support this kind of EDIFACT segment (yet), so
# just create a generic Segment()
s = Segment(name, *elements)
if validate:
if not s.validate():
raise EDISyntaxError(
"could not create '{}' Segment. Validation failed.".format(name)
)
# FIXME: characters is not used!
return s
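# Illustrative usage sketch (not part of the library itself), assuming no
# tag-specific plugin is registered for "UNH":
#     seg = SegmentFactory.create_segment("UNH", "1", ["UNOC", "3"])
#     print(seg)  # 'UNH' EDI segment: ['1', ['UNOC', '3']]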
| 35.465116
| 112
| 0.640656
|
from typing import Union, List
from pydifact.api import EDISyntaxError, PluginMount
from pydifact.control import Characters
class SegmentProvider(metaclass=PluginMount):
    def __str__(self):
        ...
    def validate(self) -> bool:
        ...
class Segment(SegmentProvider):
__omitted__ = True
def __init__(self, tag: str, *elements: Union[str, List[str]]):
self.tag = tag
self.elements = list(elements)
def __str__(self) -> str:
return "'{tag}' EDI segment: {elements}".format(
tag=self.tag, elements=str(self.elements)
)
def __repr__(self) -> str:
return "{} segment: {}".format(self.tag, str(self.elements))
def __eq__(self, other) -> bool:
return (
isinstance(self, type(other))
and self.tag == other.tag
and list(self.elements) == list(other.elements)
)
def __getitem__(self, key):
return self.elements[key]
def __setitem__(self, key, value):
self.elements[key] = value
def validate(self) -> bool:
if not self.tag:
return False
return True
class EDIenergySegment(Segment):
def __init__(self, tag: str, *elements: Union[str, List[str]]):
super().__init__(tag, *elements)
def validate(self) -> bool:
if not super().validate():
return False
        return True
class SegmentFactory:
characters = None
@staticmethod
def create_segment(
name: str, *elements: Union[str, List[str]], validate: bool = True
) -> Segment:
if not SegmentFactory.characters:
SegmentFactory.characters = Characters()
if not name:
raise EDISyntaxError("The tag of a segment must not be empty.")
if type(name) != str:
raise EDISyntaxError(
"The tag name of a segment must be a str, but is a {}: {}".format(
type(name), name
)
)
if not name.isalnum():
raise EDISyntaxError(
"Tag '{}': A tag name must only contain alphanumeric characters.".format(
name
)
)
for Plugin in SegmentProvider.plugins:
if getattr(Plugin, "tag", "") == name:
s = Plugin(name, *elements)
break
else:
# just create a generic Segment()
s = Segment(name, *elements)
if validate:
if not s.validate():
raise EDISyntaxError(
"could not create '{}' Segment. Validation failed.".format(name)
)
# FIXME: characters is not used!
return s
| true
| true
|
f702cd6d3b749291e05ca743af5b6d809f48a705
| 2,141
|
py
|
Python
|
common/realtime.py
|
wolterhv/openpilot
|
c189d15af9a613d8f109b39298c0ab3e22f39f6d
|
[
"MIT"
] | 171
|
2018-11-18T16:41:27.000Z
|
2022-03-15T06:58:04.000Z
|
common/realtime.py
|
wolterhv/openpilot
|
c189d15af9a613d8f109b39298c0ab3e22f39f6d
|
[
"MIT"
] | 41
|
2018-08-01T17:36:08.000Z
|
2020-12-16T02:42:57.000Z
|
common/realtime.py
|
wolterhv/openpilot
|
c189d15af9a613d8f109b39298c0ab3e22f39f6d
|
[
"MIT"
] | 378
|
2018-10-23T16:36:06.000Z
|
2022-03-11T08:59:51.000Z
|
"""Utilities for reading real time clocks and keeping soft real time constraints."""
import gc
import os
import time
import multiprocessing
from common.clock import sec_since_boot # pylint: disable=no-name-in-module, import-error
from selfdrive.hardware import PC, TICI
# time step for each process
DT_CTRL = 0.01 # controlsd
DT_MDL = 0.05 # model
DT_TRML = 0.5 # thermald and manager
# driver monitoring
if TICI:
DT_DMON = 0.05
else:
DT_DMON = 0.1
class Priority:
# CORE 2
# - modeld = 55
# - camerad = 54
CTRL_LOW = 51 # plannerd & radard
# CORE 3
# - boardd = 55
CTRL_HIGH = 53
def set_realtime_priority(level):
if not PC:
os.sched_setscheduler(0, os.SCHED_FIFO, os.sched_param(level))
def set_core_affinity(core):
if not PC:
os.sched_setaffinity(0, [core,])
def config_realtime_process(core, priority):
gc.disable()
set_realtime_priority(priority)
set_core_affinity(core)
class Ratekeeper():
def __init__(self, rate, print_delay_threshold=0.):
"""Rate in Hz for ratekeeping. print_delay_threshold must be nonnegative."""
self._interval = 1. / rate
self._next_frame_time = sec_since_boot() + self._interval
self._print_delay_threshold = print_delay_threshold
self._frame = 0
self._remaining = 0
self._process_name = multiprocessing.current_process().name
@property
def frame(self):
return self._frame
@property
def remaining(self):
return self._remaining
# Maintain loop rate by calling this at the end of each loop
def keep_time(self):
lagged = self.monitor_time()
if self._remaining > 0:
time.sleep(self._remaining)
return lagged
  # this only monitors the cumulative lag, but does not enforce a rate
def monitor_time(self):
lagged = False
remaining = self._next_frame_time - sec_since_boot()
self._next_frame_time += self._interval
if self._print_delay_threshold is not None and remaining < -self._print_delay_threshold:
print("%s lagging by %.2f ms" % (self._process_name, -remaining * 1000))
lagged = True
self._frame += 1
self._remaining = remaining
return lagged
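# Illustrative usage sketch (not part of this module; do_work is a placeholder):
# run a loop at 100 Hz and warn when it lags by more than 10 ms.
#   rk = Ratekeeper(rate=100, print_delay_threshold=0.01)
#   while True:
#     do_work()
#     rk.keep_time()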
| 24.895349
| 92
| 0.714152
|
import gc
import os
import time
import multiprocessing
from common.clock import sec_since_boot
from selfdrive.hardware import PC, TICI
DT_CTRL = 0.01
DT_MDL = 0.05
DT_TRML = 0.5
if TICI:
DT_DMON = 0.05
else:
DT_DMON = 0.1
class Priority:
CTRL_LOW = 51
CTRL_HIGH = 53
def set_realtime_priority(level):
if not PC:
os.sched_setscheduler(0, os.SCHED_FIFO, os.sched_param(level))
def set_core_affinity(core):
if not PC:
os.sched_setaffinity(0, [core,])
def config_realtime_process(core, priority):
gc.disable()
set_realtime_priority(priority)
set_core_affinity(core)
class Ratekeeper():
def __init__(self, rate, print_delay_threshold=0.):
self._interval = 1. / rate
self._next_frame_time = sec_since_boot() + self._interval
self._print_delay_threshold = print_delay_threshold
self._frame = 0
self._remaining = 0
self._process_name = multiprocessing.current_process().name
@property
def frame(self):
return self._frame
@property
def remaining(self):
return self._remaining
def keep_time(self):
lagged = self.monitor_time()
if self._remaining > 0:
time.sleep(self._remaining)
return lagged
def monitor_time(self):
lagged = False
remaining = self._next_frame_time - sec_since_boot()
self._next_frame_time += self._interval
if self._print_delay_threshold is not None and remaining < -self._print_delay_threshold:
print("%s lagging by %.2f ms" % (self._process_name, -remaining * 1000))
lagged = True
self._frame += 1
self._remaining = remaining
return lagged
| true
| true
|
f702cdd14336fe2d99f8b21d1c298aa8279cf0b2
| 7,420
|
py
|
Python
|
pyluna-pathology/luna/pathology/point_annotation/proxy_table/generate.py
|
msk-mind/data-processing
|
c016d218da2eca003d06b96f2c03f16b3ce97873
|
[
"Apache-2.0"
] | 1
|
2022-03-29T03:48:00.000Z
|
2022-03-29T03:48:00.000Z
|
pyluna-pathology/luna/pathology/point_annotation/proxy_table/generate.py
|
msk-mind/data-processing
|
c016d218da2eca003d06b96f2c03f16b3ce97873
|
[
"Apache-2.0"
] | 96
|
2020-11-15T01:39:12.000Z
|
2021-08-24T14:37:49.000Z
|
pyluna-pathology/luna/pathology/point_annotation/proxy_table/generate.py
|
msk-mind/luna
|
282b5bd594cb5bf1ef2a7fdf56fca9bea5ad7102
|
[
"Apache-2.0"
] | 1
|
2021-01-04T15:14:23.000Z
|
2021-01-04T15:14:23.000Z
|
import os, json
import shutil, logging
import click
from pyspark.sql.functions import lit, udf, explode, array, to_json
from pyspark.sql.types import ArrayType, StringType, IntegerType, MapType, StructType, StructField
from luna.common.CodeTimer import CodeTimer
from luna.common.config import ConfigSet
from luna.common.custom_logger import init_logger
from luna.common.sparksession import SparkConfig
from luna.common.utils import get_absolute_path
from luna.pathology.common.slideviewer_client import fetch_slide_ids
import luna.common.constants as const
os.environ['OPENBLAS_NUM_THREADS'] = '1'
def download_point_annotation(slideviewer_url, slideviewer_path, project_id, user):
"""Downloads point-click nuclear annotations using slideviewer API
Args:
slideviewer_url (string): slideviewer base url e.g. https://slideviewer-url.com
slideviewer_path (string): slide path in slideviewer
project_id (string): slideviewer project id
user (string): username used to create the expert annotation
Returns:
json: point-click nuclear annotations
"""
from slideviewer_client import download_sv_point_annotation
print (f" >>>>>>> Processing [{slideviewer_path}] <<<<<<<<")
url = slideviewer_url + "/slides/" + str(user) + "@mskcc.org/projects;" + \
str(project_id) + ';' + slideviewer_path + "/getSVGLabels/nucleus"
print(url)
return download_sv_point_annotation(url)
@click.command()
@click.option('-d', '--data_config_file', default=None, type=click.Path(exists=True),
help="path to yaml file containing data input and output parameters. "
"See data_config.yaml.template")
@click.option('-a', '--app_config_file', default='config.yaml', type=click.Path(exists=True),
help="path to yaml file containing application runtime parameters. "
"See config.yaml.template")
def cli(data_config_file, app_config_file):
"""This module generates a parquet table of point-click nuclear annotation jsons.
The configuration files are copied to your project/configs/table_name folder
to persist the metadata used to generate the proxy table.
INPUT PARAMETERS
app_config_file - path to yaml file containing application runtime parameters. See config.yaml.template
data_config_file - path to yaml file containing data input and output parameters. See data_config.yaml.template
- ROOT_PATH: path to output data
- DATA_TYPE: data type used in table name e.g. POINT_RAW_JSON
- PROJECT: your project name. used in table path
- DATASET_NAME: optional, dataset name to version your table
- PROJECT_ID: Slideviewer project id
- USERS: list of users that provide expert annotations for this project
- SLIDEVIEWER_CSV_FILE: an optional path to a SlideViewer csv file to use that lists the names of the whole slide images
and for which the regional annotation proxy table generator should download point annotations.
If this field is left blank, then the regional annotation proxy table generator will download this file from SlideViewer.
TABLE SCHEMA
- slideviewer_path: path to original slide image in slideviewer platform
- slide_id: id for the slide. synonymous with image_id
- sv_project_id: same as the PROJECT_ID from data_config_file, refers to the SlideViewer project number.
- sv_json: json annotation file downloaded from slideviewer.
- user: username of the annotator for a given annotation
- sv_json_record_uuid: hash of raw json annotation file from slideviewer, format: SVPTJSON-{json_hash}
"""
logger = init_logger()
with CodeTimer(logger, 'generate POINT_RAW_JSON table'):
logger.info('data config file: ' + data_config_file)
logger.info('app config file: ' + app_config_file)
# load configs
cfg = ConfigSet(name=const.DATA_CFG, config_file=data_config_file)
cfg = ConfigSet(name=const.APP_CFG, config_file=app_config_file)
# copy app and data configuration to destination config dir
config_location = const.CONFIG_LOCATION(cfg)
os.makedirs(config_location, exist_ok=True)
shutil.copy(app_config_file, os.path.join(config_location, "app_config.yaml"))
shutil.copy(data_config_file, os.path.join(config_location, "data_config.yaml"))
logger.info("config files copied to %s", config_location)
create_proxy_table()
def create_proxy_table():
"""Create a proxy table of point annotation json files downloaded from the SlideViewer API
Each row of the table is a point annotation json created by a user for a slide.
Returns:
None
"""
cfg = ConfigSet()
logger = logging.getLogger(__name__)
spark = SparkConfig().spark_session(config_name=const.APP_CFG, app_name="luna.pathology.point_annotation.proxy_table.generate")
# load paths from configs
point_table_path = const.TABLE_LOCATION(cfg)
PROJECT_ID = cfg.get_value(path=const.DATA_CFG+'::PROJECT_ID')
SLIDEVIEWER_URL = cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_URL')
# Get slide list to use
# Download CSV file in the project configs dir
slides = fetch_slide_ids(SLIDEVIEWER_URL, PROJECT_ID, const.CONFIG_LOCATION(cfg),
cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_CSV_FILE'))
logger.info(slides)
schema = StructType([StructField("slideviewer_path", StringType()),
StructField("slide_id", StringType()),
StructField("sv_project_id", IntegerType())
])
df = spark.createDataFrame(slides, schema)
# populate columns
df = df.withColumn("users", array([lit(user) for user in cfg.get_value(const.DATA_CFG+'::USERS')]))
df = df.select("slideviewer_path", "slide_id", "sv_project_id", explode("users").alias("user"))
# download slide point annotation jsons
# example point json:
# [{"project_id":"8","image_id":"123.svs","label_type":"nucleus","x":"1440","y":"747","class":"0","classname":"Tissue 1"},{"project_id":"8","image_id":"123.svs","label_type":"nucleus","x":"1424","y":"774","class":"3","classname":"Tissue 4"}]
point_json_struct = ArrayType(
MapType(StringType(), StringType())
)
spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/slideviewer_client.py"))
download_point_annotation_udf = udf(download_point_annotation, point_json_struct)
df = df.withColumn("sv_json",
download_point_annotation_udf(lit(SLIDEVIEWER_URL), "slideviewer_path", "sv_project_id", "user"))\
.cache()
# drop empty jsons that may have been created
df = df.dropna(subset=["sv_json"])
# populate "date_added", "date_updated","latest", "sv_json_record_uuid"
spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/EnsureByteContext.py"))
spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/utils.py"))
from luna.common.utils import generate_uuid_dict
sv_json_record_uuid_udf = udf(generate_uuid_dict, StringType())
df = df.withColumn("sv_json_record_uuid", sv_json_record_uuid_udf(to_json("sv_json"), array(lit("SVPTJSON"))))
df.show(10, False)
df.write.format("parquet").mode("overwrite").save(point_table_path)
if __name__ == "__main__":
cli()
| 41.920904
| 245
| 0.714555
|
import os, json
import shutil, logging
import click
from pyspark.sql.functions import lit, udf, explode, array, to_json
from pyspark.sql.types import ArrayType, StringType, IntegerType, MapType, StructType, StructField
from luna.common.CodeTimer import CodeTimer
from luna.common.config import ConfigSet
from luna.common.custom_logger import init_logger
from luna.common.sparksession import SparkConfig
from luna.common.utils import get_absolute_path
from luna.pathology.common.slideviewer_client import fetch_slide_ids
import luna.common.constants as const
os.environ['OPENBLAS_NUM_THREADS'] = '1'
def download_point_annotation(slideviewer_url, slideviewer_path, project_id, user):
from slideviewer_client import download_sv_point_annotation
print (f" >>>>>>> Processing [{slideviewer_path}] <<<<<<<<")
url = slideviewer_url + "/slides/" + str(user) + "@mskcc.org/projects;" + \
str(project_id) + ';' + slideviewer_path + "/getSVGLabels/nucleus"
print(url)
return download_sv_point_annotation(url)
@click.command()
@click.option('-d', '--data_config_file', default=None, type=click.Path(exists=True),
help="path to yaml file containing data input and output parameters. "
"See data_config.yaml.template")
@click.option('-a', '--app_config_file', default='config.yaml', type=click.Path(exists=True),
help="path to yaml file containing application runtime parameters. "
"See config.yaml.template")
def cli(data_config_file, app_config_file):
logger = init_logger()
with CodeTimer(logger, 'generate POINT_RAW_JSON table'):
logger.info('data config file: ' + data_config_file)
logger.info('app config file: ' + app_config_file)
cfg = ConfigSet(name=const.DATA_CFG, config_file=data_config_file)
cfg = ConfigSet(name=const.APP_CFG, config_file=app_config_file)
config_location = const.CONFIG_LOCATION(cfg)
os.makedirs(config_location, exist_ok=True)
shutil.copy(app_config_file, os.path.join(config_location, "app_config.yaml"))
shutil.copy(data_config_file, os.path.join(config_location, "data_config.yaml"))
logger.info("config files copied to %s", config_location)
create_proxy_table()
def create_proxy_table():
cfg = ConfigSet()
logger = logging.getLogger(__name__)
spark = SparkConfig().spark_session(config_name=const.APP_CFG, app_name="luna.pathology.point_annotation.proxy_table.generate")
point_table_path = const.TABLE_LOCATION(cfg)
PROJECT_ID = cfg.get_value(path=const.DATA_CFG+'::PROJECT_ID')
SLIDEVIEWER_URL = cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_URL')
slides = fetch_slide_ids(SLIDEVIEWER_URL, PROJECT_ID, const.CONFIG_LOCATION(cfg),
cfg.get_value(path=const.DATA_CFG+'::SLIDEVIEWER_CSV_FILE'))
logger.info(slides)
schema = StructType([StructField("slideviewer_path", StringType()),
StructField("slide_id", StringType()),
StructField("sv_project_id", IntegerType())
])
df = spark.createDataFrame(slides, schema)
df = df.withColumn("users", array([lit(user) for user in cfg.get_value(const.DATA_CFG+'::USERS')]))
df = df.select("slideviewer_path", "slide_id", "sv_project_id", explode("users").alias("user"))
point_json_struct = ArrayType(
MapType(StringType(), StringType())
)
spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/slideviewer_client.py"))
download_point_annotation_udf = udf(download_point_annotation, point_json_struct)
df = df.withColumn("sv_json",
download_point_annotation_udf(lit(SLIDEVIEWER_URL), "slideviewer_path", "sv_project_id", "user"))\
.cache()
df = df.dropna(subset=["sv_json"])
spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/EnsureByteContext.py"))
spark.sparkContext.addPyFile(get_absolute_path(__file__, "../../common/utils.py"))
from luna.common.utils import generate_uuid_dict
sv_json_record_uuid_udf = udf(generate_uuid_dict, StringType())
df = df.withColumn("sv_json_record_uuid", sv_json_record_uuid_udf(to_json("sv_json"), array(lit("SVPTJSON"))))
df.show(10, False)
df.write.format("parquet").mode("overwrite").save(point_table_path)
if __name__ == "__main__":
cli()
| true
| true
|
f702cf37a5d2c8ddbc6bd4cf2cda75e9eb2dcfea
| 10,938
|
py
|
Python
|
MSSE-2021/train_model.py
|
clsteel/DeepPostures
|
8a7bed8f1e47e4a502080bf6edd513b822ea0bdf
|
[
"Apache-2.0"
] | 1
|
2021-06-23T13:28:51.000Z
|
2021-06-23T13:28:51.000Z
|
MSSE-2021/train_model.py
|
clsteel/DeepPostures
|
8a7bed8f1e47e4a502080bf6edd513b822ea0bdf
|
[
"Apache-2.0"
] | null | null | null |
MSSE-2021/train_model.py
|
clsteel/DeepPostures
|
8a7bed8f1e47e4a502080bf6edd513b822ea0bdf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Supun Nakandala. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import numpy as np
import tensorflow as tf
import pandas as pd
import random
import math
import argparse
sys.path.append('./')
from commons import cnn_bi_lstm_model, input_iterator
# Setting random seeds
tf.random.set_random_seed(2019)
random.seed(2019)
np.random.seed(2019)
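# Build the training ops: class-weighted softmax cross-entropy loss, an Adam
# optimizer step, and a mean-per-class (balanced) accuracy metric.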
def get_train_ops(y, logits, learning_rate, n_classes, class_weights):
y = tf.reshape(y, [-1])
logits = tf.reshape(logits, [-1, n_classes])
balanced_accuracy, update_op = tf.metrics.mean_per_class_accuracy(y, tf.argmax(logits, 1), n_classes)
y = tf.reshape(tf.one_hot(y, depth=n_classes, axis=1), [-1, n_classes])
loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y) * tf.reduce_sum(tf.constant(class_weights, dtype=tf.float32) * y, axis=1))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
return train_op, update_op, balanced_accuracy, loss
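# Yield fixed-length windows of stacked (features, labels) arrays, one subject at a time.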
def window_generator(data_root, win_size_10s, subject_ids):
x_segments = []; y_segments = []
for subject_id in subject_ids:
for x_seq, _, y_seq in input_iterator(data_root, subject_id, train=True):
x_window = []; y_window = []
for x,y in zip(x_seq, y_seq):
x_window.append(x)
y_window.append(y)
if len(y_window) == win_size_10s:
yield np.stack(x_window, axis=0), np.stack(y_window, axis=0)
x_window = []; y_window = []
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Argument parser for training CNN model.')
optional_arguments = parser._action_groups.pop()
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True)
optional_arguments.add_argument('--transfer-learning-model', help='Transfer learning model name (default: CHAP_ALL_ADULTS)', default=None, required=False, choices=['CHAP_ALL_ADULTS'])
optional_arguments.add_argument('--learning-rate', help='Learning rate for training the model (default: 0.0001)', default=1e-4, type=float, required=False)
optional_arguments.add_argument('--num-epochs', help='Number of epochs to train the model (default: 15)', default=15, type=int, required=False)
optional_arguments.add_argument('--batch-size', help='Training batch size (default: 16)', default=16, type=int, required=False)
optional_arguments.add_argument('--amp-factor', help='Factor to increase the number of neurons in the CNN layers (default: 2)', default=2, type=int, required=False)
optional_arguments.add_argument('--cnn-window-size', help='CNN window size in seconds on which the predictions to be made (default: 10)', default=10, type=int, required=False)
optional_arguments.add_argument('--bi-lstm-window-size', help='BiLSTM window size in minutes on which the predictions to be smoothed (default: 7)', default=7, type=int, required=False)
optional_arguments.add_argument('--shuffle-buffer-size', help='Training data shuffle buffer size in terms of number of records (default: 10000)', default=10000, type=int, required=False)
optional_arguments.add_argument('--training-data-fraction', help='Percentage of subjects to be used for training (default: 60)', default=60, type=int, required=False)
optional_arguments.add_argument('--validation-data-fraction', help='Percentage of subjects to be used for validation (default: 20)', default=20, type=int, required=False)
optional_arguments.add_argument('--testing-data-fraction', help='Percentage of subjects to be used for testing (default: 20)', default=20, type=int, required=False)
optional_arguments.add_argument('--model-checkpoint-path', help='Path where the trained model will be saved (default: ./model-checkpoint)', default='./model-checkpoint', required=False)
optional_arguments.add_argument('--num-classes', help='Number of classes in the training dataset (default: 2)', default=2, type=int, required=False)
optional_arguments.add_argument('--class-weights', help='Class weights for loss aggregation (default: [1.0, 1.0])', default='[1.0, 1.0]', required=False)
optional_arguments.add_argument('--down-sample-frequency', help='Downsample frequency in Hz for GT3X data (default: 10)', default=10, type=int, required=False)
optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true')
parser._action_groups.append(optional_arguments)
args = parser.parse_args()
if os.path.exists(args.model_checkpoint_path):
raise Exception('Model checkpoint: {} already exists.'.format(args.model_checkpoint_path))
if args.transfer_learning_model:
if args.transfer_learning_model == 'CHAP_ALL_ADULTS':
args.amp_factor = 2
args.cnn_window_size = 10
            args.bi_lstm_window_size = 7  # must match the argparse dest used below; the original set an unused attribute
else:
raise Exception('Unsupported transfer learning model: {}'.format(args.transfer_learning_model))
    assert (args.training_data_fraction + args.validation_data_fraction + args.testing_data_fraction) == 100, 'Train, validation, test split fractions should add up to 100%'
subject_ids = [fname.split('.')[0] for fname in os.listdir(args.pre_processed_dir)]
random.shuffle(subject_ids)
n_train_subjects = int(math.ceil(len(subject_ids) * args.training_data_fraction / 100.))
train_subjects = subject_ids[:n_train_subjects]
subject_ids = subject_ids[n_train_subjects:]
test_frac = args.testing_data_fraction / (100.0 - args.training_data_fraction) * 100
n_test_subjects = int(math.ceil(len(subject_ids) * test_frac / 100.))
test_subjects = subject_ids[:n_test_subjects]
valid_subjects = subject_ids[n_test_subjects:]
output_shapes = ((args.bi_lstm_window_size*(60//args.cnn_window_size), args.cnn_window_size*args.down_sample_frequency, 3), (args.bi_lstm_window_size*(60//args.cnn_window_size)))
bi_lstm_win_size = 60//args.down_sample_frequency * args.bi_lstm_window_size
train_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, train_subjects),output_types=(tf.float32, tf.int32),
output_shapes=output_shapes).shuffle(args.shuffle_buffer_size).batch(args.batch_size).prefetch(10)
valid_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, valid_subjects),output_types=(tf.float32, tf.int32),
output_shapes=output_shapes).batch(args.batch_size).prefetch(10)
test_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, test_subjects),output_types=(tf.float32, tf.int32),
output_shapes=output_shapes).batch(args.batch_size).prefetch(10)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)
train_init_op = iterator.make_initializer(train_dataset)
valid_init_op = iterator.make_initializer(valid_dataset)
test_init_op = iterator.make_initializer(test_dataset)
x, y = iterator.get_next()
x = tf.reshape(x, [-1, args.cnn_window_size*args.down_sample_frequency, 3, 1])
x = tf.identity(x, name='input')
y = tf.reshape(y, [-1, bi_lstm_win_size])
learning_rate = tf.placeholder(tf.float32)
logits = cnn_bi_lstm_model(x, args.amp_factor, bi_lstm_win_size, args.num_classes)
output = tf.argmax(tf.reshape(logits, [-1, args.num_classes]), axis=1, name='output')
prediction = tf.identity(tf.argmax(logits, axis=1), name='prediction')
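    # note: --class-weights arrives as a Python-literal string; eval() parses it
    # below, so ast.literal_eval would be the safer choice for untrusted input.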
class_weights = eval(args.class_weights)
train_op, update_op, balanced_accuracy, loss = get_train_ops(y, logits, learning_rate, args.num_classes, class_weights)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if args.transfer_learning_model:
ckpt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pre-trained-models', '{}_CKPT'.format(args.transfer_learning_model), 'model')
# Weights for the final classification layer (dense) are ignored
variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if not v.name.startswith('dense/')]
restorer = tf.train.Saver(variables)
restorer.restore(sess, ckpt_path)
if not args.silent:
print('Training subjects: {}'.format(train_subjects))
print('Validation subjects: {}'.format(valid_subjects))
print('Testing subjects: {}'.format(test_subjects))
for epoch in range(args.num_epochs):
for label, init_op, subjects in zip(["Train", "Validation", "Test"],
[train_init_op, valid_init_op, test_init_op], [train_subjects, valid_subjects, test_subjects]):
sess.run(tf.local_variables_initializer())
sess.run(init_op)
losses = []
while True:
try:
if label == "Train":
_, _, l = sess.run([train_op, update_op, loss], feed_dict={learning_rate: args.learning_rate})
elif label == "Validation":
_, l = sess.run([update_op, loss])
elif label == "Test":
_, l = sess.run([update_op, loss])
losses.append(l)
except tf.errors.OutOfRangeError:
if not args.silent:
ba = sess.run(balanced_accuracy)
print("Epoch: %d, %s Loss: %f, Balanced Accuracy: %f" %(epoch, label, sum(losses), ba))
break
if not os.path.exists(args.model_checkpoint_path):
os.makedirs(args.model_checkpoint_path)
tf.saved_model.simple_save(sess, os.path.join(args.model_checkpoint_path, 'CUSTOM_MODEL'), inputs={"input": x}, outputs={"output": output})
if not args.silent:
print('Model saved in path: {}'.format(args.model_checkpoint_path))
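# Illustrative sketch, not part of the training script: how the default
# 60/20/20 subject split above behaves for ten hypothetical subjects.
def _illustrate_split(n_subjects=10, train_pct=60, test_pct=20):
    import math
    n_train = int(math.ceil(n_subjects * train_pct / 100.))
    rest = n_subjects - n_train
    test_frac = test_pct / (100.0 - train_pct) * 100
    n_test = int(math.ceil(rest * test_frac / 100.))
    return n_train, n_test, rest - n_test  # e.g. (6, 2, 2) train/test/validation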
| 59.124324
| 190
| 0.6951
|
import os
import sys
import numpy as np
import tensorflow as tf
import pandas as pd
import random
import math
import argparse
sys.path.append('./')
from commons import cnn_bi_lstm_model, input_iterator
tf.random.set_random_seed(2019)
random.seed(2019)
np.random.seed(2019)
def get_train_ops(y, logits, learning_rate, n_classes, class_weights):
y = tf.reshape(y, [-1])
logits = tf.reshape(logits, [-1, n_classes])
balanced_accuracy, update_op = tf.metrics.mean_per_class_accuracy(y, tf.argmax(logits, 1), n_classes)
y = tf.reshape(tf.one_hot(y, depth=n_classes, axis=1), [-1, n_classes])
loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y) * tf.reduce_sum(tf.constant(class_weights, dtype=tf.float32) * y, axis=1))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
return train_op, update_op, balanced_accuracy, loss
def window_generator(data_root, win_size_10s, subject_ids):
x_segments = []; y_segments = []
for subject_id in subject_ids:
for x_seq, _, y_seq in input_iterator(data_root, subject_id, train=True):
x_window = []; y_window = []
for x,y in zip(x_seq, y_seq):
x_window.append(x)
y_window.append(y)
if len(y_window) == win_size_10s:
yield np.stack(x_window, axis=0), np.stack(y_window, axis=0)
x_window = []; y_window = []
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Argument parser for training CNN model.')
optional_arguments = parser._action_groups.pop()
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True)
optional_arguments.add_argument('--transfer-learning-model', help='Transfer learning model name (default: CHAP_ALL_ADULTS)', default=None, required=False, choices=['CHAP_ALL_ADULTS'])
optional_arguments.add_argument('--learning-rate', help='Learning rate for training the model (default: 0.0001)', default=1e-4, type=float, required=False)
optional_arguments.add_argument('--num-epochs', help='Number of epochs to train the model (default: 15)', default=15, type=int, required=False)
optional_arguments.add_argument('--batch-size', help='Training batch size (default: 16)', default=16, type=int, required=False)
optional_arguments.add_argument('--amp-factor', help='Factor to increase the number of neurons in the CNN layers (default: 2)', default=2, type=int, required=False)
optional_arguments.add_argument('--cnn-window-size', help='CNN window size in seconds on which the predictions to be made (default: 10)', default=10, type=int, required=False)
optional_arguments.add_argument('--bi-lstm-window-size', help='BiLSTM window size in minutes on which the predictions to be smoothed (default: 7)', default=7, type=int, required=False)
optional_arguments.add_argument('--shuffle-buffer-size', help='Training data shuffle buffer size in terms of number of records (default: 10000)', default=10000, type=int, required=False)
optional_arguments.add_argument('--training-data-fraction', help='Percentage of subjects to be used for training (default: 60)', default=60, type=int, required=False)
optional_arguments.add_argument('--validation-data-fraction', help='Percentage of subjects to be used for validation (default: 20)', default=20, type=int, required=False)
optional_arguments.add_argument('--testing-data-fraction', help='Percentage of subjects to be used for testing (default: 20)', default=20, type=int, required=False)
optional_arguments.add_argument('--model-checkpoint-path', help='Path where the trained model will be saved (default: ./model-checkpoint)', default='./model-checkpoint', required=False)
optional_arguments.add_argument('--num-classes', help='Number of classes in the training dataset (default: 2)', default=2, type=int, required=False)
optional_arguments.add_argument('--class-weights', help='Class weights for loss aggregation (default: [1.0, 1.0])', default='[1.0, 1.0]', required=False)
optional_arguments.add_argument('--down-sample-frequency', help='Downsample frequency in Hz for GT3X data (default: 10)', default=10, type=int, required=False)
optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true')
parser._action_groups.append(optional_arguments)
args = parser.parse_args()
if os.path.exists(args.model_checkpoint_path):
raise Exception('Model checkpoint: {} already exists.'.format(args.model_checkpoint_path))
if args.transfer_learning_model:
if args.transfer_learning_model == 'CHAP_ALL_ADULTS':
args.amp_factor = 2
args.cnn_window_size = 10
            args.bi_lstm_window_size = 7
else:
raise Exception('Unsupported transfer learning model: {}'.format(args.transfer_learning_model))
    assert (args.training_data_fraction + args.validation_data_fraction + args.testing_data_fraction) == 100, 'Train, validation, test split fractions should add up to 100%'
subject_ids = [fname.split('.')[0] for fname in os.listdir(args.pre_processed_dir)]
random.shuffle(subject_ids)
n_train_subjects = int(math.ceil(len(subject_ids) * args.training_data_fraction / 100.))
train_subjects = subject_ids[:n_train_subjects]
subject_ids = subject_ids[n_train_subjects:]
test_frac = args.testing_data_fraction / (100.0 - args.training_data_fraction) * 100
n_test_subjects = int(math.ceil(len(subject_ids) * test_frac / 100.))
test_subjects = subject_ids[:n_test_subjects]
valid_subjects = subject_ids[n_test_subjects:]
output_shapes = ((args.bi_lstm_window_size*(60//args.cnn_window_size), args.cnn_window_size*args.down_sample_frequency, 3), (args.bi_lstm_window_size*(60//args.cnn_window_size)))
bi_lstm_win_size = 60//args.down_sample_frequency * args.bi_lstm_window_size
train_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, train_subjects),output_types=(tf.float32, tf.int32),
output_shapes=output_shapes).shuffle(args.shuffle_buffer_size).batch(args.batch_size).prefetch(10)
valid_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, valid_subjects),output_types=(tf.float32, tf.int32),
output_shapes=output_shapes).batch(args.batch_size).prefetch(10)
test_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, test_subjects),output_types=(tf.float32, tf.int32),
output_shapes=output_shapes).batch(args.batch_size).prefetch(10)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)
train_init_op = iterator.make_initializer(train_dataset)
valid_init_op = iterator.make_initializer(valid_dataset)
test_init_op = iterator.make_initializer(test_dataset)
x, y = iterator.get_next()
x = tf.reshape(x, [-1, args.cnn_window_size*args.down_sample_frequency, 3, 1])
x = tf.identity(x, name='input')
y = tf.reshape(y, [-1, bi_lstm_win_size])
learning_rate = tf.placeholder(tf.float32)
logits = cnn_bi_lstm_model(x, args.amp_factor, bi_lstm_win_size, args.num_classes)
output = tf.argmax(tf.reshape(logits, [-1, args.num_classes]), axis=1, name='output')
prediction = tf.identity(tf.argmax(logits, axis=1), name='prediction')
class_weights = eval(args.class_weights)
train_op, update_op, balanced_accuracy, loss = get_train_ops(y, logits, learning_rate, args.num_classes, class_weights)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if args.transfer_learning_model:
ckpt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pre-trained-models', '{}_CKPT'.format(args.transfer_learning_model), 'model')
variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if not v.name.startswith('dense/')]
restorer = tf.train.Saver(variables)
restorer.restore(sess, ckpt_path)
if not args.silent:
print('Training subjects: {}'.format(train_subjects))
print('Validation subjects: {}'.format(valid_subjects))
print('Testing subjects: {}'.format(test_subjects))
for epoch in range(args.num_epochs):
for label, init_op, subjects in zip(["Train", "Validation", "Test"],
[train_init_op, valid_init_op, test_init_op], [train_subjects, valid_subjects, test_subjects]):
sess.run(tf.local_variables_initializer())
sess.run(init_op)
losses = []
while True:
try:
if label == "Train":
_, _, l = sess.run([train_op, update_op, loss], feed_dict={learning_rate: args.learning_rate})
elif label == "Validation":
_, l = sess.run([update_op, loss])
elif label == "Test":
_, l = sess.run([update_op, loss])
losses.append(l)
except tf.errors.OutOfRangeError:
if not args.silent:
ba = sess.run(balanced_accuracy)
print("Epoch: %d, %s Loss: %f, Balanced Accuracy: %f" %(epoch, label, sum(losses), ba))
break
if not os.path.exists(args.model_checkpoint_path):
os.makedirs(args.model_checkpoint_path)
tf.saved_model.simple_save(sess, os.path.join(args.model_checkpoint_path, 'CUSTOM_MODEL'), inputs={"input": x}, outputs={"output": output})
if not args.silent:
print('Model saved in path: {}'.format(args.model_checkpoint_path))
| true
| true
|
f702d0bfaf4d80a5d7eaa4d7d94718ae6a61ede4
| 16,945
|
py
|
Python
|
port/boards/mpython-classroom-kit/modules/mpython_classroom_kit_driver.py
|
xjiezheng/mpython
|
010a92aa0c0984b9418ca124a3466616c3e6d77e
|
[
"MIT"
] | 6
|
2019-10-02T09:59:28.000Z
|
2020-10-11T07:15:58.000Z
|
port/boards/mpython-classroom-kit/modules/mpython_classroom_kit_driver.py
|
xjiezheng/mpython
|
010a92aa0c0984b9418ca124a3466616c3e6d77e
|
[
"MIT"
] | 5
|
2019-10-08T07:13:08.000Z
|
2019-10-09T04:06:07.000Z
|
port/boards/mpython-classroom-kit/modules/mpython_classroom_kit_driver.py
|
xjiezheng/mpython
|
010a92aa0c0984b9418ca124a3466616c3e6d77e
|
[
"MIT"
] | 2
|
2019-09-11T10:50:12.000Z
|
2020-03-07T21:17:44.000Z
|
# labplus mPython-box library
# MIT license; Copyright (c) 2018 labplus
# mpython-box buildin periphers drivers
# history:
# V1.0 zhaohuijiang
from machine import Pin, UART
import time
import ujson
from time import sleep_ms, sleep_us, sleep
# touchpad
class BS8112A(object):
""" """
def __init__(self, i2c):
self.addr = 80
# config
self._i2c = i2c
self.config = [0xB0, 0x00, 0x00, 0x83, 0xf3, 0x98, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x00]
checksum = 0
for i in range(1, 19):
checksum += self.config[i]
checksum &= 0xff
self.config[18] = checksum
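        # The checksum byte is the low 8 bits of the sum of config bytes 1..17
        # (index 18 is still zero while the loop adds it); it is stored in-place.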
# print(self.config[18])
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, bytearray(self.config), True)
return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
# i2c.writeto(self.addr, b'\xB0', False)
# time.sleep_ms(10)
# print(i2c.readfrom(self.addr, 17, True))
# key map:
# value bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0
# bs8112a key Key8 Key7 Key6 Key5 Key4 Key3 Key2 Key1
# mpython key N O H T Y P
def key_value(self):
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, b'\x08', False)
time.sleep_ms(10)
value = self._i2c.readfrom(self.addr, 1, True)
time.sleep_ms(10)
return value
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
class Codec_mode():
ES_MODULE_ADC_DAC = 0x00
ES_MODULE_DAC = 0x01
ES_MODULE_ADC = 0x02
class Es8388():
""" """
def __init__(self, i2c, adc_volume=0, dac_volume=0, volume=65):
self._i2c = i2c
self.addr = 16
self.adc_volume = adc_volume
self.dac_volume = dac_volume
self.volume = volume
self.set_voice_mute(1)
retry = 0
        while (retry < 5):
try:
# i2c.writeto(self.addr, bytearray([0x19, 0x04])) # ES8388_DACCONTROL3 0x04 mute/0x00 unmute&ramp;DAC unmute and disabled digital volume control soft ramp
# Chip Control and Power Management
self._i2c.writeto(self.addr, bytearray(
[0x01, 0x50])) # ES8388_CONTROL2 0x40?
# ES8388_CHIPPOWER normal all and power up all
self._i2c.writeto(self.addr, bytearray([0x02, 0x00]))
# ES8388_MASTERMODE CODEC IN I2S SLAVE MODE 0x00: slave
self._i2c.writeto(self.addr, bytearray([0x08, 0x00]))
# dac setup
# ES8388_DACPOWER . disable DAC and disable Lout/Rout/1/2
self._i2c.writeto(self.addr, bytearray([0x04, 0xC0]))
# ES8388_CONTROL1. Enfr=0,Play&Record Mode,(0x17-both of mic&paly)
self._i2c.writeto(self.addr, bytearray([0x00, 0x12]))
# ES8388_DACCONTROL1 1a 0x18:16bit iis , 0x00:24
self._i2c.writeto(self.addr, bytearray([0x17, 0x18]))
# ES8388_DACCONTROL2 DACFsMode,SINGLE SPEED; DACFsRatio,256
self._i2c.writeto(self.addr, bytearray([0x18, 0x02]))
# ES8388_DACCONTROL16 0x00 audio on LIN1&RIN1, 0x09 LIN2&RIN2
self._i2c.writeto(self.addr, bytearray([0x26, 0x00]))
# ES8388_DACCONTROL17 only left DAC to left mixer enable 0db
self._i2c.writeto(self.addr, bytearray([0x27, 0x90]))
# ES8388_DACCONTROL20 only right DAC to right mixer enable 0db
self._i2c.writeto(self.addr, bytearray([0x2a, 0x90]))
# ES8388_DACCONTROL21 set internal ADC and DAC use the same LRCK clock, ADC LRCK as internal LRCK
self._i2c.writeto(self.addr, bytearray([0x2b, 0x80]))
# ES8388_DACCONTROL23 vroi=0
self._i2c.writeto(self.addr, bytearray([0x2d, 0x00]))
self.set_adc_dac_volume(
Codec_mode.ES_MODULE_DAC, self.dac_volume, 0) # 0db
# ES8388_DACPOWER 0x3c Enable DAC and Enable Lout/Rout/1/2
self._i2c.writeto(self.addr, bytearray([0x04, 0x3c]))
# adc setup
self._i2c.writeto(self.addr, bytearray(
[0x03, 0xff])) # ES8388_ADCPOWER
# ES8388_ADCCONTROL1 MIC Left and Right channel PGA gain
self._i2c.writeto(self.addr, bytearray([0x09, 0xbb]))
# ES8388_ADCCONTROL2 0x00 LINSEL & RINSEL, LIN1/RIN1 as ADC Input; DSSEL,use one DS Reg11; DSR, LINPUT1-RINPUT1
self._i2c.writeto(self.addr, bytearray([0x0a, 0x00]))
# ES8388_ADCCONTROL3 clock input
self._i2c.writeto(self.addr, bytearray([0x0b, 0x02]))
# ES8388_ADCCONTROL4 Left/Right data, Left/Right justified mode, Bits length 16bit, I2S format 0x0c?
self._i2c.writeto(self.addr, bytearray([0x0c, 0x0c]))
# ES8388_ADCCONTROL5 ADCFsMode,singel SPEED,RATIO=256
self._i2c.writeto(self.addr, bytearray([0x0d, 0x02]))
# ALC for Microphone
self.set_adc_dac_volume(
Codec_mode.ES_MODULE_ADC, self.adc_volume, 0) # 0db
# ES8388_ADCPOWER Power on ADC, Enable LIN&RIN, Power off MICBIAS, set int1lp to low power mode
self._i2c.writeto(self.addr, bytearray([0x03, 0x09]))
# set volume
self.set_volume(self.volume)
self.set_voice_mute(0)
# test
# for i in range(0, 52):
# i2c.writeto(self.addr, bytearray([i]))
# print("%d: %d" % (i, i2c.readfrom(self.addr, 1)[0]))
return
except:
retry = retry + 1
else:
raise Exception("es8388 i2c read/write error!")
def deinit(self):
retry = 0
        while (retry < 5):
try:
# ES8388_CHIPPOWER reset and stop es838
self._i2c.writeto(self.addr, bytearray([0x02, 0xff]))
return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
def set_adc_dac_volume(self, mode, volume, dot):
_volume = volume
if (_volume < -96):
_volume = -96
        elif (_volume > 0):
            _volume = 0  # clamp into the chip's [-96, 0] dB range
_dot = 0
if dot >= 5:
_dot = 1
_volume = (-_volume << 1) + _dot
retry = 0
        while (retry < 5):
try:
if (mode == Codec_mode.ES_MODULE_ADC or mode == Codec_mode.ES_MODULE_ADC_DAC):
self._i2c.writeto(self.addr, bytearray(
[0x10, _volume])) # ES8388_ADCCONTROL8
self._i2c.writeto(self.addr, bytearray(
[0x11, _volume])) # ES8388_ADCCONTROL9
if (mode == Codec_mode.ES_MODULE_DAC or mode == Codec_mode.ES_MODULE_ADC_DAC):
self._i2c.writeto(self.addr, bytearray(
[0x1b, _volume])) # ES8388_DACCONTROL5
self._i2c.writeto(self.addr, bytearray(
[0x1a, _volume])) # ES8388_DACCONTROL4
return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
def set_volume(self, volume):
self.volume = volume
if (self.volume < 0):
self.volume = 0
elif (self.volume > 100):
self.volume = 100
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, bytearray(
[0x2e, self.volume//3])) # ES8388_DACCONTROL24
self._i2c.writeto(self.addr, bytearray(
[0x2f, self.volume//3])) # ES8388_DACCONTROL25
self._i2c.writeto(self.addr, bytearray(
[0x30, 0])) # ES8388_DACCONTROL26
self._i2c.writeto(self.addr, bytearray(
[0x31, 0])) # ES8388_DACCONTROL27
# print("volume L: %d" % (self.volume//3))
return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
def get_volume(self):
return self.volume
def set_voice_mute(self, mute):
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, b'\x19')
dac_ctr3 = self._i2c.readfrom(self.addr, 1)[0]
if(mute):
dac_ctr3 |= 0x04
else:
dac_ctr3 &= 0xFB
                self._i2c.writeto(self.addr, bytearray([0x19, dac_ctr3]))
                return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
uart2 = UART(2, baudrate=1152000, rx=Pin.P8, tx=Pin.P23,
timeout=50, timeout_char=1024, rxbuf=2048, txbuf=2048)
class K210Error(Exception):
"""K210异常类"""
pass
class blob():
def __init__(self,*args):
self.dict = args[0]
def __repr__(self):
return self.dict
def x(self):
return self.dict['x']
def y(self):
return self.dict['y']
def w(self):
return self.dict['w']
def h(self):
return self.dict['h']
def rect(self):
return(self.dict['x'], self.dict['y'], self.dict['w'], self.dict['h'])
def pixels(self):
return self.dict['pixels']
def cx(self):
return self.dict['cx']
def cy(self):
return self.dict['cy']
def rotation(self):
return self.dict['rotation']
def code(self):
return self.dict['code']
def count(self):
return self.dict['count']
class K210():
def __init__(self):
t1 = time.ticks_ms()
while (time.ticks_diff(time.ticks_ms(), t1) < 10000):
            rsp = self.send_cmd({'GET_KEYS': 0})  # probe: the K210 is considered initialised once it answers GET_KEYS
if rsp is not None:
return
raise K210Error("K210 init failed!")
def send_cmd(self, command, wait=True, timeout=200):
json_stream = ujson.dumps(command)
uart2.write(json_stream + '\n')
# print("UART_Send:%s" % (json_stream + '\n'))
t1 = time.ticks_ms()
while wait:
if uart2.any() > 0:
                r = uart2.readline()
                r = r.strip()
while uart2.readline():
pass
# print("UART_Recv:%s" % r)
try:
rsp = ujson.loads(r)
except Exception as e:
print(e)
break
else:
if rsp and isinstance(rsp, dict):
for key, value in rsp.items():
if key == 'ERROR':
raise K210Error(value)
if key == 'RESP':
return value
if time.ticks_diff(time.ticks_ms(), t1) > timeout:
# raise K210Error("k210 not respone!")
return None
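    # Illustrative usage, not part of the original driver: send_cmd() speaks
    # newline-delimited JSON over uart2; a request such as {"GET_DISTANCE": 0}
    # is answered with {"RESP": <value>} or {"ERROR": <message>}. On a board
    # with the K210 attached one could do, for example:
    #   k210 = K210()
    #   k210.get_distance()   # falls back to 340 when nothing answers
    #   k210.set_cam_led(1)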
def get_key(self):
return self.send_cmd({'GET_KEYS': 0})
def get_distance(self):
resp = self.send_cmd({'GET_DISTANCE': 0})
if resp is None:
resp = 340
return resp
def set_cam_led(self, on_off):
return self.send_cmd({'SET_CAM_LED': on_off})
def set_motor(self, speed):
return self.send_cmd({'SET_MOTOR': speed})
def file_open(self, *args):
return self.send_cmd({'FILE_OPEN': args})
def file_read(self, *args):
return self.send_cmd({'FILE_READ': args[0]},timeout=300)
def file_write(self, *args):
return self.send_cmd({'FILE_WRITE': args[0]},timeout=300)
def file_close(self):
return self.send_cmd({'FILE_CLOSE': 0})
def reset(self):
self.send_cmd({'RESET': 0},False)
def select_model(self, *args):
self.send_cmd({'SELE_MOD': args[0]}, timeout=3000)
def load_model(self, **kws):
self.send_cmd({'LOD_MOD': kws}, timeout=3000)
def detect_yolo(self):
return self.send_cmd({'DET_YO': 0})
def predict_net(self):
return self.send_cmd({'PRE_NET': 0})
def deinit_yolo(self):
return self.send_cmd({'DINT_YO': 0})
def deinit_net(self):
return self.send_cmd({'DINT_NET': 0})
def camera_snapshot(self):
return self.send_cmd({'SNAPSHOT': 0})
def camera_reset(self):
return self.send_cmd({'CAM_RST': 0},timeout=3000)
def camera_run(self, *arg):
return self.send_cmd({'CAM_RUN': arg[0]})
def camera_set_pixformat(self, *arg):
return self.send_cmd({'CAM_SET_PF': arg[0]})
def camera_set_contrast(self, *arg):
return self.send_cmd({'CAM_SET_CRA': arg[0]})
def camera_set_brightness(self, *arg):
return self.send_cmd({'CAM_SET_BRG': arg[0]})
def camera_set_saturation(self, *arg):
return self.send_cmd({'CAM_SET_SAT': arg[0]})
def camera_set_auto_gain(self, *arg, **kw):
return self.send_cmd({'CAM_AUTO_GAIN': [arg, kw]})
def camera_set_auto_whitebal(self, *arg):
return self.send_cmd({'CAM_AUTO_WBAL': arg[0]})
def camera_set_windowing(self, *arg):
return self.send_cmd({'CAM_SET_WIN': arg[0]})
def camera_set_hmirror(self, *arg):
return self.send_cmd({'CAM_SET_HM': arg[0]})
def camera_set_vflip(self, *arg):
return self.send_cmd({'CAM_SET_VF': arg[0]})
def camera_skip_frames(self, *arg, **kw):
return self.send_cmd({'CAM_SKIP_FRM': [arg, kw]})
def lcd_init(self, *args, **kws):
return self.send_cmd({'LCD_INT': [args, kws]},timeout=5000)
def lcd_display(self, **kws):
return self.send_cmd({'LCD_DISP': kws})
def lcd_clear(self, **kws):
return self.send_cmd({'LCD_CLR': kws})
def lcd_draw_string(self, *args):
return self.send_cmd({'LCD_STR': args})
def image_load(self, *args, **kws):
self.send_cmd({'IMG_LOD': [args, kws]})
time.sleep_ms(200)
def image_width(self):
return self.send_cmd({'IMG_WID': 0})
def image_hight(self):
return self.send_cmd({'IMG_HIG': 0})
def image_format(self):
return self.send_cmd({'IMG_FRM': 0})
def image_size(self):
return self.send_cmd({'IMG_SIZE': 0})
def image_get_pixel(self, *args, **kws):
return self.send_cmd({'IMG_GET_PIX': [args, kws]})
def image_set_pixel(self, *args, **kws):
self.send_cmd({'IMG_SET_PIX': [args, kws]})
def image_mean_pool(self, *args, **kws):
self.send_cmd({'IMG_MEAN_P': [args, kws]})
def image_to_grayscale(self):
self.send_cmd({'IMG_TO_GRAY': 0})
def image_to_rainbow(self):
self.send_cmd({'IMG_TO_RB': 0})
def image_copy(self, *args, **kws):
self.send_cmd({'IMG_CPY': [args, kws]})
def image_save(self, *args, **kws):
self.send_cmd({'IMG_SAVE': [args, kws]})
time.sleep_ms(200)
def image_clear(self):
self.send_cmd({'IMG_CLR': 0})
def image_draw_line(self, *args, **kws):
self.send_cmd({'IMG_DRW_LN': [args, kws]})
def image_draw_rectangle(self, *args, **kws):
self.send_cmd({'IMG_DRW_RECTANG': [args, kws]})
def image_draw_circle(self, *args, **kws):
self.send_cmd({'IMG_DRW_CIR': [args, kws]})
def image_draw_string(self, *args, **kws):
self.send_cmd({'IMG_DRW_STR': [args, kws]})
def image_draw_cross(self, *args, **kws):
self.send_cmd({'IMG_DRW_CRS': [args, kws]})
def image_draw_arrow(self, *args, **kws):
self.send_cmd({'IMG_DRW_ARR': [args, kws]})
def image_draw_image(self, *args, **kws):
self.send_cmd({'IMG_DRW_IMG': [args, kws]})
def image_binary(self, *args, **kws):
self.send_cmd({'IMG_BINARY': [args, kws]})
def image_invert(self):
self.send_cmd({'IMG_INVERT': 0})
def image_erode(self, *args, **kws):
self.send_cmd({'IMG_ERODE': [args, kws]})
def image_dilate(self, *args, **kws):
self.send_cmd({'IMG_DIL': [args, kws]})
def image_negate(self, *args, **kws):
self.send_cmd({'IMG_NEG': [args, kws]})
def image_mean(self, *args, **kws):
self.send_cmd({'IMG_MEAN': [args, kws]})
def image_mode(self, *args, **kws):
self.send_cmd({'IMG_MODE': [args, kws]})
def image_median(self, *args, **kws):
self.send_cmd({'IMG_MEDIAN': [args, kws]})
def image_midpoint(self, *args, **kws):
self.send_cmd({'IMG_MIDP': [args, kws]})
def image_cartoon(self, *args, **kws):
self.send_cmd({'IMG_CART': [args, kws]})
def image_conv3(self, *args, **kws):
self.send_cmd({'IMG_CONV': [args, kws]})
def image_gaussian(self, *args, **kws):
self.send_cmd({'IMG_GAUS': [args, kws]})
def image_bilateral(self, *args, **kws):
self.send_cmd({'IMG_BIL': [args, kws]})
def image_linpolar(self, *args, **kws):
self.send_cmd({'IMG_LINP': [args, kws]})
def image_logpolar(self, *args, **kws):
self.send_cmd({'IMG_LOGP': [args, kws]})
def image_rotation_corr(self, *args, **kws):
self.send_cmd({'IMG_ROT_COR': [args, kws]})
def image_find_blobs(self, *args, **kws):
return [blob(i) for i in self.send_cmd({'IMG_FID_BLOB': [args, kws]})]
| 31.851504
| 167
| 0.580525
|
from machine import Pin, UART
import time
import ujson
from time import sleep_ms, sleep_us, sleep
class BS8112A(object):
def __init__(self, i2c):
self.addr = 80
self._i2c = i2c
self.config = [0xB0, 0x00, 0x00, 0x83, 0xf3, 0x98, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x00]
checksum = 0
for i in range(1, 19):
checksum += self.config[i]
checksum &= 0xff
self.config[18] = checksum
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, bytearray(self.config), True)
return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
def key_value(self):
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, b'\x08', False)
time.sleep_ms(10)
value = self._i2c.readfrom(self.addr, 1, True)
time.sleep_ms(10)
return value
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
class Codec_mode():
ES_MODULE_ADC_DAC = 0x00
ES_MODULE_DAC = 0x01
ES_MODULE_ADC = 0x02
class Es8388():
def __init__(self, i2c, adc_volume=0, dac_volume=0, volume=65):
self._i2c = i2c
self.addr = 16
self.adc_volume = adc_volume
self.dac_volume = dac_volume
self.volume = volume
self.set_voice_mute(1)
retry = 0
        while (retry < 5):
            try:
                self._i2c.writeto(self.addr, bytearray([0x01, 0x50]))
                self._i2c.writeto(self.addr, bytearray([0x02, 0x00]))
                self._i2c.writeto(self.addr, bytearray([0x08, 0x00]))
                self._i2c.writeto(self.addr, bytearray([0x04, 0xC0]))
                self._i2c.writeto(self.addr, bytearray([0x00, 0x12]))
                self._i2c.writeto(self.addr, bytearray([0x17, 0x18]))
                self._i2c.writeto(self.addr, bytearray([0x18, 0x02]))
                self._i2c.writeto(self.addr, bytearray([0x26, 0x00]))
                self._i2c.writeto(self.addr, bytearray([0x27, 0x90]))
                self._i2c.writeto(self.addr, bytearray([0x2a, 0x90]))
                self._i2c.writeto(self.addr, bytearray([0x2b, 0x80]))
                self._i2c.writeto(self.addr, bytearray([0x2d, 0x00]))
                self.set_adc_dac_volume(Codec_mode.ES_MODULE_DAC, self.dac_volume, 0)
                self._i2c.writeto(self.addr, bytearray([0x04, 0x3c]))
                self._i2c.writeto(self.addr, bytearray([0x03, 0xff]))
                self._i2c.writeto(self.addr, bytearray([0x09, 0xbb]))
                self._i2c.writeto(self.addr, bytearray([0x0a, 0x00]))
                self._i2c.writeto(self.addr, bytearray([0x0b, 0x02]))
                self._i2c.writeto(self.addr, bytearray([0x0c, 0x0c]))
                self._i2c.writeto(self.addr, bytearray([0x0d, 0x02]))
                self.set_adc_dac_volume(Codec_mode.ES_MODULE_ADC, self.adc_volume, 0)
                self._i2c.writeto(self.addr, bytearray([0x03, 0x09]))
                self.set_volume(self.volume)
                self.set_voice_mute(0)
                return
            except:
                retry = retry + 1
        else:
            raise Exception("es8388 i2c read/write error!")
def deinit(self):
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, bytearray([0x02, 0xff]))
return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
def set_adc_dac_volume(self, mode, volume, dot):
_volume = volume
if (_volume < -96):
_volume = -96
        elif (_volume > 0):
            _volume = 0
_dot = 0
if dot >= 5:
_dot = 1
_volume = (-_volume << 1) + _dot
retry = 0
        while (retry < 5):
            try:
                if (mode == Codec_mode.ES_MODULE_ADC or mode == Codec_mode.ES_MODULE_ADC_DAC):
                    self._i2c.writeto(self.addr, bytearray([0x10, _volume]))
                    self._i2c.writeto(self.addr, bytearray([0x11, _volume]))
                if (mode == Codec_mode.ES_MODULE_DAC or mode == Codec_mode.ES_MODULE_ADC_DAC):
                    self._i2c.writeto(self.addr, bytearray([0x1b, _volume]))
                    self._i2c.writeto(self.addr, bytearray([0x1a, _volume]))
                return
            except:
                retry = retry + 1
        else:
            raise Exception("bs8112a i2c read/write error!")
def set_volume(self, volume):
self.volume = volume
if (self.volume < 0):
self.volume = 0
elif (self.volume > 100):
self.volume = 100
retry = 0
        while (retry < 5):
            try:
                self._i2c.writeto(self.addr, bytearray([0x2e, self.volume//3]))
                self._i2c.writeto(self.addr, bytearray([0x2f, self.volume//3]))
                self._i2c.writeto(self.addr, bytearray([0x30, 0]))
                self._i2c.writeto(self.addr, bytearray([0x31, 0]))
                return
            except:
                retry = retry + 1
        else:
            raise Exception("bs8112a i2c read/write error!")
def get_volume(self):
return self.volume
def set_voice_mute(self, mute):
retry = 0
        while (retry < 5):
try:
self._i2c.writeto(self.addr, b'\x19')
dac_ctr3 = self._i2c.readfrom(self.addr, 1)[0]
if(mute):
dac_ctr3 |= 0x04
else:
dac_ctr3 &= 0xFB
                self._i2c.writeto(self.addr, bytearray([0x19, dac_ctr3]))
                return
except:
retry = retry + 1
else:
raise Exception("bs8112a i2c read/write error!")
uart2 = UART(2, baudrate=1152000, rx=Pin.P8, tx=Pin.P23,
timeout=50, timeout_char=1024, rxbuf=2048, txbuf=2048)
class K210Error(Exception):
pass
class blob():
def __init__(self,*args):
self.dict = args[0]
def __repr__(self):
return self.dict
def x(self):
return self.dict['x']
def y(self):
return self.dict['y']
def w(self):
return self.dict['w']
def h(self):
return self.dict['h']
def rect(self):
return(self.dict['x'], self.dict['y'], self.dict['w'], self.dict['h'])
def pixels(self):
return self.dict['pixels']
def cx(self):
return self.dict['cx']
def cy(self):
return self.dict['cy']
def rotation(self):
return self.dict['rotation']
def code(self):
return self.dict['code']
def count(self):
return self.dict['count']
class K210():
def __init__(self):
t1 = time.ticks_ms()
while (time.ticks_diff(time.ticks_ms(), t1) < 10000):
            rsp = self.send_cmd({'GET_KEYS': 0})
            if rsp is not None:
                return
raise K210Error("K210 init failed!")
def send_cmd(self, command, wait=True, timeout=200):
json_stream = ujson.dumps(command)
uart2.write(json_stream + '\n')
t1 = time.ticks_ms()
while wait:
if uart2.any() > 0:
                r = uart2.readline()
                r = r.strip()
while uart2.readline():
pass
try:
rsp = ujson.loads(r)
except Exception as e:
print(e)
break
else:
if rsp and isinstance(rsp, dict):
for key, value in rsp.items():
if key == 'ERROR':
raise K210Error(value)
if key == 'RESP':
return value
if time.ticks_diff(time.ticks_ms(), t1) > timeout:
return None
def get_key(self):
return self.send_cmd({'GET_KEYS': 0})
def get_distance(self):
resp = self.send_cmd({'GET_DISTANCE': 0})
if resp is None:
resp = 340
return resp
def set_cam_led(self, on_off):
return self.send_cmd({'SET_CAM_LED': on_off})
def set_motor(self, speed):
return self.send_cmd({'SET_MOTOR': speed})
def file_open(self, *args):
return self.send_cmd({'FILE_OPEN': args})
def file_read(self, *args):
return self.send_cmd({'FILE_READ': args[0]},timeout=300)
def file_write(self, *args):
return self.send_cmd({'FILE_WRITE': args[0]},timeout=300)
def file_close(self):
return self.send_cmd({'FILE_CLOSE': 0})
def reset(self):
self.send_cmd({'RESET': 0},False)
def select_model(self, *args):
self.send_cmd({'SELE_MOD': args[0]}, timeout=3000)
def load_model(self, **kws):
self.send_cmd({'LOD_MOD': kws}, timeout=3000)
def detect_yolo(self):
return self.send_cmd({'DET_YO': 0})
def predict_net(self):
return self.send_cmd({'PRE_NET': 0})
def deinit_yolo(self):
return self.send_cmd({'DINT_YO': 0})
def deinit_net(self):
return self.send_cmd({'DINT_NET': 0})
def camera_snapshot(self):
return self.send_cmd({'SNAPSHOT': 0})
def camera_reset(self):
return self.send_cmd({'CAM_RST': 0},timeout=3000)
def camera_run(self, *arg):
return self.send_cmd({'CAM_RUN': arg[0]})
def camera_set_pixformat(self, *arg):
return self.send_cmd({'CAM_SET_PF': arg[0]})
def camera_set_contrast(self, *arg):
return self.send_cmd({'CAM_SET_CRA': arg[0]})
def camera_set_brightness(self, *arg):
return self.send_cmd({'CAM_SET_BRG': arg[0]})
def camera_set_saturation(self, *arg):
return self.send_cmd({'CAM_SET_SAT': arg[0]})
def camera_set_auto_gain(self, *arg, **kw):
return self.send_cmd({'CAM_AUTO_GAIN': [arg, kw]})
def camera_set_auto_whitebal(self, *arg):
return self.send_cmd({'CAM_AUTO_WBAL': arg[0]})
def camera_set_windowing(self, *arg):
return self.send_cmd({'CAM_SET_WIN': arg[0]})
def camera_set_hmirror(self, *arg):
return self.send_cmd({'CAM_SET_HM': arg[0]})
def camera_set_vflip(self, *arg):
return self.send_cmd({'CAM_SET_VF': arg[0]})
def camera_skip_frames(self, *arg, **kw):
return self.send_cmd({'CAM_SKIP_FRM': [arg, kw]})
def lcd_init(self, *args, **kws):
return self.send_cmd({'LCD_INT': [args, kws]},timeout=5000)
def lcd_display(self, **kws):
return self.send_cmd({'LCD_DISP': kws})
def lcd_clear(self, **kws):
return self.send_cmd({'LCD_CLR': kws})
def lcd_draw_string(self, *args):
return self.send_cmd({'LCD_STR': args})
def image_load(self, *args, **kws):
self.send_cmd({'IMG_LOD': [args, kws]})
time.sleep_ms(200)
def image_width(self):
return self.send_cmd({'IMG_WID': 0})
def image_hight(self):
return self.send_cmd({'IMG_HIG': 0})
def image_format(self):
return self.send_cmd({'IMG_FRM': 0})
def image_size(self):
return self.send_cmd({'IMG_SIZE': 0})
def image_get_pixel(self, *args, **kws):
return self.send_cmd({'IMG_GET_PIX': [args, kws]})
def image_set_pixel(self, *args, **kws):
self.send_cmd({'IMG_SET_PIX': [args, kws]})
def image_mean_pool(self, *args, **kws):
self.send_cmd({'IMG_MEAN_P': [args, kws]})
def image_to_grayscale(self):
self.send_cmd({'IMG_TO_GRAY': 0})
def image_to_rainbow(self):
self.send_cmd({'IMG_TO_RB': 0})
def image_copy(self, *args, **kws):
self.send_cmd({'IMG_CPY': [args, kws]})
def image_save(self, *args, **kws):
self.send_cmd({'IMG_SAVE': [args, kws]})
time.sleep_ms(200)
def image_clear(self):
self.send_cmd({'IMG_CLR': 0})
def image_draw_line(self, *args, **kws):
self.send_cmd({'IMG_DRW_LN': [args, kws]})
def image_draw_rectangle(self, *args, **kws):
self.send_cmd({'IMG_DRW_RECTANG': [args, kws]})
def image_draw_circle(self, *args, **kws):
self.send_cmd({'IMG_DRW_CIR': [args, kws]})
def image_draw_string(self, *args, **kws):
self.send_cmd({'IMG_DRW_STR': [args, kws]})
def image_draw_cross(self, *args, **kws):
self.send_cmd({'IMG_DRW_CRS': [args, kws]})
def image_draw_arrow(self, *args, **kws):
self.send_cmd({'IMG_DRW_ARR': [args, kws]})
def image_draw_image(self, *args, **kws):
self.send_cmd({'IMG_DRW_IMG': [args, kws]})
def image_binary(self, *args, **kws):
self.send_cmd({'IMG_BINARY': [args, kws]})
def image_invert(self):
self.send_cmd({'IMG_INVERT': 0})
def image_erode(self, *args, **kws):
self.send_cmd({'IMG_ERODE': [args, kws]})
def image_dilate(self, *args, **kws):
self.send_cmd({'IMG_DIL': [args, kws]})
def image_negate(self, *args, **kws):
self.send_cmd({'IMG_NEG': [args, kws]})
def image_mean(self, *args, **kws):
self.send_cmd({'IMG_MEAN': [args, kws]})
def image_mode(self, *args, **kws):
self.send_cmd({'IMG_MODE': [args, kws]})
def image_median(self, *args, **kws):
self.send_cmd({'IMG_MEDIAN': [args, kws]})
def image_midpoint(self, *args, **kws):
self.send_cmd({'IMG_MIDP': [args, kws]})
def image_cartoon(self, *args, **kws):
self.send_cmd({'IMG_CART': [args, kws]})
def image_conv3(self, *args, **kws):
self.send_cmd({'IMG_CONV': [args, kws]})
def image_gaussian(self, *args, **kws):
self.send_cmd({'IMG_GAUS': [args, kws]})
def image_bilateral(self, *args, **kws):
self.send_cmd({'IMG_BIL': [args, kws]})
def image_linpolar(self, *args, **kws):
self.send_cmd({'IMG_LINP': [args, kws]})
def image_logpolar(self, *args, **kws):
self.send_cmd({'IMG_LOGP': [args, kws]})
def image_rotation_corr(self, *args, **kws):
self.send_cmd({'IMG_ROT_COR': [args, kws]})
def image_find_blobs(self, *args, **kws):
return [blob(i) for i in self.send_cmd({'IMG_FID_BLOB': [args, kws]})]
| true
| true
|
f702d1ee5d85bb2a40fcf9e18f5769b34c6eb104
| 229
|
py
|
Python
|
examples/fortran/run.py
|
pyflosic/fodMC
|
93259b527d39cc02dcded0c42f89a73ba16851d1
|
[
"Apache-2.0"
] | 5
|
2019-06-24T08:03:58.000Z
|
2021-04-13T14:54:50.000Z
|
examples/fortran/run.py
|
pyflosic/fodMC
|
93259b527d39cc02dcded0c42f89a73ba16851d1
|
[
"Apache-2.0"
] | 15
|
2019-05-20T10:40:46.000Z
|
2021-07-20T16:40:25.000Z
|
examples/fortran/run.py
|
pyflosic/fodMC
|
93259b527d39cc02dcded0c42f89a73ba16851d1
|
[
"Apache-2.0"
] | 2
|
2019-09-28T12:40:59.000Z
|
2021-07-20T15:06:11.000Z
|
import fodmc
# output_mode: PyFLOSIC, NRLMOL
# output_name: NameOfMolecule.xyz (for PyFLOSIC only)
output_mode = ['NRLMOL','PyFLOSIC'][1]
output_name = ['', 'test.xyz'][1]
fodmc.fodmc_mod.get_guess(output_mode,output_name)
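# Illustrative, not part of the example: picking index 0 instead selects
# NRLMOL output, for which output_name is unused per the comment above.
# output_mode = ['NRLMOL','PyFLOSIC'][0]
# output_name = ['', 'test.xyz'][0]
# fodmc.fodmc_mod.get_guess(output_mode,output_name)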
| 32.714286
| 54
| 0.737991
|
import fodmc
output_mode = ['NRLMOL','PyFLOSIC'][1]
output_name = ['', 'test.xyz'][1]
fodmc.fodmc_mod.get_guess(output_mode,output_name)
| true
| true
|
f702d2430cad6f9017bafe76fa53486b73f6bf16
| 5,816
|
py
|
Python
|
tests/gcp/sensors/test_cloud_storage_transfer_service.py
|
ktmud/incubator-airflow
|
43154c643c3c598c769d645891f2e8e123f8bdde
|
[
"Apache-2.0"
] | 5
|
2020-07-17T07:33:58.000Z
|
2022-03-02T06:23:47.000Z
|
tests/gcp/sensors/test_cloud_storage_transfer_service.py
|
ktmud/incubator-airflow
|
43154c643c3c598c769d645891f2e8e123f8bdde
|
[
"Apache-2.0"
] | 7
|
2020-06-03T14:55:17.000Z
|
2021-12-30T00:01:50.000Z
|
tests/gcp/sensors/test_cloud_storage_transfer_service.py
|
ktmud/incubator-airflow
|
43154c643c3c598c769d645891f2e8e123f8bdde
|
[
"Apache-2.0"
] | 12
|
2020-01-09T14:02:39.000Z
|
2022-01-24T07:18:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from parameterized import parameterized
from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor
class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase):
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_success(self, mock_tool):
operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}]
mock_tool.return_value.list_transfer_operations.return_value = operations
mock_tool.operations_contain_expected_statuses.return_value = True
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
mock_tool.return_value.list_transfer_operations.assert_called_once_with(
request_filter={'project_id': 'project-id', 'job_names': ['job-name']}
)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
self.assertTrue(result)
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_success_default_expected_status(self, mock_tool):
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
self.assertTrue(result)
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_after_retry(self, mock_tool):
operations_set = [
[{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}],
[{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}],
]
mock_tool.return_value.list_transfer_operations.side_effect = operations_set
mock_tool.operations_contain_expected_statuses.side_effect = [False, True]
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
self.assertFalse(result)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations_set[0], expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
mock_tool.operations_contain_expected_statuses.reset_mock()
result = op.poke(context)
self.assertTrue(result)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
@parameterized.expand(
[
(GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}),
({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}),
(
{GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS},
{GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS},
),
]
)
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool):
operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}]
mock_tool.return_value.list_transfer_operations.return_value = operations
mock_tool.operations_contain_expected_statuses.side_effect = [False, True]
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=expected_status,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
self.assertFalse(result)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations, expected_statuses=received_status
)
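# Illustrative sketch, not part of the test module: how the sensor under test
# is typically wired up; the DAG object and its arguments here are assumptions.
# from airflow import DAG
# with DAG('transfer-status-dag', schedule_interval=None) as dag:
#     wait_for_transfer = CloudDataTransferServiceJobStatusSensor(
#         task_id='wait-for-transfer',
#         job_name='job-name',
#         project_id='project-id',
#         expected_statuses={GcpTransferOperationStatus.SUCCESS},
#     )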
| 42.144928
| 102
| 0.714752
|
import unittest
import mock
from parameterized import parameterized
from airflow.gcp.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.gcp.sensors.cloud_storage_transfer_service import CloudDataTransferServiceJobStatusSensor
class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase):
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_success(self, mock_tool):
operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}]
mock_tool.return_value.list_transfer_operations.return_value = operations
mock_tool.operations_contain_expected_statuses.return_value = True
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
mock_tool.return_value.list_transfer_operations.assert_called_once_with(
request_filter={'project_id': 'project-id', 'job_names': ['job-name']}
)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
self.assertTrue(result)
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_success_default_expected_status(self, mock_tool):
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
self.assertTrue(result)
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_after_retry(self, mock_tool):
operations_set = [
[{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}],
[{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}],
]
mock_tool.return_value.list_transfer_operations.side_effect = operations_set
mock_tool.operations_contain_expected_statuses.side_effect = [False, True]
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
self.assertFalse(result)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations_set[0], expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
mock_tool.operations_contain_expected_statuses.reset_mock()
result = op.poke(context)
self.assertTrue(result)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS}
)
@parameterized.expand(
[
(GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}),
({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}),
(
{GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS},
{GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS},
),
]
)
@mock.patch('airflow.gcp.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook')
def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool):
operations = [{'metadata': {'status': GcpTransferOperationStatus.SUCCESS}}]
mock_tool.return_value.list_transfer_operations.return_value = operations
mock_tool.operations_contain_expected_statuses.side_effect = [False, True]
op = CloudDataTransferServiceJobStatusSensor(
task_id='task-id',
job_name='job-name',
project_id='project-id',
expected_statuses=expected_status,
)
context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))}
result = op.poke(context)
self.assertFalse(result)
mock_tool.operations_contain_expected_statuses.assert_called_once_with(
operations=operations, expected_statuses=received_status
)
| true
| true
|
f702d248ad2a10ba4e875fcb3f658bb636bddf06
| 455
|
py
|
Python
|
maps/migrations/0012_auto_20201019_2139.py
|
naveennvrgup/smart-traffic-light
|
1c4d050314d8dc42ebf11491b3421c511e2718f3
|
[
"MIT"
] | null | null | null |
maps/migrations/0012_auto_20201019_2139.py
|
naveennvrgup/smart-traffic-light
|
1c4d050314d8dc42ebf11491b3421c511e2718f3
|
[
"MIT"
] | null | null | null |
maps/migrations/0012_auto_20201019_2139.py
|
naveennvrgup/smart-traffic-light
|
1c4d050314d8dc42ebf11491b3421c511e2718f3
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-10-19 16:09
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maps', '0011_auto_20201019_1839'),
]
operations = [
migrations.AlterField(
model_name='trafficsignal',
name='timer',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 21, 39, 12, 862273)),
),
]
| 22.75
| 100
| 0.621978
|
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maps', '0011_auto_20201019_1839'),
]
operations = [
migrations.AlterField(
model_name='trafficsignal',
name='timer',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 21, 39, 12, 862273)),
),
]
| true
| true
|
f702d2497d469161032517772b4500ad115d0b1e
| 670
|
py
|
Python
|
kekangpai/band/migrations/0003_auto_20160725_1313.py
|
returnToZ/BandHelper
|
ce8ca3094c6cc4b05b213766710ba6263f41705d
|
[
"Apache-2.0"
] | null | null | null |
kekangpai/band/migrations/0003_auto_20160725_1313.py
|
returnToZ/BandHelper
|
ce8ca3094c6cc4b05b213766710ba6263f41705d
|
[
"Apache-2.0"
] | null | null | null |
kekangpai/band/migrations/0003_auto_20160725_1313.py
|
returnToZ/BandHelper
|
ce8ca3094c6cc4b05b213766710ba6263f41705d
|
[
"Apache-2.0"
] | 1
|
2021-12-15T02:31:09.000Z
|
2021-12-15T02:31:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-07-25 13:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('band', '0002_auto_20160725_1313'),
]
operations = [
migrations.RemoveField(
model_name='personal',
name='id',
),
migrations.AlterField(
model_name='personal',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='band.Account'),
),
]
| 25.769231
| 135
| 0.626866
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('band', '0002_auto_20160725_1313'),
]
operations = [
migrations.RemoveField(
model_name='personal',
name='id',
),
migrations.AlterField(
model_name='personal',
name='username',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='band.Account'),
),
]
| true
| true
|
f702d3674b7ec06859bfdd3f9e087d7eefda8de1
| 191,102
|
py
|
Python
|
imgaug/augmenters/size.py
|
fchouteau/imgaug
|
b282b97c13a27a32f91c2e2666db1e128e00cfde
|
[
"MIT"
] | 1
|
2020-02-26T01:05:12.000Z
|
2020-02-26T01:05:12.000Z
|
imgaug/augmenters/size.py
|
youbin2014/imgaug
|
b282b97c13a27a32f91c2e2666db1e128e00cfde
|
[
"MIT"
] | null | null | null |
imgaug/augmenters/size.py
|
youbin2014/imgaug
|
b282b97c13a27a32f91c2e2666db1e128e00cfde
|
[
"MIT"
] | null | null | null |
"""
Augmenters that somehow change the size of the images.
List of augmenters:
* :class:`Resize`
* :class:`CropAndPad`
* :class:`Crop`
* :class:`Pad`
* :class:`PadToFixedSize`
* :class:`CenterPadToFixedSize`
* :class:`CropToFixedSize`
* :class:`CenterCropToFixedSize`
* :class:`CropToMultiplesOf`
* :class:`CenterCropToMultiplesOf`
* :class:`PadToMultiplesOf`
* :class:`CenterPadToMultiplesOf`
* :class:`CropToPowersOf`
* :class:`CenterCropToPowersOf`
* :class:`PadToPowersOf`
* :class:`CenterPadToPowersOf`
* :class:`CropToAspectRatio`
* :class:`CenterCropToAspectRatio`
* :class:`PadToAspectRatio`
* :class:`CenterPadToAspectRatio`
* :class:`CropToSquare`
* :class:`CenterCropToSquare`
* :class:`PadToSquare`
* :class:`CenterPadToSquare`
* :class:`KeepSizeByResize`
"""
from __future__ import print_function, division, absolute_import
import re
import functools
import numpy as np
import cv2
import imgaug as ia
from imgaug.imgaug import _normalize_cv2_input_arr_
from . import meta
from .. import parameters as iap
def _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True):
if prevent_zero_size:
top, right, bottom, left = _crop_prevent_zero_size(
shape[0], shape[1], top, right, bottom, left)
height, width = shape[0:2]
x1 = left
x2 = width - right
y1 = top
y2 = height - bottom
    # These steps prevent negative sizes. If x2==x1 or y2==y1, the output
    # array has size 0 for the respective axis. Note that if the height/width
    # of arr is zero, then y2==y1 or x2==x1, which is still valid and simply
    # results in a zero-sized output axis.
x2 = max(x2, x1)
y2 = max(y2, y1)
return x1, y1, x2, y2
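# Illustrative sketch (calling this private helper directly, outside the
# normal augmenter flow): TRBL crop amounts on a 10x20 array become
# (x1, y1, x2, y2) coordinates.
# >>> _crop_trbl_to_xyxy((10, 20), top=2, right=3, bottom=1, left=4)
# (4, 2, 17, 9)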
def _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True):
x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left,
prevent_zero_size=prevent_zero_size)
return arr[y1:y2, x1:x2, ...]
def _crop_and_pad_arr(arr, croppings, paddings, pad_mode="constant",
pad_cval=0, keep_size=False):
height, width = arr.shape[0:2]
image_cr = _crop_arr_(arr, *croppings)
image_cr_pa = pad(
image_cr,
top=paddings[0], right=paddings[1],
bottom=paddings[2], left=paddings[3],
mode=pad_mode, cval=pad_cval)
if keep_size:
image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))
return image_cr_pa
def _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img,
pad_mode="constant", pad_cval=0.0, keep_size=False):
return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img,
paddings_img, pad_mode, pad_cval,
keep_size)
def _crop_and_pad_segmap_(segmap, croppings_img, paddings_img,
pad_mode="constant", pad_cval=0, keep_size=False):
return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img,
paddings_img, pad_mode, pad_cval,
keep_size)
def _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img,
paddings_img, pad_mode="constant",
pad_cval=None, keep_size=False):
if isinstance(augmentable, ia.HeatmapsOnImage):
arr_attr_name = "arr_0to1"
pad_cval = pad_cval if pad_cval is not None else 0.0
else:
assert isinstance(augmentable, ia.SegmentationMapsOnImage), (
"Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s." % (
type(augmentable)))
arr_attr_name = "arr"
pad_cval = pad_cval if pad_cval is not None else 0
arr = getattr(augmentable, arr_attr_name)
arr_shape_orig = arr.shape
augm_shape = augmentable.shape
croppings_proj = _project_size_changes(croppings_img, augm_shape, arr.shape)
paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape)
croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1],
*croppings_proj)
arr_cr = _crop_arr_(arr,
croppings_proj[0], croppings_proj[1],
croppings_proj[2], croppings_proj[3])
arr_cr_pa = pad(
arr_cr,
top=paddings_proj[0], right=paddings_proj[1],
bottom=paddings_proj[2], left=paddings_proj[3],
mode=pad_mode,
cval=pad_cval)
setattr(augmentable, arr_attr_name, arr_cr_pa)
if keep_size:
augmentable = augmentable.resize(arr_shape_orig[0:2])
else:
augmentable.shape = _compute_shape_after_crop_and_pad(
augmentable.shape, croppings_img, paddings_img)
return augmentable
def _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size):
# using the trbl function instead of croppings_img has the advantage
# of incorporating prevent_zero_size, dealing with zero-sized input image
    # axis and dealing with negative crop amounts
x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img)
crop_left = x1
crop_top = y1
shape_orig = kpsoi.shape
shifted = kpsoi.shift_(
x=-crop_left+paddings_img[3],
y=-crop_top+paddings_img[0])
shifted.shape = _compute_shape_after_crop_and_pad(
shape_orig, croppings_img, paddings_img)
if keep_size:
shifted = shifted.on_(shape_orig)
return shifted
def _compute_shape_after_crop_and_pad(old_shape, croppings, paddings):
x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings)
new_shape = list(old_shape)
new_shape[0] = y2 - y1 + paddings[0] + paddings[2]
new_shape[1] = x2 - x1 + paddings[1] + paddings[3]
return tuple(new_shape)
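# Illustrative sketch: cropping a (10, 10, 3) image by trbl=(1, 2, 1, 2) and
# padding it by trbl=(0, 1, 0, 1) yields an 8x8 image.
# >>> _compute_shape_after_crop_and_pad((10, 10, 3), (1, 2, 1, 2), (0, 1, 0, 1))
# (8, 8, 3)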
def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom,
crop_left):
remaining_height = height - (crop_top + crop_bottom)
remaining_width = width - (crop_left + crop_right)
if remaining_height < 1:
regain = abs(remaining_height) + 1
regain_top = regain // 2
regain_bottom = regain // 2
if regain_top + regain_bottom < regain:
regain_top += 1
if regain_top > crop_top:
diff = regain_top - crop_top
regain_top = crop_top
regain_bottom += diff
elif regain_bottom > crop_bottom:
diff = regain_bottom - crop_bottom
regain_bottom = crop_bottom
regain_top += diff
crop_top = crop_top - regain_top
crop_bottom = crop_bottom - regain_bottom
if remaining_width < 1:
regain = abs(remaining_width) + 1
regain_right = regain // 2
regain_left = regain // 2
if regain_right + regain_left < regain:
regain_right += 1
if regain_right > crop_right:
diff = regain_right - crop_right
regain_right = crop_right
regain_left += diff
elif regain_left > crop_left:
diff = regain_left - crop_left
regain_left = crop_left
regain_right += diff
crop_right = crop_right - regain_right
crop_left = crop_left - regain_left
return (
max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0),
max(crop_left, 0))
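# Illustrative sketch: cropping 2px at both top and bottom of a 4px-high
# image would leave zero height, so one pixel is regained at the top.
# >>> _crop_prevent_zero_size(4, 4, 2, 0, 2, 0)
# (1, 0, 2, 0)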
def _project_size_changes(trbl, from_shape, to_shape):
if from_shape[0:2] == to_shape[0:2]:
return trbl
height_to = to_shape[0]
width_to = to_shape[1]
height_from = from_shape[0]
width_from = from_shape[1]
top = trbl[0]
right = trbl[1]
bottom = trbl[2]
left = trbl[3]
# Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap
# is exactly half the size of an image and the size change on an axis is
# an odd value. Then the projected value would end up being <something>.5
# and the rounding would always round up to the next integer. If both
# sides then have the same change, they are both rounded up, resulting
# in more change than expected.
# E.g. image height is 8, map height is 4, change is 3 at the top and 3 at
# the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded
# up to 2.0. Hence, the maps are changed by 4 (100% of the map height,
# vs. 6 for images, which is 75% of the image height).
top = _int_r(height_to * (top/height_from) - 1e-4)
right = _int_r(width_to * (right/width_from) + 1e-4)
bottom = _int_r(height_to * (bottom/height_from) + 1e-4)
left = _int_r(width_to * (left/width_from) - 1e-4)
return top, right, bottom, left
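# Worked example for the rounding note above: top/bottom changes of 3 on an
# 8px-high image project onto a 4px-high map as 1 and 2, not 2 and 2.
# >>> _project_size_changes((3, 0, 3, 0), (8, 8), (4, 4))
# (1, 0, 2, 0)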
def _int_r(value):
return int(np.round(value))
# TODO somehow integrate this with pad()
def _handle_pad_mode_param(pad_mode):
pad_modes_available = {
"constant", "edge", "linear_ramp", "maximum", "mean", "median",
"minimum", "reflect", "symmetric", "wrap"}
if pad_mode == ia.ALL:
return iap.Choice(list(pad_modes_available))
if ia.is_string(pad_mode):
assert pad_mode in pad_modes_available, (
"Value '%s' is not a valid pad mode. Valid pad modes are: %s." % (
pad_mode, ", ".join(pad_modes_available)))
return iap.Deterministic(pad_mode)
if isinstance(pad_mode, list):
assert all([v in pad_modes_available for v in pad_mode]), (
"At least one in list %s is not a valid pad mode. Valid pad "
"modes are: %s." % (str(pad_mode), ", ".join(pad_modes_available)))
return iap.Choice(pad_mode)
if isinstance(pad_mode, iap.StochasticParameter):
return pad_mode
raise Exception(
"Expected pad_mode to be ia.ALL or string or list of strings or "
"StochasticParameter, got %s." % (type(pad_mode),))
def _handle_position_parameter(position):
if position == "uniform":
return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0)
if position == "normal":
return (
iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),
minval=0.0, maxval=1.0),
iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),
minval=0.0, maxval=1.0)
)
if position == "center":
return iap.Deterministic(0.5), iap.Deterministic(0.5)
if (ia.is_string(position)
and re.match(r"^(left|center|right)-(top|center|bottom)$",
position)):
mapping = {"top": 0.0, "center": 0.5, "bottom": 1.0, "left": 0.0,
"right": 1.0}
return (
iap.Deterministic(mapping[position.split("-")[0]]),
iap.Deterministic(mapping[position.split("-")[1]])
)
if isinstance(position, iap.StochasticParameter):
return position
if isinstance(position, tuple):
assert len(position) == 2, (
"Expected tuple with two entries as position parameter. "
"Got %d entries with types %s.." % (
len(position), str([type(item) for item in position])))
for item in position:
if ia.is_single_number(item) and (item < 0 or item > 1.0):
raise Exception(
"Both position values must be within the value range "
"[0.0, 1.0]. Got type %s with value %.8f." % (
type(item), item,))
position = [iap.Deterministic(item)
if ia.is_single_number(item)
else item for item in position]
only_sparams = all([isinstance(item, iap.StochasticParameter)
for item in position])
assert only_sparams, (
"Expected tuple with two entries that are both either "
"StochasticParameter or float/int. Got types %s." % (
str([type(item) for item in position])
))
return tuple(position)
raise Exception(
"Expected one of the following as position parameter: string "
"'uniform', string 'normal', string 'center', a string matching "
"regex ^(left|center|right)-(top|center|bottom)$, a single "
"StochasticParameter or a tuple of two entries, both being either "
"StochasticParameter or floats or int. Got instead type %s with "
"content '%s'." % (
type(position),
(str(position)
if len(str(position)) < 20
else str(position)[0:20] + "...")
)
)
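# Illustrative sketch (assumes iap.Deterministic exposes its value via the
# ``value`` attribute, as in current imgaug versions):
# >>> x, y = _handle_position_parameter("left-top")
# >>> (x.value, y.value)
# (0.0, 0.0)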
# TODO this is the same as in imgaug.py, make DRY
def _assert_two_or_three_dims(shape):
if hasattr(shape, "shape"):
shape = shape.shape
assert len(shape) in [2, 3], (
"Expected image with two or three dimensions, but got %d dimensions "
"and shape %s." % (len(shape), shape))
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
"""Pad an image-like array on its top/right/bottom/left side.
This function is a wrapper around :func:`numpy.pad`.
Supported dtypes
----------------
* ``uint8``: yes; fully tested (1)
* ``uint16``: yes; fully tested (1)
* ``uint32``: yes; fully tested (2) (3)
* ``uint64``: yes; fully tested (2) (3)
* ``int8``: yes; fully tested (1)
* ``int16``: yes; fully tested (1)
* ``int32``: yes; fully tested (1)
* ``int64``: yes; fully tested (2) (3)
* ``float16``: yes; fully tested (2) (3)
* ``float32``: yes; fully tested (1)
* ``float64``: yes; fully tested (1)
* ``float128``: yes; fully tested (2) (3)
* ``bool``: yes; tested (2) (3)
- (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``,
``"reflect"``, ``"symmetric"``. Otherwise uses ``numpy``.
- (2) Uses ``numpy``.
- (3) Rejected by ``cv2``.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
top : int, optional
Amount of pixels to add to the top side of the image.
Must be ``0`` or greater.
right : int, optional
Amount of pixels to add to the right side of the image.
Must be ``0`` or greater.
bottom : int, optional
Amount of pixels to add to the bottom side of the image.
Must be ``0`` or greater.
left : int, optional
Amount of pixels to add to the left side of the image.
Must be ``0`` or greater.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
In case of mode ``constant``, the parameter `cval` will be used as
the ``constant_values`` parameter to :func:`numpy.pad`.
In case of mode ``linear_ramp``, the parameter `cval` will be used as
the ``end_values`` parameter to :func:`numpy.pad`.
cval : number or iterable of number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`numpy.pad` for details. The cval is expected to match the
input array's dtype and value range. If an iterable is used, it is
expected to contain one value per channel. The number of values
and number of channels are expected to match.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Padded array with height ``H'=H+top+bottom`` and width
``W'=W+left+right``.
"""
import imgaug.dtypes as iadt
_assert_two_or_three_dims(arr)
assert all([v >= 0 for v in [top, right, bottom, left]]), (
"Expected padding amounts that are >=0, but got %d, %d, %d, %d "
"(top, right, bottom, left)" % (top, right, bottom, left))
is_multi_cval = ia.is_iterable(cval)
if top > 0 or right > 0 or bottom > 0 or left > 0:
min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype)
        # Without this if-block there are crashes for float128, e.g. when
        # cval is an int (plain float(cval) does not seem to be accurate
        # enough).
if arr.dtype.name == "float128":
cval = np.float128(cval) # pylint: disable=no-member
if is_multi_cval:
cval = np.clip(cval, min_value, max_value)
else:
cval = max(min(cval, max_value), min_value)
# Note that copyMakeBorder() hangs/runs endlessly if arr has an
# axis of size 0 and mode is "reflect".
# Numpy also complains in these cases if mode is not "constant".
has_zero_sized_axis = any([axis == 0 for axis in arr.shape])
if has_zero_sized_axis:
mode = "constant"
mapping_mode_np_to_cv2 = {
"constant": cv2.BORDER_CONSTANT,
"edge": cv2.BORDER_REPLICATE,
"linear_ramp": None,
"maximum": None,
"mean": None,
"median": None,
"minimum": None,
"reflect": cv2.BORDER_REFLECT_101,
"symmetric": cv2.BORDER_REFLECT,
"wrap": None,
cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
}
bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
# these datatypes all simply generate a "TypeError: src data type = X
# is not supported" error
bad_datatype_cv2 = (
arr.dtype.name
in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
)
        # OpenCV turns the channel axis to 512 for arrays with 0 channels
# TODO add direct test for this. indirectly tested via Pad
bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0)
if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2:
# convert cval to expected type, as otherwise we get TypeError
# for np inputs
kind = arr.dtype.kind
if is_multi_cval:
cval = [float(cval_c) if kind == "f" else int(cval_c)
for cval_c in cval]
else:
cval = float(cval) if kind == "f" else int(cval)
if arr.ndim == 2 or arr.shape[2] <= 4:
# without this, only the first channel is padded with the cval,
# all following channels with 0
if arr.ndim == 3 and not is_multi_cval:
cval = tuple([cval] * arr.shape[2])
arr_pad = cv2.copyMakeBorder(
_normalize_cv2_input_arr_(arr),
top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval)
if arr.ndim == 3 and arr_pad.ndim == 2:
arr_pad = arr_pad[..., np.newaxis]
else:
result = []
channel_start_idx = 0
cval = cval if is_multi_cval else tuple([cval] * arr.shape[2])
while channel_start_idx < arr.shape[2]:
arr_c = arr[..., channel_start_idx:channel_start_idx+4]
cval_c = cval[channel_start_idx:channel_start_idx+4]
arr_pad_c = cv2.copyMakeBorder(
_normalize_cv2_input_arr_(arr_c),
top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
arr_pad_c = np.atleast_3d(arr_pad_c)
result.append(arr_pad_c)
channel_start_idx += 4
arr_pad = np.concatenate(result, axis=2)
else:
# paddings for 2d case
paddings_np = [(top, bottom), (left, right)]
# add paddings for 3d case
if arr.ndim == 3:
paddings_np.append((0, 0))
if mode == "constant":
if arr.ndim > 2 and is_multi_cval:
arr_pad_chans = [
np.pad(arr[..., c], paddings_np[0:2], mode=mode,
constant_values=cval[c])
for c in np.arange(arr.shape[2])]
arr_pad = np.stack(arr_pad_chans, axis=-1)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode,
constant_values=cval)
elif mode == "linear_ramp":
if arr.ndim > 2 and is_multi_cval:
arr_pad_chans = [
np.pad(arr[..., c], paddings_np[0:2], mode=mode,
end_values=cval[c])
for c in np.arange(arr.shape[2])]
arr_pad = np.stack(arr_pad_chans, axis=-1)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode,
end_values=cval)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode)
return arr_pad
return np.copy(arr)
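# Usage sketch for pad(), assuming numpy is available as np (imported above):
# >>> arr = np.zeros((2, 2), dtype=np.uint8)
# >>> pad(arr, top=1, right=1, mode="constant", cval=255).shape
# (3, 3)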
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0,
return_pad_amounts=False):
"""Pad an image array on its sides so that it matches a target aspect ratio.
See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an
explanation of how the required padding amounts are distributed per
image axis.
Supported dtypes
----------------
See :func:`~imgaug.augmenters.size.pad`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the
image having twice as much width as height.
mode : str, optional
Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If ``False``, then only the padded image will be returned. If
``True``, a ``tuple`` with two entries will be returned, where the
first entry is the padded image and the second entry are the amounts
by which each image side was padded. These amounts are again a
``tuple`` of the form ``(top, right, bottom, left)``, with each value
being an ``int``.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray, fulfilling the
given `aspect_ratio`.
tuple of int
Amounts by which the image was padded on each side, given as a
``tuple`` ``(top, right, bottom, left)``.
This ``tuple`` is only returned if `return_pad_amounts` was set to
``True``.
"""
pad_top, pad_right, pad_bottom, pad_left = \
compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
return arr_padded
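# Usage sketch: a 4x2 image padded to aspect ratio 2.0 (width/height) gains
# three pixels on each of the left and right sides.
# >>> pad_to_aspect_ratio(np.zeros((4, 2), dtype=np.uint8), 2.0).shape
# (4, 8)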
def pad_to_multiples_of(arr, height_multiple, width_multiple, mode="constant",
cval=0, return_pad_amounts=False):
"""Pad an image array until its side lengths are multiples of given values.
See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an
explanation of how the required padding amounts are distributed per
image axis.
Supported dtypes
----------------
See :func:`~imgaug.augmenters.size.pad`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Image-like array to pad.
height_multiple : None or int
The desired multiple of the height. The computed padding amount will
reflect a padding that increases the y axis size until it is a multiple
of this value.
width_multiple : None or int
The desired multiple of the width. The computed padding amount will
reflect a padding that increases the x axis size until it is a multiple
of this value.
mode : str, optional
Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If ``False``, then only the padded image will be returned. If
``True``, a ``tuple`` with two entries will be returned, where the
first entry is the padded image and the second entry are the amounts
by which each image side was padded. These amounts are again a
``tuple`` of the form ``(top, right, bottom, left)``, with each value
being an integer.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray.
tuple of int
Amounts by which the image was padded on each side, given as a
``tuple`` ``(top, right, bottom, left)``.
This ``tuple`` is only returned if `return_pad_amounts` was set to
``True``.
"""
pad_top, pad_right, pad_bottom, pad_left = \
compute_paddings_to_reach_multiples_of(
arr, height_multiple, width_multiple)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
return arr_padded
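# Usage sketch: pad a 5x6 image until both side lengths are multiples of 4.
# >>> pad_to_multiples_of(np.zeros((5, 6), dtype=np.uint8), 4, 4).shape
# (8, 8)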
def compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio):
"""Compute pad amounts required to fulfill an aspect ratio.
"Pad amounts" here denotes the number of pixels that have to be added to
each side to fulfill the desired constraint.
The aspect ratio is given as ``ratio = width / height``.
Depending on which dimension is smaller (height or width), only the
corresponding sides (top/bottom or left/right) will be padded.
The axis-wise padding amounts are always distributed equally over the
sides of the respective axis (i.e. left and right, top and bottom). For
odd pixel amounts, one pixel will be left over after the equal
distribution and could be added to either side of the axis. This function
will always add such a left over pixel to the bottom (y-axis) or
right (x-axis) side.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute pad amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the
image having twice as much width as height.
Returns
-------
tuple of int
Required padding amounts to reach the target aspect ratio, given as a
``tuple`` of the form ``(top, right, bottom, left)``.
"""
_assert_two_or_three_dims(arr)
assert aspect_ratio > 0, (
"Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,))
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
if height == 0:
height = 1
pad_bottom += 1
if width == 0:
width = 1
pad_right += 1
aspect_ratio_current = width / height
if aspect_ratio_current < aspect_ratio:
# image is more vertical than desired, width needs to be increased
diff = (aspect_ratio * height) - width
pad_right += int(np.ceil(diff / 2))
pad_left += int(np.floor(diff / 2))
elif aspect_ratio_current > aspect_ratio:
# image is more horizontal than desired, height needs to be increased
diff = ((1/aspect_ratio) * width) - height
pad_top += int(np.floor(diff / 2))
pad_bottom += int(np.ceil(diff / 2))
return pad_top, pad_right, pad_bottom, pad_left
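# Worked example: a 4x2 shape needs width 8 to reach ratio 2.0; the six
# missing pixels are split 3/3 between the left and right sides.
# >>> compute_paddings_to_reach_aspect_ratio((4, 2), 2.0)
# (0, 3, 0, 3)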
def compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio):
"""Compute crop amounts required to fulfill an aspect ratio.
"Crop amounts" here denotes the number of pixels that have to be removed
from each side to fulfill the desired constraint.
The aspect ratio is given as ``ratio = width / height``.
Depending on which dimension is smaller (height or width), only the
corresponding sides (top/bottom or left/right) will be cropped.
The axis-wise padding amounts are always distributed equally over the
sides of the respective axis (i.e. left and right, top and bottom). For
odd pixel amounts, one pixel will be left over after the equal
distribution and could be added to either side of the axis. This function
will always add such a left over pixel to the bottom (y-axis) or
right (x-axis) side.
If an aspect ratio cannot be reached exactly, this function will return
rather one pixel too few than one pixel too many.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute crop amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the
image having twice as much width as height.
Returns
-------
tuple of int
Required cropping amounts to reach the target aspect ratio, given as a
``tuple`` of the form ``(top, right, bottom, left)``.
"""
_assert_two_or_three_dims(arr)
assert aspect_ratio > 0, (
"Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,))
shape = arr.shape if hasattr(arr, "shape") else arr
assert shape[0] > 0, (
"Expected to get an array with height >0, got shape %s." % (shape,))
height, width = shape[0:2]
aspect_ratio_current = width / height
top = 0
right = 0
bottom = 0
left = 0
if aspect_ratio_current < aspect_ratio:
# image is more vertical than desired, height needs to be reduced
# c = H - W/r
crop_amount = height - (width / aspect_ratio)
crop_amount = min(crop_amount, height - 1)
top = int(np.floor(crop_amount / 2))
bottom = int(np.ceil(crop_amount / 2))
elif aspect_ratio_current > aspect_ratio:
# image is more horizontal than desired, width needs to be reduced
# c = W - Hr
crop_amount = width - height * aspect_ratio
crop_amount = min(crop_amount, width - 1)
left = int(np.floor(crop_amount / 2))
right = int(np.ceil(crop_amount / 2))
return top, right, bottom, left
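# Worked example: a square 4x4 shape reaches ratio 2.0 by removing two rows,
# split evenly between top and bottom.
# >>> compute_croppings_to_reach_aspect_ratio((4, 4), 2.0)
# (1, 0, 1, 0)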
def compute_paddings_to_reach_multiples_of(arr, height_multiple,
width_multiple):
"""Compute pad amounts until img height/width are multiples of given values.
See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an
explanation of how the required padding amounts are distributed per
image axis.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute pad amounts.
height_multiple : None or int
The desired multiple of the height. The computed padding amount will
reflect a padding that increases the y axis size until it is a multiple
of this value.
width_multiple : None or int
The desired multiple of the width. The computed padding amount will
reflect a padding that increases the x axis size until it is a multiple
of this value.
Returns
-------
tuple of int
Required padding amounts to reach multiples of the provided values,
given as a ``tuple`` of the form ``(top, right, bottom, left)``.
"""
def _compute_axis_value(axis_size, multiple):
if multiple is None:
return 0, 0
if axis_size == 0:
to_pad = multiple
elif axis_size % multiple == 0:
to_pad = 0
else:
to_pad = multiple - (axis_size % multiple)
return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))
_assert_two_or_three_dims(arr)
if height_multiple is not None:
assert height_multiple > 0, (
"Can only pad to multiples of 1 or larger, got %d." % (
height_multiple,))
if width_multiple is not None:
assert width_multiple > 0, (
"Can only pad to multiples of 1 or larger, got %d." % (
width_multiple,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_multiple)
left, right = _compute_axis_value(width, width_multiple)
return top, right, bottom, left
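# Worked example: height 5 needs 3 more pixels to reach a multiple of 4
# (1 top, 2 bottom); width 6 needs 2 (1 left, 1 right).
# >>> compute_paddings_to_reach_multiples_of((5, 6), 4, 4)
# (1, 1, 2, 1)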
def compute_croppings_to_reach_multiples_of(arr, height_multiple,
width_multiple):
"""Compute croppings to reach multiples of given heights/widths.
See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an
explanation of how the required cropping amounts are distributed per
image axis.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute crop amounts.
height_multiple : None or int
The desired multiple of the height. The computed croppings will
reflect a crop operation that decreases the y axis size until it is
a multiple of this value.
width_multiple : None or int
The desired multiple of the width. The computed croppings amount will
reflect a crop operation that decreases the x axis size until it is
a multiple of this value.
Returns
-------
tuple of int
Required cropping amounts to reach multiples of the provided values,
given as a ``tuple`` of the form ``(top, right, bottom, left)``.
"""
def _compute_axis_value(axis_size, multiple):
if multiple is None:
return 0, 0
if axis_size == 0:
to_crop = 0
elif axis_size % multiple == 0:
to_crop = 0
else:
to_crop = axis_size % multiple
return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))
_assert_two_or_three_dims(arr)
if height_multiple is not None:
assert height_multiple > 0, (
"Can only crop to multiples of 1 or larger, got %d." % (
height_multiple,))
if width_multiple is not None:
assert width_multiple > 0, (
"Can only crop to multiples of 1 or larger, got %d." % (
width_multiple,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_multiple)
left, right = _compute_axis_value(width, width_multiple)
return top, right, bottom, left
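# Worked example: height 5 loses one pixel (bottom) and width 6 loses two
# (1 left, 1 right) to reach multiples of 4.
# >>> compute_croppings_to_reach_multiples_of((5, 6), 4, 4)
# (0, 1, 1, 1)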
def compute_paddings_to_reach_powers_of(arr, height_base, width_base,
allow_zero_exponent=False):
"""Compute paddings to reach powers of given base values.
For given axis size ``S``, padded size ``S'`` (``S' >= S``) and base ``B``
this function computes paddings that fulfill ``S' = B^E``, where ``E``
is any exponent from the discrete interval ``[0 .. inf)``.
See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an
explanation of how the required padding amounts are distributed per
image axis.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute pad amounts.
height_base : None or int
The desired base of the height.
width_base : None or int
The desired base of the width.
allow_zero_exponent : bool, optional
Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes
with size ``0`` or ``1`` will be padded up to size ``B^0=1`` and
axes with size ``1 < S <= B`` will be padded up to ``B^1=B``.
If ``False``, the minimum output axis size is always at least ``B``.
Returns
-------
tuple of int
Required padding amounts to fulfill ``S' = B^E`` given as a
``tuple`` of the form ``(top, right, bottom, left)``.
"""
def _compute_axis_value(axis_size, base):
if base is None:
return 0, 0
if axis_size == 0:
to_pad = 1 if allow_zero_exponent else base
elif axis_size <= base:
to_pad = base - axis_size
else:
# log_{base}(axis_size) in numpy
exponent = np.log(axis_size) / np.log(base)
to_pad = (base ** int(np.ceil(exponent))) - axis_size
return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))
_assert_two_or_three_dims(arr)
if height_base is not None:
assert height_base > 1, (
"Can only pad to base larger than 1, got %d." % (height_base,))
if width_base is not None:
assert width_base > 1, (
"Can only pad to base larger than 1, got %d." % (width_base,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_base)
left, right = _compute_axis_value(width, width_base)
return top, right, bottom, left
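# Worked example: with base 2, height 5 and width 6 are both padded up to
# 8 = 2**3.
# >>> compute_paddings_to_reach_powers_of((5, 6), 2, 2)
# (1, 1, 2, 1)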
def compute_croppings_to_reach_powers_of(arr, height_base, width_base,
allow_zero_exponent=False):
"""Compute croppings to reach powers of given base values.
For given axis size ``S``, cropped size ``S'`` (``S' <= S``) and base ``B``
this function computes croppings that fulfill ``S' = B^E``, where ``E``
is any exponent from the discrete interval ``[0 .. inf)``.
See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an
explanation of how the required cropping amounts are distributed per
image axis.
.. note::
        For axes where ``S == 0``, this function always returns zeros as
croppings.
For axes where ``1 <= S < B`` see parameter `allow_zero_exponent`.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute crop amounts.
height_base : None or int
The desired base of the height.
width_base : None or int
The desired base of the width.
allow_zero_exponent : bool
Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes
with size ``1 <= S < B`` will be cropped to size ``B^0=1``.
If ``False``, axes with sizes ``S < B`` will not be changed.
Returns
-------
tuple of int
Required cropping amounts to fulfill ``S' = B^E`` given as a
``tuple`` of the form ``(top, right, bottom, left)``.
"""
def _compute_axis_value(axis_size, base):
if base is None:
return 0, 0
if axis_size == 0:
to_crop = 0
elif axis_size < base:
# crop down to B^0 = 1
to_crop = axis_size - 1 if allow_zero_exponent else 0
else:
# log_{base}(axis_size) in numpy
exponent = np.log(axis_size) / np.log(base)
to_crop = axis_size - (base ** int(exponent))
return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))
_assert_two_or_three_dims(arr)
if height_base is not None:
assert height_base > 1, (
"Can only crop to base larger than 1, got %d." % (height_base,))
if width_base is not None:
assert width_base > 1, (
"Can only crop to base larger than 1, got %d." % (width_base,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_base)
left, right = _compute_axis_value(width, width_base)
return top, right, bottom, left
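# Worked example: with base 2, height 5 and width 6 are both cropped down to
# 4 = 2**2.
# >>> compute_croppings_to_reach_powers_of((5, 6), 2, 2)
# (0, 1, 1, 1)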
@ia.deprecated(alt_func="Resize",
comment="Resize has the exactly same interface as Scale.")
def Scale(*args, **kwargs):
"""Augmenter that resizes images to specified heights and widths."""
# pylint: disable=invalid-name
return Resize(*args, **kwargs)
class Resize(meta.Augmenter):
"""Augmenter that resizes images to specified heights and widths.
Supported dtypes
----------------
See :func:`~imgaug.imgaug.imresize_many_images`.
Parameters
----------
size : 'keep' or int or float or tuple of int or tuple of float or list of int or list of float or imgaug.parameters.StochasticParameter or dict
The new size of the images.
* If this has the string value ``keep``, the original height and
width values will be kept (image is not resized).
* If this is an ``int``, this value will always be used as the new
height and width of the images.
* If this is a ``float`` ``v``, then per image the image's height
``H`` and width ``W`` will be changed to ``H*v`` and ``W*v``.
* If this is a ``tuple``, it is expected to have two entries
``(a, b)``. If at least one of these are ``float`` s, a value
will be sampled from range ``[a, b]`` and used as the ``float``
value to resize the image (see above). If both are ``int`` s, a
value will be sampled from the discrete range ``[a..b]`` and
used as the integer value to resize the image (see above).
* If this is a ``list``, a random value from the ``list`` will be
picked to resize the image. All values in the ``list`` must be
``int`` s or ``float`` s (no mixture is possible).
* If this is a ``StochasticParameter``, then this parameter will
first be queried once per image. The resulting value will be used
for both height and width.
* If this is a ``dict``, it may contain the keys ``height`` and
``width`` or the keys ``shorter-side`` and ``longer-side``. Each
key may have the same datatypes as above and describes the
scaling on x and y-axis or the shorter and longer axis,
          respectively. Both axes are sampled independently. Additionally,
one of the keys may have the value ``keep-aspect-ratio``, which
means that the respective side of the image will be resized so
          that the original aspect ratio is kept. This is useful when
          resizing only one side to a fixed pixel value (e.g. resize images to
a height of ``64`` pixels and resize the width so that the
overall aspect ratio is maintained).
interpolation : imgaug.ALL or int or str or list of int or list of str or imgaug.parameters.StochasticParameter, optional
Interpolation to use.
* If ``imgaug.ALL``, then a random interpolation from ``nearest``,
``linear``, ``area`` or ``cubic`` will be picked (per image).
* If ``int``, then this interpolation will always be used.
Expected to be any of the following:
``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``,
``cv2.INTER_CUBIC``
* If string, then this interpolation will always be used.
Expected to be any of the following:
``nearest``, ``linear``, ``area``, ``cubic``
* If ``list`` of ``int`` / ``str``, then a random one of the values
will be picked per image as the interpolation.
* If a ``StochasticParameter``, then this parameter will be
queried per image and is expected to return an ``int`` or
``str``.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Resize(32)
Resize all images to ``32x32`` pixels.
>>> aug = iaa.Resize(0.5)
Resize all images to ``50`` percent of their original size.
>>> aug = iaa.Resize((16, 22))
    Resize all images to a random height and width within the discrete
    interval ``[16..22]``. One value is uniformly sampled per image and
    used for both its height and width.
>>> aug = iaa.Resize((0.5, 0.75))
    Resize any input image so that its height (``H``) and width (``W``)
become ``H*v`` and ``W*v``, where ``v`` is uniformly sampled from the
interval ``[0.5, 0.75]``.
>>> aug = iaa.Resize([16, 32, 64])
Resize all images either to ``16x16``, ``32x32`` or ``64x64`` pixels.
>>> aug = iaa.Resize({"height": 32})
    Resize all images to a height of ``32`` pixels and keep the original
width.
>>> aug = iaa.Resize({"height": 32, "width": 48})
Resize all images to a height of ``32`` pixels and a width of ``48``.
>>> aug = iaa.Resize({"height": 32, "width": "keep-aspect-ratio"})
    Resize all images to a height of ``32`` pixels and resize the
x-axis (width) so that the aspect ratio is maintained.
>>> aug = iaa.Resize(
>>> {"shorter-side": 224, "longer-side": "keep-aspect-ratio"})
Resize all images to a height/width of ``224`` pixels, depending on which
axis is shorter and resize the other axis so that the aspect ratio is
maintained.
>>> aug = iaa.Resize({"height": (0.5, 0.75), "width": [16, 32, 64]})
Resize all images to a height of ``H*v``, where ``H`` is the original
height and ``v`` is a random value sampled from the interval
``[0.5, 0.75]``. The width/x-axis of each image is resized to either
``16`` or ``32`` or ``64`` pixels.
>>> aug = iaa.Resize(32, interpolation=["linear", "cubic"])
Resize all images to ``32x32`` pixels. Randomly use either ``linear``
or ``cubic`` interpolation.
"""
def __init__(self, size, interpolation="cubic",
seed=None, name=None, **old_kwargs):
super(Resize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.size, self.size_order = self._handle_size_arg(size, False)
self.interpolation = self._handle_interpolation_arg(interpolation)
@classmethod
def _handle_size_arg(cls, size, subcall):
def _dict_to_size_tuple(val1, val2):
kaa = "keep-aspect-ratio"
not_both_kaa = (val1 != kaa or val2 != kaa)
assert not_both_kaa, (
"Expected at least one value to not be \"keep-aspect-ratio\", "
"but got it two times.")
size_tuple = []
for k in [val1, val2]:
if k in ["keep-aspect-ratio", "keep"]:
entry = iap.Deterministic(k)
else:
entry = cls._handle_size_arg(k, True)
size_tuple.append(entry)
return tuple(size_tuple)
def _contains_any_key(dict_, keys):
return any([key in dict_ for key in keys])
# HW = height, width
# SL = shorter, longer
size_order = "HW"
if size == "keep":
result = iap.Deterministic("keep")
elif ia.is_single_number(size):
assert size > 0, "Expected only values > 0, got %s" % (size,)
result = iap.Deterministic(size)
elif not subcall and isinstance(size, dict):
if len(size.keys()) == 0:
result = iap.Deterministic("keep")
elif _contains_any_key(size, ["height", "width"]):
height = size.get("height", "keep")
width = size.get("width", "keep")
result = _dict_to_size_tuple(height, width)
elif _contains_any_key(size, ["shorter-side", "longer-side"]):
shorter = size.get("shorter-side", "keep")
longer = size.get("longer-side", "keep")
result = _dict_to_size_tuple(shorter, longer)
size_order = "SL"
else:
raise ValueError(
"Expected dictionary containing no keys, "
"the keys \"height\" and/or \"width\", "
"or the keys \"shorter-side\" and/or \"longer-side\". "
"Got keys: %s." % (str(size.keys()),))
elif isinstance(size, tuple):
assert len(size) == 2, (
"Expected size tuple to contain exactly 2 values, "
"got %d." % (len(size),))
assert size[0] > 0 and size[1] > 0, (
"Expected size tuple to only contain values >0, "
"got %d and %d." % (size[0], size[1]))
if ia.is_single_float(size[0]) or ia.is_single_float(size[1]):
result = iap.Uniform(size[0], size[1])
else:
result = iap.DiscreteUniform(size[0], size[1])
elif isinstance(size, list):
if len(size) == 0:
result = iap.Deterministic("keep")
else:
all_int = all([ia.is_single_integer(v) for v in size])
all_float = all([ia.is_single_float(v) for v in size])
assert all_int or all_float, (
"Expected to get only integers or floats.")
assert all([v > 0 for v in size]), (
"Expected all values to be >0.")
result = iap.Choice(size)
elif isinstance(size, iap.StochasticParameter):
result = size
else:
raise ValueError(
"Expected number, tuple of two numbers, list of numbers, "
"dictionary of form "
"{'height': number/tuple/list/'keep-aspect-ratio'/'keep', "
"'width': <analogous>}, dictionary of form "
"{'shorter-side': number/tuple/list/'keep-aspect-ratio'/"
"'keep', 'longer-side': <analogous>} "
"or StochasticParameter, got %s." % (type(size),)
)
if subcall:
return result
return result, size_order
@classmethod
def _handle_interpolation_arg(cls, interpolation):
if interpolation == ia.ALL:
interpolation = iap.Choice(
["nearest", "linear", "area", "cubic"])
elif ia.is_single_integer(interpolation):
interpolation = iap.Deterministic(interpolation)
elif ia.is_string(interpolation):
interpolation = iap.Deterministic(interpolation)
elif ia.is_iterable(interpolation):
interpolation = iap.Choice(interpolation)
elif isinstance(interpolation, iap.StochasticParameter):
pass
else:
raise Exception(
"Expected int or string or iterable or StochasticParameter, "
"got %s." % (type(interpolation),))
return interpolation
def _augment_batch_(self, batch, random_state, parents, hooks):
nb_rows = batch.nb_rows
samples = self._draw_samples(nb_rows, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
# TODO this uses the same interpolation as for images for heatmaps
# while other augmenters resort to cubic
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, "arr_0to1", samples)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, "arr",
(samples[0], samples[1], [None] * nb_rows))
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
input_was_array = False
input_dtype = None
if ia.is_np_array(images):
input_was_array = True
input_dtype = images.dtype
samples_a, samples_b, samples_ip = samples
result = []
for i, image in enumerate(images):
h, w = self._compute_height_width(image.shape, samples_a[i],
samples_b[i], self.size_order)
image_rs = ia.imresize_single_image(image, (h, w),
interpolation=samples_ip[i])
result.append(image_rs)
if input_was_array:
all_same_size = (len({image.shape for image in result}) == 1)
if all_same_size:
result = np.array(result, dtype=input_dtype)
return result
def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples):
result = []
samples_h, samples_w, samples_ip = samples
for i, augmentable in enumerate(augmentables):
arr = getattr(augmentable, arr_attr_name)
arr_shape = arr.shape
img_shape = augmentable.shape
h_img, w_img = self._compute_height_width(
img_shape, samples_h[i], samples_w[i], self.size_order)
h = int(np.round(h_img * (arr_shape[0] / img_shape[0])))
w = int(np.round(w_img * (arr_shape[1] / img_shape[1])))
h = max(h, 1)
w = max(w, 1)
if samples_ip[0] is not None:
# TODO change this for heatmaps to always have cubic or
# automatic interpolation?
augmentable_resize = augmentable.resize(
(h, w), interpolation=samples_ip[i])
else:
augmentable_resize = augmentable.resize((h, w))
augmentable_resize.shape = (h_img, w_img) + img_shape[2:]
result.append(augmentable_resize)
return result
def _augment_keypoints_by_samples(self, kpsois, samples):
result = []
samples_a, samples_b, _samples_ip = samples
for i, kpsoi in enumerate(kpsois):
h, w = self._compute_height_width(
kpsoi.shape, samples_a[i], samples_b[i], self.size_order)
new_shape = (h, w) + kpsoi.shape[2:]
keypoints_on_image_rs = kpsoi.on_(new_shape)
result.append(keypoints_on_image_rs)
return result
def _draw_samples(self, nb_images, random_state):
rngs = random_state.duplicate(3)
if isinstance(self.size, tuple):
samples_h = self.size[0].draw_samples(nb_images,
random_state=rngs[0])
samples_w = self.size[1].draw_samples(nb_images,
random_state=rngs[1])
else:
samples_h = self.size.draw_samples(nb_images, random_state=rngs[0])
samples_w = samples_h
samples_ip = self.interpolation.draw_samples(nb_images,
random_state=rngs[2])
return samples_h, samples_w, samples_ip
@classmethod
def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order):
imh, imw = image_shape[0:2]
if size_order == 'SL':
# size order: short, long
if imh < imw:
h, w = sample_a, sample_b
else:
w, h = sample_a, sample_b
else:
# size order: height, width
h, w = sample_a, sample_b
if ia.is_single_float(h):
assert h > 0, "Expected 'h' to be >0, got %.4f" % (h,)
h = int(np.round(imh * h))
h = h if h > 0 else 1
elif h == "keep":
h = imh
if ia.is_single_float(w):
assert w > 0, "Expected 'w' to be >0, got %.4f" % (w,)
w = int(np.round(imw * w))
w = w if w > 0 else 1
elif w == "keep":
w = imw
        # The keep-aspect-ratio checks must come after the float checks, as
        # they depend on their results; this is also why they are not
        # written as elifs.
if h == "keep-aspect-ratio":
h_per_w_orig = imh / imw
h = int(np.round(w * h_per_w_orig))
if w == "keep-aspect-ratio":
w_per_h_orig = imw / imh
w = int(np.round(h * w_per_h_orig))
return h, w
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.size, self.interpolation, self.size_order]
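# Usage sketch for Resize (assumes the standard ``augment_image`` API from
# meta.Augmenter): fixing the height while keeping the aspect ratio also
# halves the width of a 64x128 image.
# >>> import numpy as np
# >>> import imgaug.augmenters as iaa
# >>> aug = iaa.Resize({"height": 32, "width": "keep-aspect-ratio"})
# >>> aug.augment_image(np.zeros((64, 128, 3), dtype=np.uint8)).shape
# (32, 64, 3)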
class _CropAndPadSamplingResult(object):
def __init__(self, crop_top, crop_right, crop_bottom, crop_left,
pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval):
self.crop_top = crop_top
self.crop_right = crop_right
self.crop_bottom = crop_bottom
self.crop_left = crop_left
self.pad_top = pad_top
self.pad_right = pad_right
self.pad_bottom = pad_bottom
self.pad_left = pad_left
self.pad_mode = pad_mode
self.pad_cval = pad_cval
@property
def croppings(self):
"""Get absolute pixel amounts of croppings as a TRBL tuple."""
return self.crop_top, self.crop_right, self.crop_bottom, self.crop_left
@property
def paddings(self):
"""Get absolute pixel amounts of paddings as a TRBL tuple."""
return self.pad_top, self.pad_right, self.pad_bottom, self.pad_left
class CropAndPad(meta.Augmenter):
"""Crop/pad images by pixel amounts or fractions of image sizes.
Cropping removes pixels at the sides (i.e. extracts a subimage from
a given full image). Padding adds pixels to the sides (e.g. black pixels).
This augmenter will never crop images below a height or width of ``1``.
.. note::
This augmenter automatically resizes images back to their original size
after it has augmented them. To deactivate this, add the
parameter ``keep_size=False``.
Supported dtypes
----------------
if (keep_size=False):
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: yes; tested
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: yes; tested
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested
* ``bool``: yes; tested
if (keep_size=True):
minimum of (
``imgaug.augmenters.size.CropAndPad(keep_size=False)``,
:func:`~imgaug.imgaug.imresize_many_images`
)
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to crop (negative values) or pad (positive values)
on each side of the image. Either this or the parameter `percent` may
be set, not both at the same time.
* If ``None``, then pixel-based cropping/padding will not be used.
* If ``int``, then that exact number of pixels will always be
cropped/padded.
* If ``StochasticParameter``, then that parameter will be used for
each image. Four samples will be drawn per image (top, right,
bottom, left), unless `sample_independently` is set to ``False``,
as then only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,
then each side will be cropped/padded by a random amount sampled
          uniformly per image and side from the interval ``[a, b]``. If
however `sample_independently` is set to ``False``, only one
value will be sampled per image and used for all sides.
* If a ``tuple`` of four entries, then the entries represent top,
right, bottom, left. Each entry may be a single ``int`` (always
crop/pad by exactly that value), a ``tuple`` of two ``int`` s
``a`` and ``b`` (crop/pad by an amount within ``[a, b]``), a
``list`` of ``int`` s (crop/pad by a random value that is
contained in the ``list``) or a ``StochasticParameter`` (sample
the amount to crop/pad from that parameter).
percent : None or number or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to crop (negative values) or pad (positive values)
on each side of the image given as a *fraction* of the image
height/width. E.g. if this is set to ``-0.1``, the augmenter will
always crop away ``10%`` of the image's height at both the top and the
bottom (both ``10%`` each), as well as ``10%`` of the width at the
right and left.
Expected value range is ``(-1.0, inf)``.
Either this or the parameter `px` may be set, not both
at the same time.
* If ``None``, then fraction-based cropping/padding will not be
used.
* If ``number``, then that fraction will always be cropped/padded.
* If ``StochasticParameter``, then that parameter will be used for
each image. Four samples will be drawn per image (top, right,
bottom, left). If however `sample_independently` is set to
``False``, only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,
then each side will be cropped/padded by a random fraction
sampled uniformly per image and side from the interval
``[a, b]``. If however `sample_independently` is set to
``False``, only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of four entries, then the entries represent top,
right, bottom, left. Each entry may be a single ``float``
(always crop/pad by exactly that percent value), a ``tuple`` of
two ``float`` s ``a`` and ``b`` (crop/pad by a fraction from
``[a, b]``), a ``list`` of ``float`` s (crop/pad by a random
value that is contained in the list) or a ``StochasticParameter``
(sample the percentage to crop/pad from that parameter).
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
Padding mode to use. The available modes match the numpy padding modes,
i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,
``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes
``constant`` and ``linear_ramp`` use extra values, which are provided
by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for
more details.
* If ``imgaug.ALL``, then a random mode from all available modes
will be sampled per image.
* If a ``str``, it will be used as the pad mode for all images.
* If a ``list`` of ``str``, a random one of these will be sampled
per image and used as the mode.
* If ``StochasticParameter``, a random mode will be sampled from
this parameter per image.
    pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
The constant value to use if the pad mode is ``constant`` or the end
value to use if the mode is ``linear_ramp``.
See :func:`~imgaug.imgaug.pad` for more details.
* If ``number``, then that value will be used.
* If a ``tuple`` of two ``number`` s and at least one of them is
a ``float``, then a random number will be uniformly sampled per
image from the continuous interval ``[a, b]`` and used as the
value. If both ``number`` s are ``int`` s, the interval is
discrete.
* If a ``list`` of ``number``, then a random value will be chosen
from the elements of the ``list`` and used as the value.
* If ``StochasticParameter``, a random value will be sampled from
that parameter per image.
keep_size : bool, optional
After cropping and padding, the result image will usually have a
different height/width compared to the original input image. If this
parameter is set to ``True``, then the cropped/padded image will be
resized to the input image's size, i.e. the augmenter's output shape
is always identical to the input shape.
sample_independently : bool, optional
If ``False`` *and* the values for `px`/`percent` result in exactly
*one* probability distribution for all image sides, only one single
value will be sampled from that probability distribution and used for
all sides. I.e. the crop/pad amount then is the same for all sides.
If ``True``, four values will be sampled independently, one per side.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CropAndPad(px=(-10, 0))
Crop each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[-10..0]``.
>>> aug = iaa.CropAndPad(px=(0, 10))
Pad each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. The padding happens by
zero-padding, i.e. it adds black pixels (default setting).
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode="edge")
Pad each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. The padding uses the
``edge`` mode from numpy's pad function, i.e. the pixel colors around
the image sides are repeated.
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=["constant", "edge"])
Similar to the previous example, but uses zero-padding (``constant``) for
half of the images and ``edge`` padding for the other half.
>>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))
Similar to the previous example, but uses any available padding mode.
In case the padding mode ends up being ``constant`` or ``linear_ramp``,
    a random intensity is uniformly sampled (once per image) from the
discrete interval ``[0..255]`` and used as the intensity of the new
pixels.
>>> aug = iaa.CropAndPad(px=(0, 10), sample_independently=False)
Pad each side by a random pixel value sampled uniformly once per image
from the discrete interval ``[0..10]``. Each sampled value is used
for *all* sides of the corresponding image.
>>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False)
Pad each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. Afterwards, do **not**
resize the padded image back to the input image's size. This will increase
the image's height and width by a maximum of ``20`` pixels.
>>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
Pad the top and bottom by a random pixel value sampled uniformly from the
discrete interval ``[0..10]``. Pad the left and right analogously by
a random value sampled from ``[0..5]``. Each value is always sampled
independently.
>>> aug = iaa.CropAndPad(percent=(0, 0.1))
Pad each side by a random fraction sampled uniformly from the continuous
interval ``[0.0, 0.10]``. The fraction is sampled once per image and
side. E.g. a sampled fraction of ``0.1`` for the top side would pad by
``0.1*H``, where ``H`` is the height of the input image.
>>> aug = iaa.CropAndPad(
>>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
    Pad each side by either ``5%`` or ``10%``. The values are sampled
once per side and image.
>>> aug = iaa.CropAndPad(px=(-10, 10))
Sample uniformly per image and side a value ``v`` from the discrete range
``[-10..10]``. Then either crop (negative sample) or pad (positive sample)
the side by ``v`` pixels.
"""
def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0,
keep_size=True, sample_independently=True,
seed=None, name=None, **old_kwargs):
# pylint: disable=invalid-name
super(CropAndPad, self).__init__(
seed=seed, name=name, **old_kwargs)
self.mode, self.all_sides, self.top, self.right, self.bottom, \
self.left = self._handle_px_and_percent_args(px, percent)
self.pad_mode = _handle_pad_mode_param(pad_mode)
# TODO enable ALL here, like in e.g. Affine
self.pad_cval = iap.handle_discrete_param(
pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.keep_size = keep_size
self.sample_independently = sample_independently
# set these to None to use the same values as sampled for the
# images (not tested)
self._pad_mode_heatmaps = "constant"
self._pad_mode_segmentation_maps = "constant"
self._pad_cval_heatmaps = 0.0
self._pad_cval_segmentation_maps = 0
@classmethod
def _handle_px_and_percent_args(cls, px, percent):
# pylint: disable=invalid-name
all_sides = None
top, right, bottom, left = None, None, None, None
if px is None and percent is None:
mode = "noop"
elif px is not None and percent is not None:
raise Exception("Can only pad by pixels or percent, not both.")
elif px is not None:
mode = "px"
all_sides, top, right, bottom, left = cls._handle_px_arg(px)
else: # = elif percent is not None:
mode = "percent"
all_sides, top, right, bottom, left = cls._handle_percent_arg(
percent)
return mode, all_sides, top, right, bottom, left
@classmethod
def _handle_px_arg(cls, px):
# pylint: disable=invalid-name
all_sides = None
top, right, bottom, left = None, None, None, None
if ia.is_single_integer(px):
all_sides = iap.Deterministic(px)
elif isinstance(px, tuple):
assert len(px) in [2, 4], (
"Expected 'px' given as a tuple to contain 2 or 4 "
"entries, got %d." % (len(px),))
def handle_param(p):
if ia.is_single_integer(p):
return iap.Deterministic(p)
if isinstance(p, tuple):
assert len(p) == 2, (
"Expected tuple of 2 values, got %d." % (len(p)))
only_ints = (
ia.is_single_integer(p[0])
and ia.is_single_integer(p[1]))
assert only_ints, (
"Expected tuple of integers, got %s and %s." % (
type(p[0]), type(p[1])))
return iap.DiscreteUniform(p[0], p[1])
if isinstance(p, list):
assert len(p) > 0, (
"Expected non-empty list, but got empty one.")
assert all([ia.is_single_integer(val) for val in p]), (
"Expected list of ints, got types %s." % (
", ".join([str(type(v)) for v in p])))
return iap.Choice(p)
if isinstance(p, iap.StochasticParameter):
return p
raise Exception(
"Expected int, tuple of two ints, list of ints or "
"StochasticParameter, got type %s." % (type(p),))
if len(px) == 2:
all_sides = handle_param(px)
else: # len == 4
top = handle_param(px[0])
right = handle_param(px[1])
bottom = handle_param(px[2])
left = handle_param(px[3])
elif isinstance(px, iap.StochasticParameter):
top = right = bottom = left = px
else:
raise Exception(
"Expected int, tuple of 4 "
"ints/tuples/lists/StochasticParameters or "
"StochasticParameter, got type %s." % (type(px),))
return all_sides, top, right, bottom, left
@classmethod
def _handle_percent_arg(cls, percent):
all_sides = None
top, right, bottom, left = None, None, None, None
if ia.is_single_number(percent):
assert percent > -1.0, (
"Expected 'percent' to be >-1.0, got %.4f." % (percent,))
all_sides = iap.Deterministic(percent)
elif isinstance(percent, tuple):
assert len(percent) in [2, 4], (
"Expected 'percent' given as a tuple to contain 2 or 4 "
"entries, got %d." % (len(percent),))
def handle_param(p):
if ia.is_single_number(p):
return iap.Deterministic(p)
if isinstance(p, tuple):
assert len(p) == 2, (
"Expected tuple of 2 values, got %d." % (len(p),))
only_numbers = (
ia.is_single_number(p[0])
and ia.is_single_number(p[1]))
assert only_numbers, (
"Expected tuple of numbers, got %s and %s." % (
type(p[0]), type(p[1])))
assert p[0] > -1.0 and p[1] > -1.0, (
"Expected tuple of values >-1.0, got %.4f and "
"%.4f." % (p[0], p[1]))
return iap.Uniform(p[0], p[1])
if isinstance(p, list):
assert len(p) > 0, (
"Expected non-empty list, but got empty one.")
assert all([ia.is_single_number(val) for val in p]), (
"Expected list of numbers, got types %s." % (
", ".join([str(type(v)) for v in p])))
assert all([val > -1.0 for val in p]), (
"Expected list of values >-1.0, got values %s." % (
", ".join(["%.4f" % (v,) for v in p])))
return iap.Choice(p)
if isinstance(p, iap.StochasticParameter):
return p
                raise Exception(
                    "Expected number, tuple of two numbers, list of numbers "
                    "or StochasticParameter, got type %s." % (type(p),))
if len(percent) == 2:
all_sides = handle_param(percent)
else: # len == 4
top = handle_param(percent[0])
right = handle_param(percent[1])
bottom = handle_param(percent[2])
left = handle_param(percent[3])
elif isinstance(percent, iap.StochasticParameter):
top = right = bottom = left = percent
else:
raise Exception(
"Expected number, tuple of 4 "
"numbers/tuples/lists/StochasticParameters or "
"StochasticParameter, got type %s." % (type(percent),))
return all_sides, top, right, bottom, left
def _augment_batch_(self, batch, random_state, parents, hooks):
shapes = batch.get_rowwise_shapes()
samples = self._draw_samples(random_state, shapes)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps,
self._pad_mode_heatmaps, self._pad_cval_heatmaps,
samples)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps,
self._pad_mode_segmentation_maps,
self._pad_cval_segmentation_maps, samples)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
result = []
for i, image in enumerate(images):
samples_i = samples[i]
image_cr_pa = _crop_and_pad_arr(
image, samples_i.croppings, samples_i.paddings,
samples_i.pad_mode, samples_i.pad_cval, self.keep_size)
result.append(image_cr_pa)
if ia.is_np_array(images):
if self.keep_size:
result = np.array(result, dtype=images.dtype)
else:
nb_shapes = len({image.shape for image in result})
if nb_shapes == 1:
result = np.array(result, dtype=images.dtype)
return result
def _augment_maps_by_samples(self, augmentables, pad_mode, pad_cval,
samples):
result = []
for i, augmentable in enumerate(augmentables):
samples_img = samples[i]
augmentable = _crop_and_pad_hms_or_segmaps_(
augmentable,
croppings_img=samples_img.croppings,
paddings_img=samples_img.paddings,
pad_mode=(pad_mode
if pad_mode is not None
else samples_img.pad_mode),
pad_cval=(pad_cval
if pad_cval is not None
else samples_img.pad_cval),
keep_size=self.keep_size
)
result.append(augmentable)
return result
def _augment_keypoints_by_samples(self, keypoints_on_images, samples):
result = []
for i, keypoints_on_image in enumerate(keypoints_on_images):
samples_i = samples[i]
kpsoi_aug = _crop_and_pad_kpsoi_(
keypoints_on_image, croppings_img=samples_i.croppings,
paddings_img=samples_i.paddings, keep_size=self.keep_size)
result.append(kpsoi_aug)
return result
def _draw_samples(self, random_state, shapes):
nb_rows = len(shapes)
if self.mode == "noop":
top = right = bottom = left = np.full((nb_rows,), 0,
dtype=np.int32)
else:
if self.all_sides is not None:
if self.sample_independently:
samples = self.all_sides.draw_samples(
(nb_rows, 4), random_state=random_state)
top = samples[:, 0]
right = samples[:, 1]
bottom = samples[:, 2]
left = samples[:, 3]
else:
sample = self.all_sides.draw_samples(
(nb_rows,), random_state=random_state)
top = right = bottom = left = sample
else:
top = self.top.draw_samples(
(nb_rows,), random_state=random_state)
right = self.right.draw_samples(
(nb_rows,), random_state=random_state)
bottom = self.bottom.draw_samples(
(nb_rows,), random_state=random_state)
left = self.left.draw_samples(
(nb_rows,), random_state=random_state)
if self.mode == "px":
# no change necessary for pixel values
pass
elif self.mode == "percent":
# percentage values have to be transformed to pixel values
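            # e.g. a sampled top fraction of 0.1 for an image of height 50
            # results in round(50 * 0.1) = 5 pixels to pad (or crop, if
            # the sampled fraction was -0.1)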
shapes_arr = np.array([shape[0:2] for shape in shapes],
dtype=np.float32)
heights = shapes_arr[:, 0]
widths = shapes_arr[:, 1]
top = np.round(heights * top).astype(np.int32)
right = np.round(widths * right).astype(np.int32)
bottom = np.round(heights * bottom).astype(np.int32)
left = np.round(widths * left).astype(np.int32)
else:
raise Exception("Invalid mode")
def _only_above_zero(arr):
arr = np.copy(arr)
mask = (arr < 0)
arr[mask] = 0
return arr
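        # split the sampled per-side values: negative values denote crop
        # amounts, positive values denote pad amounts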
crop_top = _only_above_zero((-1) * top)
crop_right = _only_above_zero((-1) * right)
crop_bottom = _only_above_zero((-1) * bottom)
crop_left = _only_above_zero((-1) * left)
pad_top = _only_above_zero(top)
pad_right = _only_above_zero(right)
pad_bottom = _only_above_zero(bottom)
pad_left = _only_above_zero(left)
pad_mode = self.pad_mode.draw_samples((nb_rows,),
random_state=random_state)
pad_cval = self.pad_cval.draw_samples((nb_rows,),
random_state=random_state)
# TODO vectorize this part -- especially return only one instance
result = []
for i, shape in enumerate(shapes):
height, width = shape[0:2]
crop_top_i, crop_right_i, crop_bottom_i, crop_left_i = \
_crop_prevent_zero_size(
height, width,
crop_top[i], crop_right[i], crop_bottom[i], crop_left[i])
# add here any_crop_y to not warn in case of zero height/width
# images
any_crop_y = (crop_top_i > 0 or crop_bottom_i > 0)
if any_crop_y and crop_top_i + crop_bottom_i >= height:
ia.warn(
"Expected generated crop amounts in CropAndPad for top and "
"bottom image side to be less than the image's height, but "
"got %d (top) and %d (bottom) vs. image height %d. This "
"will result in an image with output height=1 (if input "
"height was >=1) or output height=0 (if input height "
"was 0)." % (crop_top_i, crop_bottom_i, height))
# add here any_crop_x to not warn in case of zero height/width
# images
any_crop_x = (crop_left_i > 0 or crop_right_i > 0)
if any_crop_x and crop_left_i + crop_right_i >= width:
ia.warn(
"Expected generated crop amounts in CropAndPad for left "
"and right image side to be less than the image's width, "
"but got %d (left) and %d (right) vs. image width %d. "
"This will result in an image with output width=1 (if "
"input width was >=1) or output width=0 (if input width "
"was 0)." % (crop_left_i, crop_right_i, width))
result.append(
_CropAndPadSamplingResult(
crop_top=crop_top_i,
crop_right=crop_right_i,
crop_bottom=crop_bottom_i,
crop_left=crop_left_i,
pad_top=pad_top[i],
pad_right=pad_right[i],
pad_bottom=pad_bottom[i],
pad_left=pad_left[i],
pad_mode=pad_mode[i],
pad_cval=pad_cval[i]))
return result
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.all_sides, self.top, self.right, self.bottom, self.left,
self.pad_mode, self.pad_cval]
class Pad(CropAndPad):
"""Pad images, i.e. adds columns/rows of pixels to them.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropAndPad`.
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to pad on each side of the image.
Expected value range is ``[0, inf)``.
Either this or the parameter `percent` may be set, not both at the same
time.
* If ``None``, then pixel-based padding will not be used.
* If ``int``, then that exact number of pixels will always be
padded.
* If ``StochasticParameter``, then that parameter will be used for
each image. Four samples will be drawn per image (top, right,
bottom, left), unless `sample_independently` is set to ``False``,
as then only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,
then each side will be padded by a random amount sampled
          uniformly per image and side from the interval ``[a, b]``. If
however `sample_independently` is set to ``False``, only one
value will be sampled per image and used for all sides.
* If a ``tuple`` of four entries, then the entries represent top,
right, bottom, left. Each entry may be a single ``int`` (always
pad by exactly that value), a ``tuple`` of two ``int`` s
``a`` and ``b`` (pad by an amount within ``[a, b]``), a
``list`` of ``int`` s (pad by a random value that is
contained in the ``list``) or a ``StochasticParameter`` (sample
the amount to pad from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to pad
    on each side of the image, given as a *fraction* of the image
height/width. E.g. if this is set to ``0.1``, the augmenter will
always pad ``10%`` of the image's height at both the top and the
bottom (both ``10%`` each), as well as ``10%`` of the width at the
right and left.
Expected value range is ``[0.0, inf)``.
Either this or the parameter `px` may be set, not both
at the same time.
* If ``None``, then fraction-based padding will not be
used.
* If ``number``, then that fraction will always be padded.
* If ``StochasticParameter``, then that parameter will be used for
each image. Four samples will be drawn per image (top, right,
bottom, left). If however `sample_independently` is set to
``False``, only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,
then each side will be padded by a random fraction
sampled uniformly per image and side from the interval
``[a, b]``. If however `sample_independently` is set to
``False``, only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of four entries, then the entries represent top,
right, bottom, left. Each entry may be a single ``float``
(always pad by exactly that fraction), a ``tuple`` of
two ``float`` s ``a`` and ``b`` (pad by a fraction from
``[a, b]``), a ``list`` of ``float`` s (pad by a random
value that is contained in the list) or a ``StochasticParameter``
(sample the percentage to pad from that parameter).
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
Padding mode to use. The available modes match the numpy padding modes,
i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,
``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes
``constant`` and ``linear_ramp`` use extra values, which are provided
by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for
more details.
* If ``imgaug.ALL``, then a random mode from all available modes
will be sampled per image.
* If a ``str``, it will be used as the pad mode for all images.
* If a ``list`` of ``str``, a random one of these will be sampled
per image and used as the mode.
* If ``StochasticParameter``, a random mode will be sampled from
this parameter per image.
    pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
The constant value to use if the pad mode is ``constant`` or the end
value to use if the mode is ``linear_ramp``.
See :func:`~imgaug.imgaug.pad` for more details.
* If ``number``, then that value will be used.
* If a ``tuple`` of two ``number`` s and at least one of them is
a ``float``, then a random number will be uniformly sampled per
image from the continuous interval ``[a, b]`` and used as the
value. If both ``number`` s are ``int`` s, the interval is
discrete.
* If a ``list`` of ``number``, then a random value will be chosen
from the elements of the ``list`` and used as the value.
* If ``StochasticParameter``, a random value will be sampled from
that parameter per image.
keep_size : bool, optional
After padding, the result image will usually have a
different height/width compared to the original input image. If this
parameter is set to ``True``, then the padded image will be
resized to the input image's size, i.e. the augmenter's output shape
is always identical to the input shape.
sample_independently : bool, optional
If ``False`` *and* the values for `px`/`percent` result in exactly
*one* probability distribution for all image sides, only one single
value will be sampled from that probability distribution and used for
all sides. I.e. the pad amount then is the same for all sides.
If ``True``, four values will be sampled independently, one per side.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Pad(px=(0, 10))
Pad each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. The padding happens by
zero-padding, i.e. it adds black pixels (default setting).
>>> aug = iaa.Pad(px=(0, 10), pad_mode="edge")
Pad each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. The padding uses the
``edge`` mode from numpy's pad function, i.e. the pixel colors around
the image sides are repeated.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"])
Similar to the previous example, but uses zero-padding (``constant``) for
half of the images and ``edge`` padding for the other half.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))
Similar to the previous example, but uses any available padding mode.
In case the padding mode ends up being ``constant`` or ``linear_ramp``,
    a random intensity is uniformly sampled (once per image) from the
discrete interval ``[0..255]`` and used as the intensity of the new
pixels.
>>> aug = iaa.Pad(px=(0, 10), sample_independently=False)
Pad each side by a random pixel value sampled uniformly once per image
from the discrete interval ``[0..10]``. Each sampled value is used
for *all* sides of the corresponding image.
>>> aug = iaa.Pad(px=(0, 10), keep_size=False)
Pad each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. Afterwards, do **not**
resize the padded image back to the input image's size. This will increase
the image's height and width by a maximum of ``20`` pixels.
>>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
Pad the top and bottom by a random pixel value sampled uniformly from the
discrete interval ``[0..10]``. Pad the left and right analogously by
a random value sampled from ``[0..5]``. Each value is always sampled
independently.
>>> aug = iaa.Pad(percent=(0, 0.1))
Pad each side by a random fraction sampled uniformly from the continuous
interval ``[0.0, 0.10]``. The fraction is sampled once per image and
side. E.g. a sampled fraction of ``0.1`` for the top side would pad by
``0.1*H``, where ``H`` is the height of the input image.
>>> aug = iaa.Pad(
>>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
Pads each side by either ``5%`` or ``10%``. The values are sampled
once per side and image.
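    >>> aug = iaa.Pad(percent=(0, 0.1), pad_mode="reflect")
    Pad each side by a random fraction sampled uniformly from the continuous
    interval ``[0.0, 0.10]``, using the ``reflect`` padding mode, i.e. the
    added pixels mirror the pixel content at the respective image sides.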
"""
def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0,
keep_size=True, sample_independently=True,
seed=None, name=None, **old_kwargs):
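        # Pad is implemented on top of CropAndPad, for which negative
        # values mean cropping. Hence only values >=0 are accepted here.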
def recursive_validate(value):
if value is None:
return value
if ia.is_single_number(value):
                assert value >= 0, "Expected value >=0, got %.4f." % (value,)
return value
if isinstance(value, iap.StochasticParameter):
return value
if isinstance(value, tuple):
return tuple([recursive_validate(v_) for v_ in value])
if isinstance(value, list):
return [recursive_validate(v_) for v_ in value]
raise Exception(
"Expected None or int or float or StochasticParameter or "
"list or tuple, got %s." % (type(value),))
px = recursive_validate(px)
percent = recursive_validate(percent)
super(Pad, self).__init__(
px=px,
percent=percent,
pad_mode=pad_mode,
pad_cval=pad_cval,
keep_size=keep_size,
sample_independently=sample_independently,
seed=seed, name=name, **old_kwargs)
class Crop(CropAndPad):
"""Crop images, i.e. remove columns/rows of pixels at the sides of images.
    This augmenter allows extracting smaller-sized subimages from given
full-sized input images. The number of pixels to cut off may be defined
in absolute values or as fractions of the image sizes.
This augmenter will never crop images below a height or width of ``1``.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropAndPad`.
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to crop on each side of the image.
Expected value range is ``[0, inf)``.
Either this or the parameter `percent` may be set, not both at the same
time.
* If ``None``, then pixel-based cropping will not be used.
* If ``int``, then that exact number of pixels will always be
cropped.
* If ``StochasticParameter``, then that parameter will be used for
each image. Four samples will be drawn per image (top, right,
bottom, left), unless `sample_independently` is set to ``False``,
as then only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,
then each side will be cropped by a random amount sampled
          uniformly per image and side from the interval ``[a, b]``. If
however `sample_independently` is set to ``False``, only one
value will be sampled per image and used for all sides.
* If a ``tuple`` of four entries, then the entries represent top,
right, bottom, left. Each entry may be a single ``int`` (always
crop by exactly that value), a ``tuple`` of two ``int`` s
``a`` and ``b`` (crop by an amount within ``[a, b]``), a
``list`` of ``int`` s (crop by a random value that is
contained in the ``list``) or a ``StochasticParameter`` (sample
the amount to crop from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to crop
    on each side of the image, given as a *fraction* of the image
height/width. E.g. if this is set to ``0.1``, the augmenter will
always crop ``10%`` of the image's height at both the top and the
bottom (both ``10%`` each), as well as ``10%`` of the width at the
right and left.
Expected value range is ``[0.0, 1.0)``.
Either this or the parameter `px` may be set, not both
at the same time.
* If ``None``, then fraction-based cropping will not be
used.
* If ``number``, then that fraction will always be cropped.
* If ``StochasticParameter``, then that parameter will be used for
each image. Four samples will be drawn per image (top, right,
bottom, left). If however `sample_independently` is set to
``False``, only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,
then each side will be cropped by a random fraction
sampled uniformly per image and side from the interval
``[a, b]``. If however `sample_independently` is set to
``False``, only one value will be sampled per image and used for
all sides.
* If a ``tuple`` of four entries, then the entries represent top,
right, bottom, left. Each entry may be a single ``float``
(always crop by exactly that fraction), a ``tuple`` of
two ``float`` s ``a`` and ``b`` (crop by a fraction from
``[a, b]``), a ``list`` of ``float`` s (crop by a random
value that is contained in the list) or a ``StochasticParameter``
(sample the percentage to crop from that parameter).
keep_size : bool, optional
After cropping, the result image will usually have a
different height/width compared to the original input image. If this
parameter is set to ``True``, then the cropped image will be
resized to the input image's size, i.e. the augmenter's output shape
is always identical to the input shape.
sample_independently : bool, optional
If ``False`` *and* the values for `px`/`percent` result in exactly
*one* probability distribution for all image sides, only one single
value will be sampled from that probability distribution and used for
all sides. I.e. the crop amount then is the same for all sides.
If ``True``, four values will be sampled independently, one per side.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Crop(px=(0, 10))
Crop each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``.
>>> aug = iaa.Crop(px=(0, 10), sample_independently=False)
Crop each side by a random pixel value sampled uniformly once per image
from the discrete interval ``[0..10]``. Each sampled value is used
for *all* sides of the corresponding image.
>>> aug = iaa.Crop(px=(0, 10), keep_size=False)
Crop each side by a random pixel value sampled uniformly per image and
side from the discrete interval ``[0..10]``. Afterwards, do **not**
resize the cropped image back to the input image's size. This will decrease
the image's height and width by a maximum of ``20`` pixels.
>>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5)))
Crop the top and bottom by a random pixel value sampled uniformly from the
discrete interval ``[0..10]``. Crop the left and right analogously by
a random value sampled from ``[0..5]``. Each value is always sampled
independently.
>>> aug = iaa.Crop(percent=(0, 0.1))
Crop each side by a random fraction sampled uniformly from the continuous
interval ``[0.0, 0.10]``. The fraction is sampled once per image and
side. E.g. a sampled fraction of ``0.1`` for the top side would crop by
``0.1*H``, where ``H`` is the height of the input image.
>>> aug = iaa.Crop(
>>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
Crops each side by either ``5%`` or ``10%``. The values are sampled
once per side and image.
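    >>> aug = iaa.Crop(px=16)
    Crop each side by exactly ``16`` pixels. As ``keep_size`` defaults to
    ``True``, each cropped image is afterwards resized back to its input
    image's size.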
"""
def __init__(self, px=None, percent=None, keep_size=True,
sample_independently=True,
seed=None, name=None, **old_kwargs):
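        # Crop is implemented on top of CropAndPad, for which negative
        # values mean cropping. The user-facing non-negative crop amounts
        # are therefore negated before being passed on.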
def recursive_negate(value):
if value is None:
return value
if ia.is_single_number(value):
                assert value >= 0, "Expected value >=0, got %.4f." % (value,)
return -value
if isinstance(value, iap.StochasticParameter):
return iap.Multiply(value, -1)
if isinstance(value, tuple):
return tuple([recursive_negate(v_) for v_ in value])
if isinstance(value, list):
return [recursive_negate(v_) for v_ in value]
raise Exception(
"Expected None or int or float or StochasticParameter or "
"list or tuple, got %s." % (type(value),))
px = recursive_negate(px)
percent = recursive_negate(percent)
super(Crop, self).__init__(
px=px,
percent=percent,
keep_size=keep_size,
sample_independently=sample_independently,
seed=seed, name=name, **old_kwargs)
# TODO maybe rename this to PadToMinimumSize?
# TODO this is very similar to CropAndPad, maybe add a way to generate crop
# values imagewise via a callback in CropAndPad?
# TODO why is padding mode and cval here called pad_mode, pad_cval but in other
# cases mode/cval?
class PadToFixedSize(meta.Augmenter):
"""Pad images to a predefined minimum width and/or height.
If images are already at the minimum width/height or are larger, they will
not be padded. Note that this also means that images will not be cropped if
they exceed the required width/height.
The augmenter randomly decides per image how to distribute the required
    padding amounts over the image axes. E.g. if 2px have to be padded on the
left or right to reach the required width, the augmenter will sometimes
add 2px to the left and 0px to the right, sometimes add 2px to the right
and 0px to the left and sometimes add 1px to both sides. Set `position`
to ``center`` to prevent that.
Supported dtypes
----------------
See :func:`~imgaug.augmenters.size.pad`.
Parameters
----------
width : int or None
Pad images up to this minimum width.
If ``None``, image widths will not be altered.
height : int or None
Pad images up to this minimum height.
If ``None``, image heights will not be altered.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.CropAndPad.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.CropAndPad.__init__`.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
Sets the center point of the padding, which determines how the
required padding amounts are distributed to each side. For a ``tuple``
``(a, b)``, both ``a`` and ``b`` are expected to be in range
``[0.0, 1.0]`` and describe the fraction of padding applied to the
left/right (low/high values for ``a``) and the fraction of padding
applied to the top/bottom (low/high values for ``b``). A padding
position at ``(0.5, 0.5)`` would be the center of the image and
distribute the padding equally to all sides. A padding position at
``(0.0, 1.0)`` would be the left-bottom and would apply 100% of the
required padding to the bottom and left sides of the image so that
the bottom left corner becomes more and more the new image
center (depending on how much is padded).
* If string ``uniform`` then the share of padding is randomly and
uniformly distributed over each side.
Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.
* If string ``normal`` then the share of padding is distributed
based on a normal distribution, leading to a focus on the
center of the images.
Equivalent to
``(Clip(Normal(0.5, 0.45/2), 0, 1),
Clip(Normal(0.5, 0.45/2), 0, 1))``.
* If string ``center`` then center point of the padding is
identical to the image center.
Equivalent to ``(0.5, 0.5)``.
* If a string matching regex
``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top``
or ``center-bottom`` then sets the center point of the padding
to the X-Y position matching that description.
* If a tuple of float, then expected to have exactly two entries
          between ``0.0`` and ``1.0``, which will always be used as the
          padding position in ``(x, y)`` form.
* If a ``StochasticParameter``, then that parameter will be queried
once per call to ``augment_*()`` to get ``Nx2`` center positions
in ``(x, y)`` form (with ``N`` the number of images).
* If a ``tuple`` of ``StochasticParameter``, then expected to have
exactly two entries that will both be queried per call to
``augment_*()``, each for ``(N,)`` values, to get the center
positions. First parameter is used for ``x`` coordinates,
second for ``y`` coordinates.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PadToFixedSize(width=100, height=100)
For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do
nothing for the other edges. The padding is randomly (uniformly)
distributed over the sides, so that e.g. sometimes most of the required
padding is applied to the left, sometimes to the right (analogous
top/bottom).
>>> aug = iaa.PadToFixedSize(width=100, height=100, position="center")
For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do
nothing for the other image sides. The padding is always equally
distributed over the left/right and top/bottom sides.
>>> aug = iaa.PadToFixedSize(width=100, height=100, pad_mode=ia.ALL)
For image sides smaller than ``100`` pixels, pad to ``100`` pixels and
use any possible padding mode for that. Do nothing for the other image
sides. The padding is always equally distributed over the left/right and
top/bottom sides.
>>> aug = iaa.Sequential([
>>> iaa.PadToFixedSize(width=100, height=100),
>>> iaa.CropToFixedSize(width=100, height=100)
>>> ])
Pad images smaller than ``100x100`` until they reach ``100x100``.
Analogously, crop images larger than ``100x100`` until they reach
``100x100``. The output images therefore have a fixed size of ``100x100``.
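    >>> aug = iaa.PadToFixedSize(width=100, height=100, position="left-top")
    For image sides smaller than ``100`` pixels, pad to ``100`` pixels. The
    required padding is always fully applied to the top and left sides,
    i.e. the image content moves towards the bottom right.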
"""
def __init__(self, width, height, pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToFixedSize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.size = (width, height)
# Position of where to pad. The further to the top left this is, the
# larger the share of pixels that will be added to the top and left
# sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) to only
# add at the top and left, (Deterministic(1.0), Deterministic(1.0))
# to only add at the bottom right. Analogously (0.5, 0.5) pads equally
        # on both axes, (0.0, 1.0) pads left and bottom, (1.0, 0.0) pads right
# and top.
self.position = _handle_position_parameter(position)
self.pad_mode = _handle_pad_mode_param(pad_mode)
# TODO enable ALL here like in eg Affine
self.pad_cval = iap.handle_discrete_param(
pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
# set these to None to use the same values as sampled for the
# images (not tested)
self._pad_mode_heatmaps = "constant"
self._pad_mode_segmentation_maps = "constant"
self._pad_cval_heatmaps = 0.0
self._pad_cval_segmentation_maps = 0
def _augment_batch_(self, batch, random_state, parents, hooks):
# Providing the whole batch to _draw_samples() would not be necessary
# for this augmenter. The number of rows would be sufficient. This
# formulation however enables derived augmenters to use rowwise shapes
# without having to compute them here for this augmenter.
samples = self._draw_samples(batch, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, samples, self._pad_mode_heatmaps,
self._pad_cval_heatmaps)
if batch.segmentation_maps is not None:
            batch.segmentation_maps = self._augment_maps_by_samples(
                batch.segmentation_maps, samples,
                self._pad_mode_segmentation_maps,
                self._pad_cval_segmentation_maps)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
result = []
sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples
for i, (image, size) in enumerate(zip(images, sizes)):
width_min, height_min = size
height_image, width_image = image.shape[:2]
paddings = self._calculate_paddings(height_image, width_image,
height_min, width_min,
pad_xs[i], pad_ys[i])
image = _crop_and_pad_arr(
image, (0, 0, 0, 0), paddings, pad_modes[i], pad_cvals[i],
keep_size=False)
result.append(image)
# TODO result is always a list. Should this be converted to an array
# if possible (not guaranteed that all images have same size,
# some might have been larger than desired height/width)
return result
def _augment_keypoints_by_samples(self, keypoints_on_images, samples):
result = []
sizes, pad_xs, pad_ys, _, _ = samples
for i, (kpsoi, size) in enumerate(zip(keypoints_on_images, sizes)):
width_min, height_min = size
height_image, width_image = kpsoi.shape[:2]
paddings_img = self._calculate_paddings(height_image, width_image,
height_min, width_min,
pad_xs[i], pad_ys[i])
keypoints_padded = _crop_and_pad_kpsoi_(
kpsoi, (0, 0, 0, 0), paddings_img,
keep_size=False)
result.append(keypoints_padded)
return result
def _augment_maps_by_samples(self, augmentables, samples, pad_mode,
pad_cval):
sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples
for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):
width_min, height_min = size
height_img, width_img = augmentable.shape[:2]
paddings_img = self._calculate_paddings(
height_img, width_img, height_min, width_min,
pad_xs[i], pad_ys[i])
# TODO for the previous method (and likely the new/current one
# too):
# for 30x30 padded to 32x32 with 15x15 heatmaps this results
# in paddings of 1 on each side (assuming
# position=(0.5, 0.5)) giving 17x17 heatmaps when they should
# be 16x16. Error is due to each side getting projected 0.5
# padding which is rounded to 1. This doesn't seem right.
augmentables[i] = _crop_and_pad_hms_or_segmaps_(
augmentables[i],
(0, 0, 0, 0),
paddings_img,
pad_mode=pad_mode if pad_mode is not None else pad_modes[i],
pad_cval=pad_cval if pad_cval is not None else pad_cvals[i],
keep_size=False)
return augmentables
def _draw_samples(self, batch, random_state):
nb_images = batch.nb_rows
rngs = random_state.duplicate(4)
if isinstance(self.position, tuple):
pad_xs = self.position[0].draw_samples(nb_images,
random_state=rngs[0])
pad_ys = self.position[1].draw_samples(nb_images,
random_state=rngs[1])
else:
pads = self.position.draw_samples((nb_images, 2),
random_state=rngs[0])
pad_xs = pads[:, 0]
pad_ys = pads[:, 1]
pad_modes = self.pad_mode.draw_samples(nb_images,
random_state=rngs[2])
pad_cvals = self.pad_cval.draw_samples(nb_images,
random_state=rngs[3])
# We return here the sizes even though they are static as it allows
# derived augmenters to define image-specific heights/widths.
return [self.size] * nb_images, pad_xs, pad_ys, pad_modes, pad_cvals
@classmethod
def _calculate_paddings(cls, height_image, width_image,
height_min, width_min, pad_xs_i, pad_ys_i):
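        # e.g. width_image=30, width_min=32 and pad_xs_i=0.5 lead to
        # pad_total_x=2, pad_left=int(0.5*2)=1 and pad_right=2-1=1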
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
if width_min is not None and width_image < width_min:
pad_total_x = width_min - width_image
pad_left = int((1-pad_xs_i) * pad_total_x)
pad_right = pad_total_x - pad_left
if height_min is not None and height_image < height_min:
pad_total_y = height_min - height_image
pad_top = int((1-pad_ys_i) * pad_total_y)
pad_bottom = pad_total_y - pad_top
return pad_top, pad_right, pad_bottom, pad_left
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.size[0], self.size[1], self.pad_mode, self.pad_cval,
self.position]
class CenterPadToFixedSize(PadToFixedSize):
"""Pad images equally on all sides up to given minimum heights/widths.
This is an alias for :class:`~imgaug.augmenters.size.PadToFixedSize`
with ``position="center"``. It spreads the pad amounts equally over
all image sides, while :class:`~imgaug.augmenters.size.PadToFixedSize`
    by default spreads them randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width : int or None
See :func:`PadToFixedSize.__init__`.
height : int or None
See :func:`PadToFixedSize.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterPadToFixedSize(height=20, width=30)
Create an augmenter that pads images up to ``20x30``, with the padded
rows added *equally* on the top and bottom (analogous for the padded
columns).
"""
def __init__(self, width, height, pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToFixedSize, self).__init__(
width=width, height=height, pad_mode=pad_mode, pad_cval=pad_cval,
position="center",
seed=seed, name=name, **old_kwargs)
# TODO maybe rename this to CropToMaximumSize ?
# TODO this is very similar to CropAndPad, maybe add a way to generate crop
# values imagewise via a callback in CropAndPad?
# TODO add crop() function in imgaug, similar to pad
class CropToFixedSize(meta.Augmenter):
"""Crop images down to a predefined maximum width and/or height.
If images are already at the maximum width/height or are smaller, they
will not be cropped. Note that this also means that images will not be
padded if they are below the required width/height.
The augmenter randomly decides per image how to distribute the required
    cropping amounts over the image axes. E.g. if 2px have to be cropped on
the left or right to reach the required width, the augmenter will
sometimes remove 2px from the left and 0px from the right, sometimes
remove 2px from the right and 0px from the left and sometimes remove 1px
from both sides. Set `position` to ``center`` to prevent that.
Supported dtypes
----------------
* ``uint8``: yes; fully tested
* ``uint16``: yes; tested
* ``uint32``: yes; tested
* ``uint64``: yes; tested
* ``int8``: yes; tested
* ``int16``: yes; tested
* ``int32``: yes; tested
* ``int64``: yes; tested
* ``float16``: yes; tested
* ``float32``: yes; tested
* ``float64``: yes; tested
* ``float128``: yes; tested
* ``bool``: yes; tested
Parameters
----------
width : int or None
Crop images down to this maximum width.
If ``None``, image widths will not be altered.
height : int or None
Crop images down to this maximum height.
If ``None``, image heights will not be altered.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
Sets the center point of the cropping, which determines how the
required cropping amounts are distributed to each side. For a
``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in
range ``[0.0, 1.0]`` and describe the fraction of cropping applied
to the left/right (low/high values for ``a``) and the fraction
of cropping applied to the top/bottom (low/high values for ``b``).
A cropping position at ``(0.5, 0.5)`` would be the center of the
image and distribute the cropping equally over all sides. A cropping
position at ``(1.0, 0.0)`` would be the right-top and would apply
100% of the required cropping to the right and top sides of the image.
* If string ``uniform`` then the share of cropping is randomly
and uniformly distributed over each side.
Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.
* If string ``normal`` then the share of cropping is distributed
based on a normal distribution, leading to a focus on the center
of the images.
Equivalent to
``(Clip(Normal(0.5, 0.45/2), 0, 1),
Clip(Normal(0.5, 0.45/2), 0, 1))``.
* If string ``center`` then center point of the cropping is
identical to the image center.
Equivalent to ``(0.5, 0.5)``.
* If a string matching regex
``^(left|center|right)-(top|center|bottom)$``, e.g.
``left-top`` or ``center-bottom`` then sets the center point of
the cropping to the X-Y position matching that description.
* If a tuple of float, then expected to have exactly two entries
          between ``0.0`` and ``1.0``, which will always be used as the
          cropping position in ``(x, y)`` form.
* If a ``StochasticParameter``, then that parameter will be queried
once per call to ``augment_*()`` to get ``Nx2`` center positions
in ``(x, y)`` form (with ``N`` the number of images).
* If a ``tuple`` of ``StochasticParameter``, then expected to have
exactly two entries that will both be queried per call to
``augment_*()``, each for ``(N,)`` values, to get the center
positions. First parameter is used for ``x`` coordinates,
second for ``y`` coordinates.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CropToFixedSize(width=100, height=100)
For image sides larger than ``100`` pixels, crop to ``100`` pixels. Do
nothing for the other sides. The cropping amounts are randomly (and
uniformly) distributed over the sides of the image.
>>> aug = iaa.CropToFixedSize(width=100, height=100, position="center")
For sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing
for the other sides. The cropping amounts are always equally distributed
over the left/right sides of the image (and analogously for top/bottom).
>>> aug = iaa.Sequential([
>>> iaa.PadToFixedSize(width=100, height=100),
>>> iaa.CropToFixedSize(width=100, height=100)
>>> ])
Pad images smaller than ``100x100`` until they reach ``100x100``.
Analogously, crop images larger than ``100x100`` until they reach
``100x100``. The output images therefore have a fixed size of ``100x100``.
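    >>> aug = iaa.CropToFixedSize(width=100, height=None)
    For images wider than ``100`` pixels, crop their width down to ``100``
    pixels. Image heights are never altered.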
"""
def __init__(self, width, height, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToFixedSize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.size = (width, height)
# Position of where to crop. The further to the top left this is,
# the larger the share of pixels that will be cropped from the top
# and left sides. I.e. set to (Deterministic(0.0), Deterministic(0.0))
# to only crop at the top and left,
# (Deterministic(1.0), Deterministic(1.0)) to only crop at the bottom
        # right. Analogously (0.5, 0.5) crops equally on both axes,
# (0.0, 1.0) crops left and bottom, (1.0, 0.0) crops right and top.
self.position = _handle_position_parameter(position)
def _augment_batch_(self, batch, random_state, parents, hooks):
# Providing the whole batch to _draw_samples() would not be necessary
# for this augmenter. The number of rows would be sufficient. This
# formulation however enables derived augmenters to use rowwise shapes
# without having to compute them here for this augmenter.
samples = self._draw_samples(batch, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, samples)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, samples)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
result = []
sizes, offset_xs, offset_ys = samples
for i, (image, size) in enumerate(zip(images, sizes)):
w, h = size
height_image, width_image = image.shape[0:2]
croppings = self._calculate_crop_amounts(
height_image, width_image, h, w, offset_ys[i], offset_xs[i])
image_cropped = _crop_and_pad_arr(image, croppings, (0, 0, 0, 0),
keep_size=False)
result.append(image_cropped)
return result
def _augment_keypoints_by_samples(self, kpsois, samples):
result = []
sizes, offset_xs, offset_ys = samples
for i, (kpsoi, size) in enumerate(zip(kpsois, sizes)):
w, h = size
height_image, width_image = kpsoi.shape[0:2]
croppings_img = self._calculate_crop_amounts(
height_image, width_image, h, w, offset_ys[i], offset_xs[i])
kpsoi_cropped = _crop_and_pad_kpsoi_(
kpsoi, croppings_img, (0, 0, 0, 0), keep_size=False)
result.append(kpsoi_cropped)
return result
def _augment_maps_by_samples(self, augmentables, samples):
sizes, offset_xs, offset_ys = samples
for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):
w, h = size
height_image, width_image = augmentable.shape[0:2]
croppings_img = self._calculate_crop_amounts(
height_image, width_image, h, w, offset_ys[i], offset_xs[i])
augmentables[i] = _crop_and_pad_hms_or_segmaps_(
augmentable, croppings_img, (0, 0, 0, 0), keep_size=False)
return augmentables
@classmethod
def _calculate_crop_amounts(cls, height_image, width_image,
height_max, width_max,
offset_y, offset_x):
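        # e.g. height_image=50, height_max=40 and offset_y=0.25 lead to
        # crop_top=int(0.25*10)=2 and crop_bottom=50-40-2=8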
crop_top = 0
crop_right = 0
crop_bottom = 0
crop_left = 0
if height_max is not None and height_image > height_max:
crop_top = int(offset_y * (height_image - height_max))
crop_bottom = height_image - height_max - crop_top
if width_max is not None and width_image > width_max:
crop_left = int(offset_x * (width_image - width_max))
crop_right = width_image - width_max - crop_left
return crop_top, crop_right, crop_bottom, crop_left
def _draw_samples(self, batch, random_state):
nb_images = batch.nb_rows
rngs = random_state.duplicate(2)
if isinstance(self.position, tuple):
offset_xs = self.position[0].draw_samples(nb_images,
random_state=rngs[0])
offset_ys = self.position[1].draw_samples(nb_images,
random_state=rngs[1])
else:
offsets = self.position.draw_samples((nb_images, 2),
random_state=rngs[0])
offset_xs = offsets[:, 0]
offset_ys = offsets[:, 1]
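        # invert the sampled values so that e.g. position (0.0, 0.0), i.e.
        # left-top, results in cropping only at the top and left sides
        # (see the comment on `position` in __init__)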
offset_xs = 1.0 - offset_xs
offset_ys = 1.0 - offset_ys
# We return here the sizes even though they are static as it allows
# derived augmenters to define image-specific heights/widths.
return [self.size] * nb_images, offset_xs, offset_ys
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.size[0], self.size[1], self.position]
class CenterCropToFixedSize(CropToFixedSize):
"""Take a crop from the center of each image.
This is an alias for :class:`~imgaug.augmenters.size.CropToFixedSize` with
``position="center"``.
.. note::
If images already have a width and/or height below the provided
width and/or height then this augmenter will do nothing for the
respective axis. Hence, resulting images can be smaller than the
provided axis sizes.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
width : int or None
See :func:`CropToFixedSize.__init__`.
height : int or None
See :func:`CropToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> crop = iaa.CenterCropToFixedSize(height=20, width=10)
Create an augmenter that takes ``20x10`` sized crops from the center of
images.
"""
def __init__(self, width, height,
seed=None, name=None, **old_kwargs):
super(CenterCropToFixedSize, self).__init__(
width=width, height=height, position="center",
seed=seed, name=name, **old_kwargs)
class CropToMultiplesOf(CropToFixedSize):
"""Crop images down until their height/width is a multiple of a value.
.. note::
For a given axis size ``A`` and multiple ``M``, if ``A`` is in the
interval ``[0 .. M]``, the axis will not be changed.
As a result, this augmenter can still produce axis sizes that are
not multiples of the given values.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
width_multiple : int or None
Multiple for the width. Images will be cropped down until their
width is a multiple of this value.
If ``None``, image widths will not be altered.
height_multiple : int or None
Multiple for the height. Images will be cropped down until their
height is a multiple of this value.
If ``None``, image heights will not be altered.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`CropToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CropToMultiplesOf(height_multiple=10, width_multiple=6)
Create an augmenter that crops images to multiples of ``10`` along
the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the
x-axis (i.e. 6, 12, 18, ...).
The rows to be cropped will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
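    >>> aug = iaa.CropToMultiplesOf(height_multiple=None, width_multiple=32)
    Create an augmenter that crops image widths down to multiples of ``32``
    (i.e. 32, 64, 96, ...) and leaves image heights unchanged.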
"""
def __init__(self, width_multiple, height_multiple, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToMultiplesOf, self).__init__(
width=None, height=None, position=position,
seed=seed, name=name, **old_kwargs)
self.width_multiple = width_multiple
self.height_multiple = height_multiple
def _draw_samples(self, batch, random_state):
_sizes, offset_xs, offset_ys = super(
CropToMultiplesOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
croppings = compute_croppings_to_reach_multiples_of(
shape,
height_multiple=self.height_multiple,
width_multiple=self.width_multiple)
# TODO change that
# note that these are not in the same order as shape tuples
# in CropToFixedSize
new_size = (
width - croppings[1] - croppings[3],
height - croppings[0] - croppings[2]
)
sizes.append(new_size)
return sizes, offset_xs, offset_ys
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.width_multiple, self.height_multiple, self.position]
class CenterCropToMultiplesOf(CropToMultiplesOf):
"""Crop images equally on all sides until H/W are multiples of given values.
This is the same as :class:`~imgaug.augmenters.size.CropToMultiplesOf`,
but uses ``position="center"`` by default, which spreads the crop amounts
equally over all image sides, while
:class:`~imgaug.augmenters.size.CropToMultiplesOf` by default spreads
them randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
width_multiple : int or None
See :func:`CropToMultiplesOf.__init__`.
height_multiple : int or None
See :func:`CropToMultiplesOf.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterCropToMultiplesOf(height_multiple=10, width_multiple=6)
Create an augmenter that crops images to multiples of ``10`` along
the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the
x-axis (i.e. 6, 12, 18, ...).
The rows to be cropped will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, width_multiple, height_multiple,
seed=None, name=None, **old_kwargs):
super(CenterCropToMultiplesOf, self).__init__(
width_multiple=width_multiple,
height_multiple=height_multiple,
position="center",
seed=seed, name=name, **old_kwargs)
class PadToMultiplesOf(PadToFixedSize):
"""Pad images until their height/width is a multiple of a value.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width_multiple : int or None
Multiple for the width. Images will be padded until their
width is a multiple of this value.
If ``None``, image widths will not be altered.
height_multiple : int or None
Multiple for the height. Images will be padded until their
height is a multiple of this value.
If ``None``, image heights will not be altered.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PadToMultiplesOf(height_multiple=10, width_multiple=6)
Create an augmenter that pads images to multiples of ``10`` along
the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the
x-axis (i.e. 6, 12, 18, ...).
The rows to be padded will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
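    >>> aug = iaa.PadToMultiplesOf(height_multiple=32, width_multiple=32,
    >>>                            pad_mode="edge")
    Create an augmenter that pads image heights and widths up to the
    closest multiples of ``32``, repeating the pixel colors at the image
    sides instead of adding black pixels.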
"""
def __init__(self, width_multiple, height_multiple,
pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToMultiplesOf, self).__init__(
width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
self.width_multiple = width_multiple
self.height_multiple = height_multiple
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToMultiplesOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_multiples_of(
shape,
height_multiple=self.height_multiple,
width_multiple=self.width_multiple)
# TODO change that
# note that these are not in the same order as shape tuples
# in PadToFixedSize
new_size = (
width + paddings[1] + paddings[3],
height + paddings[0] + paddings[2]
)
sizes.append(new_size)
return sizes, pad_xs, pad_ys, pad_modes, pad_cvals
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.width_multiple, self.height_multiple,
self.pad_mode, self.pad_cval,
self.position]
class CenterPadToMultiplesOf(PadToMultiplesOf):
"""Pad images equally on all sides until H/W are multiples of given values.
This is the same as :class:`~imgaug.augmenters.size.PadToMultiplesOf`, but
uses ``position="center"`` by default, which spreads the pad amounts
equally over all image sides, while
:class:`~imgaug.augmenters.size.PadToMultiplesOf` by default spreads them
randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width_multiple : int or None
See :func:`PadToMultiplesOf.__init__`.
height_multiple : int or None
See :func:`PadToMultiplesOf.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterPadToMultiplesOf(height_multiple=10, width_multiple=6)
Create an augmenter that pads images to multiples of ``10`` along
the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the
x-axis (i.e. 6, 12, 18, ...).
The rows to be padded will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, width_multiple, height_multiple,
pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToMultiplesOf, self).__init__(
width_multiple=width_multiple,
height_multiple=height_multiple,
pad_mode=pad_mode,
pad_cval=pad_cval,
position="center",
seed=seed, name=name, **old_kwargs)
class CropToPowersOf(CropToFixedSize):
"""Crop images until their height/width is a power of a base.
This augmenter removes pixels from an axis with size ``S`` leading to the
new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a
provided base (e.g. ``2``) and ``E`` is an exponent from the discrete
interval ``[1 .. inf)``.
.. note::
This augmenter does nothing for axes with size less than ``B^1 = B``.
If you have images with ``S < B^1``, it is recommended
to combine this augmenter with a padding augmenter that pads each
axis up to ``B``.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
width_base : int or None
Base for the width. Images will be cropped down until their
width fulfills ``width' = width_base ^ E`` with ``E`` being any
natural number.
If ``None``, image widths will not be altered.
height_base : int or None
Base for the height. Images will be cropped down until their
height fulfills ``height' = height_base ^ E`` with ``E`` being any
natural number.
If ``None``, image heights will not be altered.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`CropToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CropToPowersOf(height_base=3, width_base=2)
Create an augmenter that crops each image down to powers of ``3`` along
the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e.
2, 4, 8, 16, ...).
The rows to be cropped will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, width_base, height_base, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToPowersOf, self).__init__(
width=None, height=None, position=position,
seed=seed, name=name, **old_kwargs)
self.width_base = width_base
self.height_base = height_base
def _draw_samples(self, batch, random_state):
_sizes, offset_xs, offset_ys = super(
CropToPowersOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
croppings = compute_croppings_to_reach_powers_of(
shape,
height_base=self.height_base,
width_base=self.width_base)
# TODO change that
# note that these are not in the same order as shape tuples
# in CropToFixedSize
new_size = (
width - croppings[1] - croppings[3],
height - croppings[0] - croppings[2]
)
sizes.append(new_size)
return sizes, offset_xs, offset_ys
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.width_base, self.height_base, self.position]
class CenterCropToPowersOf(CropToPowersOf):
"""Crop images equally on all sides until H/W is a power of a base.
This is the same as :class:`~imgaug.augmenters.size.CropToPowersOf`, but
uses ``position="center"`` by default, which spreads the crop amounts
equally over all image sides, while
:class:`~imgaug.augmenters.size.CropToPowersOf` by default spreads them
randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
width_base : int or None
See :func:`CropToPowersOf.__init__`.
height_base : int or None
See :func:`CropToPowersOf.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterCropToPowersOf(height_base=3, width_base=2)
Create an augmenter that crops each image down to powers of ``3`` along
the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e.
2, 4, 8, 16, ...).
The rows to be cropped will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, width_base, height_base,
seed=None, name=None, **old_kwargs):
super(CenterCropToPowersOf, self).__init__(
width_base=width_base, height_base=height_base, position="center",
seed=seed, name=name, **old_kwargs)
class PadToPowersOf(PadToFixedSize):
"""Pad images until their height/width is a power of a base.
This augmenter adds pixels to an axis with size ``S`` leading to the
new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a
provided base (e.g. ``2``) and ``E`` is an exponent from the discrete
interval ``[1 .. inf)``.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width_base : int or None
Base for the width. Images will be padded until their
width fulfills ``width' = width_base ^ E`` with ``E`` being any
natural number.
If ``None``, image widths will not be altered.
height_base : int or None
Base for the height. Images will be padded until their
height fulfills ``height' = height_base ^ E`` with ``E`` being any
natural number.
If ``None``, image heights will not be altered.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PadToPowersOf(height_base=3, width_base=2)
Create an augmenter that pads each image to powers of ``3`` along the
y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,
4, 8, 16, ...).
The rows to be padded will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, width_base, height_base,
pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToPowersOf, self).__init__(
width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
self.width_base = width_base
self.height_base = height_base
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToPowersOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_powers_of(
shape,
height_base=self.height_base,
width_base=self.width_base)
# TODO change that
# note that these are not in the same order as shape tuples
# in PadToFixedSize
new_size = (
width + paddings[1] + paddings[3],
height + paddings[0] + paddings[2]
)
sizes.append(new_size)
return sizes, pad_xs, pad_ys, pad_modes, pad_cvals
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.width_base, self.height_base,
self.pad_mode, self.pad_cval,
self.position]
class CenterPadToPowersOf(PadToPowersOf):
"""Pad images equally on all sides until H/W is a power of a base.
This is the same as :class:`~imgaug.augmenters.size.PadToPowersOf`, but uses
``position="center"`` by default, which spreads the pad amounts equally
over all image sides, while :class:`~imgaug.augmenters.size.PadToPowersOf`
by default spreads them randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width_base : int or None
See :func:`PadToPowersOf.__init__`.
height_base : int or None
See :func:`PadToPowersOf.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterPadToPowersOf(height_base=3, width_base=2)
Create an augmenter that pads each image to powers of ``3`` along the
y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,
4, 8, 16, ...).
The rows to be padded will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, width_base, height_base,
pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToPowersOf, self).__init__(
width_base=width_base, height_base=height_base,
pad_mode=pad_mode, pad_cval=pad_cval,
position="center",
seed=seed, name=name, **old_kwargs)
class CropToAspectRatio(CropToFixedSize):
"""Crop images until their width/height matches an aspect ratio.
This augmenter removes either rows or columns until the image reaches
the desired aspect ratio given in ``width / height``. The cropping
operation is stopped once the desired aspect ratio is reached or the image
side to crop reaches a size of ``1``. If any side of the image starts
with a size of ``0``, the image will not be changed.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
aspect_ratio : number
The desired aspect ratio, given as ``width/height``. E.g. a ratio
of ``2.0`` denotes an image that is twice as wide as it is high.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`CropToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CropToAspectRatio(2.0)
Create an augmenter that crops each image until its aspect ratio is as
close as possible to ``2.0`` (i.e. two times as many pixels along the
x-axis than the y-axis).
The rows to be cropped will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, aspect_ratio, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToAspectRatio, self).__init__(
width=None, height=None, position=position,
seed=seed, name=name, **old_kwargs)
self.aspect_ratio = aspect_ratio
def _draw_samples(self, batch, random_state):
_sizes, offset_xs, offset_ys = super(
CropToAspectRatio, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
if height == 0 or width == 0:
croppings = (0, 0, 0, 0)
else:
croppings = compute_croppings_to_reach_aspect_ratio(
shape,
aspect_ratio=self.aspect_ratio)
# TODO change that
# note that these are not in the same order as shape tuples
# in CropToFixedSize
new_size = (
width - croppings[1] - croppings[3],
height - croppings[0] - croppings[2]
)
sizes.append(new_size)
return sizes, offset_xs, offset_ys
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.aspect_ratio, self.position]
class CenterCropToAspectRatio(CropToAspectRatio):
"""Crop images equally on all sides until they reach an aspect ratio.
This is the same as :class:`~imgaug.augmenters.size.CropToAspectRatio`, but
uses ``position="center"`` by default, which spreads the crop amounts
equally over all image sides, while
:class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads
them randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
aspect_ratio : number
See :func:`CropToAspectRatio.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterCropToAspectRatio(2.0)
Create an augmenter that crops each image until its aspect ratio is as
close as possible to ``2.0`` (i.e. two times as many pixels along the
x-axis than the y-axis).
The rows to be cropped will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, aspect_ratio,
seed=None, name=None, **old_kwargs):
super(CenterCropToAspectRatio, self).__init__(
aspect_ratio=aspect_ratio, position="center",
seed=seed, name=name, **old_kwargs)
class PadToAspectRatio(PadToFixedSize):
"""Pad images until their width/height matches an aspect ratio.
This augmenter adds either rows or columns until the image reaches
the desired aspect ratio given in ``width / height``.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
aspect_ratio : number
The desired aspect ratio, given as ``width/height``. E.g. a ratio
of ``2.0`` denotes an image that is twice as wide as it is high.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PadToAspectRatio(2.0)
Create an augmenter that pads each image until its aspect ratio is as
close as possible to ``2.0`` (i.e. two times as many pixels along the
x-axis than the y-axis).
The rows to be padded will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToAspectRatio, self).__init__(
width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
self.aspect_ratio = aspect_ratio
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToAspectRatio, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_aspect_ratio(
shape,
aspect_ratio=self.aspect_ratio)
# TODO change that
# note that these are not in the same order as shape tuples
# in PadToFixedSize
new_size = (
width + paddings[1] + paddings[3],
height + paddings[0] + paddings[2]
)
sizes.append(new_size)
return sizes, pad_xs, pad_ys, pad_modes, pad_cvals
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.aspect_ratio, self.pad_mode, self.pad_cval,
self.position]
class CenterPadToAspectRatio(PadToAspectRatio):
"""Pad images equally on all sides until H/W matches an aspect ratio.
This is the same as :class:`~imgaug.augmenters.size.PadToAspectRatio`, but
uses ``position="center"`` by default, which spreads the pad amounts
equally over all image sides, while
:class:`~imgaug.augmenters.size.PadToAspectRatio` by default spreads them
randomly.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
aspect_ratio : number
See :func:`PadToAspectRatio.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterPadToAspectRatio(2.0)
Create an augmenter that pads each image until its aspect ratio is as
close as possible to ``2.0`` (i.e. two times as many pixels along the
x-axis than the y-axis).
The rows to be padded will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToAspectRatio, self).__init__(
aspect_ratio=aspect_ratio, position="center",
pad_mode=pad_mode, pad_cval=pad_cval,
seed=seed, name=name, **old_kwargs)
class CropToSquare(CropToAspectRatio):
"""Crop images until their width and height are identical.
This is identical to :class:`~imgaug.augmenters.size.CropToAspectRatio`
with ``aspect_ratio=1.0``.
Images with axis sizes of ``0`` will not be altered.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`CropToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CropToSquare()
Create an augmenter that crops each image until it is square, i.e. height
and width match.
The rows to be cropped will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToSquare, self).__init__(
aspect_ratio=1.0, position=position,
seed=seed, name=name, **old_kwargs)
class CenterCropToSquare(CropToSquare):
"""Crop images equally on all sides until their height/width are identical.
In contrast to :class:`~imgaug.augmenters.size.CropToSquare`, this
augmenter always tries to spread the columns/rows to remove equally over
both sides of the respective axis to be cropped.
:class:`~imgaug.augmenters.size.CropToSquare` by default spreads the
croppings randomly.
This augmenter is identical to :class:`~imgaug.augmenters.size.CropToSquare`
with ``position="center"``, and thereby the same as
:class:`~imgaug.augmenters.size.CropToAspectRatio` with
``aspect_ratio=1.0, position="center"``.
Images with axis sizes of ``0`` will not be altered.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.CropToFixedSize`.
Parameters
----------
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterCropToSquare()
Create an augmenter that crops each image until it is square, i.e. height
and width match.
The rows to be cropped will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, seed=None, name=None, **old_kwargs):
super(CenterCropToSquare, self).__init__(
position="center",
seed=seed, name=name, **old_kwargs)
class PadToSquare(PadToAspectRatio):
"""Pad images until their height and width are identical.
This augmenter is identical to
:class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0``.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PadToSquare()
Create an augmenter that pads each image until it is square, i.e. height
and width match.
The rows to be padded will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, pad_mode="constant", pad_cval=0, position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToSquare, self).__init__(
aspect_ratio=1.0, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
class CenterPadToSquare(PadToSquare):
"""Pad images equally on all sides until their height & width are identical.
This is the same as :class:`~imgaug.augmenters.size.PadToSquare`, but uses
``position="center"`` by default, which spreads the pad amounts equally
over all image sides, while :class:`~imgaug.augmenters.size.PadToSquare`
by default spreads them randomly. This augmenter is thus also identical to
:class:`~imgaug.augmenters.size.PadToAspectRatio` with
``aspect_ratio=1.0, position="center"``.
Supported dtypes
----------------
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterPadToSquare()
Create an augmenter that pads each image until it is square, i.e. height
and width match.
The rows to be padded will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
def __init__(self, pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToSquare, self).__init__(
pad_mode=pad_mode, pad_cval=pad_cval, position="center",
seed=seed, name=name, **old_kwargs)
class KeepSizeByResize(meta.Augmenter):
"""Resize images back to their input sizes after applying child augmenters.
Combining this with e.g. a cropping augmenter as the child will lead to
images being resized back to the input size after the crop operation was
applied. Some augmenters have a ``keep_size`` argument that achieves the
same goal (if set to ``True``), though this augmenter offers control over
the interpolation mode and which augmentables to resize (images, heatmaps,
segmentation maps).
Supported dtypes
----------------
See :func:`~imgaug.imgaug.imresize_many_images`.
Parameters
----------
children : Augmenter or list of imgaug.augmenters.meta.Augmenter or None, optional
One or more augmenters to apply to images. These augmenters may change
the image size.
interpolation : KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional
The interpolation mode to use when resizing images.
Can take any value that :func:`~imgaug.imgaug.imresize_single_image`
accepts, e.g. ``cubic``.
* If this is ``KeepSizeByResize.NO_RESIZE`` then images will not
be resized.
* If this is a single ``str``, it is expected to have one of the
following values: ``nearest``, ``linear``, ``area``, ``cubic``.
* If this is a single integer, it is expected to have a value
identical to one of: ``cv2.INTER_NEAREST``,
``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC``.
* If this is a ``list`` of ``str`` or ``int``, it is expected that
each ``str``/``int`` is one of the above mentioned valid ones.
A random one of these values will be sampled per image.
* If this is a ``StochasticParameter``, it will be queried once per
call to ``_augment_images()`` and must return ``N`` ``str`` s or
``int`` s (matching the above mentioned ones) for ``N`` images.
interpolation_heatmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional
The interpolation mode to use when resizing heatmaps.
Meaning and valid values are similar to `interpolation`. This
parameter may also take the value ``KeepSizeByResize.SAME_AS_IMAGES``,
which will lead to copying the interpolation modes used for the
corresponding images. The value may also be returned on a per-image
basis if `interpolation_heatmaps` is provided as a
``StochasticParameter`` or may be one possible value if it is
provided as a ``list`` of ``str``.
interpolation_segmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional
The interpolation mode to use when resizing segmentation maps.
Similar to `interpolation_heatmaps`.
**Note**: For segmentation maps, only ``NO_RESIZE`` or nearest
neighbour interpolation (i.e. ``nearest``) makes sense in the vast
majority of cases.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
**old_kwargs
Outdated parameters. Avoid using these.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.KeepSizeByResize(
>>> iaa.Crop((20, 40), keep_size=False)
>>> )
Apply random cropping to input images, then resize them back to their
original input sizes. The resizing is done using this augmenter instead
of the corresponding internal resizing operation in ``Crop``.
>>> aug = iaa.KeepSizeByResize(
>>> iaa.Crop((20, 40), keep_size=False),
>>> interpolation="nearest"
>>> )
Same as in the previous example, but images are now always resized using
nearest neighbour interpolation.
>>> aug = iaa.KeepSizeByResize(
>>> iaa.Crop((20, 40), keep_size=False),
>>> interpolation=["nearest", "cubic"],
>>> interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES,
>>> interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE
>>> )
Similar to the previous example, but images are now sometimes resized
using nearest neighbour interpolation and sometimes using cubic
interpolation. Heatmaps are resized using the same interpolation as was
used for the corresponding image. Segmentation maps are not resized and
will therefore remain at their size after cropping.
"""
NO_RESIZE = "NO_RESIZE"
SAME_AS_IMAGES = "SAME_AS_IMAGES"
def __init__(self, children,
interpolation="cubic",
interpolation_heatmaps=SAME_AS_IMAGES,
interpolation_segmaps="nearest",
seed=None, name=None, **old_kwargs):
super(KeepSizeByResize, self).__init__(
seed=seed, name=name, **old_kwargs)
def _validate_param(val, allow_same_as_images):
valid_ips_and_resize = ia.IMRESIZE_VALID_INTERPOLATIONS \
+ [KeepSizeByResize.NO_RESIZE]
if allow_same_as_images and val == self.SAME_AS_IMAGES:
return self.SAME_AS_IMAGES
if val in valid_ips_and_resize:
return iap.Deterministic(val)
if isinstance(val, list):
assert len(val) > 0, (
"Expected a list of at least one interpolation method. "
"Got an empty list.")
valid_ips_here = valid_ips_and_resize
if allow_same_as_images:
valid_ips_here = valid_ips_here \
+ [KeepSizeByResize.SAME_AS_IMAGES]
only_valid_ips = all([ip in valid_ips_here for ip in val])
assert only_valid_ips, (
"Expected each interpolations to be one of '%s', got "
"'%s'." % (str(valid_ips_here), str(val)))
return iap.Choice(val)
if isinstance(val, iap.StochasticParameter):
return val
raise Exception(
"Expected interpolation to be one of '%s' or a list of "
"these values or a StochasticParameter. Got type %s." % (
str(ia.IMRESIZE_VALID_INTERPOLATIONS), type(val)))
self.children = meta.handle_children_list(children, self.name, "then")
self.interpolation = _validate_param(interpolation, False)
self.interpolation_heatmaps = _validate_param(interpolation_heatmaps,
True)
self.interpolation_segmaps = _validate_param(interpolation_segmaps,
True)
def _augment_batch_(self, batch, random_state, parents, hooks):
with batch.propagation_hooks_ctx(self, hooks, parents):
images_were_array = None
if batch.images is not None:
images_were_array = ia.is_np_array(batch.images)
shapes_orig = self._get_shapes(batch)
samples = self._draw_samples(batch.nb_rows, random_state)
batch = self.children.augment_batch_(
batch, parents=parents + [self], hooks=hooks)
if batch.images is not None:
batch.images = self._keep_size_images(
batch.images, shapes_orig["images"], images_were_array,
samples)
if batch.heatmaps is not None:
# don't use shapes_orig["images"] because they might be None
batch.heatmaps = self._keep_size_maps(
batch.heatmaps, shapes_orig["heatmaps"],
shapes_orig["heatmaps_arr"], samples[1])
if batch.segmentation_maps is not None:
# don't use shapes_orig["images"] because they might be None
batch.segmentation_maps = self._keep_size_maps(
batch.segmentation_maps, shapes_orig["segmentation_maps"],
shapes_orig["segmentation_maps_arr"], samples[2])
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._keep_size_keypoints,
shapes_orig=shapes_orig[augm_name],
interpolations=samples[0])
cbaois = self._apply_to_cbaois_as_keypoints(augm_value,
func)
setattr(batch, augm_name, cbaois)
return batch
@classmethod
def _keep_size_images(cls, images, shapes_orig, images_were_array,
samples):
interpolations, _, _ = samples
gen = zip(images, interpolations, shapes_orig)
result = []
for image, interpolation, input_shape in gen:
if interpolation == KeepSizeByResize.NO_RESIZE:
result.append(image)
else:
result.append(
ia.imresize_single_image(image, input_shape[0:2],
interpolation))
if images_were_array:
# note here that NO_RESIZE can have led to different shapes
nb_shapes = len({image.shape for image in result})
if nb_shapes == 1:
result = np.array(result, dtype=images.dtype)
return result
@classmethod
def _keep_size_maps(cls, augmentables, shapes_orig_images,
shapes_orig_arrs, interpolations):
result = []
gen = zip(augmentables, interpolations,
shapes_orig_arrs, shapes_orig_images)
for augmentable, interpolation, arr_shape_orig, img_shape_orig in gen:
if interpolation == "NO_RESIZE":
result.append(augmentable)
else:
augmentable = augmentable.resize(
arr_shape_orig[0:2], interpolation=interpolation)
augmentable.shape = img_shape_orig
result.append(augmentable)
return result
@classmethod
def _keep_size_keypoints(cls, kpsois_aug, shapes_orig, interpolations):
result = []
gen = zip(kpsois_aug, interpolations, shapes_orig)
for kpsoi_aug, interpolation, input_shape in gen:
if interpolation == KeepSizeByResize.NO_RESIZE:
result.append(kpsoi_aug)
else:
result.append(kpsoi_aug.on_(input_shape))
return result
@classmethod
def _get_shapes(cls, batch):
result = dict()
for column in batch.columns:
result[column.name] = [cell.shape for cell in column.value]
if batch.heatmaps is not None:
result["heatmaps_arr"] = [
cell.arr_0to1.shape for cell in batch.heatmaps]
if batch.segmentation_maps is not None:
result["segmentation_maps_arr"] = [
cell.arr.shape for cell in batch.segmentation_maps]
return result
def _draw_samples(self, nb_images, random_state):
rngs = random_state.duplicate(3)
interpolations = self.interpolation.draw_samples((nb_images,),
random_state=rngs[0])
if self.interpolation_heatmaps == KeepSizeByResize.SAME_AS_IMAGES:
interpolations_heatmaps = np.copy(interpolations)
else:
interpolations_heatmaps = self.interpolation_heatmaps.draw_samples(
(nb_images,), random_state=rngs[1]
)
# Note that `interpolations_heatmaps == self.SAME_AS_IMAGES`
# works here only if the datatype of the array is such that it
# may contain strings. It does not work properly for e.g.
# integer arrays and will produce a single bool output, even
# for arrays with more than one entry.
same_as_imgs_idx = [ip == self.SAME_AS_IMAGES
for ip in interpolations_heatmaps]
interpolations_heatmaps[same_as_imgs_idx] = \
interpolations[same_as_imgs_idx]
if self.interpolation_segmaps == KeepSizeByResize.SAME_AS_IMAGES:
interpolations_segmaps = np.copy(interpolations)
else:
# TODO This used previously the same seed as the heatmaps part
# leading to the same sampled values. Was that intentional?
# Doesn't look like it should be that way.
interpolations_segmaps = self.interpolation_segmaps.draw_samples(
(nb_images,), random_state=rngs[2]
)
# Note that `interpolations_segmaps == self.SAME_AS_IMAGES`
# works here only if the datatype of the array is such that it
# may contain strings. It does not work properly for e.g.
# integer arrays and will produce a single bool output, even
# for arrays with more than one entry.
same_as_imgs_idx = [ip == self.SAME_AS_IMAGES
for ip in interpolations_segmaps]
interpolations_segmaps[same_as_imgs_idx] = \
interpolations[same_as_imgs_idx]
return interpolations, interpolations_heatmaps, interpolations_segmaps
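# Example (illustrative sketch): with interpolation="cubic" (mapped to a
# Deterministic parameter) and interpolation_heatmaps=SAME_AS_IMAGES,
# _draw_samples(2, rng) yields image interpolations ["cubic", "cubic"],
# heatmap interpolations copied from the images and segmap
# interpolations ["nearest", "nearest"] (the default).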
def _to_deterministic(self):
aug = self.copy()
aug.children = aug.children.to_deterministic()
aug.deterministic = True
aug.random_state = self.random_state.derive_rng_()
return aug
def get_parameters(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
return [self.interpolation, self.interpolation_heatmaps,
self.interpolation_segmaps]
def get_children_lists(self):
"""See :func:`~imgaug.augmenters.meta.Augmenter.get_children_lists`."""
return [self.children]
def __str__(self):
pattern = (
"%s("
"interpolation=%s, "
"interpolation_heatmaps=%s, "
"interpolation_segmaps=%s, "
"name=%s, "
"children=%s, "
"deterministic=%s"
")")
return pattern % (
self.__class__.__name__, self.interpolation,
self.interpolation_heatmaps, self.interpolation_segmaps, self.name,
self.children, self.deterministic)
from __future__ import print_function, division, absolute_import
import re
import functools
import numpy as np
import cv2
import imgaug as ia
from imgaug.imgaug import _normalize_cv2_input_arr_
from . import meta
from .. import parameters as iap
def _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True):
if prevent_zero_size:
top, right, bottom, left = _crop_prevent_zero_size(
shape[0], shape[1], top, right, bottom, left)
height, width = shape[0:2]
x1 = left
x2 = width - right
y1 = top
y2 = height - bottom
x2 = max(x2, x1)
y2 = max(y2, y1)
return x1, y1, x2, y2
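# Worked example (illustrative): for shape (10, 20) with top=2, right=3,
# bottom=1, left=4, the crop rectangle is (x1, y1, x2, y2) = (4, 2, 17, 9),
# i.e. the region arr[2:9, 4:17] is kept.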
def _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True):
x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left,
prevent_zero_size=prevent_zero_size)
return arr[y1:y2, x1:x2, ...]
def _crop_and_pad_arr(arr, croppings, paddings, pad_mode="constant",
pad_cval=0, keep_size=False):
height, width = arr.shape[0:2]
image_cr = _crop_arr_(arr, *croppings)
image_cr_pa = pad(
image_cr,
top=paddings[0], right=paddings[1],
bottom=paddings[2], left=paddings[3],
mode=pad_mode, cval=pad_cval)
if keep_size:
image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))
return image_cr_pa
def _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img,
pad_mode="constant", pad_cval=0.0, keep_size=False):
return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img,
paddings_img, pad_mode, pad_cval,
keep_size)
def _crop_and_pad_segmap_(segmap, croppings_img, paddings_img,
pad_mode="constant", pad_cval=0, keep_size=False):
return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img,
paddings_img, pad_mode, pad_cval,
keep_size)
def _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img,
paddings_img, pad_mode="constant",
pad_cval=None, keep_size=False):
if isinstance(augmentable, ia.HeatmapsOnImage):
arr_attr_name = "arr_0to1"
pad_cval = pad_cval if pad_cval is not None else 0.0
else:
assert isinstance(augmentable, ia.SegmentationMapsOnImage), (
"Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s." % (
type(augmentable)))
arr_attr_name = "arr"
pad_cval = pad_cval if pad_cval is not None else 0
arr = getattr(augmentable, arr_attr_name)
arr_shape_orig = arr.shape
augm_shape = augmentable.shape
croppings_proj = _project_size_changes(croppings_img, augm_shape, arr.shape)
paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape)
croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1],
*croppings_proj)
arr_cr = _crop_arr_(arr,
croppings_proj[0], croppings_proj[1],
croppings_proj[2], croppings_proj[3])
arr_cr_pa = pad(
arr_cr,
top=paddings_proj[0], right=paddings_proj[1],
bottom=paddings_proj[2], left=paddings_proj[3],
mode=pad_mode,
cval=pad_cval)
setattr(augmentable, arr_attr_name, arr_cr_pa)
if keep_size:
augmentable = augmentable.resize(arr_shape_orig[0:2])
else:
augmentable.shape = _compute_shape_after_crop_and_pad(
augmentable.shape, croppings_img, paddings_img)
return augmentable
def _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size):
x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img)
crop_left = x1
crop_top = y1
shape_orig = kpsoi.shape
shifted = kpsoi.shift_(
x=-crop_left+paddings_img[3],
y=-crop_top+paddings_img[0])
shifted.shape = _compute_shape_after_crop_and_pad(
shape_orig, croppings_img, paddings_img)
if keep_size:
shifted = shifted.on_(shape_orig)
return shifted
def _compute_shape_after_crop_and_pad(old_shape, croppings, paddings):
x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings)
new_shape = list(old_shape)
new_shape[0] = y2 - y1 + paddings[0] + paddings[2]
new_shape[1] = x2 - x1 + paddings[1] + paddings[3]
return tuple(new_shape)
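# Worked example (illustrative): old_shape=(10, 20, 3),
# croppings=(2, 3, 1, 4) and paddings=(1, 1, 1, 1) give a new height of
# (9 - 2) + 1 + 1 = 9 and a new width of (17 - 4) + 1 + 1 = 15,
# i.e. (9, 15, 3).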
def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom,
crop_left):
remaining_height = height - (crop_top + crop_bottom)
remaining_width = width - (crop_left + crop_right)
if remaining_height < 1:
regain = abs(remaining_height) + 1
regain_top = regain // 2
regain_bottom = regain // 2
if regain_top + regain_bottom < regain:
regain_top += 1
if regain_top > crop_top:
diff = regain_top - crop_top
regain_top = crop_top
regain_bottom += diff
elif regain_bottom > crop_bottom:
diff = regain_bottom - crop_bottom
regain_bottom = crop_bottom
regain_top += diff
crop_top = crop_top - regain_top
crop_bottom = crop_bottom - regain_bottom
if remaining_width < 1:
regain = abs(remaining_width) + 1
regain_right = regain // 2
regain_left = regain // 2
if regain_right + regain_left < regain:
regain_right += 1
if regain_right > crop_right:
diff = regain_right - crop_right
regain_right = crop_right
regain_left += diff
elif regain_left > crop_left:
diff = regain_left - crop_left
regain_left = crop_left
regain_right += diff
crop_right = crop_right - regain_right
crop_left = crop_left - regain_left
return (
max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0),
max(crop_left, 0))
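# Worked example (illustrative): for a 4px high axis with crop_top=3 and
# crop_bottom=2, one pixel must remain, so regain=2 is split 1/1 and the
# call returns crop_top=2, crop_bottom=1 (width amounts unchanged).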
def _project_size_changes(trbl, from_shape, to_shape):
if from_shape[0:2] == to_shape[0:2]:
return trbl
height_to = to_shape[0]
width_to = to_shape[1]
height_from = from_shape[0]
width_from = from_shape[1]
top = trbl[0]
right = trbl[1]
bottom = trbl[2]
left = trbl[3]
top = _int_r(height_to * (top/height_from) - 1e-4)
right = _int_r(width_to * (right/width_from) + 1e-4)
bottom = _int_r(height_to * (bottom/height_from) + 1e-4)
left = _int_r(width_to * (left/width_from) - 1e-4)
return top, right, bottom, left
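# Worked example (illustrative): projecting trbl=(10, 0, 10, 0) from a
# (100, 100) image onto a (50, 50) array scales each amount by 0.5,
# giving (5, 0, 5, 0); the +-1e-4 terms only bias the rounding of
# values that land exactly on .5 boundaries.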
def _int_r(value):
return int(np.round(value))
def _handle_pad_mode_param(pad_mode):
pad_modes_available = {
"constant", "edge", "linear_ramp", "maximum", "mean", "median",
"minimum", "reflect", "symmetric", "wrap"}
if pad_mode == ia.ALL:
return iap.Choice(list(pad_modes_available))
if ia.is_string(pad_mode):
assert pad_mode in pad_modes_available, (
"Value '%s' is not a valid pad mode. Valid pad modes are: %s." % (
pad_mode, ", ".join(pad_modes_available)))
return iap.Deterministic(pad_mode)
if isinstance(pad_mode, list):
assert all([v in pad_modes_available for v in pad_mode]), (
"At least one in list %s is not a valid pad mode. Valid pad "
"modes are: %s." % (str(pad_mode), ", ".join(pad_modes_available)))
return iap.Choice(pad_mode)
if isinstance(pad_mode, iap.StochasticParameter):
return pad_mode
raise Exception(
"Expected pad_mode to be ia.ALL or string or list of strings or "
"StochasticParameter, got %s." % (type(pad_mode),))
def _handle_position_parameter(position):
if position == "uniform":
return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0)
if position == "normal":
return (
iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),
minval=0.0, maxval=1.0),
iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),
minval=0.0, maxval=1.0)
)
if position == "center":
return iap.Deterministic(0.5), iap.Deterministic(0.5)
if (ia.is_string(position)
and re.match(r"^(left|center|right)-(top|center|bottom)$",
position)):
mapping = {"top": 0.0, "center": 0.5, "bottom": 1.0, "left": 0.0,
"right": 1.0}
return (
iap.Deterministic(mapping[position.split("-")[0]]),
iap.Deterministic(mapping[position.split("-")[1]])
)
if isinstance(position, iap.StochasticParameter):
return position
if isinstance(position, tuple):
assert len(position) == 2, (
"Expected tuple with two entries as position parameter. "
"Got %d entries with types %s.." % (
len(position), str([type(item) for item in position])))
for item in position:
if ia.is_single_number(item) and (item < 0 or item > 1.0):
raise Exception(
"Both position values must be within the value range "
"[0.0, 1.0]. Got type %s with value %.8f." % (
type(item), item,))
position = [iap.Deterministic(item)
if ia.is_single_number(item)
else item for item in position]
only_sparams = all([isinstance(item, iap.StochasticParameter)
for item in position])
assert only_sparams, (
"Expected tuple with two entries that are both either "
"StochasticParameter or float/int. Got types %s." % (
str([type(item) for item in position])
))
return tuple(position)
raise Exception(
"Expected one of the following as position parameter: string "
"'uniform', string 'normal', string 'center', a string matching "
"regex ^(left|center|right)-(top|center|bottom)$, a single "
"StochasticParameter or a tuple of two entries, both being either "
"StochasticParameter or floats or int. Got instead type %s with "
"content '%s'." % (
type(position),
(str(position)
if len(str(position)) < 20
else str(position)[0:20] + "...")
)
)
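# Example (illustrative): _handle_position_parameter("right-bottom")
# returns (Deterministic(1.0), Deterministic(1.0)) and "center-top"
# returns (Deterministic(0.5), Deterministic(0.0)); the first entry is
# the x coordinate, the second the y coordinate.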
def _assert_two_or_three_dims(shape):
if hasattr(shape, "shape"):
shape = shape.shape
assert len(shape) in [2, 3], (
"Expected image with two or three dimensions, but got %d dimensions "
"and shape %s." % (len(shape), shape))
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
import imgaug.dtypes as iadt
_assert_two_or_three_dims(arr)
assert all([v >= 0 for v in [top, right, bottom, left]]), (
"Expected padding amounts that are >=0, but got %d, %d, %d, %d "
"(top, right, bottom, left)" % (top, right, bottom, left))
is_multi_cval = ia.is_iterable(cval)
if top > 0 or right > 0 or bottom > 0 or left > 0:
min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype)
if arr.dtype.name == "float128":
cval = np.float128(cval)
if is_multi_cval:
cval = np.clip(cval, min_value, max_value)
else:
cval = max(min(cval, max_value), min_value)
has_zero_sized_axis = any([axis == 0 for axis in arr.shape])
if has_zero_sized_axis:
mode = "constant"
mapping_mode_np_to_cv2 = {
"constant": cv2.BORDER_CONSTANT,
"edge": cv2.BORDER_REPLICATE,
"linear_ramp": None,
"maximum": None,
"mean": None,
"median": None,
"minimum": None,
"reflect": cv2.BORDER_REFLECT_101,
"symmetric": cv2.BORDER_REFLECT,
"wrap": None,
cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
}
bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
# is not supported" error
bad_datatype_cv2 = (
arr.dtype.name
in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
)
bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0)
if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2:
kind = arr.dtype.kind
if is_multi_cval:
cval = [float(cval_c) if kind == "f" else int(cval_c)
for cval_c in cval]
else:
cval = float(cval) if kind == "f" else int(cval)
if arr.ndim == 2 or arr.shape[2] <= 4:
if arr.ndim == 3 and not is_multi_cval:
cval = tuple([cval] * arr.shape[2])
arr_pad = cv2.copyMakeBorder(
_normalize_cv2_input_arr_(arr),
top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval)
if arr.ndim == 3 and arr_pad.ndim == 2:
arr_pad = arr_pad[..., np.newaxis]
else:
result = []
channel_start_idx = 0
cval = cval if is_multi_cval else tuple([cval] * arr.shape[2])
while channel_start_idx < arr.shape[2]:
arr_c = arr[..., channel_start_idx:channel_start_idx+4]
cval_c = cval[channel_start_idx:channel_start_idx+4]
arr_pad_c = cv2.copyMakeBorder(
_normalize_cv2_input_arr_(arr_c),
top=top, bottom=bottom, left=left, right=right,
borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
arr_pad_c = np.atleast_3d(arr_pad_c)
result.append(arr_pad_c)
channel_start_idx += 4
arr_pad = np.concatenate(result, axis=2)
else:
paddings_np = [(top, bottom), (left, right)]
if arr.ndim == 3:
paddings_np.append((0, 0))
if mode == "constant":
if arr.ndim > 2 and is_multi_cval:
arr_pad_chans = [
np.pad(arr[..., c], paddings_np[0:2], mode=mode,
constant_values=cval[c])
for c in np.arange(arr.shape[2])]
arr_pad = np.stack(arr_pad_chans, axis=-1)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode,
constant_values=cval)
elif mode == "linear_ramp":
if arr.ndim > 2 and is_multi_cval:
arr_pad_chans = [
np.pad(arr[..., c], paddings_np[0:2], mode=mode,
end_values=cval[c])
for c in np.arange(arr.shape[2])]
arr_pad = np.stack(arr_pad_chans, axis=-1)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode,
end_values=cval)
else:
arr_pad = np.pad(arr, paddings_np, mode=mode)
return arr_pad
return np.copy(arr)
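# Usage sketch (illustrative): padding a (2, 2) uint8 array by one pixel
# on every side with replicated border values returns a (4, 4) array:
#   arr = np.zeros((2, 2), dtype=np.uint8)
#   arr_pad = pad(arr, top=1, right=1, bottom=1, left=1, mode="edge")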
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0,
return_pad_amounts=False):
pad_top, pad_right, pad_bottom, pad_left = \
compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
return arr_padded
def pad_to_multiples_of(arr, height_multiple, width_multiple, mode="constant",
cval=0, return_pad_amounts=False):
pad_top, pad_right, pad_bottom, pad_left = \
compute_paddings_to_reach_multiples_of(
arr, height_multiple, width_multiple)
arr_padded = pad(
arr,
top=pad_top,
right=pad_right,
bottom=pad_bottom,
left=pad_left,
mode=mode,
cval=cval
)
if return_pad_amounts:
return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
return arr_padded
def compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio):
_assert_two_or_three_dims(arr)
assert aspect_ratio > 0, (
"Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,))
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
if height == 0:
height = 1
pad_bottom += 1
if width == 0:
width = 1
pad_right += 1
aspect_ratio_current = width / height
if aspect_ratio_current < aspect_ratio:
diff = (aspect_ratio * height) - width
pad_right += int(np.ceil(diff / 2))
pad_left += int(np.floor(diff / 2))
elif aspect_ratio_current > aspect_ratio:
diff = ((1/aspect_ratio) * width) - height
pad_top += int(np.floor(diff / 2))
pad_bottom += int(np.ceil(diff / 2))
return pad_top, pad_right, pad_bottom, pad_left
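# Worked example (illustrative): a (2, 4) array with aspect_ratio=1.0 is
# currently too wide (4/2 = 2.0 > 1.0), so diff = 1.0 * 4 - 2 = 2 rows
# are added, returning (1, 0, 1, 0) and yielding a (4, 4) result.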
def compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio):
_assert_two_or_three_dims(arr)
assert aspect_ratio > 0, (
"Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,))
shape = arr.shape if hasattr(arr, "shape") else arr
assert shape[0] > 0, (
"Expected to get an array with height >0, got shape %s." % (shape,))
height, width = shape[0:2]
aspect_ratio_current = width / height
top = 0
right = 0
bottom = 0
left = 0
if aspect_ratio_current < aspect_ratio:
crop_amount = height - (width / aspect_ratio)
crop_amount = min(crop_amount, height - 1)
top = int(np.floor(crop_amount / 2))
bottom = int(np.ceil(crop_amount / 2))
elif aspect_ratio_current > aspect_ratio:
crop_amount = width - height * aspect_ratio
crop_amount = min(crop_amount, width - 1)
left = int(np.floor(crop_amount / 2))
right = int(np.ceil(crop_amount / 2))
return top, right, bottom, left
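# Worked example (illustrative): a (4, 4) array with aspect_ratio=2.0 is
# too tall (1.0 < 2.0), so crop_amount = 4 - 4/2 = 2 rows are removed,
# returning (1, 0, 1, 0) and yielding a (2, 4) result.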
def compute_paddings_to_reach_multiples_of(arr, height_multiple,
width_multiple):
def _compute_axis_value(axis_size, multiple):
if multiple is None:
return 0, 0
if axis_size == 0:
to_pad = multiple
elif axis_size % multiple == 0:
to_pad = 0
else:
to_pad = multiple - (axis_size % multiple)
return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))
_assert_two_or_three_dims(arr)
if height_multiple is not None:
assert height_multiple > 0, (
"Can only pad to multiples of 1 or larger, got %d." % (
height_multiple,))
if width_multiple is not None:
assert width_multiple > 0, (
"Can only pad to multiples of 1 or larger, got %d." % (
width_multiple,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_multiple)
left, right = _compute_axis_value(width, width_multiple)
return top, right, bottom, left
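# Worked example (illustrative): a (5, 8) array with height_multiple=4
# and width_multiple=4 needs 3 more rows (5 % 4 == 1) and no columns,
# returning (top, right, bottom, left) = (1, 0, 2, 0).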
def compute_croppings_to_reach_multiples_of(arr, height_multiple,
width_multiple):
def _compute_axis_value(axis_size, multiple):
if multiple is None:
return 0, 0
if axis_size == 0:
to_crop = 0
elif axis_size % multiple == 0:
to_crop = 0
else:
to_crop = axis_size % multiple
return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))
_assert_two_or_three_dims(arr)
if height_multiple is not None:
assert height_multiple > 0, (
"Can only crop to multiples of 1 or larger, got %d." % (
height_multiple,))
if width_multiple is not None:
assert width_multiple > 0, (
"Can only crop to multiples of 1 or larger, got %d." % (
width_multiple,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_multiple)
left, right = _compute_axis_value(width, width_multiple)
return top, right, bottom, left
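# Hand-worked example for the function above (illustrative):
#
#   compute_croppings_to_reach_multiples_of(np.zeros((10, 12)), 4, 4)
#   # -> (1, 0, 1, 0): height 10 has remainder 2 modulo 4, removed as
#   # 1 row top and 1 row bottom; width 12 is untouched.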
def compute_paddings_to_reach_powers_of(arr, height_base, width_base,
allow_zero_exponent=False):
def _compute_axis_value(axis_size, base):
if base is None:
return 0, 0
if axis_size == 0:
to_pad = 1 if allow_zero_exponent else base
elif axis_size <= base:
to_pad = base - axis_size
else:
exponent = np.log(axis_size) / np.log(base)
to_pad = (base ** int(np.ceil(exponent))) - axis_size
return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))
_assert_two_or_three_dims(arr)
if height_base is not None:
assert height_base > 1, (
"Can only pad to base larger than 1, got %d." % (height_base,))
if width_base is not None:
assert width_base > 1, (
"Can only pad to base larger than 1, got %d." % (width_base,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_base)
left, right = _compute_axis_value(width, width_base)
return top, right, bottom, left
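# Hand-worked example for the function above (illustrative):
#
#   compute_paddings_to_reach_powers_of(np.zeros((10, 12)), 2, 2)
#   # -> (3, 2, 3, 2): both axes are padded up to the next power of two
#   # (height 10 -> 16, width 12 -> 16).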
def compute_croppings_to_reach_powers_of(arr, height_base, width_base,
allow_zero_exponent=False):
def _compute_axis_value(axis_size, base):
if base is None:
return 0, 0
if axis_size == 0:
to_crop = 0
elif axis_size < base:
to_crop = axis_size - 1 if allow_zero_exponent else 0
else:
exponent = np.log(axis_size) / np.log(base)
to_crop = axis_size - (base ** int(exponent))
return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))
_assert_two_or_three_dims(arr)
if height_base is not None:
assert height_base > 1, (
"Can only crop to base larger than 1, got %d." % (height_base,))
if width_base is not None:
assert width_base > 1, (
"Can only crop to base larger than 1, got %d." % (width_base,))
shape = arr.shape if hasattr(arr, "shape") else arr
height, width = shape[0:2]
top, bottom = _compute_axis_value(height, height_base)
left, right = _compute_axis_value(width, width_base)
return top, right, bottom, left
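# Hand-worked example for the function above (illustrative):
#
#   compute_croppings_to_reach_powers_of(np.zeros((10, 12)), 2, 2)
#   # -> (1, 2, 1, 2): both axes are cropped down to the previous power of
#   # two (height 10 -> 8, width 12 -> 8).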
@ia.deprecated(alt_func="Resize",
comment="Resize has the exactly same interface as Scale.")
def Scale(*args, **kwargs):
return Resize(*args, **kwargs)
class Resize(meta.Augmenter):
def __init__(self, size, interpolation="cubic",
seed=None, name=None, **old_kwargs):
super(Resize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.size, self.size_order = self._handle_size_arg(size, False)
self.interpolation = self._handle_interpolation_arg(interpolation)
@classmethod
def _handle_size_arg(cls, size, subcall):
def _dict_to_size_tuple(val1, val2):
kaa = "keep-aspect-ratio"
not_both_kaa = (val1 != kaa or val2 != kaa)
assert not_both_kaa, (
"Expected at least one value to not be \"keep-aspect-ratio\", "
"but got it two times.")
size_tuple = []
for k in [val1, val2]:
if k in ["keep-aspect-ratio", "keep"]:
entry = iap.Deterministic(k)
else:
entry = cls._handle_size_arg(k, True)
size_tuple.append(entry)
return tuple(size_tuple)
def _contains_any_key(dict_, keys):
return any([key in dict_ for key in keys])
size_order = "HW"
if size == "keep":
result = iap.Deterministic("keep")
elif ia.is_single_number(size):
assert size > 0, "Expected only values > 0, got %s" % (size,)
result = iap.Deterministic(size)
elif not subcall and isinstance(size, dict):
if len(size.keys()) == 0:
result = iap.Deterministic("keep")
elif _contains_any_key(size, ["height", "width"]):
height = size.get("height", "keep")
width = size.get("width", "keep")
result = _dict_to_size_tuple(height, width)
elif _contains_any_key(size, ["shorter-side", "longer-side"]):
shorter = size.get("shorter-side", "keep")
longer = size.get("longer-side", "keep")
result = _dict_to_size_tuple(shorter, longer)
size_order = "SL"
else:
raise ValueError(
"Expected dictionary containing no keys, "
"the keys \"height\" and/or \"width\", "
"or the keys \"shorter-side\" and/or \"longer-side\". "
"Got keys: %s." % (str(size.keys()),))
elif isinstance(size, tuple):
assert len(size) == 2, (
"Expected size tuple to contain exactly 2 values, "
"got %d." % (len(size),))
assert size[0] > 0 and size[1] > 0, (
"Expected size tuple to only contain values >0, "
"got %d and %d." % (size[0], size[1]))
if ia.is_single_float(size[0]) or ia.is_single_float(size[1]):
result = iap.Uniform(size[0], size[1])
else:
result = iap.DiscreteUniform(size[0], size[1])
elif isinstance(size, list):
if len(size) == 0:
result = iap.Deterministic("keep")
else:
all_int = all([ia.is_single_integer(v) for v in size])
all_float = all([ia.is_single_float(v) for v in size])
assert all_int or all_float, (
"Expected to get only integers or floats.")
assert all([v > 0 for v in size]), (
"Expected all values to be >0.")
result = iap.Choice(size)
elif isinstance(size, iap.StochasticParameter):
result = size
else:
raise ValueError(
"Expected number, tuple of two numbers, list of numbers, "
"dictionary of form "
"{'height': number/tuple/list/'keep-aspect-ratio'/'keep', "
"'width': <analogous>}, dictionary of form "
"{'shorter-side': number/tuple/list/'keep-aspect-ratio'/"
"'keep', 'longer-side': <analogous>} "
"or StochasticParameter, got %s." % (type(size),)
)
if subcall:
return result
return result, size_order
@classmethod
def _handle_interpolation_arg(cls, interpolation):
if interpolation == ia.ALL:
interpolation = iap.Choice(
["nearest", "linear", "area", "cubic"])
elif ia.is_single_integer(interpolation):
interpolation = iap.Deterministic(interpolation)
elif ia.is_string(interpolation):
interpolation = iap.Deterministic(interpolation)
elif ia.is_iterable(interpolation):
interpolation = iap.Choice(interpolation)
elif isinstance(interpolation, iap.StochasticParameter):
pass
else:
raise Exception(
"Expected int or string or iterable or StochasticParameter, "
"got %s." % (type(interpolation),))
return interpolation
def _augment_batch_(self, batch, random_state, parents, hooks):
nb_rows = batch.nb_rows
samples = self._draw_samples(nb_rows, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, "arr_0to1", samples)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, "arr",
(samples[0], samples[1], [None] * nb_rows))
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
input_was_array = False
input_dtype = None
if ia.is_np_array(images):
input_was_array = True
input_dtype = images.dtype
samples_a, samples_b, samples_ip = samples
result = []
for i, image in enumerate(images):
h, w = self._compute_height_width(image.shape, samples_a[i],
samples_b[i], self.size_order)
image_rs = ia.imresize_single_image(image, (h, w),
interpolation=samples_ip[i])
result.append(image_rs)
if input_was_array:
all_same_size = (len({image.shape for image in result}) == 1)
if all_same_size:
result = np.array(result, dtype=input_dtype)
return result
def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples):
result = []
samples_h, samples_w, samples_ip = samples
for i, augmentable in enumerate(augmentables):
arr = getattr(augmentable, arr_attr_name)
arr_shape = arr.shape
img_shape = augmentable.shape
h_img, w_img = self._compute_height_width(
img_shape, samples_h[i], samples_w[i], self.size_order)
h = int(np.round(h_img * (arr_shape[0] / img_shape[0])))
w = int(np.round(w_img * (arr_shape[1] / img_shape[1])))
h = max(h, 1)
w = max(w, 1)
if samples_ip[0] is not None:
augmentable_resize = augmentable.resize(
(h, w), interpolation=samples_ip[i])
else:
augmentable_resize = augmentable.resize((h, w))
augmentable_resize.shape = (h_img, w_img) + img_shape[2:]
result.append(augmentable_resize)
return result
def _augment_keypoints_by_samples(self, kpsois, samples):
result = []
samples_a, samples_b, _samples_ip = samples
for i, kpsoi in enumerate(kpsois):
h, w = self._compute_height_width(
kpsoi.shape, samples_a[i], samples_b[i], self.size_order)
new_shape = (h, w) + kpsoi.shape[2:]
keypoints_on_image_rs = kpsoi.on_(new_shape)
result.append(keypoints_on_image_rs)
return result
def _draw_samples(self, nb_images, random_state):
rngs = random_state.duplicate(3)
if isinstance(self.size, tuple):
samples_h = self.size[0].draw_samples(nb_images,
random_state=rngs[0])
samples_w = self.size[1].draw_samples(nb_images,
random_state=rngs[1])
else:
samples_h = self.size.draw_samples(nb_images, random_state=rngs[0])
samples_w = samples_h
samples_ip = self.interpolation.draw_samples(nb_images,
random_state=rngs[2])
return samples_h, samples_w, samples_ip
@classmethod
def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order):
imh, imw = image_shape[0:2]
if size_order == 'SL':
if imh < imw:
h, w = sample_a, sample_b
else:
w, h = sample_a, sample_b
else:
h, w = sample_a, sample_b
if ia.is_single_float(h):
assert h > 0, "Expected 'h' to be >0, got %.4f" % (h,)
h = int(np.round(imh * h))
h = h if h > 0 else 1
elif h == "keep":
h = imh
if ia.is_single_float(w):
assert w > 0, "Expected 'w' to be >0, got %.4f" % (w,)
w = int(np.round(imw * w))
w = w if w > 0 else 1
elif w == "keep":
w = imw
if h == "keep-aspect-ratio":
h_per_w_orig = imh / imw
h = int(np.round(w * h_per_w_orig))
if w == "keep-aspect-ratio":
w_per_h_orig = imw / imh
w = int(np.round(h * w_per_h_orig))
return h, w
def get_parameters(self):
return [self.size, self.interpolation, self.size_order]
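# Usage sketch for Resize (follows the interface defined above; treat it as
# an illustration rather than exhaustive documentation):
#
#   aug = Resize({"height": 32, "width": "keep-aspect-ratio"})
#   image_rs = aug.augment_image(image)  # image: HxWxC uint8 ndarray
#
# A dict with "shorter-side"/"longer-side" switches size_order to "SL", so
# the sampled value is applied to whichever side is shorter/longer per image.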
class _CropAndPadSamplingResult(object):
def __init__(self, crop_top, crop_right, crop_bottom, crop_left,
pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval):
self.crop_top = crop_top
self.crop_right = crop_right
self.crop_bottom = crop_bottom
self.crop_left = crop_left
self.pad_top = pad_top
self.pad_right = pad_right
self.pad_bottom = pad_bottom
self.pad_left = pad_left
self.pad_mode = pad_mode
self.pad_cval = pad_cval
@property
def croppings(self):
return self.crop_top, self.crop_right, self.crop_bottom, self.crop_left
@property
def paddings(self):
return self.pad_top, self.pad_right, self.pad_bottom, self.pad_left
class CropAndPad(meta.Augmenter):
def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0,
keep_size=True, sample_independently=True,
seed=None, name=None, **old_kwargs):
super(CropAndPad, self).__init__(
seed=seed, name=name, **old_kwargs)
self.mode, self.all_sides, self.top, self.right, self.bottom, \
self.left = self._handle_px_and_percent_args(px, percent)
self.pad_mode = _handle_pad_mode_param(pad_mode)
self.pad_cval = iap.handle_discrete_param(
pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.keep_size = keep_size
self.sample_independently = sample_independently
self._pad_mode_heatmaps = "constant"
self._pad_mode_segmentation_maps = "constant"
self._pad_cval_heatmaps = 0.0
self._pad_cval_segmentation_maps = 0
@classmethod
def _handle_px_and_percent_args(cls, px, percent):
all_sides = None
top, right, bottom, left = None, None, None, None
if px is None and percent is None:
mode = "noop"
elif px is not None and percent is not None:
raise Exception("Can only pad by pixels or percent, not both.")
elif px is not None:
mode = "px"
all_sides, top, right, bottom, left = cls._handle_px_arg(px)
        else:
            mode = "percent"
all_sides, top, right, bottom, left = cls._handle_percent_arg(
percent)
return mode, all_sides, top, right, bottom, left
@classmethod
def _handle_px_arg(cls, px):
all_sides = None
top, right, bottom, left = None, None, None, None
if ia.is_single_integer(px):
all_sides = iap.Deterministic(px)
elif isinstance(px, tuple):
assert len(px) in [2, 4], (
"Expected 'px' given as a tuple to contain 2 or 4 "
"entries, got %d." % (len(px),))
def handle_param(p):
if ia.is_single_integer(p):
return iap.Deterministic(p)
if isinstance(p, tuple):
assert len(p) == 2, (
"Expected tuple of 2 values, got %d." % (len(p)))
only_ints = (
ia.is_single_integer(p[0])
and ia.is_single_integer(p[1]))
assert only_ints, (
"Expected tuple of integers, got %s and %s." % (
type(p[0]), type(p[1])))
return iap.DiscreteUniform(p[0], p[1])
if isinstance(p, list):
assert len(p) > 0, (
"Expected non-empty list, but got empty one.")
assert all([ia.is_single_integer(val) for val in p]), (
"Expected list of ints, got types %s." % (
", ".join([str(type(v)) for v in p])))
return iap.Choice(p)
if isinstance(p, iap.StochasticParameter):
return p
raise Exception(
"Expected int, tuple of two ints, list of ints or "
"StochasticParameter, got type %s." % (type(p),))
if len(px) == 2:
all_sides = handle_param(px)
            else:
                top = handle_param(px[0])
right = handle_param(px[1])
bottom = handle_param(px[2])
left = handle_param(px[3])
elif isinstance(px, iap.StochasticParameter):
top = right = bottom = left = px
else:
raise Exception(
"Expected int, tuple of 4 "
"ints/tuples/lists/StochasticParameters or "
"StochasticParameter, got type %s." % (type(px),))
return all_sides, top, right, bottom, left
@classmethod
def _handle_percent_arg(cls, percent):
all_sides = None
top, right, bottom, left = None, None, None, None
if ia.is_single_number(percent):
assert percent > -1.0, (
"Expected 'percent' to be >-1.0, got %.4f." % (percent,))
all_sides = iap.Deterministic(percent)
elif isinstance(percent, tuple):
assert len(percent) in [2, 4], (
"Expected 'percent' given as a tuple to contain 2 or 4 "
"entries, got %d." % (len(percent),))
def handle_param(p):
if ia.is_single_number(p):
return iap.Deterministic(p)
if isinstance(p, tuple):
assert len(p) == 2, (
"Expected tuple of 2 values, got %d." % (len(p),))
only_numbers = (
ia.is_single_number(p[0])
and ia.is_single_number(p[1]))
assert only_numbers, (
"Expected tuple of numbers, got %s and %s." % (
type(p[0]), type(p[1])))
assert p[0] > -1.0 and p[1] > -1.0, (
"Expected tuple of values >-1.0, got %.4f and "
"%.4f." % (p[0], p[1]))
return iap.Uniform(p[0], p[1])
if isinstance(p, list):
assert len(p) > 0, (
"Expected non-empty list, but got empty one.")
assert all([ia.is_single_number(val) for val in p]), (
"Expected list of numbers, got types %s." % (
", ".join([str(type(v)) for v in p])))
assert all([val > -1.0 for val in p]), (
"Expected list of values >-1.0, got values %s." % (
", ".join(["%.4f" % (v,) for v in p])))
return iap.Choice(p)
if isinstance(p, iap.StochasticParameter):
return p
raise Exception(
"Expected int, tuple of two ints, list of ints or "
"StochasticParameter, got type %s." % (type(p),))
if len(percent) == 2:
all_sides = handle_param(percent)
            else:
                top = handle_param(percent[0])
right = handle_param(percent[1])
bottom = handle_param(percent[2])
left = handle_param(percent[3])
elif isinstance(percent, iap.StochasticParameter):
top = right = bottom = left = percent
else:
raise Exception(
"Expected number, tuple of 4 "
"numbers/tuples/lists/StochasticParameters or "
"StochasticParameter, got type %s." % (type(percent),))
return all_sides, top, right, bottom, left
def _augment_batch_(self, batch, random_state, parents, hooks):
shapes = batch.get_rowwise_shapes()
samples = self._draw_samples(random_state, shapes)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps,
self._pad_mode_heatmaps, self._pad_cval_heatmaps,
samples)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps,
self._pad_mode_segmentation_maps,
self._pad_cval_segmentation_maps, samples)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
result = []
for i, image in enumerate(images):
samples_i = samples[i]
image_cr_pa = _crop_and_pad_arr(
image, samples_i.croppings, samples_i.paddings,
samples_i.pad_mode, samples_i.pad_cval, self.keep_size)
result.append(image_cr_pa)
if ia.is_np_array(images):
if self.keep_size:
result = np.array(result, dtype=images.dtype)
else:
nb_shapes = len({image.shape for image in result})
if nb_shapes == 1:
result = np.array(result, dtype=images.dtype)
return result
def _augment_maps_by_samples(self, augmentables, pad_mode, pad_cval,
samples):
result = []
for i, augmentable in enumerate(augmentables):
samples_img = samples[i]
augmentable = _crop_and_pad_hms_or_segmaps_(
augmentable,
croppings_img=samples_img.croppings,
paddings_img=samples_img.paddings,
pad_mode=(pad_mode
if pad_mode is not None
else samples_img.pad_mode),
pad_cval=(pad_cval
if pad_cval is not None
else samples_img.pad_cval),
keep_size=self.keep_size
)
result.append(augmentable)
return result
def _augment_keypoints_by_samples(self, keypoints_on_images, samples):
result = []
for i, keypoints_on_image in enumerate(keypoints_on_images):
samples_i = samples[i]
kpsoi_aug = _crop_and_pad_kpsoi_(
keypoints_on_image, croppings_img=samples_i.croppings,
paddings_img=samples_i.paddings, keep_size=self.keep_size)
result.append(kpsoi_aug)
return result
def _draw_samples(self, random_state, shapes):
nb_rows = len(shapes)
if self.mode == "noop":
top = right = bottom = left = np.full((nb_rows,), 0,
dtype=np.int32)
else:
if self.all_sides is not None:
if self.sample_independently:
samples = self.all_sides.draw_samples(
(nb_rows, 4), random_state=random_state)
top = samples[:, 0]
right = samples[:, 1]
bottom = samples[:, 2]
left = samples[:, 3]
else:
sample = self.all_sides.draw_samples(
(nb_rows,), random_state=random_state)
top = right = bottom = left = sample
else:
top = self.top.draw_samples(
(nb_rows,), random_state=random_state)
right = self.right.draw_samples(
(nb_rows,), random_state=random_state)
bottom = self.bottom.draw_samples(
(nb_rows,), random_state=random_state)
left = self.left.draw_samples(
(nb_rows,), random_state=random_state)
if self.mode == "px":
pass
elif self.mode == "percent":
shapes_arr = np.array([shape[0:2] for shape in shapes],
dtype=np.float32)
heights = shapes_arr[:, 0]
widths = shapes_arr[:, 1]
top = np.round(heights * top).astype(np.int32)
right = np.round(widths * right).astype(np.int32)
bottom = np.round(heights * bottom).astype(np.int32)
left = np.round(widths * left).astype(np.int32)
else:
raise Exception("Invalid mode")
def _only_above_zero(arr):
arr = np.copy(arr)
mask = (arr < 0)
arr[mask] = 0
return arr
crop_top = _only_above_zero((-1) * top)
crop_right = _only_above_zero((-1) * right)
crop_bottom = _only_above_zero((-1) * bottom)
crop_left = _only_above_zero((-1) * left)
pad_top = _only_above_zero(top)
pad_right = _only_above_zero(right)
pad_bottom = _only_above_zero(bottom)
pad_left = _only_above_zero(left)
pad_mode = self.pad_mode.draw_samples((nb_rows,),
random_state=random_state)
pad_cval = self.pad_cval.draw_samples((nb_rows,),
random_state=random_state)
result = []
for i, shape in enumerate(shapes):
height, width = shape[0:2]
crop_top_i, crop_right_i, crop_bottom_i, crop_left_i = \
_crop_prevent_zero_size(
height, width,
crop_top[i], crop_right[i], crop_bottom[i], crop_left[i])
any_crop_y = (crop_top_i > 0 or crop_bottom_i > 0)
if any_crop_y and crop_top_i + crop_bottom_i >= height:
ia.warn(
"Expected generated crop amounts in CropAndPad for top and "
"bottom image side to be less than the image's height, but "
"got %d (top) and %d (bottom) vs. image height %d. This "
"will result in an image with output height=1 (if input "
"height was >=1) or output height=0 (if input height "
"was 0)." % (crop_top_i, crop_bottom_i, height))
            # the any_crop_x check below ensures that no warning is emitted
            # for zero-sized (height or width 0) input images
any_crop_x = (crop_left_i > 0 or crop_right_i > 0)
if any_crop_x and crop_left_i + crop_right_i >= width:
ia.warn(
"Expected generated crop amounts in CropAndPad for left "
"and right image side to be less than the image's width, "
"but got %d (left) and %d (right) vs. image width %d. "
"This will result in an image with output width=1 (if "
"input width was >=1) or output width=0 (if input width "
"was 0)." % (crop_left_i, crop_right_i, width))
result.append(
_CropAndPadSamplingResult(
crop_top=crop_top_i,
crop_right=crop_right_i,
crop_bottom=crop_bottom_i,
crop_left=crop_left_i,
pad_top=pad_top[i],
pad_right=pad_right[i],
pad_bottom=pad_bottom[i],
pad_left=pad_left[i],
pad_mode=pad_mode[i],
pad_cval=pad_cval[i]))
return result
def get_parameters(self):
return [self.all_sides, self.top, self.right, self.bottom, self.left,
self.pad_mode, self.pad_cval]
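# Usage sketch for CropAndPad (illustrative): negative sampled values crop,
# positive values pad; with keep_size=True the result is resized back to the
# input shape.
#
#   aug = CropAndPad(percent=(-0.25, 0.25), pad_mode="edge")
#   images_aug = aug(images=images)  # list or array of HxWxC images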
class Pad(CropAndPad):
def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0,
keep_size=True, sample_independently=True,
seed=None, name=None, **old_kwargs):
def recursive_validate(value):
if value is None:
return value
if ia.is_single_number(value):
                assert value >= 0, "Expected value >=0, got %.4f." % (value,)
return value
if isinstance(value, iap.StochasticParameter):
return value
if isinstance(value, tuple):
return tuple([recursive_validate(v_) for v_ in value])
if isinstance(value, list):
return [recursive_validate(v_) for v_ in value]
raise Exception(
"Expected None or int or float or StochasticParameter or "
"list or tuple, got %s." % (type(value),))
px = recursive_validate(px)
percent = recursive_validate(percent)
super(Pad, self).__init__(
px=px,
percent=percent,
pad_mode=pad_mode,
pad_cval=pad_cval,
keep_size=keep_size,
sample_independently=sample_independently,
seed=seed, name=name, **old_kwargs)
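# Usage sketch (illustrative): Pad is CropAndPad restricted to non-negative
# amounts, so every sampled value pads.
#
#   aug = Pad(px=(0, 10))  # pad each side by 0-10px, then resize back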
class Crop(CropAndPad):
def __init__(self, px=None, percent=None, keep_size=True,
sample_independently=True,
seed=None, name=None, **old_kwargs):
def recursive_negate(value):
if value is None:
return value
if ia.is_single_number(value):
                assert value >= 0, "Expected value >=0, got %.4f." % (value,)
return -value
if isinstance(value, iap.StochasticParameter):
return iap.Multiply(value, -1)
if isinstance(value, tuple):
return tuple([recursive_negate(v_) for v_ in value])
if isinstance(value, list):
return [recursive_negate(v_) for v_ in value]
raise Exception(
"Expected None or int or float or StochasticParameter or "
"list or tuple, got %s." % (type(value),))
px = recursive_negate(px)
percent = recursive_negate(percent)
super(Crop, self).__init__(
px=px,
percent=percent,
keep_size=keep_size,
sample_independently=sample_independently,
seed=seed, name=name, **old_kwargs)
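# Usage sketch (illustrative): Crop negates the given non-negative amounts
# before delegating to CropAndPad, so every sampled value crops.
#
#   aug = Crop(percent=(0.0, 0.1), keep_size=False)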
class PadToFixedSize(meta.Augmenter):
def __init__(self, width, height, pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToFixedSize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.size = (width, height)
self.position = _handle_position_parameter(position)
self.pad_mode = _handle_pad_mode_param(pad_mode)
self.pad_cval = iap.handle_discrete_param(
pad_cval, "pad_cval", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self._pad_mode_heatmaps = "constant"
self._pad_mode_segmentation_maps = "constant"
self._pad_cval_heatmaps = 0.0
self._pad_cval_segmentation_maps = 0
def _augment_batch_(self, batch, random_state, parents, hooks):
samples = self._draw_samples(batch, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, samples, self._pad_mode_heatmaps,
self._pad_cval_heatmaps)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
                batch.segmentation_maps, samples,
                self._pad_mode_segmentation_maps,
                self._pad_cval_segmentation_maps)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
result = []
sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples
for i, (image, size) in enumerate(zip(images, sizes)):
width_min, height_min = size
height_image, width_image = image.shape[:2]
paddings = self._calculate_paddings(height_image, width_image,
height_min, width_min,
pad_xs[i], pad_ys[i])
image = _crop_and_pad_arr(
image, (0, 0, 0, 0), paddings, pad_modes[i], pad_cvals[i],
keep_size=False)
result.append(image)
return result
def _augment_keypoints_by_samples(self, keypoints_on_images, samples):
result = []
sizes, pad_xs, pad_ys, _, _ = samples
for i, (kpsoi, size) in enumerate(zip(keypoints_on_images, sizes)):
width_min, height_min = size
height_image, width_image = kpsoi.shape[:2]
paddings_img = self._calculate_paddings(height_image, width_image,
height_min, width_min,
pad_xs[i], pad_ys[i])
keypoints_padded = _crop_and_pad_kpsoi_(
kpsoi, (0, 0, 0, 0), paddings_img,
keep_size=False)
result.append(keypoints_padded)
return result
def _augment_maps_by_samples(self, augmentables, samples, pad_mode,
pad_cval):
sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples
for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):
width_min, height_min = size
height_img, width_img = augmentable.shape[:2]
paddings_img = self._calculate_paddings(
height_img, width_img, height_min, width_min,
pad_xs[i], pad_ys[i])
augmentables[i] = _crop_and_pad_hms_or_segmaps_(
augmentables[i],
(0, 0, 0, 0),
paddings_img,
pad_mode=pad_mode if pad_mode is not None else pad_modes[i],
pad_cval=pad_cval if pad_cval is not None else pad_cvals[i],
keep_size=False)
return augmentables
def _draw_samples(self, batch, random_state):
nb_images = batch.nb_rows
rngs = random_state.duplicate(4)
if isinstance(self.position, tuple):
pad_xs = self.position[0].draw_samples(nb_images,
random_state=rngs[0])
pad_ys = self.position[1].draw_samples(nb_images,
random_state=rngs[1])
else:
pads = self.position.draw_samples((nb_images, 2),
random_state=rngs[0])
pad_xs = pads[:, 0]
pad_ys = pads[:, 1]
pad_modes = self.pad_mode.draw_samples(nb_images,
random_state=rngs[2])
pad_cvals = self.pad_cval.draw_samples(nb_images,
random_state=rngs[3])
# We return here the sizes even though they are static as it allows
# derived augmenters to define image-specific heights/widths.
return [self.size] * nb_images, pad_xs, pad_ys, pad_modes, pad_cvals
@classmethod
def _calculate_paddings(cls, height_image, width_image,
height_min, width_min, pad_xs_i, pad_ys_i):
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
if width_min is not None and width_image < width_min:
pad_total_x = width_min - width_image
pad_left = int((1-pad_xs_i) * pad_total_x)
pad_right = pad_total_x - pad_left
if height_min is not None and height_image < height_min:
pad_total_y = height_min - height_image
pad_top = int((1-pad_ys_i) * pad_total_y)
pad_bottom = pad_total_y - pad_top
return pad_top, pad_right, pad_bottom, pad_left
def get_parameters(self):
return [self.size[0], self.size[1], self.pad_mode, self.pad_cval,
self.position]
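# Usage sketch (illustrative): pads only images smaller than the target
# size; larger images pass through unchanged.
#
#   aug = PadToFixedSize(width=100, height=100, position="center")
#   images_aug = aug(images=images)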
class CenterPadToFixedSize(PadToFixedSize):
def __init__(self, width, height, pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToFixedSize, self).__init__(
width=width, height=height, pad_mode=pad_mode, pad_cval=pad_cval,
position="center",
seed=seed, name=name, **old_kwargs)
# TODO maybe rename this to CropToMaximumSize ?
# TODO this is very similar to CropAndPad, maybe add a way to generate crop
# values imagewise via a callback in CropAndPad?
# TODO add crop() function in imgaug, similar to pad
class CropToFixedSize(meta.Augmenter):
def __init__(self, width, height, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToFixedSize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.size = (width, height)
# Position of where to crop. The further to the top left this is,
# the larger the share of pixels that will be cropped from the top
# and left sides. I.e. set to (Deterministic(0.0), Deterministic(0.0))
# to only crop at the top and left,
# (Deterministic(1.0), Deterministic(1.0)) to only crop at the bottom
# right. Analogously (0.5, 0.5) crops equally on both axis,
# (0.0, 1.0) crops left and bottom, (1.0, 0.0) crops right and top.
self.position = _handle_position_parameter(position)
def _augment_batch_(self, batch, random_state, parents, hooks):
# Providing the whole batch to _draw_samples() would not be necessary
# for this augmenter. The number of rows would be sufficient. This
# formulation however enables derived augmenters to use rowwise shapes
# without having to compute them here for this augmenter.
samples = self._draw_samples(batch, random_state)
if batch.images is not None:
batch.images = self._augment_images_by_samples(batch.images,
samples)
if batch.heatmaps is not None:
batch.heatmaps = self._augment_maps_by_samples(
batch.heatmaps, samples)
if batch.segmentation_maps is not None:
batch.segmentation_maps = self._augment_maps_by_samples(
batch.segmentation_maps, samples)
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._augment_keypoints_by_samples,
samples=samples)
cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
setattr(batch, augm_name, cbaois)
return batch
def _augment_images_by_samples(self, images, samples):
result = []
sizes, offset_xs, offset_ys = samples
for i, (image, size) in enumerate(zip(images, sizes)):
w, h = size
height_image, width_image = image.shape[0:2]
croppings = self._calculate_crop_amounts(
height_image, width_image, h, w, offset_ys[i], offset_xs[i])
image_cropped = _crop_and_pad_arr(image, croppings, (0, 0, 0, 0),
keep_size=False)
result.append(image_cropped)
return result
def _augment_keypoints_by_samples(self, kpsois, samples):
result = []
sizes, offset_xs, offset_ys = samples
for i, (kpsoi, size) in enumerate(zip(kpsois, sizes)):
w, h = size
height_image, width_image = kpsoi.shape[0:2]
croppings_img = self._calculate_crop_amounts(
height_image, width_image, h, w, offset_ys[i], offset_xs[i])
kpsoi_cropped = _crop_and_pad_kpsoi_(
kpsoi, croppings_img, (0, 0, 0, 0), keep_size=False)
result.append(kpsoi_cropped)
return result
def _augment_maps_by_samples(self, augmentables, samples):
sizes, offset_xs, offset_ys = samples
for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):
w, h = size
height_image, width_image = augmentable.shape[0:2]
croppings_img = self._calculate_crop_amounts(
height_image, width_image, h, w, offset_ys[i], offset_xs[i])
augmentables[i] = _crop_and_pad_hms_or_segmaps_(
augmentable, croppings_img, (0, 0, 0, 0), keep_size=False)
return augmentables
@classmethod
def _calculate_crop_amounts(cls, height_image, width_image,
height_max, width_max,
offset_y, offset_x):
crop_top = 0
crop_right = 0
crop_bottom = 0
crop_left = 0
if height_max is not None and height_image > height_max:
crop_top = int(offset_y * (height_image - height_max))
crop_bottom = height_image - height_max - crop_top
if width_max is not None and width_image > width_max:
crop_left = int(offset_x * (width_image - width_max))
crop_right = width_image - width_max - crop_left
return crop_top, crop_right, crop_bottom, crop_left
def _draw_samples(self, batch, random_state):
nb_images = batch.nb_rows
rngs = random_state.duplicate(2)
if isinstance(self.position, tuple):
offset_xs = self.position[0].draw_samples(nb_images,
random_state=rngs[0])
offset_ys = self.position[1].draw_samples(nb_images,
random_state=rngs[1])
else:
offsets = self.position.draw_samples((nb_images, 2),
random_state=rngs[0])
offset_xs = offsets[:, 0]
offset_ys = offsets[:, 1]
offset_xs = 1.0 - offset_xs
offset_ys = 1.0 - offset_ys
# We return here the sizes even though they are static as it allows
# derived augmenters to define image-specific heights/widths.
return [self.size] * nb_images, offset_xs, offset_ys
def get_parameters(self):
return [self.size[0], self.size[1], self.position]
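# Usage sketch (illustrative): crops only images larger than the target
# size. Chaining PadToFixedSize and CropToFixedSize with the same target
# guarantees an exact output size regardless of the input shape.
#
#   aug = CropToFixedSize(width=100, height=100)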
class CenterCropToFixedSize(CropToFixedSize):
def __init__(self, width, height,
seed=None, name=None, **old_kwargs):
super(CenterCropToFixedSize, self).__init__(
width=width, height=height, position="center",
seed=seed, name=name, **old_kwargs)
class CropToMultiplesOf(CropToFixedSize):
def __init__(self, width_multiple, height_multiple, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToMultiplesOf, self).__init__(
width=None, height=None, position=position,
seed=seed, name=name, **old_kwargs)
self.width_multiple = width_multiple
self.height_multiple = height_multiple
def _draw_samples(self, batch, random_state):
_sizes, offset_xs, offset_ys = super(
CropToMultiplesOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
croppings = compute_croppings_to_reach_multiples_of(
shape,
height_multiple=self.height_multiple,
width_multiple=self.width_multiple)
# TODO change that
# note that these are not in the same order as shape tuples
# in CropToFixedSize
new_size = (
width - croppings[1] - croppings[3],
height - croppings[0] - croppings[2]
)
sizes.append(new_size)
return sizes, offset_xs, offset_ys
def get_parameters(self):
return [self.width_multiple, self.height_multiple, self.position]
class CenterCropToMultiplesOf(CropToMultiplesOf):
def __init__(self, width_multiple, height_multiple,
seed=None, name=None, **old_kwargs):
super(CenterCropToMultiplesOf, self).__init__(
width_multiple=width_multiple,
height_multiple=height_multiple,
position="center",
seed=seed, name=name, **old_kwargs)
class PadToMultiplesOf(PadToFixedSize):
def __init__(self, width_multiple, height_multiple,
pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToMultiplesOf, self).__init__(
width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
self.width_multiple = width_multiple
self.height_multiple = height_multiple
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToMultiplesOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_multiples_of(
shape,
height_multiple=self.height_multiple,
width_multiple=self.width_multiple)
# TODO change that
# note that these are not in the same order as shape tuples
# in PadToFixedSize
new_size = (
width + paddings[1] + paddings[3],
height + paddings[0] + paddings[2]
)
sizes.append(new_size)
return sizes, pad_xs, pad_ys, pad_modes, pad_cvals
def get_parameters(self):
return [self.width_multiple, self.height_multiple,
self.pad_mode, self.pad_cval,
self.position]
class CenterPadToMultiplesOf(PadToMultiplesOf):
def __init__(self, width_multiple, height_multiple,
pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToMultiplesOf, self).__init__(
width_multiple=width_multiple,
height_multiple=height_multiple,
pad_mode=pad_mode,
pad_cval=pad_cval,
position="center",
seed=seed, name=name, **old_kwargs)
class CropToPowersOf(CropToFixedSize):
def __init__(self, width_base, height_base, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToPowersOf, self).__init__(
width=None, height=None, position=position,
seed=seed, name=name, **old_kwargs)
self.width_base = width_base
self.height_base = height_base
def _draw_samples(self, batch, random_state):
_sizes, offset_xs, offset_ys = super(
CropToPowersOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
croppings = compute_croppings_to_reach_powers_of(
shape,
height_base=self.height_base,
width_base=self.width_base)
# TODO change that
# note that these are not in the same order as shape tuples
# in CropToFixedSize
new_size = (
width - croppings[1] - croppings[3],
height - croppings[0] - croppings[2]
)
sizes.append(new_size)
return sizes, offset_xs, offset_ys
def get_parameters(self):
return [self.width_base, self.height_base, self.position]
class CenterCropToPowersOf(CropToPowersOf):
def __init__(self, width_base, height_base,
seed=None, name=None, **old_kwargs):
super(CenterCropToPowersOf, self).__init__(
width_base=width_base, height_base=height_base, position="center",
seed=seed, name=name, **old_kwargs)
class PadToPowersOf(PadToFixedSize):
def __init__(self, width_base, height_base,
pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToPowersOf, self).__init__(
width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
self.width_base = width_base
self.height_base = height_base
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToPowersOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_powers_of(
shape,
height_base=self.height_base,
width_base=self.width_base)
# TODO change that
# note that these are not in the same order as shape tuples
# in PadToFixedSize
new_size = (
width + paddings[1] + paddings[3],
height + paddings[0] + paddings[2]
)
sizes.append(new_size)
return sizes, pad_xs, pad_ys, pad_modes, pad_cvals
def get_parameters(self):
return [self.width_base, self.height_base,
self.pad_mode, self.pad_cval,
self.position]
class CenterPadToPowersOf(PadToPowersOf):
def __init__(self, width_base, height_base,
pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToPowersOf, self).__init__(
width_base=width_base, height_base=height_base,
pad_mode=pad_mode, pad_cval=pad_cval,
position="center",
seed=seed, name=name, **old_kwargs)
class CropToAspectRatio(CropToFixedSize):
def __init__(self, aspect_ratio, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToAspectRatio, self).__init__(
width=None, height=None, position=position,
seed=seed, name=name, **old_kwargs)
self.aspect_ratio = aspect_ratio
def _draw_samples(self, batch, random_state):
_sizes, offset_xs, offset_ys = super(
CropToAspectRatio, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
if height == 0 or width == 0:
croppings = (0, 0, 0, 0)
else:
croppings = compute_croppings_to_reach_aspect_ratio(
shape,
aspect_ratio=self.aspect_ratio)
# TODO change that
# note that these are not in the same order as shape tuples
# in CropToFixedSize
new_size = (
width - croppings[1] - croppings[3],
height - croppings[0] - croppings[2]
)
sizes.append(new_size)
return sizes, offset_xs, offset_ys
def get_parameters(self):
return [self.aspect_ratio, self.position]
class CenterCropToAspectRatio(CropToAspectRatio):
def __init__(self, aspect_ratio,
seed=None, name=None, **old_kwargs):
super(CenterCropToAspectRatio, self).__init__(
aspect_ratio=aspect_ratio, position="center",
seed=seed, name=name, **old_kwargs)
class PadToAspectRatio(PadToFixedSize):
def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0,
position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToAspectRatio, self).__init__(
width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
self.aspect_ratio = aspect_ratio
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToAspectRatio, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_aspect_ratio(
shape,
aspect_ratio=self.aspect_ratio)
# TODO change that
# note that these are not in the same order as shape tuples
# in PadToFixedSize
new_size = (
width + paddings[1] + paddings[3],
height + paddings[0] + paddings[2]
)
sizes.append(new_size)
return sizes, pad_xs, pad_ys, pad_modes, pad_cvals
def get_parameters(self):
return [self.aspect_ratio, self.pad_mode, self.pad_cval,
self.position]
class CenterPadToAspectRatio(PadToAspectRatio):
def __init__(self, aspect_ratio, pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToAspectRatio, self).__init__(
aspect_ratio=aspect_ratio, position="center",
pad_mode=pad_mode, pad_cval=pad_cval,
seed=seed, name=name, **old_kwargs)
class CropToSquare(CropToAspectRatio):
def __init__(self, position="uniform",
seed=None, name=None, **old_kwargs):
super(CropToSquare, self).__init__(
aspect_ratio=1.0, position=position,
seed=seed, name=name, **old_kwargs)
class CenterCropToSquare(CropToSquare):
def __init__(self, seed=None, name=None, **old_kwargs):
super(CenterCropToSquare, self).__init__(
position="center",
seed=seed, name=name, **old_kwargs)
class PadToSquare(PadToAspectRatio):
def __init__(self, pad_mode="constant", pad_cval=0, position="uniform",
seed=None, name=None, **old_kwargs):
super(PadToSquare, self).__init__(
aspect_ratio=1.0, pad_mode=pad_mode, pad_cval=pad_cval,
position=position,
seed=seed, name=name, **old_kwargs)
class CenterPadToSquare(PadToSquare):
def __init__(self, pad_mode="constant", pad_cval=0,
seed=None, name=None, **old_kwargs):
super(CenterPadToSquare, self).__init__(
pad_mode=pad_mode, pad_cval=pad_cval, position="center",
seed=seed, name=name, **old_kwargs)
class KeepSizeByResize(meta.Augmenter):
NO_RESIZE = "NO_RESIZE"
SAME_AS_IMAGES = "SAME_AS_IMAGES"
def __init__(self, children,
interpolation="cubic",
interpolation_heatmaps=SAME_AS_IMAGES,
interpolation_segmaps="nearest",
seed=None, name=None, **old_kwargs):
super(KeepSizeByResize, self).__init__(
seed=seed, name=name, **old_kwargs)
self.children = children
def _validate_param(val, allow_same_as_images):
valid_ips_and_resize = ia.IMRESIZE_VALID_INTERPOLATIONS \
+ [KeepSizeByResize.NO_RESIZE]
if allow_same_as_images and val == self.SAME_AS_IMAGES:
return self.SAME_AS_IMAGES
if val in valid_ips_and_resize:
return iap.Deterministic(val)
if isinstance(val, list):
assert len(val) > 0, (
"Expected a list of at least one interpolation method. "
"Got an empty list.")
valid_ips_here = valid_ips_and_resize
if allow_same_as_images:
valid_ips_here = valid_ips_here \
+ [KeepSizeByResize.SAME_AS_IMAGES]
only_valid_ips = all([ip in valid_ips_here for ip in val])
assert only_valid_ips, (
"Expected each interpolations to be one of '%s', got "
"'%s'." % (str(valid_ips_here), str(val)))
return iap.Choice(val)
if isinstance(val, iap.StochasticParameter):
return val
raise Exception(
"Expected interpolation to be one of '%s' or a list of "
"these values or a StochasticParameter. Got type %s." % (
str(ia.IMRESIZE_VALID_INTERPOLATIONS), type(val)))
self.children = meta.handle_children_list(children, self.name, "then")
self.interpolation = _validate_param(interpolation, False)
self.interpolation_heatmaps = _validate_param(interpolation_heatmaps,
True)
self.interpolation_segmaps = _validate_param(interpolation_segmaps,
True)
def _augment_batch_(self, batch, random_state, parents, hooks):
with batch.propagation_hooks_ctx(self, hooks, parents):
images_were_array = None
if batch.images is not None:
images_were_array = ia.is_np_array(batch.images)
shapes_orig = self._get_shapes(batch)
samples = self._draw_samples(batch.nb_rows, random_state)
batch = self.children.augment_batch_(
batch, parents=parents + [self], hooks=hooks)
if batch.images is not None:
batch.images = self._keep_size_images(
batch.images, shapes_orig["images"], images_were_array,
samples)
if batch.heatmaps is not None:
                # don't use shapes_orig["images"] because they might be None
batch.heatmaps = self._keep_size_maps(
batch.heatmaps, shapes_orig["heatmaps"],
shapes_orig["heatmaps_arr"], samples[1])
if batch.segmentation_maps is not None:
                # don't use shapes_orig["images"] because they might be None
batch.segmentation_maps = self._keep_size_maps(
batch.segmentation_maps, shapes_orig["segmentation_maps"],
shapes_orig["segmentation_maps_arr"], samples[2])
for augm_name in ["keypoints", "bounding_boxes", "polygons",
"line_strings"]:
augm_value = getattr(batch, augm_name)
if augm_value is not None:
func = functools.partial(
self._keep_size_keypoints,
shapes_orig=shapes_orig[augm_name],
interpolations=samples[0])
cbaois = self._apply_to_cbaois_as_keypoints(augm_value,
func)
setattr(batch, augm_name, cbaois)
return batch
@classmethod
def _keep_size_images(cls, images, shapes_orig, images_were_array,
samples):
interpolations, _, _ = samples
gen = zip(images, interpolations, shapes_orig)
result = []
for image, interpolation, input_shape in gen:
if interpolation == KeepSizeByResize.NO_RESIZE:
result.append(image)
else:
result.append(
ia.imresize_single_image(image, input_shape[0:2],
interpolation))
if images_were_array:
# note here that NO_RESIZE can have led to different shapes
nb_shapes = len({image.shape for image in result})
if nb_shapes == 1:
result = np.array(result, dtype=images.dtype)
return result
@classmethod
def _keep_size_maps(cls, augmentables, shapes_orig_images,
shapes_orig_arrs, interpolations):
result = []
gen = zip(augmentables, interpolations,
shapes_orig_arrs, shapes_orig_images)
for augmentable, interpolation, arr_shape_orig, img_shape_orig in gen:
if interpolation == "NO_RESIZE":
result.append(augmentable)
else:
augmentable = augmentable.resize(
arr_shape_orig[0:2], interpolation=interpolation)
augmentable.shape = img_shape_orig
result.append(augmentable)
return result
@classmethod
def _keep_size_keypoints(cls, kpsois_aug, shapes_orig, interpolations):
result = []
gen = zip(kpsois_aug, interpolations, shapes_orig)
for kpsoi_aug, interpolation, input_shape in gen:
if interpolation == KeepSizeByResize.NO_RESIZE:
result.append(kpsoi_aug)
else:
result.append(kpsoi_aug.on_(input_shape))
return result
@classmethod
def _get_shapes(cls, batch):
result = dict()
for column in batch.columns:
result[column.name] = [cell.shape for cell in column.value]
if batch.heatmaps is not None:
result["heatmaps_arr"] = [
cell.arr_0to1.shape for cell in batch.heatmaps]
if batch.segmentation_maps is not None:
result["segmentation_maps_arr"] = [
cell.arr.shape for cell in batch.segmentation_maps]
return result
def _draw_samples(self, nb_images, random_state):
rngs = random_state.duplicate(3)
interpolations = self.interpolation.draw_samples((nb_images,),
random_state=rngs[0])
if self.interpolation_heatmaps == KeepSizeByResize.SAME_AS_IMAGES:
interpolations_heatmaps = np.copy(interpolations)
else:
interpolations_heatmaps = self.interpolation_heatmaps.draw_samples(
(nb_images,), random_state=rngs[1]
)
# Note that `interpolations_heatmaps == self.SAME_AS_IMAGES`
# works here only if the datatype of the array is such that it
# may contain strings. It does not work properly for e.g.
# integer arrays and will produce a single bool output, even
# for arrays with more than one entry.
same_as_imgs_idx = [ip == self.SAME_AS_IMAGES
for ip in interpolations_heatmaps]
interpolations_heatmaps[same_as_imgs_idx] = \
interpolations[same_as_imgs_idx]
if self.interpolation_segmaps == KeepSizeByResize.SAME_AS_IMAGES:
interpolations_segmaps = np.copy(interpolations)
else:
# TODO This used previously the same seed as the heatmaps part
# leading to the same sampled values. Was that intentional?
# Doesn't look like it should be that way.
interpolations_segmaps = self.interpolation_segmaps.draw_samples(
(nb_images,), random_state=rngs[2]
)
same_as_imgs_idx = [ip == self.SAME_AS_IMAGES
for ip in interpolations_segmaps]
interpolations_segmaps[same_as_imgs_idx] = \
interpolations[same_as_imgs_idx]
return interpolations, interpolations_heatmaps, interpolations_segmaps
def _to_deterministic(self):
aug = self.copy()
aug.children = aug.children.to_deterministic()
aug.deterministic = True
aug.random_state = self.random_state.derive_rng_()
return aug
def get_parameters(self):
        return [self.interpolation, self.interpolation_heatmaps,
                self.interpolation_segmaps]
def get_children_lists(self):
return [self.children]
def __str__(self):
pattern = (
"%s("
"interpolation=%s, "
"interpolation_heatmaps=%s, "
"name=%s, "
"children=%s, "
"deterministic=%s"
")")
return pattern % (
self.__class__.__name__, self.interpolation,
self.interpolation_heatmaps, self.name, self.children,
self.deterministic)
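# Usage sketch (illustrative): wrap size-changing children so that outputs
# are resized back to each input's original shape.
#
#   aug = KeepSizeByResize(Crop((20, 40), keep_size=False))
#   images_aug = aug(images=images)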
| true | true |
f702d36d493f53a3b370731decea28a5e15dc587 | 2,317 | py | Python | tests/memory.py | vpaeder/pymcp2221 | 90f9ae85d7b852128d642e1382f9a7628fc72057 | ["MIT"] | null | null | null | tests/memory.py | vpaeder/pymcp2221 | 90f9ae85d7b852128d642e1382f9a7628fc72057 | ["MIT"] | null | null | null | tests/memory.py | vpaeder/pymcp2221 | 90f9ae85d7b852128d642e1382f9a7628fc72057 | ["MIT"] | null | null | null |
from .common import *
__all__ = ["TestReadWriteMemory"]
class TestReadWriteMemory(MCPTestCase):
def test_read_flash_ok(self):
self.mcp.dev.read.return_value = self.xb0_00
self.assertEqual(self.mcp._read_flash(FlashDataSubcode.ChipSettings), self.xb0_00[4:14])
def test_read_sram_ok(self):
self.mcp.dev.read.return_value = self.x61
self.assertEqual(self.mcp._read_sram(SramDataSubcode.ChipSettings), self.x61[4:22])
self.assertEqual(self.mcp._read_sram(SramDataSubcode.GPSettings), self.x61[22:26])
def test_read_flash_byte_ok(self):
self.mcp.dev.read.return_value = self.xb0_00
for n in range(0,9):
result = self.mcp._read_flash_byte(FlashDataSubcode.ChipSettings, n, range(8))
value = int("".join(["1" if x else "0" for x in reversed(result)]),2)
self.assertEqual(value, self.xb0_00[4+n])
def test_read_sram_byte_ok(self):
self.mcp.dev.read.return_value = self.x61
for n in range(0,9):
result = self.mcp._read_sram_byte(SramDataSubcode.ChipSettings, n, range(8))
value = int("".join(["1" if x else "0" for x in reversed(result)]),2)
self.assertEqual(value, self.x61[4+n])
def test_write_flash_byte_ok(self):
# tests that 'write_flash_byte' sends the right data to hid write command
xb1_00 = bytearray(64)
xb1_00[0] = 0xb1
with patch.object(self.mcp, "_read_response", return_value = self.xb0_00):
for byte in range(9):
for bit in range(8):
xb1_00[2:12] = self.xb0_00[4:14]
xb1_00[2+byte] = self.mcp._MCP2221__and(xb1_00[2+byte], 0xff - (1<<bit))
self.mcp._write_flash_byte(FlashDataSubcode.ChipSettings, byte, [bit], [False])
self.assertEqual(self.mcp.dev.write.call_args[0][0], xb1_00)
def test_write_sram_ok(self):
# tests that 'write_sram' sends the right data to hid write command
with patch.object(self.mcp, "_read_response", return_value = self.x61):
v = 0xff
for byte in range(9):
self.mcp._write_sram(SramDataSubcode.ChipSettings, byte, v)
self.assertEqual(self.mcp.dev.write.call_args[0][0][2+byte], v)
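# Note (an assumption based on the relative import of .common above): these
# tests are meant to be discovered from the package root, e.g.
#   python -m unittest tests.memory -v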
| 47.285714 | 99 | 0.634009 |
from .common import *
__all__ = ["TestReadWriteMemory"]
class TestReadWriteMemory(MCPTestCase):
def test_read_flash_ok(self):
self.mcp.dev.read.return_value = self.xb0_00
self.assertEqual(self.mcp._read_flash(FlashDataSubcode.ChipSettings), self.xb0_00[4:14])
def test_read_sram_ok(self):
self.mcp.dev.read.return_value = self.x61
self.assertEqual(self.mcp._read_sram(SramDataSubcode.ChipSettings), self.x61[4:22])
self.assertEqual(self.mcp._read_sram(SramDataSubcode.GPSettings), self.x61[22:26])
def test_read_flash_byte_ok(self):
self.mcp.dev.read.return_value = self.xb0_00
for n in range(0,9):
result = self.mcp._read_flash_byte(FlashDataSubcode.ChipSettings, n, range(8))
value = int("".join(["1" if x else "0" for x in reversed(result)]),2)
self.assertEqual(value, self.xb0_00[4+n])
def test_read_sram_byte_ok(self):
self.mcp.dev.read.return_value = self.x61
for n in range(0,9):
result = self.mcp._read_sram_byte(SramDataSubcode.ChipSettings, n, range(8))
value = int("".join(["1" if x else "0" for x in reversed(result)]),2)
self.assertEqual(value, self.x61[4+n])
def test_write_flash_byte_ok(self):
xb1_00 = bytearray(64)
xb1_00[0] = 0xb1
with patch.object(self.mcp, "_read_response", return_value = self.xb0_00):
for byte in range(9):
for bit in range(8):
xb1_00[2:12] = self.xb0_00[4:14]
xb1_00[2+byte] = self.mcp._MCP2221__and(xb1_00[2+byte], 0xff - (1<<bit))
self.mcp._write_flash_byte(FlashDataSubcode.ChipSettings, byte, [bit], [False])
self.assertEqual(self.mcp.dev.write.call_args[0][0], xb1_00)
def test_write_sram_ok(self):
with patch.object(self.mcp, "_read_response", return_value = self.x61):
v = 0xff
for byte in range(9):
self.mcp._write_sram(SramDataSubcode.ChipSettings, byte, v)
self.assertEqual(self.mcp.dev.write.call_args[0][0][2+byte], v)
| true | true |
f702d376e02116e4fdc4ce32b9b5c0c6704c892d | 1,829 | py | Python | pi/pi.py | saneravi/ML_Stuff | 74e1ed7ba9f4dccb555792315a14ba6071150304 | ["MIT"] | 209 | 2015-01-02T03:47:12.000Z | 2022-03-06T16:54:47.000Z | pi/pi.py | Kerwin-Xie/algorithms | 4347a9b7bf54ef378d16d26ef9e357ddc710664b | ["MIT"] | 3 | 2015-12-06T14:40:34.000Z | 2021-03-22T17:40:24.000Z | pi/pi.py | Kerwin-Xie/algorithms | 4347a9b7bf54ef378d16d26ef9e357ddc710664b | ["MIT"] | 114 | 2015-01-31T08:37:10.000Z | 2022-02-23T04:42:28.000Z |
#!/usr/bin/env python
from decimal import Decimal, getcontext
from fractions import Fraction
digits = 500
getcontext().prec = digits
def leibnitz(n):
"""
Parameters
----------
n : int
Returns
-------
Fraction
Approximation of pi.
"""
pi = Fraction(0)
sign = 1
for k in range(1, n, 2):
pi = pi + sign*Fraction(4, k)
sign *= -1
return pi
def calc_pi(n):
"""
Calculate PI.
Parameters
----------
n : int
Number of fractions.
Returns
-------
Fraction
Approximation of pi.
"""
pi = Fraction(0)
for k in range(n):
# print(Fraction(-1,4)**k)
pi += (Fraction(-1, 4)**k * (Fraction(1, 1+2*k)
+ Fraction(2, 1+4*k)
+ Fraction(1, 3+4*k)))
return pi
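# Reference note (added for clarity, not part of the original file): the sum
# above implements
#   pi = sum_{k>=0} (-1/4)^k * (1/(2k+1) + 2/(4k+1) + 1/(4k+3)),
# which gains roughly log10(4) ~ 0.6 correct decimal digits per term, far
# faster than the Leibniz series implemented in leibnitz() above.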
def get_correct_digits(approx):
"""
Get how many digits were correct.
Parameters
----------
approx : str
String representation of an approximation of pi.
Returns
-------
int
The number of correct digits. If the number has too many correct
digits, -1 is returned.
"""
pi = ("3.14159265358979323846264338327950288419716939937510582097494459230"
"78164062862089986280348253421170679")
for i, el in enumerate(pi):
if len(approx) <= i:
return i-1
if el != approx[i]:
return i
return -1 # Very good!
if __name__ == "__main__":
# for n in range(1,180):
# approx = calc_pi(n)
# dec =Decimal(approx.numerator) / Decimal(approx.denominator)
# #print(dec)
# print("correct digits: %s (n=%i)" % (get_correct_digits(str(dec)),n))
n = digits
approx = calc_pi(n)
dec = Decimal(approx.numerator) / Decimal(approx.denominator)
print(dec)
| 21.022989 | 79 | 0.547294 |
from decimal import Decimal, getcontext
from fractions import Fraction
digits = 500
getcontext().prec = digits
def leibnitz(n):
pi = Fraction(0)
sign = 1
for k in range(1, n, 2):
pi = pi + sign*Fraction(4, k)
sign *= -1
return pi
def calc_pi(n):
pi = Fraction(0)
for k in range(n):
pi += (Fraction(-1, 4)**k * (Fraction(1, 1+2*k)
+ Fraction(2, 1+4*k)
+ Fraction(1, 3+4*k)))
return pi
def get_correct_digits(approx):
pi = ("3.14159265358979323846264338327950288419716939937510582097494459230"
"78164062862089986280348253421170679")
for i, el in enumerate(pi):
if len(approx) <= i:
return i-1
if el != approx[i]:
return i
return -1
if __name__ == "__main__":
n = digits
approx = calc_pi(n)
dec = Decimal(approx.numerator) / Decimal(approx.denominator)
print(dec)
| true | true |
f702d5de917640b0135e726ec3ce2820b1f09d38 | 1,295 | py | Python | app/data/check.py | redforge/Flask_Signin | b9fe05e0a9af07603622c22d8eba060c2d696d52 | ["Unlicense"] | 2 | 2018-08-08T20:26:16.000Z | 2020-06-03T01:06:27.000Z | app/data/check.py | redforge/Flask_Signin | b9fe05e0a9af07603622c22d8eba060c2d696d52 | ["Unlicense"] | 2 | 2018-08-08T23:26:19.000Z | 2018-08-08T23:41:33.000Z | app/data/check.py | threethan/Parts-and-Crafts-Sign-In | b9fe05e0a9af07603622c22d8eba060c2d696d52 | ["Unlicense"] | null | null | null |
import os.path
from app.data.database import init_db, db_path, get_expected_pathname, set_path
def db_exists():
return os.path.isfile(db_path)
def check_db():
global db_path
if (db_path != get_expected_pathname()):
print('DB Check: Running backup')
backup_database_to(get_expected_pathname())
init_db()
if (not db_exists()):
print('DB Check: No database found. Making a new one...')
init_db()
from app.data.camper_editing import reset_locs
reset_locs()
def backup_database_to(filename):
global db_path
from shutil import copy2
s = open('data/BACKUPDATA', 'a+')
s.seek(0)
prev_path = s.read()
set_path(filename)
    db_path = filename  # crude workaround for the messy cross-module scoping of db_path
s.truncate(0)
s.seek(0)
    s.write(filename)
    s.close()
if (prev_path == ""):
print("No previous database found, a new one will be generated. This may happen if the BACKUPDATA file is missing or corrupt.")
return False
elif (prev_path == filename):
print("Tried to back up to the same file!")
else:
        print("backing up & copying")
from app.data.camper_editing import reset_locs
copy2(prev_path, filename)
reset_locs()
return filename
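# Hedged usage sketch (assumed call site, not shown in this module): check_db()
# is meant to run once at application startup, before anything touches the DB:
#   from app.data.check import check_db
#   check_db()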
| 27.553191
| 135
| 0.650193
|
import os.path
from app.data.database import init_db, db_path, get_expected_pathname, set_path
def db_exists():
return os.path.isfile(db_path)
def check_db():
global db_path
if (db_path != get_expected_pathname()):
print('DB Check: Running backup')
backup_database_to(get_expected_pathname())
init_db()
if (not db_exists()):
print('DB Check: No database found. Making a new one...')
init_db()
from app.data.camper_editing import reset_locs
reset_locs()
def backup_database_to(filename):
global db_path
from shutil import copy2
s = open('data/BACKUPDATA', 'a+')
s.seek(0)
prev_path = s.read()
set_path(filename)
db_path = filename
s.truncate(0)
s.seek(0)
    s.write(filename)
    s.close()
if (prev_path == ""):
print("No previous database found, a new one will be generated. This may happen if the BACKUPDATA file is missing or corrupt.")
return False
elif (prev_path == filename):
print("Tried to back up to the same file!")
else:
        print("backing up & copying")
from app.data.camper_editing import reset_locs
copy2(prev_path, filename)
reset_locs()
return filename
| true
| true
|
f702d66e53c80b655a01fddefa34c998ecff7a5e
| 4,255
|
py
|
Python
|
sdk/python/pulumi_aws_native/resourcegroups/get_group.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/resourcegroups/get_group.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/resourcegroups/get_group.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
'get_group_output',
]
@pulumi.output_type
class GetGroupResult:
def __init__(__self__, arn=None, configuration=None, description=None, resource_query=None, resources=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if configuration and not isinstance(configuration, list):
raise TypeError("Expected argument 'configuration' to be a list")
pulumi.set(__self__, "configuration", configuration)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if resource_query and not isinstance(resource_query, dict):
raise TypeError("Expected argument 'resource_query' to be a dict")
pulumi.set(__self__, "resource_query", resource_query)
if resources and not isinstance(resources, list):
raise TypeError("Expected argument 'resources' to be a list")
pulumi.set(__self__, "resources", resources)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
"""
The Resource Group ARN.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def configuration(self) -> Optional[Sequence['outputs.GroupConfigurationItem']]:
return pulumi.get(self, "configuration")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of the resource group
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="resourceQuery")
def resource_query(self) -> Optional['outputs.GroupResourceQuery']:
return pulumi.get(self, "resource_query")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "resources")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.GroupTag']]:
return pulumi.get(self, "tags")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
arn=self.arn,
configuration=self.configuration,
description=self.description,
resource_query=self.resource_query,
resources=self.resources,
tags=self.tags)
def get_group(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
"""
Schema for ResourceGroups::Group
:param str name: The name of the resource group
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:resourcegroups:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
arn=__ret__.arn,
configuration=__ret__.configuration,
description=__ret__.description,
resource_query=__ret__.resource_query,
resources=__ret__.resources,
tags=__ret__.tags)
@_utilities.lift_output_func(get_group)
def get_group_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]:
"""
Schema for ResourceGroups::Group
:param str name: The name of the resource group
"""
...
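# Minimal usage sketch (the group name "my-group" is a placeholder): inside a
# Pulumi program one might write
#   group = get_group(name="my-group")
#   pulumi.export("groupArn", group.arn)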
| 33.242188
| 123
| 0.66134
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
'get_group_output',
]
@pulumi.output_type
class GetGroupResult:
def __init__(__self__, arn=None, configuration=None, description=None, resource_query=None, resources=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if configuration and not isinstance(configuration, list):
raise TypeError("Expected argument 'configuration' to be a list")
pulumi.set(__self__, "configuration", configuration)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if resource_query and not isinstance(resource_query, dict):
raise TypeError("Expected argument 'resource_query' to be a dict")
pulumi.set(__self__, "resource_query", resource_query)
if resources and not isinstance(resources, list):
raise TypeError("Expected argument 'resources' to be a list")
pulumi.set(__self__, "resources", resources)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def configuration(self) -> Optional[Sequence['outputs.GroupConfigurationItem']]:
return pulumi.get(self, "configuration")
@property
@pulumi.getter
def description(self) -> Optional[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="resourceQuery")
def resource_query(self) -> Optional['outputs.GroupResourceQuery']:
return pulumi.get(self, "resource_query")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "resources")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.GroupTag']]:
return pulumi.get(self, "tags")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
arn=self.arn,
configuration=self.configuration,
description=self.description,
resource_query=self.resource_query,
resources=self.resources,
tags=self.tags)
def get_group(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:resourcegroups:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
arn=__ret__.arn,
configuration=__ret__.configuration,
description=__ret__.description,
resource_query=__ret__.resource_query,
resources=__ret__.resources,
tags=__ret__.tags)
@_utilities.lift_output_func(get_group)
def get_group_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupResult]:
...
| true
| true
|
f702d67213c52e98de50c82d2ad8245c7db39257
| 926
|
py
|
Python
|
10.Algorithms_Data_Structure/Searching_n_Sorting/QuickSort.py
|
cuicaihao/Data_Science_Python
|
ca4cb64bf9afc1011c192586362d0dd036e9441e
|
[
"MIT"
] | 2
|
2018-04-26T12:11:41.000Z
|
2018-10-09T19:37:57.000Z
|
10.Algorithms_Data_Structure/Searching_n_Sorting/QuickSort.py
|
cuicaihao/Data_Science_Python
|
ca4cb64bf9afc1011c192586362d0dd036e9441e
|
[
"MIT"
] | null | null | null |
10.Algorithms_Data_Structure/Searching_n_Sorting/QuickSort.py
|
cuicaihao/Data_Science_Python
|
ca4cb64bf9afc1011c192586362d0dd036e9441e
|
[
"MIT"
] | 4
|
2018-10-09T19:37:59.000Z
|
2021-01-23T11:31:16.000Z
|
import numpy as np
def partition(arr, low, high):
i = (low-1) # index of smaller element
pivot = arr[high] # pivot
for j in range(low, high):
# If current element is smaller than the pivot
if arr[j] < pivot:
# increment index of smaller element
i = i+1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return (i + 1)
def quickSort(arr, low, high):
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr, low, high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi + 1, high)
# Driver code to test above
# arr = [10, 7, 8, 9, 1, 5]
arr = np.random.randint(0, 1000000, 200000)
n = len(arr)
quickSort(arr, 0, n-1)
# print(f"Sorted array is: {arr}")
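# Optional verification (an added check, not part of the original driver):
assert all(arr[i] <= arr[i + 1] for i in range(n - 1)), "array is not sorted"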
| 23.74359
| 54
| 0.560475
|
import numpy as np
def partition(arr, low, high):
    i = (low-1)
    pivot = arr[high]
for j in range(low, high):
if arr[j] < pivot:
i = i+1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return (i + 1)
def quickSort(arr, low, high):
if low < high:
pi = partition(arr, low, high)
quickSort(arr, low, pi-1)
quickSort(arr, pi + 1, high)
arr = np.random.randint(0, 1000000, 200000)
n = len(arr)
quickSort(arr, 0, n-1)
| true
| true
|
f702d6e6217d88af39f7200ed453d1d9edb2e766
| 20,236
|
py
|
Python
|
idm/load_test.py
|
handavid/perf-scripts
|
910cdc0a10f2d3fde703726ea270487bedec50df
|
[
"Apache-2.0"
] | null | null | null |
idm/load_test.py
|
handavid/perf-scripts
|
910cdc0a10f2d3fde703726ea270487bedec50df
|
[
"Apache-2.0"
] | null | null | null |
idm/load_test.py
|
handavid/perf-scripts
|
910cdc0a10f2d3fde703726ea270487bedec50df
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Steps required to use
# install requried libraries
# (root)# dnf install python3-ldap3
#
# Create python virtual environment directory
# (user)$ python3 -m venv ./venv3
#
# Enable virtual environment
# (user)$ source ./venv3/bin/activate
#
# Update pip and then install the needed libraries
# (user-venv3)$ pip install --upgrade pip
# (user-venv3)$ pip install python-freeipa
# (user-venv3)$ pip install ldap3
#
# Execute Script:
# (user-venv3)$ ./load_test.py -h
# -- not required, saved as a note
# dnf install python3-requests-kerberos python3-requests-gssapi
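#
# Example invocation (server, account and counts below are placeholders):
# (user-venv3)$ ./load_test.py -S idm1.example.com -U admin -P 'secret' -c 100 -g 2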
import sys
import time
from datetime import datetime
import re
import argparse
import logging
#from linetimer import CodeTimer
import itertools
import pprint
import subprocess
import socket
import dns.resolver
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# from ldap3 import Server, Connection, ALL, MODIFY_ADD
import ldap3
from python_freeipa import ClientMeta
# import requests
#from requests_kerberos import HTTPKerberosAuth
# generate a randomizer from the current time (day+hour+minute; an older 4-digit variant is kept below)
# randomizer = int(time.time()) % 10000
randomizer = datetime.now().strftime("%d%H%M")
start_timestr = datetime.now().strftime("%Y%m%d %H:%M")
start_time = time.time()
uid_template = "tuser{}_{{seq}}".format(randomizer)
pp=pprint.PrettyPrinter(indent=2)
class LogFilter(object):
def __init__(self,level,type='ge'):
self.__level = level
self.__type = type
def filter(self, logRecord):
if self.__type == 'ge':
return logRecord.levelno >= self.__level
elif self.__type == 'eq':
return logRecord.levelno == self.__level
else:
return logRecord.levelno <= self.__level
class MyLogger(logging.getLoggerClass()):
_PERF = 21
def __init__(self, name, **kwargs ):
super().__init__(name, **kwargs)
logging.addLevelName(self._PERF, 'PERF')
def perf(self, message, *args, **kwargs):
if self.isEnabledFor(self._PERF):
self._log(self._PERF, message, args, **kwargs)
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('IDM_user_load_tester')
logger.setLevel(logging.INFO)
_stout_handler = logging.StreamHandler()
_stout_handler.setLevel(logging.INFO)
logger.addHandler(_stout_handler)
def iter_timer(iterable, step=10, label=""):
    step = max(step, 1)  # guard against step == 0 when fewer than 10 items
    start = time.time()
last_t = start
loop_tag = "loop {}{}{{}}".format(label, " "*bool(label))
logger.perf(loop_tag.format("start"))
pos = 0
# step_count=len(iterable)//step
for item in iterable:
pos = pos + 1
if pos != 0 and pos % step == 0:
logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t))
last_t = time.time()
yield item
logger.perf("{}: {:4.3f} {:4.3f}".format(pos,time.time() - start, time.time() - last_t))
logger.perf(loop_tag.format("end"))
def loop_timer(count,step=10,label=""):
    step = max(step, 1)  # guard against step == 0 when count < 10
    start = time.time()
last_t = start
loop_tag = "loop {}{}{{}}".format(label, " "*bool(label))
logger.perf(loop_tag.format("start"))
for item in range(count):
if item != 0 and item % step == 0:
logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t))
last_t = time.time()
yield item
logger.perf("{}: {:4.3f} {:4.3f}".format(count,time.time() - start, time.time() - last_t))
logger.perf(loop_tag.format("end"))
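# Sample perf line (timings illustrative): each step logs the item plus the
# cumulative and per-step seconds, e.g. "10: 1.234 0.120".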
# creates a generator to iterate through a list in chunks
# returns an iterator chunk of the iterable of up to the given size.
def chunker(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it,size))
if not chunk:
return
yield chunk
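# Example (added for illustration):
#   list(chunker(range(7), 3)) == [(0, 1, 2), (3, 4, 5), (6,)]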
def dump_ldap_stats(reset=True):
logger.debug(ldap_conn.usage)
if reset:
ldap_conn.usage.reset()
def generate_user(seq_num, ldif_out=False, dc_dn=None):
#create a list/dict of user entries to use for passing to a function
user = {}
user["a_uid"] = uid_template.format(seq=seq_num)
user["o_givenname"] = str(seq_num)
user["o_sn"] = "tuser_{}".format(randomizer)
user["o_cn"] = "{} {}".format(user["o_givenname"], user["o_sn"])
user["o_preferredlanguage"]='EN'
user["o_employeetype"]="Created via load_test.py. Run started at: {}".format(start_timestr)
# if the user is to be used for LDIF, strip the first two prepended chars
if ldif_out:
clean_rex = r"^._"
keylist = list(user.keys())
user['attributes']={}
for key in keylist:
new_key = re.sub(clean_rex,'',key)
user['attributes'][new_key]=user[key]
del user[key]
if dc_dn is not None:
user['dn']="uid={},cn=staged users,cn=accounts,cn=provisioning,{}".format(user['attributes']['uid'],dc_dn)
user['object_class']=['top','inetorgperson']
return user
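# Shape sketch (uid below is illustrative; the randomizer varies per run):
#   generate_user(3) -> {'a_uid': 'tuser120930_3', 'o_givenname': '3',
#                        'o_sn': 'tuser_120930', 'o_cn': '3 tuser_120930', ...}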
def add_users_api(total):
users=[]
for i in loop_timer(args.count,args.count//10,label="user_add_api"):
user = generate_user(i)
users.append(user["a_uid"])
logger.debug(user)
if args.stage:
user_out = client.stageuser_add(**user)
else:
user_out = client.user_add(**user)
logger.debug(user_out)
return users
def add_users_stage(total):
users=[]
if args.ldap_stage:
for i in loop_timer(args.count,args.count//10,label="user_add_stage_ldap"):
user = generate_user(i, ldif_out=True, dc_dn=dom_dn)
users.append(user['attributes']['uid'])
user_dn=user['dn']
del user['dn']
ldap_conn.add(user_dn,**user)
else:
for i in loop_timer(args.count,args.count//10,label="user_add_stage"):
user = generate_user(i)
users.append(user["a_uid"])
logger.debug(user)
user_out = client.stageuser_add(**user)
logger.debug(user_out)
for i in iter_timer(users,args.count//10,label="user_activate"):
activate_out = client.stageuser_activate(i)
logger.debug(activate_out)
return users
def get_users(template):
logger.perf("Checking for user template '{}'".format(template))
if client.user_find(template,o_sizelimit=1)['count'] > 0:
users = [ user['uid'][0] for user in client.user_find(template,o_sizelimit=0,o_timelimit=0)['result']]
logger.perf("Found {} users".format(len(users)))
else:
logger.perf("Unable to find user template")
exit(1)
return users
def get_users_ldap(template):
logger.perf("Checking for user template '{}'".format(template))
results = client.user_find(template,o_sizelimit=1)
if results['count'] > 0:
result=results['result'][0]
uid = result['uid'][0]
user_dn=result['dn']
base_dn = re.sub("uid={},".format(uid),'',user_dn)
entry_gen = ldap_conn.extend.standard.paged_search(search_base = base_dn,
search_filter = "(uid={}*)".format(template),
search_scope = ldap3.SUBTREE,
attributes = '*',
paged_size=1000,
generator=True)
total = 0
users=[]
for entry in entry_gen:
# print(entry)
total += 1
if total % 10000 == 0:
logger.perf("Loaded {} users".format(total))
dump_ldap_stats()
# extract user uid. For some reason uid is a list, we only need the first
users.append(entry['attributes']['uid'][0])
if args.user_limit>-1 and total >= args.user_limit:
break
logger.perf("Loaded {} users".format(len(users)))
dump_ldap_stats()
else:
logger.perf("Unable to find user template")
exit(1)
return users
def create_group_add_users_api(i,users):
group_name = "group{}_{}".format(randomizer,i)
group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr)
logger.info("Creating group: {}".format(group_name))
result = client.group_add(group_name, o_description=group_desc)
if result["value"]==group_name:
logger.info("Success")
logger.debug(result)
logger.perf("Group: {}".format(group_name))
logger.info("Adding {} users".format(len(users)))
result = client.group_add_member(group_name, o_user=users)
logger.info("Done")
logger.debug(result)
def create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=-1):
group_name = "group{}_{}".format(randomizer,i)
group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr)
logger.info("Creating group: {}".format(group_name))
result = client.group_add(group_name, o_description=group_desc,o_raw=True)
group_dn=result['result']['dn']
logger.debug(result)
mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_ADD, chunk)
def remove_group_users_ldap(users, ldap_conn, base_user_dn, group_name, group_dn, chunk=-1):
logger.info("Group to delete: {}".format(group_dn))
start = time.time()
mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_DELETE, chunk)
logger.perf("Removing users from group took: {:4.3f}".format(time.time() - start))
result = client.group_show(group_name)
logger.info("Group show: {}".format(result))
logger.info("Delete group from IDM: {}".format(group_dn))
start = time.time()
result = client.group_del(group_name)
logger.perf("Delete group using API took: {:4.3f}".format(time.time() - start))
logger.info("Group del resul: {}".format(result))
def ldap_modify_retry(*fargs, **kwargs):
for retry_num in range(args.max_retries+1):
try:
return(ldap_conn.modify(*fargs,**kwargs))
except Exception as e:
logger.perf("Exception Occured")
logger.perf("'{}'".format(e))
logger.perf("{} retries left".format(args.max_retries-retry_num))
ldap_conn.unbind()
ldap_conn.bind()
logger.info("LDAP Connection rebound")
def mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap_mod_op, chunk=-1):
if chunk==-1:
chunk=len(users)
user_dn_list = [base_user_dn.format(user) for user in users]
for user_dn_chunk in chunker(user_dn_list,chunk):
# print(user_dn_chunk)
logger.perf("Chunk ({})".format(len(user_dn_chunk)))
logger.debug("Showing fist 20 of user_dn_chunk: {}".format(user_dn_chunk[:20]))
# result = ldap_conn.modify(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]})
result = ldap_modify_retry(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]})
dump_ldap_stats()
logger.debug("LDAP Modify result: {}".format(result))
if args.rebind:
logger.perf("rebinding LDAP connection")
ldap_conn.unbind()
ldap_conn.bind()
if args.delay>0:
logger.perf("Sleeping {} seconds".format(args.delay))
time.sleep(args.delay)
def check_dns_record(server, domain, record):
resolver = dns.resolver.Resolver()
resolver.nameservers=[socket.gethostbyname(server)]
try:
rdata = resolver.query(record + "." + domain)
logger.perf("Server [{}] answered with [{}]".format(server, rdata[0].address))
return 1
except dns.resolver.NXDOMAIN:
logger.perf("Record [{}] doesn't exist on server [{}]".format(record + "." + domain, server))
return 0
parser = argparse.ArgumentParser(description="Generate load test data for IdM",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help="Increase Verbosity, default is errors only. Only effective up to 3 levels.")
parser.add_argument('-c', type=int, dest='count',
help="Total count of users to add")
parser.add_argument('-g', dest='group_count', default=1, type=int,
help="Number of groups to create")
parser.add_argument('-S', dest='server', type=str,
help="Server to connect to")
parser.add_argument('-U', dest='user', type=str,
help="User account to use for connect")
parser.add_argument('-P', dest='password', type=str,
help="Password for connection")
parser.add_argument('--stage', dest='stage', action='store_true', default=False,
help="Create user in stage not active")
parser.add_argument('--stage-ldap', dest='ldap_stage', default=False, action='store_true',
help='Create stage users via ldap not API')
parser.add_argument('--ldap-group', dest='ldap_group', default=False, action='store_true',
help="Add users to group using LDAP directly")
parser.add_argument('--ldap-group-remove', dest='ldap_group_del', type=str,
help="Remove users from group using LDAP directly")
parser.add_argument('-C', dest='chunk', type=int, default=-1,
help="Chunk size for batching user adds to groups, -1 means all users given in count")
parser.add_argument('-r', dest='reuse_template', type=str,
help="Reuse existing users for group add using given user naming template")
parser.add_argument('-D', dest='delay',type=int, default=0,
help="Delay N seconds between chunks")
parser.add_argument('--rebind', dest='rebind',default=False,action='store_true',
help="Perform a unmind/bind operation between ldap operations.")
parser.add_argument('-l', dest='user_limit', type=int, default=-1,
help="Limit the number of users returned by reuse")
parser.add_argument('--max-retries',dest='max_retries', type=int, default=0,
help="Maximum number of retries for a failed chunk operation")
parser.add_argument('--check-repl', dest='check_repl',default=False,action='store_true',
help="Check when replication is finished by adding a DNS record")
args=parser.parse_args()
# setting up logger here to prevent log files being generated when showing help
perf_logfile = "perf_{}".format(randomizer)
_perf_handler = logging.FileHandler(perf_logfile)
_perf_formatter = logging.Formatter("%(asctime)s; %(message)s")
_perf_handler.setFormatter(_perf_formatter)
_perf_handler.addFilter(LogFilter(MyLogger._PERF,type='eq'))
logger.addHandler(_perf_handler)
if args.verbosity:
    # base threshold is 30 (WARNING); each -v lowers it by 10
level=30-(args.verbosity*10)
if level<0:
level=0
logger.setLevel(level)
levels={ 5: "CRITICAL",
4: "ERROR",
3: "WARNING",
2: "INFO",
1: "DEBUG",
0: "ALL" }
if level!=30:
log_file = "log_{}".format(randomizer)
_file_handler = logging.FileHandler(log_file)
_file_formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s')
_file_handler.setFormatter(_file_formatter)
_file_handler.addFilter(LogFilter(level))
logger.addHandler(_file_handler)
logger.info("Logging to file '{}'".format(log_file))
logger.info("Debug level: {0} ({1})".format(levels[level // 10],level))
# client = ClientMeta('ipaserver0.example.com',False)
# client.login('admin', 'admin123')
# kerberos seems broken using OS rpms on RHEL 8
#client.login_kerberos()
# user = client.user_add('test4', 'John', 'Doe', 'John Doe', o_preferredlanguage='EN')
# Output some data to the user about the script options passed in
# Not working as expected when git not found
try:
commit_info = str(subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:"%ci %H"']),"utf-8").strip()
logger.perf("Commit Info: {}".format(commit_info))
except Exception:
    logger.perf("No git info found")
logger.perf("Start Time: {}".format(start_timestr))
logger.perf("User count: {} Group count: {}".format(args.count,args.group_count))
logger.perf("Server: {}".format(args.server))
logger.perf("Perf Log file: {}".format(perf_logfile))
if args.stage:
if args.ldap_stage:
logger.perf("Creating Stage users via ldap")
else:
logger.perf("Creating Stage users via API")
else:
logger.perf("Creating active users via API")
if args.ldap_group:
logger.perf("Adding users to groups via LDAP")
if args.chunk>-1:
logger.perf(" Using a chunk size of {}".format(args.chunk))
else:
logger.perf("Adding users to groups via API")
if args.reuse_template:
logger.perf("Reusing users starting with: '{}'".format(args.reuse_template))
if args.user_limit>-1:
logger.perf(" Limiting reuse to first {} users found".format(args.user_limit))
logger.debug(args)
logger.perf('----')
# end start header
client = ClientMeta(args.server,False)
client.login(args.user, args.password)
dnszone = client.dnszone_find(o_forward_only=True)['result'][0]
servers = dnszone['nsrecord']
domain = dnszone['idnsname'][0]['__dns_name__']
logger.info("Found servers: {} for domain: [{}]".format(servers, domain))
if args.ldap_group or args.ldap_stage:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn)
dom_dn = re.search("(dc=.*)",user_dn, re.IGNORECASE).group(1)
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True)
if args.reuse_template:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={},".format(args.user),'',user_dn)
logger.debug("base_user_dn: {}".format(base_user_dn))
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True)
users=get_users_ldap(args.reuse_template)
else:
logger.info("Creating {} users".format(args.count))
logger.info("template: {}".format(uid_template))
logger.info("Checking for existing templated users")
user_check=client.user_find(uid_template.format(seq=0))
if user_check["count"]>0:
sec_to_wait = 61 - datetime.now().second
logger.error("Existing users found please wait {} seconds".format(sec_to_wait))
exit(1)
else:
logger.info("Proceeding")
if args.stage:
users = add_users_stage(args.count)
else:
users = add_users_api(args.count)
if args.ldap_group:
# print(ldap_server.info)
# for i in iter_timer(range(args.group_count),step=1,label="group_add_user_ldap"):
# create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk)
for i in loop_timer(args.group_count,1,label="group_add_user_ldap"):
create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk)
elif args.ldap_group_del is not None:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
group_dn=client.group_show(args.ldap_group_del,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn)
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True)
remove_group_users_ldap(users, ldap_conn, base_user_dn, args.ldap_group_del, group_dn, chunk=args.chunk)
else:
for i in loop_timer(args.group_count,1,label="group_add_user_api"):
create_group_add_users_api(i,users)
logger.perf('----')
logger.perf("End Time: {}".format(datetime.now().strftime("%Y%m%d %H:%M")))
run_time=time.time() - start_time
logger.perf("Total Run Time: {:.3f}sec".format(run_time))
logger.perf("Total Run time: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
if args.check_repl:
record = "trecord{}".format(randomizer)
client.dnsrecord_add(a_dnszoneidnsname=domain, a_idnsname=record, o_a_part_ip_address='1.1.1.1')
check_result = 0
itr_ctr = 0
while check_result < len(servers) and itr_ctr < 600:
time.sleep(1)
check_result = 0
logger.perf("---- Iteration [{}] ----".format(itr_ctr))
for server in servers:
check_result += check_dns_record(server, domain, record)
itr_ctr += 1
logger.perf('----')
logger.perf("End Time with replication: {}".format(datetime.now().strftime("%Y%m%d %H:%M")))
run_time=time.time() - start_time
logger.perf("Total Run Time with replication: {:.3f}sec".format(run_time))
logger.perf("Total Run time with replication: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
| 36.859745
| 117
| 0.676517
|
import sys
import time
from datetime import datetime
import re
import argparse
import logging
import itertools
import pprint
import subprocess
import socket
import dns.resolver
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import ldap3
from python_freeipa import ClientMeta
randomizer = datetime.now().strftime("%d%H%M")
start_timestr = datetime.now().strftime("%Y%m%d %H:%M")
start_time = time.time()
uid_template = "tuser{}_{{seq}}".format(randomizer)
pp=pprint.PrettyPrinter(indent=2)
class LogFilter(object):
def __init__(self,level,type='ge'):
self.__level = level
self.__type = type
def filter(self, logRecord):
if self.__type == 'ge':
return logRecord.levelno >= self.__level
elif self.__type == 'eq':
return logRecord.levelno == self.__level
else:
return logRecord.levelno <= self.__level
class MyLogger(logging.getLoggerClass()):
_PERF = 21
def __init__(self, name, **kwargs ):
super().__init__(name, **kwargs)
logging.addLevelName(self._PERF, 'PERF')
def perf(self, message, *args, **kwargs):
if self.isEnabledFor(self._PERF):
self._log(self._PERF, message, args, **kwargs)
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('IDM_user_load_tester')
logger.setLevel(logging.INFO)
_stout_handler = logging.StreamHandler()
_stout_handler.setLevel(logging.INFO)
logger.addHandler(_stout_handler)
def iter_timer(iterable, step=10, label=""):
    step = max(step, 1)
    start = time.time()
last_t = start
loop_tag = "loop {}{}{{}}".format(label, " "*bool(label))
logger.perf(loop_tag.format("start"))
pos = 0
for item in iterable:
pos = pos + 1
if pos != 0 and pos % step == 0:
logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t))
last_t = time.time()
yield item
logger.perf("{}: {:4.3f} {:4.3f}".format(pos,time.time() - start, time.time() - last_t))
logger.perf(loop_tag.format("end"))
def loop_timer(count,step=10,label=""):
    step = max(step, 1)
    start = time.time()
last_t = start
loop_tag = "loop {}{}{{}}".format(label, " "*bool(label))
logger.perf(loop_tag.format("start"))
for item in range(count):
if item != 0 and item % step == 0:
logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t))
last_t = time.time()
yield item
logger.perf("{}: {:4.3f} {:4.3f}".format(count,time.time() - start, time.time() - last_t))
logger.perf(loop_tag.format("end"))
def chunker(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it,size))
if not chunk:
return
yield chunk
def dump_ldap_stats(reset=True):
logger.debug(ldap_conn.usage)
if reset:
ldap_conn.usage.reset()
def generate_user(seq_num, ldif_out=False, dc_dn=None):
user = {}
user["a_uid"] = uid_template.format(seq=seq_num)
user["o_givenname"] = str(seq_num)
user["o_sn"] = "tuser_{}".format(randomizer)
user["o_cn"] = "{} {}".format(user["o_givenname"], user["o_sn"])
user["o_preferredlanguage"]='EN'
user["o_employeetype"]="Created via load_test.py. Run started at: {}".format(start_timestr)
if ldif_out:
clean_rex = r"^._"
keylist = list(user.keys())
user['attributes']={}
for key in keylist:
new_key = re.sub(clean_rex,'',key)
user['attributes'][new_key]=user[key]
del user[key]
if dc_dn is not None:
user['dn']="uid={},cn=staged users,cn=accounts,cn=provisioning,{}".format(user['attributes']['uid'],dc_dn)
user['object_class']=['top','inetorgperson']
return user
def add_users_api(total):
users=[]
for i in loop_timer(args.count,args.count//10,label="user_add_api"):
user = generate_user(i)
users.append(user["a_uid"])
logger.debug(user)
if args.stage:
user_out = client.stageuser_add(**user)
else:
user_out = client.user_add(**user)
logger.debug(user_out)
return users
def add_users_stage(total):
users=[]
if args.ldap_stage:
for i in loop_timer(args.count,args.count//10,label="user_add_stage_ldap"):
user = generate_user(i, ldif_out=True, dc_dn=dom_dn)
users.append(user['attributes']['uid'])
user_dn=user['dn']
del user['dn']
ldap_conn.add(user_dn,**user)
else:
for i in loop_timer(args.count,args.count//10,label="user_add_stage"):
user = generate_user(i)
users.append(user["a_uid"])
logger.debug(user)
user_out = client.stageuser_add(**user)
logger.debug(user_out)
for i in iter_timer(users,args.count//10,label="user_activate"):
activate_out = client.stageuser_activate(i)
logger.debug(activate_out)
return users
def get_users(template):
logger.perf("Checking for user template '{}'".format(template))
if client.user_find(template,o_sizelimit=1)['count'] > 0:
users = [ user['uid'][0] for user in client.user_find(template,o_sizelimit=0,o_timelimit=0)['result']]
logger.perf("Found {} users".format(len(users)))
else:
logger.perf("Unable to find user template")
exit(1)
return users
def get_users_ldap(template):
logger.perf("Checking for user template '{}'".format(template))
results = client.user_find(template,o_sizelimit=1)
if results['count'] > 0:
result=results['result'][0]
uid = result['uid'][0]
user_dn=result['dn']
base_dn = re.sub("uid={},".format(uid),'',user_dn)
entry_gen = ldap_conn.extend.standard.paged_search(search_base = base_dn,
search_filter = "(uid={}*)".format(template),
search_scope = ldap3.SUBTREE,
attributes = '*',
paged_size=1000,
generator=True)
total = 0
users=[]
for entry in entry_gen:
total += 1
if total % 10000 == 0:
logger.perf("Loaded {} users".format(total))
dump_ldap_stats()
users.append(entry['attributes']['uid'][0])
if args.user_limit>-1 and total >= args.user_limit:
break
logger.perf("Loaded {} users".format(len(users)))
dump_ldap_stats()
else:
logger.perf("Unable to find user template")
exit(1)
return users
def create_group_add_users_api(i,users):
group_name = "group{}_{}".format(randomizer,i)
group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr)
logger.info("Creating group: {}".format(group_name))
result = client.group_add(group_name, o_description=group_desc)
if result["value"]==group_name:
logger.info("Success")
logger.debug(result)
logger.perf("Group: {}".format(group_name))
logger.info("Adding {} users".format(len(users)))
result = client.group_add_member(group_name, o_user=users)
logger.info("Done")
logger.debug(result)
def create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=-1):
group_name = "group{}_{}".format(randomizer,i)
group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr)
logger.info("Creating group: {}".format(group_name))
result = client.group_add(group_name, o_description=group_desc,o_raw=True)
group_dn=result['result']['dn']
logger.debug(result)
mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_ADD, chunk)
def remove_group_users_ldap(users, ldap_conn, base_user_dn, group_name, group_dn, chunk=-1):
logger.info("Group to delete: {}".format(group_dn))
start = time.time()
mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_DELETE, chunk)
logger.perf("Removing users from group took: {:4.3f}".format(time.time() - start))
result = client.group_show(group_name)
logger.info("Group show: {}".format(result))
logger.info("Delete group from IDM: {}".format(group_dn))
start = time.time()
result = client.group_del(group_name)
logger.perf("Delete group using API took: {:4.3f}".format(time.time() - start))
logger.info("Group del resul: {}".format(result))
def ldap_modify_retry(*fargs, **kwargs):
for retry_num in range(args.max_retries+1):
try:
return(ldap_conn.modify(*fargs,**kwargs))
except Exception as e:
logger.perf("Exception Occured")
logger.perf("'{}'".format(e))
logger.perf("{} retries left".format(args.max_retries-retry_num))
ldap_conn.unbind()
ldap_conn.bind()
logger.info("LDAP Connection rebound")
def mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap_mod_op, chunk=-1):
if chunk==-1:
chunk=len(users)
user_dn_list = [base_user_dn.format(user) for user in users]
for user_dn_chunk in chunker(user_dn_list,chunk):
logger.perf("Chunk ({})".format(len(user_dn_chunk)))
logger.debug("Showing fist 20 of user_dn_chunk: {}".format(user_dn_chunk[:20]))
result = ldap_modify_retry(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]})
dump_ldap_stats()
logger.debug("LDAP Modify result: {}".format(result))
if args.rebind:
logger.perf("rebinding LDAP connection")
ldap_conn.unbind()
ldap_conn.bind()
if args.delay>0:
logger.perf("Sleeping {} seconds".format(args.delay))
time.sleep(args.delay)
def check_dns_record(server, domain, record):
resolver = dns.resolver.Resolver()
resolver.nameservers=[socket.gethostbyname(server)]
try:
rdata = resolver.query(record + "." + domain)
logger.perf("Server [{}] answered with [{}]".format(server, rdata[0].address))
return 1
except dns.resolver.NXDOMAIN:
logger.perf("Record [{}] doesn't exist on server [{}]".format(record + "." + domain, server))
return 0
parser = argparse.ArgumentParser(description="Generate load test data for IdM",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help="Increase Verbosity, default is errors only. Only effective up to 3 levels.")
parser.add_argument('-c', type=int, dest='count',
help="Total count of users to add")
parser.add_argument('-g', dest='group_count', default=1, type=int,
help="Number of groups to create")
parser.add_argument('-S', dest='server', type=str,
help="Server to connect to")
parser.add_argument('-U', dest='user', type=str,
help="User account to use for connect")
parser.add_argument('-P', dest='password', type=str,
help="Password for connection")
parser.add_argument('--stage', dest='stage', action='store_true', default=False,
help="Create user in stage not active")
parser.add_argument('--stage-ldap', dest='ldap_stage', default=False, action='store_true',
help='Create stage users via ldap not API')
parser.add_argument('--ldap-group', dest='ldap_group', default=False, action='store_true',
help="Add users to group using LDAP directly")
parser.add_argument('--ldap-group-remove', dest='ldap_group_del', type=str,
help="Remove users from group using LDAP directly")
parser.add_argument('-C', dest='chunk', type=int, default=-1,
help="Chunk size for batching user adds to groups, -1 means all users given in count")
parser.add_argument('-r', dest='reuse_template', type=str,
help="Reuse existing users for group add using given user naming template")
parser.add_argument('-D', dest='delay',type=int, default=0,
help="Delay N seconds between chunks")
parser.add_argument('--rebind', dest='rebind',default=False,action='store_true',
help="Perform a unmind/bind operation between ldap operations.")
parser.add_argument('-l', dest='user_limit', type=int, default=-1,
help="Limit the number of users returned by reuse")
parser.add_argument('--max-retries',dest='max_retries', type=int, default=0,
help="Maximum number of retries for a failed chunk operation")
parser.add_argument('--check-repl', dest='check_repl',default=False,action='store_true',
help="Check when replication is finished by adding a DNS record")
args=parser.parse_args()
# setting up logger here to prevent log files being generated when showing help
perf_logfile = "perf_{}".format(randomizer)
_perf_handler = logging.FileHandler(perf_logfile)
_perf_formatter = logging.Formatter("%(asctime)s; %(message)s")
_perf_handler.setFormatter(_perf_formatter)
_perf_handler.addFilter(LogFilter(MyLogger._PERF,type='eq'))
logger.addHandler(_perf_handler)
if args.verbosity:
    # base threshold is 30 (WARNING); each -v lowers it by 10
level=30-(args.verbosity*10)
if level<0:
level=0
logger.setLevel(level)
levels={ 5: "CRITICAL",
4: "ERROR",
3: "WARNING",
2: "INFO",
1: "DEBUG",
0: "ALL" }
if level!=30:
log_file = "log_{}".format(randomizer)
_file_handler = logging.FileHandler(log_file)
_file_formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s')
_file_handler.setFormatter(_file_formatter)
_file_handler.addFilter(LogFilter(level))
logger.addHandler(_file_handler)
logger.info("Logging to file '{}'".format(log_file))
logger.info("Debug level: {0} ({1})".format(levels[level // 10],level))
# client = ClientMeta('ipaserver0.example.com',False)
# client.login('admin', 'admin123')
# kerberos seems broken using OS rpms on RHEL 8
#client.login_kerberos()
# user = client.user_add('test4', 'John', 'Doe', 'John Doe', o_preferredlanguage='EN')
# Output some data to the user about the script options passed in
# Not working as expected when git not found
try:
commit_info = str(subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:"%ci %H"']),"utf-8").strip()
logger.perf("Commit Info: {}".format(commit_info))
except Exception:
    logger.perf("No git info found")
logger.perf("Start Time: {}".format(start_timestr))
logger.perf("User count: {} Group count: {}".format(args.count,args.group_count))
logger.perf("Server: {}".format(args.server))
logger.perf("Perf Log file: {}".format(perf_logfile))
if args.stage:
if args.ldap_stage:
logger.perf("Creating Stage users via ldap")
else:
logger.perf("Creating Stage users via API")
else:
logger.perf("Creating active users via API")
if args.ldap_group:
logger.perf("Adding users to groups via LDAP")
if args.chunk>-1:
logger.perf(" Using a chunk size of {}".format(args.chunk))
else:
logger.perf("Adding users to groups via API")
if args.reuse_template:
logger.perf("Reusing users starting with: '{}'".format(args.reuse_template))
if args.user_limit>-1:
logger.perf(" Limiting reuse to first {} users found".format(args.user_limit))
logger.debug(args)
logger.perf('----')
# end start header
client = ClientMeta(args.server,False)
client.login(args.user, args.password)
dnszone = client.dnszone_find(o_forward_only=True)['result'][0]
servers = dnszone['nsrecord']
domain = dnszone['idnsname'][0]['__dns_name__']
logger.info("Found servers: {} for domain: [{}]".format(servers, domain))
if args.ldap_group or args.ldap_stage:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn)
dom_dn = re.search("(dc=.*)",user_dn, re.IGNORECASE).group(1)
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True)
if args.reuse_template:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={},".format(args.user),'',user_dn)
logger.debug("base_user_dn: {}".format(base_user_dn))
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True)
users=get_users_ldap(args.reuse_template)
else:
logger.info("Creating {} users".format(args.count))
logger.info("template: {}".format(uid_template))
logger.info("Checking for existing templated users")
user_check=client.user_find(uid_template.format(seq=0))
if user_check["count"]>0:
sec_to_wait = 61 - datetime.now().second
logger.error("Existing users found please wait {} seconds".format(sec_to_wait))
exit(1)
else:
logger.info("Proceeding")
if args.stage:
users = add_users_stage(args.count)
else:
users = add_users_api(args.count)
if args.ldap_group:
# print(ldap_server.info)
# for i in iter_timer(range(args.group_count),step=1,label="group_add_user_ldap"):
# create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk)
for i in loop_timer(args.group_count,1,label="group_add_user_ldap"):
create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk)
elif args.ldap_group_del is not None:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
group_dn=client.group_show(args.ldap_group_del,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn)
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True)
remove_group_users_ldap(users, ldap_conn, base_user_dn, args.ldap_group_del, group_dn, chunk=args.chunk)
else:
for i in loop_timer(args.group_count,1,label="group_add_user_api"):
create_group_add_users_api(i,users)
logger.perf('----')
logger.perf("End Time: {}".format(datetime.now().strftime("%Y%m%d %H:%M")))
run_time=time.time() - start_time
logger.perf("Total Run Time: {:.3f}sec".format(run_time))
logger.perf("Total Run time: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
if args.check_repl:
record = "trecord{}".format(randomizer)
client.dnsrecord_add(a_dnszoneidnsname=domain, a_idnsname=record, o_a_part_ip_address='1.1.1.1')
check_result = 0
itr_ctr = 0
while check_result < len(servers) and itr_ctr < 600:
time.sleep(1)
check_result = 0
logger.perf("---- Iteration [{}] ----".format(itr_ctr))
for server in servers:
check_result += check_dns_record(server, domain, record)
itr_ctr += 1
logger.perf('----')
logger.perf("End Time with replication: {}".format(datetime.now().strftime("%Y%m%d %H:%M")))
run_time=time.time() - start_time
logger.perf("Total Run Time with replication: {:.3f}sec".format(run_time))
logger.perf("Total Run time with replication: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
| true
| true
|
f702d770ee6291d4f0860e1e69892baca123eccb
| 10,186
|
py
|
Python
|
tests/components/upnp/test_config_flow.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 4
|
2016-06-22T12:00:41.000Z
|
2018-06-11T20:31:25.000Z
|
tests/components/upnp/test_config_flow.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 57
|
2020-10-15T06:47:00.000Z
|
2022-03-31T06:11:18.000Z
|
tests/components/upnp/test_config_flow.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 6
|
2019-07-06T00:43:13.000Z
|
2021-01-16T13:27:06.000Z
|
"""Test UPnP/IGD config flow."""
from datetime import timedelta
from unittest.mock import AsyncMock, patch
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.upnp.const import (
CONFIG_ENTRY_SCAN_INTERVAL,
CONFIG_ENTRY_ST,
CONFIG_ENTRY_UDN,
DEFAULT_SCAN_INTERVAL,
DISCOVERY_LOCATION,
DISCOVERY_NAME,
DISCOVERY_ST,
DISCOVERY_UDN,
DISCOVERY_UNIQUE_ID,
DISCOVERY_USN,
DOMAIN,
DOMAIN_COORDINATORS,
)
from homeassistant.components.upnp.device import Device
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .mock_device import MockDevice
from tests.common import MockConfigEntry
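# These tests target Home Assistant's pytest harness; a conventional way to run
# them (shown for orientation, not mandated by this file) is:
#   pytest tests/components/upnp/test_config_flow.py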
async def test_flow_ssdp_discovery(hass: HomeAssistantType):
"""Test config flow: discovered + configured through ssdp."""
udn = "uuid:device_1"
location = "dummy"
mock_device = MockDevice(udn)
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(
Device, "async_discover", AsyncMock(return_value=discoveries)
), patch.object(
Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0])
):
# Discovered via step ssdp.
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_LOCATION: location,
ssdp.ATTR_SSDP_ST: mock_device.device_type,
ssdp.ATTR_SSDP_USN: mock_device.usn,
ssdp.ATTR_UPNP_UDN: mock_device.udn,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
# Confirm via step ssdp_confirm.
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_ssdp_discovery_incomplete(hass: HomeAssistantType):
"""Test config flow: incomplete discovery through ssdp."""
udn = "uuid:device_1"
location = "dummy"
mock_device = MockDevice(udn)
# Discovered via step ssdp.
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_ST: mock_device.device_type,
# ssdp.ATTR_UPNP_UDN: mock_device.udn, # Not provided.
ssdp.ATTR_SSDP_LOCATION: location,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "incomplete_discovery"
async def test_flow_user(hass: HomeAssistantType):
"""Test config flow: discovered + configured through user."""
udn = "uuid:device_1"
location = "dummy"
mock_device = MockDevice(udn)
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(
Device, "async_discover", AsyncMock(return_value=discoveries)
), patch.object(
Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0])
):
# Discovered via step user.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Confirmed via step user.
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={"unique_id": mock_device.unique_id},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_import(hass: HomeAssistantType):
"""Test config flow: discovered + configured through configuration.yaml."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
location = "dummy"
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(
Device, "async_discover", AsyncMock(return_value=discoveries)
), patch.object(
Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0])
):
# Discovered via step import.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_import_already_configured(hass: HomeAssistantType):
"""Test config flow: discovered, but already configured."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
# Existing entry.
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: mock_device.udn,
CONFIG_ENTRY_ST: mock_device.device_type,
},
options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
config_entry.add_to_hass(hass)
# Discovered via step import.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_import_incomplete(hass: HomeAssistantType):
"""Test config flow: incomplete discovery, configured through configuration.yaml."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
location = "dummy"
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
# DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)):
# Discovered via step import.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "incomplete_discovery"
async def test_options_flow(hass: HomeAssistantType):
"""Test options flow."""
# Set up config entry.
udn = "uuid:device_1"
location = "http://192.168.1.1/desc.xml"
mock_device = MockDevice(udn)
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: mock_device.udn,
CONFIG_ENTRY_ST: mock_device.device_type,
},
options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
config_entry.add_to_hass(hass)
config = {
# no upnp, ensures no import-flow is started.
}
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)):
# Initialisation of component.
await async_setup_component(hass, "upnp", config)
await hass.async_block_till_done()
# DataUpdateCoordinator gets a default of 30 seconds for updates.
coordinator = hass.data[DOMAIN][DOMAIN_COORDINATORS][mock_device.udn]
assert coordinator.update_interval == timedelta(seconds=DEFAULT_SCAN_INTERVAL)
# Options flow with no input results in form.
result = await hass.config_entries.options.async_init(
config_entry.entry_id,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
# Options flow with input results in update to entry.
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONFIG_ENTRY_SCAN_INTERVAL: 60},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONFIG_ENTRY_SCAN_INTERVAL: 60,
}
# Also updates DataUpdateCoordinator.
assert coordinator.update_interval == timedelta(seconds=60)
| 35.124138
| 88
| 0.660613
|
from datetime import timedelta
from unittest.mock import AsyncMock, patch
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.upnp.const import (
CONFIG_ENTRY_SCAN_INTERVAL,
CONFIG_ENTRY_ST,
CONFIG_ENTRY_UDN,
DEFAULT_SCAN_INTERVAL,
DISCOVERY_LOCATION,
DISCOVERY_NAME,
DISCOVERY_ST,
DISCOVERY_UDN,
DISCOVERY_UNIQUE_ID,
DISCOVERY_USN,
DOMAIN,
DOMAIN_COORDINATORS,
)
from homeassistant.components.upnp.device import Device
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .mock_device import MockDevice
from tests.common import MockConfigEntry
async def test_flow_ssdp_discovery(hass: HomeAssistantType):
udn = "uuid:device_1"
location = "dummy"
mock_device = MockDevice(udn)
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(
Device, "async_discover", AsyncMock(return_value=discoveries)
), patch.object(
Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0])
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_LOCATION: location,
ssdp.ATTR_SSDP_ST: mock_device.device_type,
ssdp.ATTR_SSDP_USN: mock_device.usn,
ssdp.ATTR_UPNP_UDN: mock_device.udn,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_ssdp_discovery_incomplete(hass: HomeAssistantType):
udn = "uuid:device_1"
location = "dummy"
mock_device = MockDevice(udn)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_ST: mock_device.device_type,
ssdp.ATTR_SSDP_LOCATION: location,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "incomplete_discovery"
async def test_flow_user(hass: HomeAssistantType):
udn = "uuid:device_1"
location = "dummy"
mock_device = MockDevice(udn)
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(
Device, "async_discover", AsyncMock(return_value=discoveries)
), patch.object(
Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0])
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={"unique_id": mock_device.unique_id},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_import(hass: HomeAssistantType):
udn = "uuid:device_1"
mock_device = MockDevice(udn)
location = "dummy"
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(
Device, "async_discover", AsyncMock(return_value=discoveries)
), patch.object(
Device, "async_supplement_discovery", AsyncMock(return_value=discoveries[0])
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_import_already_configured(hass: HomeAssistantType):
udn = "uuid:device_1"
mock_device = MockDevice(udn)
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: mock_device.udn,
CONFIG_ENTRY_ST: mock_device.device_type,
},
options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_import_incomplete(hass: HomeAssistantType):
udn = "uuid:device_1"
mock_device = MockDevice(udn)
location = "dummy"
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
with patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "incomplete_discovery"
async def test_options_flow(hass: HomeAssistantType):
udn = "uuid:device_1"
location = "http://192.168.1.1/desc.xml"
mock_device = MockDevice(udn)
discoveries = [
{
DISCOVERY_LOCATION: location,
DISCOVERY_NAME: mock_device.name,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_UNIQUE_ID: mock_device.unique_id,
DISCOVERY_USN: mock_device.usn,
}
]
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: mock_device.udn,
CONFIG_ENTRY_ST: mock_device.device_type,
},
options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
config_entry.add_to_hass(hass)
config = {
}
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discoveries)):
await async_setup_component(hass, "upnp", config)
await hass.async_block_till_done()
coordinator = hass.data[DOMAIN][DOMAIN_COORDINATORS][mock_device.udn]
assert coordinator.update_interval == timedelta(seconds=DEFAULT_SCAN_INTERVAL)
result = await hass.config_entries.options.async_init(
config_entry.entry_id,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONFIG_ENTRY_SCAN_INTERVAL: 60},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONFIG_ENTRY_SCAN_INTERVAL: 60,
}
assert coordinator.update_interval == timedelta(seconds=60)
| true
| true
|
f702d8d44ce7219920cb5618642b42930ac1bfe7
| 48
|
py
|
Python
|
datasets/__init__.py
|
riccardodelutio/superpixel_fcn
|
d30a690836d7d6673b0a9f136019779f9e753f84
|
[
"MIT"
] | 291
|
2020-03-25T17:37:46.000Z
|
2022-03-31T12:32:29.000Z
|
datasets/__init__.py
|
wangyxxjtu/PCNet
|
ae4db30eeab92a1cbb30c6ef1c9878d8dbddbaf8
|
[
"MIT"
] | 32
|
2020-04-05T09:01:25.000Z
|
2022-03-13T00:37:12.000Z
|
datasets/__init__.py
|
wangyxxjtu/PCNet
|
ae4db30eeab92a1cbb30c6ef1c9878d8dbddbaf8
|
[
"MIT"
] | 71
|
2020-04-02T01:03:52.000Z
|
2022-03-25T12:12:11.000Z
|
from .BSD500 import BSD500
__all__ = ('BSD500',)
| 16
| 26
| 0.729167
|
from .BSD500 import BSD500
__all__ = ('BSD500',)
| true
| true
|
f702d8f110cd6e8a6a3ce7d246c153a15a956d43
| 85
|
py
|
Python
|
code/abc146_a_07.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/abc146_a_07.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/abc146_a_07.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
week = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]
print(7-week.index(input()))
| 42.5
| 56
| 0.529412
|
week = ["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]
print(7-week.index(input()))
| true
| true
|
f702d98b6cf99f5c87c159957faf64295569a643
| 11,484
|
py
|
Python
|
priorityqueue.py
|
mjwestcott/priorityqueue
|
6bb9876644fe5ec307fd4ea1b73e4a722f89e408
|
[
"MIT"
] | 6
|
2015-12-18T21:06:54.000Z
|
2020-03-19T09:29:10.000Z
|
priorityqueue/priorityqueue.py
|
gaurav-kumar-pandit/competitiveprogramming
|
1edb34ee7167599404ac45b9155588b79592ef4d
|
[
"MIT"
] | null | null | null |
priorityqueue/priorityqueue.py
|
gaurav-kumar-pandit/competitiveprogramming
|
1edb34ee7167599404ac45b9155588b79592ef4d
|
[
"MIT"
] | 2
|
2021-05-22T13:45:06.000Z
|
2022-03-06T18:22:05.000Z
|
"""
priorityqueue.py
Priority Queue Implementation with a O(log n) Remove Method
This file implements min- and max-oriented priority queues based on binary
heaps. I found the need for a priority queue with a O(log n) remove method.
This can't be achieved with any of Python's built-in collections, including
the heapq module, so I built my own. The heap is arranged according to a given
key function.
Usage:
>>> from priorityqueue import MinHeapPriorityQueue
>>> items = [4, 0, 1, 3, 2]
>>> pq = MinHeapPriorityQueue(items)
>>> pq.pop()
0
A priority queue accepts an optional key function.
>>> items = ['yy', 'ttttttt', 'z', 'wwww', 'uuuuuu', 'vvvvv', 'xxx']
>>> pq = MinHeapPriorityQueue(items, key=len)
>>> pq.pop()
'z'
>>> pq.pop()
'yy'
Internally, the queue is a list of tokens of type 'Locator', which contain
the priority value, the item itself, and its current index in the heap.
The index field is updated whenever the heap is modified. This is what
allows us to remove in O(log n). Appending an item returns its Locator.
>>> token = pq.append('a')
>>> token
Locator(value=1, item='a', index=0)
>>> pq.remove(token)
'a'
If we want to be able to remove any item in the queue, we can maintain an
auxiliary dictionary mapping items to their Locators. Here's a simple
example with unique items:
>>> items = [12, 46, 89, 101, 72, 81]
>>> pq = MinHeapPriorityQueue()
>>> locs = {}
>>> for item in items:
... locs[item] = pq.append(item)
>>> locs[46]
Locator(value=46, item=46, index=1)
>>> pq.remove(locs[46])
46
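The update method can likewise reprioritize an entry in place through its
Locator (an added illustration; lowering the value moves 89 to the front):
>>> pq.update(locs[89], 5, 89)
>>> pq.peek()
89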
Iterating with 'for item in pq' or iter() will produce the items, not the
Locator instances used in the internal representation. The items will be
generated in sorted order.
>>> items = [3, 1, 0, 2, 4]
>>> pq = MinHeapPriorityQueue(items)
>>> for item in pq:
... print(item)
0
1
2
3
4
"""
# Inspired by:
# - AdaptableHeapPriorityQueue in 'Data Structures and Algorithms in Python'
# - the Go Standard library's heap package
# - Python's heapq module
# - Raymond Hettinger's SortedCollection on ActiveState
# - Peter Norvig's PriorityQueue in the Python AIMA repo
class MinHeapPriorityQueue():
"""A locator-based min-oriented priority queue implemented with a binary
heap, arranged according to a key function.
Operation Running Time
len(P), P.peek() O(1)
P.update(loc, value, item) O(log n)
P.append(item) O(log n)*
P.pop() O(log n)*
P.remove(loc) O(log n)*
*amortized due to occasional resizing of the underlying python list
"""
def __init__(self, iterable=(), key=lambda x: x):
self._key = key
decorated = [(key(item), item) for item in iterable]
self._pq = [self.Locator(value, item, i) for i, (value, item) in enumerate(decorated)]
if len(self._pq) > 1:
self._heapify()
class Locator:
"""Token for locating an entry of the priority queue."""
__slots__ = '_value', '_item', '_index'
def __init__(self, value, item, i):
self._value = value
self._item = item
self._index = i
def __eq__(self, other):
return self._value == other._value
def __lt__(self, other):
return self._value < other._value
def __le__(self, other):
return self._value <= other._value
def __repr__(self):
return '{}(value={!r}, item={!r}, index={})'.format(
self.__class__.__name__,
self._value,
self._item,
self._index
)
#------------------------------------------------------------------------------
# non-public
def _parent(self, j):
return (j-1) // 2
def _left(self, j):
return 2*j + 1
def _right(self, j):
return 2*j + 2
def _swap(self, i, j):
"""Swap the elements at indices i and j of array."""
self._pq[i], self._pq[j] = self._pq[j], self._pq[i]
# Update the indices in the Locator instances.
self._pq[i]._index = i
self._pq[j]._index = j
def _upheap(self, i):
parent = self._parent(i)
if i > 0 and self._pq[i] < self._pq[parent]:
self._swap(i, parent)
self._upheap(parent)
def _downheap(self, i):
n = len(self._pq)
left, right = self._left(i), self._right(i)
if left < n:
child = left
if right < n and self._pq[right] < self._pq[left]:
child = right
if self._pq[child] < self._pq[i]:
self._swap(i, child)
self._downheap(child)
def _fix(self, i):
self._upheap(i)
self._downheap(i)
def _heapify(self):
start = self._parent(len(self) - 1) # Start at parent of last leaf
        for j in range(start, -1, -1):  # going to and including the root.
self._downheap(j)
#------------------------------------------------------------------------------
# public
def append(self, item):
"""Add an item to the heap"""
token = self.Locator(self._key(item), item, len(self._pq))
self._pq.append(token)
self._upheap(len(self._pq) - 1) # Upheap newly added position.
return token
def update(self, loc, newval, newitem):
"""Update the priority value and item for the entry identified by Locator loc."""
j = loc._index
if not (0 <= j < len(self) and self._pq[j] is loc):
raise ValueError('Invalid locator')
loc._value = newval
loc._item = newitem
self._fix(j)
def remove(self, loc):
"""Remove and return the item identified by Locator loc."""
j = loc._index
if not (0 <= j < len(self) and self._pq[j] is loc):
raise ValueError('Invalid locator')
if j == len(self) - 1:
self._pq.pop()
else:
self._swap(j, len(self) - 1)
self._pq.pop()
self._fix(j)
return loc._item
def peek(self):
"""Return but do not remove item with minimum priority value."""
loc = self._pq[0]
return loc._item
def pop(self):
"""Remove and return item with minimum priority value."""
self._swap(0, len(self._pq) - 1)
loc = self._pq.pop()
self._downheap(0)
return loc._item
@property
def items(self):
return [token._item for token in self._pq]
def __len__(self):
return len(self._pq)
def __contains__(self, item):
return item in self.items
def __iter__(self):
return iter(sorted(self.items))
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self._pq)
class MaxHeapPriorityQueue(MinHeapPriorityQueue):
"""A locator-based max-oriented priority queue implemented with a binary
heap, arranged according to a key function.
Operation Running Time
len(P), P.peek() O(1)
P.update(loc, value, item) O(log n)
P.append(item) O(log n)*
P.pop() O(log n)*
P.remove(loc) O(log n)*
*amortized due to occasional resizing of the underlying python list
"""
# Override all relevant private methods of MinHeapPriorityQueue
# with max-oriented versions.
def _upheap(self, i):
parent = self._parent(i)
if i > 0 and self._pq[parent] < self._pq[i]:
self._swap(i, parent)
self._upheap(parent)
def _downheap(self, i):
n = len(self._pq)
left, right = self._left(i), self._right(i)
if left < n:
child = left
if right < n and self._pq[left] < self._pq[right]:
child = right
if self._pq[i] < self._pq[child]:
self._swap(i, child)
self._downheap(child)
def __iter__(self):
return iter(sorted(self.items, reverse=True))
__doc__ += """
>>> import random; random.seed(42)
>>> from priorityqueue import MinHeapPriorityQueue, MaxHeapPriorityQueue
Function to verify the min-heap invariant is true for all elements of pq.
>>> def verify(pq):
... n = len(pq._pq)
... for i in range(n):
... left, right = 2*i + 1, 2*i + 2
... if left < n:
... assert pq._pq[i] <= pq._pq[left]
... if right < n:
... assert pq._pq[i] <= pq._pq[right]
Function to verify the max-heap invariant is true for all elements of pq.
>>> def verify_max(pq):
... n = len(pq._pq)
... for i in range(n):
... left, right = 2*i + 1, 2*i + 2
... if left < n:
... assert pq._pq[i] >= pq._pq[left]
... if right < n:
... assert pq._pq[i] >= pq._pq[right]
>>> items = [random.randint(1, 100) for _ in range(10000)]
>>> pq = MinHeapPriorityQueue(items)
>>> verify(pq)
>>> pq = MaxHeapPriorityQueue(items)
>>> verify_max(pq)
Check multiple signs for priority values.
>>> items = list(range(100, -100, -1))
>>> random.shuffle(items)
>>> pq = MinHeapPriorityQueue(items)
>>> verify(pq)
>>> pq = MaxHeapPriorityQueue(items)
>>> verify_max(pq)
Test pop, peek, append, remove, update, __len__, and __contains__ operations.
>>> items = ['jjjjjjjjjj', 'iiiiiiiii', 'hhhhhhhh',
... 'ggggggg', 'ffffff', 'eeeee',
... 'dddd', 'ccc', 'bb', 'a']
>>> pq = MinHeapPriorityQueue(items, key=len)
>>> verify(pq)
>>> pq.pop()
'a'
>>> pq.pop()
'bb'
>>> pq.peek()
'ccc'
>>> pq.pop()
'ccc'
>>> pq.pop()
'dddd'
>>> pq.peek()
'eeeee'
>>> pq.pop()
'eeeee'
>>> _ = pq.append('a')
>>> _ = pq.append('bb')
>>> verify(pq)
>>> pq = MaxHeapPriorityQueue(key=len)
>>> pq.append([1, 2, 3])
Locator(value=3, item=[1, 2, 3], index=0)
>>> pq.append([1, 2, 3, 4, 5, 6])
Locator(value=6, item=[1, 2, 3, 4, 5, 6], index=0)
>>> pq.append([1])
Locator(value=1, item=[1], index=2)
>>> pq.append([1, 2, 3, 4, 5, 6, 7, 8, 9])
Locator(value=9, item=[1, 2, 3, 4, 5, 6, 7, 8, 9], index=0)
>>> len(pq)
4
>>> [1] in pq
True
>>> [1, 2, 3, 4, 5] in pq
False
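Popping from the max-heap yields the item with the largest key (added check):
>>> pq.pop()
[1, 2, 3, 4, 5, 6, 7, 8, 9]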
>>> items = list(range(1, 10001))
>>> random.shuffle(items)
>>> pq = MinHeapPriorityQueue(items)
>>> verify(pq)
>>> len(pq) == 10000
True
>>> for i in range(1, 10001):
... x = pq.pop()
... assert x == i
>>> pq = MinHeapPriorityQueue()
>>> locs = {}
>>> for x in items:
... locs[x] = pq.append(x)
>>> pq.remove(locs[1])
1
>>> pq.remove(locs[2])
2
>>> pq.pop()
3
>>> for i in range(4, 100):
... _ = pq.remove(locs[i])
>>> pq.pop()
100
>>> verify(pq)
>>> pq.update(locs[999], 1, 'test')
>>> 999 in pq
False
>>> pq.pop()
'test'
>>> 998 in pq
True
Test the items and __repr__ methods.
>>> items = ['a', 'b', 'c']
>>> pq = MinHeapPriorityQueue(items)
>>> pq
MinHeapPriorityQueue([Locator(value='a', item='a', index=0), Locator(value='b', item='b', index=1), Locator(value='c', item='c', index=2)])
>>> pq.items == ['a', 'b', 'c']
True
Check that __iter__ generates items in sorted order.
>>> items = list(range(1000))
>>> pq = MinHeapPriorityQueue(items)
>>> for i, x in enumerate(pq):
... assert i == x
>>> pq = MaxHeapPriorityQueue(items)
>>> for i, x in enumerate(pq):
... assert 999 - i == x
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29.674419
| 139
| 0.562609
|
class MinHeapPriorityQueue():
def __init__(self, iterable=(), key=lambda x: x):
self._key = key
decorated = [(key(item), item) for item in iterable]
self._pq = [self.Locator(value, item, i) for i, (value, item) in enumerate(decorated)]
if len(self._pq) > 1:
self._heapify()
class Locator:
__slots__ = '_value', '_item', '_index'
def __init__(self, value, item, i):
self._value = value
self._item = item
self._index = i
def __eq__(self, other):
return self._value == other._value
def __lt__(self, other):
return self._value < other._value
def __le__(self, other):
return self._value <= other._value
def __repr__(self):
return '{}(value={!r}, item={!r}, index={})'.format(
self.__class__.__name__,
self._value,
self._item,
self._index
)
def _parent(self, j):
return (j-1) // 2
def _left(self, j):
return 2*j + 1
def _right(self, j):
return 2*j + 2
def _swap(self, i, j):
self._pq[i], self._pq[j] = self._pq[j], self._pq[i]
self._pq[i]._index = i
self._pq[j]._index = j
def _upheap(self, i):
parent = self._parent(i)
if i > 0 and self._pq[i] < self._pq[parent]:
self._swap(i, parent)
self._upheap(parent)
def _downheap(self, i):
n = len(self._pq)
left, right = self._left(i), self._right(i)
if left < n:
child = left
if right < n and self._pq[right] < self._pq[left]:
child = right
if self._pq[child] < self._pq[i]:
self._swap(i, child)
self._downheap(child)
def _fix(self, i):
self._upheap(i)
self._downheap(i)
def _heapify(self):
        start = self._parent(len(self) - 1)
        for j in range(start, -1, -1):
            self._downheap(j)
def append(self, item):
token = self.Locator(self._key(item), item, len(self._pq))
self._pq.append(token)
        self._upheap(len(self._pq) - 1)
        return token
def update(self, loc, newval, newitem):
j = loc._index
if not (0 <= j < len(self) and self._pq[j] is loc):
raise ValueError('Invalid locator')
loc._value = newval
loc._item = newitem
self._fix(j)
def remove(self, loc):
j = loc._index
if not (0 <= j < len(self) and self._pq[j] is loc):
raise ValueError('Invalid locator')
if j == len(self) - 1:
self._pq.pop()
else:
self._swap(j, len(self) - 1)
self._pq.pop()
self._fix(j)
return loc._item
def peek(self):
loc = self._pq[0]
return loc._item
def pop(self):
self._swap(0, len(self._pq) - 1)
loc = self._pq.pop()
self._downheap(0)
return loc._item
@property
def items(self):
return [token._item for token in self._pq]
def __len__(self):
return len(self._pq)
def __contains__(self, item):
return item in self.items
def __iter__(self):
return iter(sorted(self.items))
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self._pq)
class MaxHeapPriorityQueue(MinHeapPriorityQueue):
def _upheap(self, i):
parent = self._parent(i)
if i > 0 and self._pq[parent] < self._pq[i]:
self._swap(i, parent)
self._upheap(parent)
def _downheap(self, i):
n = len(self._pq)
left, right = self._left(i), self._right(i)
if left < n:
child = left
if right < n and self._pq[left] < self._pq[right]:
child = right
if self._pq[i] < self._pq[child]:
self._swap(i, child)
self._downheap(child)
def __iter__(self):
return iter(sorted(self.items, reverse=True))
__doc__ += """
>>> import random; random.seed(42)
>>> from priorityqueue import MinHeapPriorityQueue, MaxHeapPriorityQueue
Function to verify the min-heap invariant is true for all elements of pq.
>>> def verify(pq):
... n = len(pq._pq)
... for i in range(n):
... left, right = 2*i + 1, 2*i + 2
... if left < n:
... assert pq._pq[i] <= pq._pq[left]
... if right < n:
... assert pq._pq[i] <= pq._pq[right]
Function to verify the max-heap invariant is true for all elements of pq.
>>> def verify_max(pq):
... n = len(pq._pq)
... for i in range(n):
... left, right = 2*i + 1, 2*i + 2
... if left < n:
... assert pq._pq[i] >= pq._pq[left]
... if right < n:
... assert pq._pq[i] >= pq._pq[right]
>>> items = [random.randint(1, 100) for _ in range(10000)]
>>> pq = MinHeapPriorityQueue(items)
>>> verify(pq)
>>> pq = MaxHeapPriorityQueue(items)
>>> verify_max(pq)
Check multiple signs for priority values.
>>> items = list(range(100, -100, -1))
>>> random.shuffle(items)
>>> pq = MinHeapPriorityQueue(items)
>>> verify(pq)
>>> pq = MaxHeapPriorityQueue(items)
>>> verify_max(pq)
Test pop, peek, append, remove, update, __len__, and __contains__ operations.
>>> items = ['jjjjjjjjjj', 'iiiiiiiii', 'hhhhhhhh',
... 'ggggggg', 'ffffff', 'eeeee',
... 'dddd', 'ccc', 'bb', 'a']
>>> pq = MinHeapPriorityQueue(items, key=len)
>>> verify(pq)
>>> pq.pop()
'a'
>>> pq.pop()
'bb'
>>> pq.peek()
'ccc'
>>> pq.pop()
'ccc'
>>> pq.pop()
'dddd'
>>> pq.peek()
'eeeee'
>>> pq.pop()
'eeeee'
>>> _ = pq.append('a')
>>> _ = pq.append('bb')
>>> verify(pq)
>>> pq = MaxHeapPriorityQueue(key=len)
>>> pq.append([1, 2, 3])
Locator(value=3, item=[1, 2, 3], index=0)
>>> pq.append([1, 2, 3, 4, 5, 6])
Locator(value=6, item=[1, 2, 3, 4, 5, 6], index=0)
>>> pq.append([1])
Locator(value=1, item=[1], index=2)
>>> pq.append([1, 2, 3, 4, 5, 6, 7, 8, 9])
Locator(value=9, item=[1, 2, 3, 4, 5, 6, 7, 8, 9], index=0)
>>> len(pq)
4
>>> [1] in pq
True
>>> [1, 2, 3, 4, 5] in pq
False
>>> items = list(range(1, 10001))
>>> random.shuffle(items)
>>> pq = MinHeapPriorityQueue(items)
>>> verify(pq)
>>> len(pq) == 10000
True
>>> for i in range(1, 10001):
... x = pq.pop()
... assert x == i
>>> pq = MinHeapPriorityQueue()
>>> locs = {}
>>> for x in items:
... locs[x] = pq.append(x)
>>> pq.remove(locs[1])
1
>>> pq.remove(locs[2])
2
>>> pq.pop()
3
>>> for i in range(4, 100):
... _ = pq.remove(locs[i])
>>> pq.pop()
100
>>> verify(pq)
>>> pq.update(locs[999], 1, 'test')
>>> 999 in pq
False
>>> pq.pop()
'test'
>>> 998 in pq
True
Test the items and __repr__ methods.
>>> items = ['a', 'b', 'c']
>>> pq = MinHeapPriorityQueue(items)
>>> pq
MinHeapPriorityQueue([Locator(value='a', item='a', index=0), Locator(value='b', item='b', index=1), Locator(value='c', item='c', index=2)])
>>> pq.items == ['a', 'b', 'c']
True
Check that __iter__ generates items in sorted order.
>>> items = list(range(1000))
>>> pq = MinHeapPriorityQueue(items)
>>> for i, x in enumerate(pq):
... assert i == x
>>> pq = MaxHeapPriorityQueue(items)
>>> for i, x in enumerate(pq):
... assert 999 - i == x
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| true
| true
|
f702d9f83de12d57ddae1af8f5e99968f870e9e6
| 971
|
py
|
Python
|
fixture/application.py
|
dmi-vor/python_training
|
1e7b480bd40ce55fe19d19042c7d5ed4ffc873c8
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
dmi-vor/python_training
|
1e7b480bd40ce55fe19d19042c7d5ed4ffc873c8
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
dmi-vor/python_training
|
1e7b480bd40ce55fe19d19042c7d5ed4ffc873c8
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
        self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
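# usage sketch (hypothetical values, using only methods defined above):
#   app = Application("chrome", "http://localhost/addressbook/")
#   app.open_home_page()
#   app.destroy()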
| 25.552632
| 65
| 0.603502
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
        self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
| true
| true
|
f702daa51357e8ff5b5a56b6929c7d46c451dc9e
| 1,385
|
py
|
Python
|
src/basior/logic_pkg/tramline.py
|
Marcin-Szadkowski/B.A.S.I.O.R
|
5b90ab6a05fdf2a3db8e5b9ba80a858a6628ab8c
|
[
"MIT"
] | 1
|
2020-04-26T17:41:33.000Z
|
2020-04-26T17:41:33.000Z
|
src/basior/logic_pkg/tramline.py
|
Marcin-Szadkowski/B.A.S.I.O.R
|
5b90ab6a05fdf2a3db8e5b9ba80a858a6628ab8c
|
[
"MIT"
] | null | null | null |
src/basior/logic_pkg/tramline.py
|
Marcin-Szadkowski/B.A.S.I.O.R
|
5b90ab6a05fdf2a3db8e5b9ba80a858a6628ab8c
|
[
"MIT"
] | 2
|
2020-06-17T16:03:01.000Z
|
2020-06-28T20:53:06.000Z
|
import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString
from .route_iterator import RouteIterator
from .graphconverter import GraphConverter
class TramLine(object):
"""Class represents single tram line for example '33: from Pilczyce to Sępolno' """
def __init__(self, number, direction_to, dl):
"""
Basic requirements to unambiguously define line
:param number: number of line as str
:param direction_to:
:param dl: DataLoader object
"""
self.number = number # Stored as str
self.direction_to = direction_to
        self.default_route = dl.load_single_line(number, direction_to)  # As you can see, default_route is a LineString
self.stops = dl.load_tram_stops(self.default_route) # List of shapely.Point objects
self.current_route = self.default_route
self.route_in_order = GraphConverter.find_route_in_order(dl, self)
"""
def show(self, with_stops=True):
# Development tool. Plot line
if isinstance(self.current_route, MultiLineString):
for line in self.current_route:
plt.plot(line.xy[0], line.xy[1])
else:
plt.plot(self.current_route.xy[0], self.current_route.xy[1])
if with_stops:
plt.scatter([p.x for p in self.stops], [p.y for p in self.stops])
plt.show()
"""
| 36.447368
| 117
| 0.66787
|
import matplotlib.pyplot as plt
from shapely.geometry import MultiLineString
from .route_iterator import RouteIterator
from .graphconverter import GraphConverter
class TramLine(object):
def __init__(self, number, direction_to, dl):
        self.number = number
        self.direction_to = direction_to
        self.default_route = dl.load_single_line(number, direction_to)
        self.stops = dl.load_tram_stops(self.default_route)
        self.current_route = self.default_route
self.route_in_order = GraphConverter.find_route_in_order(dl, self)
| true
| true
|
f702dab63fb7a27fbc99af60fb2c5b50e7bb5377
| 4,365
|
py
|
Python
|
rhasspywake_snowboy_hermes/__main__.py
|
Romkabouter/rhasspy-wake-snowboy-hermes
|
6ad5372c89650987f92c22c0b745661680c17c94
|
[
"MIT"
] | null | null | null |
rhasspywake_snowboy_hermes/__main__.py
|
Romkabouter/rhasspy-wake-snowboy-hermes
|
6ad5372c89650987f92c22c0b745661680c17c94
|
[
"MIT"
] | null | null | null |
rhasspywake_snowboy_hermes/__main__.py
|
Romkabouter/rhasspy-wake-snowboy-hermes
|
6ad5372c89650987f92c22c0b745661680c17c94
|
[
"MIT"
] | null | null | null |
"""Hermes MQTT service for Rhasspy wakeword with snowboy"""
import argparse
import asyncio
import dataclasses
import itertools
import json
import logging
import os
import sys
import typing
from pathlib import Path
import paho.mqtt.client as mqtt
import rhasspyhermes.cli as hermes_cli
from . import SnowboyModel, WakeHermesMqtt
_DIR = Path(__file__).parent
_LOGGER = logging.getLogger("rhasspywake_snowboy_hermes")
# -----------------------------------------------------------------------------
def main():
"""Main method."""
parser = argparse.ArgumentParser(prog="rhasspy-wake-snowboy-hermes")
parser.add_argument(
"--model",
required=True,
action="append",
nargs="+",
help="Snowboy model settings (model, sensitivity, audio_gain, apply_frontend)",
)
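    # for example (hypothetical invocation): --model my_model.umdl 0.5 1.0 false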
parser.add_argument(
"--model-dir",
action="append",
default=[],
help="Directories with snowboy models",
)
parser.add_argument(
"--wakeword-id",
action="append",
help="Wakeword IDs of each keyword (default: use file name)",
)
parser.add_argument(
"--stdin-audio", action="store_true", help="Read WAV audio from stdin"
)
parser.add_argument(
"--udp-audio",
nargs=3,
action="append",
help="Host/port/siteId for UDP audio input",
)
parser.add_argument("--lang", help="Set lang in hotword detected message")
hermes_cli.add_hermes_args(parser)
args = parser.parse_args()
hermes_cli.setup_logging(args)
_LOGGER.debug(args)
if args.model_dir:
args.model_dir = [Path(d) for d in args.model_dir]
# Use embedded models too
args.model_dir.append(_DIR / "models")
# Load model settings
models: typing.List[SnowboyModel] = []
for model_settings in args.model:
model_path = Path(model_settings[0])
if not model_path.is_file():
# Resolve relative to model directories
for model_dir in args.model_dir:
maybe_path = model_dir / model_path.name
if maybe_path.is_file():
model_path = maybe_path
break
_LOGGER.debug("Loading model from %s", str(model_path))
model = SnowboyModel(model_path=model_path)
if len(model_settings) > 1:
model.sensitivity = model_settings[1]
if len(model_settings) > 2:
model.audio_gain = float(model_settings[2])
if len(model_settings) > 3:
model.apply_frontend = model_settings[3].strip().lower() == "true"
models.append(model)
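    # pair each model with its wakeword id; models without one are padded with ""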
wakeword_ids = [
kn[1]
for kn in itertools.zip_longest(
args.model, args.wakeword_id or [], fillvalue=""
)
]
if args.stdin_audio:
# Read WAV from stdin, detect, and exit
client = None
hermes = WakeHermesMqtt(client, models, wakeword_ids)
for site_id in args.site_id:
hermes.load_detectors(site_id)
if os.isatty(sys.stdin.fileno()):
print("Reading WAV data from stdin...", file=sys.stderr)
wav_bytes = sys.stdin.buffer.read()
# Print results as JSON
for result in hermes.handle_audio_frame(wav_bytes):
result_dict = dataclasses.asdict(result)
json.dump(result_dict, sys.stdout, ensure_ascii=False)
return
udp_audio = []
if args.udp_audio:
udp_audio = [
(host, int(port), site_id) for host, port, site_id in args.udp_audio
]
# Listen for messages
client = mqtt.Client()
hermes = WakeHermesMqtt(
client,
models,
wakeword_ids,
model_dirs=args.model_dir,
udp_audio=udp_audio,
site_ids=args.site_id,
lang=args.lang,
)
for site_id in args.site_id:
hermes.load_detectors(site_id)
_LOGGER.debug("Connecting to %s:%s", args.host, args.port)
hermes_cli.connect(client, args)
client.loop_start()
try:
# Run event loop
asyncio.run(hermes.handle_messages_async())
except KeyboardInterrupt:
pass
finally:
_LOGGER.debug("Shutting down")
client.loop_stop()
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
| 26.615854
| 87
| 0.59748
|
import argparse
import asyncio
import dataclasses
import itertools
import json
import logging
import os
import sys
import typing
from pathlib import Path
import paho.mqtt.client as mqtt
import rhasspyhermes.cli as hermes_cli
from . import SnowboyModel, WakeHermesMqtt
_DIR = Path(__file__).parent
_LOGGER = logging.getLogger("rhasspywake_snowboy_hermes")
def main():
parser = argparse.ArgumentParser(prog="rhasspy-wake-snowboy-hermes")
parser.add_argument(
"--model",
required=True,
action="append",
nargs="+",
help="Snowboy model settings (model, sensitivity, audio_gain, apply_frontend)",
)
parser.add_argument(
"--model-dir",
action="append",
default=[],
help="Directories with snowboy models",
)
parser.add_argument(
"--wakeword-id",
action="append",
help="Wakeword IDs of each keyword (default: use file name)",
)
parser.add_argument(
"--stdin-audio", action="store_true", help="Read WAV audio from stdin"
)
parser.add_argument(
"--udp-audio",
nargs=3,
action="append",
help="Host/port/siteId for UDP audio input",
)
parser.add_argument("--lang", help="Set lang in hotword detected message")
hermes_cli.add_hermes_args(parser)
args = parser.parse_args()
hermes_cli.setup_logging(args)
_LOGGER.debug(args)
if args.model_dir:
args.model_dir = [Path(d) for d in args.model_dir]
args.model_dir.append(_DIR / "models")
models: typing.List[SnowboyModel] = []
for model_settings in args.model:
model_path = Path(model_settings[0])
if not model_path.is_file():
for model_dir in args.model_dir:
maybe_path = model_dir / model_path.name
if maybe_path.is_file():
model_path = maybe_path
break
_LOGGER.debug("Loading model from %s", str(model_path))
model = SnowboyModel(model_path=model_path)
if len(model_settings) > 1:
model.sensitivity = model_settings[1]
if len(model_settings) > 2:
model.audio_gain = float(model_settings[2])
if len(model_settings) > 3:
model.apply_frontend = model_settings[3].strip().lower() == "true"
models.append(model)
wakeword_ids = [
kn[1]
for kn in itertools.zip_longest(
args.model, args.wakeword_id or [], fillvalue=""
)
]
if args.stdin_audio:
client = None
hermes = WakeHermesMqtt(client, models, wakeword_ids)
for site_id in args.site_id:
hermes.load_detectors(site_id)
if os.isatty(sys.stdin.fileno()):
print("Reading WAV data from stdin...", file=sys.stderr)
wav_bytes = sys.stdin.buffer.read()
for result in hermes.handle_audio_frame(wav_bytes):
result_dict = dataclasses.asdict(result)
json.dump(result_dict, sys.stdout, ensure_ascii=False)
return
udp_audio = []
if args.udp_audio:
udp_audio = [
(host, int(port), site_id) for host, port, site_id in args.udp_audio
]
client = mqtt.Client()
hermes = WakeHermesMqtt(
client,
models,
wakeword_ids,
model_dirs=args.model_dir,
udp_audio=udp_audio,
site_ids=args.site_id,
lang=args.lang,
)
for site_id in args.site_id:
hermes.load_detectors(site_id)
_LOGGER.debug("Connecting to %s:%s", args.host, args.port)
hermes_cli.connect(client, args)
client.loop_start()
try:
asyncio.run(hermes.handle_messages_async())
except KeyboardInterrupt:
pass
finally:
_LOGGER.debug("Shutting down")
client.loop_stop()
if __name__ == "__main__":
main()
| true
| true
|
f702dbc4b44a28bf194e2859e22aad59ea011f89
| 318
|
py
|
Python
|
parser.py
|
kylelaker/cfn-joiner-parser
|
b154c0baaff7cc14f71b2ff5e2fe24d484641941
|
[
"MIT"
] | 1
|
2021-03-22T15:19:34.000Z
|
2021-03-22T15:19:34.000Z
|
parser.py
|
kylelaker/cfn-joiner-parser
|
b154c0baaff7cc14f71b2ff5e2fe24d484641941
|
[
"MIT"
] | null | null | null |
parser.py
|
kylelaker/cfn-joiner-parser
|
b154c0baaff7cc14f71b2ff5e2fe24d484641941
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import yaml
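# example: the document {"Fn::Join": ["-", ["a", "b", "c"]]} prints "a-b-c"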
def main():
args = sys.argv[1:]
    # read YAML from the given file path, or fall back to stdin
    if args:
        with open(args[0]) as source:
            data = yaml.safe_load(source)
    else:
        data = yaml.safe_load(sys.stdin)
join_args = data['Fn::Join']
contents = join_args[0].join(join_args[1])
print(contents, end='')
if __name__ == '__main__':
sys.exit(main())
| 16.736842
| 46
| 0.622642
|
import sys
import yaml
def main():
args = sys.argv[1:]
    if args:
        with open(args[0]) as source:
            data = yaml.safe_load(source)
    else:
        data = yaml.safe_load(sys.stdin)
join_args = data['Fn::Join']
contents = join_args[0].join(join_args[1])
print(contents, end='')
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
f702dc9d1eb4f79c76d2aa25ecbf10919de5b2a2
| 387
|
py
|
Python
|
take_single_user_input/c_windows_only.py
|
hafiz-kamilin/python_example_program
|
78e84eff9e8c266b56c4e58cf2ba2d0f198f77fd
|
[
"MIT"
] | 1
|
2020-04-29T12:12:10.000Z
|
2020-04-29T12:12:10.000Z
|
take_single_user_input/c_windows_only.py
|
hafiz-kamilin/miscellaneous_python_program
|
78e84eff9e8c266b56c4e58cf2ba2d0f198f77fd
|
[
"MIT"
] | null | null | null |
take_single_user_input/c_windows_only.py
|
hafiz-kamilin/miscellaneous_python_program
|
78e84eff9e8c266b56c4e58cf2ba2d0f198f77fd
|
[
"MIT"
] | 1
|
2018-11-03T00:10:53.000Z
|
2018-11-03T00:10:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# class for windows getch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
getch = _GetchWindows()
# print instruction
print ("Please enter something: ")
# read user input and save in into x
x = getch()
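# note: on Python 3, msvcrt.getch() returns bytes (e.g. b'a'); use x.decode() for str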
# print user input saved in x
print(x)
| 19.35
| 36
| 0.656331
|
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
getch = _GetchWindows()
print ("Please enter something: ")
x = getch()
print(x)
| true
| true
|
f702ddb6130de8cfc377c1f3f12d46bcdd929000
| 4,376
|
py
|
Python
|
odk_viewer/tests/test_remongo.py
|
Ecotrust/formhub
|
05033bb5aa152cc2cbcd7382c2c999d82b2c3276
|
[
"BSD-2-Clause"
] | 123
|
2015-01-08T09:21:05.000Z
|
2021-11-14T19:45:23.000Z
|
odk_viewer/tests/test_remongo.py
|
Ecotrust/formhub
|
05033bb5aa152cc2cbcd7382c2c999d82b2c3276
|
[
"BSD-2-Clause"
] | 16
|
2015-02-13T16:56:42.000Z
|
2021-02-20T23:58:43.000Z
|
odk_viewer/tests/test_remongo.py
|
Ecotrust/formhub
|
05033bb5aa152cc2cbcd7382c2c999d82b2c3276
|
[
"BSD-2-Clause"
] | 110
|
2015-01-19T14:34:06.000Z
|
2021-02-01T14:55:11.000Z
|
import os
from django.conf import settings
from main.tests.test_base import MainTestCase
from odk_viewer.models import ParsedInstance
from odk_viewer.management.commands.remongo import Command
from django.core.management import call_command
from common_tags import USERFORM_ID
class TestRemongo(MainTestCase):
def test_remongo_in_batches(self):
self._publish_transportation_form()
# submit 4 instances
self._make_submissions()
self.assertEqual(ParsedInstance.objects.count(), 4)
# clear mongo
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3)
        # mongo db should now have 4 records
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 4)
def test_remongo_with_username_id_string(self):
self._publish_transportation_form()
        # submit 1 instance
s = self.surveys[0]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
# publish and submit for a different user
self._logout()
self._create_user_and_login("harry", "harry")
self._publish_transportation_form()
s = self.surveys[1]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
self.assertEqual(ParsedInstance.objects.count(), 2)
# clear mongo
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3, username=self.user.username,
id_string=self.xform.id_string)
        # mongo db should now have 1 record for this user's form
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 1)
def test_indexes_exist(self):
"""
Make sure the required indexes are set, _userform_id as of now
"""
call_command('remongo')
# if index exists, ensure index returns None
# list of indexes to check for
index_list = [USERFORM_ID]
# get index info
index_info = settings.MONGO_DB.instances.index_information()
# index_info looks like this - {u'_id_': {u'key': [(u'_id', 1)], u'v': 1}, u'_userform_id_1': {u'key': [(u'_userform_id', 1)], u'v': 1}}
# lets make a list of the indexes
existing_indexes = [v['key'][0][0] for v in index_info.itervalues() if v['key'][0][1] == 1]
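        # note: itervalues() is Python 2-only; on Python 3 this would be index_info.values()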
all_indexes_found = True
for index_item in index_list:
if index_item not in existing_indexes:
all_indexes_found = False
break
self.assertTrue(all_indexes_found)
def test_sync_mongo_with_all_option_deletes_existing_records(self):
self._publish_transportation_form()
userform_id = "%s_%s" % (self.user.username, self.xform.id_string)
initial_mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
for i in range(len(self.surveys)):
self._submit_transport_instance(i)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
# check our mongo count
self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
# add dummy instance
settings.MONGO_DB.instances.save(
{"_id": 12345, "_userform_id": userform_id})
# make sure the dummy is returned as part of the forms mongo instances
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
# call sync_mongo WITHOUT the all option
call_command("sync_mongo", remongo=True)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
# call sync_mongo WITH the all option
call_command("sync_mongo", remongo=True, update_all=True)
# check that we are back to just the submitted set
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys))
| 43.76
| 144
| 0.649452
|
import os
from django.conf import settings
from main.tests.test_base import MainTestCase
from odk_viewer.models import ParsedInstance
from odk_viewer.management.commands.remongo import Command
from django.core.management import call_command
from common_tags import USERFORM_ID
class TestRemongo(MainTestCase):
def test_remongo_in_batches(self):
self._publish_transportation_form()
self._make_submissions()
self.assertEqual(ParsedInstance.objects.count(), 4)
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3)
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 4)
def test_remongo_with_username_id_string(self):
self._publish_transportation_form()
s = self.surveys[0]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
self._logout()
self._create_user_and_login("harry", "harry")
self._publish_transportation_form()
s = self.surveys[1]
self._make_submission(os.path.join(self.this_directory, 'fixtures',
'transportation', 'instances', s, s + '.xml'))
self.assertEqual(ParsedInstance.objects.count(), 2)
settings.MONGO_DB.instances.drop()
c = Command()
c.handle(batchsize=3, username=self.user.username,
id_string=self.xform.id_string)
count = settings.MONGO_DB.instances.count()
self.assertEqual(count, 1)
def test_indexes_exist(self):
call_command('remongo')
index_list = [USERFORM_ID]
index_info = settings.MONGO_DB.instances.index_information()
existing_indexes = [v['key'][0][0] for v in index_info.itervalues() if v['key'][0][1] == 1]
all_indexes_found = True
for index_item in index_list:
if index_item not in existing_indexes:
all_indexes_found = False
break
self.assertTrue(all_indexes_found)
def test_sync_mongo_with_all_option_deletes_existing_records(self):
self._publish_transportation_form()
userform_id = "%s_%s" % (self.user.username, self.xform.id_string)
initial_mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
for i in range(len(self.surveys)):
self._submit_transport_instance(i)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
settings.MONGO_DB.instances.save(
{"_id": 12345, "_userform_id": userform_id})
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
call_command("sync_mongo", remongo=True)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys) + 1)
call_command("sync_mongo", remongo=True, update_all=True)
mongo_count = settings.MONGO_DB.instances.find(
{USERFORM_ID: userform_id}).count()
self.assertEqual(mongo_count,
initial_mongo_count + len(self.surveys))
| true
| true
|
f702de434a6aa087fd1d679fab09a173f17cae3d
| 1,278
|
py
|
Python
|
lesson_3/test_fixture1.py
|
Ryne777/Stepik_auto_test
|
7543c6616db9945fd56433877a292a9bfe80eb8d
|
[
"Apache-2.0"
] | null | null | null |
lesson_3/test_fixture1.py
|
Ryne777/Stepik_auto_test
|
7543c6616db9945fd56433877a292a9bfe80eb8d
|
[
"Apache-2.0"
] | null | null | null |
lesson_3/test_fixture1.py
|
Ryne777/Stepik_auto_test
|
7543c6616db9945fd56433877a292a9bfe80eb8d
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
link = "http://selenium1py.pythonanywhere.com/"
class TestMainPage1():
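    # class-level fixture: one browser instance is shared by all tests in this class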
@classmethod
def setup_class(self):
print("\nstart browser for test suite..")
self.browser = webdriver.Chrome()
@classmethod
def teardown_class(self):
print("quit browser for test suite..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(
".basket-mini .btn-group > a")
class TestMainPage2():
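    # method-level fixture: a fresh browser is started for every test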
def setup_method(self):
print("start browser for test..")
self.browser = webdriver.Chrome()
def teardown_method(self):
print("quit browser for test..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(
".basket-mini .btn-group > a")
| 27.782609
| 65
| 0.677621
|
from selenium import webdriver
link = "http://selenium1py.pythonanywhere.com/"
class TestMainPage1():
@classmethod
def setup_class(self):
print("\nstart browser for test suite..")
self.browser = webdriver.Chrome()
@classmethod
def teardown_class(self):
print("quit browser for test suite..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(
".basket-mini .btn-group > a")
class TestMainPage2():
def setup_method(self):
print("start browser for test..")
self.browser = webdriver.Chrome()
def teardown_method(self):
print("quit browser for test..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(
".basket-mini .btn-group > a")
| true
| true
|
f702de58756cfee4caf31af038f6415d191aa875
| 7,895
|
py
|
Python
|
himalaya/kernel_ridge/tests/test_random_search_kernel.py
|
mvdoc/himalaya
|
7e3866287b835e2cc0a5c9848331e19c14896309
|
[
"BSD-3-Clause"
] | null | null | null |
himalaya/kernel_ridge/tests/test_random_search_kernel.py
|
mvdoc/himalaya
|
7e3866287b835e2cc0a5c9848331e19c14896309
|
[
"BSD-3-Clause"
] | null | null | null |
himalaya/kernel_ridge/tests/test_random_search_kernel.py
|
mvdoc/himalaya
|
7e3866287b835e2cc0a5c9848331e19c14896309
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import numpy as np
import sklearn.linear_model
import sklearn.model_selection
import scipy.linalg
from himalaya.backend import set_backend
from himalaya.backend import ALL_BACKENDS
from himalaya.utils import assert_array_almost_equal
from himalaya.scoring import r2_score
from himalaya.kernel_ridge import solve_multiple_kernel_ridge_random_search
def _create_dataset(backend, n_targets=4):
n_featuress = (100, 200)
n_samples = 80
n_gammas = 3
Xs = [
backend.asarray(backend.randn(n_samples, n_features), backend.float64)
for n_features in n_featuress
]
Ks = backend.stack([X @ X.T for X in Xs])
ws = [
backend.asarray(backend.randn(n_features, n_targets), backend.float64)
for n_features in n_featuress
]
Ys = backend.stack([X @ w for X, w in zip(Xs, ws)])
Y = Ys.sum(0)
gammas = backend.asarray(backend.rand(n_gammas, Ks.shape[0]),
backend.float64)
gammas /= gammas.sum(1)[:, None]
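    # normalize so each row of gammas sums to 1 (per-kernel weights)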
return Ks, Y, gammas, Xs
@pytest.mark.parametrize('local_alpha', [True, False])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_local_alpha(
backend, local_alpha):
_test_solve_multiple_kernel_ridge_random_search(backend=backend,
local_alpha=local_alpha)
@pytest.mark.parametrize('n_targets_batch', [None, 3])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_n_targets_batch(
backend, n_targets_batch):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, n_targets_batch=n_targets_batch)
@pytest.mark.parametrize('n_alphas_batch', [None, 2])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_n_alphas_batch(
backend, n_alphas_batch):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, n_alphas_batch=n_alphas_batch)
@pytest.mark.parametrize('return_weights', ['primal', 'dual'])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_return_weights(
backend, return_weights):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, return_weights=return_weights)
@pytest.mark.parametrize('diagonalize_method', ['eigh', 'svd'])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_diagonalize_method(
backend, diagonalize_method):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, diagonalize_method=diagonalize_method)
def _test_solve_multiple_kernel_ridge_random_search(
backend, n_targets_batch=None, n_alphas_batch=None,
return_weights="dual", diagonalize_method="eigh", local_alpha=True):
backend = set_backend(backend)
Ks, Y, gammas, Xs = _create_dataset(backend)
alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)
n_targets = Y.shape[1]
cv = sklearn.model_selection.check_cv(10)
############
# run solver
results = solve_multiple_kernel_ridge_random_search(
Ks, Y, n_iter=gammas, alphas=alphas, score_func=r2_score, cv=cv,
n_targets_batch=n_targets_batch, Xs=Xs, progress_bar=False,
return_weights=return_weights, n_alphas_batch=n_alphas_batch,
diagonalize_method=diagonalize_method, local_alpha=local_alpha)
best_deltas, refit_weights, cv_scores = results
#########################################
# compare with sklearn.linear_model.Ridge
if local_alpha: # only compare when each target optimizes alpha
test_scores = []
for gamma in backend.sqrt(gammas):
X = backend.concatenate([x * g for x, g in zip(Xs, gamma)], 1)
for train, test in cv.split(X):
for alpha in alphas:
model = sklearn.linear_model.Ridge(
alpha=backend.to_numpy(alpha), fit_intercept=False)
model = model.fit(backend.to_numpy(X[train]),
backend.to_numpy(Y[train]))
predictions = backend.asarray_like(
model.predict(backend.to_numpy(X[test])), Y)
test_scores.append(r2_score(Y[test], predictions))
test_scores = backend.stack(test_scores)
test_scores = test_scores.reshape(len(gammas), cv.get_n_splits(),
len(alphas), n_targets)
test_scores_mean = backend.max(test_scores.mean(1), 1)
assert_array_almost_equal(cv_scores, test_scores_mean, decimal=5)
######################
# test refited_weights
for tt in range(n_targets):
gamma = backend.exp(best_deltas[:, tt])
alpha = 1.0
if return_weights == 'primal':
# compare primal weights with sklearn.linear_model.Ridge
X = backend.concatenate(
[X * backend.sqrt(g) for X, g in zip(Xs, gamma)], 1)
model = sklearn.linear_model.Ridge(fit_intercept=False,
alpha=backend.to_numpy(alpha))
w1 = model.fit(backend.to_numpy(X),
backend.to_numpy(Y[:, tt])).coef_
w1 = np.split(w1, np.cumsum([X.shape[1] for X in Xs][:-1]), axis=0)
w1 = [backend.asarray(w) for w in w1]
w1_scaled = backend.concatenate(
[w * backend.sqrt(g) for w, g, in zip(w1, gamma)])
assert_array_almost_equal(w1_scaled, refit_weights[:, tt],
decimal=5)
elif return_weights == 'dual':
# compare dual weights with scipy.linalg.solve
Ks_64 = backend.asarray(Ks, dtype=backend.float64)
gamma_64 = backend.asarray(gamma, dtype=backend.float64)
K = backend.matmul(Ks_64.T, gamma_64).T
reg = backend.asarray_like(np.eye(K.shape[0]), K) * alpha
Y_64 = backend.asarray(Y, dtype=backend.float64)
c1 = scipy.linalg.solve(backend.to_numpy(K + reg),
backend.to_numpy(Y_64[:, tt]))
c1 = backend.asarray_like(c1, K)
assert_array_almost_equal(c1, refit_weights[:, tt], decimal=5)
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_single_alpha_numpy(backend):
backend = set_backend(backend)
# just a smoke test, so make it minimal
Ks, Y, gammas, Xs = _create_dataset(backend)
alphas = 1.0
# make Y a numpy array
Y = backend.to_numpy(Y)
results = solve_multiple_kernel_ridge_random_search(
Ks, Y, n_iter=gammas, alphas=alphas
)
@pytest.mark.parametrize('backend', ALL_BACKENDS)
@pytest.mark.parametrize('n_kernels', [1, 2])
def test_solve_multiple_kernel_ridge_random_search_global_alpha(backend, n_kernels):
backend = set_backend(backend)
# add more targets to make sure we get some variability
Ks, Y, gammas, Xs = _create_dataset(backend, n_targets=20)
alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)
cv = sklearn.model_selection.check_cv(5)
deltas, *_, best_alphas = solve_multiple_kernel_ridge_random_search(
Ks[:n_kernels],
Y,
n_iter=50,
progress_bar=False,
alphas=alphas,
cv=cv,
local_alpha=False,
return_alphas=True
)
# test that we return a single combination of deltas
deltas = backend.to_numpy(deltas)
if deltas.ndim == 1:
assert np.allclose(deltas[0], deltas)
else:
for dd in deltas:
assert np.allclose(dd[0], dd)
# test that we return a single alpha
best_alphas = backend.to_numpy(best_alphas)
assert np.allclose(best_alphas[0], best_alphas)
| 39.873737
| 84
| 0.659658
|
import pytest
import numpy as np
import sklearn.linear_model
import sklearn.model_selection
import scipy.linalg
from himalaya.backend import set_backend
from himalaya.backend import ALL_BACKENDS
from himalaya.utils import assert_array_almost_equal
from himalaya.scoring import r2_score
from himalaya.kernel_ridge import solve_multiple_kernel_ridge_random_search
def _create_dataset(backend, n_targets=4):
n_featuress = (100, 200)
n_samples = 80
n_gammas = 3
Xs = [
backend.asarray(backend.randn(n_samples, n_features), backend.float64)
for n_features in n_featuress
]
Ks = backend.stack([X @ X.T for X in Xs])
ws = [
backend.asarray(backend.randn(n_features, n_targets), backend.float64)
for n_features in n_featuress
]
Ys = backend.stack([X @ w for X, w in zip(Xs, ws)])
Y = Ys.sum(0)
gammas = backend.asarray(backend.rand(n_gammas, Ks.shape[0]),
backend.float64)
gammas /= gammas.sum(1)[:, None]
return Ks, Y, gammas, Xs
@pytest.mark.parametrize('local_alpha', [True, False])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_local_alpha(
backend, local_alpha):
_test_solve_multiple_kernel_ridge_random_search(backend=backend,
local_alpha=local_alpha)
@pytest.mark.parametrize('n_targets_batch', [None, 3])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_n_targets_batch(
backend, n_targets_batch):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, n_targets_batch=n_targets_batch)
@pytest.mark.parametrize('n_alphas_batch', [None, 2])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_n_alphas_batch(
backend, n_alphas_batch):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, n_alphas_batch=n_alphas_batch)
@pytest.mark.parametrize('return_weights', ['primal', 'dual'])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_return_weights(
backend, return_weights):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, return_weights=return_weights)
@pytest.mark.parametrize('diagonalize_method', ['eigh', 'svd'])
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_diagonalize_method(
backend, diagonalize_method):
_test_solve_multiple_kernel_ridge_random_search(
backend=backend, diagonalize_method=diagonalize_method)
def _test_solve_multiple_kernel_ridge_random_search(
backend, n_targets_batch=None, n_alphas_batch=None,
return_weights="dual", diagonalize_method="eigh", local_alpha=True):
backend = set_backend(backend)
Ks, Y, gammas, Xs = _create_dataset(backend)
alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)
n_targets = Y.shape[1]
cv = sklearn.model_selection.check_cv(10)
results = solve_multiple_kernel_ridge_random_search(
Ks, Y, n_iter=gammas, alphas=alphas, score_func=r2_score, cv=cv,
n_targets_batch=n_targets_batch, Xs=Xs, progress_bar=False,
return_weights=return_weights, n_alphas_batch=n_alphas_batch,
diagonalize_method=diagonalize_method, local_alpha=local_alpha)
best_deltas, refit_weights, cv_scores = results
    if local_alpha:
        test_scores = []
        for gamma in backend.sqrt(gammas):
            X = backend.concatenate([x * g for x, g in zip(Xs, gamma)], 1)
            for train, test in cv.split(X):
                for alpha in alphas:
                    model = sklearn.linear_model.Ridge(
                        alpha=backend.to_numpy(alpha), fit_intercept=False)
                    model = model.fit(backend.to_numpy(X[train]),
                                      backend.to_numpy(Y[train]))
                    predictions = backend.asarray_like(
                        model.predict(backend.to_numpy(X[test])), Y)
                    test_scores.append(r2_score(Y[test], predictions))
        test_scores = backend.stack(test_scores)
        test_scores = test_scores.reshape(len(gammas), cv.get_n_splits(),
                                          len(alphas), n_targets)
        test_scores_mean = backend.max(test_scores.mean(1), 1)
        assert_array_almost_equal(cv_scores, test_scores_mean, decimal=5)
for tt in range(n_targets):
gamma = backend.exp(best_deltas[:, tt])
alpha = 1.0
if return_weights == 'primal':
X = backend.concatenate(
[X * backend.sqrt(g) for X, g in zip(Xs, gamma)], 1)
model = sklearn.linear_model.Ridge(fit_intercept=False,
alpha=backend.to_numpy(alpha))
w1 = model.fit(backend.to_numpy(X),
backend.to_numpy(Y[:, tt])).coef_
w1 = np.split(w1, np.cumsum([X.shape[1] for X in Xs][:-1]), axis=0)
w1 = [backend.asarray(w) for w in w1]
w1_scaled = backend.concatenate(
[w * backend.sqrt(g) for w, g, in zip(w1, gamma)])
assert_array_almost_equal(w1_scaled, refit_weights[:, tt],
decimal=5)
elif return_weights == 'dual':
Ks_64 = backend.asarray(Ks, dtype=backend.float64)
gamma_64 = backend.asarray(gamma, dtype=backend.float64)
K = backend.matmul(Ks_64.T, gamma_64).T
reg = backend.asarray_like(np.eye(K.shape[0]), K) * alpha
Y_64 = backend.asarray(Y, dtype=backend.float64)
c1 = scipy.linalg.solve(backend.to_numpy(K + reg),
backend.to_numpy(Y_64[:, tt]))
c1 = backend.asarray_like(c1, K)
assert_array_almost_equal(c1, refit_weights[:, tt], decimal=5)
@pytest.mark.parametrize('backend', ALL_BACKENDS)
def test_solve_multiple_kernel_ridge_random_search_single_alpha_numpy(backend):
backend = set_backend(backend)
Ks, Y, gammas, Xs = _create_dataset(backend)
alphas = 1.0
Y = backend.to_numpy(Y)
results = solve_multiple_kernel_ridge_random_search(
Ks, Y, n_iter=gammas, alphas=alphas
)
@pytest.mark.parametrize('backend', ALL_BACKENDS)
@pytest.mark.parametrize('n_kernels', [1, 2])
def test_solve_multiple_kernel_ridge_random_search_global_alpha(backend, n_kernels):
backend = set_backend(backend)
Ks, Y, gammas, Xs = _create_dataset(backend, n_targets=20)
alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)
cv = sklearn.model_selection.check_cv(5)
deltas, *_, best_alphas = solve_multiple_kernel_ridge_random_search(
Ks[:n_kernels],
Y,
n_iter=50,
progress_bar=False,
alphas=alphas,
cv=cv,
local_alpha=False,
return_alphas=True
)
deltas = backend.to_numpy(deltas)
if deltas.ndim == 1:
assert np.allclose(deltas[0], deltas)
else:
for dd in deltas:
assert np.allclose(dd[0], dd)
best_alphas = backend.to_numpy(best_alphas)
assert np.allclose(best_alphas[0], best_alphas)
| true
| true
|
f702df62140e8d6bba1cd1a58b39f00070c3a064
| 5,343
|
py
|
Python
|
src/drugrelink/download.py
|
lingling93/comparison
|
9a9bbf57168b03c9097af22ecee660b3f432b1dd
|
[
"MIT"
] | 2
|
2019-04-11T14:06:00.000Z
|
2019-07-03T21:50:58.000Z
|
src/drugrelink/download.py
|
lingling93/comparison
|
9a9bbf57168b03c9097af22ecee660b3f432b1dd
|
[
"MIT"
] | 9
|
2019-04-19T19:33:54.000Z
|
2019-05-23T09:59:13.000Z
|
src/drugrelink/download.py
|
lingling93/comparison
|
9a9bbf57168b03c9097af22ecee660b3f432b1dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Helper functions for getting resources."""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional
from urllib.request import urlretrieve
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_DIRECTORY = os.path.abspath(os.path.join(HERE, os.pardir, os.pardir, 'data'))
DATA_DIRECTORY = os.environ.get('REPOSITIONING_COMPARISON_DIRECTORY', DEFAULT_DIRECTORY)
# URLs from dhimmel/integrate
NODE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/nodes.tsv'
EDGE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/edges.sif.gz'
PERMUTATION1_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-1.json.bz2'
PERMUTATION2_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-2.json.bz2'
PERMUTATION3_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-3.json.bz2'
PERMUTATION4_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-4.json.bz2'
PERMUTATION5_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-5.json.bz2'
PERMUTATION_DATA_FILE_FMT = 'hetnet_perm-{}.json.bz2'
PERMUTATION_DATA_URL_FMT = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-{}.json.bz2'
# URLs from dhimmel/learn
TRANSFORMED_FEATURES_URL = 'https://github.com/dhimmel/learn/blob/master/prediction/features/features.tsv.bz2?raw=true'
VALIDATE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/validate/validation-statuses.tsv'
SYMPTOMATIC_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/prediction/predictions/probabilities.tsv'
REPURPOSE_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repurpose_overlap.json'
REPO_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repo_data.csv'
@dataclass
class DataPaths:
"""Container for the paths for training."""
node_data_path: str
edge_data_path: str
transformed_features_path: str
validate_data_path: str
symptomatic_data_path: str
permutation_paths: List[str]
data_edge2vec_path: str
repurpose_data_path: str
repo_data_path: str
def get_data_paths(directory: Optional[str] = None) -> DataPaths:
"""Ensure Himmelstein's data files are downloaded."""
if directory is None:
directory = DATA_DIRECTORY
os.makedirs(directory, exist_ok=True)
node_data_path = os.path.join(directory, 'nodes.tsv')
if not os.path.exists(node_data_path):
logger.info(f'downloading {NODE_DATA_URL}')
urlretrieve(NODE_DATA_URL, node_data_path)
edge_data_path = os.path.join(directory, 'edges.sif.gz')
if not os.path.exists(edge_data_path):
logger.info(f'downloading {EDGE_DATA_URL}')
urlretrieve(EDGE_DATA_URL, edge_data_path)
transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')
if not os.path.exists(transformed_features_path):
logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')
urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)
validate_data_path = os.path.join(directory, 'validation-statuses.tsv')
if not os.path.exists(validate_data_path):
logger.info(f'downloading {VALIDATE_DATA_URL}')
urlretrieve(VALIDATE_DATA_URL, validate_data_path)
symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')
if not os.path.exists(symptomatic_data_path):
logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')
urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)
    repurpose_data_path = os.path.join(directory, 'repurpose_overlap.json')
if not os.path.exists(repurpose_data_path):
logger.info(f'downloading {REPURPOSE_DATA_URL}')
urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)
repo_data_path = os.path.join(directory, 'repo_data.csv')
if not os.path.exists(repo_data_path):
logger.info(f'downloading {REPO_DATA_URL}')
urlretrieve(REPO_DATA_URL, repo_data_path)
permutation_directory = os.path.join(directory, "permutations")
os.makedirs(permutation_directory, exist_ok=True)
permutation_paths = []
for i in range(5):
permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))
if not os.path.exists(permutation_data_path):
url = PERMUTATION_DATA_URL_FMT.format(i + 1)
logger.info(f'downloading {url}')
urlretrieve(url, permutation_data_path)
permutation_paths.append(permutation_data_path)
data_edge2vec_path = os.path.join(directory, 'data_edge2vec')
return DataPaths(
node_data_path=node_data_path,
edge_data_path=edge_data_path,
transformed_features_path=transformed_features_path,
validate_data_path=validate_data_path,
symptomatic_data_path=symptomatic_data_path,
permutation_paths=permutation_paths,
data_edge2vec_path=data_edge2vec_path,
        repurpose_data_path=repurpose_data_path,
        repo_data_path=repo_data_path
)
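# --- illustration, not part of the original module ---------------------------
# A minimal hedged sketch of consuming the container above; enabling INFO
# logging makes the download messages visible on the first run.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    paths = get_data_paths()
    print('nodes file:', paths.node_data_path)
    print('permutation files:', len(paths.permutation_paths))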
| 43.795082
| 125
| 0.764926
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional
from urllib.request import urlretrieve
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
DEFAULT_DIRECTORY = os.path.abspath(os.path.join(HERE, os.pardir, os.pardir, 'data'))
DATA_DIRECTORY = os.environ.get('REPOSITIONING_COMPARISON_DIRECTORY', DEFAULT_DIRECTORY)
NODE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/nodes.tsv'
EDGE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/edges.sif.gz'
PERMUTATION1_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-1.json.bz2'
PERMUTATION2_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-2.json.bz2'
PERMUTATION3_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-3.json.bz2'
PERMUTATION4_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-4.json.bz2'
PERMUTATION5_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-5.json.bz2'
PERMUTATION_DATA_FILE_FMT = 'hetnet_perm-{}.json.bz2'
PERMUTATION_DATA_URL_FMT = 'https://raw.githubusercontent.com/dhimmel/integrate/master/data/permuted/hetnet_perm-{}.json.bz2'
TRANSFORMED_FEATURES_URL = 'https://github.com/dhimmel/learn/blob/master/prediction/features/features.tsv.bz2?raw=true'
VALIDATE_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/validate/validation-statuses.tsv'
SYMPTOMATIC_DATA_URL = 'https://raw.githubusercontent.com/dhimmel/learn/master/prediction/predictions/probabilities.tsv'
REPURPOSE_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repurpose_overlap.json'
REPO_DATA_URL = 'https://raw.githubusercontent.com/drugrelink/drugrelink/master/notebooks/repo_data.csv'
@dataclass
class DataPaths:
node_data_path: str
edge_data_path: str
transformed_features_path: str
validate_data_path: str
symptomatic_data_path: str
permutation_paths: List[str]
data_edge2vec_path: str
repurpose_data_path: str
repo_data_path: str
def get_data_paths(directory: Optional[str] = None) -> DataPaths:
if directory is None:
directory = DATA_DIRECTORY
os.makedirs(directory, exist_ok=True)
node_data_path = os.path.join(directory, 'nodes.tsv')
if not os.path.exists(node_data_path):
logger.info(f'downloading {NODE_DATA_URL}')
urlretrieve(NODE_DATA_URL, node_data_path)
edge_data_path = os.path.join(directory, 'edges.sif.gz')
if not os.path.exists(edge_data_path):
logger.info(f'downloading {EDGE_DATA_URL}')
urlretrieve(EDGE_DATA_URL, edge_data_path)
transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')
if not os.path.exists(transformed_features_path):
logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')
urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)
validate_data_path = os.path.join(directory, 'validation-statuses.tsv')
if not os.path.exists(validate_data_path):
logger.info(f'downloading {VALIDATE_DATA_URL}')
urlretrieve(VALIDATE_DATA_URL, validate_data_path)
symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')
if not os.path.exists(symptomatic_data_path):
logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')
urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)
    repurpose_data_path = os.path.join(directory, 'repurpose_overlap.json')
if not os.path.exists(repurpose_data_path):
logger.info(f'downloading {REPURPOSE_DATA_URL}')
urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)
repo_data_path = os.path.join(directory, 'repo_data.csv')
if not os.path.exists(repo_data_path):
logger.info(f'downloading {REPO_DATA_URL}')
urlretrieve(REPO_DATA_URL, repo_data_path)
permutation_directory = os.path.join(directory, "permutations")
os.makedirs(permutation_directory, exist_ok=True)
permutation_paths = []
for i in range(5):
permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))
if not os.path.exists(permutation_data_path):
url = PERMUTATION_DATA_URL_FMT.format(i + 1)
logger.info(f'downloading {url}')
urlretrieve(url, permutation_data_path)
permutation_paths.append(permutation_data_path)
data_edge2vec_path = os.path.join(directory, 'data_edge2vec')
return DataPaths(
node_data_path=node_data_path,
edge_data_path=edge_data_path,
transformed_features_path=transformed_features_path,
validate_data_path=validate_data_path,
symptomatic_data_path=symptomatic_data_path,
permutation_paths=permutation_paths,
data_edge2vec_path=data_edge2vec_path,
        repurpose_data_path=repurpose_data_path,
        repo_data_path=repo_data_path
)
| true
| true
|
f702df7d8ff5a627e903de5c57770336a7b23d38
| 9,024
|
py
|
Python
|
cinder/brick/initiator/linuxscsi.py
|
tmenjo/cinder-2015.1.1
|
1c83a5daa8041cb99bc85dd0301786d8ca43055a
|
[
"Apache-2.0"
] | null | null | null |
cinder/brick/initiator/linuxscsi.py
|
tmenjo/cinder-2015.1.1
|
1c83a5daa8041cb99bc85dd0301786d8ca43055a
|
[
"Apache-2.0"
] | null | null | null |
cinder/brick/initiator/linuxscsi.py
|
tmenjo/cinder-2015.1.1
|
1c83a5daa8041cb99bc85dd0301786d8ca43055a
|
[
"Apache-2.0"
] | null | null | null |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic linux scsi subsystem and Multipath utilities.
Note, this is not iSCSI.
"""
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _, _LW, _LE
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)")
class LinuxSCSI(executor.Executor):
def __init__(self, root_helper, execute=putils.execute,
*args, **kwargs):
super(LinuxSCSI, self).__init__(root_helper, execute,
*args, **kwargs)
def echo_scsi_command(self, path, content):
"""Used to echo strings to scsi subsystem."""
args = ["-a", path]
kwargs = dict(process_input=content,
run_as_root=True,
root_helper=self._root_helper)
self._execute('tee', *args, **kwargs)
def get_name_from_path(self, path):
"""Translates /dev/disk/by-path/ entry to /dev/sdX."""
name = os.path.realpath(path)
if name.startswith("/dev/"):
return name
else:
return None
def remove_scsi_device(self, device):
"""Removes a scsi device based upon /dev/sdX name."""
path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
if os.path.exists(path):
# flush any outstanding IO first
self.flush_device_io(device)
LOG.debug("Remove SCSI device(%s) with %s" % (device, path))
self.echo_scsi_command(path, "1")
def wait_for_volume_removal(self, volume_path):
"""This is used to ensure that volumes are gone."""
def _wait_for_volume_removal(volume_path):
LOG.debug("Waiting for SCSI mount point %s to be removed.",
volume_path)
if os.path.exists(volume_path):
if self.tries >= self.scan_attempts:
msg = _LE("Exceeded the number of attempts to detect "
"volume removal.")
LOG.error(msg)
raise exception.VolumePathNotRemoved(
volume_path=volume_path)
LOG.debug("%(path)s still exists, rescanning. Try number: "
"%(tries)s",
{'path': volume_path, 'tries': self.tries})
self.tries = self.tries + 1
else:
LOG.debug("SCSI mount point %s has been removed.", volume_path)
raise loopingcall.LoopingCallDone()
# Setup a loop here to give the kernel time
# to remove the volume from /dev/disk/by-path/
self.tries = 0
self.scan_attempts = 3
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_volume_removal, volume_path)
timer.start(interval=2).wait()
def get_device_info(self, device):
(out, _err) = self._execute('sg_scan', device, run_as_root=True,
root_helper=self._root_helper)
dev_info = {'device': device, 'host': None,
'channel': None, 'id': None, 'lun': None}
if out:
line = out.strip()
line = line.replace(device + ": ", "")
info = line.split(" ")
for item in info:
if '=' in item:
pair = item.split('=')
dev_info[pair[0]] = pair[1]
elif 'scsi' in item:
dev_info['host'] = item.replace('scsi', '')
return dev_info
def remove_multipath_device(self, multipath_name):
"""This removes LUNs associated with a multipath device
and the multipath device itself.
"""
LOG.debug("remove multipath device %s" % multipath_name)
mpath_dev = self.find_multipath_device(multipath_name)
if mpath_dev:
devices = mpath_dev['devices']
LOG.debug("multipath LUNs to remove %s" % devices)
for device in devices:
self.remove_scsi_device(device['device'])
self.flush_multipath_device(mpath_dev['id'])
def flush_device_io(self, device):
"""This is used to flush any remaining IO in the buffers."""
try:
LOG.debug("Flushing IO for device %s" % device)
self._execute('blockdev', '--flushbufs', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
msg = _("Failed to flush IO buffers prior to removing"
" device: (%(code)s)") % {'code': exc.exit_code}
LOG.warn(msg)
def flush_multipath_device(self, device):
try:
LOG.debug("Flush multipath device %s" % device)
self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def flush_multipath_devices(self):
try:
self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def find_multipath_device(self, device):
"""Find a multipath device associated with a LUN device name.
device can be either a /dev/sdX entry or a multipath id.
"""
mdev = None
devices = []
out = None
try:
(out, _err) = self._execute('multipath', '-l', device,
run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
if out:
lines = out.strip()
lines = lines.split("\n")
lines = [line for line in lines
if not re.match(MULTIPATH_ERROR_REGEX, line)]
if lines:
# Use the device name, be it the WWID, mpathN or custom alias
# of a device to build the device path. This should be the
# first item on the first line of output from `multipath -l
                # ${path}` or `multipath -l ${wwid}`.
mdev_name = lines[0].split(" ")[0]
mdev = '/dev/mapper/%s' % mdev_name
# Find the WWID for the LUN if we are using mpathN or aliases.
wwid_search = MULTIPATH_WWID_REGEX.search(lines[0])
if wwid_search is not None:
mdev_id = wwid_search.group('wwid')
else:
mdev_id = mdev_name
# Confirm that the device is present.
try:
os.stat(mdev)
except OSError:
LOG.warn(_LW("Couldn't find multipath device %s"), mdev)
return None
LOG.debug("Found multipath device = %(mdev)s"
% {'mdev': mdev})
device_lines = lines[3:]
for dev_line in device_lines:
if dev_line.find("policy") != -1:
continue
dev_line = dev_line.lstrip(' |-`')
dev_info = dev_line.split()
address = dev_info[0].split(":")
dev = {'device': '/dev/%s' % dev_info[1],
'host': address[0], 'channel': address[1],
'id': address[2], 'lun': address[3]
}
devices.append(dev)
if mdev is not None:
info = {"device": mdev,
"id": mdev_id,
"name": mdev_name,
"devices": devices}
return info
return None
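# --- illustration, not part of the original module ---------------------------
# A hedged usage sketch; the root-helper command below is an assumption that
# depends on the deployment's rootwrap configuration, and the function itself
# is hypothetical.
def _example_remove_lun(device_path):
    scsi = LinuxSCSI(root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf')
    mpath = scsi.find_multipath_device(device_path)
    if mpath:
        scsi.remove_multipath_device(mpath['name'])
    else:
        scsi.remove_scsi_device(device_path)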
| 38.729614
| 79
| 0.542886
|
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _, _LW, _LE
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
MULTIPATH_ERROR_REGEX = re.compile(r"\w{3} \d+ \d\d:\d\d:\d\d \|.*$")
MULTIPATH_WWID_REGEX = re.compile(r"\((?P<wwid>.+)\)")
class LinuxSCSI(executor.Executor):
def __init__(self, root_helper, execute=putils.execute,
*args, **kwargs):
super(LinuxSCSI, self).__init__(root_helper, execute,
*args, **kwargs)
def echo_scsi_command(self, path, content):
args = ["-a", path]
kwargs = dict(process_input=content,
run_as_root=True,
root_helper=self._root_helper)
self._execute('tee', *args, **kwargs)
def get_name_from_path(self, path):
name = os.path.realpath(path)
if name.startswith("/dev/"):
return name
else:
return None
def remove_scsi_device(self, device):
path = "/sys/block/%s/device/delete" % device.replace("/dev/", "")
if os.path.exists(path):
self.flush_device_io(device)
LOG.debug("Remove SCSI device(%s) with %s" % (device, path))
self.echo_scsi_command(path, "1")
def wait_for_volume_removal(self, volume_path):
def _wait_for_volume_removal(volume_path):
LOG.debug("Waiting for SCSI mount point %s to be removed.",
volume_path)
if os.path.exists(volume_path):
if self.tries >= self.scan_attempts:
msg = _LE("Exceeded the number of attempts to detect "
"volume removal.")
LOG.error(msg)
raise exception.VolumePathNotRemoved(
volume_path=volume_path)
LOG.debug("%(path)s still exists, rescanning. Try number: "
"%(tries)s",
{'path': volume_path, 'tries': self.tries})
self.tries = self.tries + 1
else:
LOG.debug("SCSI mount point %s has been removed.", volume_path)
raise loopingcall.LoopingCallDone()
self.tries = 0
self.scan_attempts = 3
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_volume_removal, volume_path)
timer.start(interval=2).wait()
def get_device_info(self, device):
(out, _err) = self._execute('sg_scan', device, run_as_root=True,
root_helper=self._root_helper)
dev_info = {'device': device, 'host': None,
'channel': None, 'id': None, 'lun': None}
if out:
line = out.strip()
line = line.replace(device + ": ", "")
info = line.split(" ")
for item in info:
if '=' in item:
pair = item.split('=')
dev_info[pair[0]] = pair[1]
elif 'scsi' in item:
dev_info['host'] = item.replace('scsi', '')
return dev_info
def remove_multipath_device(self, multipath_name):
LOG.debug("remove multipath device %s" % multipath_name)
mpath_dev = self.find_multipath_device(multipath_name)
if mpath_dev:
devices = mpath_dev['devices']
LOG.debug("multipath LUNs to remove %s" % devices)
for device in devices:
self.remove_scsi_device(device['device'])
self.flush_multipath_device(mpath_dev['id'])
def flush_device_io(self, device):
try:
LOG.debug("Flushing IO for device %s" % device)
self._execute('blockdev', '--flushbufs', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
msg = _("Failed to flush IO buffers prior to removing"
" device: (%(code)s)") % {'code': exc.exit_code}
LOG.warn(msg)
def flush_multipath_device(self, device):
try:
LOG.debug("Flush multipath device %s" % device)
self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def flush_multipath_devices(self):
try:
self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def find_multipath_device(self, device):
mdev = None
devices = []
out = None
try:
(out, _err) = self._execute('multipath', '-l', device,
run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
if out:
lines = out.strip()
lines = lines.split("\n")
lines = [line for line in lines
if not re.match(MULTIPATH_ERROR_REGEX, line)]
if lines:
mdev_name = lines[0].split(" ")[0]
mdev = '/dev/mapper/%s' % mdev_name
wwid_search = MULTIPATH_WWID_REGEX.search(lines[0])
if wwid_search is not None:
mdev_id = wwid_search.group('wwid')
else:
mdev_id = mdev_name
try:
os.stat(mdev)
except OSError:
LOG.warn(_LW("Couldn't find multipath device %s"), mdev)
return None
LOG.debug("Found multipath device = %(mdev)s"
% {'mdev': mdev})
device_lines = lines[3:]
for dev_line in device_lines:
if dev_line.find("policy") != -1:
continue
dev_line = dev_line.lstrip(' |-`')
dev_info = dev_line.split()
address = dev_info[0].split(":")
dev = {'device': '/dev/%s' % dev_info[1],
'host': address[0], 'channel': address[1],
'id': address[2], 'lun': address[3]
}
devices.append(dev)
if mdev is not None:
info = {"device": mdev,
"id": mdev_id,
"name": mdev_name,
"devices": devices}
return info
return None
| true
| true
|
f702e1901db433d3ed3c73f09a3ca0003e4a5499
| 4,869
|
py
|
Python
|
src/proposals/tests/views/test_cancel.py
|
peihsuan/pycon.tw
|
4d75e629295b3eef92eff78b3604ab034bd406b0
|
[
"MIT"
] | null | null | null |
src/proposals/tests/views/test_cancel.py
|
peihsuan/pycon.tw
|
4d75e629295b3eef92eff78b3604ab034bd406b0
|
[
"MIT"
] | null | null | null |
src/proposals/tests/views/test_cancel.py
|
peihsuan/pycon.tw
|
4d75e629295b3eef92eff78b3604ab034bd406b0
|
[
"MIT"
] | null | null | null |
import pytest
from django.conf import settings
from django.contrib import messages
from proposals.models import TalkProposal, TutorialProposal
pytestmark = pytest.mark.skipif(
not settings.PROPOSALS_WITHDRAWABLE,
reason='proposal withdrawal disabled',
)
def test_talk_proposal_cancel_login(client):
response = client.get('/en-us/proposals/talk/42/cancel/', follow=True)
assert response.redirect_chain == [
('/en-us/accounts/login/?next=/en-us/proposals/talk/42/cancel/', 302),
]
def test_tutorial_proposal_cancel_login(client):
response = client.get('/en-us/proposals/tutorial/42/cancel/', follow=True)
assert response.redirect_chain == [
('/en-us/accounts/login/?next=/en-us/proposals/tutorial/42/cancel/',
302),
]
@pytest.mark.parametrize('method', ['get', 'post'])
def test_talk_proposal_cancel_denied(bare_user_client, method):
response = getattr(bare_user_client, method)(
'/en-us/proposals/talk/42/cancel/',
)
assert response.status_code == 403
@pytest.mark.parametrize('method', ['get', 'post'])
def test_tutorial_proposal_cancel_denied(bare_user_client, method):
response = getattr(bare_user_client, method)(
'/en-us/proposals/tutorial/42/cancel/',
)
assert response.status_code == 403
def test_talk_proposal_cancel_get(agreed_user_client, talk_proposal):
"""The cancel view should not allow GET, only POST.
"""
response = agreed_user_client.get('/en-us/proposals/talk/42/cancel/')
assert response.status_code == 405
def test_tutorial_proposal_cancel_get(agreed_user_client, tutorial_proposal):
"""The cancel view should not allow GET, only POST.
"""
response = agreed_user_client.get('/en-us/proposals/tutorial/42/cancel/')
assert response.status_code == 405
def test_talk_proposal_cancel_not_owned(another_agreed_user_client, talk_proposal):
response = another_agreed_user_client.post('/en-us/proposals/talk/42/cancel/')
assert response.status_code == 404
def test_tutorial_proposal_cancel_not_owned(
another_agreed_user_client, tutorial_proposal):
response = another_agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/')
assert response.status_code == 404
def test_talk_proposal_cancel(agreed_user_client, talk_proposal):
assert not talk_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', {
'cancelled': True,
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert TalkProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.INFO,
'Talk proposal '
'<strong>Beyond the Style Guides<br></strong> withdrawn.'),
]
def test_talk_proposal_reactivate(agreed_user_client, cancelled_talk_proposal):
assert cancelled_talk_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', {
'cancelled': '',
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert not TalkProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.SUCCESS,
'Talk proposal '
'<strong>Beyond the Style Guides<br></strong> reactivated.'),
]
def test_tutorial_proposal_cancel(agreed_user_client, tutorial_proposal):
assert not tutorial_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', {
'cancelled': True,
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert TutorialProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.INFO,
'Tutorial proposal '
'<strong>Beyond the Style Guides<br></strong> withdrawn.'),
]
def test_tutorial_proposal_reactivate(
agreed_user_client, cancelled_tutorial_proposal):
assert cancelled_tutorial_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', {
'cancelled': '',
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert not TutorialProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.SUCCESS,
'Tutorial proposal '
'<strong>Beyond the Style Guides<br></strong> reactivated.'),
]
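# --- illustration, not part of the original test module ----------------------
# A tiny hedged helper naming the endpoint pattern exercised above; it is
# hypothetical and unused by the tests.
def _cancel_url(kind, pk):
    return '/en-us/proposals/{}/{}/cancel/'.format(kind, pk)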
| 32.46
| 86
| 0.694804
|
import pytest
from django.conf import settings
from django.contrib import messages
from proposals.models import TalkProposal, TutorialProposal
pytestmark = pytest.mark.skipif(
not settings.PROPOSALS_WITHDRAWABLE,
reason='proposal withdrawal disabled',
)
def test_talk_proposal_cancel_login(client):
response = client.get('/en-us/proposals/talk/42/cancel/', follow=True)
assert response.redirect_chain == [
('/en-us/accounts/login/?next=/en-us/proposals/talk/42/cancel/', 302),
]
def test_tutorial_proposal_cancel_login(client):
response = client.get('/en-us/proposals/tutorial/42/cancel/', follow=True)
assert response.redirect_chain == [
('/en-us/accounts/login/?next=/en-us/proposals/tutorial/42/cancel/',
302),
]
@pytest.mark.parametrize('method', ['get', 'post'])
def test_talk_proposal_cancel_denied(bare_user_client, method):
response = getattr(bare_user_client, method)(
'/en-us/proposals/talk/42/cancel/',
)
assert response.status_code == 403
@pytest.mark.parametrize('method', ['get', 'post'])
def test_tutorial_proposal_cancel_denied(bare_user_client, method):
response = getattr(bare_user_client, method)(
'/en-us/proposals/tutorial/42/cancel/',
)
assert response.status_code == 403
def test_talk_proposal_cancel_get(agreed_user_client, talk_proposal):
response = agreed_user_client.get('/en-us/proposals/talk/42/cancel/')
assert response.status_code == 405
def test_tutorial_proposal_cancel_get(agreed_user_client, tutorial_proposal):
response = agreed_user_client.get('/en-us/proposals/tutorial/42/cancel/')
assert response.status_code == 405
def test_talk_proposal_cancel_not_owned(another_agreed_user_client, talk_proposal):
response = another_agreed_user_client.post('/en-us/proposals/talk/42/cancel/')
assert response.status_code == 404
def test_tutorial_proposal_cancel_not_owned(
another_agreed_user_client, tutorial_proposal):
response = another_agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/')
assert response.status_code == 404
def test_talk_proposal_cancel(agreed_user_client, talk_proposal):
assert not talk_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', {
'cancelled': True,
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert TalkProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.INFO,
'Talk proposal '
'<strong>Beyond the Style Guides<br></strong> withdrawn.'),
]
def test_talk_proposal_reactivate(agreed_user_client, cancelled_talk_proposal):
assert cancelled_talk_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/talk/42/cancel/', {
'cancelled': '',
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert not TalkProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.SUCCESS,
'Talk proposal '
'<strong>Beyond the Style Guides<br></strong> reactivated.'),
]
def test_tutorial_proposal_cancel(agreed_user_client, tutorial_proposal):
assert not tutorial_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', {
'cancelled': True,
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert TutorialProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.INFO,
'Tutorial proposal '
'<strong>Beyond the Style Guides<br></strong> withdrawn.'),
]
def test_tutorial_proposal_reactivate(
agreed_user_client, cancelled_tutorial_proposal):
assert cancelled_tutorial_proposal.cancelled
response = agreed_user_client.post('/en-us/proposals/tutorial/42/cancel/', {
'cancelled': '',
}, follow=True)
assert response.redirect_chain == [('/en-us/dashboard/', 302)], (
response.context['form'].errors
)
assert not TutorialProposal.objects.get(pk=42).cancelled
msgs = [(m.level, m.message) for m in response.context['messages']]
assert msgs == [
(messages.SUCCESS,
'Tutorial proposal '
'<strong>Beyond the Style Guides<br></strong> reactivated.'),
]
| true
| true
|
f702e3fa5d22d565f73b30bff7f3d1d5d90b28ab
| 10,523
|
py
|
Python
|
moviepy/video/tools/drawing.py
|
andriyor/moviepy
|
8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a
|
[
"MIT"
] | 8,558
|
2015-01-03T05:14:12.000Z
|
2022-03-31T21:45:38.000Z
|
moviepy/video/tools/drawing.py
|
andriyor/moviepy
|
8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a
|
[
"MIT"
] | 1,592
|
2015-01-02T22:12:54.000Z
|
2022-03-30T13:10:40.000Z
|
moviepy/video/tools/drawing.py
|
andriyor/moviepy
|
8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a
|
[
"MIT"
] | 1,332
|
2015-01-02T18:01:53.000Z
|
2022-03-31T22:47:28.000Z
|
"""Deals with making images (np arrays). It provides drawing
methods that are difficult to do with the existing Python libraries.
"""
import numpy as np
def blit(im1, im2, pos=None, mask=None):
"""Blit an image over another.
    Blits ``im1`` on ``im2`` at position ``pos=(x, y)``, using the
``mask`` if provided.
"""
if pos is None:
pos = (0, 0) # pragma: no cover
else:
# Cast to tuple in case pos is not subscriptable.
pos = tuple(pos)
im2.paste(im1, pos, mask)
return im2
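# --- illustration, not part of the original module ---------------------------
# A minimal hedged sketch of blit(); note that despite the module docstring it
# operates on PIL images, so Pillow (assumed available as a moviepy
# dependency) is required.
def _blit_demo():
    from PIL import Image
    frame = Image.new('RGB', (64, 36), (0, 0, 0))
    logo = Image.new('RGB', (16, 16), (255, 0, 0))
    return blit(logo, frame, pos=(10, 10))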
def color_gradient(
size,
p1,
p2=None,
vector=None,
radius=None,
color_1=0.0,
color_2=1.0,
shape="linear",
offset=0,
):
"""Draw a linear, bilinear, or radial gradient.
    The result is a picture of size ``size``, whose color varies
    gradually from color ``color_2`` in position ``p1`` to color ``color_1``
    in position ``p2`` (the radial shape swaps this: ``color_1`` is at ``p1``,
    as the examples below show).
    If it is an RGB picture the result must be transformed into
    a 'uint8' array to be displayed normally.
Parameters
----------
size : tuple or list
Size (width, height) in pixels of the final image array.
p1 : tuple or list
        Position for the first coordinate of the gradient in pixels (x, y).
        For the linear shape, the color 'before' ``p1`` is ``color_2`` and it
        gradually changes in the direction of ``p2`` until it is ``color_1``
        when it reaches ``p2``.
    p2 : tuple or list, optional
        Position for the second coordinate of the gradient in pixels (x, y),
        i.e. the limit point where the color reaches ``color_1``.
vector : tuple or list, optional
A vector (x, y) in pixels that can be provided instead of ``p2``.
``p2`` is then defined as (p1 + vector).
color_1 : tuple or list, optional
Starting color for the gradient. As default, black. Either floats
between 0 and 1 (for gradients used in masks) or [R, G, B] arrays
(for colored gradients).
color_2 : tuple or list, optional
Color for the second point in the gradient. As default, white. Either
floats between 0 and 1 (for gradients used in masks) or [R, G, B]
arrays (for colored gradients).
shape : str, optional
        Shape of the gradient. Can be either ``"linear"``, ``"bilinear"`` or
        ``"radial"``. In a linear gradient the color varies in one direction,
        from point ``p1`` to point ``p2``. In a bilinear gradient it also
        varies symmetrically from ``p1`` in the other direction. In a radial
        gradient it goes from ``color_1`` to ``color_2`` in all directions.
radius : float, optional
If ``shape="radial"``, the radius of the gradient is defined with the
parameter ``radius``, in pixels.
offset : float, optional
Real number between 0 and 1 indicating the fraction of the vector
at which the gradient actually starts. For instance if ``offset``
is 0.9 in a gradient going from p1 to p2, then the gradient will
only occur near p2 (before that everything is of color ``color_1``)
        If the offset is 0.9 in a radial gradient, the gradient will
        occur in the region located between 90% and 100% of the radius;
        this creates a blurry disc of radius ``radius``.
Returns
-------
image
An Numpy array of dimensions (width, height, n_colors) of type float
representing the image of the gradient.
Examples
--------
>>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black
[[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]
>>>
    >>> color_gradient(  # from green to red
... (10, 1), # size
... (0, 0), # p1
... p2=(10, 0),
... color_1=(255, 0, 0), # red
... color_2=(0, 255, 0), # green
... )
[[[ 0. 255. 0. ]
[ 25.5 229.5 0. ]
[ 51. 204. 0. ]
[ 76.5 178.5 0. ]
[102. 153. 0. ]
[127.5 127.5 0. ]
[153. 102. 0. ]
[178.5 76.5 0. ]
[204. 51. 0. ]
[229.5 25.5 0. ]]]
"""
# np-arrayize and change x,y coordinates to y,x
w, h = size
color_1 = np.array(color_1).astype(float)
color_2 = np.array(color_2).astype(float)
if shape == "bilinear":
if vector is None:
if p2 is None:
raise ValueError("You must provide either 'p2' or 'vector'")
vector = np.array(p2) - np.array(p1)
m1, m2 = [
color_gradient(
size,
p1,
vector=v,
color_1=1.0,
color_2=0.0,
shape="linear",
offset=offset,
)
for v in [vector, [-v for v in vector]]
]
arr = np.maximum(m1, m2)
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
p1 = np.array(p1[::-1]).astype(float)
M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)
if shape == "linear":
if vector is None:
if p2 is not None:
vector = np.array(p2[::-1]) - p1
else:
raise ValueError("You must provide either 'p2' or 'vector'")
else:
vector = np.array(vector[::-1])
norm = np.linalg.norm(vector)
n_vec = vector / norm ** 2 # norm 1/norm(vector)
p1 = p1 + offset * vector
arr = (M - p1).dot(n_vec) / (1 - offset)
arr = np.minimum(1, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
elif shape == "radial":
if (radius or 0) == 0:
arr = np.ones((h, w))
else:
arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
arr = arr / ((1 - offset) * radius)
arr = np.minimum(1.0, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return (1 - arr) * color_1 + arr * color_2
raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
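# --- illustration, not part of the original module ---------------------------
# A minimal hedged sketch mirroring the docstring examples: a linear mask
# (color_2 at p1, color_1 at p2) and a blurry radial disc, both in [0, 1].
def _gradient_demo():
    linear = color_gradient((10, 1), (0, 0), p2=(10, 0))
    radial = color_gradient((11, 11), (5, 5), radius=5.0, shape="radial",
                            color_1=1.0, color_2=0.0, offset=0.6)
    return linear, radial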
def color_split(
size,
x=None,
y=None,
p1=None,
p2=None,
vector=None,
color_1=0,
color_2=1.0,
gradient_width=0,
):
"""Make an image split in 2 colored regions.
Returns an array of size ``size`` divided in two regions called 1 and
2 in what follows, and which will have colors color_1 and color_2
respectively.
Parameters
----------
x : int, optional
If provided, the image is split horizontally in x, the left
region being region 1.
y : int, optional
If provided, the image is split vertically in y, the top region
being region 1.
p1, p2: tuple or list, optional
Positions (x1, y1), (x2, y2) in pixels, where the numbers can be
floats. Region 1 is defined as the whole region on the left when
going from ``p1`` to ``p2``.
p1, vector: tuple or list, optional
``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be
floats. Region 1 is then the region on the left when starting
in position ``p1`` and going in the direction given by ``vector``.
gradient_width : float, optional
If not zero, the split is not sharp, but gradual over a region of
width ``gradient_width`` (in pixels). This is preferable in many
situations (for instance for antialiasing).
Examples
--------
>>> size = [200, 200]
>>>
>>> # an image with all pixels with x<50 =0, the others =1
>>> color_split(size, x=50, color_1=0, color_2=1)
>>>
    >>> # an image with all pixels with y<50 red, the others green
    >>> color_split(size, y=50, color_1=[255, 0, 0], color_2=[0, 255, 0])
>>>
>>> # An image split along an arbitrary line (see below)
    >>> color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1)
"""
if gradient_width or ((x is None) and (y is None)):
if p2 is not None:
vector = np.array(p2) - np.array(p1)
elif x is not None:
vector = np.array([0, -1.0])
p1 = np.array([x, 0])
elif y is not None:
vector = np.array([1.0, 0.0])
p1 = np.array([0, y])
x, y = vector
vector = np.array([y, -x]).astype("float")
norm = np.linalg.norm(vector)
vector = max(0.1, gradient_width) * vector / norm
return color_gradient(
size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear"
)
else:
w, h = size
shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))
arr = np.zeros(shape)
if x:
arr[:, :x] = color_1
arr[:, x:] = color_2
elif y:
arr[:y] = color_1
arr[y:] = color_2
return arr
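# --- illustration, not part of the original module ---------------------------
# A minimal hedged sketch: a hard vertical split and the antialiased diagonal
# split from the docstring, with a 3-pixel gradient band.
def _split_demo():
    hard = color_split((200, 200), x=50, color_1=0, color_2=1)
    soft = color_split((200, 200), p1=[20, 50], p2=[25, 70],
                       color_1=0, color_2=1, gradient_width=3)
    return hard, soft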
def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):
"""Draw an image with a circle.
Draws a circle of color ``color``, on a background of color ``bg_color``,
on a screen of size ``screensize`` at the position ``center=(x, y)``,
with a radius ``radius`` but slightly blurred on the border by ``blur``
pixels.
Parameters
----------
screensize : tuple or list
Size of the canvas.
center : tuple or list
Center of the circle.
radius : float
        Radius of the circle, in pixels.
    color : tuple or float, optional
        Color of the circle; white (1.0) by default.
    bg_color : tuple or float, optional
Color for the background of the canvas. As default, black.
blur : float, optional
Blur for the border of the circle.
Examples
--------
>>> from moviepy.video.tools.drawing import circle
>>>
>>> circle(
... (5, 5), # size
... (2, 2), # center
... 2, # radius
... )
array([[0. , 0. , 0. , 0. , 0. ],
[0. , 0.58578644, 1. , 0.58578644, 0. ],
[0. , 1. , 1. , 1. , 0. ],
[0. , 0.58578644, 1. , 0.58578644, 0. ],
[0. , 0. , 0. , 0. , 0. ]])
"""
offset = 1.0 * (radius - blur) / radius if radius else 0
return color_gradient(
screensize,
p1=center,
radius=radius,
color_1=color,
color_2=bg_color,
shape="radial",
offset=offset,
)
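# --- illustration, not part of the original module ---------------------------
# A minimal hedged sketch: use circle() as a soft alpha mask to blend two
# solid colors; the rim is blurred over 10 pixels.
def _vignette_demo():
    mask = circle((120, 80), (60, 40), radius=30, blur=10)[:, :, None]
    red = np.ones((80, 120, 3)) * [255, 0, 0]
    blue = np.ones((80, 120, 3)) * [0, 0, 255]
    return mask * red + (1 - mask) * blue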
| 31.887879
| 88
| 0.5411
|
import numpy as np
def blit(im1, im2, pos=None, mask=None):
if pos is None:
        pos = (0, 0)
    else:
pos = tuple(pos)
im2.paste(im1, pos, mask)
return im2
def color_gradient(
size,
p1,
p2=None,
vector=None,
radius=None,
color_1=0.0,
color_2=1.0,
shape="linear",
offset=0,
):
w, h = size
color_1 = np.array(color_1).astype(float)
color_2 = np.array(color_2).astype(float)
if shape == "bilinear":
if vector is None:
if p2 is None:
raise ValueError("You must provide either 'p2' or 'vector'")
vector = np.array(p2) - np.array(p1)
m1, m2 = [
color_gradient(
size,
p1,
vector=v,
color_1=1.0,
color_2=0.0,
shape="linear",
offset=offset,
)
for v in [vector, [-v for v in vector]]
]
arr = np.maximum(m1, m2)
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
p1 = np.array(p1[::-1]).astype(float)
M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)
if shape == "linear":
if vector is None:
if p2 is not None:
vector = np.array(p2[::-1]) - p1
else:
raise ValueError("You must provide either 'p2' or 'vector'")
else:
vector = np.array(vector[::-1])
norm = np.linalg.norm(vector)
n_vec = vector / norm ** 2
p1 = p1 + offset * vector
arr = (M - p1).dot(n_vec) / (1 - offset)
arr = np.minimum(1, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return arr * color_1 + (1 - arr) * color_2
elif shape == "radial":
if (radius or 0) == 0:
arr = np.ones((h, w))
else:
arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
arr = arr / ((1 - offset) * radius)
arr = np.minimum(1.0, np.maximum(0, arr))
if color_1.size > 1:
arr = np.dstack(3 * [arr])
return (1 - arr) * color_1 + arr * color_2
raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
def color_split(
size,
x=None,
y=None,
p1=None,
p2=None,
vector=None,
color_1=0,
color_2=1.0,
gradient_width=0,
):
if gradient_width or ((x is None) and (y is None)):
if p2 is not None:
vector = np.array(p2) - np.array(p1)
elif x is not None:
vector = np.array([0, -1.0])
p1 = np.array([x, 0])
elif y is not None:
vector = np.array([1.0, 0.0])
p1 = np.array([0, y])
x, y = vector
vector = np.array([y, -x]).astype("float")
norm = np.linalg.norm(vector)
vector = max(0.1, gradient_width) * vector / norm
return color_gradient(
size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear"
)
else:
w, h = size
shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))
arr = np.zeros(shape)
if x:
arr[:, :x] = color_1
arr[:, x:] = color_2
elif y:
arr[:y] = color_1
arr[y:] = color_2
return arr
def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):
offset = 1.0 * (radius - blur) / radius if radius else 0
return color_gradient(
screensize,
p1=center,
radius=radius,
color_1=color,
color_2=bg_color,
shape="radial",
offset=offset,
)
| true
| true
|
f702e59535b5c977cc3845ab265467cf5b3c87b7
| 917
|
py
|
Python
|
python/packages/pybind_nisar/products/readers/SLC/SLC.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
python/packages/pybind_nisar/products/readers/SLC/SLC.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
python/packages/pybind_nisar/products/readers/SLC/SLC.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import h5py
import pyre
from ..Base import Base
from .Identification import Identification
class SLC(Base, family='nisar.productreader.slc'):
'''
Class for parsing NISAR SLC products into isce structures.
'''
productValidationType = pyre.properties.str(default='SLC')
productValidationType.doc = 'Validation tag to ensure correct product type'
def __init__(self, **kwds):
'''
Constructor to initialize product with HDF5 file.
'''
        # Read base product information like Identification
super().__init__(**kwds)
def populateIdentification(self):
'''
Read in the Identification information and assert identity.
'''
with h5py.File(self.filename, 'r', libver='latest', swmr=True) as f:
h5grp = f[self.IdentificationPath]
self.identification = Identification(h5grp)
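# --- illustration, not part of the original module ---------------------------
# A hedged usage sketch: the 'hdf5file' constructor keyword is an assumption
# based on how the pyre-configured Base reader is typically instantiated, and
# the function is hypothetical.
def _open_slc(filename):
    slc = SLC(hdf5file=filename)
    slc.populateIdentification()
    return slc.identification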
| 28.65625
| 79
| 0.654308
|
import h5py
import pyre
from ..Base import Base
from .Identification import Identification
class SLC(Base, family='nisar.productreader.slc'):
productValidationType = pyre.properties.str(default='SLC')
productValidationType.doc = 'Validation tag to ensure correct product type'
def __init__(self, **kwds):
super().__init__(**kwds)
def populateIdentification(self):
with h5py.File(self.filename, 'r', libver='latest', swmr=True) as f:
h5grp = f[self.IdentificationPath]
self.identification = Identification(h5grp)
| true
| true
|
f702e79b9d3eac0b82b41061dc06802d153a2b1f
| 2,550
|
py
|
Python
|
doc/source/conf.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'stevedore.sphinxext',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# Execute "export SPHINX_DEBUG=1" in your terminal to disable.
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tacker'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tacker.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| 32.278481
| 79
| 0.701176
|
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
extensions = [
'sphinx.ext.autodoc',
'stevedore.sphinxext',
'oslosphinx'
]
source_suffix = '.rst'
master_doc = 'index'
project = u'tacker'
copyright = u'2013, OpenStack Foundation'
add_function_parentheses = True
add_module_names = True
pygments_style = 'sphinx'
modindex_common_prefix = ['tacker.']
htmlhelp_basename = '%sdoc' % project
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
| true
| true
|
f702e920c7085107291c4505fefbd5abbfa57472
| 13,942
|
py
|
Python
|
train_svdnet_xent.py
|
hsfzxjy/svdnet-pytorch
|
8f485d0b162c23b20449f7ee80c955e0b20950ae
|
[
"MIT"
] | 12
|
2019-04-14T06:44:35.000Z
|
2022-01-15T13:19:59.000Z
|
train_svdnet_xent.py
|
hsfzxjy/svdnet-pytorch
|
8f485d0b162c23b20449f7ee80c955e0b20950ae
|
[
"MIT"
] | 2
|
2019-06-28T07:18:43.000Z
|
2020-09-18T07:02:31.000Z
|
train_svdnet_xent.py
|
hsfzxjy/svdnet-pytorch
|
8f485d0b162c23b20449f7ee80c955e0b20950ae
|
[
"MIT"
] | 1
|
2021-03-30T13:31:22.000Z
|
2021-03-30T13:31:22.000Z
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import warnings
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \
load_pretrained_weights, save_checkpoint, resume_from_checkpoint
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.lr_schedulers import init_lr_scheduler
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
testloader_dict = trainloader = criterion = None
use_gpu = False
# global variables
parser = argument_parser()
args = parser.parse_args()
def corr_metric(W: 'K x N'):
G = W.permute(1, 0) @ W
return torch.trace(G) / abs(G).sum()
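# --- illustration, not part of the original script ----------------------------
# Hedged sanity check: for orthonormal columns, G = W.T @ W is the identity,
# so the metric is 1.0; a 4x4 matrix of ones gives trace(G)/|G|.sum()
# = 16 / 64 = 0.25.
def _corr_metric_demo():
    assert abs(corr_metric(torch.eye(4)) - 1.0) < 1e-6
    assert abs(corr_metric(torch.ones(4, 4)) - 0.25) < 1e-6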
def replace_weight(layer):
with torch.no_grad():
# NECESSARY! The weight of Linear layer has been transposed!
A = layer.weight.t()
M, N = A.size()
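        # the bare annotations below (e.g. 'M: 2048') are no-op annotation
        # statements used as inline shape notes for the SVDNet FC layer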
M: 2048
N: 1024
U, S, V = torch.svd(A, some=False)
W = A @ V
W: '2048 x 1024 = M x N'
NW = torch.zeros_like(A)
for i in range(N):
curr_N = W.size(1)
W_norm = torch.norm(W, p=2, dim=0)
W_norm: 'curr_N'
index = i
vec_i = A[:, i]
vec_i_norm = torch.norm(vec_i)
co = (A[:, i].view(M, 1).t() @ W).view(curr_N)
co: 'curr_N'
co = co / vec_i_norm
absco = abs(co / W_norm)
maxco_index = torch.max(absco, 0)[1].item()
NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index])
# Remove selected column vector from W
W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})]
layer.weight.copy_(NW.t())
print(layer.weight)
return layer
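# --- illustration, not part of the original script ----------------------------
# A hedged sketch of the restraint step on a small layer: after
# replace_weight the columns of layer.weight.t() are orthogonal, so
# corr_metric rises to ~1.0. The demo function is hypothetical.
def _replace_weight_demo():
    layer = nn.Linear(8, 4, bias=False)
    before = corr_metric(layer.weight.t())
    replace_weight(layer)
    after = corr_metric(layer.weight.t())
    return before, after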
def main():
global args, criterion, testloader_dict, trainloader, use_gpu
set_random_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'test.log' if args.evaluate else 'train.log'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
else:
warnings.warn('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_pretrained_weights(model, args.load_weights)
model = nn.DataParallel(model).cuda() if use_gpu else model
criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
if args.resume and check_isfile(args.resume):
args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=None)
resumed = True
else:
resumed = False
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
return
time_start = time.time()
# ranklogger = RankLogger(args.source_names, args.target_names)
print('=> Start training')
if not resumed:
train_base(model)
train_RRI(model, 7)
elapsed = round(time.time() - time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed {}'.format(elapsed))
# ranklogger.show_summary()
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False):
losses = AverageMeter()
accs = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
# if fixbase or args.always_fixbase:
# open_specified_layers(model, args.open_layers)
# else:
# open_all_layers(model)
end = time.time()
for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
outputs = model(imgs)
loss = sum(criterion(x, pids) for x in outputs) / len(outputs)
# if isinstance(outputs, (tuple, list)):
# loss = DeepSupervision(criterion, outputs, pids)
# else:
# loss = criterion(outputs, pids)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
losses.update(loss.item(), pids.size(0))
accs.update(accuracy(outputs, pids)[0])
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accs
))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
end = time.time()
for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
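        # Squared Euclidean distances via the expansion ||q - g||^2 =
        # ||q||^2 + ||g||^2 - 2 * q . g, computed for all query/gallery pairs.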
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # keyword beta/alpha form; the positional addmm_(beta, alpha, ...) is deprecated
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
distmat = distmat.numpy()
print('Computing CMC and mAP')
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
print('Results ----------')
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in ranks:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
print('------------------')
if return_distmat:
return distmat
return cmc[0]
def get_base_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.0003,
'betas': (0.9, 0.999),
}
param_groups = model.parameters()
optimizer = torch.optim.Adam(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1)
return optimizer, scheduler
def get_base_sgd_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.001,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1)
return optimizer, scheduler
def get_RRI_optimizer(
model,
lr
):
kwargs = {
'weight_decay': 5e-4,
'lr': lr,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1)
return optimizer, scheduler
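# train_R runs one stage of the restraint/relaxation iteration (RRI): with
# fix_eigen_layer=True the eigen (fc) layers are frozen ("restraint"); with
# False the whole network is fine-tuned ("relaxation").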
def train_R(model, lr, T, fix_eigen_layer: bool=False):
eigen_layers = model.module.get_fcs()
if fix_eigen_layer:
for eigen_layer in eigen_layers:
eigen_layer.eval()
for p in eigen_layer.parameters():
p.requires_grad = False
stage_name = 'restraint'
else:
model.train()
for p in model.parameters():
p.requires_grad = True
stage_name = 'relaxation'
prefix = '{}_{}_'.format(T, stage_name)
optimizer, scheduler = get_RRI_optimizer(model, lr)
for epoch in range(20):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix=prefix)
def train_base(model):
use_sgd = os.environ.get('sgd') is not None
optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer
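    # NOTE: the warm-up phase below always uses the Adam base optimizer;
    # optimizer_getter (which honours the `sgd` env var) only takes effect
    # once all layers are opened further down.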
optimizer, scheduler = get_base_optimizer(model)
model.train()
print('=== train base ===')
if True:
open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']
print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))
for epoch in range(10):
open_specified_layers(model, open_layers)
train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=True)
print('Done. All layers are open to train for {} epochs'.format(60))
open_all_layers(model)
optimizer, scheduler = optimizer_getter(model)
for epoch in range(60):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': optimizer.state_dict(),
}, args.save_dir, prefix='base_')
def train_RRI(model, Ts: int=7):
base_lrs = [0.001] * 3 + [0.0001] * 10
for T in range(Ts):
print('=== T = {} ==='.format(T))
print('Replacing eigen layer weight...')
for eigen_layer in model.module.get_fcs():
replace_weight(eigen_layer)
print('Replaced.')
print('--- Restraint ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=True)
print('--- Relaxation ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=False)
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix='final_')
if __name__ == '__main__':
main()
| 30.641758
| 143
| 0.608378
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import warnings
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \
load_pretrained_weights, save_checkpoint, resume_from_checkpoint
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.lr_schedulers import init_lr_scheduler
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
testloader_dict = trainloader = criterion = None
use_gpu = False
parser = argument_parser()
args = parser.parse_args()
def corr_metric(W: 'K x N'):
G = W.permute(1, 0) @ W
return torch.trace(G) / abs(G).sum()
def replace_weight(layer):
with torch.no_grad():
A = layer.weight.t()
M, N = A.size()
M: 2048
N: 1024
U, S, V = torch.svd(A, some=False)
W = A @ V
W: '2048 x 1024 = M x N'
NW = torch.zeros_like(A)
for i in range(N):
curr_N = W.size(1)
W_norm = torch.norm(W, p=2, dim=0)
W_norm: 'curr_N'
index = i
vec_i = A[:, i]
vec_i_norm = torch.norm(vec_i)
co = (A[:, i].view(M, 1).t() @ W).view(curr_N)
co: 'curr_N'
co = co / vec_i_norm
absco = abs(co / W_norm)
maxco_index = torch.max(absco, 0)[1].item()
NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index])
W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})]
layer.weight.copy_(NW.t())
print(layer.weight)
return layer
def main():
global args, criterion, testloader_dict, trainloader, use_gpu
set_random_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'test.log' if args.evaluate else 'train.log'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
else:
warnings.warn('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_pretrained_weights(model, args.load_weights)
model = nn.DataParallel(model).cuda() if use_gpu else model
criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
if args.resume and check_isfile(args.resume):
args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=None)
resumed = True
else:
resumed = False
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
return
time_start = time.time()
print('=> Start training')
if not resumed:
train_base(model)
train_RRI(model, 7)
elapsed = round(time.time() - time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed {}'.format(elapsed))
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False):
losses = AverageMeter()
accs = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
end = time.time()
for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
outputs = model(imgs)
loss = sum(criterion(x, pids) for x in outputs) / len(outputs)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
losses.update(loss.item(), pids.size(0))
accs.update(accuracy(outputs, pids)[0])
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accs
))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
end = time.time()
for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
distmat = distmat.numpy()
print('Computing CMC and mAP')
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
print('Results ----------')
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in ranks:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
print('------------------')
if return_distmat:
return distmat
return cmc[0]
def get_base_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.0003,
'betas': (0.9, 0.999),
}
param_groups = model.parameters()
optimizer = torch.optim.Adam(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1)
return optimizer, scheduler
def get_base_sgd_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.001,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1)
return optimizer, scheduler
def get_RRI_optimizer(
model,
lr
):
kwargs = {
'weight_decay': 5e-4,
'lr': lr,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1)
return optimizer, scheduler
def train_R(model, lr, T, fix_eigen_layer: bool=False):
eigen_layers = model.module.get_fcs()
if fix_eigen_layer:
for eigen_layer in eigen_layers:
eigen_layer.eval()
for p in eigen_layer.parameters():
p.requires_grad = False
stage_name = 'restraint'
else:
model.train()
for p in model.parameters():
p.requires_grad = True
stage_name = 'relaxation'
prefix = '{}_{}_'.format(T, stage_name)
optimizer, scheduler = get_RRI_optimizer(model, lr)
for epoch in range(20):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix=prefix)
def train_base(model):
use_sgd = os.environ.get('sgd') is not None
optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer
optimizer, scheduler = get_base_optimizer(model)
model.train()
print('=== train base ===')
if True:
open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']
print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))
for epoch in range(10):
open_specified_layers(model, open_layers)
train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=True)
print('Done. All layers are open to train for {} epochs'.format(60))
open_all_layers(model)
optimizer, scheduler = optimizer_getter(model)
for epoch in range(60):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': optimizer.state_dict(),
}, args.save_dir, prefix='base_')
def train_RRI(model, Ts: int=7):
base_lrs = [0.001] * 3 + [0.0001] * 10
for T in range(Ts):
print('=== T = {} ==='.format(T))
print('Replacing eigen layer weight...')
for eigen_layer in model.module.get_fcs():
replace_weight(eigen_layer)
print('Replaced.')
print('--- Restraint ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=True)
print('--- Relaxation ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=False)
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix='final_')
if __name__ == '__main__':
main()
| true
| true
|
f702e98b502f9918276dc6a5079495bd5c1a4194
| 4,527
|
py
|
Python
|
saas/management/commands/renewals.py
|
gikoluo/djaodjin-saas
|
badd7894ac327191008a1b3a0ebd0d07b55908c3
|
[
"BSD-2-Clause"
] | 383
|
2015-03-07T06:19:39.000Z
|
2022-03-12T20:53:37.000Z
|
saas/management/commands/renewals.py
|
gikoluo/djaodjin-saas
|
badd7894ac327191008a1b3a0ebd0d07b55908c3
|
[
"BSD-2-Clause"
] | 146
|
2015-03-25T22:45:44.000Z
|
2022-02-22T08:49:35.000Z
|
saas/management/commands/renewals.py
|
gikoluo/djaodjin-saas
|
badd7894ac327191008a1b3a0ebd0d07b55908c3
|
[
"BSD-2-Clause"
] | 111
|
2015-02-12T22:13:07.000Z
|
2022-03-11T05:45:53.000Z
|
# Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The renewals command is intended to be run as part of an automated script
run at least once a day. It will
- recognize revenue for past periods (see :doc:`ledger <ledger>`).
- extend active subscriptions
- create charges for new periods
- trigger expiration notices
Every function that is part of the renewals script is explicitly written to be
idempotent. Calling the script multiple times for the same timestamp
(i.e. with the ``--at-time`` command line argument) will generate the
appropriate ``Transaction`` and ``Charge`` only once.
**Example cron setup**:
.. code-block:: bash
$ cat /etc/cron.daily/renewals
#!/bin/sh
cd /var/*mysite* && python manage.py renewals
"""
import logging, time
from django.core.management.base import BaseCommand
from ...models import get_broker
from ...renewals import (create_charges_for_balance, complete_charges,
extend_subscriptions, recognize_income, trigger_expiration_notices)
from ...utils import datetime_or_now
from ... import settings
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Recognized backlog, extends subscription and charge due balance
on credit cards"""
def add_arguments(self, parser):
parser.add_argument('--dry-run', action='store_true',
dest='dry_run', default=False,
help='Do not commit transactions nor submit charges to processor')
parser.add_argument('--no-charges', action='store_true',
dest='no_charges', default=False,
help='Do not submit charges to processor')
parser.add_argument('--at-time', action='store',
dest='at_time', default=None,
help='Specifies the time at which the command runs')
def handle(self, *args, **options):
#pylint:disable=broad-except
dry_run = options['dry_run']
no_charges = options['no_charges']
end_period = datetime_or_now(options['at_time'])
if dry_run:
LOGGER.warning("dry_run: no changes will be committed.")
if no_charges:
LOGGER.warning("no_charges: no charges will be submitted.")
try:
recognize_income(end_period, dry_run=dry_run)
except Exception as err:
LOGGER.exception("recognize_income: %s", err)
try:
extend_subscriptions(end_period, dry_run=dry_run)
except Exception as err:
LOGGER.exception("extend_subscriptions: %s", err)
try:
create_charges_for_balance(
end_period, dry_run=dry_run or no_charges)
except Exception as err:
LOGGER.exception(
"Unable to create charges for balance on broker '%s'",
get_broker())
if not (dry_run or no_charges):
# Let's complete the in flight charges after we have given
# them time to settle.
time.sleep(30)
complete_charges()
# Trigger 'expires soon' notifications
expiration_periods = settings.EXPIRE_NOTICE_DAYS
for period in expiration_periods:
trigger_expiration_notices(
end_period, nb_days=period, dry_run=dry_run)
| 40.061947
| 78
| 0.69958
|
import logging, time
from django.core.management.base import BaseCommand
from ...models import get_broker
from ...renewals import (create_charges_for_balance, complete_charges,
extend_subscriptions, recognize_income, trigger_expiration_notices)
from ...utils import datetime_or_now
from ... import settings
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = """Recognized backlog, extends subscription and charge due balance
on credit cards"""
def add_arguments(self, parser):
parser.add_argument('--dry-run', action='store_true',
dest='dry_run', default=False,
help='Do not commit transactions nor submit charges to processor')
parser.add_argument('--no-charges', action='store_true',
dest='no_charges', default=False,
help='Do not submit charges to processor')
parser.add_argument('--at-time', action='store',
dest='at_time', default=None,
help='Specifies the time at which the command runs')
def handle(self, *args, **options):
dry_run = options['dry_run']
no_charges = options['no_charges']
end_period = datetime_or_now(options['at_time'])
if dry_run:
LOGGER.warning("dry_run: no changes will be committed.")
if no_charges:
LOGGER.warning("no_charges: no charges will be submitted.")
try:
recognize_income(end_period, dry_run=dry_run)
except Exception as err:
LOGGER.exception("recognize_income: %s", err)
try:
extend_subscriptions(end_period, dry_run=dry_run)
except Exception as err:
LOGGER.exception("extend_subscriptions: %s", err)
try:
create_charges_for_balance(
end_period, dry_run=dry_run or no_charges)
except Exception as err:
LOGGER.exception(
"Unable to create charges for balance on broker '%s'",
get_broker())
if not (dry_run or no_charges):
# them time to settle.
time.sleep(30)
complete_charges()
# Trigger 'expires soon' notifications
expiration_periods = settings.EXPIRE_NOTICE_DAYS
for period in expiration_periods:
trigger_expiration_notices(
end_period, nb_days=period, dry_run=dry_run)
| true
| true
|
f702ea2613d3b67a2caf45adaefe4207ccb72a62
| 3,283
|
py
|
Python
|
Django-apiTest/polls/quickstart.py
|
hsuyeemon/Testing
|
3ff0e46baa9ce8db446d44cfc10b0cc8ef3a4ef0
|
[
"Apache-2.0"
] | 1
|
2020-02-18T06:06:24.000Z
|
2020-02-18T06:06:24.000Z
|
Django-apiTest/polls/quickstart.py
|
hsuyeemon/Testing
|
3ff0e46baa9ce8db446d44cfc10b0cc8ef3a4ef0
|
[
"Apache-2.0"
] | 4
|
2021-05-10T18:47:55.000Z
|
2022-02-26T19:48:52.000Z
|
Django-apiTest/polls/quickstart.py
|
hsuyeemon/Testing
|
3ff0e46baa9ce8db446d44cfc10b0cc8ef3a4ef0
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar']
def main():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
print(flow)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
# Call the Calendar API
#now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
#print('Getting the upcoming 10 events')
#events_result = service.events().list(calendarId='primary', timeMin=now,
# maxResults=10, singleEvents=True,
# orderBy='startTime').execute()
#events = events_result.get('items', [])
#if not events:
# print('No upcoming events found.')
#for event in events:
# start = event['start'].get('dateTime', event['start'].get('date'))
# print(start, event['summary'])
print("Creating events")
# Refer to the Python quickstart on how to setup the environment:
# https://developers.google.com/calendar/quickstart/python
# Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any
# stored credentials.
event = {
'summary': 'Google I/O 2019',
'location': '800 Howard St., San Francisco, CA 94103',
'description': 'A chance to hear more about Google\'s developer products.',
'start': {
'dateTime': '2019-08-28T09:00:00-07:00',
'timeZone': 'America/Los_Angeles',
},
'end': {
'dateTime': '2019-09-01T17:00:00-07:00',
'timeZone': 'America/Los_Angeles',
},
'recurrence': [
'RRULE:FREQ=DAILY;COUNT=2'
],
'attendees': [
{'email': 'lpage@example.com'},
{'email': 'sbrin@example.com'},
],
'reminders': {
'useDefault': False,
'overrides': [
{'method': 'email', 'minutes': 24 * 60},
{'method': 'popup', 'minutes': 10},
],
},
}
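    # events().insert() returns the created event resource; its 'htmlLink'
    # field points at the event in the Calendar web UI.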
event = service.events().insert(calendarId='primary', body=event).execute()
    print('Event created: %s' % event.get('htmlLink'))
if __name__ == '__main__':
main()
| 35.301075
| 81
| 0.613463
|
from __future__ import print_function
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/calendar']
def main():
creds = None
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
print(flow)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
# Call the Calendar API
#now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
#print('Getting the upcoming 10 events')
#events_result = service.events().list(calendarId='primary', timeMin=now,
# maxResults=10, singleEvents=True,
# orderBy='startTime').execute()
#events = events_result.get('items', [])
#if not events:
# print('No upcoming events found.')
#for event in events:
# start = event['start'].get('dateTime', event['start'].get('date'))
# print(start, event['summary'])
print("Creating events")
# Refer to the Python quickstart on how to setup the environment:
# https://developers.google.com/calendar/quickstart/python
# Change the scope to 'https://www.googleapis.com/auth/calendar' and delete any
# stored credentials.
event = {
'summary': 'Google I/O 2019',
'location': '800 Howard St., San Francisco, CA 94103',
'description': 'A chance to hear more about Google\'s developer products.',
'start': {
'dateTime': '2019-08-28T09:00:00-07:00',
'timeZone': 'America/Los_Angeles',
},
'end': {
'dateTime': '2019-09-01T17:00:00-07:00',
'timeZone': 'America/Los_Angeles',
},
'recurrence': [
'RRULE:FREQ=DAILY;COUNT=2'
],
'attendees': [
{'email': 'lpage@example.com'},
{'email': 'sbrin@example.com'},
],
'reminders': {
'useDefault': False,
'overrides': [
{'method': 'email', 'minutes': 24 * 60},
{'method': 'popup', 'minutes': 10},
],
},
}
event = service.events().insert(calendarId='primary', body=event).execute()
    print('Event created: %s' % event.get('htmlLink'))
if __name__ == '__main__':
main()
| true
| true
|
f702eaa1c80314a8254a6bb995b9a1193fd51d26
| 486
|
py
|
Python
|
yama/shard.py
|
vitovitolo/yama
|
1d96530ac0b2700838dd9c65e6245e35b7f639cd
|
[
"MIT"
] | 1
|
2021-10-30T00:54:34.000Z
|
2021-10-30T00:54:34.000Z
|
yama/shard.py
|
vitovitolo/yama
|
1d96530ac0b2700838dd9c65e6245e35b7f639cd
|
[
"MIT"
] | null | null | null |
yama/shard.py
|
vitovitolo/yama
|
1d96530ac0b2700838dd9c65e6245e35b7f639cd
|
[
"MIT"
] | null | null | null |
import database
def load_shard_from_db(conf):
#TODO: load shard from cache if exists
shards = database.load_shard(conf)
return shards
def get_shard(shards, url):
"""
    Hash function for the sharding scheme
returns a dict with hostname and table name
Eg: s = { 'hostname': 'node1', 'table_name': 'url_s1'}
"""
if not shards:
return {}
else:
return shards[hash(str(url['hostname'])+str(url['port'])+str(url['path'])) % len(shards)]
| 22.090909
| 97
| 0.62963
|
import database
def load_shard_from_db(conf):
shards = database.load_shard(conf)
return shards
def get_shard(shards, url):
if not shards:
return {}
else:
return shards[hash(str(url['hostname'])+str(url['port'])+str(url['path'])) % len(shards)]
| true
| true
|
f702ebe9e7c9ad39441f6932c5e3341872fd5138
| 298
|
py
|
Python
|
dragon/plugins/help.py
|
sahuang/DragonBot-ReDive
|
3efe51db42aa16f209077d082e9e148f2571c014
|
[
"MIT"
] | 10
|
2020-06-30T07:31:52.000Z
|
2022-02-22T01:43:17.000Z
|
dragon/plugins/help.py
|
sahuang/DragonBot
|
3efe51db42aa16f209077d082e9e148f2571c014
|
[
"MIT"
] | 1
|
2022-01-13T02:52:24.000Z
|
2022-01-13T02:52:24.000Z
|
dragon/plugins/help.py
|
sahuang/DragonBot
|
3efe51db42aa16f209077d082e9e148f2571c014
|
[
"MIT"
] | 5
|
2020-07-30T08:16:32.000Z
|
2021-09-08T03:16:50.000Z
|
from nonebot import on_command, CommandSession
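# The !help command (aliases !h, !帮助) replies with a pre-rendered manual
# image; [CQ:image,file=...] is a CQ code understood by CoolQ/OneBot-style
# clients for embedding images.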
@on_command('help', aliases=('h', '帮助'), only_to_me=False)
async def manual(session: CommandSession):
    await session.send('[CQ:image,file=/admin/manual.png]')
@manual.args_parser
async def _(session: CommandSession):
# do nothing
return
| 27.090909
| 60
| 0.734899
|
from nonebot import on_command, CommandSession
@on_command('help', aliases=('h', '帮助'), only_to_me=False)
async def manual(session: CommandSession):
    await session.send('[CQ:image,file=/admin/manual.png]')
@manual.args_parser
async def _(session: CommandSession):
return
| true
| true
|
f702ed24451be873a3ad174d6df0e922afb0bb2f
| 323
|
py
|
Python
|
web/pipeline/migrations/0005_remove_hospital_sv_name.py
|
stevenstuber/CIT
|
8c485e72084c06da6db45da1cb402bac26411ec2
|
[
"Apache-2.0"
] | 10
|
2020-11-12T15:13:40.000Z
|
2022-03-05T22:33:08.000Z
|
web/pipeline/migrations/0005_remove_hospital_sv_name.py
|
stevenstuber/CIT
|
8c485e72084c06da6db45da1cb402bac26411ec2
|
[
"Apache-2.0"
] | 28
|
2020-07-17T16:33:55.000Z
|
2022-03-21T16:24:25.000Z
|
web/pipeline/migrations/0005_remove_hospital_sv_name.py
|
stevenstuber/CIT
|
8c485e72084c06da6db45da1cb402bac26411ec2
|
[
"Apache-2.0"
] | 5
|
2020-11-02T23:39:53.000Z
|
2022-03-01T19:09:45.000Z
|
# Generated by Django 2.2.13 on 2020-06-30 06:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0004_hospital'),
]
operations = [
migrations.RemoveField(
model_name='hospital',
name='sv_name',
),
]
| 17.944444
| 48
| 0.585139
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0004_hospital'),
]
operations = [
migrations.RemoveField(
model_name='hospital',
name='sv_name',
),
]
| true
| true
|
f702efe7fbf48cf708ca30d341f9b765475630f0
| 775
|
py
|
Python
|
PollsDjango/app/urls.py
|
Bhaskers-Blu-Org2/PTVS-Samples
|
a82d0699bd2fd3f0f3a7a452fef930100776cfc7
|
[
"Apache-2.0"
] | 27
|
2015-05-05T13:06:20.000Z
|
2019-04-21T21:58:48.000Z
|
PollsDjango/app/urls.py
|
microsoft/PTVS-Samples
|
a82d0699bd2fd3f0f3a7a452fef930100776cfc7
|
[
"Apache-2.0"
] | 5
|
2015-06-09T22:10:14.000Z
|
2018-04-11T18:56:17.000Z
|
PollsDjango/app/urls.py
|
Microsoft/PTVS-Samples
|
a82d0699bd2fd3f0f3a7a452fef930100776cfc7
|
[
"Apache-2.0"
] | 11
|
2015-04-24T19:11:52.000Z
|
2017-09-13T03:46:22.000Z
|
"""
Definition of URLs for poll viewing and voting.
"""
from django.conf.urls import url
from app.models import Poll
import app.views
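# Three class-based views (list, detail, results) plus one function view for
# voting; the list view is limited to the five most recently published polls.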
urlpatterns = [
url(r'^$',
app.views.PollListView.as_view(
queryset=Poll.objects.order_by('-pub_date')[:5],
context_object_name='latest_poll_list',
template_name='app/index.html',),
name='home'),
url(r'^(?P<pk>\d+)/$',
app.views.PollDetailView.as_view(
template_name='app/details.html'),
name='detail'),
url(r'^(?P<pk>\d+)/results/$',
app.views.PollResultsView.as_view(
template_name='app/results.html'),
name='results'),
url(r'^(?P<poll_id>\d+)/vote/$', app.views.vote, name='vote'),
]
| 28.703704
| 67
| 0.572903
|
from django.conf.urls import url
from app.models import Poll
import app.views
urlpatterns = [
url(r'^$',
app.views.PollListView.as_view(
queryset=Poll.objects.order_by('-pub_date')[:5],
context_object_name='latest_poll_list',
template_name='app/index.html',),
name='home'),
url(r'^(?P<pk>\d+)/$',
app.views.PollDetailView.as_view(
template_name='app/details.html'),
name='detail'),
url(r'^(?P<pk>\d+)/results/$',
app.views.PollResultsView.as_view(
template_name='app/results.html'),
name='results'),
url(r'^(?P<poll_id>\d+)/vote/$', app.views.vote, name='vote'),
]
| true
| true
|
f702f19104a3c185b7314d4b033a56a62d07c064
| 2,770
|
py
|
Python
|
tacker/api/validation/__init__.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 116
|
2015-10-18T02:57:08.000Z
|
2022-03-15T04:09:18.000Z
|
tacker/api/validation/__init__.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 6
|
2016-11-07T22:15:54.000Z
|
2021-05-09T06:13:08.000Z
|
tacker/api/validation/__init__.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 166
|
2015-10-20T15:31:52.000Z
|
2021-11-12T08:39:49.000Z
|
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body validating middleware.
"""
import functools
import webob
from tacker.api.validation import validators
from tacker.common import exceptions
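# NOTE: `_` (the i18n translation function used in the error messages below)
# is assumed to be provided elsewhere (e.g. installed as a builtin or imported
# from tacker's i18n module); it is not imported in this snippet.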
def schema(request_body_schema):
"""Register a schema to validate request body.
    The registered schema is used to validate the request body just before
    the API method executes.
:param dict request_body_schema: a schema to validate request body
"""
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
schema_validator = validators._SchemaValidator(
request_body_schema)
try:
schema_validator.validate(kwargs['body'])
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Malformed request body"))
return func(*args, **kwargs)
return wrapper
return add_validator
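# A minimal (hypothetical) usage sketch -- the names are illustrative only:
#
#     @validation.schema(create_resource_request_schema)
#     def create(self, request, body):
#         ...
#
# The wrapper validates kwargs['body'] against the JSON schema before the
# decorated method runs, and turns a missing body into an HTTP 400 response.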
def query_schema(query_params_schema):
"""Register a schema to validate request query parameters.
    The registered schema is used to validate the request query parameters
    just before the API method executes.
:param query_params_schema: A dict, the JSON-Schema for validating the
query parameters.
"""
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
            # NOTE(tpatil): The second positional argument of the decorated
            # method should always be 'request'.
if 'request' in kwargs:
req = kwargs['request']
else:
req = args[1]
try:
req.GET.dict_of_lists()
except UnicodeDecodeError:
msg = _('Query string is not UTF-8 encoded')
raise exceptions.ValidationError(msg)
query_opts = {}
query_opts.update(req.GET)
schema_validator = validators._SchemaValidator(
query_params_schema)
schema_validator.validate(query_opts)
return func(*args, **kwargs)
return wrapper
return add_validator
| 30.108696
| 78
| 0.638267
|
import functools
import webob
from tacker.api.validation import validators
from tacker.common import exceptions
def schema(request_body_schema):
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
schema_validator = validators._SchemaValidator(
request_body_schema)
try:
schema_validator.validate(kwargs['body'])
except KeyError:
raise webob.exc.HTTPBadRequest(
explanation=_("Malformed request body"))
return func(*args, **kwargs)
return wrapper
return add_validator
def query_schema(query_params_schema):
def add_validator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if 'request' in kwargs:
req = kwargs['request']
else:
req = args[1]
try:
req.GET.dict_of_lists()
except UnicodeDecodeError:
msg = _('Query string is not UTF-8 encoded')
raise exceptions.ValidationError(msg)
query_opts = {}
query_opts.update(req.GET)
schema_validator = validators._SchemaValidator(
query_params_schema)
schema_validator.validate(query_opts)
return func(*args, **kwargs)
return wrapper
return add_validator
| true
| true
|
f702f1f611d16ff3b225453713bd110e6a8457ef
| 9,165
|
py
|
Python
|
tests/scripts/thread-cert/Cert_5_5_02_LeaderReboot.py
|
BLUEGRioT/openthread
|
04a6a9b925db13a52790cc1b12cb2d854f222799
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/Cert_5_5_02_LeaderReboot.py
|
BLUEGRioT/openthread
|
04a6a9b925db13a52790cc1b12cb2d854f222799
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/Cert_5_5_02_LeaderReboot.py
|
BLUEGRioT/openthread
|
04a6a9b925db13a52790cc1b12cb2d854f222799
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_PARENT_RESPONSE, MLE_CHILD_UPDATE_RESPONSE, MLE_CHILD_ID_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_LINK_REQUEST, MLE_LINK_ACCEPT_AND_REQUEST, ADDR_SOL_URI, SOURCE_ADDRESS_TLV, MODE_TLV, TIMEOUT_TLV, CHALLENGE_TLV, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MLE_FRAME_COUNTER_TLV, ROUTE64_TLV, ADDRESS16_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, TLV_REQUEST_TLV, SCAN_MASK_TLV, CONNECTIVITY_TLV, LINK_MARGIN_TLV, VERSION_TLV, ADDRESS_REGISTRATION_TLV, ACTIVE_TIMESTAMP_TLV
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_5_2_LeaderReboot(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [ROUTER]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [LEADER, ED]
},
ED: {
'name': 'MED',
'is_mtd': True,
'mode': 'rsn',
'panid': 0xface,
'whitelist': [ROUTER]
},
}
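    # The topology is a chain LEADER -- ROUTER -- MED: each node whitelists
    # only its direct neighbour(s), so the MED reaches the leader only via
    # the router.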
def _setUpLeader(self):
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_router_selection_jitter(1)
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[LEADER].reset()
self._setUpLeader()
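        # Allow enough time (140 s here) for ROUTER to detect the missing
        # leader and promote itself as leader of a new partition.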
self.simulator.go(140)
self.assertEqual(self.nodes[ROUTER].get_state(), 'leader')
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'router')
addrs = self.nodes[ED].get_addrs()
for addr in addrs:
self.assertTrue(self.nodes[ROUTER].ping(addr))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
ROUTER = pv.vars['ROUTER']
MED = pv.vars['MED']
leader_pkts = pkts.filter_wpan_src64(LEADER)
_rpkts = pkts.filter_wpan_src64(ROUTER)
# Step 2: The DUT MUST send properly formatted MLE Advertisements
_rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
_lpkts = leader_pkts.range(_rpkts.index)
_lpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
# Step 4: Router_1 MUST attempt to reattach to its original partition by
# sending MLE Parent Requests to the All-Routers multicast
# address (FFxx::xx) with a hop limit of 255.
_rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
lreset_start = _rpkts.index
# Step 5: Leader MUST NOT respond to the MLE Parent Requests
_lpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_not_next()
        # Step 6: Router_1 MUST attempt to attach to any other Partition
# within range by sending a MLE Parent Request.
_rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
lreset_stop = _rpkts.index
# Step 3: The Leader MUST stop sending MLE advertisements.
leader_pkts.range(lreset_start, lreset_stop).filter_mle_cmd(MLE_ADVERTISEMENT).must_not_next()
        # Step 7: Router_1 MUST take over the leader role of a new partition
        # and begin transmitting MLE Advertisements
with _rpkts.save_index():
_rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
# Step 8: Router_1 MUST respond with an MLE Child Update Response,
# with the updated TLVs of the new partition
_rpkts.filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV} < set(p.mle.tlv.type))
# Step 9: The Leader MUST send properly formatted MLE Parent
# Requests to the All-Routers multicast address
_lpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
# Step 10: Router_1 MUST send an MLE Parent Response
_rpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_next().must_verify(
lambda p: {
SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, LINK_LAYER_FRAME_COUNTER_TLV, RESPONSE_TLV, CHALLENGE_TLV,
LINK_MARGIN_TLV, CONNECTIVITY_TLV, VERSION_TLV
} < set(p.mle.tlv.type))
        # Step 11: Leader sends an MLE Child ID Request
_lpkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(
lambda p: {
RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV,
ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV, ACTIVE_TIMESTAMP_TLV
} < set(p.mle.tlv.type))
        # Step 12: Router_1 sends an MLE Child ID Response
_rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV} < set(
p.mle.tlv.type))
        # Step 13: Leader sends an Address Solicit Request
_lpkts.filter_coap_request(ADDR_SOL_URI).must_next().must_verify(
lambda p: p.coap.tlv.ext_mac_addr and p.coap.tlv.rloc16 is not nullField and p.coap.tlv.status != 0)
        # Step 14: Router_1 sends an Address Solicit Response
_rpkts.filter_coap_ack(
ADDR_SOL_URI).must_next().must_verify(lambda p: p.coap.tlv.router_mask_assigned and p.coap.tlv.rloc16 is
not nullField and p.coap.tlv.status == 0)
        # Step 15: Leader sends a Multicast Link Request
_lpkts.filter_mle_cmd(MLE_LINK_REQUEST).must_next().must_verify(
lambda p: {VERSION_TLV, TLV_REQUEST_TLV, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, CHALLENGE_TLV} < set(
p.mle.tlv.type))
        # Step 16: Router_1 sends a Unicast Link Accept
_rpkts.filter_mle_cmd(MLE_LINK_ACCEPT_AND_REQUEST).must_next().must_verify(lambda p: {
VERSION_TLV, SOURCE_ADDRESS_TLV, RESPONSE_TLV, MLE_FRAME_COUNTER_TLV, LINK_MARGIN_TLV, LEADER_DATA_TLV
} < set(p.mle.tlv.type))
        # Step 17: Router_1 MUST respond with an ICMPv6 Echo Reply
_rpkts.filter_ping_request().filter_wpan_dst64(MED).must_next()
if __name__ == '__main__':
unittest.main()
| 46.522843
| 536
| 0.687179
|
import unittest
import config
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_PARENT_RESPONSE, MLE_CHILD_UPDATE_RESPONSE, MLE_CHILD_ID_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_LINK_REQUEST, MLE_LINK_ACCEPT_AND_REQUEST, ADDR_SOL_URI, SOURCE_ADDRESS_TLV, MODE_TLV, TIMEOUT_TLV, CHALLENGE_TLV, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MLE_FRAME_COUNTER_TLV, ROUTE64_TLV, ADDRESS16_TLV, LEADER_DATA_TLV, NETWORK_DATA_TLV, TLV_REQUEST_TLV, SCAN_MASK_TLV, CONNECTIVITY_TLV, LINK_MARGIN_TLV, VERSION_TLV, ADDRESS_REGISTRATION_TLV, ACTIVE_TIMESTAMP_TLV
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_5_2_LeaderReboot(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [ROUTER]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rsdn',
'panid': 0xface,
'router_selection_jitter': 1,
'whitelist': [LEADER, ED]
},
ED: {
'name': 'MED',
'is_mtd': True,
'mode': 'rsn',
'panid': 0xface,
'whitelist': [ROUTER]
},
}
def _setUpLeader(self):
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_router_selection_jitter(1)
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[LEADER].reset()
self._setUpLeader()
self.simulator.go(140)
self.assertEqual(self.nodes[ROUTER].get_state(), 'leader')
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'router')
addrs = self.nodes[ED].get_addrs()
for addr in addrs:
self.assertTrue(self.nodes[ROUTER].ping(addr))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
ROUTER = pv.vars['ROUTER']
MED = pv.vars['MED']
leader_pkts = pkts.filter_wpan_src64(LEADER)
_rpkts = pkts.filter_wpan_src64(ROUTER)
_rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
_lpkts = leader_pkts.range(_rpkts.index)
_lpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
lreset_start = _rpkts.index
_lpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_not_next()
_rpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
lreset_stop = _rpkts.index
leader_pkts.range(lreset_start, lreset_stop).filter_mle_cmd(MLE_ADVERTISEMENT).must_not_next()
with _rpkts.save_index():
_rpkts.filter_mle_cmd(MLE_ADVERTISEMENT).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ROUTE64_TLV} == set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, MODE_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV} < set(p.mle.tlv.type))
_lpkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_PARENT_RESPONSE).must_next().must_verify(
lambda p: {
SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, LINK_LAYER_FRAME_COUNTER_TLV, RESPONSE_TLV, CHALLENGE_TLV,
LINK_MARGIN_TLV, CONNECTIVITY_TLV, VERSION_TLV
} < set(p.mle.tlv.type))
_lpkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(
lambda p: {
RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV,
ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV, ACTIVE_TIMESTAMP_TLV
} < set(p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next().must_verify(
lambda p: {SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV} < set(
p.mle.tlv.type))
_lpkts.filter_coap_request(ADDR_SOL_URI).must_next().must_verify(
lambda p: p.coap.tlv.ext_mac_addr and p.coap.tlv.rloc16 is not nullField and p.coap.tlv.status != 0)
_rpkts.filter_coap_ack(
ADDR_SOL_URI).must_next().must_verify(lambda p: p.coap.tlv.router_mask_assigned and p.coap.tlv.rloc16 is
not nullField and p.coap.tlv.status == 0)
_lpkts.filter_mle_cmd(MLE_LINK_REQUEST).must_next().must_verify(
lambda p: {VERSION_TLV, TLV_REQUEST_TLV, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, CHALLENGE_TLV} < set(
p.mle.tlv.type))
_rpkts.filter_mle_cmd(MLE_LINK_ACCEPT_AND_REQUEST).must_next().must_verify(lambda p: {
VERSION_TLV, SOURCE_ADDRESS_TLV, RESPONSE_TLV, MLE_FRAME_COUNTER_TLV, LINK_MARGIN_TLV, LEADER_DATA_TLV
} < set(p.mle.tlv.type))
_rpkts.filter_ping_request().filter_wpan_dst64(MED).must_next()
if __name__ == '__main__':
unittest.main()
| true
| true
|
f702f26251ff1e2e6cd0c0ea57344ea4624619b3
| 10,831
|
py
|
Python
|
libaraboly/ArabolyFree.py
|
lalbornoz/araboly
|
fd463004426800e39800b4446f950abcbaececc9
|
[
"MIT"
] | 4
|
2018-04-08T21:41:43.000Z
|
2021-11-24T18:26:34.000Z
|
libaraboly/ArabolyFree.py
|
lalbornoz/araboly
|
fd463004426800e39800b4446f950abcbaececc9
|
[
"MIT"
] | null | null | null |
libaraboly/ArabolyFree.py
|
lalbornoz/araboly
|
fd463004426800e39800b4446f950abcbaececc9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Araboly 2000 Advanced Server SP4 -- everyone's favourite board game... with IRC support and fancy colours!
# Copyright (c) 2018 Lucio Andrés Illanes Albornoz <lucio@lucioillanes.de>
# This project is licensed under the terms of the MIT licence.
#
from ArabolyGenerals import ArabolyGenerals
from ArabolyMonad import ArabolyDecorator
from ArabolyTypeClass import ArabolyTypeClass
from ArabolyState import ArabolyGameState, ArabolyOutputLevel, ArabolyStringType
from ArabolyTrade import ArabolyTrade
import copy, os, sys, yaml
@ArabolyDecorator()
class ArabolyFree(ArabolyTypeClass):
"""XXX"""
# {{{ dispatch_board(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_board(args, channel, context, output, src, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) \
or src not in context.players["byName"]:
status = False
else:
output = ArabolyGenerals._board(channel, context, output, src)
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_bugcheck(channel, context, srcFull, status): XXX
@staticmethod
def dispatch_bugcheck(channel, context, srcFull, status):
if not ArabolyGenerals._authorised(channel, context, srcFull):
status = False
else:
snapshotPath = os.path.join("assets", "savefiles", "snapshot.dmp.{}".format(context.clientParams["hostname"]))
print("Saving game snapshot to {}!".format(os.path.join("assets", "savefiles", snapshotPath)))
with open(snapshotPath, "w+") as fileObject:
yaml.dump(context, fileObject)
sys.exit(1)
return channel, context, srcFull, status
# }}}
# {{{ dispatch_help(channel, context): XXX
@staticmethod
def dispatch_help(channel, context, output):
for helpLine in context.graphics["help"]:
output = ArabolyGenerals._push_output(channel, context, output, helpLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
return channel, context, output
# }}}
# {{{ dispatch_join(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_join(args, channel, context, output, src, status):
if context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.SETUP:
status = False
elif src in context.players["byName"] \
or len(args):
status = False
else:
newNum = None
for otherNum in range(len(context.players["numMap"])):
if context.players["numMap"][otherNum] == None:
newNum = otherNum; break;
if newNum == None:
status = False
else:
context.players["byName"][src] = {"field":0, "name":src, "num":newNum, "properties":[], "wallet":1500}
context.players["numMap"][newNum] = src
output = ArabolyGenerals._push_output(channel, context, output, "Player {src} joins Araboly game!".format(**locals()))
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_kick(args, channel, context, output, srcFull, status): XXX
@staticmethod
def dispatch_kick(args, channel, context, output, srcFull, status):
if context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.SETUP:
if len(args) != 1 or len(args[0]) < 1 \
or args[0] not in context.players["byName"]:
status = False
elif ArabolyGenerals._authorised(channel, context, srcFull):
otherPlayers = [args[0]]
output = ArabolyGenerals._push_output(channel, context, output, "Kicking {args[0]} from current Araboly game!".format(**locals()))
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, srcFull, status
# }}}
# {{{ dispatch_melp(channel, context, output): XXX
@staticmethod
def dispatch_melp(channel, context, output):
for explosionLine in context.graphics["explosion"]:
output = ArabolyGenerals._push_output(channel, context, output, explosionLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
output = ArabolyGenerals._push_output(channel, context, output, "\u0001ACTION explodes.\u0001", outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
return channel, context, output
# }}}
# {{{ dispatch_part(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_part(args, channel, context, output, src, status):
if context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.SETUP:
if len(args) > 0 \
or src not in context.players["byName"]:
status = False
else:
otherPlayers = [src]
output = ArabolyGenerals._push_output(channel, context, output, "Player {src} parts Araboly game!".format(**locals()))
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_save(args, channel, context, output, srcFull, status): XXX
    @staticmethod
    def dispatch_save(args, channel, context, output, srcFull, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.BANKRUPTCY \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) != 1 \
or not ArabolyGenerals._authorised(channel, context, srcFull):
status = False
else:
snapshotPath = os.path.join("assets", "savefiles", os.path.basename(args[0]))
output = ArabolyGenerals._push_output(channel, context, output, "Saving snapshot to {snapshotPath}!".format(**locals()))
with open(snapshotPath, "w") as fileObject:
gameSnapshot = copy.deepcopy(context)
delattr(gameSnapshot, "clientParams")
delattr(gameSnapshot, "graphics")
delattr(gameSnapshot, "kades")
yaml.dump(gameSnapshot, fileObject)
output = ArabolyGenerals._push_output(channel, context, output, "Saved snapshot to {snapshotPath}!".format(**locals()))
return args, channel, context, output, srcFull, status
# }}}
# {{{ dispatch_status(args, channel, context, output, src, status): XXX
    @staticmethod
    def dispatch_status(args, channel, context, output, src, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.BANKRUPTCY \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) == 0:
statusPlayer = src
elif len(args) == 1:
statusPlayer = args[0]
else:
status = False
if status:
if not statusPlayer in context.players["byName"].keys():
status = False
else:
playerField = context.board[context.players["byName"][statusPlayer]["field"]]
playerProps = context.players["byName"][statusPlayer]["properties"]
playerWallet = context.players["byName"][statusPlayer]["wallet"]
output = ArabolyGenerals._push_output(channel, context, output, "Araboly status for player {statusPlayer}:".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyGenerals._push_output(channel, context, output, "Field....: {playerField[title]}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyGenerals._push_output(channel, context, output, "Wallet...: ${playerWallet}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
if len(playerProps):
output = ArabolyGenerals._push_output(channel, context, output, "Properties owned:", outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
for playerPropNum in playerProps:
playerProp = context.board[playerPropNum]
mortgagedString = " (\u001fMORTGAGED\u001f)" if playerProp["mortgaged"] else ""
developmentsList = []
for levelNum in range(playerProp["level"] + 1):
developmentsList += playerProp["strings"][ArabolyStringType.NAME][levelNum]
developmentsString = ", level {}, developments: {}".format(playerProp["level"], ", ".join(developmentsList))
output = ArabolyGenerals._push_output(channel, context, output, "\u0003{:02d}${}{} (#{}) -- {}{}".format(playerProp["colourMiRC"], playerProp["price"], mortgagedString, playerProp["field"], playerProp["title"], developmentsString), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyTrade._status(channel, context, output, statusPlayer)
output = ArabolyGenerals._push_output(channel, context, output, "Current turn: {}".format(context.players["numMap"][context.players["curNum"]]), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_stop(args, channel, context, output, src, srcFull, status): XXX
@staticmethod
def dispatch_stop(args, channel, context, output, src, srcFull, status):
if context.state == ArabolyGameState.AUCTION \
or context.state == ArabolyGameState.BANKRUPTCY \
or context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.PROPERTY \
or context.state == ArabolyGameState.SETUP:
if len(args) > 0:
status = False
elif ArabolyGenerals._authorised(channel, context, srcFull):
otherPlayers = list(context.players["byName"].keys())
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, src, srcFull, status
# }}}
# vim:expandtab foldmethod=marker sw=4 ts=4 tw=0
| 56.119171
| 301
| 0.627366
|
# Copyright (c) 2018 Lucio Andrés Illanes Albornoz <lucio@lucioillanes.de>
# This project is licensed under the terms of the MIT licence.
#
from ArabolyGenerals import ArabolyGenerals
from ArabolyMonad import ArabolyDecorator
from ArabolyTypeClass import ArabolyTypeClass
from ArabolyState import ArabolyGameState, ArabolyOutputLevel, ArabolyStringType
from ArabolyTrade import ArabolyTrade
import copy, os, sys, yaml
@ArabolyDecorator()
class ArabolyFree(ArabolyTypeClass):
# {{{ dispatch_board(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_board(args, channel, context, output, src, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) \
or src not in context.players["byName"]:
status = False
else:
output = ArabolyGenerals._board(channel, context, output, src)
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_bugcheck(channel, context, srcFull, status): XXX
@staticmethod
def dispatch_bugcheck(channel, context, srcFull, status):
if not ArabolyGenerals._authorised(channel, context, srcFull):
status = False
else:
snapshotPath = os.path.join("assets", "savefiles", "snapshot.dmp.{}".format(context.clientParams["hostname"]))
print("Saving game snapshot to {}!".format(os.path.join("assets", "savefiles", snapshotPath)))
with open(snapshotPath, "w+") as fileObject:
yaml.dump(context, fileObject)
sys.exit(1)
return channel, context, srcFull, status
# }}}
# {{{ dispatch_help(channel, context): XXX
@staticmethod
def dispatch_help(channel, context, output):
for helpLine in context.graphics["help"]:
output = ArabolyGenerals._push_output(channel, context, output, helpLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
return channel, context, output
# }}}
# {{{ dispatch_join(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_join(args, channel, context, output, src, status):
if context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.SETUP:
status = False
elif src in context.players["byName"] \
or len(args):
status = False
else:
newNum = None
for otherNum in range(len(context.players["numMap"])):
if context.players["numMap"][otherNum] == None:
newNum = otherNum; break;
if newNum == None:
status = False
else:
context.players["byName"][src] = {"field":0, "name":src, "num":newNum, "properties":[], "wallet":1500}
context.players["numMap"][newNum] = src
output = ArabolyGenerals._push_output(channel, context, output, "Player {src} joins Araboly game!".format(**locals()))
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_kick(args, channel, context, output, srcFull, status): XXX
@staticmethod
def dispatch_kick(args, channel, context, output, srcFull, status):
if context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.SETUP:
if len(args) != 1 or len(args[0]) < 1 \
or args[0] not in context.players["byName"]:
status = False
elif ArabolyGenerals._authorised(channel, context, srcFull):
otherPlayers = [args[0]]
output = ArabolyGenerals._push_output(channel, context, output, "Kicking {args[0]} from current Araboly game!".format(**locals()))
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, srcFull, status
# }}}
# {{{ dispatch_melp(channel, context, output): XXX
@staticmethod
def dispatch_melp(channel, context, output):
for explosionLine in context.graphics["explosion"]:
output = ArabolyGenerals._push_output(channel, context, output, explosionLine, outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
output = ArabolyGenerals._push_output(channel, context, output, "\u0001ACTION explodes.\u0001", outputLevel=ArabolyOutputLevel.LEVEL_GRAPHICS)
return channel, context, output
# }}}
# {{{ dispatch_part(args, channel, context, output, src, status): XXX
@staticmethod
def dispatch_part(args, channel, context, output, src, status):
if context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.SETUP:
if len(args) > 0 \
or src not in context.players["byName"]:
status = False
else:
otherPlayers = [src]
output = ArabolyGenerals._push_output(channel, context, output, "Player {src} parts Araboly game!".format(**locals()))
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_save(args, channel, context, output, srcFull, status): XXX
def dispatch_save(args, channel, context, output, srcFull, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.BANKRUPTCY \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) != 1 \
or not ArabolyGenerals._authorised(channel, context, srcFull):
status = False
else:
snapshotPath = os.path.join("assets", "savefiles", os.path.basename(args[0]))
output = ArabolyGenerals._push_output(channel, context, output, "Saving snapshot to {snapshotPath}!".format(**locals()))
with open(snapshotPath, "w") as fileObject:
gameSnapshot = copy.deepcopy(context)
delattr(gameSnapshot, "clientParams")
delattr(gameSnapshot, "graphics")
delattr(gameSnapshot, "kades")
yaml.dump(gameSnapshot, fileObject)
output = ArabolyGenerals._push_output(channel, context, output, "Saved snapshot to {snapshotPath}!".format(**locals()))
return args, channel, context, output, srcFull, status
# }}}
# {{{ dispatch_status(args, channel, context, output, src, status): XXX
def dispatch_status(args, channel, context, output, src, status):
if context.state != ArabolyGameState.AUCTION \
and context.state != ArabolyGameState.BANKRUPTCY \
and context.state != ArabolyGameState.GAME \
and context.state != ArabolyGameState.PROPERTY:
status = False
elif len(args) == 0:
statusPlayer = src
elif len(args) == 1:
statusPlayer = args[0]
else:
status = False
if status:
if not statusPlayer in context.players["byName"].keys():
status = False
else:
playerField = context.board[context.players["byName"][statusPlayer]["field"]]
playerProps = context.players["byName"][statusPlayer]["properties"]
playerWallet = context.players["byName"][statusPlayer]["wallet"]
output = ArabolyGenerals._push_output(channel, context, output, "Araboly status for player {statusPlayer}:".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyGenerals._push_output(channel, context, output, "Field....: {playerField[title]}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyGenerals._push_output(channel, context, output, "Wallet...: ${playerWallet}".format(**locals()), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
if len(playerProps):
output = ArabolyGenerals._push_output(channel, context, output, "Properties owned:", outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
for playerPropNum in playerProps:
playerProp = context.board[playerPropNum]
mortgagedString = " (\u001fMORTGAGED\u001f)" if playerProp["mortgaged"] else ""
developmentsList = []
for levelNum in range(playerProp["level"] + 1):
developmentsList += playerProp["strings"][ArabolyStringType.NAME][levelNum]
developmentsString = ", level {}, developments: {}".format(playerProp["level"], ", ".join(developmentsList))
output = ArabolyGenerals._push_output(channel, context, output, "\u0003{:02d}${}{} (#{}) -- {}{}".format(playerProp["colourMiRC"], playerProp["price"], mortgagedString, playerProp["field"], playerProp["title"], developmentsString), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
output = ArabolyTrade._status(channel, context, output, statusPlayer)
output = ArabolyGenerals._push_output(channel, context, output, "Current turn: {}".format(context.players["numMap"][context.players["curNum"]]), outputLevel=ArabolyOutputLevel.LEVEL_NODELAY)
return args, channel, context, output, src, status
# }}}
# {{{ dispatch_stop(args, channel, context, output, src, srcFull, status): XXX
@staticmethod
def dispatch_stop(args, channel, context, output, src, srcFull, status):
if context.state == ArabolyGameState.AUCTION \
or context.state == ArabolyGameState.BANKRUPTCY \
or context.state == ArabolyGameState.GAME \
or context.state == ArabolyGameState.PROPERTY \
or context.state == ArabolyGameState.SETUP:
if len(args) > 0:
status = False
elif ArabolyGenerals._authorised(channel, context, srcFull):
otherPlayers = list(context.players["byName"].keys())
context, output = ArabolyGenerals._remove_players(channel, context, output, otherPlayers)
else:
status = False
return args, channel, context, output, src, srcFull, status
# }}}
# vim:expandtab foldmethod=marker sw=4 ts=4 tw=0
| true
| true
|
f702f4845b651ab7fece7302dae0852bbdf157e9
| 528
|
py
|
Python
|
setup.py
|
UnJavaScripter/video-to-ascii
|
f9b1fcafb55782195d36f4d77c4c20f3f08ee95b
|
[
"MIT"
] | null | null | null |
setup.py
|
UnJavaScripter/video-to-ascii
|
f9b1fcafb55782195d36f4d77c4c20f3f08ee95b
|
[
"MIT"
] | null | null | null |
setup.py
|
UnJavaScripter/video-to-ascii
|
f9b1fcafb55782195d36f4d77c4c20f3f08ee95b
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name="video_to_ascii",
version="1.0.6",
author="Joel Ibaceta",
author_email="mail@joelibaceta.com",
description="A simple tool to play a video using ascii characters",
url="https://github.com/joelibaceta/video-to-ascii",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
scripts=['bin/video-to-ascii'],
)
| 31.058824
| 71
| 0.655303
|
import setuptools
setuptools.setup(
name="video_to_ascii",
version="1.0.6",
author="Joel Ibaceta",
author_email="mail@joelibaceta.com",
description="A simple tool to play a video using ascii characters",
url="https://github.com/joelibaceta/video-to-ascii",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
scripts=['bin/video-to-ascii'],
)
| true
| true
|
f702f4bd9851082af9d6bfb0a43ee691a1d3974f
| 1,612
|
py
|
Python
|
testsSDW__copy/agents/trade_agent_tests.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
testsSDW__copy/agents/trade_agent_tests.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
testsSDW__copy/agents/trade_agent_tests.py
|
jomyhuang/sdwle
|
9b6e916567e09c7cba4a171fe0adf0f47009a8c3
|
[
"MIT"
] | null | null | null |
import unittest
from SDWLE.agents.trade.possible_play import PossiblePlays
from SDWLE.cards import Wisp, WarGolem, BloodfenRaptor, RiverCrocolisk, AbusiveSergeant, ArgentSquire
from testsSDW.agents.trade.test_helpers import TestHelpers
from testsSDW.agents.trade.test_case_mixin import TestCaseMixin
class TestTradeAgent(TestCaseMixin, unittest.TestCase):
def test_setup_smoke(self):
game = TestHelpers().make_game()
self.add_minions(game, 0, Wisp(), WarGolem())
self.add_minions(game, 1, BloodfenRaptor())
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
def test_basic_trade(self):
game = TestHelpers().make_game()
self.add_minions(game, 1, Wisp(), WarGolem())
self.add_minions(game, 0, BloodfenRaptor())
self.make_all_active(game)
game.play_single_turn()
self.assert_minions(game.players[1], "War Golem")
self.assert_minions(game.players[0], "Bloodfen Raptor")
def test_buff_target(self):
game = TestHelpers().make_game()
self.add_minions(game, 0, BloodfenRaptor(), RiverCrocolisk())
self.make_all_active(game)
game.players[0].agent.player = game.players[0]
self.add_minions(game, 0, AbusiveSergeant())
game.play_single_turn()
def test_hero_power(self):
game = self.make_game()
cards = self.make_cards(game.current_player, ArgentSquire())
possible_plays = PossiblePlays(cards, 10, allow_hero_power=True)
self.assertEqual(1, len(possible_plays.plays()))
| 35.043478
| 101
| 0.697891
|
import unittest
from SDWLE.agents.trade.possible_play import PossiblePlays
from SDWLE.cards import Wisp, WarGolem, BloodfenRaptor, RiverCrocolisk, AbusiveSergeant, ArgentSquire
from testsSDW.agents.trade.test_helpers import TestHelpers
from testsSDW.agents.trade.test_case_mixin import TestCaseMixin
class TestTradeAgent(TestCaseMixin, unittest.TestCase):
def test_setup_smoke(self):
game = TestHelpers().make_game()
self.add_minions(game, 0, Wisp(), WarGolem())
self.add_minions(game, 1, BloodfenRaptor())
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
def test_basic_trade(self):
game = TestHelpers().make_game()
self.add_minions(game, 1, Wisp(), WarGolem())
self.add_minions(game, 0, BloodfenRaptor())
self.make_all_active(game)
game.play_single_turn()
self.assert_minions(game.players[1], "War Golem")
self.assert_minions(game.players[0], "Bloodfen Raptor")
def test_buff_target(self):
game = TestHelpers().make_game()
self.add_minions(game, 0, BloodfenRaptor(), RiverCrocolisk())
self.make_all_active(game)
game.players[0].agent.player = game.players[0]
self.add_minions(game, 0, AbusiveSergeant())
game.play_single_turn()
def test_hero_power(self):
game = self.make_game()
cards = self.make_cards(game.current_player, ArgentSquire())
possible_plays = PossiblePlays(cards, 10, allow_hero_power=True)
self.assertEqual(1, len(possible_plays.plays()))
| true
| true
|
f702f53a18087827a5f30cf07fb3cd0cba54fd1a
| 6,256
|
py
|
Python
|
src/poliastro/core/perturbations.py
|
kerel-fs/poliastro
|
1ad2074aebb7cf18f507ac44931d1e18fec53dad
|
[
"MIT"
] | null | null | null |
src/poliastro/core/perturbations.py
|
kerel-fs/poliastro
|
1ad2074aebb7cf18f507ac44931d1e18fec53dad
|
[
"MIT"
] | null | null | null |
src/poliastro/core/perturbations.py
|
kerel-fs/poliastro
|
1ad2074aebb7cf18f507ac44931d1e18fec53dad
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy.linalg import norm
from ._jit import jit
@jit
def J2_perturbation(t0, state, k, J2, R):
r"""Calculates J2_perturbation acceleration (km/s2)
.. math::
\vec{p} = \frac{3}{2}\frac{J_{2}\mu R^{2}}{r^{4}}\left [\frac{x}{r}\left ( 5\frac{z^{2}}{r^{2}}-1 \right )\vec{i} + \frac{y}{r}\left ( 5\frac{z^{2}}{r^{2}}-1 \right )\vec{j} + \frac{z}{r}\left ( 5\frac{z^{2}}{r^{2}}-3 \right )\vec{k}\right]
.. versionadded:: 0.9.0
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
J2: float
oblateness factor
R: float
attractor radius
Note
----
The J2 accounts for the oblateness of the attractor. The formula is given in
Howard Curtis, (12.30)
"""
r_vec = state[:3]
r = norm(r_vec)
factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5)
a_x = 5.0 * r_vec[2] ** 2 / r ** 2 - 1
a_y = 5.0 * r_vec[2] ** 2 / r ** 2 - 1
a_z = 5.0 * r_vec[2] ** 2 / r ** 2 - 3
return np.array([a_x, a_y, a_z]) * r_vec * factor
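# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of evaluating the J2 acceleration above. The Earth values
# for k, J2 and R, and the sample state vector, are assumptions chosen for
# illustration only; any consistent km-based values work.
def _example_J2_perturbation():
    k_earth = 398600.4418  # km^3 / s^2, assumed Earth gravitational parameter
    j2_earth = 1.08262668e-3  # assumed Earth oblateness factor
    r_earth = 6378.137  # km, assumed Earth equatorial radius
    state = np.array([7000.0, 0.0, 1000.0, 0.0, 7.5, 0.0])  # [x y z vx vy vz]
    return J2_perturbation(0.0, state, k_earth, j2_earth, r_earth)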
@jit
def J3_perturbation(t0, state, k, J3, R):
r"""Calculates J3_perturbation acceleration (km/s2)
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
J3: float
oblateness factor
R: float
attractor radius
Note
----
The J3 accounts for the oblateness of the attractor. The formula is given in
Howard Curtis, problem 12.8
This perturbation has not been fully validated, see https://github.com/poliastro/poliastro/pull/398
"""
r_vec = state[:3]
r = norm(r_vec)
factor = (1.0 / 2.0) * k * J3 * (R ** 3) / (r ** 5)
cos_phi = r_vec[2] / r
a_x = 5.0 * r_vec[0] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)
a_y = 5.0 * r_vec[1] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)
a_z = 3.0 * (35.0 / 3.0 * cos_phi ** 4 - 10.0 * cos_phi ** 2 + 1)
return np.array([a_x, a_y, a_z]) * factor
@jit
def atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0):
r"""Calculates atmospheric drag acceleration (km/s2)
.. math::
\vec{p} = -\frac{1}{2}\rho v_{rel}\left ( \frac{C_{d}A}{m} \right )\vec{v_{rel}}
.. versionadded:: 0.9.0
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
R : float
radius of the attractor (km)
C_D: float
dimensionless drag coefficient ()
A: float
frontal area of the spacecraft (km^2)
m: float
mass of the spacecraft (kg)
H0 : float
atmospheric scale height, (km)
rho0: float
the exponent density pre-factor, (kg / m^3)
Note
----
This function provides the acceleration due to atmospheric drag. We follow
Howard Curtis, section 12.4
the atmospheric density model is rho(H) = rho0 x exp(-H / H0)
"""
H = norm(state[:3])
v_vec = state[3:]
v = norm(v_vec)
B = C_D * A / m
rho = rho0 * np.exp(-(H - R) / H0)
return -(1.0 / 2.0) * rho * B * v * v_vec
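# --- Editor's illustrative sketch (not part of the original module) ---
# Evaluating the drag acceleration for an assumed spacecraft at roughly
# 300 km altitude. All numbers (C_D, area, mass, scale height, rho0) are
# placeholder assumptions; they must be kept consistent with the km-based
# state vector for the result to be physically meaningful.
def _example_atmospheric_drag():
    state = np.array([6678.0, 0.0, 0.0, 0.0, 7.7, 0.0])  # km, km/s
    return atmospheric_drag(0.0, state, 398600.4418, 6378.137,
                            2.2,    # C_D, dimensionless
                            1e-6,   # A, km^2 (~1 m^2)
                            100.0,  # m, kg
                            8.5,    # H0, km
                            1.225)  # rho0, assumed pre-factor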
@jit
def shadow_function(r_sat, r_sun, R):
r"""Determines whether the satellite is in attractor's shadow, uses algorithm 12.3 from Howard Curtis
Parameters
----------
r_sat : numpy.ndarray
position of the satellite in the frame of attractor (km)
r_sun : numpy.ndarray
position of star in the frame of attractor (km)
R : float
radius of body (attractor) that creates shadow (km)
"""
r_sat_norm = np.sqrt(np.sum(r_sat ** 2))
r_sun_norm = np.sqrt(np.sum(r_sun ** 2))
theta = np.arccos(np.dot(r_sat, r_sun) / r_sat_norm / r_sun_norm)
theta_1 = np.arccos(R / r_sat_norm)
theta_2 = np.arccos(R / r_sun_norm)
return theta < theta_1 + theta_2
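# --- Editor's illustrative sketch (not part of the original module) ---
# Geometry check: with the satellite on the opposite side of the attractor
# from the star, theta exceeds theta_1 + theta_2 and the function returns
# False (i.e. the satellite is shadowed; True means illuminated, which is
# how radiation_pressure below uses it as the nu factor). Values assumed.
def _example_shadow_function():
    r_sat = np.array([-7000.0, 0.0, 0.0])  # km
    r_sun = np.array([1.496e8, 0.0, 0.0])  # km, ~1 AU along +x
    return shadow_function(r_sat, r_sun, 6378.137)  # -> False (in shadow)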
def third_body(t0, state, k, k_third, third_body):
r"""Calculates 3rd body acceleration (km/s2)
.. math::
\vec{p} = \mu_{m}\left ( \frac{\vec{r_{m/s}}}{r_{m/s}^3} - \frac{\vec{r_{m}}}{r_{m}^3} \right )
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
third_body: a callable object returning the position of 3rd body
third body that causes the perturbation
Note
----
This formula is taken from Howard Curtis, section 12.10. As an example, a third body could be
the gravity from the Moon acting on a small satellite.
"""
body_r = third_body(t0)
delta_r = body_r - state[:3]
return k_third * delta_r / norm(delta_r) ** 3 - k_third * body_r / norm(body_r) ** 3
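# --- Editor's illustrative sketch (not part of the original module) ---
# A crude fixed-position "Moon" ephemeris; a real caller would pass a proper
# time-dependent callable. k_moon and all positions are assumed values.
def _example_third_body():
    def moon_position(t0):
        return np.array([384400.0, 0.0, 0.0])  # km, frozen for illustration
    k_moon = 4902.8  # km^3 / s^2, assumed lunar gravitational parameter
    state = np.array([7000.0, 0.0, 0.0, 0.0, 7.5, 0.0])
    return third_body(0.0, state, 398600.4418, k_moon, moon_position)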
def radiation_pressure(t0, state, k, R, C_R, A, m, Wdivc_s, star):
r"""Calculates radiation pressure acceleration (km/s2)
.. math::
\vec{p} = -\nu \frac{S}{c} \left ( \frac{C_{r}A}{m} \right )\frac{\vec{r}}{r}
Parameters
----------
t0 : float
Current time (s)
state : numpy.ndarray
Six component state vector [x, y, z, vx, vy, vz] (km, km/s).
k : float
gravitational constant, (km^3/s^2)
R : float
radius of the attractor
C_R: float
dimensionless radiation pressure coefficient, 1 < C_R < 2 ()
A: float
effective spacecraft area (km^2)
m: float
mass of the spacecraft (kg)
Wdivc_s : float
total star emitted power divided by the speed of light (W * s / km)
star: a callable object returning the position of star in attractor frame
star position
Note
----
This function provides the acceleration due to star light pressure. We follow
Howard Curtis, section 12.9
"""
r_star = star(t0)
r_sat = state[:3]
P_s = Wdivc_s / (norm(r_star) ** 2)
nu = float(shadow_function(r_sat, r_star, R))
return -nu * P_s * (C_R * A / m) * r_star / norm(r_star)
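# --- Editor's illustrative sketch (not part of the original module) ---
# Radiation pressure on an assumed small spacecraft with the star at ~1 AU.
# Wdivc_s here is a placeholder for the star's emitted-power-over-c constant;
# the other numbers are likewise assumptions for illustration.
def _example_radiation_pressure():
    def sun_position(t0):
        return np.array([1.496e8, 0.0, 0.0])  # km
    state = np.array([0.0, 7000.0, 0.0, -7.5, 0.0, 0.0])
    return radiation_pressure(0.0, state, 398600.4418, 6378.137,
                              1.5,      # C_R, dimensionless
                              1e-5,     # A, km^2
                              100.0,    # m, kg
                              1.02e14,  # Wdivc_s, assumed
                              sun_position)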
| 27.438596
| 248
| 0.574329
|
import numpy as np
from numpy.linalg import norm
from ._jit import jit
@jit
def J2_perturbation(t0, state, k, J2, R):
r_vec = state[:3]
r = norm(r_vec)
factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5)
a_x = 5.0 * r_vec[2] ** 2 / r ** 2 - 1
a_y = 5.0 * r_vec[2] ** 2 / r ** 2 - 1
a_z = 5.0 * r_vec[2] ** 2 / r ** 2 - 3
return np.array([a_x, a_y, a_z]) * r_vec * factor
@jit
def J3_perturbation(t0, state, k, J3, R):
r_vec = state[:3]
r = norm(r_vec)
factor = (1.0 / 2.0) * k * J3 * (R ** 3) / (r ** 5)
cos_phi = r_vec[2] / r
a_x = 5.0 * r_vec[0] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)
a_y = 5.0 * r_vec[1] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)
a_z = 3.0 * (35.0 / 3.0 * cos_phi ** 4 - 10.0 * cos_phi ** 2 + 1)
return np.array([a_x, a_y, a_z]) * factor
@jit
def atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0):
H = norm(state[:3])
v_vec = state[3:]
v = norm(v_vec)
B = C_D * A / m
rho = rho0 * np.exp(-(H - R) / H0)
return -(1.0 / 2.0) * rho * B * v * v_vec
@jit
def shadow_function(r_sat, r_sun, R):
r_sat_norm = np.sqrt(np.sum(r_sat ** 2))
r_sun_norm = np.sqrt(np.sum(r_sun ** 2))
theta = np.arccos(np.dot(r_sat, r_sun) / r_sat_norm / r_sun_norm)
theta_1 = np.arccos(R / r_sat_norm)
theta_2 = np.arccos(R / r_sun_norm)
return theta < theta_1 + theta_2
def third_body(t0, state, k, k_third, third_body):
body_r = third_body(t0)
delta_r = body_r - state[:3]
return k_third * delta_r / norm(delta_r) ** 3 - k_third * body_r / norm(body_r) ** 3
def radiation_pressure(t0, state, k, R, C_R, A, m, Wdivc_s, star):
r_star = star(t0)
r_sat = state[:3]
P_s = Wdivc_s / (norm(r_star) ** 2)
nu = float(shadow_function(r_sat, r_star, R))
return -nu * P_s * (C_R * A / m) * r_star / norm(r_star)
| true
| true
|
f702f5a02fde6d1e2a47314c1104f816697796f8
| 2,081
|
py
|
Python
|
ask-smapi-model/ask_smapi_model/v1/isp/purchasable_state.py
|
alexa-labs/alexa-apis-for-python
|
52838be4f57ee1a2479402ea78b1247b56017942
|
[
"Apache-2.0"
] | 90
|
2018-09-19T21:56:42.000Z
|
2022-03-30T11:25:21.000Z
|
ask-smapi-model/ask_smapi_model/v1/isp/purchasable_state.py
|
ishitaojha/alexa-apis-for-python
|
a68f94b7a0e41f819595d6fe56e800403e8a4194
|
[
"Apache-2.0"
] | 11
|
2018-09-23T12:16:48.000Z
|
2021-06-10T19:49:45.000Z
|
ask-smapi-model/ask_smapi_model/v1/isp/purchasable_state.py
|
ishitaojha/alexa-apis-for-python
|
a68f94b7a0e41f819595d6fe56e800403e8a4194
|
[
"Apache-2.0"
] | 28
|
2018-09-19T22:30:38.000Z
|
2022-02-22T22:57:07.000Z
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
class PurchasableState(Enum):
"""
Whether or not the in-skill product is purchasable by customers. A product that is not purchasable will prevent new customers from being prompted to purchase the product. Customers who already own the product will see no effect and continue to have access to the product features.
Allowed enum values: [PURCHASABLE, NOT_PURCHASABLE]
"""
PURCHASABLE = "PURCHASABLE"
NOT_PURCHASABLE = "NOT_PURCHASABLE"
def to_dict(self):
# type: () -> Dict[str, Any]
"""Returns the model properties as a dict"""
result = {self.name: self.value}
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.value)
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (Any) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, PurchasableState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (Any) -> bool
"""Returns true if both objects are not equal"""
return not self == other
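# --- Editor's illustrative sketch (not part of the original model) ---
# Demonstrates the helper methods defined above; the expected values follow
# directly from to_dict and to_str as written.
def _example_purchasable_state():
    state = PurchasableState.PURCHASABLE
    assert state.to_dict() == {"PURCHASABLE": "PURCHASABLE"}
    assert state.to_str() == "'PURCHASABLE'"  # pprint.pformat keeps quotes
    assert state != PurchasableState.NOT_PURCHASABLE
    return state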
| 31.059701
| 284
| 0.672273
|
import pprint
import re
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
class PurchasableState(Enum):
PURCHASABLE = "PURCHASABLE"
NOT_PURCHASABLE = "NOT_PURCHASABLE"
def to_dict(self):
result = {self.name: self.value}
return result
def to_str(self):
return pprint.pformat(self.value)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PurchasableState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f702f6aa007a531bab63935e88b3d97af80e19c3
| 3,874
|
py
|
Python
|
cohesity_management_sdk/models/application_server_info.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/application_server_info.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/application_server_info.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16
|
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.exchange_database_copy_info
import cohesity_management_sdk.models.exchange_database_info
class ApplicationServerInfo(object):
"""Implementation of the 'ApplicationServerInfo' model.
Specifies the Information about the Exchange Server Node.
Attributes:
database_copy_info_list (list of ExchangeDatabaseCopyInfo): Specifies
the list of all the copies of the Exchange databases(that are part
of DAG) that are present on this Exchange Node.
database_info_list (list of ExchangeDatabaseInfo): Specifies the list
of all the databases available on the standalone Exchange server
            node. This is populated for the Standalone Exchange Servers.
fqdn (string): Specifies the fully qualified domain name of the
Exchange Server.
guid (string): Specifies the Guid of the Exchange Application Server.
name (string): Specifies the display name of the Exchange
Application Server.
total_size_bytes (int): Specifies the total size of all Exchange
database copies in all the Exchange Application Servers that are
part of the DAG.
"""
# Create a mapping from Model property names to API property names
_names = {
"database_copy_info_list": 'databaseCopyInfoList',
"database_info_list":'databaseInfoList',
"fqdn": 'fqdn',
"guid": 'guid',
"name": 'name',
"total_size_bytes":'totalSizeBytes'
}
def __init__(self,
database_copy_info_list=None,
database_info_list=None,
fqdn=None,
guid=None,
name=None,
total_size_bytes=None):
"""Constructor for the ApplicationServerInfo class"""
# Initialize members of the class
self.database_copy_info_list = database_copy_info_list
self.database_info_list = database_info_list
self.fqdn = fqdn
self.guid = guid
self.name = name
self.total_size_bytes = total_size_bytes
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
database_copy_info_list = None
if dictionary.get('databaseCopyInfoList') != None:
database_copy_info_list = list()
for structure in dictionary.get('databaseCopyInfoList'):
database_copy_info_list.append(cohesity_management_sdk.models.exchange_database_copy_info.ExchangeDatabaseCopyInfo.from_dictionary(structure))
database_info_list = None
if dictionary.get('databaseInfoList') != None:
database_info_list = list()
for structure in dictionary.get('databaseInfoList'):
database_info_list.append(cohesity_management_sdk.models.exchange_database_info.ExchangeDatabaseInfo.from_dictionary(structure))
fqdn = dictionary.get('fqdn')
guid = dictionary.get('guid')
name = dictionary.get('name')
total_size_bytes = dictionary.get('totalSizeBytes')
# Return an object of this model
return cls(database_copy_info_list,
database_info_list,
fqdn,
guid,
name,
total_size_bytes)
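# --- Editor's illustrative sketch (not part of the original model) ---
# Deserializing a hypothetical API payload. Only the keys listed in _names
# are recognized; the nested list keys are absent here, so those attributes
# come back as None.
def _example_from_dictionary():
    payload = {
        "fqdn": "exch01.example.com",  # assumed values throughout
        "guid": "1234-abcd",
        "name": "exch01",
        "totalSizeBytes": 1024,
    }
    info = ApplicationServerInfo.from_dictionary(payload)
    assert info.fqdn == "exch01.example.com"
    assert info.database_copy_info_list is None
    return info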
| 38.74
| 158
| 0.648684
|
import cohesity_management_sdk.models.exchange_database_copy_info
import cohesity_management_sdk.models.exchange_database_info
class ApplicationServerInfo(object):
_names = {
"database_copy_info_list": 'databaseCopyInfoList',
"database_info_list":'databaseInfoList',
"fqdn": 'fqdn',
"guid": 'guid',
"name": 'name',
"total_size_bytes":'totalSizeBytes'
}
def __init__(self,
database_copy_info_list=None,
database_info_list=None,
fqdn=None,
guid=None,
name=None,
total_size_bytes=None):
self.database_copy_info_list = database_copy_info_list
self.database_info_list = database_info_list
self.fqdn = fqdn
self.guid = guid
self.name = name
self.total_size_bytes = total_size_bytes
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
database_copy_info_list = None
if dictionary.get('databaseCopyInfoList') != None:
database_copy_info_list = list()
for structure in dictionary.get('databaseCopyInfoList'):
database_copy_info_list.append(cohesity_management_sdk.models.exchange_database_copy_info.ExchangeDatabaseCopyInfo.from_dictionary(structure))
database_info_list = None
if dictionary.get('databaseInfoList') != None:
database_info_list = list()
for structure in dictionary.get('databaseInfoList'):
database_info_list.append(cohesity_management_sdk.models.exchange_database_info.ExchangeDatabaseInfo.from_dictionary(structure))
fqdn = dictionary.get('fqdn')
guid = dictionary.get('guid')
name = dictionary.get('name')
total_size_bytes = dictionary.get('totalSizeBytes')
return cls(database_copy_info_list,
database_info_list,
fqdn,
guid,
name,
total_size_bytes)
| true
| true
|
f702f759d6fd07e5788090253f85f8ef7d52ffbc
| 2,669
|
gyp
|
Python
|
chrome/browser/resources/settings/settings_page/compiled_resources2.gyp
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
chrome/browser/resources/settings/settings_page/compiled_resources2.gyp
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
chrome/browser/resources/settings/settings_page/compiled_resources2.gyp
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'main_page_behavior',
'dependencies': [
'../animation/compiled_resources2.gyp:animation',
'../compiled_resources2.gyp:route',
'settings_section',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_animated_pages',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_page_visibility',
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_section',
'dependencies': [
'../animation/compiled_resources2.gyp:animation',
'<(EXTERNS_GYP):web_animations',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_subpage',
'dependencies': [
'../compiled_resources2.gyp:route',
'settings_subpage_search',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/iron-resizable-behavior/compiled_resources2.gyp:iron-resizable-behavior-extracted',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/neon-animation/compiled_resources2.gyp:neon-animatable-behavior-extracted',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_subpage_search',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-icon-button/compiled_resources2.gyp:paper-icon-button-extracted',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-input/compiled_resources2.gyp:paper-input-container-extracted',
'<(DEPTH)/ui/webui/resources/cr_elements/cr_search_field/compiled_resources2.gyp:cr_search_field_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
| 43.754098
| 146
| 0.654927
|
{
'targets': [
{
'target_name': 'main_page_behavior',
'dependencies': [
'../animation/compiled_resources2.gyp:animation',
'../compiled_resources2.gyp:route',
'settings_section',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_animated_pages',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_page_visibility',
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_section',
'dependencies': [
'../animation/compiled_resources2.gyp:animation',
'<(EXTERNS_GYP):web_animations',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_subpage',
'dependencies': [
'../compiled_resources2.gyp:route',
'settings_subpage_search',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/iron-resizable-behavior/compiled_resources2.gyp:iron-resizable-behavior-extracted',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/neon-animation/compiled_resources2.gyp:neon-animatable-behavior-extracted',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'settings_subpage_search',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-icon-button/compiled_resources2.gyp:paper-icon-button-extracted',
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/paper-input/compiled_resources2.gyp:paper-input-container-extracted',
'<(DEPTH)/ui/webui/resources/cr_elements/cr_search_field/compiled_resources2.gyp:cr_search_field_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
| true
| true
|
f702f882a18a8f31cd4aaa8b692e61a020b9c589
| 2,764
|
py
|
Python
|
test/test_xlsx_model.py
|
TRANTANKHOA/pptx-template
|
6f756359641278e1aecb7e04abcbed01cc20826c
|
[
"Apache-2.0"
] | 73
|
2017-06-23T08:58:37.000Z
|
2022-03-30T05:01:03.000Z
|
test/test_xlsx_model.py
|
TRANTANKHOA/pptx-template
|
6f756359641278e1aecb7e04abcbed01cc20826c
|
[
"Apache-2.0"
] | 26
|
2017-06-08T01:45:36.000Z
|
2021-09-23T19:13:40.000Z
|
test/test_xlsx_model.py
|
TRANTANKHOA/pptx-template
|
6f756359641278e1aecb7e04abcbed01cc20826c
|
[
"Apache-2.0"
] | 23
|
2017-07-05T02:29:21.000Z
|
2022-01-18T00:50:30.000Z
|
#
# coding=utf-8
import unittest
import sys
import os
from io import open
import openpyxl as xl
from pptx_template.xlsx_model import _build_tsv, _format_cell_value, generate_whole_model
class Cell:
def __init__(self, value, number_format):
self.value = value
self.number_format = number_format
def _to_cells(list_of_list):
return [[Cell(value, '') for value in list] for list in list_of_list]
class MyTest(unittest.TestCase):
def test_build_tsv(self):
tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])])
self.assertEqual([["Year","A","B"],["2016",100,200]], tsv)
def test_build_tsv_tranapose(self):
tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])], transpose=True)
self.assertEqual([["Year","2016"],["A",100],["B",200]], tsv)
def test_build_tsv_side_by_side(self):
tsv = _build_tsv([_to_cells([["Year","A"],["2016",100]]), _to_cells([["B"],[200]])], side_by_side=True)
self.assertEqual([["Year","A","B"],["2016",100,200]], tsv)
def test_format_cell_value(self):
self.assertEqual(123.45678, _format_cell_value(Cell(123.45678, '')))
self.assertEqual("123", _format_cell_value(Cell(123.45678, '0')))
self.assertEqual("123.46", _format_cell_value(Cell(123.45678, '0.00')))
self.assertEqual("123.5", _format_cell_value(Cell(123.45678, '0.0_')))
self.assertEqual("12345.7%", _format_cell_value(Cell(123.45678, '0.0%_')))
self.assertEqual("12345%", _format_cell_value(Cell(123.45678, '0%_')))
def test_generate_whole_model(self):
def read_expect(name):
file_name = os.path.join(os.path.dirname(__file__), 'data2', name)
f = open(file_name, mode = 'r', encoding = 'utf-8')
result = f.read()
f.close()
return result
xls_file = os.path.join(os.path.dirname(__file__), 'data2', 'in.xlsx')
slides = generate_whole_model(xls_file, {})
self.assertEqual(u'Hello!', slides['p01']['greeting']['en'])
self.assertEqual(u'こんにちは!', slides['p01']['greeting']['ja'])
self.assertEqual([
['Season', u'売り上げ', u'利益', u'利益率'],
[u'春', 100, 50, 0.5],
[u'夏', 110, 60, 0.5],
[u'秋', 120, 70, 0.5],
[u'冬', 130, 0, 0.6],
], slides['p02']['array'])
self.assertEqual(read_expect('p02-normal.tsv'), slides['p02']['normal']['tsv_body'])
self.assertEqual(read_expect('p02-transpose.tsv'), slides['p02']['transpose']['tsv_body'])
self.assertEqual(read_expect('p02-sidebyside.tsv'), slides['p02']['sidebyside']['tsv_body'])
if __name__ == '__main__':
unittest.main()
| 40.057971
| 112
| 0.599493
|
import unittest
import sys
import os
from io import open
import openpyxl as xl
from pptx_template.xlsx_model import _build_tsv, _format_cell_value, generate_whole_model
class Cell:
def __init__(self, value, number_format):
self.value = value
self.number_format = number_format
def _to_cells(list_of_list):
return [[Cell(value, '') for value in list] for list in list_of_list]
class MyTest(unittest.TestCase):
def test_build_tsv(self):
tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])])
self.assertEqual([["Year","A","B"],["2016",100,200]], tsv)
    def test_build_tsv_transpose(self):
tsv = _build_tsv([_to_cells([["Year","A","B"],["2016",100,200]])], transpose=True)
self.assertEqual([["Year","2016"],["A",100],["B",200]], tsv)
def test_build_tsv_side_by_side(self):
tsv = _build_tsv([_to_cells([["Year","A"],["2016",100]]), _to_cells([["B"],[200]])], side_by_side=True)
self.assertEqual([["Year","A","B"],["2016",100,200]], tsv)
def test_format_cell_value(self):
self.assertEqual(123.45678, _format_cell_value(Cell(123.45678, '')))
self.assertEqual("123", _format_cell_value(Cell(123.45678, '0')))
self.assertEqual("123.46", _format_cell_value(Cell(123.45678, '0.00')))
self.assertEqual("123.5", _format_cell_value(Cell(123.45678, '0.0_')))
self.assertEqual("12345.7%", _format_cell_value(Cell(123.45678, '0.0%_')))
self.assertEqual("12345%", _format_cell_value(Cell(123.45678, '0%_')))
def test_generate_whole_model(self):
def read_expect(name):
file_name = os.path.join(os.path.dirname(__file__), 'data2', name)
f = open(file_name, mode = 'r', encoding = 'utf-8')
result = f.read()
f.close()
return result
xls_file = os.path.join(os.path.dirname(__file__), 'data2', 'in.xlsx')
slides = generate_whole_model(xls_file, {})
self.assertEqual(u'Hello!', slides['p01']['greeting']['en'])
self.assertEqual(u'こんにちは!', slides['p01']['greeting']['ja'])
self.assertEqual([
['Season', u'売り上げ', u'利益', u'利益率'],
[u'春', 100, 50, 0.5],
[u'夏', 110, 60, 0.5],
[u'秋', 120, 70, 0.5],
[u'冬', 130, 0, 0.6],
], slides['p02']['array'])
self.assertEqual(read_expect('p02-normal.tsv'), slides['p02']['normal']['tsv_body'])
self.assertEqual(read_expect('p02-transpose.tsv'), slides['p02']['transpose']['tsv_body'])
self.assertEqual(read_expect('p02-sidebyside.tsv'), slides['p02']['sidebyside']['tsv_body'])
if __name__ == '__main__':
unittest.main()
| true
| true
|
f702f88302df20baac1ae553afce919d1fdf2aa8
| 23,276
|
py
|
Python
|
tests/lambdas/test_sdk_analysis.py
|
CuriBio/IaC
|
86d39038c7035442778f13eb29f10bafb628c89a
|
[
"MIT"
] | 2
|
2021-09-15T07:34:57.000Z
|
2021-09-15T07:35:48.000Z
|
tests/lambdas/test_sdk_analysis.py
|
CuriBio/IaC
|
86d39038c7035442778f13eb29f10bafb628c89a
|
[
"MIT"
] | 339
|
2021-02-22T19:02:04.000Z
|
2022-03-31T15:13:02.000Z
|
tests/lambdas/test_sdk_analysis.py
|
CuriBio/IaC
|
86d39038c7035442778f13eb29f10bafb628c89a
|
[
"MIT"
] | null | null | null |
import base64
import copy
import hashlib
import json
from botocore.exceptions import ClientError
import pytest
from ..test_utils import import_lambda
sdk_analysis = import_lambda(
"sdk_analysis",
mock_imports=[
"pulse3D.plate_recording",
"pulse3D.constants",
"pulse3D.excel_writer",
"pymysql",
"pandas",
],
)
TEST_BUCKET_NAME = "test_name"
TEST_OBJECT_KEY = "customer_id/username/test_key"
TEST_RECORD = {"s3": {"bucket": {"name": TEST_BUCKET_NAME}, "object": {"key": TEST_OBJECT_KEY}}}
TEST_FILENAME = TEST_OBJECT_KEY.rsplit("/", 1)[1]
@pytest.fixture(scope="function", name="mocked_boto3_client")
def fixture_mocked_boto3_client(mocker):
mocked_sqs_client = mocker.Mock()
mocked_ssm_client = mocker.Mock()
mocked_s3_client = mocker.Mock()
mocked_ec2_client = mocker.Mock()
mocked_s3_client.head_object.return_value = {"Metadata": {"upload-id": "test-id"}}
mocked_dynamodb_client = mocker.Mock()
def se(client_type):
if client_type == "sqs":
return mocked_sqs_client
if client_type == "s3":
return mocked_s3_client
if client_type == "dynamodb":
return mocked_dynamodb_client
if client_type == "secretsmanager":
return mocked_ssm_client
if client_type == "ec2":
return mocked_ec2_client
mocker.patch.object(sdk_analysis.boto3, "client", autospec=True, side_effect=se)
yield {
"sqs": mocked_sqs_client,
"s3": mocked_s3_client,
"dynamodb": mocked_dynamodb_client,
"secretsmanager": mocked_ssm_client,
"ec2": mocked_ec2_client,
}
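# --- Editor's note (illustrative, not part of the original tests) ---
# The fixture above fans boto3.client(<service>) out to one Mock per service,
# so each test asserts against exactly the client it cares about. The same
# pattern in isolation (a sketch; names here are assumptions):
#
#   clients = {name: mocker.Mock() for name in ("s3", "sqs")}
#   mocker.patch.object(boto3, "client", side_effect=lambda n: clients[n])
#   boto3.client("s3")  # -> clients["s3"], ready for call assertions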
def test_sdk_analysis__logs_exception_when_receiving_message_from_sqs_fails(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
expected_error = ClientError({}, "")
mocked_sqs_client.receive_message.side_effect = expected_error
spied_logger_exception = mocker.spy(sdk_analysis.logger, "exception")
sdk_analysis.handler(max_num_loops=1)
spied_logger_exception.assert_called_once_with(f"receive_message failed. Error: {expected_error}")
def test_sdk_analysis__sleeps_after_each_loop_but_not_in_final_loop(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_sleep = mocker.patch.object(sdk_analysis, "sleep", autospec=True)
# Tanner (9/23/21): mocking receive_message to have error raised here in order to avoid mocking multiple other objects
mocked_sqs_client.receive_message.side_effect = ClientError({}, "")
sdk_analysis.handler(max_num_loops=2)
mocked_sleep.assert_called_once_with(5)
def test_sdk_analysis__gets_messages_from_sqs_queue_correctly(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_sqs_client.receive_message.return_value = {}
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
sdk_analysis.handler(max_num_loops=1)
mocked_sqs_client.receive_message.assert_called_once_with(
QueueUrl=expected_sqs_url, MaxNumberOfMessages=1, WaitTimeSeconds=10
)
def test_sdk_analysis__deletes_messages_from_sqs_queue_after_processing_them(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
test_message = {"ReceiptHandle": "rh"}
test_message_list = [test_message] * 3
mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list}
sdk_analysis.handler(max_num_loops=1)
assert mocked_sqs_client.delete_message.call_count == len(test_message_list)
mocked_sqs_client.delete_message.called_with(
QueueUrl=expected_sqs_url, ReceiptHandle=test_message["ReceiptHandle"]
)
@pytest.mark.parametrize(
"test_message",
[
{},
{"Body": json.dumps({})},
{"Body": json.dumps({"other_key": "val"})},
{"Body": json.dumps({"Records": []})},
{"Body": json.dumps({"Records": [{}]})},
{"Body": json.dumps({"Records": [{"eventSource": "aws:s3"}]})},
{"Body": json.dumps({"Records": [{"eventName": "ObjectCreated:Post"}]})},
],
)
def test_sdk_analysis__does_not_process_message_or_record_from_sqs_queue_that_is_not_formatted_correctly(
test_message, mocker, mocked_boto3_client
):
mocked_sqs_client = mocked_boto3_client["sqs"]
test_message.update({"ReceiptHandle": "rh"})
mocked_sqs_client.receive_message.return_value = {"Messages": [test_message]}
spied_process_record = mocker.spy(sdk_analysis, "process_record")
sdk_analysis.handler(max_num_loops=1)
spied_process_record.assert_not_called()
def test_sdk_analysis__processes_each_record_of_each_record_of_each_message_from_sqs_queue(
mocker, mocked_boto3_client
):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_s3_client = mocked_boto3_client["s3"]
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
test_num_records = 5
test_records = [
{"eventSource": "aws:s3", "eventName": "ObjectCreated:Post", "num": i}
for i in range(test_num_records)
]
test_messages = [
{"Body": json.dumps({"Records": records}), "ReceiptHandle": "rh"}
for records in (test_records[:2], test_records[2:])
]
mocked_sqs_client.receive_message.return_value = {"Messages": test_messages}
mocked_process_record = mocker.patch.object(sdk_analysis, "process_record")
sdk_analysis.handler(max_num_loops=1)
assert mocked_process_record.call_count == test_num_records
for record in test_records:
mocked_process_record.assert_any_call(record, mocked_s3_client, mocked_dynamodb_client)
def test_sdk_analysis__handles_info_logging_pertaining_to_sqs_queue(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
test_message_list = []
mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list}
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
sdk_analysis.handler(max_num_loops=1)
spied_logger_info.assert_any_call(f"Receiving messages on {expected_sqs_url}")
spied_logger_info.assert_any_call(f"Received: {len(test_message_list)}")
spied_logger_info.assert_any_call("Received: 0")
def test_process_record__retrieves_metadata_of_file_correctly(mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_s3_client.head_object.assert_called_once_with(Bucket=TEST_BUCKET_NAME, Key=TEST_OBJECT_KEY)
def test_process_record__logs_error_when_one_is_raised_while_retrieving_metadata_from_s3_and_does_not_attempt_to_download_the_file(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
expected_error = ClientError({}, "")
mocked_s3_client.head_object.side_effect = expected_error
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_logger_error.assert_called_once_with(
f"Error occurred while retrieving head object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}"
)
mocked_s3_client.download_file.assert_not_called()
def test_process_record__correctly_downloads_file_to_temporary_directory(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_temporary_dir.assert_called_once_with(dir="/tmp")
mocked_s3_client.download_file.assert_called_once_with(
TEST_BUCKET_NAME, TEST_OBJECT_KEY, f"{spied_temporary_dir.spy_return.name}/{TEST_FILENAME}"
)
def test_process_record__handles_error_raised_while_downloading_file_from_s3(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_error = ClientError({}, "")
mocked_s3_client.download_file.side_effect = expected_error
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
spied_update_status = mocker.spy(sdk_analysis, "update_sdk_status")
spied_pr_from_dir = mocker.spy(sdk_analysis.PlateRecording, "from_directory")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_logger_error.assert_called_once_with(
f"Failed to download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}"
)
spied_update_status.assert_called_once_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error accessing file"
)
spied_pr_from_dir.assert_not_called()
def test_process_record__sets_file_status_to_analysis_running_then_runs_sdk_analysis_on_file(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
mocked_pr_from_dir = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
pr = mocked_pr_from_dir.return_value.__next__()
error_tracker = {"funcs_called_out_of_order": False}
def se(*args):
if args[-1] == "analysis running":
error_tracker["funcs_called_out_of_order"] = mocked_pr_from_dir.call_count != 0
mocked_update_status = mocker.patch.object(
sdk_analysis, "update_sdk_status", autospec=True, side_effect=se
)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
assert error_tracker["funcs_called_out_of_order"] is False
assert mocked_update_status.call_args_list[0] == mocker.call(
mocked_boto3_client["dynamodb"], expected_upload_id, "analysis running"
)
mocked_pr_from_dir.assert_called_once_with(spied_temporary_dir.spy_return)
sdk_analysis.write_xlsx.assert_called_with(pr, name=f"{TEST_FILENAME}.xlsx")
def test_process_record__handles_error_raised_while_running_sdk_analysis(mocker, mocked_boto3_client):
expected_upload_id = mocked_boto3_client["s3"].head_object.return_value["Metadata"]["upload-id"]
expected_error = Exception("test_exception")
mocker.patch.object(
sdk_analysis.PlateRecording, "from_directory", autospec=True, side_effect=expected_error
)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
sdk_analysis.process_record(
copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"]
)
spied_logger_error.assert_called_once_with(f"SDK analysis failed: {expected_error}")
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error during analysis"
)
def test_process_record__uploads_file_created_by_sdk_analysis_to_s3_bucket_correctly_and_sets_file_status_to_analysis_complete(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
mocked_dynamo_client = mocked_boto3_client["dynamodb"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_upload_bucket = "test_url"
mocker.patch.object(hashlib, "md5")
mocked_base64 = mocker.patch.object(base64, "b64encode")
expected_md5 = mocked_base64().decode()
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocked_open = mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_open.assert_called_with(f"{TEST_FILENAME}.xlsx", "rb")
mocked_s3_client.put_object.assert_called_once_with(
Body=mocked_open.return_value.__enter__(),
Bucket=expected_upload_bucket,
Key=f"{TEST_OBJECT_KEY}.xlsx",
ContentMD5=expected_md5,
)
assert mocked_update_status.call_args_list[1] == mocker.call(
mocked_dynamo_client, expected_upload_id, "analysis complete"
)
def test_process_record__handles_error_raised_while_uploading_file_to_s3(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
expected_error = Exception("test_exception")
mocked_s3_client.put_object.side_effect = expected_error
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_db_handling = mocker.patch.object(
sdk_analysis.main, "handle_db_metadata_insertions", autospec=True
)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
expected_file_name = f"{TEST_FILENAME}.xlsx"
spied_logger_error.assert_called_with(
f"S3 Upload failed for {expected_file_name} to {expected_upload_bucket}/{TEST_OBJECT_KEY}.xlsx: {expected_error}"
)
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error during upload of analyzed file"
)
mocked_db_handling.assert_not_called()
def test_process_record__after_successful_upload_logger_handles_failed_aurora_db_insertion(
mocker, mocked_boto3_client
):
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
    # mocker.patch.object(sdk_analysis, "write_xlsx", autospec=True)
mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", side_effect=Exception("ERROR"))
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error inserting analysis to database"
)
spied_logger_error.assert_called_with("Recording metadata failed to store in aurora database: ERROR")
def test_process_record__after_successful_upload_logger_handles_successful_aurora_db_insertion(
mocker, mocked_boto3_client
):
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_upload_bucket = "test_bucket"
expected_db_cluster_endpoint = "test_host"
expected_file_name = f"{TEST_OBJECT_KEY}.xlsx"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint)
mocker.patch.object(hashlib, "md5")
mocked_base64 = mocker.patch.object(base64, "b64encode")
expected_md5 = mocked_base64().decode()
mocked_open = mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocked_db_handling = mocker.patch.object(
sdk_analysis.main, "handle_db_metadata_insertions", autospec=True
)
mocker.patch.object(mocked_s3_client, "put_object")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_update_status.assert_any_call(
mocked_boto3_client["dynamodb"], expected_upload_id, "analysis successfully inserted into database"
)
spied_logger_info.assert_any_call(f"Inserting {TEST_FILENAME}.xlsx metadata into aurora database")
test_args = [
mocked_open.return_value.__enter__(),
mocked_PR_instance.return_value.__next__(),
expected_md5,
]
mocked_db_handling.assert_called_with(
expected_upload_bucket, expected_file_name, expected_db_cluster_endpoint, test_args
)
def test_set_info_dict__correctly_retrieves_aws_credentials(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password"))
mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch("builtins.open", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
expected_info_dict = {
"db_name": "mantarray_recordings",
"db_password": "test_password",
"db_username": "test_username",
}
assert sdk_analysis.main.INFO_DICT == expected_info_dict
def test_load_data_into_dataframe__successfully_gets_called_after_successful_db_connection(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password"))
expected_db_cluster_endpoint = "test_host"
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint)
mocker.patch.object(sdk_analysis.main.pymysql, "connect")
format_spy = mocker.patch.object(sdk_analysis.main, "load_data_to_dataframe")
mocked_open = mocker.patch("builtins.open", autospec=True)
mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(mocked_s3_client, "put_object", autospec=True)
mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
format_spy.assert_any_call(
mocked_open.return_value.__enter__(), mocked_PR_instance.return_value.__next__()
)
def test_process_record__handles_info_logging(mocker, mocked_boto3_client):
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
sdk_analysis.process_record(
copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"]
)
spied_logger_info.assert_any_call(f"Retrieving Head Object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}")
spied_logger_info.assert_any_call(
f"Download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY} to {spied_temporary_dir.spy_return.name}/{TEST_FILENAME}"
)
def test_update_sdk_status__updates_item_correctly(mocker, mocked_boto3_client):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
mocked_dynamodb_client.update_item.assert_called_once_with(
TableName=expected_table_name,
Key={"upload_id": {"S": test_upload_id}},
UpdateExpression="SET sdk_status = :val",
ExpressionAttributeValues={":val": {"S": test_status}},
ConditionExpression="attribute_exists(upload_id)",
)
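# Note: the ConditionExpression asserted above is what makes update_item raise
# a ConditionalCheckFailedException when the upload_id key is absent; the next
# two tests exercise exactly that fallback path.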
def test_update_sdk_status__handles_conditional_check_failed_exceptions_raised_from_updating_item(
mocker, mocked_boto3_client
):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_error = ClientError({"Error": {"Code": "ConditionalCheckFailedException"}}, "")
mocked_dynamodb_client.update_item.side_effect = expected_error
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
spied_logger_error.assert_any_call(f"Error: {expected_error}")
spied_logger_error.assert_any_call(
f"Upload ID: {test_upload_id} was not found in table {expected_table_name}"
)
mocked_dynamodb_client.put_item.assert_called_once_with(
TableName=expected_table_name,
Item={"upload_id": {"S": test_upload_id}, "sdk_status": {"S": test_status}},
)
def test_update_sdk_status__logs_other_aws_errors_raised_from_updating_item(mocker, mocked_boto3_client):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_error = ClientError({"Error": {"Code": "SomeOtherException"}}, "")
mocked_dynamodb_client.update_item.side_effect = expected_error
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
spied_logger_error.assert_called_once_with(f"Error: {expected_error}")
mocked_dynamodb_client.put_item.assert_not_called()
| 43.02403
| 131
| 0.768216
|
import base64
import copy
import hashlib
import json
from botocore.exceptions import ClientError
import pytest
from ..test_utils import import_lambda
sdk_analysis = import_lambda(
"sdk_analysis",
mock_imports=[
"pulse3D.plate_recording",
"pulse3D.constants",
"pulse3D.excel_writer",
"pymysql",
"pandas",
],
)
TEST_BUCKET_NAME = "test_name"
TEST_OBJECT_KEY = "customer_id/username/test_key"
TEST_RECORD = {"s3": {"bucket": {"name": TEST_BUCKET_NAME}, "object": {"key": TEST_OBJECT_KEY}}}
TEST_FILENAME = TEST_OBJECT_KEY.rsplit("/", 1)[1]
@pytest.fixture(scope="function", name="mocked_boto3_client")
def fixture_mocked_boto3_client(mocker):
mocked_sqs_client = mocker.Mock()
mocked_ssm_client = mocker.Mock()
mocked_s3_client = mocker.Mock()
mocked_ec2_client = mocker.Mock()
mocked_s3_client.head_object.return_value = {"Metadata": {"upload-id": "test-id"}}
mocked_dynamodb_client = mocker.Mock()
def se(client_type):
if client_type == "sqs":
return mocked_sqs_client
if client_type == "s3":
return mocked_s3_client
if client_type == "dynamodb":
return mocked_dynamodb_client
if client_type == "secretsmanager":
return mocked_ssm_client
if client_type == "ec2":
return mocked_ec2_client
mocker.patch.object(sdk_analysis.boto3, "client", autospec=True, side_effect=se)
yield {
"sqs": mocked_sqs_client,
"s3": mocked_s3_client,
"dynamodb": mocked_dynamodb_client,
"secretsmanager": mocked_ssm_client,
"ec2": mocked_ec2_client,
}
def test_sdk_analysis__logs_exception_when_receiving_message_from_sqs_fails(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
expected_error = ClientError({}, "")
mocked_sqs_client.receive_message.side_effect = expected_error
spied_logger_exception = mocker.spy(sdk_analysis.logger, "exception")
sdk_analysis.handler(max_num_loops=1)
spied_logger_exception.assert_called_once_with(f"receive_message failed. Error: {expected_error}")
def test_sdk_analysis__sleeps_after_each_loop_but_not_in_final_loop(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_sleep = mocker.patch.object(sdk_analysis, "sleep", autospec=True)
mocked_sqs_client.receive_message.side_effect = ClientError({}, "")
sdk_analysis.handler(max_num_loops=2)
mocked_sleep.assert_called_once_with(5)
def test_sdk_analysis__gets_messages_from_sqs_queue_correctly(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_sqs_client.receive_message.return_value = {}
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
sdk_analysis.handler(max_num_loops=1)
mocked_sqs_client.receive_message.assert_called_once_with(
QueueUrl=expected_sqs_url, MaxNumberOfMessages=1, WaitTimeSeconds=10
)
def test_sdk_analysis__deletes_messages_from_sqs_queue_after_processing_them(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
test_message = {"ReceiptHandle": "rh"}
test_message_list = [test_message] * 3
mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list}
sdk_analysis.handler(max_num_loops=1)
assert mocked_sqs_client.delete_message.call_count == len(test_message_list)
    mocked_sqs_client.delete_message.assert_called_with(
QueueUrl=expected_sqs_url, ReceiptHandle=test_message["ReceiptHandle"]
)
@pytest.mark.parametrize(
"test_message",
[
{},
{"Body": json.dumps({})},
{"Body": json.dumps({"other_key": "val"})},
{"Body": json.dumps({"Records": []})},
{"Body": json.dumps({"Records": [{}]})},
{"Body": json.dumps({"Records": [{"eventSource": "aws:s3"}]})},
{"Body": json.dumps({"Records": [{"eventName": "ObjectCreated:Post"}]})},
],
)
def test_sdk_analysis__does_not_process_message_or_record_from_sqs_queue_that_is_not_formatted_correctly(
test_message, mocker, mocked_boto3_client
):
mocked_sqs_client = mocked_boto3_client["sqs"]
test_message.update({"ReceiptHandle": "rh"})
mocked_sqs_client.receive_message.return_value = {"Messages": [test_message]}
spied_process_record = mocker.spy(sdk_analysis, "process_record")
sdk_analysis.handler(max_num_loops=1)
spied_process_record.assert_not_called()
def test_sdk_analysis__processes_each_record_of_each_record_of_each_message_from_sqs_queue(
mocker, mocked_boto3_client
):
mocked_sqs_client = mocked_boto3_client["sqs"]
mocked_s3_client = mocked_boto3_client["s3"]
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
test_num_records = 5
test_records = [
{"eventSource": "aws:s3", "eventName": "ObjectCreated:Post", "num": i}
for i in range(test_num_records)
]
test_messages = [
{"Body": json.dumps({"Records": records}), "ReceiptHandle": "rh"}
for records in (test_records[:2], test_records[2:])
]
mocked_sqs_client.receive_message.return_value = {"Messages": test_messages}
mocked_process_record = mocker.patch.object(sdk_analysis, "process_record")
sdk_analysis.handler(max_num_loops=1)
assert mocked_process_record.call_count == test_num_records
for record in test_records:
mocked_process_record.assert_any_call(record, mocked_s3_client, mocked_dynamodb_client)
def test_sdk_analysis__handles_info_logging_pertaining_to_sqs_queue(mocker, mocked_boto3_client):
mocked_sqs_client = mocked_boto3_client["sqs"]
test_message_list = []
mocked_sqs_client.receive_message.return_value = {"Messages": test_message_list}
expected_sqs_url = "test_url"
mocker.patch.object(sdk_analysis, "SQS_URL", expected_sqs_url)
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
sdk_analysis.handler(max_num_loops=1)
spied_logger_info.assert_any_call(f"Receiving messages on {expected_sqs_url}")
spied_logger_info.assert_any_call(f"Received: {len(test_message_list)}")
spied_logger_info.assert_any_call("Received: 0")
def test_process_record__retrieves_metadata_of_file_correctly(mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_s3_client.head_object.assert_called_once_with(Bucket=TEST_BUCKET_NAME, Key=TEST_OBJECT_KEY)
def test_process_record__logs_error_when_one_is_raised_while_retrieving_metadata_from_s3_and_does_not_attempt_to_download_the_file(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
expected_error = ClientError({}, "")
mocked_s3_client.head_object.side_effect = expected_error
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_logger_error.assert_called_once_with(
f"Error occurred while retrieving head object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}"
)
mocked_s3_client.download_file.assert_not_called()
def test_process_record__correctly_downloads_file_to_temporary_directory(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_temporary_dir.assert_called_once_with(dir="/tmp")
mocked_s3_client.download_file.assert_called_once_with(
TEST_BUCKET_NAME, TEST_OBJECT_KEY, f"{spied_temporary_dir.spy_return.name}/{TEST_FILENAME}"
)
def test_process_record__handles_error_raised_while_downloading_file_from_s3(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_error = ClientError({}, "")
mocked_s3_client.download_file.side_effect = expected_error
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
spied_update_status = mocker.spy(sdk_analysis, "update_sdk_status")
spied_pr_from_dir = mocker.spy(sdk_analysis.PlateRecording, "from_directory")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
spied_logger_error.assert_called_once_with(
f"Failed to download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}: {expected_error}"
)
spied_update_status.assert_called_once_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error accessing file"
)
spied_pr_from_dir.assert_not_called()
def test_process_record__sets_file_status_to_analysis_running_then_runs_sdk_analysis_on_file(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
mocked_pr_from_dir = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
pr = mocked_pr_from_dir.return_value.__next__()
error_tracker = {"funcs_called_out_of_order": False}
def se(*args):
if args[-1] == "analysis running":
error_tracker["funcs_called_out_of_order"] = mocked_pr_from_dir.call_count != 0
mocked_update_status = mocker.patch.object(
sdk_analysis, "update_sdk_status", autospec=True, side_effect=se
)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
assert error_tracker["funcs_called_out_of_order"] is False
assert mocked_update_status.call_args_list[0] == mocker.call(
mocked_boto3_client["dynamodb"], expected_upload_id, "analysis running"
)
mocked_pr_from_dir.assert_called_once_with(spied_temporary_dir.spy_return)
sdk_analysis.write_xlsx.assert_called_with(pr, name=f"{TEST_FILENAME}.xlsx")
def test_process_record__handles_error_raised_while_running_sdk_analysis(mocker, mocked_boto3_client):
expected_upload_id = mocked_boto3_client["s3"].head_object.return_value["Metadata"]["upload-id"]
expected_error = Exception("test_exception")
mocker.patch.object(
sdk_analysis.PlateRecording, "from_directory", autospec=True, side_effect=expected_error
)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
sdk_analysis.process_record(
copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"]
)
spied_logger_error.assert_called_once_with(f"SDK analysis failed: {expected_error}")
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error during analysis"
)
def test_process_record__uploads_file_created_by_sdk_analysis_to_s3_bucket_correctly_and_sets_file_status_to_analysis_complete(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
mocked_dynamo_client = mocked_boto3_client["dynamodb"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_upload_bucket = "test_url"
mocker.patch.object(hashlib, "md5")
mocked_base64 = mocker.patch.object(base64, "b64encode")
expected_md5 = mocked_base64().decode()
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocked_open = mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_open.assert_called_with(f"{TEST_FILENAME}.xlsx", "rb")
mocked_s3_client.put_object.assert_called_once_with(
Body=mocked_open.return_value.__enter__(),
Bucket=expected_upload_bucket,
Key=f"{TEST_OBJECT_KEY}.xlsx",
ContentMD5=expected_md5,
)
assert mocked_update_status.call_args_list[1] == mocker.call(
mocked_dynamo_client, expected_upload_id, "analysis complete"
)
def test_process_record__handles_error_raised_while_uploading_file_to_s3(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
expected_error = Exception("test_exception")
mocked_s3_client.put_object.side_effect = expected_error
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_db_handling = mocker.patch.object(
sdk_analysis.main, "handle_db_metadata_insertions", autospec=True
)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
expected_file_name = f"{TEST_FILENAME}.xlsx"
spied_logger_error.assert_called_with(
f"S3 Upload failed for {expected_file_name} to {expected_upload_bucket}/{TEST_OBJECT_KEY}.xlsx: {expected_error}"
)
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error during upload of analyzed file"
)
mocked_db_handling.assert_not_called()
def test_process_record__after_successful_upload_logger_handles_failed_aurora_db_insertion(
mocker, mocked_boto3_client
):
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocker.patch.object(sdk_analysis.main, "handle_db_metadata_insertions", side_effect=Exception("ERROR"))
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_update_status.assert_called_with(
mocked_boto3_client["dynamodb"], expected_upload_id, "error inserting analysis to database"
)
spied_logger_error.assert_called_with("Recording metadata failed to store in aurora database: ERROR")
def test_process_record__after_successful_upload_logger_handles_successful_aurora_db_insertion(
mocker, mocked_boto3_client
):
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_id = mocked_s3_client.head_object.return_value["Metadata"]["upload-id"]
expected_upload_bucket = "test_bucket"
expected_db_cluster_endpoint = "test_host"
expected_file_name = f"{TEST_OBJECT_KEY}.xlsx"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint)
mocker.patch.object(hashlib, "md5")
mocked_base64 = mocker.patch.object(base64, "b64encode")
expected_md5 = mocked_base64().decode()
mocked_open = mocker.patch("builtins.open", autospec=True)
mocked_update_status = mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
mocked_db_handling = mocker.patch.object(
sdk_analysis.main, "handle_db_metadata_insertions", autospec=True
)
mocker.patch.object(mocked_s3_client, "put_object")
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
mocked_update_status.assert_any_call(
mocked_boto3_client["dynamodb"], expected_upload_id, "analysis successfully inserted into database"
)
spied_logger_info.assert_any_call(f"Inserting {TEST_FILENAME}.xlsx metadata into aurora database")
test_args = [
mocked_open.return_value.__enter__(),
mocked_PR_instance.return_value.__next__(),
expected_md5,
]
mocked_db_handling.assert_called_with(
expected_upload_bucket, expected_file_name, expected_db_cluster_endpoint, test_args
)
def test_set_info_dict__correctly_retrieves_aws_credentials(mocker, mocked_boto3_client):
mocked_s3_client = mocked_boto3_client["s3"]
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password"))
mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch("builtins.open", autospec=True)
mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
expected_info_dict = {
"db_name": "mantarray_recordings",
"db_password": "test_password",
"db_username": "test_username",
}
assert sdk_analysis.main.INFO_DICT == expected_info_dict
def test_load_data_into_dataframe__successfully_gets_called_after_successful_db_connection(
mocker, mocked_boto3_client
):
mocked_s3_client = mocked_boto3_client["s3"]
mocker.patch.object(hashlib, "md5")
mocker.patch.object(base64, "b64encode")
mocker.patch.object(sdk_analysis.main, "get_ssm_secrets", return_value=("test_username", "test_password"))
expected_db_cluster_endpoint = "test_host"
expected_upload_bucket = "test_url"
mocker.patch.object(sdk_analysis, "S3_UPLOAD_BUCKET", expected_upload_bucket)
mocker.patch.object(sdk_analysis, "DB_CLUSTER_ENDPOINT", expected_db_cluster_endpoint)
mocker.patch.object(sdk_analysis.main.pymysql, "connect")
format_spy = mocker.patch.object(sdk_analysis.main, "load_data_to_dataframe")
mocked_open = mocker.patch("builtins.open", autospec=True)
mocker.patch.object(sdk_analysis, "update_sdk_status", autospec=True)
mocker.patch.object(mocked_s3_client, "put_object", autospec=True)
mocked_PR_instance = mocker.patch.object(sdk_analysis.PlateRecording, "from_directory", autospec=True)
sdk_analysis.process_record(copy.deepcopy(TEST_RECORD), mocked_s3_client, mocked_boto3_client["dynamodb"])
format_spy.assert_any_call(
mocked_open.return_value.__enter__(), mocked_PR_instance.return_value.__next__()
)
def test_process_record__handles_info_logging(mocker, mocked_boto3_client):
spied_logger_info = mocker.spy(sdk_analysis.logger, "info")
spied_temporary_dir = mocker.spy(sdk_analysis.tempfile, "TemporaryDirectory")
sdk_analysis.process_record(
copy.deepcopy(TEST_RECORD), mocked_boto3_client["s3"], mocked_boto3_client["dynamodb"]
)
spied_logger_info.assert_any_call(f"Retrieving Head Object of {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY}")
spied_logger_info.assert_any_call(
f"Download {TEST_BUCKET_NAME}/{TEST_OBJECT_KEY} to {spied_temporary_dir.spy_return.name}/{TEST_FILENAME}"
)
def test_update_sdk_status__updates_item_correctly(mocker, mocked_boto3_client):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
mocked_dynamodb_client.update_item.assert_called_once_with(
TableName=expected_table_name,
Key={"upload_id": {"S": test_upload_id}},
UpdateExpression="SET sdk_status = :val",
ExpressionAttributeValues={":val": {"S": test_status}},
ConditionExpression="attribute_exists(upload_id)",
)
def test_update_sdk_status__handles_conditional_check_failed_exceptions_raised_from_updating_item(
mocker, mocked_boto3_client
):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_error = ClientError({"Error": {"Code": "ConditionalCheckFailedException"}}, "")
mocked_dynamodb_client.update_item.side_effect = expected_error
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
spied_logger_error.assert_any_call(f"Error: {expected_error}")
spied_logger_error.assert_any_call(
f"Upload ID: {test_upload_id} was not found in table {expected_table_name}"
)
mocked_dynamodb_client.put_item.assert_called_once_with(
TableName=expected_table_name,
Item={"upload_id": {"S": test_upload_id}, "sdk_status": {"S": test_status}},
)
def test_update_sdk_status__logs_other_aws_errors_raised_from_updating_item(mocker, mocked_boto3_client):
mocked_dynamodb_client = mocked_boto3_client["dynamodb"]
expected_error = ClientError({"Error": {"Code": "SomeOtherException"}}, "")
mocked_dynamodb_client.update_item.side_effect = expected_error
expected_table_name = "test_table"
mocker.patch.object(sdk_analysis, "SDK_STATUS_TABLE", expected_table_name)
spied_logger_error = mocker.spy(sdk_analysis.logger, "error")
test_upload_id = "test_id"
test_status = "test_status"
sdk_analysis.update_sdk_status(mocked_dynamodb_client, test_upload_id, test_status)
spied_logger_error.assert_called_once_with(f"Error: {expected_error}")
mocked_dynamodb_client.put_item.assert_not_called()
| true
| true
|
f702f8cb30c1d993d051deeb4a1219efe4d96cde
| 1,676
|
py
|
Python
|
chris/cube/client.py
|
FNNDSC/chrisomatic
|
6eacc7716ed40c7fdac9b1fbfd467433ab0b2bec
|
[
"MIT"
] | null | null | null |
chris/cube/client.py
|
FNNDSC/chrisomatic
|
6eacc7716ed40c7fdac9b1fbfd467433ab0b2bec
|
[
"MIT"
] | 4
|
2022-02-24T22:38:16.000Z
|
2022-02-25T22:50:01.000Z
|
chris/cube/client.py
|
FNNDSC/chrisomatic
|
6eacc7716ed40c7fdac9b1fbfd467433ab0b2bec
|
[
"MIT"
] | null | null | null |
from typing import TypeVar, AsyncIterator, Sequence
from chris.common.types import PluginUrl
from chris.common.client import AuthenticatedClient
from chris.common.search import get_paginated, to_sequence
import chris.common.decorator as http
from chris.cube.types import ComputeResourceName, PfconUrl
from chris.cube.deserialization import CubeCollectionLinks, CubePlugin, ComputeResource
_T = TypeVar("_T")
class CubeClient(AuthenticatedClient[CubeCollectionLinks, CubePlugin, "CubeClient"]):
@http.post("/chris-admin/api/v1/")
async def register_plugin(
self, plugin_store_url: PluginUrl, compute_name: ComputeResourceName
) -> CubePlugin:
...
@http.post("/chris-admin/api/v1/computeresources/")
async def create_compute_resource(
self,
name: ComputeResourceName,
compute_url: PfconUrl,
compute_user: str,
compute_password: str,
description: str = "",
) -> ComputeResource:
...
def get_compute_resources_of(
self, plugin: CubePlugin
) -> AsyncIterator[ComputeResource]:
return get_paginated(
session=self.s, url=plugin.compute_resources, element_type=ComputeResource
)
def search_compute_resources(
self, max_requests=100, **query
) -> AsyncIterator[ComputeResource]:
return self.search(
url=self.collection_links.compute_resources,
query=query,
element_type=ComputeResource,
max_requests=max_requests,
)
async def get_all_compute_resources(self) -> Sequence[ComputeResource]:
return await to_sequence(self.search_compute_resources())
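# Illustrative usage (a sketch, not part of the original module; assumes an
# already-authenticated CubeClient bound to a running CUBE instance):
#   resources = await client.get_all_compute_resources()
#   async for cr in client.get_compute_resources_of(plugin):
#       print(cr)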
| 34.204082
| 87
| 0.704654
|
from typing import TypeVar, AsyncIterator, Sequence
from chris.common.types import PluginUrl
from chris.common.client import AuthenticatedClient
from chris.common.search import get_paginated, to_sequence
import chris.common.decorator as http
from chris.cube.types import ComputeResourceName, PfconUrl
from chris.cube.deserialization import CubeCollectionLinks, CubePlugin, ComputeResource
_T = TypeVar("_T")
class CubeClient(AuthenticatedClient[CubeCollectionLinks, CubePlugin, "CubeClient"]):
@http.post("/chris-admin/api/v1/")
async def register_plugin(
self, plugin_store_url: PluginUrl, compute_name: ComputeResourceName
) -> CubePlugin:
...
@http.post("/chris-admin/api/v1/computeresources/")
async def create_compute_resource(
self,
name: ComputeResourceName,
compute_url: PfconUrl,
compute_user: str,
compute_password: str,
description: str = "",
) -> ComputeResource:
...
def get_compute_resources_of(
self, plugin: CubePlugin
) -> AsyncIterator[ComputeResource]:
return get_paginated(
session=self.s, url=plugin.compute_resources, element_type=ComputeResource
)
def search_compute_resources(
self, max_requests=100, **query
) -> AsyncIterator[ComputeResource]:
return self.search(
url=self.collection_links.compute_resources,
query=query,
element_type=ComputeResource,
max_requests=max_requests,
)
async def get_all_compute_resources(self) -> Sequence[ComputeResource]:
return await to_sequence(self.search_compute_resources())
| true
| true
|
f702f9800fd73e3aeb9520829b92e3d60e774d55
| 438
|
py
|
Python
|
habari/apps/crawl/migrations/0016_auto_20200407_2042.py
|
ppolle/habari
|
671b98c361ce593f708bc15f69dd3aa6fe72b128
|
[
"MIT"
] | 3
|
2020-06-08T08:39:06.000Z
|
2020-07-30T10:46:22.000Z
|
habari/apps/crawl/migrations/0016_auto_20200407_2042.py
|
ppolle/habari
|
671b98c361ce593f708bc15f69dd3aa6fe72b128
|
[
"MIT"
] | 9
|
2021-03-19T11:18:58.000Z
|
2022-02-10T15:48:35.000Z
|
habari/apps/crawl/migrations/0016_auto_20200407_2042.py
|
ppolle/habari
|
671b98c361ce593f708bc15f69dd3aa6fe72b128
|
[
"MIT"
] | 1
|
2021-09-22T07:23:03.000Z
|
2021-09-22T07:23:03.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-04-07 17:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawl', '0015_remove_article_news_source'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='source',
new_name='news_source',
),
]
| 20.857143
| 53
| 0.616438
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawl', '0015_remove_article_news_source'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='source',
new_name='news_source',
),
]
| true
| true
|
f702fa92c59e613696990e2cb10c7e8d331bd0f1
| 1,523
|
bzl
|
Python
|
lib/dicts.bzl
|
laszlocsomor/bazel-skylib
|
f4a2bae427c4958af834c34624767b0144f7ab12
|
[
"Apache-2.0"
] | 31
|
2020-08-05T23:27:36.000Z
|
2022-02-09T18:53:57.000Z
|
lib/dicts.bzl
|
laszlocsomor/bazel-skylib
|
f4a2bae427c4958af834c34624767b0144f7ab12
|
[
"Apache-2.0"
] | 2
|
2020-08-06T00:07:42.000Z
|
2022-03-11T20:36:35.000Z
|
lib/dicts.bzl
|
laszlocsomor/bazel-skylib
|
f4a2bae427c4958af834c34624767b0144f7ab12
|
[
"Apache-2.0"
] | 7
|
2020-08-06T00:06:50.000Z
|
2022-03-11T20:35:19.000Z
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylib module containing functions that operate on dictionaries."""
def _add(*dictionaries):
"""Returns a new `dict` that has all the entries of the given dictionaries.
If the same key is present in more than one of the input dictionaries, the
last of them in the argument list overrides any earlier ones.
This function is designed to take zero or one arguments as well as multiple
dictionaries, so that it follows arithmetic identities and callers can avoid
special cases for their inputs: the sum of zero dictionaries is the empty
dictionary, and the sum of a single dictionary is a copy of itself.
Args:
*dictionaries: Zero or more dictionaries to be added.
Returns:
A new `dict` that has all the entries of the given dictionaries.
"""
result = {}
for d in dictionaries:
result.update(d)
return result
dicts = struct(
add = _add,
)
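# Illustrative usage (a sketch; assumes this file is load()ed as `dicts`):
#   dicts.add()                    == {}
#   dicts.add({"a": 1})            == {"a": 1}
#   dicts.add({"a": 1}, {"a": 2})  == {"a": 2}  # the last occurrence wins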
| 36.261905
| 80
| 0.730794
|
def _add(*dictionaries):
result = {}
for d in dictionaries:
result.update(d)
return result
dicts = struct(
add = _add,
)
| true
| true
|
f702fa93839e273a82c26a78e2b344bd75c7baab
| 7,542
|
py
|
Python
|
pySpatialTools/utils/artificial_data/artificial_measure.py
|
tgquintela/pySpatialTools
|
e028008f9750521bf7d311f7cd3323c88d621ea4
|
[
"MIT"
] | 8
|
2015-07-21T05:15:16.000Z
|
2018-06-12T18:22:52.000Z
|
pySpatialTools/utils/artificial_data/artificial_measure.py
|
tgquintela/pySpatialTools
|
e028008f9750521bf7d311f7cd3323c88d621ea4
|
[
"MIT"
] | 6
|
2016-01-11T22:25:28.000Z
|
2016-01-28T16:17:46.000Z
|
pySpatialTools/utils/artificial_data/artificial_measure.py
|
tgquintela/pySpatialTools
|
e028008f9750521bf7d311f7cd3323c88d621ea4
|
[
"MIT"
] | null | null | null |
"""
artificial measure
------------------
Creation of artificial measure
"""
import numpy as np
############################### Create measure ################################
###############################################################################
def create_artificial_measure_array(n_k, n_vals_i, n_feats):
"""Create artificial random measure in the array form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: np.ndarray
the transformed measure computed by the whole spatial descriptor model.
"""
measure = np.random.random((n_vals_i, n_feats, n_k))
return measure
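# Illustrative shape check (a sketch; the values themselves are random):
#   m = create_artificial_measure_array(n_k=2, n_vals_i=5, n_feats=3)
#   assert m.shape == (5, 3, 2)  # (n_vals_i, n_feats, n_k)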
def create_artificial_measure_append(n_k, n_vals_i, n_feats):
"""Create artificial random measure in the list form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: list
the transformed measure computed by the whole spatial descriptor model.
"""
rounds = np.random.randint(1, 40)
measure = create_empty_append(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
for i in range(len(vals_i[k])):
measure[k][vals_i[k][i]].append(x_i[k][i])
return measure
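# Illustrative structure (a sketch): measure[k][i] collects the feature dicts
# appended for output index i under perturbation k, e.g.
#   m = create_artificial_measure_append(n_k=1, n_vals_i=4, n_feats=3)
#   isinstance(m[0][0], list)  # True; its elements are {key: value} dicts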
def create_artificial_measure_replacelist(n_k, n_vals_i, n_feats,
unique_=False):
"""Create artificial random measure in the replacelist form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
unique_: boolean (default=False)
if there are no collapse.
Returns
-------
measure: list
the transformed measure computed by the whole spatial descriptor model.
"""
last = 0
rounds = np.random.randint(1, 40)
measure = create_empty_replacelist(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
if unique_:
vals_i = np.array([last+np.arange(n_iss)]*n_k)
last += n_iss
else:
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
measure[k][0].append(x_i[k])
measure[k][1].append(vals_i[k])
return measure
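# Illustrative structure (a sketch): for each perturbation k, measure[k][0]
# stores the per-round feature blocks and measure[k][1] the matching vals_i
# arrays, so len(measure[k][0]) == len(measure[k][1]) == rounds.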
############################### Empty measure #################################
###############################################################################
def create_empty_array(n_k, n_vals_i, n_feats):
"""Create null measure in the array form.
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: np.ndarray
the null measure to be fill by the computation of the spatial
descriptor model.
"""
return np.zeros((n_vals_i, n_feats, n_k))
def create_empty_append(n_k, n_iss, n_feats):
"""Create null measure in the list form.
Parameters
----------
n_k: int
the number of perturbations
    n_iss: int
        the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: list
the null measure to be fill by the computation of the spatial
descriptor model.
"""
    # Build independent inner lists; multiplying nested lists would alias the
    # same list object across all slots, so one append would show up everywhere.
    return [[[] for _ in range(n_iss)] for _ in range(n_k)]
def create_empty_replacelist(n_k, n_iss, n_feats):
"""Create null measure in the replacelist form.
Parameters
----------
n_k: int
the number of perturbations
    n_iss: int
        the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
measure: list
the null measure to be fill by the computation of the spatial
descriptor model.
"""
    # One independent [features, vals_i] pair per perturbation (no aliasing).
    return [[[], []] for _ in range(n_k)]
############################### Vals_i creation ###############################
###############################################################################
def create_vals_i(n_iss, nvals, n_k):
"""
Parameters
----------
n_k: int
the number of perturbations
n_vals_i: int
the number of indices of the output measure.
n_feats: int
the number of features.
Returns
-------
vals_i: np.ndarray
the associated stored indices for the element indices.
"""
return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss))
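# Illustrative check (a sketch; entries are random integers in [1, nvals)):
#   v = create_vals_i(n_iss=4, nvals=10, n_k=2)
#   assert v.shape == (2, 4)  # (n_k, n_iss)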
############################### Empty features ################################
###############################################################################
def create_empty_features_array(n_feats, n_iss, n_k):
"""Create null features for different iss in an array-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: np.ndarray
the null features we want to compute.
"""
return np.zeros((n_k, n_iss, n_feats))
def create_empty_features_dict(n_feats, n_iss, n_k):
"""Create null features for different iss in an listdict-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: list
the null features we want to compute.
"""
    # Independent dicts per (k, iss) slot; list multiplication would alias them.
    return [[{} for _ in range(n_iss)] for _ in range(n_k)]
################################ X_i features #################################
###############################################################################
def create_features_i_array(n_feats, n_iss, n_k):
"""Create null features for different iss in an array-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: np.ndarray
the null features we want to compute.
"""
x_i = np.random.random((n_k, n_iss, n_feats))
return x_i
def create_features_i_dict(n_feats, n_iss, n_k):
"""Create null features for different iss in an listdict-form.
Parameters
----------
n_feats: int
the number of features.
n_iss: int
the number of the elements to create their features.
n_k: int
the number of perturbations.
Returns
-------
features: list
the null features we want to compute.
"""
x_i = []
for k in range(n_k):
x_i_k = []
for i in range(n_iss):
keys = np.unique(np.random.randint(1, n_feats, n_feats))
keys = [str(e) for e in keys]
values = np.random.random(len(keys))
x_i_k.append(dict(zip(keys, values)))
x_i.append(x_i_k)
return x_i
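# Illustrative structure (a sketch): x_i[k][i] maps stringified feature keys
# to random values, e.g.
#   x = create_features_i_dict(n_feats=4, n_iss=2, n_k=1)
#   all(isinstance(d, dict) for d in x[0])  # True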
| 26.006897
| 79
| 0.549589
|
import numpy as np
def create_artificial_measure_array(n_k, n_vals_i, n_feats):
measure = np.random.random((n_vals_i, n_feats, n_k))
return measure
def create_artificial_measure_append(n_k, n_vals_i, n_feats):
rounds = np.random.randint(1, 40)
measure = create_empty_append(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
for i in range(len(vals_i[k])):
measure[k][vals_i[k][i]].append(x_i[k][i])
return measure
def create_artificial_measure_replacelist(n_k, n_vals_i, n_feats,
unique_=False):
last = 0
rounds = np.random.randint(1, 40)
measure = create_empty_replacelist(n_k, n_vals_i, n_feats)
for i in range(rounds):
n_iss = np.random.randint(1, 10)
if unique_:
vals_i = np.array([last+np.arange(n_iss)]*n_k)
last += n_iss
else:
vals_i = create_vals_i(n_iss, n_vals_i, n_k)
x_i = create_features_i_dict(n_feats, n_iss, n_k)
for k in range(len(vals_i)):
measure[k][0].append(x_i[k])
measure[k][1].append(vals_i[k])
return measure
def create_empty_array(n_k, n_vals_i, n_feats):
return np.zeros((n_vals_i, n_feats, n_k))
def create_empty_append(n_k, n_iss, n_feats):
    return [[[] for _ in range(n_iss)] for _ in range(n_k)]
def create_empty_replacelist(n_k, n_iss, n_feats):
    return [[[], []] for _ in range(n_k)]
def create_vals_i(n_iss, nvals, n_k):
return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss))
def create_empty_features_array(n_feats, n_iss, n_k):
return np.zeros((n_k, n_iss, n_feats))
def create_empty_features_dict(n_feats, n_iss, n_k):
    return [[{} for _ in range(n_iss)] for _ in range(n_k)]
def create_features_i_array(n_feats, n_iss, n_k):
x_i = np.random.random((n_k, n_iss, n_feats))
return x_i
def create_features_i_dict(n_feats, n_iss, n_k):
x_i = []
for k in range(n_k):
x_i_k = []
for i in range(n_iss):
keys = np.unique(np.random.randint(1, n_feats, n_feats))
keys = [str(e) for e in keys]
values = np.random.random(len(keys))
x_i_k.append(dict(zip(keys, values)))
x_i.append(x_i_k)
return x_i
| true
| true
|
f702fb01b8cc397b472b0efaeea890401516c4ba
| 384
|
py
|
Python
|
pipenv/patched/prettytoml/test_prettifier.py
|
Enzime/pipenv
|
d4f710be4a39e09a82a5133b7b3a277ee9bfb13a
|
[
"MIT"
] | 11
|
2016-04-15T10:02:20.000Z
|
2022-03-25T13:39:53.000Z
|
pipenv/patched/prettytoml/test_prettifier.py
|
Enzime/pipenv
|
d4f710be4a39e09a82a5133b7b3a277ee9bfb13a
|
[
"MIT"
] | 4
|
2020-03-24T16:06:51.000Z
|
2021-06-10T20:48:41.000Z
|
pipenv/patched/prettytoml/test_prettifier.py
|
Enzime/pipenv
|
d4f710be4a39e09a82a5133b7b3a277ee9bfb13a
|
[
"MIT"
] | 6
|
2017-10-09T21:45:28.000Z
|
2022-02-16T15:09:42.000Z
|
from .prettifier import prettify
from .prettifier.common import assert_prettifier_works
import pytoml
def test_prettifying_against_humanly_verified_sample():
toml_source = open('sample.toml').read()
expected = open('sample-prettified.toml').read()
assert_prettifier_works(toml_source, expected, prettify)
assert pytoml.loads(toml_source) == pytoml.loads(expected)
| 29.538462
| 62
| 0.786458
|
from .prettifier import prettify
from .prettifier.common import assert_prettifier_works
import pytoml
def test_prettifying_against_humanly_verified_sample():
toml_source = open('sample.toml').read()
expected = open('sample-prettified.toml').read()
assert_prettifier_works(toml_source, expected, prettify)
assert pytoml.loads(toml_source) == pytoml.loads(expected)
| true
| true
|
f702fcbc8a7a2d562be1b856c7837695c9f46e8c
| 3,759
|
py
|
Python
|
mux_python/models/signal_live_stream_complete_response.py
|
gts-work/mux-python
|
826e52730bad7acd08c31a3e1951a281521f1b4f
|
[
"MIT"
] | null | null | null |
mux_python/models/signal_live_stream_complete_response.py
|
gts-work/mux-python
|
826e52730bad7acd08c31a3e1951a281521f1b4f
|
[
"MIT"
] | null | null | null |
mux_python/models/signal_live_stream_complete_response.py
|
gts-work/mux-python
|
826e52730bad7acd08c31a3e1951a281521f1b4f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class SignalLiveStreamCompleteResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'object'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None, local_vars_configuration=None): # noqa: E501
"""SignalLiveStreamCompleteResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this SignalLiveStreamCompleteResponse. # noqa: E501
:return: The data of this SignalLiveStreamCompleteResponse. # noqa: E501
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this SignalLiveStreamCompleteResponse.
:param data: The data of this SignalLiveStreamCompleteResponse. # noqa: E501
:type data: object
"""
self._data = data
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
                # inspect.getargspec was removed in Python 3.11;
                # getfullargspec is a drop-in replacement for this arity check.
                args = inspect.getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SignalLiveStreamCompleteResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SignalLiveStreamCompleteResponse):
return True
return self.to_dict() != other.to_dict()
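# Illustrative usage (a sketch with a hypothetical payload):
#   resp = SignalLiveStreamCompleteResponse(data={"status": "ok"})
#   resp.to_dict()  # => {'data': {'status': 'ok'}}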
| 29.139535
| 205
| 0.586326
|
import inspect
import pprint
import re
import six
from mux_python.configuration import Configuration
class SignalLiveStreamCompleteResponse(object):
openapi_types = {
'data': 'object'
}
attribute_map = {
'data': 'data'
}
    def __init__(self, data=None, local_vars_configuration=None):
        if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, "to_dict"):
                args = inspect.getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SignalLiveStreamCompleteResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, SignalLiveStreamCompleteResponse):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
f702fdacc80239cb68f714c266ba5d5ed7d3b8b7
| 18,330
|
py
|
Python
|
keras_ocr/_version.py
|
bayethiernodiop/keras-ocr
|
73349ce88237e9b9dc7e1ac0754042f89fb4e13e
|
[
"MIT"
] | 8
|
2020-08-27T14:37:46.000Z
|
2021-09-24T07:33:46.000Z
|
keras_ocr/_version.py
|
bayethiernodiop/keras-ocr
|
73349ce88237e9b9dc7e1ac0754042f89fb4e13e
|
[
"MIT"
] | 4
|
2021-06-08T22:59:39.000Z
|
2022-03-12T00:59:11.000Z
|
keras_ocr/_version.py
|
bayethiernodiop/keras-ocr
|
73349ce88237e9b9dc7e1ac0754042f89fb4e13e
|
[
"MIT"
] | 5
|
2020-11-01T21:03:05.000Z
|
2021-08-19T15:55:57.000Z
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-pre"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "keras_ocr/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands, ))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None
}
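# Editor's sketch: once git-archive has expanded the keywords, a value such as
#   refnames = " (HEAD -> master, tag: v0.8.5)"
# with tag_prefix "v" keeps only the "tag: " refs, strips the prefix, and
# returns version "0.8.5"; the "%ci" date "2020-01-02 03:04:05 +0000" is
# rewritten to the ISO-8601-style "2020-01-02T03:04:05+0000".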
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", "--match",
"%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
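# Editor's sketch: a describe string "v0.8.5-12-gdeadbee-dirty" (tag_prefix
# "v") parses into pieces like
#   {"closest-tag": "0.8.5", "distance": 12, "short": "deadbee",
#    "dirty": True, "long": <full HEAD sha>, "error": None, "date": ...}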
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
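# Editor's sketch: for pieces like
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": True}
# render_pep440 yields "1.2+3.gabc1234.dirty"; with no tag at all it falls
# back to "0+untagged.3.gabc1234.dirty".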
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date")
}
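# Editor's sketch: render() is a pure dispatcher. For the tagged pieces
# sketched above (with "error": None and a "long" sha filled in),
#   render(pieces, "pep440-pre")["version"] == "1.2.post.dev3"
# while any pieces carrying an "error" short-circuit to version "unknown".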
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None
}
| 34.070632
| 98
| 0.588598
|
import errno
import os
import re
import subprocess
import sys
def get_keywords():
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
def get_config():
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-pre"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "keras_ocr/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):
    def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
p = subprocess.Popen([c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands, ))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None
}
else:
rootdirs.append(root)
root = os.path.dirname(root)
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# -like" string, which we must then edit to make compliant), because
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date
}
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
describe_out, rc = run_command(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", "--match",
"%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date")
}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None
}
| true
| true
|
f702fe7bb0f5dd86a6d9b1e9444a99e6a59063b4
| 3,239
|
py
|
Python
|
vistrails/db/versions/v0_5_0/persistence/xml/xml_dao.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | 1
|
2015-05-11T16:46:49.000Z
|
2015-05-11T16:46:49.000Z
|
vistrails/db/versions/v0_5_0/persistence/xml/xml_dao.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | null | null | null |
vistrails/db/versions/v0_5_0/persistence/xml/xml_dao.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from datetime import date, datetime
from vistrails.core.system import strftime, time_strptime
class XMLDAO:
def __init__(self):
pass
def getAttribute(self, node, attr):
try:
attribute = node.attributes.get(attr)
if attribute is not None:
return attribute.value
except KeyError:
pass
return None
def convertFromStr(self, value, type):
if value is not None:
if type == 'str':
return str(value)
elif value.strip() != '':
if type == 'long':
return long(value)
elif type == 'float':
return float(value)
elif type == 'int':
return int(value)
elif type == 'date':
return date(*time_strptime(value, '%Y-%m-%d')[0:3])
elif type == 'datetime':
return datetime(*time_strptime(value, '%Y-%m-%d %H:%M:%S')[0:6])
return None
def convertToStr(self, value, type):
if value is not None:
if type == 'date':
return value.isoformat()
elif type == 'datetime':
return strftime(value, '%Y-%m-%d %H:%M:%S')
else:
return str(value)
return ''
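# Editor's sketch (comment only; the module targets Python 2 -- note the bare
# long() call above -- and needs vistrails importable): the two converters
# round-trip typed values through their XML string form, e.g.
#   dao = XMLDAO()
#   s = dao.convertToStr(datetime(2011, 5, 1, 12, 0), 'datetime')
#   # s == '2011-05-01 12:00:00', assuming strftime matches datetime.strftime
#   dao.convertFromStr(s, 'datetime') == datetime(2011, 5, 1, 12, 0)  # True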
| 41
| 84
| 0.600494
|
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
from datetime import date, datetime
from vistrails.core.system import strftime, time_strptime
class XMLDAO:
def __init__(self):
pass
def getAttribute(self, node, attr):
try:
attribute = node.attributes.get(attr)
if attribute is not None:
return attribute.value
except KeyError:
pass
return None
def convertFromStr(self, value, type):
if value is not None:
if type == 'str':
return str(value)
elif value.strip() != '':
if type == 'long':
return long(value)
elif type == 'float':
return float(value)
elif type == 'int':
return int(value)
elif type == 'date':
return date(*time_strptime(value, '%Y-%m-%d')[0:3])
elif type == 'datetime':
return datetime(*time_strptime(value, '%Y-%m-%d %H:%M:%S')[0:6])
return None
def convertToStr(self, value, type):
if value is not None:
if type == 'date':
return value.isoformat()
elif type == 'datetime':
return strftime(value, '%Y-%m-%d %H:%M:%S')
else:
return str(value)
return ''
| true
| true
|
f702fea9527715af2b456968c66f01b355926e39
| 50,389
|
py
|
Python
|
pylib/gyp/msvs_emulation.py
|
xforce/gyp
|
a079e0aeab3470d14055657bba75adaa94e974e6
|
[
"BSD-3-Clause"
] | null | null | null |
pylib/gyp/msvs_emulation.py
|
xforce/gyp
|
a079e0aeab3470d14055657bba75adaa94e974e6
|
[
"BSD-3-Clause"
] | null | null | null |
pylib/gyp/msvs_emulation.py
|
xforce/gyp
|
a079e0aeab3470d14055657bba75adaa94e974e6
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import collections
import os
import pickle
import re
import subprocess
import sys
import time
import hashlib
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
from gyp import DebugOutput, DEBUG_GENERAL
try:
import sys
reload(sys)
sys.setdefaultencoding('utf8')
except:
pass
try:
basestring = basestring
except NameError:
basestring = str
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# Use a heuristic to try to find args that are paths, and normalize them
if arg.find('/') > 0 or arg.count('/') > 1:
arg = os.path.normpath(arg)
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
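# Editor's sketch:
#   EncodeRspFileList(['echo', 'hello "world"', '100%'])
# produces:  echo "hello \"world\"" "100%%"
# (the command itself is left unquoted so built-ins keep working), and
#   EncodeRspFileList(['call ../x.bat', 'arg'])
# normpaths only the ../x.bat part, not the leading 'call'.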
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
append.extend(element)
else:
append.append(element)
else:
return element
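# Editor's sketch of how these helpers compose inside _GetAndMunge below:
#   _GenericRetrieve({'VCCLCompilerTool': {'WarningLevel': '4'}}, None,
#                    ['VCCLCompilerTool', 'WarningLevel'])   -> '4'
#   _AddPrefix('4', '/W')                                    -> '/W4'
#   _DoRemapping('true', {'true': '/GF'})                    -> '/GF' (dict used via .get)
#   _AppendOrReturn(flags, '/W4') appends to flags in place; with append=None
#   it simply returns the element.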
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if isinstance(line, bytes):
line = line.decode()
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path().decode(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
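# Editor's sketch: given two configs whose msvs_system_include_dirs are
#   ['$(DXSDK_DIR)include', 'third_party']  and  ['$(DXSDK_DIR)include'],
# only the common '$(DXSDK_DIR)include' survives the intersection; it is
# macro-expanded once here and removed from each config's own list, leaving
# 'third_party' behind in the first config.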
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.items():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.lower().endswith('.lib') else lib
for lib in libs]
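# Editor's sketch:
#   AdjustLibraries(['-lfoo', 'bar.lib', 'baz']) -> ['foo.lib', 'bar.lib', 'baz.lib']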
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release'), VS2015 and later only use this level
if int(self.vs_version.short_name) >= 2015:
return config
# and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
#arch = self.GetArch(config)
#if arch == 'x64' and not config.endswith('_x64'):
# config += '_x64'
#if arch == 'x86' and config.endswith('_x64'):
# config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if float(self.vs_version.project_version) >= 12.0:
# New flag introduced in VS2013 (project version 12.0) Forces writes to
# the program database (PDB) to be serialized through MSPDBSRV.EXE.
# https://msdn.microsoft.com/en-us/library/dn502518.aspx
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = [x for x in cflags if not x.startswith('/MP')]
return cflags
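# Editor's sketch: each cl(...) call above reads one VCCLCompilerTool setting
# and appends a flag, so a gyp config containing
#   'VCCLCompilerTool': {'Optimization': '2', 'WarningLevel': '4',
#                        'RuntimeLibrary': '1'}
# contributes '/O2', '/W4' and '/MTd' to cflags.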
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = self.msvs_precompiled_header[config]
pchbase = os.path.split(pch)[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pchbase + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', [])
if s.lower().endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
if not any('DYNAMICBASE' in flag or flag == '/FIXED' for flag in ldflags):
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not any('NXCOMPAT' in flag for flag in ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = any(flag.startswith('/DEF:') for flag in ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
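  # For example (hypothetical value), a Culture of '1033' (en-US) is emitted
  # as '/l409' by the mapping above, since hex(1033) == '0x409'.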
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
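  # For example (hypothetical inputs), args=['./do.sh', 'out\\a.txt'] with
  # path_to_base='..\\..' yields roughly:
  #   call "<cygwin>\\setup_env.bat" && set CYGWIN=nontsec &&
  #     bash -c "cd ../.. ; './do.sh' 'out/a.txt'"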
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
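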
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
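  # For example (hypothetical config with no VCMIDLTool overrides), an input
  # 'foo.idl' yields outputs ['${root}.h', 'dlldata.c', '${root}_i.c',
  # '${root}_p.c'], where ${root} is expected to be substituted with the
  # source basename by the ninja generator.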
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
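# A minimal sketch (illustrative helper, not part of gyp) of the matching
# above: C sources only pair with a C pch source, and likewise for C++.
def _DemoLanguageMatchesForPch():
  assert _LanguageMatchesForPch('.cc', '.cpp')
  assert not _LanguageMatchesForPch('.c', '.cpp')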
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return self.settings.msvs_precompiled_header[self.config]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.items():
if isinstance(new, bytes):
new = new.decode()
assert '$(' not in new, new
string = string.replace(old, new)
return string
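# A minimal sketch (illustrative helper with a hypothetical expansions dict,
# not part of gyp) of the macro expansion above: keys are replaced verbatim
# wherever they occur in the string.
def _DemoExpandMacros():
  env = {'$(OutDir)\\': 'out\\Release\\'}
  assert ExpandMacros('$(OutDir)\\foo.dll', env) == 'out\\Release\\foo.dll'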
def _ExtractImportantEnvironment(output_of_set, arch):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
  cl_find = 'cl.exe'
  if 'Visual Studio 2017'.encode('utf-8') in output_of_set:
    cl_find = arch + '.' + cl_find
  # An empty dump occasionally happens and leads to misleading SYSTEMROOT
  # error messages if not caught here.
  if output_of_set.count('='.encode('utf-8')) == 0:
    raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
for line in output_of_set.splitlines():
if re.search(cl_find.encode(), line, re.I):
env['GYP_CL_PATH'] = line
continue
for envvar in envvars_to_save:
if re.match((envvar + '=').encode(), line, re.I):
var, setting = line.split('='.encode(), 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting.decode()
env[var.upper()] = setting
break
for required in (b'SYSTEMROOT', b'TEMP', b'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
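# A minimal sketch (fabricated 'set' output, illustrative helper only) of the
# parsing above; PATH is augmented with this python's directory, and
# SYSTEMROOT/TEMP/TMP must be present or an exception is raised.
def _DemoExtractImportantEnvironment():
  dump = b'SYSTEMROOT=C:\\Windows\nTEMP=C:\\t\nTMP=C:\\t\nPATH=C:\\bin\n'
  env = _ExtractImportantEnvironment(dump, 'x64')
  assert env[b'SYSTEMROOT'] == b'C:\\Windows'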
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
  for key, value in envvar_dict.items():
    # Keys and values may be str or bytes (see _ExtractImportantEnvironment);
    # normalize to str before concatenating.
    if isinstance(key, bytes):
      key = key.decode()
    if isinstance(value, bytes):
      value = value.decode()
    block += key + '=' + value + nul
block += nul
return block
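# A minimal sketch (illustrative helper only) of the block layout above: each
# 'KEY=value' entry is NUL-terminated, and the whole block ends with an
# additional NUL.
def _DemoFormatAsEnvironmentBlock():
  block = _FormatAsEnvironmentBlock({'SYSTEMROOT': 'C:\\Windows'})
  assert block == 'SYSTEMROOT=C:\\Windows\x00\x00'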
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
  When the following procedure to generate environment files does not
  meet your requirements (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to gyp to suppress file
  generation and use custom environment files you have prepared yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
env = _GetEnvironment(arch, vs, open_out)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'w')
f.write(env_block)
f.close()
cl_paths[arch] = env['GYP_CL_PATH']
return cl_paths
def _GetEnvironment(arch, vs, open_out):
"""
  This function runs the VC environment setup script, retrieves its
  variables, and also the path of cl.exe.
  It then tries to cache the values to disk, and on the next run tries to
  look up the cache. The cache key is the path to the setup script (which is
  embedded within each installed Visual Studio instance) plus its args.
  Even after a cache hit we do some validation of the cached values, since
  parts of the tool-set can be upgraded within the installation's lifecycle,
  so paths and version numbers may change.
Args:
arch: {string} target architecture
vs: VisualStudioVersion
open_out: file open wrapper
  Returns: {dict} the important environment variables VC needs to run
"""
env = {}
args = vs.SetupScript(arch)
args.extend(('&&', 'set', '&&', 'where', 'cl.exe'))
cache_key = hashlib.md5(''.join(args).encode('utf-8')).hexdigest()
  # The default value for %TEMP% makes all cache lookups safely miss
appdata_dir = os.environ.get('TEMP', '')
cache_path = os.path.join(appdata_dir, '.gyp-cache')
cache_keyed_file = os.path.join(cache_path, cache_key)
if os.path.exists(cache_keyed_file):
try:
      with open(cache_keyed_file, 'rb') as f:
env = pickle.load(f)
except Exception:
pass
cl_path = env.get('GYP_CL_PATH', '')
if os.path.exists(cl_path):
return env
else:
      # The cache has become invalid (probably from a tool-set update).
os.remove(cache_keyed_file)
  start_time = time.time()
# Extract environment variables for subprocesses.
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
std_out, _ = popen.communicate()
if popen.returncode != 0:
raise Exception('"%s" failed with error %d' % (args, popen.returncode))
  end_time = time.time()
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "vcvars %s time: %f" %
(' '.join(args), end_time - start_time))
env = _ExtractImportantEnvironment(std_out, arch)
if os.path.exists(appdata_dir):
try:
      with open_out(cache_keyed_file, 'wb') as f:  # binary mode for pickle
pickle.dump(env, f)
except Exception as e:
      print(e)
return env
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = [x for x in relative if not os.path.exists(x)]
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| 42.702542
| 83
| 0.672806
|
import collections
try:
  collections.Iterable
except AttributeError:
  # collections.Iterable was removed from the top-level collections
  # namespace in Python 3.10; fall back to collections.abc.
  import collections.abc
  collections.Iterable = collections.abc.Iterable
import os
import pickle
import re
import subprocess
import sys
import time
import hashlib
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
from gyp import DebugOutput, DEBUG_GENERAL
try:
import sys
reload(sys)
sys.setdefaultencoding('utf8')
except:
pass
try:
basestring = basestring
except NameError:
basestring = str
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# Use a heuristic to try to find args that are paths, and normalize them
if arg.find('/') > 0 or arg.count('/') > 1:
arg = os.path.normpath(arg)
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
  # %'s also need to be doubled otherwise they're interpreted as batch
  # positional arguments. Doubling also ensures that a literal % (e.g. in
  # something that looks like %PATH%) survives cmd.exe processing.
arg = arg.replace('%', '%%')
return '"' + arg + '"'
def EncodeRspFileList(args):
  # Note that the first argument is assumed to be the command, so don't add
  # quoting around it. Take care to normpath only the path in the case of
  # 'call ../x.bat' because otherwise the whole thing is incorrectly
  # interpreted as a path and not normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
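# For example (hypothetical command, Windows path separators),
# EncodeRspFileList(['call ../x.bat', 'a b']) yields
# 'call ..\\x.bat "a b"'; only the program path is normpath'd.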
def _GenericRetrieve(root, default, path):
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
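# A minimal sketch (illustrative helper only) of the recursive lookup above:
# each path element descends one level, falling back to the default.
def _DemoGenericRetrieve():
  assert _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'b']) == 1
  assert _GenericRetrieve({'a': {}}, 'default', ['a', 'x']) == 'default'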
def _AddPrefix(element, prefix):
if element is None:
return element
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
if append is not None and element is not None:
if (isinstance(element, collections.Iterable) and
not isinstance(element, basestring)):
append.extend(element)
else:
append.append(element)
else:
return element
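# A minimal sketch (illustrative helper only) of the munging helpers above:
# prefixes distribute over lists, remapping goes through a dict or callable,
# and results are either appended to an accumulator or returned.
def _DemoMungingHelpers():
  assert _AddPrefix(['a', 'b'], '/I') == ['/Ia', '/Ib']
  assert _DoRemapping('true', {'true': '/GL'}) == '/GL'
  acc = []
  _AppendOrReturn(acc, ['x', 'y'])
  assert acc == ['x', 'y']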
def _FindDirectXInstallation():
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if isinstance(line, bytes):
line = line.decode()
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
env = {}
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path().decode(), 'VC') + '\\'
  # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be set
  # by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.items():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.lower().endswith('.lib') else lib
for lib in libs]
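  # For example (hypothetical inputs), ['-lfoo', 'bar.lib', 'Baz'] becomes
  # ['foo.lib', 'bar.lib', 'Baz.lib'].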
def _GetAndMunge(self, field, path, default, prefix, append, map):
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
    # There are two levels of architecture/platform specification in VS: one
    # global to the configuration, and a target-specific override. VS 2015
    # and later only use the global level; this build returns the
    # configuration unchanged in both cases.
if int(self.vs_version.short_name) >= 2015:
return config
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if float(self.vs_version.project_version) >= 12.0:
cflags.append('/FS')
cflags = [x for x in cflags if not x.startswith('/MP')]
return cflags
def _GetPchFlags(self, config, extension):
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = self.msvs_precompiled_header[config]
pchbase = os.path.split(pch)[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pchbase + '.pch']
return []
def GetCflagsC(self, config):
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', [])
if s.lower().endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
if not any('DYNAMICBASE' in flag or flag == '/FIXED' for flag in ldflags):
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not any('NXCOMPAT' in flag for flag in ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = any(flag.startswith('/DEF:') for flag in ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
    # Need to join with the build_dir here as we're writing it during
    # generation time; the file is only written when its contents change so
    # that regenerating project files doesn't cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
return self.settings.msvs_precompiled_header[self.config]
def GetObjDependencies(self, sources, objs, arch):
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
if '$' in string:
for old, new in expansions.items():
if isinstance(new, bytes):
new = new.decode()
assert '$(' not in new, new
string = string.replace(old, new)
return string
def _ExtractImportantEnvironment(output_of_set, arch):
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
  cl_find = 'cl.exe'
  if 'Visual Studio 2017'.encode('utf-8') in output_of_set:
    cl_find = arch + '.' + cl_find
  # An empty dump occasionally happens and leads to misleading SYSTEMROOT
  # error messages if not caught here.
  if output_of_set.count('='.encode('utf-8')) == 0:
    raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
for line in output_of_set.splitlines():
if re.search(cl_find.encode(), line, re.I):
env['GYP_CL_PATH'] = line
continue
for envvar in envvars_to_save:
if re.match((envvar + '=').encode(), line, re.I):
var, setting = line.split('='.encode(), 1)
if envvar == 'path':
          # Our own rules (for running gyp-win-tool) and other actions in
          # Chromium rely on python being in the path. Add the path to this
          # python here so that if it's not in the path when ninja is run
          # later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting.decode()
env[var.upper()] = setting
break
for required in (b'SYSTEMROOT', b'TEMP', b'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
block = ''
nul = '\0'
  for key, value in envvar_dict.items():
    if isinstance(key, bytes):
      key = key.decode()
    if isinstance(value, bytes):
      value = value.decode()
    block += key + '=' + value + nul
block += nul
return block
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
env = _GetEnvironment(arch, vs, open_out)
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'w')
f.write(env_block)
f.close()
cl_paths[arch] = env['GYP_CL_PATH']
return cl_paths
def _GetEnvironment(arch, vs, open_out):
env = {}
args = vs.SetupScript(arch)
args.extend(('&&', 'set', '&&', 'where', 'cl.exe'))
cache_key = hashlib.md5(''.join(args).encode('utf-8')).hexdigest()
appdata_dir = os.environ.get('TEMP', '')
cache_path = os.path.join(appdata_dir, '.gyp-cache')
cache_keyed_file = os.path.join(cache_path, cache_key)
if os.path.exists(cache_keyed_file):
try:
      with open(cache_keyed_file, 'rb') as f:
env = pickle.load(f)
except Exception:
pass
cl_path = env.get('GYP_CL_PATH', '')
if os.path.exists(cl_path):
return env
else:
os.remove(cache_keyed_file)
  start_time = time.time()
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
std_out, _ = popen.communicate()
if popen.returncode != 0:
raise Exception('"%s" failed with error %d' % (args, popen.returncode))
  end_time = time.time()
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "vcvars %s time: %f" %
(' '.join(args), end_time - start_time))
env = _ExtractImportantEnvironment(std_out, arch)
if os.path.exists(appdata_dir):
try:
      with open_out(cache_keyed_file, 'wb') as f:
pickle.dump(env, f)
except Exception as e:
      print(e)
return env
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = [x for x in relative if not os.path.exists(x)]
if missing:
      # They'll look like out\Release\..\..\stuff\things.cc, so normalize the
      # path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| true
| true
|
f702fee5a19306d06dfa47f23154ec7fca804920
| 7,915
|
py
|
Python
|
keystone/common/tokenless_auth.py
|
rajivmucheli/keystone
|
d55099d4a17e3672d478aae8c367bcdf9af15fb9
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/tokenless_auth.py
|
rajivmucheli/keystone
|
d55099d4a17e3672d478aae8c367bcdf9af15fb9
|
[
"Apache-2.0"
] | 4
|
2020-02-10T12:02:37.000Z
|
2021-07-14T15:16:57.000Z
|
keystone/common/tokenless_auth.py
|
rajivmucheli/keystone
|
d55099d4a17e3672d478aae8c367bcdf9af15fb9
|
[
"Apache-2.0"
] | 5
|
2019-06-06T15:11:37.000Z
|
2021-06-07T08:23:23.000Z
|
# Copyright 2015 Hewlett-Packard
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from oslo_log import log
from keystone.auth import core
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class TokenlessAuthHelper(provider_api.ProviderAPIMixin, object):
def __init__(self, env):
"""A init class for TokenlessAuthHelper.
:param env: The HTTP request environment that should contain
client certificate attributes. These attributes should match
            what the mapping defines; otherwise the user cannot be mapped and
            the request is treated as unauthenticated. The following examples
            are for attributes that reference the client certificate's Subject's
Common Name and Organization:
SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O
:type env: dict
"""
self.env = env
def _build_scope_info(self):
"""Build the token request scope based on the headers.
:returns: scope data
:rtype: dict
"""
project_id = self.env.get('HTTP_X_PROJECT_ID')
project_name = self.env.get('HTTP_X_PROJECT_NAME')
project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID')
project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME')
domain_id = self.env.get('HTTP_X_DOMAIN_ID')
domain_name = self.env.get('HTTP_X_DOMAIN_NAME')
scope = {}
if project_id:
scope['project'] = {'id': project_id}
elif project_name:
scope['project'] = {'name': project_name}
if project_domain_id:
scope['project']['domain'] = {'id': project_domain_id}
elif project_domain_name:
scope['project']['domain'] = {'name': project_domain_name}
else:
msg = _('Neither Project Domain ID nor Project Domain Name '
'was provided.')
raise exception.ValidationError(msg)
elif domain_id:
scope['domain'] = {'id': domain_id}
elif domain_name:
scope['domain'] = {'name': domain_name}
else:
raise exception.ValidationError(
attribute='project or domain',
target='scope')
return scope
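    # For example (hypothetical headers), HTTP_X_PROJECT_NAME='demo' together
    # with HTTP_X_PROJECT_DOMAIN_NAME='Default' yields
    # {'project': {'name': 'demo', 'domain': {'name': 'Default'}}}.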
def get_scope(self):
auth = {}
# NOTE(chioleong): Auth methods here are insignificant because
# we only care about using auth.controllers.AuthInfo
# to validate the scope information. Therefore,
# we don't provide any identity.
auth['scope'] = self._build_scope_info()
# NOTE(chioleong): We'll let AuthInfo validate the scope for us
auth_info = core.AuthInfo.create(auth, scope_only=True)
return auth_info.get_scope()
def get_mapped_user(self, project_id=None, domain_id=None):
"""Map client certificate to an existing user.
        If the user is ephemeral, there is no validation of the user itself;
        however, the user is mapped to a corresponding group (or groups), and
        the scope of this ephemeral user is the same as what is assigned to
        the group.
:param project_id: Project scope of the mapped user.
:param domain_id: Domain scope of the mapped user.
:returns: A dictionary that contains the keys, such as
user_id, user_name, domain_id, domain_name
:rtype: dict
"""
idp_id = self._build_idp_id()
LOG.debug('The IdP Id %s and protocol Id %s are used to look up '
'the mapping.', idp_id, CONF.tokenless_auth.protocol)
mapped_properties, mapping_id = self.federation_api.evaluate(
idp_id, CONF.tokenless_auth.protocol, self.env)
user = mapped_properties.get('user', {})
user_id = user.get('id')
user_name = user.get('name')
user_type = user.get('type')
if user.get('domain') is not None:
user_domain_id = user.get('domain').get('id')
user_domain_name = user.get('domain').get('name')
else:
user_domain_id = None
user_domain_name = None
        # If the user is of ephemeral type, we don't care whether the user
        # exists; we only care whether the mapped group(s) are valid.
if user_type == utils.UserType.EPHEMERAL:
user_ref = {'type': utils.UserType.EPHEMERAL}
group_ids = mapped_properties['group_ids']
utils.validate_mapped_group_ids(group_ids,
mapping_id,
self.identity_api)
group_ids.extend(
utils.transform_to_group_ids(
mapped_properties['group_names'], mapping_id,
self.identity_api, self.assignment_api))
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
if roles is not None:
role_names = [role['name'] for role in roles]
user_ref['roles'] = role_names
user_ref['group_ids'] = list(group_ids)
user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id
user_ref[federation_constants.PROTOCOL] = (
CONF.tokenless_auth.protocol)
return user_ref
if user_id:
user_ref = self.identity_api.get_user(user_id)
elif user_name and (user_domain_name or user_domain_id):
if user_domain_name:
user_domain = self.resource_api.get_domain_by_name(
user_domain_name)
self.resource_api.assert_domain_enabled(user_domain['id'],
user_domain)
user_domain_id = user_domain['id']
user_ref = self.identity_api.get_user_by_name(user_name,
user_domain_id)
else:
msg = _('User auth cannot be built due to missing either '
'user id, or user name with domain id, or user name '
'with domain name.')
raise exception.ValidationError(msg)
self.identity_api.assert_user_enabled(
user_id=user_ref['id'],
user=user_ref)
user_ref['type'] = utils.UserType.LOCAL
return user_ref
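        # Illustrative (assumed values): the local branch might yield a dict
        # like {'id': '<user uuid>', 'name': 'alice', 'domain_id': '<uuid>',
        # 'type': 'local'}, while the ephemeral branch above also records the
        # IdP id, protocol, roles, and group ids under the federation keys.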
def _build_idp_id(self):
"""Build the IdP name from the given config option issuer_attribute.
The default issuer attribute SSL_CLIENT_I_DN in the environment is
built with the following formula -
base64_idp = sha1(env['SSL_CLIENT_I_DN'])
:returns: base64_idp like the above example
:rtype: str
"""
idp = self.env.get(CONF.tokenless_auth.issuer_attribute)
if idp is None:
raise exception.TokenlessAuthConfigError(
issuer_attribute=CONF.tokenless_auth.issuer_attribute)
hashed_idp = hashlib.sha256(idp.encode('utf-8'))
return hashed_idp.hexdigest()
| 41.439791
| 78
| 0.61175
|
import hashlib
from oslo_log import log
from keystone.auth import core
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class TokenlessAuthHelper(provider_api.ProviderAPIMixin, object):
def __init__(self, env):
self.env = env
def _build_scope_info(self):
project_id = self.env.get('HTTP_X_PROJECT_ID')
project_name = self.env.get('HTTP_X_PROJECT_NAME')
project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID')
project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME')
domain_id = self.env.get('HTTP_X_DOMAIN_ID')
domain_name = self.env.get('HTTP_X_DOMAIN_NAME')
scope = {}
if project_id:
scope['project'] = {'id': project_id}
elif project_name:
scope['project'] = {'name': project_name}
if project_domain_id:
scope['project']['domain'] = {'id': project_domain_id}
elif project_domain_name:
scope['project']['domain'] = {'name': project_domain_name}
else:
msg = _('Neither Project Domain ID nor Project Domain Name '
'was provided.')
raise exception.ValidationError(msg)
elif domain_id:
scope['domain'] = {'id': domain_id}
elif domain_name:
scope['domain'] = {'name': domain_name}
else:
raise exception.ValidationError(
attribute='project or domain',
target='scope')
return scope
def get_scope(self):
auth = {}
auth['scope'] = self._build_scope_info()
# NOTE(chioleong): We'll let AuthInfo validate the scope for us
auth_info = core.AuthInfo.create(auth, scope_only=True)
return auth_info.get_scope()
def get_mapped_user(self, project_id=None, domain_id=None):
idp_id = self._build_idp_id()
LOG.debug('The IdP Id %s and protocol Id %s are used to look up '
'the mapping.', idp_id, CONF.tokenless_auth.protocol)
mapped_properties, mapping_id = self.federation_api.evaluate(
idp_id, CONF.tokenless_auth.protocol, self.env)
user = mapped_properties.get('user', {})
user_id = user.get('id')
user_name = user.get('name')
user_type = user.get('type')
if user.get('domain') is not None:
user_domain_id = user.get('domain').get('id')
user_domain_name = user.get('domain').get('name')
else:
user_domain_id = None
user_domain_name = None
        # for an ephemeral user we only care that the mapped group(s) are valid
if user_type == utils.UserType.EPHEMERAL:
user_ref = {'type': utils.UserType.EPHEMERAL}
group_ids = mapped_properties['group_ids']
utils.validate_mapped_group_ids(group_ids,
mapping_id,
self.identity_api)
group_ids.extend(
utils.transform_to_group_ids(
mapped_properties['group_names'], mapping_id,
self.identity_api, self.assignment_api))
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
if roles is not None:
role_names = [role['name'] for role in roles]
user_ref['roles'] = role_names
user_ref['group_ids'] = list(group_ids)
user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id
user_ref[federation_constants.PROTOCOL] = (
CONF.tokenless_auth.protocol)
return user_ref
if user_id:
user_ref = self.identity_api.get_user(user_id)
elif user_name and (user_domain_name or user_domain_id):
if user_domain_name:
user_domain = self.resource_api.get_domain_by_name(
user_domain_name)
self.resource_api.assert_domain_enabled(user_domain['id'],
user_domain)
user_domain_id = user_domain['id']
user_ref = self.identity_api.get_user_by_name(user_name,
user_domain_id)
else:
msg = _('User auth cannot be built due to missing either '
'user id, or user name with domain id, or user name '
'with domain name.')
raise exception.ValidationError(msg)
self.identity_api.assert_user_enabled(
user_id=user_ref['id'],
user=user_ref)
user_ref['type'] = utils.UserType.LOCAL
return user_ref
def _build_idp_id(self):
idp = self.env.get(CONF.tokenless_auth.issuer_attribute)
if idp is None:
raise exception.TokenlessAuthConfigError(
issuer_attribute=CONF.tokenless_auth.issuer_attribute)
hashed_idp = hashlib.sha256(idp.encode('utf-8'))
return hashed_idp.hexdigest()
| true
| true
|
f702ff17b34fbd489d3cfceaa9c5286e6c4611ca
| 1,058
|
py
|
Python
|
FLAAT/ch6/algo_6_3.py
|
colddrizzle/FLACT
|
d23ec807be3f5ea21cfa9a7a1499198d14681262
|
[
"MIT"
] | null | null | null |
FLAAT/ch6/algo_6_3.py
|
colddrizzle/FLACT
|
d23ec807be3f5ea21cfa9a7a1499198d14681262
|
[
"MIT"
] | null | null | null |
FLAAT/ch6/algo_6_3.py
|
colddrizzle/FLACT
|
d23ec807be3f5ea21cfa9a7a1499198d14681262
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from common.BNFParser import *
from common.Grammar import Grammar
# Compute the set of nullable variables of grammar G.
# The algorithm depends only on G's set of productions P.
def algo_6_3(P):
"""
    Test data taken from Chapter 6, Exercise 12(2).
>>> from common.production import Production
>>> p1 = Production(['S'], [['A', 'B', 'D', 'C']])
>>> p2 = Production(['A'], [['B', 'D'], ['\\"a\\"', '\\"a\\"'], ['\\"ε\\"']])
>>> p3 = Production(['B'], [['\\"a\\"', 'B'], ['\\"a\\"']])
>>> p4 = Production(['C'], [['D','C'], ['\\"c\\"'], ['\\"ε\\"']])
>>> p5 = Production(['D'], [['\\"ε\\"']])
>>> p = [p1, p2, p3, p4, p5]
>>> u = algo_6_3(p)
>>> set(u) == set(['A', 'C', 'D'])
True
"""
simple_plist = []
for p in P:
simple_plist.extend(Production.toSimpleProduction(p))
old_u = set()
new_u = set()
for p in simple_plist:
if Production.isDirectEmpty(p):
new_u.add(p.left[0])
while new_u != old_u:
        # copy rather than alias: 'old_u = new_u' would bind both names to
        # the same set and end the loop after a single pass
        old_u = set(new_u)
for p in simple_plist:
if set(p.right[0]) <= old_u:
new_u.add(p.left[0])
return new_u
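# Worked trace (illustrative) for the doctest grammar above: the first pass
# collects {A, C, D} from the direct ε-productions; in the fixed-point loop
# the only production whose whole right side lies in {A, C, D} is C -> D C,
# which adds nothing new, so the loop terminates with {A, C, D}.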
| 27.128205
| 81
| 0.464083
|
from common.BNFParser import *
from common.Grammar import Grammar
def algo_6_3(P):
simple_plist = []
for p in P:
simple_plist.extend(Production.toSimpleProduction(p))
old_u = set()
new_u = set()
for p in simple_plist:
if Production.isDirectEmpty(p):
new_u.add(p.left[0])
while new_u != old_u:
        old_u = set(new_u)
for p in simple_plist:
if set(p.right[0]) <= old_u:
new_u.add(p.left[0])
return new_u
| true
| true
|