code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import unittest
import pathlib
import cpptypeinfo
from cpptypeinfo.usertype import (Field, Struct, Pointer, Param, Function)
# Directory containing this test file.
HERE = pathlib.Path(__file__).absolute().parent
# Path to the imgui header that the test parses (expects the imgui submodule
# to be checked out next to this package).
IMGUI_H = HERE.parent / 'libs/imgui/imgui.h'
# Shared parser instance; building EXPECTS below registers the expected
# typedefs/structs into this parser's type registry.
parser = cpptypeinfo.TypeParser()
EXPECTS = {
'ImDrawChannel':
parser.parse('struct ImDrawChannel'),
'ImDrawCmd':
parser.parse('struct ImDrawCmd'),
'ImDrawData':
parser.parse('struct ImDrawData'),
'ImDrawList':
parser.parse('struct ImDrawList'),
'ImDrawListSharedData':
parser.parse('struct ImDrawListSharedData'),
'ImDrawListSplitter':
parser.parse('struct ImDrawListSplitter'),
'ImDrawVert':
parser.parse('struct ImDrawVert'),
'ImFont':
parser.parse('struct ImFont'),
'ImFontAtlas':
parser.parse('struct ImFontAtlas'),
'ImFontConfig':
parser.parse('struct ImFontConfig'),
'ImFontGlyph':
parser.parse('struct ImFontGlyph'),
'ImFontGlyphRangesBuilder':
parser.parse('struct ImFontGlyphRangesBuilder'),
'ImColor':
parser.parse('struct ImColor'),
'ImGuiContext':
parser.parse('struct ImGuiContext'),
'ImGuiIO':
parser.parse('struct ImGuiIO'),
'ImGuiInputTextCallbackData':
parser.parse('struct ImGuiInputTextCallbackData'),
'ImGuiListClipper':
parser.parse('struct ImGuiListClipper'),
'ImGuiOnceUponAFrame':
parser.parse('struct ImGuiOnceUponAFrame'),
'ImGuiPayload':
parser.parse('struct ImGuiPayload'),
'ImGuiSizeCallbackData':
parser.parse('struct ImGuiSizeCallbackData'),
'ImGuiStorage':
parser.parse('struct ImGuiStorage'),
'ImGuiStyle':
parser.parse('struct ImGuiStyle'),
'ImGuiTextBuffer':
parser.parse('struct ImGuiTextBuffer'),
'ImGuiTextFilter':
parser.parse('struct ImGuiTextFilter'),
'ImTextureID':
parser.typedef('ImTextureID', Pointer(cpptypeinfo.Void())),
'ImGuiID':
parser.typedef('ImGuiID', cpptypeinfo.UInt32()),
'ImWchar':
parser.typedef('ImWchar', cpptypeinfo.UInt16()),
'ImGuiCol':
parser.typedef('ImGuiCol', cpptypeinfo.Int32()),
'ImGuiCond':
parser.typedef('ImGuiCond', cpptypeinfo.Int32()),
'ImGuiDataType':
parser.typedef('ImGuiDataType', cpptypeinfo.Int32()),
'ImGuiDir':
parser.typedef('ImGuiDir', cpptypeinfo.Int32()),
'ImGuiKey':
parser.typedef('ImGuiKey', cpptypeinfo.Int32()),
'ImGuiNavInput':
parser.typedef('ImGuiNavInput', cpptypeinfo.Int32()),
'ImGuiMouseCursor':
parser.typedef('ImGuiMouseCursor', cpptypeinfo.Int32()),
'ImGuiStyleVar':
parser.typedef('ImGuiStyleVar', cpptypeinfo.Int32()),
'ImDrawCornerFlags':
parser.typedef('ImDrawCornerFlags', cpptypeinfo.Int32()),
'ImDrawListFlags':
parser.typedef('ImDrawListFlags', cpptypeinfo.Int32()),
'ImFontAtlasFlags':
parser.typedef('ImFontAtlasFlags', cpptypeinfo.Int32()),
'ImGuiBackendFlags':
parser.typedef('ImGuiBackendFlags', cpptypeinfo.Int32()),
'ImGuiColorEditFlags':
parser.typedef('ImGuiColorEditFlags', cpptypeinfo.Int32()),
'ImGuiConfigFlags':
parser.typedef('ImGuiConfigFlags', cpptypeinfo.Int32()),
'ImGuiComboFlags':
parser.typedef('ImGuiComboFlags', cpptypeinfo.Int32()),
'ImGuiDragDropFlags':
parser.typedef('ImGuiDragDropFlags', cpptypeinfo.Int32()),
'ImGuiFocusedFlags':
parser.typedef('ImGuiFocusedFlags', cpptypeinfo.Int32()),
'ImGuiHoveredFlags':
parser.typedef('ImGuiHoveredFlags', cpptypeinfo.Int32()),
'ImGuiInputTextFlags':
parser.typedef('ImGuiInputTextFlags', cpptypeinfo.Int32()),
'ImGuiSelectableFlags':
parser.typedef('ImGuiSelectableFlags', cpptypeinfo.Int32()),
'ImGuiTabBarFlags':
parser.typedef('ImGuiTabBarFlags', cpptypeinfo.Int32()),
'ImGuiTabItemFlags':
parser.typedef('ImGuiTabItemFlags', cpptypeinfo.Int32()),
'ImGuiTreeNodeFlags':
parser.typedef('ImGuiTreeNodeFlags', cpptypeinfo.Int32()),
'ImGuiWindowFlags':
parser.typedef('ImGuiWindowFlags', cpptypeinfo.Int32()),
'ImGuiInputTextCallback':
parser.typedef(
'ImGuiInputTextCallback',
Function(cpptypeinfo.Int32(), [
Param(Pointer(parser.parse('struct ImGuiInputTextCallbackData')))
])),
'ImGuiSizeCallback':
parser.typedef(
'ImGuiSizeCallback',
Function(
cpptypeinfo.Void(),
[Param(Pointer(parser.parse('struct ImGuiSizeCallbackData')))])),
'ImS8':
parser.typedef('ImS8', cpptypeinfo.Int8()),
'ImU8':
parser.typedef('ImU8', cpptypeinfo.UInt8()),
'ImS16':
parser.typedef('ImS16', cpptypeinfo.Int16()),
'ImU16':
parser.typedef('ImU16', cpptypeinfo.UInt16()),
'ImS32':
parser.typedef('ImS32', cpptypeinfo.Int32()),
'ImU32':
parser.typedef('ImU32', cpptypeinfo.UInt32()),
'ImS64':
parser.typedef('ImS64', cpptypeinfo.Int64()),
'ImU64':
parser.typedef('ImU64', cpptypeinfo.UInt64()),
'ImVec2':
parser.struct(
'ImVec2',
[Field(cpptypeinfo.Float(), 'x'),
Field(cpptypeinfo.Float(), 'y')]),
'ImVec4':
parser.struct('ImVec4', [
Field(cpptypeinfo.Float(), 'x'),
Field(cpptypeinfo.Float(), 'y'),
Field(cpptypeinfo.Float(), 'z'),
Field(cpptypeinfo.Float(), 'w')
]),
'CreateContext':
Function(Pointer(parser.parse('struct ImGuiContext')), [
Param(Pointer(parser.parse('struct ImFontAtlas')), 'shared_font_atlas',
'NULL')
]),
'DestroyContext':
Function(
cpptypeinfo.Void(),
[Param(Pointer(parser.parse('struct ImGuiContext')), 'ctx', 'NULL')]),
'GetCurrentContext':
Function(Pointer(parser.parse('struct ImGuiContext')), []),
'SetCurrentContext':
Function(cpptypeinfo.Void(),
[Param(Pointer(parser.parse('struct ImGuiContext')), 'ctx')]),
'DebugCheckVersionAndDataLayout':
Function(cpptypeinfo.Bool(), [
Param(parser.parse('const char*'), 'version_str'),
Param(cpptypeinfo.UInt64(), 'sz_io'),
Param(cpptypeinfo.UInt64(), 'sz_style'),
Param(cpptypeinfo.UInt64(), 'sz_vec2'),
Param(cpptypeinfo.UInt64(), 'sz_vec4'),
Param(cpptypeinfo.UInt64(), 'sz_drawvert'),
Param(cpptypeinfo.UInt64(), 'sz_drawidx'),
]),
# ImGuiIO & GetIO ( )
'GetIO':
Function(parser.parse('ImGuiIO &'), []),
# ImGuiStyle & GetStyle ( )
'GetStyle':
Function(parser.parse('ImGuiStyle &'), []),
# void NewFrame ( )
'NewFrame':
Function(cpptypeinfo.Void(), []),
'EndFrame':
Function(cpptypeinfo.Void(), []),
'Render':
Function(cpptypeinfo.Void(), []),
# ImDrawData * GetDrawData ( )
'GetDrawData':
Function(parser.parse('ImDrawData *'), []),
# void ShowDemoWindow ( bool * p_open = NULL )
'ShowDemoWindow':
Function(cpptypeinfo.Void(),
[Param(parser.parse('bool *'), 'p_open', 'NULL')]),
# void ShowAboutWindow ( bool * p_open = NULL )
'ShowAboutWindow':
Function(cpptypeinfo.Void(),
[Param(parser.parse('bool *'), 'p_open', 'NULL')]),
# void ShowMetricsWindow ( bool * p_open = NULL )
'ShowMetricsWindow':
Function(cpptypeinfo.Void(),
[Param(parser.parse('bool *'), 'p_open', 'NULL')]),
# void ShowStyleEditor ( ImGuiStyle * ref = NULL )
'ShowStyleEditor':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'ref', 'NULL')]),
# bool ShowStyleSelector ( const char * label )
'ShowStyleSelector':
Function(cpptypeinfo.Bool(),
[Param(parser.parse('const char*'), 'label')]),
# void ShowFontSelector ( const char * label )
'ShowFontSelector':
Function(cpptypeinfo.Void(),
[Param(parser.parse('const char*'), 'label')]),
# void ShowUserGuide ( )
'ShowUserGuide':
Function(cpptypeinfo.Void(), []),
# const char * GetVersion ( )
'GetVersion':
Function(parser.parse('const char*'), []),
# void StyleColorsDark ( ImGuiStyle * dst = NULL )
'StyleColorsDark':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'dst', 'NULL')]),
# void StyleColorsClassic ( ImGuiStyle * dst = NULL )
'StyleColorsClassic':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'dst', 'NULL')]),
# void StyleColorsLight ( ImGuiStyle * dst = NULL )
'StyleColorsLight':
Function(cpptypeinfo.Void(),
[Param(parser.parse('ImGuiStyle *'), 'dst', 'NULL')]),
# bool Begin ( const char * name , bool * p_open = NULL , ImGuiWindowFlags flags = 0 )
'Begin':
Function(cpptypeinfo.Bool(), [
Param(parser.parse('const char *'), 'name'),
Param(parser.parse('bool *'), 'p_open', 'NULL'),
Param(parser.parse('ImGuiWindowFlags'), 'flags', '0')
]),
'End':
Function(cpptypeinfo.Void(), []),
# bool BeginChild ( const char * str_id , const ImVec2 & size = ImVec2 ( 0 , 0 ) , bool border = false , ImGuiWindowFlags flags = 0 )
# bool BeginChild(ImGuiID id, const ImVec2& size = ImVec2(0,0), bool border = false, ImGuiWindowFlags flags = 0);
# function overloading
'BeginChild': [
Function(cpptypeinfo.Bool(), [
Param(parser.parse('const char *'), 'str_id'),
Param(Param('const ImVec2 &'), 'size', 'ImVec2(0,0)'),
Param(Param('bool'), 'border', 'false'),
Param(parser.parse('ImGuiWindowFlags'), 'flags', '0')
])
],
'__dummy__0':
None,
'EndChild':
Function(cpptypeinfo.Void(), []),
# bool IsWindowAppearing ( )
'IsWindowAppearing':
Function(cpptypeinfo.Bool(), []),
# bool IsWindowCollapsed ( )
'IsWindowCollapsed':
Function(cpptypeinfo.Bool(), []),
# bool IsWindowFocused ( ImGuiFocusedFlags flags = 0 )
'IsWindowFocused':
Function(cpptypeinfo.Bool(),
[Param(parser.parse('ImGuiFocusedFlags'), 'flags', '0')]),
# bool IsWindowHovered ( ImGuiHoveredFlags flags = 0 )
'IsWindowHovered':
Function(cpptypeinfo.Bool(),
[Param(parser.parse('ImGuiHoveredFlags'), 'flags', '0')]),
# ImDrawList * GetWindowDrawList ( )
'GetWindowDrawList':
Function(parser.parse('ImDrawList*'), []),
# ImVec2 GetWindowPos ( )
'GetWindowPos':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetWindowSize ( )
'GetWindowSize':
Function(parser.parse('ImVec2'), []),
# float GetWindowWidth ( )
'GetWindowWidth':
Function(cpptypeinfo.Float(), []),
'GetWindowHeight':
Function(cpptypeinfo.Float(), []),
# void SetNextWindowPos ( const ImVec2 & pos , ImGuiCond cond = 0 , const ImVec2 & pivot = ImVec2 ( 0 , 0 ) )
'SetNextWindowPos':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2&'), 'pos'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
Param(parser.parse('const ImVec2 &'), 'pivot', 'ImVec2(0,0)'),
]),
# void SetNextWindowSize ( const ImVec2 & size , ImGuiCond cond = 0 )
'SetNextWindowSize':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size'),
Param(parser.parse('ImGuiCond'), 'cond', '0')
]),
# void SetNextWindowSizeConstraints ( const ImVec2 & size_min , const ImVec2 & size_max , ImGuiSizeCallback custom_callback = NULL , void * custom_callback_data = NULL )
'SetNextWindowSizeConstraints':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size_min'),
Param(parser.parse('const ImVec2 &'), 'size_max'),
Param(parser.parse('ImGuiSizeCallback'), 'custom_callback', 'NULL'),
Param(parser.parse('void *'), 'custom_callback_data', 'NULL')
]),
# void SetNextWindowContentSize ( const ImVec2 & size )
'SetNextWindowContentSize':
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size'),
]),
# void SetNextWindowCollapsed ( bool collapsed , ImGuiCond cond = 0 )
'SetNextWindowCollapsed':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Bool(), 'collapsed'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
]),
# void SetNextWindowFocus ( )
'SetNextWindowFocus':
Function(cpptypeinfo.Void(), []),
# void SetNextWindowBgAlpha ( float alpha )
'SetNextWindowBgAlpha':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'alpha')]),
# void SetWindowPos ( const ImVec2 & pos , ImGuiCond cond = 0 )
# void SetWindowPos(const char* name, const ImVec2& pos, ImGuiCond cond = 0);
# function overloading
'SetWindowPos': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'pos'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
])
],
'__dummy__1':
None,
# void SetWindowSize ( const ImVec2 & size , ImGuiCond cond = 0 )
# void SetWindowSize(const char* name, const ImVec2& size, ImGuiCond cond = 0);
# function overloading
'SetWindowSize': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('const ImVec2 &'), 'size'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
])
],
'__dummy__2':
None,
# void SetWindowCollapsed ( bool collapsed , ImGuiCond cond = 0 )
# IMGUI_API void SetWindowCollapsed(const char* name, bool collapsed, ImGuiCond cond = 0); // set named window collapsed state
'SetWindowCollapsed': [
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Bool(), 'collapsed'),
Param(parser.parse('ImGuiCond'), 'cond', '0'),
])
],
'__dummy__3':
None,
# void SetWindowFocus ( )
# IMGUI_API void SetWindowFocus(const char* name);
'SetWindowFocus': [Function(cpptypeinfo.Void(), [])],
'__dummy__4':
None,
# void SetWindowFontScale ( float scale )
'SetWindowFontScale':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'scale')]),
# ImVec2 GetContentRegionMax ( )
'GetContentRegionMax':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetContentRegionAvail ( )
'GetContentRegionAvail':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetWindowContentRegionMin ( )
'GetWindowContentRegionMin':
Function(parser.parse('ImVec2'), []),
# ImVec2 GetWindowContentRegionMax ( )
'GetWindowContentRegionMax':
Function(parser.parse('ImVec2'), []),
# float GetWindowContentRegionWidth ( )
'GetWindowContentRegionWidth':
Function(cpptypeinfo.Float(), []),
# float GetScrollX ( )
'GetScrollX':
Function(cpptypeinfo.Float(), []),
'GetScrollY':
Function(cpptypeinfo.Float(), []),
'GetScrollMaxX':
Function(cpptypeinfo.Float(), []),
'GetScrollMaxY':
Function(cpptypeinfo.Float(), []),
# void SetScrollX ( float scroll_x )
'SetScrollX':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'scroll_x')]),
'SetScrollY':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Float(), 'scroll_y')]),
# void SetScrollHereX ( float center_x_ratio = 0.5f )
'SetScrollHereX':
Function(cpptypeinfo.Void(),
[Param(cpptypeinfo.Float(), 'center_x_ratio', '0.5f')]),
'SetScrollHereY':
Function(cpptypeinfo.Void(),
[Param(cpptypeinfo.Float(), 'center_y_ratio', '0.5f')]),
# void SetScrollFromPosX ( float local_x , float center_x_ratio = 0.5f )
'SetScrollFromPosX':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Float(), 'local_x'),
Param(cpptypeinfo.Float(), 'center_x_ratio', '0.5f')
]),
'SetScrollFromPosY':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Float(), 'local_y'),
Param(cpptypeinfo.Float(), 'center_y_ratio', '0.5f')
]),
# void PushFont ( ImFont * font )
'PushFont':
Function(cpptypeinfo.Void(), [Param(parser.parse('ImFont*'), 'font')]),
# void PopFont ( )
'PopFont':
Function(cpptypeinfo.Void(), []),
# void PushStyleColor ( ImGuiCol idx , ImU32 col )
# void PushStyleColor ( ImGuiCol idx , ImU32 col )
'PushStyleColor': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('ImGuiCol'), 'idx'),
Param(parser.parse('ImU32'), 'col')
])
],
'__dummy__5':
None,
# void PopStyleColor ( int count = 1 )
'PopStyleColor':
Function(cpptypeinfo.Void(), [Param(cpptypeinfo.Int32(), 'count', '1')]),
# void PushStyleVar ( ImGuiStyleVar idx , float val )
# void PushStyleVar(ImGuiStyleVar idx, const ImVec2& val);
'PushStyleVar': [
Function(cpptypeinfo.Void(), [
Param(parser.parse('ImGuiCol'), 'idx'),
Param(cpptypeinfo.Float(), 'val')
])
],
'__dummy__6':
None,
# :void PopStyleVar ( int count = 1 )
'PopStyleVar':
Function(cpptypeinfo.Void(), [
Param(cpptypeinfo.Int32(), 'count', '1'),
]),
# const ImVec4 & GetStyleColorVec4 ( ImGuiCol idx )
'GetStyleColorVec4':
Function(parser.parse('const ImVec4 &'),
[Param(parser.parse('ImGuiCol'), 'idx')]),
# ImFont * GetFont ( )
'GetFont':
Function(parser.parse('ImFont*'), []),
'GetFontSize': [],
'GetFontTexUvWhitePixel': [],
# 3 overloading
'GetColorU32': [],
'__dummy__7':
None,
'__dummy__8':
None,
'PushItemWidth': [],
'PopItemWidth': [],
'SetNextItemWidth': [],
'CalcItemWidth': [],
'PushTextWrapPos': [],
'PopTextWrapPos': [],
'PushAllowKeyboardFocus': [],
'PopAllowKeyboardFocus': [],
'PushButtonRepeat': [],
'PopButtonRepeat': [],
'Separator': [],
'SameLine': [],
'NewLine': [],
'Spacing': [],
'Dummy': [],
'Indent': [],
'Unindent': [],
'BeginGroup': [],
'EndGroup': [],
'GetCursorPos': [],
'GetCursorPosX': [],
'GetCursorPosY': [],
'SetCursorPos': [],
'SetCursorPosX': [],
'SetCursorPosY': [],
'GetCursorStartPos': [],
'GetCursorScreenPos': [],
'SetCursorScreenPos': [],
'AlignTextToFramePadding': [],
'GetTextLineHeight': [],
'GetTextLineHeightWithSpacing': [],
'GetFrameHeight': [],
'GetFrameHeightWithSpacing': [],
'PushID': [],
'__dummy__9': [],
'__dummy__10': [],
'__dummy__11': [],
'PopID': [],
'GetID': [],
'__dummy__12': [],
'__dummy__13': [],
'TextUnformatted': [],
'Text': [],
'TextV': [],
'TextColored': [],
'TextColoredV': [],
'TextDisabled': [],
'TextDisabledV': [],
'TextWrapped': [],
'TextWrappedV': [],
'LabelText': [],
'LabelTextV': [],
'BulletText': [],
'BulletTextV': [],
'Button': [],
'SmallButton': [],
'InvisibleButton': [],
'ArrowButton': [],
'Image': [],
'ImageButton': [],
'Checkbox': [],
'CheckboxFlags': [],
'RadioButton': [],
'__dummy__14':
None,
'ProgressBar': [],
'Bullet': [],
'BeginCombo': [],
'EndCombo': [],
'Combo': [],
'__dummy__15': [],
'__dummy__16': [],
'DragFloat': [],
'DragFloat2': [],
'DragFloat3': [],
'DragFloat4': [],
'DragFloatRange2': [],
'DragInt': [],
'DragInt2': [],
'DragInt3': [],
'DragInt4': [],
'DragIntRange2': [],
'DragScalar': [],
'DragScalarN': [],
'SliderFloat': [],
'SliderFloat2': [],
'SliderFloat3': [],
'SliderFloat4': [],
'SliderAngle': [],
'SliderInt': [],
'SliderInt2': [],
'SliderInt3': [],
'SliderInt4': [],
'SliderScalar': [],
'SliderScalarN': [],
'VSliderFloat': [],
'VSliderInt': [],
'VSliderScalar': [],
'InputText': [],
'InputTextMultiline': [],
'InputTextWithHint': [],
'InputFloat': [],
'InputFloat2': [],
'InputFloat3': [],
'InputFloat4': [],
'InputInt': [],
'InputInt2': [],
'InputInt3': [],
'InputInt4': [],
'InputDouble': [],
'InputScalar': [],
'InputScalarN': [],
'ColorEdit3': [],
'ColorEdit4': [],
'ColorPicker3': [],
'ColorPicker4': [],
'ColorButton': [],
'SetColorEditOptions': [],
'TreeNode': [],
'__dummy__17': [],
'__dummy__18': [],
'TreeNodeV': [],
'__dummy__19': [],
'TreeNodeEx': [],
'__dummy__20': [],
'__dummy__21': [],
'TreeNodeExV': [],
'__dummy__22': [],
'TreePush': [],
'__dummy__23': [],
'TreePop': [],
'GetTreeNodeToLabelSpacing': [],
'CollapsingHeader': [],
'__dummy__24': [],
'SetNextItemOpen': [],
'Selectable': [],
'__dummy__25': [],
'ListBox': [],
'__dummy__26': [],
'ListBoxHeader': [],
'__dummy__27': [],
'ListBoxFooter': [],
'PlotLines': [],
'__dummy__28': [],
'PlotHistogram': [],
'__dummy__29': [],
'Value': [],
'__dummy__30': [],
'__dummy__31': [],
'__dummy__32': [],
'BeginMainMenuBar': [],
'EndMainMenuBar': [],
'BeginMenuBar': [],
'EndMenuBar': [],
'BeginMenu': [],
'EndMenu': [],
'MenuItem': [],
'__dummy__33': [],
'BeginTooltip': [],
'EndTooltip': [],
'SetTooltip': [],
'SetTooltipV': [],
'OpenPopup': [],
'BeginPopup': [],
'BeginPopupContextItem': [],
'BeginPopupContextWindow': [],
'BeginPopupContextVoid': [],
'BeginPopupModal': [],
'EndPopup': [],
'OpenPopupOnItemClick': [],
'IsPopupOpen': [],
'CloseCurrentPopup': [],
'Columns': [],
'NextColumn': [],
'GetColumnIndex': [],
'GetColumnWidth': [],
'SetColumnWidth': [],
'GetColumnOffset': [],
'SetColumnOffset': [],
'GetColumnsCount': [],
'BeginTabBar': [],
'EndTabBar': [],
'BeginTabItem': [],
'EndTabItem': [],
'SetTabItemClosed': [],
'LogToTTY': [],
'LogToFile': [],
'LogToClipboard': [],
'LogFinish': [],
'LogButtons': [],
'LogText': [],
'BeginDragDropSource': [],
'SetDragDropPayload': [],
'EndDragDropSource': [],
'BeginDragDropTarget': [],
'AcceptDragDropPayload': [],
'EndDragDropTarget': [],
'GetDragDropPayload': [],
'PushClipRect': [],
'PopClipRect': [],
'SetItemDefaultFocus': [],
'SetKeyboardFocusHere': [],
'IsItemHovered': [],
'IsItemActive': [],
'IsItemFocused': [],
'IsItemClicked': [],
'IsItemVisible': [],
'IsItemEdited': [],
'IsItemActivated': [],
'IsItemDeactivated': [],
'IsItemDeactivatedAfterEdit': [],
'IsAnyItemHovered': [],
'IsAnyItemActive': [],
'IsAnyItemFocused': [],
'GetItemRectMin': [],
'GetItemRectMax': [],
'GetItemRectSize': [],
'SetItemAllowOverlap': [],
'IsRectVisible': [],
'__dummy__34': [],
'GetTime': [],
'GetFrameCount': [],
'GetBackgroundDrawList': [],
'GetForegroundDrawList': [],
'GetDrawListSharedData': [],
'GetStyleColorName': [],
'SetStateStorage': [],
'GetStateStorage': [],
'CalcTextSize': [],
'CalcListClipping': [],
'BeginChildFrame': [],
'EndChildFrame': [],
'ColorConvertU32ToFloat4': [],
'ColorConvertFloat4ToU32': [],
'ColorConvertRGBtoHSV': [],
'ColorConvertHSVtoRGB': [],
'GetKeyIndex': [],
'IsKeyDown': [],
'IsKeyPressed': [],
'IsKeyReleased': [],
'GetKeyPressedAmount': [],
'IsMouseDown': [],
'IsAnyMouseDown': [],
'IsMouseClicked': [],
'IsMouseDoubleClicked': [],
'IsMouseReleased': [],
'IsMouseDragging': [],
'IsMouseHoveringRect': [],
'IsMousePosValid': [],
'GetMousePos': [],
'GetMousePosOnOpeningCurrentPopup': [],
'GetMouseDragDelta': [],
'ResetMouseDragDelta': [],
'GetMouseCursor': [],
'SetMouseCursor': [],
'CaptureKeyboardFromApp': [],
'CaptureMouseFromApp': [],
'GetClipboardText': [],
'SetClipboardText': [],
'LoadIniSettingsFromDisk': [],
'LoadIniSettingsFromMemory': [],
'SaveIniSettingsToDisk': [],
'SaveIniSettingsToMemory': [],
'SetAllocatorFunctions': [],
'MemAlloc': [],
'MemFree': [],
# enum
'ImGuiWindowFlags_': [],
'ImGuiInputTextFlags_': [],
'ImGuiTreeNodeFlags_': [],
'ImGuiSelectableFlags_': [],
'ImGuiComboFlags_': [],
'ImGuiTabBarFlags_': [],
'ImGuiFocusedFlags_': [],
'ImGuiDragDropFlags_': [],
'ImGuiDataType_': [],
'ImGuiDir_': [],
'ImGuiKey_': [],
'ImGuiNavInput_': [],
'ImGuiConfigFlags_': [],
'ImGuiBackendFlags_': [],
'ImGuiCol_': [],
'ImGuiStyleVar_': [],
'ImGuiColorEditFlags_': [],
'ImGuiMouseCursor_': [],
'ImGuiCond_': [],
'ImGuiHoveredFlags_': [],
'ImGuiTabItemFlags_': [],
# allocator
'ImNewDummy':
parser.parse('struct ImNewDummy'),
'operator new': [],
'operator delete': [],
'IM_DELETE': [],
#
'ImVector':
parser.parse('struct ImVector'),
#
'TreeAdvanceToLabelPos': [],
'SetNextTreeNodeOpen': [],
'GetContentRegionAvailWidth': [],
'GetOverlayDrawList': [],
'SetScrollHere': [],
'IsItemDeactivatedAfterChange': [],
'IsAnyWindowFocused': [],
'IsAnyWindowHovered': [],
'CalcItemRectClosestPoint': [],
'ShowTestWindow': [],
'IsRootWindowFocused': [],
'IsRootWindowOrAnyChildFocused': [],
'SetNextWindowContentWidth': [],
'GetItemsLineHeightWithSpacing': [],
'IsRootWindowOrAnyChildHovered': [],
'AlignFirstTextHeightToWidgets': [],
'SetNextWindowPosCenter': [],
#
'ImGuiTextEditCallback': [],
'ImGuiTextEditCallbackData': [],
'ImDrawCallback': [],
'ImDrawIdx':
parser.typedef('ImDrawIdx', cpptypeinfo.UInt16()),
'ImDrawCornerFlags_': [],
'ImDrawListFlags_': [],
'ImFontAtlasCustomRect': [],
'ImFontAtlasFlags_': [],
}
class ImGuiTest(unittest.TestCase):
    """Parse imgui.h with cpptypeinfo and compare every discovered user type
    and function against the hand-written EXPECTS table above."""

    def _assert_expected(self, name, value) -> None:
        """Look up *name* in EXPECTS and compare against *value*.

        Raises Exception when the name is missing from EXPECTS.  Entries
        stored as a list mark overloaded / not-yet-modelled declarations and
        are deliberately skipped.
        """
        expected = EXPECTS.get(name)
        if expected is None:
            raise Exception('not found :' + name)
        if isinstance(expected, list):
            return  # overload set or unchecked entry: no single value to compare
        self.assertEqual(expected, value)

    def test_imgui_h(self) -> None:
        cpptypeinfo.parse_files(parser,
                                IMGUI_H,
                                cpp_flags=[
                                    '-DIMGUI_DISABLE_OBSOLETE_FUNCTIONS',
                                ])
        for ns in parser.root_namespace.traverse():
            if ns.struct:
                # struct scopes are checked via their owning namespace entry
                continue
            # Previously this lookup/compare logic was duplicated verbatim for
            # user types and for functions; it now lives in _assert_expected.
            for k, v in ns.user_type_map.items():
                with self.subTest(name=k):
                    self._assert_expected(k, v)
            for v in ns.functions:
                if v.name:
                    with self.subTest(name=v.name):
                        self._assert_expected(v.name, v)
# Allow running this test module directly: `python test_imgui.py`.
if __name__ == '__main__':
    unittest.main()
| [
"cpptypeinfo.UInt16",
"pathlib.Path",
"cpptypeinfo.Void",
"cpptypeinfo.Int8",
"cpptypeinfo.parse_files",
"cpptypeinfo.UInt32",
"cpptypeinfo.TypeParser",
"cpptypeinfo.UInt64",
"cpptypeinfo.Float",
"cpptypeinfo.Int16",
"unittest.main",
"cpptypeinfo.Int64",
"cpptypeinfo.Bool",
"cpptypeinfo.In... | [((237, 261), 'cpptypeinfo.TypeParser', 'cpptypeinfo.TypeParser', ([], {}), '()\n', (259, 261), False, 'import cpptypeinfo\n'), ((27961, 27976), 'unittest.main', 'unittest.main', ([], {}), '()\n', (27974, 27976), False, 'import unittest\n'), ((1995, 2015), 'cpptypeinfo.UInt32', 'cpptypeinfo.UInt32', ([], {}), '()\n', (2013, 2015), False, 'import cpptypeinfo\n'), ((2065, 2085), 'cpptypeinfo.UInt16', 'cpptypeinfo.UInt16', ([], {}), '()\n', (2083, 2085), False, 'import cpptypeinfo\n'), ((2137, 2156), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2154, 2156), False, 'import cpptypeinfo\n'), ((2210, 2229), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2227, 2229), False, 'import cpptypeinfo\n'), ((2291, 2310), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2308, 2310), False, 'import cpptypeinfo\n'), ((2362, 2381), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2379, 2381), False, 'import cpptypeinfo\n'), ((2433, 2452), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2450, 2452), False, 'import cpptypeinfo\n'), ((2514, 2533), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2531, 2533), False, 'import cpptypeinfo\n'), ((2601, 2620), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2618, 2620), False, 'import cpptypeinfo\n'), ((2682, 2701), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2699, 2701), False, 'import cpptypeinfo\n'), ((2771, 2790), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2788, 2790), False, 'import cpptypeinfo\n'), ((2856, 2875), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2873, 2875), False, 'import cpptypeinfo\n'), ((2943, 2962), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (2960, 2962), False, 'import cpptypeinfo\n'), ((3032, 3051), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3049, 3051), False, 'import cpptypeinfo\n'), ((3125, 3144), 'cpptypeinfo.Int32', 
'cpptypeinfo.Int32', ([], {}), '()\n', (3142, 3144), False, 'import cpptypeinfo\n'), ((3212, 3231), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3229, 3231), False, 'import cpptypeinfo\n'), ((3297, 3316), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3314, 3316), False, 'import cpptypeinfo\n'), ((3388, 3407), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3405, 3407), False, 'import cpptypeinfo\n'), ((3477, 3496), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3494, 3496), False, 'import cpptypeinfo\n'), ((3566, 3585), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3583, 3585), False, 'import cpptypeinfo\n'), ((3659, 3678), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3676, 3678), False, 'import cpptypeinfo\n'), ((3754, 3773), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3771, 3773), False, 'import cpptypeinfo\n'), ((3841, 3860), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3858, 3860), False, 'import cpptypeinfo\n'), ((3930, 3949), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (3947, 3949), False, 'import cpptypeinfo\n'), ((4021, 4040), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (4038, 4040), False, 'import cpptypeinfo\n'), ((4108, 4127), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (4125, 4127), False, 'import cpptypeinfo\n'), ((4600, 4618), 'cpptypeinfo.Int8', 'cpptypeinfo.Int8', ([], {}), '()\n', (4616, 4618), False, 'import cpptypeinfo\n'), ((4662, 4681), 'cpptypeinfo.UInt8', 'cpptypeinfo.UInt8', ([], {}), '()\n', (4679, 4681), False, 'import cpptypeinfo\n'), ((4727, 4746), 'cpptypeinfo.Int16', 'cpptypeinfo.Int16', ([], {}), '()\n', (4744, 4746), False, 'import cpptypeinfo\n'), ((4792, 4812), 'cpptypeinfo.UInt16', 'cpptypeinfo.UInt16', ([], {}), '()\n', (4810, 4812), False, 'import cpptypeinfo\n'), ((4858, 4877), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (4875, 4877), False, 
'import cpptypeinfo\n'), ((4923, 4943), 'cpptypeinfo.UInt32', 'cpptypeinfo.UInt32', ([], {}), '()\n', (4941, 4943), False, 'import cpptypeinfo\n'), ((4989, 5008), 'cpptypeinfo.Int64', 'cpptypeinfo.Int64', ([], {}), '()\n', (5006, 5008), False, 'import cpptypeinfo\n'), ((5054, 5074), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (5072, 5074), False, 'import cpptypeinfo\n'), ((5685, 5703), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (5701, 5703), False, 'import cpptypeinfo\n'), ((5916, 5934), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (5932, 5934), False, 'import cpptypeinfo\n'), ((6066, 6084), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (6082, 6084), False, 'import cpptypeinfo\n'), ((6699, 6717), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (6715, 6717), False, 'import cpptypeinfo\n'), ((6755, 6773), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (6771, 6773), False, 'import cpptypeinfo\n'), ((6809, 6827), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (6825, 6827), False, 'import cpptypeinfo\n'), ((7028, 7046), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (7044, 7046), False, 'import cpptypeinfo\n'), ((7205, 7223), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (7221, 7223), False, 'import cpptypeinfo\n'), ((7386, 7404), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (7402, 7404), False, 'import cpptypeinfo\n'), ((7566, 7584), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (7582, 7584), False, 'import cpptypeinfo\n'), ((7748, 7766), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (7764, 7766), False, 'import cpptypeinfo\n'), ((7921, 7939), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (7937, 7939), False, 'import cpptypeinfo\n'), ((8069, 8087), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (8085, 8087), False, 'import cpptypeinfo\n'), ((8290, 8308), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], 
{}), '()\n', (8306, 8308), False, 'import cpptypeinfo\n'), ((8479, 8497), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (8495, 8497), False, 'import cpptypeinfo\n'), ((8664, 8682), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (8680, 8682), False, 'import cpptypeinfo\n'), ((8873, 8891), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (8889, 8891), False, 'import cpptypeinfo\n'), ((9105, 9123), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (9121, 9123), False, 'import cpptypeinfo\n'), ((9807, 9825), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (9823, 9825), False, 'import cpptypeinfo\n'), ((9906, 9924), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (9922, 9924), False, 'import cpptypeinfo\n'), ((10005, 10023), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (10021, 10023), False, 'import cpptypeinfo\n'), ((10128, 10146), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (10144, 10146), False, 'import cpptypeinfo\n'), ((10319, 10337), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (10335, 10337), False, 'import cpptypeinfo\n'), ((10789, 10808), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (10806, 10808), False, 'import cpptypeinfo\n'), ((10853, 10872), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (10870, 10872), False, 'import cpptypeinfo\n'), ((11033, 11051), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (11049, 11051), False, 'import cpptypeinfo\n'), ((11361, 11379), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (11377, 11379), False, 'import cpptypeinfo\n'), ((11729, 11747), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (11745, 11747), False, 'import cpptypeinfo\n'), ((12137, 12155), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (12153, 12155), False, 'import cpptypeinfo\n'), ((12344, 12362), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (12360, 12362), False, 'import 
cpptypeinfo\n'), ((12556, 12574), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (12572, 12574), False, 'import cpptypeinfo\n'), ((12673, 12691), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (12689, 12691), False, 'import cpptypeinfo\n'), ((14292, 14310), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (14308, 14310), False, 'import cpptypeinfo\n'), ((14911, 14930), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (14928, 14930), False, 'import cpptypeinfo\n'), ((14998, 15017), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15015, 15017), False, 'import cpptypeinfo\n'), ((15057, 15076), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15074, 15076), False, 'import cpptypeinfo\n'), ((15119, 15138), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15136, 15138), False, 'import cpptypeinfo\n'), ((15181, 15200), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15198, 15200), False, 'import cpptypeinfo\n'), ((15282, 15300), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (15298, 15300), False, 'import cpptypeinfo\n'), ((15378, 15396), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (15394, 15396), False, 'import cpptypeinfo\n'), ((15537, 15555), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (15553, 15555), False, 'import cpptypeinfo\n'), ((15665, 15683), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (15681, 15683), False, 'import cpptypeinfo\n'), ((15874, 15892), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (15890, 15892), False, 'import cpptypeinfo\n'), ((16055, 16073), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (16071, 16073), False, 'import cpptypeinfo\n'), ((16266, 16284), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (16282, 16284), False, 'import cpptypeinfo\n'), ((16383, 16401), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (16399, 16401), False, 'import cpptypeinfo\n'), 
((16817, 16835), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (16833, 16835), False, 'import cpptypeinfo\n'), ((17295, 17313), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (17311, 17313), False, 'import cpptypeinfo\n'), ((26285, 26305), 'cpptypeinfo.UInt16', 'cpptypeinfo.UInt16', ([], {}), '()\n', (26303, 26305), False, 'import cpptypeinfo\n'), ((26522, 26617), 'cpptypeinfo.parse_files', 'cpptypeinfo.parse_files', (['parser', 'IMGUI_H'], {'cpp_flags': "['-DIMGUI_DISABLE_OBSOLETE_FUNCTIONS']"}), "(parser, IMGUI_H, cpp_flags=[\n '-DIMGUI_DISABLE_OBSOLETE_FUNCTIONS'])\n", (26545, 26617), False, 'import cpptypeinfo\n'), ((138, 160), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'import pathlib\n'), ((1926, 1944), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (1942, 1944), False, 'import cpptypeinfo\n'), ((4235, 4254), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (4252, 4254), False, 'import cpptypeinfo\n'), ((4460, 4478), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (4476, 4478), False, 'import cpptypeinfo\n'), ((9455, 9473), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (9471, 9473), False, 'import cpptypeinfo\n'), ((12954, 12972), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (12970, 12972), False, 'import cpptypeinfo\n'), ((13380, 13398), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (13396, 13398), False, 'import cpptypeinfo\n'), ((13834, 13852), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (13850, 13852), False, 'import cpptypeinfo\n'), ((14148, 14166), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (14164, 14166), False, 'import cpptypeinfo\n'), ((16563, 16581), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (16579, 16581), False, 'import cpptypeinfo\n'), ((17046, 17064), 'cpptypeinfo.Void', 'cpptypeinfo.Void', ([], {}), '()\n', (17062, 17064), False, 'import cpptypeinfo\n'), ((5147, 
5166), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (5164, 5166), False, 'import cpptypeinfo\n'), ((5190, 5209), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (5207, 5209), False, 'import cpptypeinfo\n'), ((5280, 5299), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (5297, 5299), False, 'import cpptypeinfo\n'), ((5322, 5341), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (5339, 5341), False, 'import cpptypeinfo\n'), ((5364, 5383), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (5381, 5383), False, 'import cpptypeinfo\n'), ((5406, 5425), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (5423, 5425), False, 'import cpptypeinfo\n'), ((6163, 6183), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (6181, 6183), False, 'import cpptypeinfo\n'), ((6210, 6230), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (6228, 6230), False, 'import cpptypeinfo\n'), ((6260, 6280), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (6278, 6280), False, 'import cpptypeinfo\n'), ((6309, 6329), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (6327, 6329), False, 'import cpptypeinfo\n'), ((6358, 6378), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (6376, 6378), False, 'import cpptypeinfo\n'), ((6411, 6431), 'cpptypeinfo.UInt64', 'cpptypeinfo.UInt64', ([], {}), '()\n', (6429, 6431), False, 'import cpptypeinfo\n'), ((12381, 12399), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (12397, 12399), False, 'import cpptypeinfo\n'), ((12700, 12719), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (12717, 12719), False, 'import cpptypeinfo\n'), ((14319, 14338), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (14336, 14338), False, 'import cpptypeinfo\n'), ((15309, 15328), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15326, 15328), False, 'import cpptypeinfo\n'), ((15405, 15424), 'cpptypeinfo.Float', 
'cpptypeinfo.Float', ([], {}), '()\n', (15422, 15424), False, 'import cpptypeinfo\n'), ((15578, 15597), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15595, 15597), False, 'import cpptypeinfo\n'), ((15706, 15725), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15723, 15725), False, 'import cpptypeinfo\n'), ((15911, 15930), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15928, 15930), False, 'import cpptypeinfo\n'), ((15959, 15978), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (15976, 15978), False, 'import cpptypeinfo\n'), ((16092, 16111), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (16109, 16111), False, 'import cpptypeinfo\n'), ((16140, 16159), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (16157, 16159), False, 'import cpptypeinfo\n'), ((16844, 16863), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (16861, 16863), False, 'import cpptypeinfo\n'), ((17332, 17351), 'cpptypeinfo.Int32', 'cpptypeinfo.Int32', ([], {}), '()\n', (17349, 17351), False, 'import cpptypeinfo\n'), ((9556, 9579), 'cpptypeinfo.usertype.Param', 'Param', (['"""const ImVec2 &"""'], {}), "('const ImVec2 &')\n", (9561, 9579), False, 'from cpptypeinfo.usertype import Field, Struct, Pointer, Param, Function\n'), ((9624, 9637), 'cpptypeinfo.usertype.Param', 'Param', (['"""bool"""'], {}), "('bool')\n", (9629, 9637), False, 'from cpptypeinfo.usertype import Field, Struct, Pointer, Param, Function\n'), ((13875, 13893), 'cpptypeinfo.Bool', 'cpptypeinfo.Bool', ([], {}), '()\n', (13891, 13893), False, 'import cpptypeinfo\n'), ((17140, 17159), 'cpptypeinfo.Float', 'cpptypeinfo.Float', ([], {}), '()\n', (17157, 17159), False, 'import cpptypeinfo\n')] |
#!/usr/bin/python
import cgi
import sys
import json
import re
import mysql.connector
from cloudNG import *
# First stdin line carries comma-separated metadata; later writes show the
# fields are: brewlog id, activity, step number.
y=sys.stdin.readline().split(",")
# The remainder of stdin is the comment body to be stored.
x = sys.stdin.read()
#con=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
#sys.stdout.write("Content-Type:text/plain\n\n")
# CGI response header: the reply emitted below is XML.
sys.stdout.write("Content-Type:text/xml\n\n")
# Parsed CGI form data — not referenced again in this script; TODO confirm
# whether it can be removed or is kept for a side effect.
form=cgi.FieldStorage()
def xmlsafe(text):
    """Replace newline/carriage-return characters in *text* with ``</br>``.

    NOTE(review): the original implementation also built a fully escaped copy
    (``<``/``>``/``&``/``/`` mapped to ``{:leftbracket:}`` etc.) but bound it to
    an unused local and returned the newline-substituted text instead. That
    dead computation is removed here; behavior is unchanged. If full escaping
    was actually intended, it must run *before* the ``</br>`` substitution or
    it would mangle the inserted tags — confirm with the author before
    changing the return value.

    Args:
        text: arbitrary string, possibly containing ``\\n``/``\\r``.

    Returns:
        The string with each newline or carriage return replaced by ``</br>``.
    """
    return re.compile("[\n\r]").sub("</br>", text)
# Debug trace of the parsed metadata fields (goes to the web server log).
sys.stderr.write("brewlog: %s\n" %(y[0]))
sys.stderr.write("act %s\n" %(y[1]))
sys.stderr.write("step: %s\n" %(y[2]))
# Persist the comment through the cloud API; the first argument looks like a
# scrubbed placeholder e-mail — confirm the real account id before deploying.
saveComments=brewerslabCloudApi().saveComment("<EMAIL>", y[0],y[1],y[2],x )
# Minimal XML reply echoing the step number back to the caller.
sys.stdout.write("<xml>\n")
sys.stdout.write("<stepNum>%s</stepNum>\n" %( y[2] ))
sys.stdout.write("</xml>")
sys.stdout.flush()
| [
"cgi.FieldStorage",
"re.compile",
"sys.stdin.read",
"sys.stdin.readline",
"sys.stderr.write",
"sys.stdout.flush",
"sys.stdout.write"
] | [((146, 162), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (160, 162), False, 'import sys\n'), ((304, 349), 'sys.stdout.write', 'sys.stdout.write', (['"""Content-Type:text/xml\n\n"""'], {}), "('Content-Type:text/xml\\n\\n')\n", (320, 349), False, 'import sys\n'), ((355, 373), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (371, 373), False, 'import cgi\n'), ((630, 670), 'sys.stderr.write', 'sys.stderr.write', (["('brewlog: %s\\n' % y[0])"], {}), "('brewlog: %s\\n' % y[0])\n", (646, 670), False, 'import sys\n'), ((672, 707), 'sys.stderr.write', 'sys.stderr.write', (["('act %s\\n' % y[1])"], {}), "('act %s\\n' % y[1])\n", (688, 707), False, 'import sys\n'), ((709, 746), 'sys.stderr.write', 'sys.stderr.write', (["('step: %s\\n' % y[2])"], {}), "('step: %s\\n' % y[2])\n", (725, 746), False, 'import sys\n'), ((826, 853), 'sys.stdout.write', 'sys.stdout.write', (['"""<xml>\n"""'], {}), "('<xml>\\n')\n", (842, 853), False, 'import sys\n'), ((854, 904), 'sys.stdout.write', 'sys.stdout.write', (["('<stepNum>%s</stepNum>\\n' % y[2])"], {}), "('<stepNum>%s</stepNum>\\n' % y[2])\n", (870, 904), False, 'import sys\n'), ((908, 934), 'sys.stdout.write', 'sys.stdout.write', (['"""</xml>"""'], {}), "('</xml>')\n", (924, 934), False, 'import sys\n'), ((935, 953), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (951, 953), False, 'import sys\n'), ((110, 130), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (128, 130), False, 'import sys\n'), ((400, 420), 're.compile', 're.compile', (["'[\\n\\r]'"], {}), "('[\\n\\r]')\n", (410, 420), False, 'import re\n'), ((445, 460), 're.compile', 're.compile', (['"""<"""'], {}), "('<')\n", (455, 460), False, 'import re\n'), ((485, 500), 're.compile', 're.compile', (['""">"""'], {}), "('>')\n", (495, 500), False, 'import re\n'), ((526, 541), 're.compile', 're.compile', (['"""&"""'], {}), "('&')\n", (536, 541), False, 'import re\n'), ((563, 578), 're.compile', 're.compile', (['"""/"""'], {}), "('/')\n", 
(573, 578), False, 'import re\n')] |
import pytest
from tests.common.helpers.assertions import pytest_require
from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
fanout_graph_facts
from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
ixia_api_serv_user, ixia_api_serv_passwd, ixia_api, ixia_testbed
from tests.common.ixia.qos_fixtures import prio_dscp_map, all_prio_list, lossless_prio_list,\
lossy_prio_list
from files.helper import run_pfc_test
@pytest.mark.topology("tgen")
def test_pfc_pause_single_lossless_prio(ixia_api,
                                        ixia_testbed,
                                        conn_graph_facts,
                                        fanout_graph_facts,
                                        duthosts,
                                        rand_one_dut_hostname,
                                        enum_dut_portname_oper_up,
                                        enum_dut_lossless_prio,
                                        all_prio_list,
                                        prio_dscp_map):
    """Verify that a PFC frame pauses exactly one lossless priority.

    The priority under test carries both the paused and the measured traffic;
    every other priority runs as background traffic. Fails early (via
    pytest_require) when the enumerated port/priority do not belong to the
    randomly selected DUT.
    """
    dut_hostname, dut_port = enum_dut_portname_oper_up.split('|')
    prio_host, prio_str = enum_dut_lossless_prio.split('|')
    pytest_require(rand_one_dut_hostname == dut_hostname == prio_host,
                   "Priority and port are not mapped to the expected DUT")

    lossless_prio = int(prio_str)
    # Background traffic uses every priority except the one under test.
    background_prios = list(all_prio_list)
    background_prios.remove(lossless_prio)

    run_pfc_test(api=ixia_api,
                 testbed_config=ixia_testbed,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 duthost=duthosts[rand_one_dut_hostname],
                 dut_port=dut_port,
                 global_pause=False,
                 pause_prio_list=[lossless_prio],
                 test_prio_list=[lossless_prio],
                 bg_prio_list=background_prios,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=True)
def test_pfc_pause_multi_lossless_prio(ixia_api,
                                       ixia_testbed,
                                       conn_graph_facts,
                                       fanout_graph_facts,
                                       duthosts,
                                       rand_one_dut_hostname,
                                       enum_dut_portname_oper_up,
                                       lossless_prio_list,
                                       lossy_prio_list,
                                       prio_dscp_map):
    """Verify that PFC frames pause all lossless priorities together.

    Every lossless priority is both paused and measured; the lossy priorities
    provide background traffic. Fails early (via pytest_require) when the
    enumerated port does not belong to the randomly selected DUT.

    NOTE(review): unlike the single-priority test in this file, this test has
    no ``@pytest.mark.topology("tgen")`` marker — confirm whether that is
    intentional.
    """
    dut_hostname, dut_port = enum_dut_portname_oper_up.split('|')
    pytest_require(rand_one_dut_hostname == dut_hostname,
                   "Port is not mapped to the expected DUT")

    run_pfc_test(api=ixia_api,
                 testbed_config=ixia_testbed,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 duthost=duthosts[rand_one_dut_hostname],
                 dut_port=dut_port,
                 global_pause=False,
                 pause_prio_list=lossless_prio_list,
                 test_prio_list=lossless_prio_list,
                 bg_prio_list=lossy_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=True)
| [
"pytest.mark.topology",
"tests.common.helpers.assertions.pytest_require",
"files.helper.run_pfc_test"
] | [((474, 502), 'pytest.mark.topology', 'pytest.mark.topology', (['"""tgen"""'], {}), "('tgen')\n", (494, 502), False, 'import pytest\n'), ((1990, 2120), 'tests.common.helpers.assertions.pytest_require', 'pytest_require', (['(rand_one_dut_hostname == dut_hostname == dut_hostname2)', '"""Priority and port are not mapped to the expected DUT"""'], {}), "(rand_one_dut_hostname == dut_hostname == dut_hostname2,\n 'Priority and port are not mapped to the expected DUT')\n", (2004, 2120), False, 'from tests.common.helpers.assertions import pytest_require\n'), ((2388, 2722), 'files.helper.run_pfc_test', 'run_pfc_test', ([], {'api': 'ixia_api', 'testbed_config': 'ixia_testbed', 'conn_data': 'conn_graph_facts', 'fanout_data': 'fanout_graph_facts', 'duthost': 'duthost', 'dut_port': 'dut_port', 'global_pause': '(False)', 'pause_prio_list': 'pause_prio_list', 'test_prio_list': 'test_prio_list', 'bg_prio_list': 'bg_prio_list', 'prio_dscp_map': 'prio_dscp_map', 'test_traffic_pause': '(True)'}), '(api=ixia_api, testbed_config=ixia_testbed, conn_data=\n conn_graph_facts, fanout_data=fanout_graph_facts, duthost=duthost,\n dut_port=dut_port, global_pause=False, pause_prio_list=pause_prio_list,\n test_prio_list=test_prio_list, bg_prio_list=bg_prio_list, prio_dscp_map\n =prio_dscp_map, test_traffic_pause=True)\n', (2400, 2722), False, 'from files.helper import run_pfc_test\n'), ((4299, 4398), 'tests.common.helpers.assertions.pytest_require', 'pytest_require', (['(rand_one_dut_hostname == dut_hostname)', '"""Port is not mapped to the expected DUT"""'], {}), "(rand_one_dut_hostname == dut_hostname,\n 'Port is not mapped to the expected DUT')\n", (4313, 4398), False, 'from tests.common.helpers.assertions import pytest_require\n'), ((4582, 4916), 'files.helper.run_pfc_test', 'run_pfc_test', ([], {'api': 'ixia_api', 'testbed_config': 'ixia_testbed', 'conn_data': 'conn_graph_facts', 'fanout_data': 'fanout_graph_facts', 'duthost': 'duthost', 'dut_port': 'dut_port', 'global_pause': '(False)', 
'pause_prio_list': 'pause_prio_list', 'test_prio_list': 'test_prio_list', 'bg_prio_list': 'bg_prio_list', 'prio_dscp_map': 'prio_dscp_map', 'test_traffic_pause': '(True)'}), '(api=ixia_api, testbed_config=ixia_testbed, conn_data=\n conn_graph_facts, fanout_data=fanout_graph_facts, duthost=duthost,\n dut_port=dut_port, global_pause=False, pause_prio_list=pause_prio_list,\n test_prio_list=test_prio_list, bg_prio_list=bg_prio_list, prio_dscp_map\n =prio_dscp_map, test_traffic_pause=True)\n', (4594, 4916), False, 'from files.helper import run_pfc_test\n')] |
'''
load lottery tickets and evaluation
support datasets: cifar10, Fashionmnist, cifar100
'''
import os
import time
import random
import shutil
import argparse
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
import torch
import torch.optim
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import torchvision.models as models
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data.sampler import SubsetRandomSampler
from advertorch.utils import NormalizeByChannelMeanStd
from utils import *
from pruning_utils_2 import *
from pruning_utils_unprune import *
# Command-line interface for loading and evaluating lottery tickets.
parser = argparse.ArgumentParser(description='PyTorch Evaluation Tickets')
##################################### general setting #################################################
parser.add_argument('--data', type=str, default='../../data', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset')
parser.add_argument('--arch', type=str, default='res18', help='model architecture')
parser.add_argument('--seed', default=None, type=int, help='random seed')
parser.add_argument('--save_dir', help='The directory used to save the trained models', default=None, type=str)
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--save_model', action="store_true", help="whether saving model")
##################################### training setting #################################################
parser.add_argument('--optim', type=str, default='sgd', help='optimizer')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--epochs', default=182, type=int, help='number of total epochs to run')
parser.add_argument('--warmup', default=0, type=int, help='warm up epochs')
parser.add_argument('--print_freq', default=50, type=int, help='print frequency')
parser.add_argument('--decreasing_lr', default='91,136', help='decreasing strategy')
##################################### Pruning setting #################################################
parser.add_argument('--pretrained', default=None, type=str, help='pretrained weight for pt')
parser.add_argument('--mask_dir', default=None, type=str, help='mask direction for ticket')
parser.add_argument('--conv1', action="store_true", help="whether pruning&rewind conv1")
parser.add_argument('--fc', action="store_true", help="whether rewind fc")
parser.add_argument('--type', type=str, default=None, choices=['ewp', 'random_path', 'betweenness', 'hessian_abs', 'taylor1_abs','intgrads','identity', 'omp'])
parser.add_argument('--add-back', action="store_true", help="add back weights")
parser.add_argument('--prune-type', type=str, choices=["lt", 'pt', 'st', 'mt', 'trained', 'transfer'])
parser.add_argument('--num-paths', default=50000, type=int)
parser.add_argument('--evaluate', action="store_true")
parser.add_argument('--evaluate-p', type=float, default=0.00)
parser.add_argument('--evaluate-random', action="store_true")
parser.add_argument('--evaluate-full', action="store_true")
parser.add_argument('--checkpoint', type=str)
# Best validation accuracy ("SA") observed so far; updated inside main().
best_sa = 0
def main():
    """Entry point: either evaluate a checkpoint or train a loaded ticket.

    Behavior is driven entirely by the module-level ``parser`` flags:
    with --evaluate set, a checkpoint is loaded (optionally with its pruning
    mask), validated and the function returns; otherwise a ticket is loaded
    via load_ticket() and trained for --epochs with SGD + MultiStepLR,
    checkpointing and plotting accuracy curves each epoch.
    """
    global args, best_sa
    args = parser.parse_args()
    print(args)
    print('*'*50)
    print('conv1 included for prune and rewind: {}'.format(args.conv1))
    print('fc included for rewind: {}'.format(args.fc))
    print('*'*50)
    torch.cuda.set_device(int(args.gpu))
    os.makedirs(args.save_dir, exist_ok=True)
    if args.seed:
        setup_seed(args.seed)
    # prepare dataset
    model, train_loader, val_loader, test_loader = setup_model_dataset(args)
    criterion = nn.CrossEntropyLoss()
    if args.evaluate:
        # Evaluation-only path: load the checkpoint on CPU first.
        state_dict = torch.load(args.checkpoint, map_location="cpu")['state_dict']
        if not args.evaluate_full:
            # Re-apply the pruning mask stored in the checkpoint before loading.
            current_mask = extract_mask(state_dict)
            print(current_mask.keys())
            prune_model_custom(model, current_mask, conv1=False)
            check_sparsity(model, conv1=False)
        try:
            model.load_state_dict(state_dict)
        except:
            # NOTE(review): broad except — retries after filling in the
            # normalization buffers missing from older checkpoints.
            state_dict['normalize.mean'] = model.state_dict()['normalize.mean']
            state_dict['normalize.std'] = model.state_dict()['normalize.std']
            model.load_state_dict(state_dict)
        model.cuda()
        validate(val_loader, model, criterion)
        if args.evaluate_p > 0:
            # Optionally prune an extra fraction (randomly if requested).
            pruning_model(model, args.evaluate_p, random=args.evaluate_random)
            check_sparsity(model, conv1=False)
        tacc = validate(val_loader, model, criterion)
        # evaluate on test set
        test_tacc = validate(test_loader, model, criterion)
        print(tacc)
        print(test_tacc)
        return
    #loading tickets
    model.cuda()
    load_ticket(model, args)
    decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=decreasing_lr, gamma=0.1)
    all_result = {}
    all_result['train'] = []
    all_result['test_ta'] = []
    all_result['ta'] = []
    start_epoch = 0
    remain_weight = check_sparsity(model, conv1=args.conv1)
    for epoch in range(start_epoch, args.epochs):
        print(optimizer.state_dict()['param_groups'][0]['lr'])
        acc = train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        tacc = validate(val_loader, model, criterion)
        # evaluate on test set
        test_tacc = validate(test_loader, model, criterion)
        scheduler.step()
        all_result['train'].append(acc)
        all_result['ta'].append(tacc)
        all_result['test_ta'].append(test_tacc)
        all_result['remain_weight'] = remain_weight
        # remember best prec@1 and save checkpoint
        is_best_sa = tacc > best_sa
        best_sa = max(tacc, best_sa)
        if args.save_model:
            save_checkpoint({
                'result': all_result,
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_sa': best_sa,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()
            }, is_SA_best=is_best_sa, save_path=args.save_dir)
        else:
            save_checkpoint({
                'result': all_result
            }, is_SA_best=False, save_path=args.save_dir)
        # Refresh the accuracy-curve plot after every epoch.
        plt.plot(all_result['train'], label='train_acc')
        plt.plot(all_result['ta'], label='val_acc')
        plt.plot(all_result['test_ta'], label='test_acc')
        plt.legend()
        plt.savefig(os.path.join(args.save_dir, 'net_train.png'))
        plt.close()
    check_sparsity(model, conv1=args.conv1)
    # Report test accuracy at the epoch with the best validation accuracy.
    print('* best SA={}'.format(all_result['test_ta'][np.argmax(np.array(all_result['ta']))]))
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch; return the epoch's average top-1 accuracy.

    Reads the module-level ``args`` for warm-up length and print frequency.
    """
    losses = AverageMeter()
    top1 = AverageMeter()

    model.train()
    tic = time.time()

    for step, (image, target) in enumerate(train_loader):
        # Linear LR warm-up during the first args.warmup epochs.
        if epoch < args.warmup:
            warmup_lr(epoch, step + 1, optimizer, one_epoch_step=len(train_loader))

        image, target = image.cuda(), target.cuda()

        logits = model(image)
        loss = criterion(logits, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        logits = logits.float()
        loss = loss.float()

        # Track loss / top-1 accuracy weighted by batch size.
        prec1 = accuracy(logits.data, target)[0]
        losses.update(loss.item(), image.size(0))
        top1.update(prec1.item(), image.size(0))

        if step % args.print_freq == 0:
            toc = time.time()
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Time {3:.2f}'.format(
                      epoch, step, len(train_loader), toc - tic, loss=losses, top1=top1))
            tic = time.time()

    print('train_accuracy {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
def validate(val_loader, model, criterion):
    """Evaluate *model* on *val_loader*; return the average top-1 accuracy.

    Runs in eval mode with gradients disabled; prints progress every
    ``args.print_freq`` batches.
    """
    losses = AverageMeter()
    top1 = AverageMeter()

    model.eval()

    for step, (image, target) in enumerate(val_loader):
        image, target = image.cuda(), target.cuda()

        with torch.no_grad():
            output = model(image)
            loss = criterion(output, target)

        output = output.float()
        loss = loss.float()

        # Track loss / top-1 accuracy weighted by batch size.
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), image.size(0))
        top1.update(prec1.item(), image.size(0))

        if step % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {top1.val:.3f} ({top1.avg:.3f})'.format(
                      step, len(val_loader), loss=losses, top1=top1))

    print('valid_accuracy {top1.avg:.3f}'
          .format(top1=top1))
    return top1.avg
def save_checkpoint(state, is_SA_best, save_path, filename='checkpoint.pth.tar'):
    """Serialize *state* under *save_path*; duplicate it as the best-SA model.

    Args:
        state: arbitrary picklable object (typically a dict of tensors/stats).
        is_SA_best: when True, also copy the file to 'model_SA_best.pth.tar'.
        save_path: destination directory (must already exist).
        filename: checkpoint file name inside *save_path*.
    """
    destination = os.path.join(save_path, filename)
    torch.save(state, destination)
    if is_SA_best:
        best_copy = os.path.join(save_path, 'model_SA_best.pth.tar')
        shutil.copyfile(destination, best_copy)
def load_ticket(model, args):
    """Load ticket weights (and optionally a pruning mask) into *model*.

    Two independent stages, each gated by a CLI flag:
      * args.pretrained — load a weight file ('init_weight' or 'state_dict'
        entry when present), patch in missing normalization buffers, and for
        non-lt/trained prune types keep the model's own fc/conv1 weights.
      * args.mask_dir — extract a pruning mask from a second file and apply
        it through custom_prune(), then report the resulting sparsity.
    """
    # weight
    if args.pretrained:
        # NOTE: 'initalization' is a historical misspelling kept as-is.
        initalization = torch.load(args.pretrained, map_location = torch.device('cuda:'+str(args.gpu)))
        if 'init_weight' in initalization.keys():
            print('loading from init_weight')
            initalization = initalization['init_weight']
        elif 'state_dict' in initalization.keys():
            print('loading from state_dict')
            initalization = initalization['state_dict']
        loading_weight = extract_main_weight(initalization, fc=True, conv1=True)
        new_initialization = model.state_dict()
        # Older weight files lack the normalization buffers; copy them over.
        if not 'normalize.std' in loading_weight:
            loading_weight['normalize.std'] = new_initialization['normalize.std']
            loading_weight['normalize.mean'] = new_initialization['normalize.mean']
        if not (args.prune_type == 'lt' or args.prune_type == 'trained'):
            # Drop the loaded fc/conv1 weights and keep the model's own.
            keys = list(loading_weight.keys())
            for key in keys:
                if key.startswith('fc') or key.startswith('conv1'):
                    del loading_weight[key]
            loading_weight['fc.weight'] = new_initialization['fc.weight']
            loading_weight['fc.bias'] = new_initialization['fc.bias']
            loading_weight['conv1.weight'] = new_initialization['conv1.weight']
        print('*number of loading weight={}'.format(len(loading_weight.keys())))
        print('*number of model weight={}'.format(len(model.state_dict().keys())))
        model.load_state_dict(loading_weight)
    # mask
    if args.mask_dir:
        print('loading mask')
        current_mask_weight = torch.load(args.mask_dir, map_location = torch.device('cuda:'+str(args.gpu)))
        if 'state_dict' in current_mask_weight.keys():
            current_mask_weight = current_mask_weight['state_dict']
        current_mask = extract_mask(current_mask_weight)
        #check_sparsity(model, conv1=args.conv1)
        # 'downsample' is computed but only used by the commented-out
        # prune_random_betweeness call below — TODO confirm it can be removed.
        if args.arch == 'res18':
            downsample = 100
        else:
            downsample = 1000
        custom_prune(model, current_mask, args.type, args.num_paths, args, args.add_back)
        #prune_random_betweeness(model, current_mask, int(args.num_paths), downsample=downsample, conv1=args.conv1)
        check_sparsity(model, conv1=args.conv1)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
    """Linearly ramp the learning rate toward ``args.lr`` during warm-up.

    The rate grows in proportion to the number of optimizer steps taken so
    far out of ``args.warmup`` epochs' worth of steps, capped at ``args.lr``.
    """
    total_steps = args.warmup * one_epoch_step
    steps_done = epoch * one_epoch_step + step
    lr = min(args.lr * steps_done / total_steps, args.lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Clear every statistic back to zero.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        # Record *val* observed *n* times and refresh the running average.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 0-dim tensors, one per k, holding precision in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # pred is (maxk, batch): row j holds the j-th best prediction per sample.
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Bug fix: use reshape(-1) instead of view(-1) — the sliced tensor is
        # non-contiguous after the transpose above, and Tensor.view raises a
        # RuntimeError on non-contiguous input when maxk > 1.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU + all GPUs).

    Also forces deterministic cuDNN kernels so repeated runs with the same
    seed reproduce the same results.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
# Script entry point: parse CLI flags and train/evaluate the loaded ticket.
if __name__ == '__main__':
    main()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.optim.lr_scheduler.MultiStepLR",
"os.makedirs",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.load",
"matplotlib.pyplot.plot",
"os.path.join",
"random.seed",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.random.see... | [((719, 784), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Evaluation Tickets"""'}), "(description='PyTorch Evaluation Tickets')\n", (742, 784), False, 'import argparse\n'), ((3787, 3828), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {'exist_ok': '(True)'}), '(args.save_dir, exist_ok=True)\n', (3798, 3828), False, 'import os\n'), ((3999, 4020), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4018, 4020), True, 'import torch.nn as nn\n'), ((5437, 5525), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'decreasing_lr', 'gamma': '(0.1)'}), '(optimizer, milestones=decreasing_lr,\n gamma=0.1)\n', (5473, 5525), False, 'import torch\n'), ((7533, 7544), 'time.time', 'time.time', ([], {}), '()\n', (7542, 7544), False, 'import time\n'), ((9838, 9871), 'os.path.join', 'os.path.join', (['save_path', 'filename'], {}), '(save_path, filename)\n', (9850, 9871), False, 'import os\n'), ((9876, 9903), 'torch.save', 'torch.save', (['state', 'filepath'], {}), '(state, filepath)\n', (9886, 9903), False, 'import torch\n'), ((13467, 13490), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (13484, 13490), False, 'import torch\n'), ((13496, 13528), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (13522, 13528), False, 'import torch\n'), ((13534, 13554), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13548, 13554), True, 'import numpy as np\n'), ((13560, 13577), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (13571, 13577), False, 'import random\n'), ((6947, 6995), 'matplotlib.pyplot.plot', 'plt.plot', (["all_result['train']"], {'label': '"""train_acc"""'}), "(all_result['train'], label='train_acc')\n", (6955, 6995), True, 'import matplotlib.pyplot as plt\n'), ((7004, 7047), 'matplotlib.pyplot.plot', 'plt.plot', (["all_result['ta']"], {'label': 
'"""val_acc"""'}), "(all_result['ta'], label='val_acc')\n", (7012, 7047), True, 'import matplotlib.pyplot as plt\n'), ((7056, 7105), 'matplotlib.pyplot.plot', 'plt.plot', (["all_result['test_ta']"], {'label': '"""test_acc"""'}), "(all_result['test_ta'], label='test_acc')\n", (7064, 7105), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7126), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7124, 7126), True, 'import matplotlib.pyplot as plt\n'), ((7201, 7212), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7210, 7212), True, 'import matplotlib.pyplot as plt\n'), ((4065, 4112), 'torch.load', 'torch.load', (['args.checkpoint'], {'map_location': '"""cpu"""'}), "(args.checkpoint, map_location='cpu')\n", (4075, 4112), False, 'import torch\n'), ((7147, 7191), 'os.path.join', 'os.path.join', (['args.save_dir', '"""net_train.png"""'], {}), "(args.save_dir, 'net_train.png')\n", (7159, 7191), False, 'import os\n'), ((8277, 8288), 'time.time', 'time.time', ([], {}), '()\n', (8286, 8288), False, 'import time\n'), ((8595, 8606), 'time.time', 'time.time', ([], {}), '()\n', (8604, 8606), False, 'import time\n'), ((9034, 9049), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9047, 9049), False, 'import torch\n'), ((9957, 10005), 'os.path.join', 'os.path.join', (['save_path', '"""model_SA_best.pth.tar"""'], {}), "(save_path, 'model_SA_best.pth.tar')\n", (9969, 10005), False, 'import os\n'), ((7322, 7348), 'numpy.array', 'np.array', (["all_result['ta']"], {}), "(all_result['ta'])\n", (7330, 7348), True, 'import numpy as np\n')] |
import time
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
# absl command-line flags for the YOLOv3 single-image detection script.
flags.DEFINE_string('classes', './data/vocmine.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3_train_9.tf',
                    'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('image', './data/girl.png', 'path to input image')
flags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')
flags.DEFINE_string('output', './output.jpg', 'path to output image')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
def main(_argv):
    """Run YOLOv3 detection on one image (or the first tfrecord sample) and
    write the annotated result to FLAGS.output."""
    # Allow GPU memory to grow on demand instead of reserving it all up front.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    if FLAGS.tiny:
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        yolo = YoloV3(classes=FLAGS.num_classes)

    # expect_partial(): the checkpoint may contain optimizer slots we don't load.
    yolo.load_weights(FLAGS.weights).expect_partial()
    logging.info('weights loaded')

    # Fix: close the classes file instead of leaking the handle.
    with open(FLAGS.classes) as f:
        class_names = [c.strip() for c in f.readlines()]
    logging.info('classes loaded')

    if FLAGS.tfrecord:
        dataset = load_tfrecord_dataset(
            FLAGS.tfrecord, FLAGS.classes, FLAGS.size)
        dataset = dataset.shuffle(512)
        img_raw, _label = next(iter(dataset.take(1)))
    else:
        # Fix: close the image file instead of leaking the handle.
        with open(FLAGS.image, 'rb') as f:
            img_raw = tf.image.decode_image(f.read(), channels=3)

    img = tf.expand_dims(img_raw, 0)
    img = transform_images(img, FLAGS.size)

    t1 = time.time()
    boxes, scores, classes, nums = yolo(img)
    t2 = time.time()
    logging.info('time: {}'.format(t2 - t1))

    logging.info('detections:')
    for i in range(nums[0]):
        logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))

    # OpenCV expects BGR channel order when writing images.
    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(FLAGS.output, img)
    logging.info('output saved to: {}'.format(FLAGS.output))
if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # absl's app.run() raises SystemExit on normal completion; swallow it
        # so the script exits quietly.
        pass
| [
"cv2.imwrite",
"yolov3_tf2.dataset.transform_images",
"tensorflow.config.experimental.set_memory_growth",
"absl.flags.DEFINE_integer",
"absl.logging.info",
"absl.flags.DEFINE_boolean",
"absl.app.run",
"numpy.array",
"yolov3_tf2.dataset.load_tfrecord_dataset",
"yolov3_tf2.utils.draw_outputs",
"ti... | [((303, 381), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""classes"""', '"""./data/vocmine.names"""', '"""path to classes file"""'], {}), "('classes', './data/vocmine.names', 'path to classes file')\n", (322, 381), False, 'from absl import app, flags, logging\n'), ((382, 475), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""weights"""', '"""./checkpoints/yolov3_train_9.tf"""', '"""path to weights file"""'], {}), "('weights', './checkpoints/yolov3_train_9.tf',\n 'path to weights file')\n", (401, 475), False, 'from absl import app, flags, logging\n'), ((492, 552), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""tiny"""', '(False)', '"""yolov3 or yolov3-tiny"""'], {}), "('tiny', False, 'yolov3 or yolov3-tiny')\n", (512, 552), False, 'from absl import app, flags, logging\n'), ((553, 606), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""size"""', '(416)', '"""resize images to"""'], {}), "('size', 416, 'resize images to')\n", (573, 606), False, 'from absl import app, flags, logging\n'), ((607, 677), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""image"""', '"""./data/girl.png"""', '"""path to input image"""'], {}), "('image', './data/girl.png', 'path to input image')\n", (626, 677), False, 'from absl import app, flags, logging\n'), ((678, 744), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tfrecord"""', 'None', '"""tfrecord instead of image"""'], {}), "('tfrecord', None, 'tfrecord instead of image')\n", (697, 744), False, 'from absl import app, flags, logging\n'), ((745, 814), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output"""', '"""./output.jpg"""', '"""path to output image"""'], {}), "('output', './output.jpg', 'path to output image')\n", (764, 814), False, 'from absl import app, flags, logging\n'), ((815, 888), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_classes"""', '(80)', '"""number of classes in the model"""'], {}), "('num_classes', 80, 'number of classes in 
the model')\n", (835, 888), False, 'from absl import app, flags, logging\n'), ((931, 982), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (975, 982), True, 'import tensorflow as tf\n'), ((1291, 1321), 'absl.logging.info', 'logging.info', (['"""weights loaded"""'], {}), "('weights loaded')\n", (1303, 1321), False, 'from absl import app, flags, logging\n'), ((1398, 1428), 'absl.logging.info', 'logging.info', (['"""classes loaded"""'], {}), "('classes loaded')\n", (1410, 1428), False, 'from absl import app, flags, logging\n'), ((1760, 1786), 'tensorflow.expand_dims', 'tf.expand_dims', (['img_raw', '(0)'], {}), '(img_raw, 0)\n', (1774, 1786), True, 'import tensorflow as tf\n'), ((1797, 1830), 'yolov3_tf2.dataset.transform_images', 'transform_images', (['img', 'FLAGS.size'], {}), '(img, FLAGS.size)\n', (1813, 1830), False, 'from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset\n'), ((1841, 1852), 'time.time', 'time.time', ([], {}), '()\n', (1850, 1852), False, 'import time\n'), ((1907, 1918), 'time.time', 'time.time', ([], {}), '()\n', (1916, 1918), False, 'import time\n'), ((1969, 1996), 'absl.logging.info', 'logging.info', (['"""detections:"""'], {}), "('detections:')\n", (1981, 1996), False, 'from absl import app, flags, logging\n'), ((2306, 2368), 'yolov3_tf2.utils.draw_outputs', 'draw_outputs', (['img', '(boxes, scores, classes, nums)', 'class_names'], {}), '(img, (boxes, scores, classes, nums), class_names)\n', (2318, 2368), False, 'from yolov3_tf2.utils import draw_outputs\n'), ((2373, 2403), 'cv2.imwrite', 'cv2.imwrite', (['FLAGS.output', 'img'], {}), '(FLAGS.output, img)\n', (2384, 2403), False, 'import cv2\n'), ((1036, 1099), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_device', '(True)'], {}), '(physical_device, True)\n', (1076, 1099), True, 'import tensorflow as tf\n'), ((1135, 1172), 
'yolov3_tf2.models.YoloV3Tiny', 'YoloV3Tiny', ([], {'classes': 'FLAGS.num_classes'}), '(classes=FLAGS.num_classes)\n', (1145, 1172), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny\n'), ((1198, 1231), 'yolov3_tf2.models.YoloV3', 'YoloV3', ([], {'classes': 'FLAGS.num_classes'}), '(classes=FLAGS.num_classes)\n', (1204, 1231), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny\n'), ((1471, 1535), 'yolov3_tf2.dataset.load_tfrecord_dataset', 'load_tfrecord_dataset', (['FLAGS.tfrecord', 'FLAGS.classes', 'FLAGS.size'], {}), '(FLAGS.tfrecord, FLAGS.classes, FLAGS.size)\n', (1492, 1535), False, 'from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset\n'), ((2511, 2524), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2518, 2524), False, 'from absl import app, flags, logging\n'), ((2145, 2167), 'numpy.array', 'np.array', (['scores[0][i]'], {}), '(scores[0][i])\n', (2153, 2167), True, 'import numpy as np\n'), ((2212, 2233), 'numpy.array', 'np.array', (['boxes[0][i]'], {}), '(boxes[0][i])\n', (2220, 2233), True, 'import numpy as np\n')] |
'''
limitcalls.py: implement rate limit handling for API calls
Object class RateLimit implements a token tracking mechanism that arranges
to return a maximum of a given number of tokens in a given amount of time.
There are two ways of using this.  The first way is simple but only suitable
when there is only one function making calls to a particular API. Then,
the @rate_limit() decorator can be used on the function making the calls.
E.g.,
@rate_limit(max_calls = 100, time_limit = 60)
The other way of using this system is suitable when multiple functions may
call the same API endpoint. In that case, first create a RateLimit object
and hand that object to the @rate_limit() decorator. E.g.:
limits = RateLimit(30, 1)
@rate_limit(limits)
def first_function_calling_api():
... code calling the network API ...
@rate_limit(limits)
def second_function_calling_api():
... code calling the network API ...
Acknowledgments
---------------
The approach and initial code for this module is based on a Stack Overflow
posting by user "TitouanT" at https://stackoverflow.com/a/52133209/743730
According to the terms of use of Stack Overflow, code posted there is
licensed CC BY-SA 3.0: https://creativecommons.org/licenses/by-sa/3.0/
This has been modified from the original.
Authors
-------
<NAME> <<EMAIL>> -- Caltech Library
'''
import functools
from time import sleep, perf_counter
from .debug import log
# Classes.
# .............................................................................
class RateLimit:
    '''Token bucket that hands out at most max_calls tokens every
    time_limit seconds.'''

    def __init__(self, max_calls, time_limit):
        self.max_calls = max_calls
        self.time_limit = time_limit
        self.token = max_calls
        self.time = perf_counter()

    def pause(self):
        '''Consume one token.  Return True when the caller must wait
        (bucket empty and the window has not yet elapsed), else False.'''
        out_of_tokens = self.token <= 0
        if out_of_tokens and not self.restock():
            return True
        self.token -= 1
        return False

    def restock(self):
        '''Refill the bucket when time_limit seconds have passed since the
        last refill.  Return True on refill, False otherwise.'''
        current = perf_counter()
        if (current - self.time) >= self.time_limit:
            self.token = self.max_calls
            self.time = current
            return True
        return False
# Decorator function.
# .............................................................................
def rate_limit(obj = None, *, max_calls = 30, time_limit = 60):
    '''Decorator factory that throttles calls to the wrapped function.

    time_limit is in units of seconds.  Pass an existing RateLimit as obj
    to share one call budget across several functions; when obj is omitted
    a private RateLimit(max_calls, time_limit) is created instead.
    '''
    if obj is None:
        obj = RateLimit(max_calls, time_limit)

    def limit_decorator(func):
        @functools.wraps(func)
        def limit_wrapper(*args, **kwargs):
            # Spin (sleeping) until the shared bucket grants a token.
            while obj.pause():
                if __debug__: log('waiting on rate limit')
                remaining = obj.time_limit - (perf_counter() - obj.time)
                sleep(remaining)
            return func(*args, **kwargs)
        return limit_wrapper
    return limit_decorator
| [
"time.sleep",
"time.perf_counter",
"functools.wraps"
] | [((1830, 1844), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1842, 1844), False, 'from time import sleep, perf_counter\n'), ((2027, 2041), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (2039, 2041), False, 'from time import sleep, perf_counter\n'), ((2519, 2540), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (2534, 2540), False, 'import functools\n'), ((2697, 2711), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (2709, 2711), False, 'from time import sleep, perf_counter\n'), ((2728, 2768), 'time.sleep', 'sleep', (['(obj.time_limit - (now - obj.time))'], {}), '(obj.time_limit - (now - obj.time))\n', (2733, 2768), False, 'from time import sleep, perf_counter\n')] |
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, FunctionTransformer
from sklearn.compose import ColumnTransformer, make_column_selector

# This will select columns that match "feature*"
# A blank FunctionTransformer is idiomatic sklearn to pass thru
col_selector = make_column_selector(pattern='feature*')
# ColumnTransformer keeps only the selected columns (remainder is dropped by
# default); the identity FunctionTransformer passes their values through.
select_cols = ColumnTransformer([
    ('select', FunctionTransformer(), col_selector)
])
# Replace NaNs with the per-column mean before scaling.
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
scaler = StandardScaler()
# L1-penalised logistic regression; liblinear is a solver that supports l1.
log_reg = LogisticRegression(
    C=0.1,
    max_iter=1000,
    tol=0.1,
    verbose=10,
    penalty='l1',
    solver='liblinear',
    random_state=42
)
# Full pipeline: column selection -> imputation -> scaling -> classifier.
lr_pipe = Pipeline(steps=[
    ('select', select_cols),
    ('imputer', imp_mean),
    ('scaler', scaler),
    ('log_reg', log_reg)
])

# Registry of candidate models, keyed by a human-readable name.
models = {
    'baseline_log_reg': lr_pipe
}
| [
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"sklearn.impute.SimpleImputer",
"sklearn.pipeline.Pipeline",
"sklearn.preprocessing.FunctionTransformer",
"sklearn.compose.make_column_selector"
] | [((418, 458), 'sklearn.compose.make_column_selector', 'make_column_selector', ([], {'pattern': '"""feature*"""'}), "(pattern='feature*')\n", (438, 458), False, 'from sklearn.compose import ColumnTransformer, make_column_selector\n'), ((561, 614), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""'}), "(missing_values=np.nan, strategy='mean')\n", (574, 614), False, 'from sklearn.impute import SimpleImputer\n'), ((624, 640), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (638, 640), False, 'from sklearn.preprocessing import StandardScaler, FunctionTransformer\n'), ((651, 767), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(0.1)', 'max_iter': '(1000)', 'tol': '(0.1)', 'verbose': '(10)', 'penalty': '"""l1"""', 'solver': '"""liblinear"""', 'random_state': '(42)'}), "(C=0.1, max_iter=1000, tol=0.1, verbose=10, penalty='l1',\n solver='liblinear', random_state=42)\n", (669, 767), False, 'from sklearn.linear_model import LogisticRegression\n'), ((805, 915), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('select', select_cols), ('imputer', imp_mean), ('scaler', scaler), (\n 'log_reg', log_reg)]"}), "(steps=[('select', select_cols), ('imputer', imp_mean), ('scaler',\n scaler), ('log_reg', log_reg)])\n", (813, 915), False, 'from sklearn.pipeline import Pipeline\n'), ((508, 529), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', ([], {}), '()\n', (527, 529), False, 'from sklearn.preprocessing import StandardScaler, FunctionTransformer\n')] |
#!/usr/bin/env python3
import requests, random, time, subprocess

# Target endpoint of the debtor payment service.
url = "http://rtp-debtor-payment-service-rtp-reference.apps.nyc-f63a.open.redhat.com/payments-service/payments"
print("Connecting to URL: " + url)

# Fire 20 payment requests, each with a random amount, with a tiny pause
# between posts.
for _ in range(20):
    payment = {
        "senderAccountNumber": "12000194212199004",
        "amount": random.randint(1, 1000),
        "receiverFirstName": "Edward",
        "receiverLastName": "Garcia",
        "receiverEmail": "<EMAIL>",
        "receiverCellPhone": "null",
    }
    requests.post(url, json={"payments": [payment]})
    time.sleep(.001)
| [
"requests.post",
"random.randint",
"time.sleep"
] | [((450, 479), 'requests.post', 'requests.post', (['url'], {'json': 'data'}), '(url, json=data)\n', (463, 479), False, 'import requests, random, time, subprocess\n'), ((484, 501), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (494, 501), False, 'import requests, random, time, subprocess\n'), ((310, 333), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (324, 333), False, 'import requests, random, time, subprocess\n')] |
import requests
import xml.etree.ElementTree as ET
import logging
from logging.config import dictConfig
import json
import copy
import tempfile
import os
import calendar
import time
import sys
from requests.auth import HTTPBasicAuth
import xml.dom.minidom
import datetime
import shutil
from io import open
import platform
from splunkversioncontrol_utility import runOSProcess, get_password
"""
Restore Knowledge Objects
Query a remote lookup file to determine what items should be restored from git into a Splunk instance
In general this will run against localhost, unless it is being tested, as the lookup file will be updated
by a user-accessible dashboard
Basic validation will be done to ensure someone without the required access cannot restore someone else's knowledge objects
"""
# Splunk's standard log directory; the rotating file handler below writes here.
splunkLogsDir = os.environ['SPLUNK_HOME'] + "/var/log/splunk"

#Setup the logging
logging_config = dict(
    version = 1,
    formatters = {
        'f': {'format':
              '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
        },
    handlers = {
        # console handler: warnings and above only
        'h': {'class': 'logging.StreamHandler',
              'formatter': 'f',
              'level': logging.WARN},
        # rotating file handler: full debug output, 2MB per file, 5 backups
        'file': {'class' : 'logging.handlers.RotatingFileHandler',
              'filename' : splunkLogsDir + '/splunkversioncontrol_restore.log',
              'formatter': 'f',
              'maxBytes' : 2097152,
              'level': logging.DEBUG,
              'backupCount' : 5 }
        },
    root = {
        'handlers': ['h','file'],
        'level': logging.DEBUG,
        },
)

dictConfig(logging_config)

logger = logging.getLogger()
# Root logger runs at INFO; the handlers above still filter independently.
logging.getLogger().setLevel(logging.INFO)
class SplunkVersionControlRestore:
    """Restore Splunk knowledge objects from a git working copy back into a
    Splunk instance via the REST API.  Instance attributes are populated from
    the modular-input configuration before the restore methods run."""

    splunk_rest = None      # base URL of the destination Splunk REST endpoint
    destUsername = None     # REST basic-auth username (falsy => session-key auth)
    destPassword = None     # REST basic-auth password
    session_key = None      # splunkd session key, used when no username is set
    gitTempDir = None       # checkout subdirectory the object json files are read from
    gitRootDir = None       # root of the temporary git working copy
    appName = "SplunkVersionControl"
    gitRepoURL = None       # remote git repository URL
    stanzaName = None       # modular input stanza name, used as the "i=" log prefix
    sslVerify = False       # whether to verify TLS certificates on REST calls
    # read XML configuration passed from splunkd
    def get_config(self):
        """Parse the modular-input XML configuration splunkd passes on stdin.

        Returns a dict mapping each stanza <param> name to its value, plus the
        keys 'session_key' (splunkd auth token) and 'name' (full stanza name).
        Raises Exception when the XML cannot be read or yields no config.
        """
        config = {}

        try:
            # read everything from stdin
            config_str = sys.stdin.read()

            # parse the config XML
            doc = xml.dom.minidom.parseString(config_str)
            root = doc.documentElement
            session_key = root.getElementsByTagName("session_key")[0].firstChild.data
            #Grab the session key in case we need it
            config['session_key'] = session_key
            conf_node = root.getElementsByTagName("configuration")[0]
            if conf_node:
                logger.debug("XML: found configuration")
                stanza = conf_node.getElementsByTagName("stanza")[0]
                if stanza:
                    stanza_name = stanza.getAttribute("name")
                    if stanza_name:
                        logger.debug("XML: found stanza " + stanza_name)
                        config["name"] = stanza_name
                        # strip the scheme prefix so log lines carry a short name
                        shortName = stanza_name.replace("splunkversioncontrol_restore://", "")
                        params = stanza.getElementsByTagName("param")
                        for param in params:
                            param_name = param.getAttribute("name")
                            logger.debug("i=\"%s\" XML: found param=\"%s\"" % (shortName, param_name))
                            # only TEXT_NODE children carry a usable value
                            if param_name and param.firstChild and \
                               param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                                    data = param.firstChild.data
                                    config[param_name] = data
                                    logger.debug("i=\"%s\" XML: \"%s\"=\"%s\"" % (shortName, param_name, data))

            if not config:
                raise Exception("Invalid configuration received from Splunk.")
        except Exception as e:
            raise Exception("Error getting Splunk configuration via STDIN: %s" % str(e))

        return config
    ###########################
    #
    # runQueries (generic version)
    # This attempts to read the config data from git (stored in json format), if found it will attempt to restore the config to the
    # destination server
    # This method works for everything excluding macros which have a different process
    # Due to variations in the REST API there are a few hacks inside this method to handle specific use cases, however the majority are straightforward
    #
    ###########################
    def runQueries(self, app, endpoint, type, name, scope, user, restoreAsUser, adminLevel):
        """Locate the requested knowledge object in the git checkout and restore it.

        Depending on ``scope`` ("user"/"app"/"all"), searches the
        user/app/global subdirectories of the app under ``self.gitTempDir``
        for a json file named after ``type``, and for each entry matching
        ``name`` delegates to runRestore().

        Returns a (success, message) tuple; success is False when the object
        was never found at any scope or the restore itself failed.
        """
        logger.info("i=\"%s\" user=%s, attempting to restore name=%s in app=%s of type=%s in scope=%s, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, app, type, scope, restoreAsUser, adminLevel))

        url = None
        #Check if the object exists or not
        #Data models require a slightly different URL to just about everything else
        if type=="datamodels" and (scope=="app" or scope=="global"):
            url = self.splunk_rest + "/servicesNS/nobody/%s%s/%s?output_mode=json" % (app, endpoint, name)
        elif type=="datamodels":
            url = self.splunk_rest + "/servicesNS/%s/%s%s/%s?output_mode=json" % (user, app, endpoint, name)
        else:
            url = self.splunk_rest + "/servicesNS/-/%s%s/%s?output_mode=json" % (app, endpoint, name)
        # NOTE(review): self.proxies is not declared with the class attributes;
        # presumably set elsewhere before this method runs -- confirm.
        logger.debug("i=\"%s\" Running requests.get() on url=%s with user=%s in app=%s proxies_length=%s" % (self.stanzaName, url, self.destUsername, app, len(self.proxies)))

        #Determine scope that we will attempt to restore
        appScope = False
        userScope = False
        if scope == "all":
            appScope = True
            userScope = True
        elif scope == "app":
            appScope = True
        elif scope == "user":
            userScope = True
        else:
            logger.error("i=\"%s\" user=%s, while attempting to restore name=%s, found invalid scope of scope=%s" % (self.stanzaName, user, name, scope))

        # Prefer session-key auth when no username/password was configured.
        headers = {}
        auth = None
        if not self.destUsername:
            headers={'Authorization': 'Splunk %s' % self.session_key}
        else:
            auth = HTTPBasicAuth(self.destUsername, self.destPassword)

        message = ""
        res_result = False
        #Verify=false is hardcoded to workaround local SSL issues
        res = requests.get(url, auth=auth, headers=headers, verify=self.sslVerify, proxies=self.proxies)
        objExists = False
        #If we get 404 it definitely does not exist or it has a name override
        if (res.status_code == 404):
            logger.debug("i=\"%s\" URL=%s is throwing a 404, assuming new object creation" % (self.stanzaName, url))
        elif (res.status_code != requests.codes.ok):
            logger.error("i=\"%s\" URL=%s in app=%s statuscode=%s reason=%s response=\"%s\"" % (self.stanzaName, url, app, res.status_code, res.reason, res.text))
        else:
            #However the fact that we did not get a 404 does not mean it exists in the context we expect it to, perhaps it's global and from another app context?
            #or perhaps it's app level but we're restoring a private object...
            logger.debug("i=\"%s\" Attempting to JSON loads on %s" % (self.stanzaName, res.text))
            resDict = json.loads(res.text)
            for entry in resDict['entry']:
                sharingLevel = entry['acl']['sharing']
                appContext = entry['acl']['app']
                if appContext == app and appScope == True and (sharingLevel == 'app' or sharingLevel == 'global'):
                    objExists = True
                elif appContext == app and userScope == True and sharingLevel == "user":
                    objExists = True

        configList = []

        foundAtAnyScope = False
        #We need to work with user scope
        if userScope == True:
            userDir = self.gitTempDir + "/" + app + "/" + "user"
            #user directory exists
            if os.path.isdir(userDir):
                typeFile = userDir + "/" + type
                if os.path.isfile(typeFile):
                    #The file exists, open it and read the config
                    logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
                    with open(typeFile, 'r') as f:
                        configList = json.load(f)
                    found = False
                    for configItem in configList:
                        # user-scoped entries may carry an origName override, so match on either
                        if configItem['name'] == name or ('origName' in configItem and configItem['origName'] == name):
                            #We found the configItem we need, run the restoration
                            logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
                            (res_result, message) = self.runRestore(configItem, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists)
                            found = True
                            foundAtAnyScope = True
                    #Let the logs know we never found it at this scope
                    if found == False:
                        logger.info("i=\"%s\" user=%s, name=%s not found at scope=user in file=%s" % (self.stanzaName, user, name, typeFile))
                #We never found a file that we could use to restore from at this scope
                else:
                    logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
            else:
                #There are no user level objects for this app, therefore the restore will not occur at this scope
                logger.info("i=\"%s\" user directory of dir=%s does not exist" % (self.stanzaName, userDir))

        #It's either app level of globally scoped
        if appScope == True:
            appDir = self.gitTempDir + "/" + app + "/" + "app"
            #app directory exists
            if os.path.isdir(appDir):
                typeFile = appDir + "/" + type
                if os.path.isfile(typeFile):
                    #The file we need exists
                    logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
                    with open(typeFile, 'r') as f:
                        configList = json.load(f)
                    found = False
                    for configItem in configList:
                        #We found the required configuration file, now we restore the object
                        if configItem['name'] == name:
                            logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
                            (res_result, message) = self.runRestore(configItem, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists)
                            found = True
                            foundAtAnyScope = True
                    #We never found the object we wanted to restore
                    if found == False:
                        logger.info("i=\"%s\" user=%s, name=%s not found at app level scope in typeFile=%s" % (self.stanzaName, user, name, typeFile))
                #We did not find the file we wanted to restore from
                else:
                    logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
            else:
                #The app level scope directory does not exist for this app
                logger.info("i=\"%s\" app directory of dir=%s does not exist" % (self.stanzaName, appDir))

            #If could also be a global level restore...
            globalDir = self.gitTempDir + "/" + app + "/" + "global"
            #user directory exists
            if os.path.isdir(globalDir):
                typeFile = globalDir + "/" + type
                if os.path.isfile(typeFile):
                    #We found the file to restore from
                    logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
                    with open(typeFile, 'r') as f:
                        configList = json.load(f)
                    found = False
                    for configItem in configList:
                        #We found the relevant piece of configuration to restore, now run the restore
                        if configItem['name'] == name:
                            logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
                            (res_result, message) = self.runRestore(configItem, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists)
                            found = True
                            foundAtAnyScope = True
                    #We never found the config we wanted to restore
                    if found == False:
                        logger.info("i=\"%s\" user=%s, name=%s not found at scope=global in typeFile=%s" % (self.stanzaName, user, name, typeFile))
                #This type of configuration does not exist at the global level
                else:
                    logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
            #The global directory for this app does not exist
            else:
                logger.debug("i=\"%s\" global directory of dir=%s does not exist" % (self.stanzaName, globalDir))

        # Summarise the outcome across all scopes searched.
        if foundAtAnyScope == True and res_result!=False:
            logger.info("i=\"%s\" user=%s restore has run successfully for name=%s, type=%s, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, type, restoreAsUser, adminLevel))
            return True, message
        elif res_result == False and foundAtAnyScope == True:
            logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=%s, restoreAsUser=%s, adminLevel=%s the object was found, but the restore failed" % (self.stanzaName, user, name, type, restoreAsUser, adminLevel))
            return False, message
        else:
            message = "The object was not found, the restore was unsuccessful. Perhaps check the restore date, scope & capitilisation before trying again?"
            logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=%s, restoreAsUser=%s, adminLevel=%s however the object was not found, the restore was unsuccessful. Perhaps check the restore date, scope & capitilisation before trying again?" % (self.stanzaName, user, name, type, restoreAsUser, adminLevel))
            return False, message
###########################
#
# runRestore (generic version)
# Once we have received the required configuration, type, app, endpoint, name et cetera we attempt
# to run the post to restore or create the object
#
###########################
def runRestore(self, config, type, endpoint, app, name, user, restoreAsUser, adminLevel, objExists):
result = True
#Only an admin can restore an object owned by someone else
if config['owner'] != user and adminLevel == False:
message = "Owner of the object is listed as owner=%s, however user user=%s requested the restore and is not an admin, rejected" % (config['owner'], user)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Only an admin can use the restoreAsUser option
if restoreAsUser != "" and restoreAsUser != user and adminLevel == False:
message = "restoreAsUser=%s which is not user=%s, this user is not an admin, rejected" % (restoreAsUser, user)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Change the owner to the new oner
if restoreAsUser != "" and adminLevel == True:
config["owner"] = restoreAsUser
logger.info("i=\"%s\" Attempting to run restore for name=%s of type=%s with endpoint=%s user=%s, restoreAsUser=%s, adminLevel=%s, objExists=%s" % (self.stanzaName, name, type, endpoint, user, restoreAsUser, adminLevel, objExists))
sharing = config["sharing"]
owner = config["owner"]
message = ""
createOrUpdate = None
if objExists == True:
createOrUpdate = "update"
else:
createOrUpdate = "create"
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
#We cannot post the sharing/owner information to the REST API, we use them later
del config["sharing"]
del config["owner"]
#App / Global scope required the /nobody/ context to be used for POST requests (GET requests do not care)
url = ""
if sharing == "user":
url = "%s/servicesNS/%s/%s%s" % (self.splunk_rest, owner, app, endpoint)
else:
url = "%s/servicesNS/nobody/%s%s" % (self.splunk_rest, app, endpoint)
payload = config
#The config has an origName in it, therefore the object exists lookup may have not worked as expected
#repeat it here for the edge cases (field extractions, field transforms and automatic lookups)
origName = None
if 'origName' in config:
origName = config['origName']
del config['origName']
objExistsURL = "%s/%s?output_mode=json" % (url, origName)
logger.debug("i=\"%s\" URL=%s re-checking object exists URL due to name override from %s to original name of %s proxies_length=%s" % (self.stanzaName, objExistsURL, name, origName, len(self.proxies)))
#Verify=false is hardcoded to workaround local SSL issues
res = requests.get(objExistsURL, auth=auth, headers=headers, verify=self.sslVerify, proxies=self.proxies)
#If we get 404 it definitely does not exist or it has a name override
if (res.status_code == 404):
logger.debug("i=\"%s\" URL=%s is throwing a 404, assuming new object creation" % (self.stanzaName, objExistsURL))
objExists = False
elif (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" URL=%s in app=%s statuscode=%s reason=%s response=\"%s\"" % (self.stanzaName, objExistsURL, app, res.status_code, res.reason, res.text))
else:
#However the fact that we did not get a 404 does not mean it exists in the context we expect it to, perhaps it's global and from another app context?
#or perhaps it's app level but we're restoring a private object...
logger.debug("i=\"%s\" Attempting to JSON loads on %s" % (self.stanzaName, res.text))
resDict = json.loads(res.text)
for entry in resDict['entry']:
sharingLevel = entry['acl']['sharing']
appContext = entry['acl']['app']
appScope = False
userScope = False
if sharing == "global" or sharing == "app":
appScope = True
else:
userScope = True
if appContext == app and appScope == True and (sharingLevel == 'app' or sharingLevel == 'global'):
objExists = True
elif appContext == app and userScope == True and sharingLevel == "user":
objExists = True
logger.debug("i=\"%s\" app=%s objExists=%s after re-checking on %s" % (self.stanzaName, app, objExists, objExistsURL))
#This is an existing object we are modifying
if objExists == True:
createOrUpdate = "update"
if origName:
url = url + "/" + origName
else:
url = url + "/" + name
del config["name"]
#Cannot post type/stanza when updating field extractions or a few other object types, but require them for creation?!
if 'type' in config:
del config['type']
if 'stanza' in config:
del config['stanza']
#Hack to handle the times (conf-times) not including required attributes for creation in existing entries
#not sure how this happens but it fails to create in 7.0.5 but works fine in 7.2.x, fixing for the older versions
if type=="times_conf-times" and "is_sub_menu" not in payload:
payload["is_sub_menu"] = "0"
elif type=="collections_kvstore" and 'disabled' in payload:
del payload['disabled']
logger.debug("i=\"%s\" Attempting to %s type=%s with name=%s on URL=%s with payload=\"%s\" in app=%s proxies_length=%s" % (self.stanzaName, createOrUpdate, type, name, url, payload, app, len(self.proxies)))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
logger.error("i=\"%s\" user=%s, name=%s of type=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", in app=%s, owner=%s" % (self.stanzaName, user, name, type, url, res.status_code, res.reason, res.text, app, owner))
#Saved Searches sometimes fail due to the VSID field, auto-retry in case that solves the problem...
if type=="savedsearches":
if 'vsid' in payload:
del payload['vsid']
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
logger.error("i=\"%s\" user=%s, re-attempted without vsid but result for name=%s of type=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", in app=%s, owner=%s" % (self.stanzaName, user, name, type, url, res.status_code, res.reason, res.text, app, owner))
result = False
else:
logger.info("i=\"%s\" user=%s, name=%s of type=%s with URL=%s successfully %s with the vsid field removed, feel free to ignore the previous error" % (self.stanzaName, user, name, type, url, createOrUpdate))
else:
logger.debug("i=\"%s\" %s name=%s of type=%s in app=%s with URL=%s result=\"%s\" owner=%s" % (self.stanzaName, createOrUpdate, name, type, app, url, res.text, owner))
#Parse the result to find re-confirm the URL and check for messages from Splunk (and log warnings about them)
root = ET.fromstring(res.text)
objURL = None
for child in root:
#Working per entry in the results
if child.tag.endswith("entry"):
#Down to each entry level
for innerChild in child:
#print innerChild.tag
if innerChild.tag.endswith("link") and innerChild.attrib["rel"]=="list":
objURL = "%s/%s" % (self.splunk_rest, innerChild.attrib["href"])
logger.debug("i=\"%s\" name=%s of type=%s in app=%s URL=%s" % (self.stanzaName, name, type, app, objURL))
elif child.tag.endswith("messages"):
for innerChild in child:
if innerChild.tag.endswith("msg") and innerChild.attrib["type"]=="ERROR" or "WARN" in innerChild.attrib:
logger.warn("i=\"%s\" name=%s of type=%s in app=%s had a warn/error message of '%s' owner=%s" % (self.stanzaName, name, type, app, innerChild.text, owner))
#Sometimes the object appears to be create but is unusable which is annoying, at least provide the warning to the logs
if not objURL:
message = "never found objURL so cannot complete ACL change with url=%s, response text=\"%s\" when looking for name=%s, type=%s app=%s, owner=%s" % (url, res.text, name, type, app, owner)
logger.warn("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Re-owning it to the previous owner and sharing level
url = "%s/acl" % (objURL)
payload = { "owner": owner, "sharing" : sharing }
logger.info("i=\"%s\" Attempting to change ownership of type=%s with name=%s via URL=%s to owner=%s in app=%s with sharing=%s" % (self.stanzaName, type, name, url, owner, app, sharing))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
#If re-own fails log this for investigation
if (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" user=%s, name=%s of type=%s in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", owner=%s" % (self.stanzaName, user, name, type, app, url, res.status_code, res.reason, res.text, owner))
result = False
else:
logger.debug("i=\"%s\" user=%s, name=%s of type=%s in app=%s, ownership changed with response=\"%s\", owner=%s, sharing=%s" % (self.stanzaName, user, name, type, app, res.text, owner, sharing))
logger.info("i=\"%s\" %s name=%s of type=%s in app=%s owner=%s sharing=%s" % (self.stanzaName, createOrUpdate, name, type, app, owner, sharing))
return result, message
###########################
#
# runRestoreMacro
# Runs the required queries to create or update the macro knowledge objects and then re-owns them to the correct user
#
###########################
def runRestoreMacro(self, config, app, name, username, restoreAsUser, adminLevel, objExists):
result = True
#Only admins can restore objects on behalf of someone else
if config['owner'] != username and adminLevel == False:
message = "Owner of the object is listed as owner=%s, however user=%s requested the restore and is not an admin, rejected" % (config['owner'], username)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
#Only admins can restore objects into someone else's name
if restoreAsUser != "" and restoreAsUser != username and adminLevel == False:
message = "restoreAsUser=%s which is not the user=%s, this user is not an admin, rejected" % (restoreAsUser, username)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
logger.info("i=\"%s\" Attempting to run macro restore with name=%s, user=%s, restoreAsUser=%s, adminLevel=%s, objExists=%s" % (self.stanzaName, name, username, restoreAsUser, adminLevel, objExists))
#Change the owner to the new oner
if restoreAsUser != "" and adminLevel == True:
config["owner"] = restoreAsUser
sharing = config["sharing"]
name = config["name"]
owner = config["owner"]
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
message = ""
#We are creating the macro
if objExists == False:
url = "%s/servicesNS/%s/%s/properties/macros" % (self.splunk_rest, owner, app)
logger.info("i=\"%s\" Attempting to create type=macro name=%s on URL=%s in app=%s" % (self.stanzaName, name, url, app))
payload = { "__stanza" : name }
#Create macro
#I cannot seem to get this working on the /conf URL but this works so good enough, and it's in the REST API manual...
#servicesNS/-/search/properties/macros
#__stanza = <name>
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
message = "name=%s of type=macro in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", owner=%s" % (name, app, url, res.status_code, res.reason, res.text, owner)
logger.error("i=\"" + self.stanzaName + "\"" + message)
return False, message
else:
#Macros always have the username in this URL context
objURL = "%s/servicesNS/%s/%s/configs/conf-macros/%s" % (self.splunk_rest, owner, app, name)
logger.debug("i=\"%s\" name=%s of type=macro in app=%s URL=%s with owner=%s" % (self.stanzaName, name, app, objURL, owner))
logger.debug("i=\"%s\" name=%s of type=macro in app=%s, received response=\"%s\"" % (self.stanzaName, name, app, res.text))
#Now we have created the macro, modify it so it has some real content (or it's an existing macro we're fixing)
#If this is an app or globally scoped object use the nobody in the URL
url = ""
if objExists == True and sharing != "user":
url = "%s/servicesNS/nobody/%s/properties/macros/%s" % (self.splunk_rest, app, name)
else:
url = "%s/servicesNS/%s/%s/properties/macros/%s" % (self.splunk_rest, owner, app, name)
#Remove parts that cannot be posted to the REST API, sharing/owner we change later
del config["sharing"]
del config["name"]
del config["owner"]
payload = config
logger.debug("i=\"%s\" Attempting to modify type=macro name=%s on URL=%s with payload=\"%s\" in app=%s proxies_length=%s" % (self.stanzaName, name, url, payload, app, len(self.proxies)))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok and res.status_code != 201):
logger.error("i=\"%s\" name=%s of type=macro in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\"" % (self.stanzaName, name, app, url, res.status_code, res.reason, res.text))
result = False
else:
#Re-owning it, I've switched URL's again here but it seems to be working so will not change it
url = "%s/servicesNS/%s/%s/configs/conf-macros/%s/acl" % (self.splunk_rest, owner, app, name)
payload = { "owner": owner, "sharing" : sharing }
logger.info("i=\"%s\" Attempting to change ownership of type=macro name=%s via URL=%s to owner=%s in app=%s with sharing=%s" % (self.stanzaName, name, url, owner, app, sharing))
res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=payload, proxies=self.proxies)
if (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" name=%s of type=macro in app=%s with URL=%s statuscode=%s reason=%s, response=\"%s\", owner=%s sharing=%s" % (self.stanzaName, name, app, url, res.status_code, res.reason, res.text, owner, sharing))
else:
logger.debug("i=\"%s\" name=%s of type=macro in app=%s, ownership changed with response=\"%s\", newOwner=%s and sharing=%s" % (self.stanzaName, name, app, res.text, owner, sharing))
return result, ""
###########################
#
# macros
#
###########################
#macro use cases are slightly different to everything else on the REST API
#enough that this code has not been integrated into the runQuery() function
def macros(self, app, name, scope, user, restoreAsUser, adminLevel):
logger.info("i=\"%s\" user=%s, attempting to restore name=%s in app=%s of type=macro in scope=%s, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, app, scope, restoreAsUser, adminLevel))
#servicesNS/-/-/properties/macros doesn't show private macros so using /configs/conf-macros to find all the macros
#again with count=-1 to find all the available macros
url = self.splunk_rest + "/servicesNS/-/" + app + "/configs/conf-macros/" + name + "?output_mode=json"
logger.debug("i=\"%s\" Running requests.get() on url=%s with user=%s in app=%s for type=macro proxies_length=%s" % (self.stanzaName, url, self.destUsername, app, len(self.proxies)))
#Determine scope that we will attempt to restore
appScope = False
userScope = False
if scope == "all":
appScope = True
userScope = True
elif scope == "app":
appScope = True
elif scope == "user":
userScope = True
else:
logger.error("i=\"%s\" user=%s, while attempting to restore name=%s, found invalid scope=%s" % (self.stanzaName, user, name, scope))
headers = {}
auth = None
if not self.destUsername:
headers={'Authorization': 'Splunk %s' % self.session_key}
else:
auth = HTTPBasicAuth(self.destUsername, self.destPassword)
#Verify=false is hardcoded to workaround local SSL issues
res = requests.get(url, auth=auth, headers=headers, verify=self.sslVerify, proxies=self.proxies)
objExists = False
if (res.status_code == 404):
logger.debug("i=\"%s\" URL=%s is throwing a 404, assuming new object creation" % (self.stanzaName, url))
elif (res.status_code != requests.codes.ok):
logger.error("i=\"%s\" type=macro in app=%s, URL=%s statuscode=%s reason=%s, response=\"%s\"" % (self.stanzaName, app, url, res.status_code, res.reason, res.text))
else:
#However the fact that we did not get a 404 does not mean it exists in the context we expect it to, perhaps it's global and from another app context?
#or perhaps it's app level but we're restoring a private object...
logger.debug("i=\"%s\" Attempting to JSON loads on %s" % (self.stanzaName, res.text))
resDict = json.loads(res.text)
for entry in resDict['entry']:
sharingLevel = entry['acl']['sharing']
appContext = entry['acl']['app']
if appContext == app and appScope == True and (sharingLevel == 'app' or sharingLevel == 'global'):
objExists = True
elif appContext == app and userScope == True and sharingLevel == "user":
objExists = True
configList = []
foundAtAnyScope = False
#This object is at user scope or may be at user scope
if userScope == True:
userDir = self.gitTempDir + "/" + app + "/" + "user"
#user directory exists
if os.path.isdir(userDir):
typeFile = userDir + "/macros"
#We found the file, now open it to obtain the contents
if os.path.isfile(typeFile):
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
#We found the relevant item, now restore it
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary=\"%s\"" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestoreMacro(configItem, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the relevant item
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=user in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#The config file did not exist
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no user level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" user directory of dir=%s does not exist" % (self.stanzaName, userDir))
#The object is either app or globally scoped
if appScope == True:
appDir = self.gitTempDir + "/" + app + "/" + "app"
#app directory exists
if os.path.isdir(appDir):
typeFile = appDir + "/macros"
#We found the file, open it and load the config
if os.path.isfile(typeFile):
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
#We found the item, now restore it
for configItem in configList:
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestoreMacro(configItem, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the item
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=app in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#We never found the file to restore from
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no app level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" app directory of dir=%s does not exist" % (self.stanzaName, appDir))
globalDir = self.gitTempDir + "/" + app + "/" + "global"
#global directory exists
if os.path.isdir(globalDir):
typeFile = globalDir + "/macros"
#We found the file, attempt to load the config
if os.path.isfile(typeFile):
logger.debug("i=\"%s\" user=%s, name=%s, found typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
with open(typeFile, 'r') as f:
configList = json.load(f)
found = False
for configItem in configList:
#We found the item, now restore it
if configItem['name'] == name:
logger.debug("i=\"%s\" user=%s, name=%s is found, dictionary is %s" % (self.stanzaName, user, name, configItem))
(res_result, message) = self.runRestoreMacro(configItem, app, name, user, restoreAsUser, adminLevel, objExists)
found = True
foundAtAnyScope = True
#We never found the item
if found == False:
logger.info("i=\"%s\" user=%s, name=%s not found at scope=global in typeFile=%s" % (self.stanzaName, user, name, typeFile))
#We did not find the file to restore from
else:
logger.info("i=\"%s\" user=%s, name=%s, did not find a typeFile=%s to restore from" % (self.stanzaName, user, name, typeFile))
else:
#There are no global level objects for this app, therefore the restore will not occur at this scope
logger.info("i=\"%s\" global directory of dir=%s does not exist" % (self.stanzaName, globalDir))
if foundAtAnyScope == True and res_result!=False:
logger.info("i=\"%s\" user=%s restore has run successfully for name=%s, type=macro, restoreAsUser=%s, adminLevel=%s" % (self.stanzaName, user, name, restoreAsUser, adminLevel))
return True, message
elif res_result == False and foundAtAnyScope == True:
logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=macro, restoreAsUser=%s, adminLevel=%s the object was found, but the restore was unsuccessful" % (self.stanzaName, user, name, restoreAsUser, adminLevel))
return False, message
else:
logger.warn("i=\"%s\" user=%s attempted to restore name=%s, type=macro, restoreAsUser=%s, adminLevel=%s however the object was not found, the restore was unsuccessful. Perhaps check the restore date, scope & capitalisation before trying again?" % (self.stanzaName, user, name, restoreAsUser, adminLevel))
return False, message
###########################
#
# Migration functions
# These functions migrate the various knowledge objects mainly by calling the runQueries
# with the appropriate options for that type
# Excluding macros, they have their own function
#
###########################
###########################
#
# Dashboards
#
###########################
def dashboards(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/views", "dashboards", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# Saved Searches
#
###########################
def savedsearches(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/saved/searches", "savedsearches",name, scope, username, restoreAsUser, adminLevel)
###########################
#
# field definitions
#
###########################
def calcfields(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/calcfields", "calcfields", name, scope, username, restoreAsUser, adminLevel)
def fieldaliases(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/fieldaliases", "fieldaliases", name, scope, username, restoreAsUser, adminLevel)
def fieldextractions(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/extractions", "fieldextractions", name, scope, username, restoreAsUser, adminLevel)
def fieldtransformations(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/transforms/extractions", "fieldtransformations", name, scope, username, restoreAsUser, adminLevel)
def workflowactions(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/workflow-actions", "workflow-actions", name, scope, username, restoreAsUser, adminLevel)
def sourcetyperenaming(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/sourcetype-rename", "sourcetype-rename", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# tags
#
##########################
def tags(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/configs/conf-tags", "tags", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# eventtypes
#
##########################
def eventtypes(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/saved/eventtypes", "eventtypes", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# navMenus
#
##########################
def navMenu(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/nav", "navMenu", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# data models
#
##########################
def datamodels(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/datamodel/model", "datamodels", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# collections
#
##########################
def collections(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/storage/collections/config", "collections_kvstore", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# viewstates
#
##########################
def viewstates(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/configs/conf-viewstates", "viewstates", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# time labels (conf-times)
#
##########################
def times(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/configs/conf-times", "times_conf-times", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# panels
#
##########################
def panels(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/ui/panels", "pre-built_dashboard_panels", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# lookups (definition/automatic)
#
##########################
def lookupDefinitions(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/transforms/lookups", "lookup_definition", name, scope, username, restoreAsUser, adminLevel)
def automaticLookups(self, app, name, scope, username, restoreAsUser, adminLevel):
return self.runQueries(app, "/data/props/lookups", "automatic_lookups", name, scope, username, restoreAsUser, adminLevel)
###########################
#
# Helper/utility functions
#
##########################
#helper function as per https://stackoverflow.com/questions/31433989/return-copy-of-dictionary-excluding-specified-keys
def without_keys(self, d, keys):
return {x: d[x] for x in d if x not in keys}
#Run a Splunk query via the search/jobs endpoint
    def runSearchJob(self, query, earliest_time="-1h"):
        """Run a Splunk query via the search/jobs endpoint in oneshot mode.

        query -- the SPL search string to execute
        earliest_time -- earliest time bound for the search (default "-1h")
        Returns the parsed JSON response as a dict (note: parsed even when the
        HTTP status was not OK; the error is only logged).
        """
        url = self.splunk_rest + "/servicesNS/-/%s/search/jobs" % (self.appName)
        logger.debug("i=\"%s\" Running requests.post() on url=%s with user=%s query=\"%s\" proxies_length=%s" % (self.stanzaName, url, self.destUsername, query, len(self.proxies)))
        # exec_mode=oneshot blocks until the search completes and returns results inline
        data = { "search" : query, "output_mode" : "json", "exec_mode" : "oneshot", "earliest_time" : earliest_time }
        #no destUsername, use the session_key method
        headers = {}
        auth = None
        if not self.destUsername:
            headers = {'Authorization': 'Splunk %s' % self.session_key }
        else:
            auth = HTTPBasicAuth(self.destUsername, self.destPassword)
        res = requests.post(url, auth=auth, headers=headers, verify=self.sslVerify, data=data, proxies=self.proxies)
        if (res.status_code != requests.codes.ok):
            logger.error("i=\"%s\" URL=%s statuscode=%s reason=%s, response=\"%s\"" % (self.stanzaName, url, res.status_code, res.reason, res.text))
        # res is rebound here from the HTTP response object to the parsed JSON dict
        res = json.loads(res.text)
        #Log return messages from Splunk, often these advise of an issue but not always...
        if len(res["messages"]) > 0:
            firstMessage = res["messages"][0]
            if 'type' in firstMessage and firstMessage['type'] == "INFO":
                #This is a harmless info message ,most other messages are likely an issue
                logger.info("i=\"%s\" messages from query=\"%s\" were messages=\"%s\"" % (self.stanzaName, query, res["messages"]))
            else:
                logger.warn("i=\"%s\" messages from query=\"%s\" were messages=\"%s\"" % (self.stanzaName, query, res["messages"]))
        return res
###########################
#
# Main logic section
#
##########################
#restlist_override is when we are passed a dictionary with info on the restore requirements rather than obtaining this via a lookup command
#config_override is for when we are passed a configuration dictionary and we do not need to read our config from stdin (i.e. we were not called by Splunk in the normal fashion)
def run_script(self, restlist_override=None, config_override=None):
if not config_override:
config = self.get_config()
else:
config = config_override
#If we want debugMode, keep the debug logging, otherwise drop back to INFO level
if 'debugMode' in config:
debugMode = config['debugMode'].lower()
if debugMode == "true" or debugMode == "t":
logging.getLogger().setLevel(logging.DEBUG)
self.stanzaName = config["name"].replace("splunkversioncontrol_restore://", "")
useLocalAuth = False
if 'useLocalAuth' in config:
useLocalAuth = config['useLocalAuth'].lower()
if useLocalAuth == "true" or useLocalAuth=="t":
useLocalAuth = True
else:
useLocalAuth = False
#If we're not using the useLocalAuth we must have a username/password to work with
if useLocalAuth == False and ('destUsername' not in config or 'destPassword' not in config):
logger.fatal("i=\"%s\" useLocalAuth is not set to true and destUsername/destPassword not set, exiting with failure" % (self.stanzaName))
sys.exit(1)
if useLocalAuth == False:
self.destUsername = config['destUsername']
self.destPassword = config['destPassword']
if 'remoteAppName' in config:
self.appName = config['remoteAppName']
auditLogsLookupBackTime = "-1h"
if 'auditLogsLookupBackTime' in config:
auditLogsLookupBackTime = config['auditLogsLookupBackTime']
self.gitRepoURL = config['gitRepoURL']
#From server
self.splunk_rest = config['destURL']
excludedList = [ "destPassword", "session_key" ]
cleanArgs = self.without_keys(config, excludedList)
logger.info("i=\"%s\" Splunk Version Control Restore run with arguments=\"%s\"" % (self.stanzaName, cleanArgs))
self.session_key = config['session_key']
if not useLocalAuth and self.destPassword.find("password:") == 0:
self.destPassword = get_password(self.destPassword[9:], self.session_key, logger)
knownAppList = []
self.gitTempDir = config['gitTempDir']
self.gitRootDir = config['gitTempDir']
if 'git_command' in config:
self.git_command = config['git_command'].strip()
logger.debug("Overriding git command to %s" % (self.git_command))
else:
self.git_command = "git"
if 'ssh_command' in config:
self.ssh_command = config['ssh_command'].strip()
logger.debug("Overriding ssh command to %s" % (self.ssh_command))
else:
self.ssh_command = "ssh"
gitFailure = False
if platform.system() == "Windows":
self.windows = True
else:
self.windows = False
proxies = {}
if 'proxy' in config:
proxies['https'] = config['proxy']
if proxies['https'].find("password:") != -1:
start = proxies['https'].find("password:") + 9
end = proxies['https'].find("@")
logger.debug("Attempting to replace proxy=%s by subsituting=%s with a password" % (proxies['https'], proxies['https'][start:end]))
temp_password = get_password(proxies['https'][start:end], session_key, logger)
proxies['https'] = proxies['https'][0:start-9] + temp_password + proxies['https'][end:]
self.proxies = proxies
if 'sslVerify' in config:
self.sslVerify = config['sslVerify']
dirExists = os.path.isdir(self.gitTempDir)
if dirExists and len(os.listdir(self.gitTempDir)) != 0:
if not ".git" in os.listdir(self.gitTempDir):
#include the subdirectory which is the git repo
self.gitTempDir = self.gitTempDir + "/" + os.listdir(self.gitTempDir)[0]
logger.info("gitTempDir=%s" % (self.gitTempDir))
else:
if not dirExists:
#make the directory and clone under here
os.mkdir(self.gitTempDir)
#Initially we must trust our remote repo URL
(output, stderrout, res) = runOSProcess(self.ssh_command + " -n -o \"BatchMode yes\" -o StrictHostKeyChecking=no " + self.gitRepoURL[:self.gitRepoURL.find(":")], logger)
if res == False:
logger.warn("i=\"%s\" Unexpected failure while attempting to trust the remote git repo?! stdout '%s' stderr '%s'" % (self.stanzaName, output, stderrout))
#Clone the remote git repo
(output, stderrout, res) = runOSProcess("%s clone %s %s" % (self.git_command, self.gitRepoURL, self.gitRootDir), logger, timeout=300)
if res == False:
logger.fatal("i=\"%s\" git clone failed for some reason...on url=%s stdout of '%s' with stderrout of '%s'" % (self.stanzaName, self.gitRepoURL, output, stderrout))
sys.exit(1)
else:
logger.debug("i=\"%s\" result from git command: %s, output '%s' with stderroutput of '%s'" % (self.stanzaName, res, output, stderrout))
logger.info("i=\"%s\" Successfully cloned the git URL=%s into directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitTempDir))
if not ".git" in os.listdir(self.gitTempDir):
#include the subdirectory which is the git repo
self.gitTempDir = self.gitTempDir + "/" + os.listdir(self.gitTempDir)[0]
logger.debug("gitTempDir=%s" % (self.gitTempDir))
if stderrout.find("error:") != -1 or stderrout.find("fatal:") != -1 or stderrout.find("timeout after") != -1:
logger.warn("i=\"%s\" error/fatal messages in git stderroutput please review. stderrout=\"%s\"" % (self.stanzaName, stderrout))
gitFailure = True
if not restlist_override:
#Version Control File that lists what restore we need to do...
restoreList = "splunkversioncontrol_restorelist"
res = self.runSearchJob("| inputlookup %s" % (restoreList))
resList = res["results"]
else:
resList = restlist_override
result = False
if len(resList) == 0:
logger.info("i=\"%s\" No restore required at this point in time" % (self.stanzaName))
else:
#Do a git pull to ensure we are up-to-date
if self.windows:
(output, stderrout, res) = runOSProcess("cd /d %s & %s checkout master & %s pull" % (self.gitTempDir, self.git_command, self.git_command), logger, timeout=300, shell=True)
else:
(output, stderrout, res) = runOSProcess("cd %s; %s checkout master; %s pull" % (self.gitTempDir, self.git_command, self.git_command), logger, timeout=300, shell=True)
if res == False:
logger.fatal("i=\"%s\" git pull failed for some reason...on url=%s stdout of '%s' with stderrout of '%s'. Wiping the git directory to re-clone" % (self.stanzaName, self.gitRepoURL, output, stderrout))
shutil.rmtree(self.gitTempDir)
if self.windows:
(output, stderrout, res) = runOSProcess("cd /d %s & %s checkout master & %s pull" % (self.gitTempDir, self.git_command, self.git_command), logger, timeout=300, shell=True)
else:
(output, stderrout, res) = runOSProcess("cd %s; %s checkout master; %s pull" % (self.gitTempDir, self.git_command, self.git_command), logger, timeout=300, shell=True)
if res == False:
logger.fatal("i=\"%s\" git clone failed for some reason...on url=%s stdout of '%s' with stderrout of '%s'" % (self.stanzaName, self.gitRepoURL, output, stderrout))
sys.exit(1)
else:
logger.debug("i=\"%s\" result from git command: %s, output '%s' with stderroutput of '%s'" % (self.stanzaName, res, output, stderrout))
logger.info("i=\"%s\" Successfully cloned the git URL=%s into directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitRootDir))
else:
logger.info("i=\"%s\" Successfully ran the git pull for URL=%s from directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitRootDir))
if stderrout.find("error:") != -1 or stderrout.find("fatal:") != -1 or stderrout.find("timeout after") != -1:
logger.warn("i=\"%s\" error/fatal messages in git stderroutput please review. stderrout=\"%s\"" % (self.stanzaName, stderrout))
gitFailure = True
if stderrout.find("timeout after") != -1:
return (False, "git command timed out")
logger.debug("i=\"%s\" The restore list is %s" % (self.stanzaName, resList))
#Attempt to determine all users involved in this restore so we can run a single query and determine if they are admins or not
userList = []
for aRes in resList:
user = aRes['user']
userList.append(user)
#obtain a list of unique user id's
userList = list(set(userList))
ldapFilter = None
usernameFilter = None
for user in userList:
if not ldapFilter:
ldapFilter = "*%s*" % (user)
usernameFilter = user
else:
ldapFilter = "%s, *%s*" % (ldapFilter, user)
usernameFilter = "%s, %s" % (usernameFilter, user)
#Query Splunk and determine if the mentioned users have the required admin role, if not they can only restore the objects they own
res = self.runSearchJob("| savedsearch \"SplunkVersionControl CheckAdmin\" ldapFilter=\"%s\", usernameFilter=\"%s\"" % (ldapFilter, usernameFilter))
userResList = []
if 'results' not in res:
logger.warn("i=\"%s\" Unable to run 'SplunkVersionControl CheckAdmin' for some reason with ldapFilter=%s and usernameFilter=%s" % (self.stanzaName, ldapFilter, usernameFilter))
else:
userResList = res["results"]
#Create a list of admins
adminList = []
for userRes in userResList:
username = userRes["username"]
logger.debug("i=\"%s\" Adding user=%s as an admin username" % (self.stanzaName, username))
adminList.append(username)
if not restlist_override:
# Run yet another query, this one provides a list of times/usernames at which valid entries were added to the lookup file
# if the addition to the lookup file was not done via the required report then the restore is not done (as anyone can add a new role
# and put the username as an admin user!)
res = self.runSearchJob("| savedsearch \"SplunkVersionControl Audit Query\"", earliest_time=auditLogsLookupBackTime)
auditEntries = []
if 'results' not in res:
logger.warn("i=\"%s\" Unable to run 'SplunkVersionControl Audit Query' for some reason with earliest_time=%s" % (self.stanzaName, auditLogsLookupBackTime))
else:
auditEntries = res["results"]
logger.debug("i=\"%s\" Audit Entries are: '%s'" % (self.stanzaName, auditEntries))
#Cycle through each result from the earlier lookup and run the required restoration
for aRes in resList:
if not all (entry in aRes for entry in ('time', 'app', 'name', 'restoreAsUser', 'tag', 'type', 'user', 'scope')):
logger.warn("i=\"%s\" this row is invalid, skipping this row of the results, res=\"%s\"" % (self.stanzaName, aRes))
continue
time = aRes['time']
app = aRes['app']
name = aRes['name']
restoreAsUser = aRes['restoreAsUser']
tag = aRes['tag']
type = aRes['type']
user = aRes['user']
scope = aRes['scope']
logger.info("i=\"%s\" user=%s has requested the object with name=%s of type=%s to be restored from tag=%s and scope=%s, restoreAsUser=%s, this was requested at time=%s in app context of app=%s" % (self.stanzaName, user, name, type, tag, scope, restoreAsUser, time, app))
if not restlist_override:
#If we have an entry in the lookup file it should be listed in the audit entries file
found = False
for entry in auditEntries:
#The audit logs are accurate to milliseconds, the lookup *is not* so sometimes it's off by about a second
timeEntry = entry['time']
timeEntryPlus1 = str(int(entry['time']) + 1)
timeEntryMinus1 = str(int(entry['time']) - 1)
if timeEntry == time or timeEntryPlus1 == time or timeEntryMinus1 == time:
found = True
auditUser = entry['user']
if user != auditUser:
logger.warn("i=\"%s\" user=%s found time entry of time=%s with auditUser=%s, this does not match the expected username (%s), rejecting this entry for name=%s of type=%s in app=%s with restoreAsUser=%s" % (self.stanzaName, user, time, auditUser, user, name, type, app, restoreAsUser))
found = False
else:
logger.debug("i=\"%s\" user=%s, found time entry of time=%s, considering this a valid entry and proceeding to restore" % (self.stanzaName, user, time))
if found == False:
logger.warn("i=\"%s\" user=%s, unable to find a time entry of time=%s matching the auditEntries list of %s, skipping this entry" % (self.stanzaName, user, time, auditEntries))
continue
#else we were provided with the override list and the username/audit logs were already checked
adminLevel = False
if user in adminList:
logger.debug("i=\"%s\" user=%s is an admin and has requested object name=%s of type=%s in app=%s to be restored with user=%s and time=%s" % (self.stanzaName, user, name, type, app, restoreAsUser, time))
adminLevel = True
#Only admins can restore objects as another user
if restoreAsUser != "" and restoreAsUser != user and adminLevel == False:
logger.error("i=\"%s\" user=%s is not an admin and has attempted to restore as a different user, requested user=%s, object=%s of type=%s in app=%s to be restored with restoreAsUser=%s time=%s, rejected" % (self.stanzaName, user, restoreAsUser, name, type, app, restoreAsUser, time))
continue
#Do a git pull to ensure we are up-to-date
if self.windows:
(output, stderrout, res) = runOSProcess("cd /d %s & %s checkout %s" % (self.gitTempDir, self.git_command, tag), logger, shell=True)
else:
(output, stderrout, res) = runOSProcess("cd %s; %s checkout %s" % (self.gitTempDir, self.git_command, tag), logger, shell=True)
if res == False:
logger.error("i=\"%s\" user=%s, object name=%s, type=%s, time=%s, git checkout of tag=%s failed in directory dir=%s stdout of '%s' with stderrout of '%s'" % (self.stanzaName, user, name, type, time, tag, self.gitTempDir, output, stderrout))
else:
logger.info("i=\"%s\" Successfully ran the git checkout for URL=%s from directory dir=%s" % (self.stanzaName, self.gitRepoURL, self.gitTempDir))
if stderrout.find("error:") != -1 or stderrout.find("fatal:") != -1 or stderrout.find("timeout after") != -1:
logger.warn("i=\"%s\" error/fatal messages in git stderroutput please review. stderrout=\"%s\"" % (self.stanzaName, stderrout))
gitFailure = True
if stderrout.find("timeout after") != -1:
return (False, "git command timed out")
knownAppList = []
if os.path.isdir(self.gitTempDir):
#include the subdirectory which is the git repo
knownAppList = os.listdir(self.gitTempDir)
logger.debug("i=\"%s\" Known app list is %s" % (self.stanzaName, knownAppList))
#If the app is not known, the restore stops here as we have nothing to restore from!
if app not in knownAppList:
logger.error("i=\"%s\" user=%s requested a restore from app=%s but this is not in the knownAppList therefore restore cannot occur, object=%s of type=%s to be restored with user=%s and time=%s" % (self.stanzaName, user, app, name, type, restoreAsUser, time))
continue
#Deal with the different types of restores that might be required, we only do one row at a time...
if type == "dashboard":
(result, message) = self.dashboards(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "savedsearch":
(result, message) = self.savedsearches(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "macro":
(result, message) = self.macros(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "fieldalias":
(result, message) = self.fieldaliases(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "fieldextraction":
(result, message) = self.fieldextractions(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "fieldtransformation":
(result, message) = self.fieldtransformations(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "navmenu":
(result, message) = self.navMenu(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "datamodel":
(result, message) = self.datamodels(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "panels":
(result, message) = self.panels(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "calcfields":
(result, message) = self.calcfields(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "workflowaction":
(result, message) = self.workflowactions(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "sourcetyperenaming":
(result, message) = self.sourcetyperenaming(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "tags":
(result, message) = self.tags(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "eventtypes":
(result, message) = self.eventtypes(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "lookupdef":
(result, message) = self.lookupDefinitions(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "automaticlookup":
(result, message) = self.automaticLookups(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "collection":
(result, message) = self.collections(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "viewstate":
(result, message) = self.viewstates(app, name, scope, user, restoreAsUser, adminLevel)
elif type == "times":
(result, message) = self.times(app, name, scope, user, restoreAsUser, adminLevel)
else:
logger.error("i=\"%s\" user=%s, unknown type, no restore will occur for object=%s of type=%s in app=%s to be restored with restoreAsUser=%s and time=%s" % (self.stanzaName, user, name, type, app, restoreAsUser, time))
if not restlist_override:
#Wipe the lookup file so we do not attempt to restore these entries again
if len(resList) != 0:
if not gitFailure:
res = self.runSearchJob("| makeresults | fields - _time | outputlookup %s" % (restoreList))
logger.info("i=\"%s\" Cleared the lookup file to ensure we do not attempt to restore the same entries again" % (self.stanzaName))
else:
logger.error("i=\"%s\" git failure occurred during runtime, not wiping the lookup value. This failure may require investigation, please refer to the WARNING messages in the logs" % (self.stanzaName))
if gitFailure:
logger.warn("i=\"%s\" wiping the git directory, dir=%s to allow re-cloning on next run of the script" % (self.stanzaName, self.gitTempDir))
shutil.rmtree(self.gitTempDir)
logger.info("i=\"%s\" Done" % (self.stanzaName))
return (result, message)
| [
"logging.getLogger",
"splunkversioncontrol_utility.runOSProcess",
"requests.post",
"io.open",
"sys.exit",
"sys.stdin.read",
"requests.auth.HTTPBasicAuth",
"os.listdir",
"platform.system",
"os.path.isdir",
"os.mkdir",
"xml.etree.ElementTree.fromstring",
"json.loads",
"requests.get",
"os.p... | [((1583, 1609), 'logging.config.dictConfig', 'dictConfig', (['logging_config'], {}), '(logging_config)\n', (1593, 1609), False, 'from logging.config import dictConfig\n'), ((1620, 1639), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1637, 1639), False, 'import logging\n'), ((1640, 1659), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1657, 1659), False, 'import logging\n'), ((6391, 6485), 'requests.get', 'requests.get', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify,\n proxies=self.proxies)\n', (6403, 6485), False, 'import requests\n'), ((21304, 21414), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'payload', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n payload, proxies=self.proxies)\n', (21317, 21414), False, 'import requests\n'), ((30175, 30285), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'payload', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n payload, proxies=self.proxies)\n', (30188, 30285), False, 'import requests\n'), ((33521, 33615), 'requests.get', 'requests.get', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify,\n proxies=self.proxies)\n', (33533, 33615), False, 'import requests\n'), ((47989, 48096), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'data', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n data, proxies=self.proxies)\n', (48002, 48096), False, 'import requests\n'), ((48306, 48326), 'json.loads', 'json.loads', 
(['res.text'], {}), '(res.text)\n', (48316, 48326), False, 'import json\n'), ((53061, 53091), 'os.path.isdir', 'os.path.isdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (53074, 53091), False, 'import os\n'), ((2138, 2154), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (2152, 2154), False, 'import sys\n'), ((6201, 6252), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['self.destUsername', 'self.destPassword'], {}), '(self.destUsername, self.destPassword)\n', (6214, 6252), False, 'from requests.auth import HTTPBasicAuth\n'), ((8023, 8045), 'os.path.isdir', 'os.path.isdir', (['userDir'], {}), '(userDir)\n', (8036, 8045), False, 'import os\n'), ((10102, 10123), 'os.path.isdir', 'os.path.isdir', (['appDir'], {}), '(appDir)\n', (10115, 10123), False, 'import os\n'), ((12033, 12057), 'os.path.isdir', 'os.path.isdir', (['globalDir'], {}), '(globalDir)\n', (12046, 12057), False, 'import os\n'), ((16914, 16965), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['self.destUsername', 'self.destPassword'], {}), '(self.destUsername, self.destPassword)\n', (16927, 16965), False, 'from requests.auth import HTTPBasicAuth\n'), ((18202, 18306), 'requests.get', 'requests.get', (['objExistsURL'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'proxies': 'self.proxies'}), '(objExistsURL, auth=auth, headers=headers, verify=self.\n sslVerify, proxies=self.proxies)\n', (18214, 18306), False, 'import requests\n'), ((23088, 23111), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['res.text'], {}), '(res.text)\n', (23101, 23111), True, 'import xml.etree.ElementTree as ET\n'), ((25010, 25120), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'payload', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n payload, proxies=self.proxies)\n', (25023, 25120), False, 'import requests\n'), ((27660, 27711), 'requests.auth.HTTPBasicAuth', 
'HTTPBasicAuth', (['self.destUsername', 'self.destPassword'], {}), '(self.destUsername, self.destPassword)\n', (27673, 27711), False, 'from requests.auth import HTTPBasicAuth\n'), ((28325, 28435), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'payload', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n payload, proxies=self.proxies)\n', (28338, 28435), False, 'import requests\n'), ((31080, 31190), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'payload', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n payload, proxies=self.proxies)\n', (31093, 31190), False, 'import requests\n'), ((33388, 33439), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['self.destUsername', 'self.destPassword'], {}), '(self.destUsername, self.destPassword)\n', (33401, 33439), False, 'from requests.auth import HTTPBasicAuth\n'), ((35107, 35129), 'os.path.isdir', 'os.path.isdir', (['userDir'], {}), '(userDir)\n', (35120, 35129), False, 'import os\n'), ((37050, 37071), 'os.path.isdir', 'os.path.isdir', (['appDir'], {}), '(appDir)\n', (37063, 37071), False, 'import os\n'), ((38894, 38918), 'os.path.isdir', 'os.path.isdir', (['globalDir'], {}), '(globalDir)\n', (38907, 38918), False, 'import os\n'), ((47922, 47973), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['self.destUsername', 'self.destPassword'], {}), '(self.destUsername, self.destPassword)\n', (47935, 47973), False, 'from requests.auth import HTTPBasicAuth\n'), ((50598, 50609), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (50606, 50609), False, 'import sys\n'), ((51515, 51576), 'splunkversioncontrol_utility.get_password', 'get_password', (['self.destPassword[9:]', 'self.session_key', 'logger'], {}), '(self.destPassword[9:], self.session_key, logger)\n', (51527, 51576), False, 'from 
splunkversioncontrol_utility import runOSProcess, get_password\n'), ((52199, 52216), 'platform.system', 'platform.system', ([], {}), '()\n', (52214, 52216), False, 'import platform\n'), ((54104, 54215), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('%s clone %s %s' % (self.git_command, self.gitRepoURL, self.gitRootDir))", 'logger'], {'timeout': '(300)'}), "('%s clone %s %s' % (self.git_command, self.gitRepoURL, self.\n gitRootDir), logger, timeout=300)\n", (54116, 54215), False, 'from splunkversioncontrol_utility import runOSProcess, get_password\n'), ((70825, 70855), 'shutil.rmtree', 'shutil.rmtree', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (70838, 70855), False, 'import shutil\n'), ((7333, 7353), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (7343, 7353), False, 'import json\n'), ((8114, 8138), 'os.path.isfile', 'os.path.isfile', (['typeFile'], {}), '(typeFile)\n', (8128, 8138), False, 'import os\n'), ((10191, 10215), 'os.path.isfile', 'os.path.isfile', (['typeFile'], {}), '(typeFile)\n', (10205, 10215), False, 'import os\n'), ((12128, 12152), 'os.path.isfile', 'os.path.isfile', (['typeFile'], {}), '(typeFile)\n', (12142, 12152), False, 'import os\n'), ((34396, 34416), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (34406, 34416), False, 'import json\n'), ((35268, 35292), 'os.path.isfile', 'os.path.isfile', (['typeFile'], {}), '(typeFile)\n', (35282, 35292), False, 'import os\n'), ((37202, 37226), 'os.path.isfile', 'os.path.isfile', (['typeFile'], {}), '(typeFile)\n', (37216, 37226), False, 'import os\n'), ((39051, 39075), 'os.path.isfile', 'os.path.isfile', (['typeFile'], {}), '(typeFile)\n', (39065, 39075), False, 'import os\n'), ((52757, 52819), 'splunkversioncontrol_utility.get_password', 'get_password', (["proxies['https'][start:end]", 'session_key', 'logger'], {}), "(proxies['https'][start:end], session_key, logger)\n", (52769, 52819), False, 'from splunkversioncontrol_utility import 
runOSProcess, get_password\n'), ((53549, 53574), 'os.mkdir', 'os.mkdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (53557, 53574), False, 'import os\n'), ((54436, 54447), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (54444, 54447), False, 'import sys\n'), ((56011, 56159), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('cd /d %s & %s checkout master & %s pull' % (self.gitTempDir, self.\n git_command, self.git_command))", 'logger'], {'timeout': '(300)', 'shell': '(True)'}), "('cd /d %s & %s checkout master & %s pull' % (self.gitTempDir,\n self.git_command, self.git_command), logger, timeout=300, shell=True)\n", (56023, 56159), False, 'from splunkversioncontrol_utility import runOSProcess, get_password\n'), ((56217, 56361), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('cd %s; %s checkout master; %s pull' % (self.gitTempDir, self.git_command,\n self.git_command))", 'logger'], {'timeout': '(300)', 'shell': '(True)'}), "('cd %s; %s checkout master; %s pull' % (self.gitTempDir, self.\n git_command, self.git_command), logger, timeout=300, shell=True)\n", (56229, 56361), False, 'from splunkversioncontrol_utility import runOSProcess, get_password\n'), ((56619, 56649), 'shutil.rmtree', 'shutil.rmtree', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (56632, 56649), False, 'import shutil\n'), ((65972, 66002), 'os.path.isdir', 'os.path.isdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (65985, 66002), False, 'import os\n'), ((19218, 19238), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (19228, 19238), False, 'import json\n'), ((21975, 22085), 'requests.post', 'requests.post', (['url'], {'auth': 'auth', 'headers': 'headers', 'verify': 'self.sslVerify', 'data': 'payload', 'proxies': 'self.proxies'}), '(url, auth=auth, headers=headers, verify=self.sslVerify, data=\n payload, proxies=self.proxies)\n', (21988, 22085), False, 'import requests\n'), ((53121, 53148), 'os.listdir', 'os.listdir', 
(['self.gitTempDir'], {}), '(self.gitTempDir)\n', (53131, 53148), False, 'import os\n'), ((53185, 53212), 'os.listdir', 'os.listdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (53195, 53212), False, 'import os\n'), ((56730, 56878), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('cd /d %s & %s checkout master & %s pull' % (self.gitTempDir, self.\n git_command, self.git_command))", 'logger'], {'timeout': '(300)', 'shell': '(True)'}), "('cd /d %s & %s checkout master & %s pull' % (self.gitTempDir,\n self.git_command, self.git_command), logger, timeout=300, shell=True)\n", (56742, 56878), False, 'from splunkversioncontrol_utility import runOSProcess, get_password\n'), ((56944, 57088), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('cd %s; %s checkout master; %s pull' % (self.gitTempDir, self.git_command,\n self.git_command))", 'logger'], {'timeout': '(300)', 'shell': '(True)'}), "('cd %s; %s checkout master; %s pull' % (self.gitTempDir, self.\n git_command, self.git_command), logger, timeout=300, shell=True)\n", (56956, 57088), False, 'from splunkversioncontrol_utility import runOSProcess, get_password\n'), ((57321, 57332), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (57329, 57332), False, 'import sys\n'), ((64706, 64815), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('cd /d %s & %s checkout %s' % (self.gitTempDir, self.git_command, tag))", 'logger'], {'shell': '(True)'}), "('cd /d %s & %s checkout %s' % (self.gitTempDir, self.\n git_command, tag), logger, shell=True)\n", (64718, 64815), False, 'from splunkversioncontrol_utility import runOSProcess, get_password\n'), ((64880, 64984), 'splunkversioncontrol_utility.runOSProcess', 'runOSProcess', (["('cd %s; %s checkout %s' % (self.gitTempDir, self.git_command, tag))", 'logger'], {'shell': '(True)'}), "('cd %s; %s checkout %s' % (self.gitTempDir, self.git_command,\n tag), logger, shell=True)\n", (64892, 64984), False, 'from splunkversioncontrol_utility 
import runOSProcess, get_password\n'), ((66107, 66134), 'os.listdir', 'os.listdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (66117, 66134), False, 'import os\n'), ((8370, 8389), 'io.open', 'open', (['typeFile', '"""r"""'], {}), "(typeFile, 'r')\n", (8374, 8389), False, 'from io import open\n'), ((8433, 8445), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8442, 8445), False, 'import json\n'), ((10426, 10445), 'io.open', 'open', (['typeFile', '"""r"""'], {}), "(typeFile, 'r')\n", (10430, 10445), False, 'from io import open\n'), ((10489, 10501), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10498, 10501), False, 'import json\n'), ((12373, 12392), 'io.open', 'open', (['typeFile', '"""r"""'], {}), "(typeFile, 'r')\n", (12377, 12392), False, 'from io import open\n'), ((12436, 12448), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12445, 12448), False, 'import json\n'), ((35458, 35477), 'io.open', 'open', (['typeFile', '"""r"""'], {}), "(typeFile, 'r')\n", (35462, 35477), False, 'from io import open\n'), ((35521, 35533), 'json.load', 'json.load', (['f'], {}), '(f)\n', (35530, 35533), False, 'import json\n'), ((37392, 37411), 'io.open', 'open', (['typeFile', '"""r"""'], {}), "(typeFile, 'r')\n", (37396, 37411), False, 'from io import open\n'), ((37455, 37467), 'json.load', 'json.load', (['f'], {}), '(f)\n', (37464, 37467), False, 'import json\n'), ((39241, 39260), 'io.open', 'open', (['typeFile', '"""r"""'], {}), "(typeFile, 'r')\n", (39245, 39260), False, 'from io import open\n'), ((39304, 39316), 'json.load', 'json.load', (['f'], {}), '(f)\n', (39313, 39316), False, 'import json\n'), ((49836, 49855), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (49853, 49855), False, 'import logging\n'), ((53336, 53363), 'os.listdir', 'os.listdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (53346, 53363), False, 'import os\n'), ((54802, 54829), 'os.listdir', 'os.listdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (54812, 54829), False, 
'import os\n'), ((54961, 54988), 'os.listdir', 'os.listdir', (['self.gitTempDir'], {}), '(self.gitTempDir)\n', (54971, 54988), False, 'import os\n')] |
import functools
import math

import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell

import beam_search
import utils
from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell
def auto_reuse(fun):
    """
    Wrapper that automatically handles the `reuse' parameter.
    This is rather risky, as it can lead to reusing variables
    by mistake.

    :param fun: a callable that creates TensorFlow variables (e.g. `tf.get_variable`)
    :return: a wrapped callable that, when `fun` raises a ValueError mentioning
        'reuse' (TensorFlow's "variable already exists" error), retries once
        inside a `reuse=True` variable scope; any other ValueError propagates.
    """
    @functools.wraps(fun)  # preserve the wrapped function's name/docstring
    def fun_(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except ValueError as e:
            if 'reuse' not in str(e):
                # unrelated error: bare `raise` keeps the original traceback
                raise
            with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                return fun(*args, **kwargs)
    return fun_
# Reuse-tolerant aliases for the TensorFlow variable/layer constructors:
# requesting an already-existing variable returns it instead of raising.
get_variable = auto_reuse(tf.get_variable)
dense = auto_reuse(tf.layers.dense)
class CellWrapper(RNNCell):
    """
    Adapter around LayerNormBasicLSTMCell, BasicLSTMCell and MultiRNNCell that keeps
    the state_is_tuple=False behavior (soon to be deprecated): the wrapped cell's
    tuple state is exposed to callers as a single concatenated tensor.
    """
    def __init__(self, cell):
        super(CellWrapper, self).__init__()
        self.cell = cell
        # number of pieces the flat state tensor must be split into
        if isinstance(cell.state_size, tuple):
            self.num_splits = len(cell.state_size)
        else:
            self.num_splits = 1

    @property
    def state_size(self):
        # flat size: sum of the wrapped cell's per-component state sizes
        return sum(self.cell.state_size)

    @property
    def output_size(self):
        return self.cell.output_size

    def __call__(self, inputs, state, scope=None):
        # split the flat state back into the tuple the wrapped cell expects,
        # run the cell, then re-flatten its new state
        split_state = tf.split(value=state, num_or_size_splits=self.num_splits, axis=1)
        output, new_state = self.cell(inputs, split_state, scope=scope)
        return output, tf.concat(new_state, 1)
def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, **kwargs):
    """
    Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`.
    The result is a list of the outputs produced by those encoders (for each time-step), and their final state.

    :param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder.
    :param encoders: list of encoder configurations
    :param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder)
    :param other_inputs: optional tensor concatenated to the embedded inputs along the feature axis
    :return:
      encoder outputs: a list of tensors of shape (batch_size, input_length, encoder_cell_size), hidden states of the
        encoders.
      encoder state: concatenation of the final states of all encoders, tensor of shape (batch_size, sum_of_state_sizes)
      new_encoder_input_length: list of tensors of shape (batch_size,) with the true length of the encoder outputs.
        May be different than `encoder_input_length` because of maxout strides, and time pooling.
    """
    encoder_states = []
    encoder_outputs = []
    # create embeddings in the global scope (allows sharing between encoder and decoder)
    embedding_variables = []
    for encoder in encoders:
        # binary encoders read pre-vectorized input and need no embedding matrix
        if encoder.binary:
            embedding_variables.append(None)
            continue
        # inputs are token ids, which need to be mapped to vectors (embeddings)
        embedding_shape = [encoder.vocab_size, encoder.embedding_size]
        if encoder.embedding_initializer == 'sqrt3':
            # uniform in [-sqrt(3), sqrt(3)]
            initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
        else:
            initializer = None
        device = '/cpu:0' if encoder.embeddings_on_cpu else None
        with tf.device(device):  # embeddings can take a very large amount of memory, so
                                 # storing them in GPU memory can be impractical
            embedding = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape,
                                     initializer=initializer)
        embedding_variables.append(embedding)
    new_encoder_input_length = []
    # build each encoder in its own variable scope
    for i, encoder in enumerate(encoders):
        if encoder.use_lstm is False:
            encoder.cell_type = 'GRU'
        with tf.variable_scope('encoder_{}'.format(encoder.name)):
            encoder_inputs_ = encoder_inputs[i]
            encoder_input_length_ = encoder_input_length[i]
            def get_cell(input_size=None, reuse=False):
                # Build one RNN cell according to the configured cell type
                # (LSTM / dropout-GRU / GRU), optionally wrapped with dropout.
                if encoder.cell_type.lower() == 'lstm':
                    cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse))
                elif encoder.cell_type.lower() == 'dropoutgru':
                    cell = DropoutGRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm,
                                          input_size=input_size, input_keep_prob=encoder.rnn_input_keep_prob,
                                          state_keep_prob=encoder.rnn_state_keep_prob)
                else:
                    cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm)
                # DropoutGRUCell already handles its own dropout internally
                if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru':
                    cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob,
                                          output_keep_prob=encoder.rnn_output_keep_prob,
                                          state_keep_prob=encoder.rnn_state_keep_prob,
                                          variational_recurrent=encoder.pervasive_dropout,
                                          dtype=tf.float32, input_size=input_size)
                return cell
            embedding = embedding_variables[i]
            batch_size = tf.shape(encoder_inputs_)[0]
            time_steps = tf.shape(encoder_inputs_)[1]
            if embedding is not None:
                # map token ids to embedding vectors: flatten, look up, reshape back
                flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
                flat_inputs = tf.nn.embedding_lookup(embedding, flat_inputs)
                encoder_inputs_ = tf.reshape(flat_inputs,
                                             tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value]))
            if other_inputs is not None:
                encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
            if encoder.use_dropout:
                # word-level dropout: drops entire embedding vectors at once
                noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1]
                encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
                                                noise_shape=noise_shape)
                # embedding dropout: drops individual embedding dimensions
                size = tf.shape(encoder_inputs_)[2]
                noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size]
                encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,
                                                noise_shape=noise_shape)
            if encoder.input_layers:
                # optional feed-forward layers between the embeddings and the RNN
                for j, layer_size in enumerate(encoder.input_layers):
                    if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu':
                        activation = tf.nn.relu
                    else:
                        activation = tf.tanh
                    encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True,
                                              name='layer_{}'.format(j))
                    if encoder.use_dropout:
                        encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob)
            # Contrary to Theano's RNN implementation, states after the sequence length are zero
            # (while Theano repeats last state)
            # NOTE(review): inter_layer_keep_prob does not appear to be used below -- confirm
            inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob
            parameters = dict(
                inputs=encoder_inputs_, sequence_length=encoder_input_length_,
                dtype=tf.float32, parallel_iterations=encoder.parallel_iterations
            )
            input_size = encoder_inputs_.get_shape()[2].value
            state_size = (encoder.cell_size * 2 if encoder.cell_type.lower() == 'lstm' else encoder.cell_size)
            def get_initial_state(name='initial_state'):
                # trainable initial state (tiled over the batch), or None for the default zero state
                if encoder.train_initial_states:
                    initial_state = get_variable(name, initializer=tf.zeros(state_size))
                    return tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
                else:
                    return None
            if encoder.bidir:
                # stacked bidirectional RNN; the first layer takes the raw input size,
                # deeper layers take the concatenated fw+bw outputs (2 * cell_size)
                rnn = lambda reuse: stack_bidirectional_dynamic_rnn(
                    cells_fw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
                              for j in range(encoder.layers)],
                    cells_bw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
                              for j in range(encoder.layers)],
                    initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers,
                    initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers,
                    time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg,
                    **parameters)
                initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None
                with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
                    try:
                        encoder_outputs_, _, encoder_states_ = rnn(reuse=False)
                    except ValueError:   # Multi-task scenario where we're reusing the same RNN parameters
                        encoder_outputs_, _, encoder_states_ = rnn(reuse=True)
            else:
                if encoder.time_pooling or encoder.final_state == 'concat_last':
                    raise NotImplementedError
                if encoder.layers > 1:
                    cell = MultiRNNCell([get_cell(input_size if j == 0 else encoder.cell_size)
                                         for j in range(encoder.layers)])
                    initial_state = (get_initial_state(),) * encoder.layers
                else:
                    cell = get_cell(input_size)
                    initial_state = get_initial_state()
                encoder_outputs_, encoder_states_ = auto_reuse(tf.nn.dynamic_rnn)(cell=cell,
                    initial_state=initial_state,
                    **parameters)
            # last_backward: backward-RNN output at t=0 (its "final" state);
            # last_forward: forward-RNN output at each sequence's true last step
            last_backward = encoder_outputs_[:, 0, encoder.cell_size:]
            indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1)
            last_forward = tf.gather_nd(encoder_outputs_[:, :, :encoder.cell_size], indices)
            last_forward.set_shape([None, encoder.cell_size])
            if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states)
                encoder_state_ = tf.concat(encoder_states_, axis=1)
            elif encoder.final_state == 'average':
                # length-masked average of the RNN outputs
                mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
                mask = tf.expand_dims(mask, axis=2)
                encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
            elif encoder.final_state == 'average_inputs':
                # length-masked average of the (embedded) inputs
                mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
                mask = tf.expand_dims(mask, axis=2)
                encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
            elif encoder.bidir and encoder.final_state == 'last_both':
                encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
            elif encoder.bidir and not encoder.final_state == 'last_forward':   # last backward hidden state
                encoder_state_ = last_backward
            else:  # last forward hidden state
                encoder_state_ = last_forward
            if encoder.bidir and encoder.bidir_projection:
                encoder_outputs_ = dense(encoder_outputs_, encoder.cell_size, use_bias=False, name='bidir_projection')
            encoder_outputs.append(encoder_outputs_)
            encoder_states.append(encoder_state_)
            new_encoder_input_length.append(encoder_input_length_)
    encoder_state = tf.concat(encoder_states, 1)
    return encoder_outputs, encoder_state, new_encoder_input_length
def compute_energy(hidden, state, attn_size, attn_keep_prob=None, pervasive_dropout=False, layer_norm=False,
                   mult_attn=False, **kwargs):
    """
    Compute unnormalized attention scores between a decoder state and encoder hidden states.

    :param hidden: encoder hidden states, shape (batch_size, time_steps, hidden_size)
    :param state: decoder state, shape (batch_size, state_size)
    :param attn_size: dimension of the attention projections
    :param attn_keep_prob: if not None, dropout keep probability applied to both inputs
    :param pervasive_dropout: share the dropout mask across the batch (state) and
        across batch and time (hidden)
    :param layer_norm: apply layer normalization inside the additive attention
    :param mult_attn: use multiplicative (bilinear) attention instead of additive
    :return: tensor of shape (batch_size, time_steps), one score per encoder position
    """
    if attn_keep_prob is not None:
        if pervasive_dropout:
            # one mask per feature, shared across the batch / across batch and time
            state = tf.nn.dropout(state, keep_prob=attn_keep_prob, noise_shape=[1, tf.shape(state)[1]])
            hidden = tf.nn.dropout(hidden, keep_prob=attn_keep_prob, noise_shape=[1, 1, tf.shape(hidden)[2]])
        else:
            state = tf.nn.dropout(state, keep_prob=attn_keep_prob, noise_shape=None)
            hidden = tf.nn.dropout(hidden, keep_prob=attn_keep_prob, noise_shape=None)
    if mult_attn:
        # multiplicative attention: project both sides and take a batched dot product
        state = dense(state, attn_size, use_bias=False, name='state')
        hidden = dense(hidden, attn_size, use_bias=False, name='hidden')
        return tf.einsum('ijk,ik->ij', hidden, state)
    # additive (Bahdanau-style) attention: v_a . tanh(U_a h + W_a s)
    projected_state = dense(state, attn_size, use_bias=not layer_norm, name='W_a')
    projected_state = tf.expand_dims(projected_state, axis=1)
    if layer_norm:
        projected_state = tf.contrib.layers.layer_norm(projected_state, scope='layer_norm_state')
        hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden')
    projected_hidden = dense(hidden, attn_size, use_bias=False, name='U_a')
    v_a = get_variable('v_a', [attn_size])
    return tf.reduce_sum(v_a * tf.tanh(projected_hidden + projected_state), axis=2)
def compute_energy_with_filter(hidden, state, prev_weights, attn_filters, attn_filter_length,
                               **kwargs):
    """Attention energies with convolutional features over the previous attention weights.

    Coverage-style scoring: v . tanh(W h + y(state) + z(prev_weights)), where z is a
    learned projection of a 1D convolution over last step's attention distribution.

    :param hidden: encoder states (batch x time x features; a singleton axis is inserted below)
    :param state: decoder state (batch x features)
    :param prev_weights: attention weights from the previous decoder step (batch x time)
    :param attn_filters: number of convolution filters
    :param attn_filter_length: half-width of the convolution window
    :return: 2D tensor of energies (batch x time)
    """
    hidden = tf.expand_dims(hidden, 2)
    batch_size = tf.shape(hidden)[0]
    time_steps = tf.shape(hidden)[1]
    attn_size = hidden.get_shape()[3].value

    # convolve previous weights with a learned window of width 2*attn_filter_length + 1
    filter_shape = [attn_filter_length * 2 + 1, 1, 1, attn_filters]
    filter_ = get_variable('filter', filter_shape)
    u = get_variable('U', [attn_filters, attn_size])
    prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1]))
    conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME')
    shape = tf.stack([tf.multiply(batch_size, time_steps), attn_filters])
    conv = tf.reshape(conv, shape)
    z = tf.matmul(conv, u)  # project filter outputs into the attention space
    z = tf.reshape(z, tf.stack([batch_size, time_steps, 1, attn_size]))

    y = dense(state, attn_size, use_bias=True, name='y')
    y = tf.reshape(y, [-1, 1, 1, attn_size])  # broadcast state projection over time

    k = get_variable('W', [attn_size, attn_size])
    # dot product between tensors requires reshaping
    hidden = tf.reshape(hidden, tf.stack([tf.multiply(batch_size, time_steps), attn_size]))
    f = tf.matmul(hidden, k)
    f = tf.reshape(f, tf.stack([batch_size, time_steps, 1, attn_size]))

    v = get_variable('V', [attn_size])
    s = f + y + z
    return tf.reduce_sum(v * tf.tanh(s), [2, 3])
def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
    """Standard (global) soft attention over all encoder states.

    :param state: decoder state (batch x state_size)
    :param hidden_states: encoder states (batch x time x features)
    :param encoder: encoder configuration (reads attn_size, attn_filters, attn_temperature, ...)
    :param encoder_input_length: true length of each sequence in the batch
    :param scope: optional variable scope name (defaults to 'attention_<encoder name>')
    :param context: previous context vector; concatenated to `state` when
        `encoder.use_context` is set
    :return: (weighted_average, weights) — context vector and attention distribution
        over the input time steps
    """
    with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
        if context is not None and encoder.use_context:
            state = tf.concat([state, context], axis=1)

        if encoder.attn_filters:
            # coverage-style energies using convolutional features of the previous weights
            e = compute_energy_with_filter(hidden_states, state, attn_size=encoder.attn_size,
                                           attn_filters=encoder.attn_filters,
                                           attn_filter_length=encoder.attn_filter_length, **kwargs)
        else:
            e = compute_energy(hidden_states, state, attn_size=encoder.attn_size,
                               attn_keep_prob=encoder.attn_keep_prob, pervasive_dropout=encoder.pervasive_dropout,
                               layer_norm=encoder.layer_norm, mult_attn=encoder.mult_attn, **kwargs)

        # subtract the row max for numerical stability of the exponentials below
        e -= tf.reduce_max(e, axis=1, keep_dims=True)
        mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)

        T = encoder.attn_temperature or 1.0  # optional softmax temperature
        exp = tf.exp(e / T) * mask  # zero out padding positions before normalizing
        weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
        weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)

        return weighted_average, weights
def no_attention(state, hidden_states, *args, **kwargs):
    """Degenerate attention: an empty context vector and all-zero weights.

    Keeps the (context, weights) interface of the other attention functions
    while contributing nothing to the decoder input.
    """
    n = tf.shape(state)[0]
    input_length = tf.shape(hidden_states)[1]
    # the context has zero feature columns, so concatenating it downstream is a no-op
    empty_context = tf.zeros(shape=tf.stack([n, 0]))
    zero_weights = tf.zeros(shape=[n, input_length])
    return empty_context, zero_weights
def average_attention(hidden_states, encoder_input_length, *args, **kwargs):
    """Fixed uniform attention: the context is the mean of the non-padded encoder states."""
    max_time = tf.shape(hidden_states)[1]
    valid = tf.sequence_mask(encoder_input_length, maxlen=max_time)
    denom = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
    # each valid position gets weight 1/length; padding gets 0
    weights = tf.to_float(valid) / denom
    context = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
    return context, weights
def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
    """Fixed attention that selects the last valid encoder state of each sequence."""
    max_time = tf.shape(hidden_states)[1]
    # one-hot row peaked at index (length - 1), i.e. the final non-padding step
    weights = tf.to_float(tf.one_hot(encoder_input_length - 1, max_time))
    context = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
    return context, weights
def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None,
                    context=None, **kwargs):
    """Local attention: attend to a window around an alignment position.

    Three modes, selected by `pos` and `encoder.attn_window_size`:
      1. known position + window > 0: hard one-hot attention over a window of
         consecutive encoder states, concatenated and projected to attn_size
         (used by the `pred_edits` scenario);
      2. known position, window == 0: hard one-hot attention at `pos`;
      3. unknown position: predictive local attention of Luong et al. (2015),
         soft attention inside a window centered on a predicted position,
         weighted by a Gaussian over the distance to the center.

    :param state: decoder state (batch x state_size)
    :param hidden_states: encoder states (batch x time x features)
    :param encoder: encoder configuration (attn_size, attn_window_size, use_context, ...)
    :param encoder_input_length: true length of each sequence in the batch
    :param pos: optional known alignment positions (one per batch element)
    :param context: previous context vector, concatenated to `state` when
        `encoder.use_context` is set
    :return: (weighted_average, weights)
    """
    batch_size = tf.shape(state)[0]
    attn_length = tf.shape(hidden_states)[1]

    if context is not None and encoder.use_context:
        state = tf.concat([state, context], axis=1)

    state_size = state.get_shape()[1].value

    with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
        encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))

        if pos is not None:
            pos = tf.reshape(pos, [-1, 1])
            pos = tf.minimum(pos, encoder_input_length - 1)  # clip to valid range

        if pos is not None and encoder.attn_window_size > 0:
            # `pred_edits` scenario, where we know the aligned pos
            # when the windows size is non-zero, we concatenate consecutive encoder states
            # and map it to the right attention vector size.
            weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))

            weighted_average = []
            for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
                pos_ = pos + offset
                pos_ = tf.minimum(pos_, encoder_input_length - 1)
                pos_ = tf.maximum(pos_, 0)  # TODO: when pos is < 0, use <S> or </S>
                weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
                weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
                weighted_average.append(weighted_average_)

            weighted_average = tf.concat(weighted_average, axis=1)
            weighted_average = dense(weighted_average, encoder.attn_size)
        elif pos is not None:
            # hard attention: pick exactly the encoder state at `pos`
            weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
            weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
        else:
            # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
            wp = get_variable('Wp', [state_size, state_size])
            vp = get_variable('vp', [state_size, 1])

            # predict the window center from the decoder state
            pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
            pos = tf.floor(encoder_input_length * pos)
            pos = tf.reshape(pos, [-1, 1])
            pos = tf.minimum(pos, encoder_input_length - 1)

            idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
            idx = tf.reshape(idx, [-1, attn_length])

            # mask out everything outside [pos - window, pos + window] and past the
            # true sequence length
            low = pos - encoder.attn_window_size
            high = pos + encoder.attn_window_size

            mlow = tf.to_float(idx < low)
            mhigh = tf.to_float(idx > high)
            m = mlow + mhigh
            m += tf.to_float(idx >= encoder_input_length)

            mask = tf.to_float(tf.equal(m, 0.0))

            e = compute_energy(hidden_states, state, attn_size=encoder.attn_size, **kwargs)

            weights = softmax(e, mask=mask)

            # NOTE(review): assumes Python 3 true division — under Python 2 this
            # would floor to an int; confirm the project's interpreter version
            sigma = encoder.attn_window_size / 2
            numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
            div = tf.truediv(numerator, 2 * sigma ** 2)
            weights *= tf.exp(div)  # result of the truncated normal distribution
            # normalize to keep a probability distribution
            # weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)

            weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)

        return weighted_average, weights
def attention(encoder, **kwargs):
    """Dispatch to the attention function selected by `encoder.attention_type`.

    Unknown or missing types fall back to global attention. All keyword
    arguments are forwarded unchanged to the selected function.
    """
    dispatch = {
        'global': global_attention,
        'local': local_attention,
        'none': no_attention,
        'average': average_attention,
        'last_state': last_state_attention,
    }
    attention_fn = dispatch.get(encoder.attention_type, global_attention)
    return attention_fn(encoder=encoder, **kwargs)
def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum',
                    prev_weights=None, **kwargs):
    """Run one attention mechanism per encoder and aggregate the context vectors.

    :param state: decoder state
    :param hidden_states: list of encoder state tensors (one per encoder)
    :param encoders: list of encoder configurations
    :param encoder_input_length: list of sequence-length tensors (one per encoder)
    :param pos: optional list of alignment positions (one per encoder)
    :param aggregation_method: 'sum' to sum the per-encoder context vectors
        element-wise, anything else to concatenate them along the feature axis
    :param prev_weights: optional list of previous attention weights (one per encoder)
    :return: (context_vector, weights) where weights is the list of per-encoder
        attention distributions
    """
    attns = []
    weights = []

    context_vector = None
    for i, (hidden, encoder, input_length) in enumerate(zip(hidden_states, encoders, encoder_input_length)):
        pos_ = pos[i] if pos is not None else None
        prev_weights_ = prev_weights[i] if prev_weights is not None else None

        # match the batch/beam size of the current decoder state
        hidden = beam_search.resize_like(hidden, state)
        input_length = beam_search.resize_like(input_length, state)

        # each encoder's attention receives the context produced by the previous one
        context_vector, weights_ = attention(state=state, hidden_states=hidden, encoder=encoder,
                                             encoder_input_length=input_length, pos=pos_, context=context_vector,
                                             prev_weights=prev_weights_, **kwargs)
        attns.append(context_vector)
        weights.append(weights_)

    if aggregation_method == 'sum':
        context_vector = tf.reduce_sum(tf.stack(attns, axis=2), axis=2)
    else:
        context_vector = tf.concat(attns, axis=1)

    return context_vector, weights
def attention_decoder(decoder_inputs, initial_state, attention_states, encoders, decoder, encoder_input_length,
                      feed_previous=0.0, align_encoder_id=0, feed_argmax=True, **kwargs):
    """
    :param decoder_inputs: int32 tensor of shape (batch_size, output_length)
    :param initial_state: initial state of the decoder (usually the final state of the encoder),
      as a float32 tensor of shape (batch_size, initial_state_size). This state is mapped to the
      correct state size for the decoder.
    :param attention_states: list of tensors of shape (batch_size, input_length, encoder_cell_size),
      the hidden states of the encoder(s) (one tensor for each encoder).
    :param encoders: configuration of the encoders
    :param decoder: configuration of the decoder
    :param encoder_input_length: list of int32 tensors of shape (batch_size,), tells for each encoder,
     the true length of each sequence in the batch (sequences in the same batch are padded to all have the same
     length).
    :param feed_previous: scalar tensor corresponding to the probability to use previous decoder output
      instead of the ground truth as input for the decoder (1 when decoding, between 0 and 1 when training)
    :param feed_argmax: boolean tensor, when True the greedy decoder outputs the word with the highest
    probability (argmax). When False, it samples a word from the probability distribution (softmax).
    :param align_encoder_id: outputs attention weights for this encoder. Also used when predicting edit operations
    (pred_edits), to specifify which encoder reads the sequence to post-edit (MT).

    :return:
      outputs of the decoder as a tensor of shape (batch_size, output_length, decoder_cell_size)
      attention weights as a tensor of shape (output_length, encoders, batch_size, input_length)
    """
    assert not decoder.pred_maxout_layer or decoder.cell_size % 2 == 0, 'cell size must be a multiple of 2'

    if decoder.use_lstm is False:
        decoder.cell_type = 'GRU'

    embedding_shape = [decoder.vocab_size, decoder.embedding_size]
    if decoder.embedding_initializer == 'sqrt3':
        initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
    else:
        initializer = None

    device = '/cpu:0' if decoder.embeddings_on_cpu else None
    with tf.device(device):
        embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer)

    input_shape = tf.shape(decoder_inputs)
    batch_size = input_shape[0]
    time_steps = input_shape[1]

    scope_name = 'decoder_{}'.format(decoder.name)
    scope_name += '/' + '_'.join(encoder.name for encoder in encoders)

    def embed(input_):
        # embedding lookup with optional word-level and embedding-level dropout
        embedded_input = tf.nn.embedding_lookup(embedding, input_)
        if decoder.use_dropout and decoder.word_keep_prob is not None:
            # word dropout: drop whole embedding vectors (per word)
            noise_shape = [1, 1] if decoder.pervasive_dropout else [batch_size, 1]
            embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape)
        if decoder.use_dropout and decoder.embedding_keep_prob is not None:
            size = tf.shape(embedded_input)[1]
            noise_shape = [1, size] if decoder.pervasive_dropout else [batch_size, size]
            embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
                                           noise_shape=noise_shape)
        return embedded_input

    def get_cell(input_size=None, reuse=False):
        # build the (possibly multi-layer) recurrent cell described by the decoder config
        cells = []
        for j in range(decoder.layers):
            input_size_ = input_size if j == 0 else decoder.cell_size
            if decoder.cell_type.lower() == 'lstm':
                cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse))
            elif decoder.cell_type.lower() == 'dropoutgru':
                cell = DropoutGRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm,
                                      input_size=input_size_, input_keep_prob=decoder.rnn_input_keep_prob,
                                      state_keep_prob=decoder.rnn_state_keep_prob)
            else:
                cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)
            if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru':
                cell = DropoutWrapper(cell, input_keep_prob=decoder.rnn_input_keep_prob,
                                      output_keep_prob=decoder.rnn_output_keep_prob,
                                      state_keep_prob=decoder.rnn_state_keep_prob,
                                      variational_recurrent=decoder.pervasive_dropout,
                                      dtype=tf.float32, input_size=input_size_)
            cells.append(cell)

        if len(cells) == 1:
            return cells[0]
        else:
            return CellWrapper(MultiRNNCell(cells))

    def look(state, input_, prev_weights=None, pos=None):
        # run multi-encoder attention; pos / prev_weights only apply to the aligned encoder
        prev_weights_ = [prev_weights if i == align_encoder_id else None for i in range(len(encoders))]
        pos_ = None
        if decoder.pred_edits:
            pos_ = [pos if i == align_encoder_id else None for i in range(len(encoders))]
        if decoder.attn_prev_word:
            state = tf.concat([state, input_], axis=1)

        parameters = dict(hidden_states=attention_states, encoder_input_length=encoder_input_length,
                          encoders=encoders, aggregation_method=decoder.aggregation_method)
        context, new_weights = multi_attention(state, pos=pos_, prev_weights=prev_weights_, **parameters)

        if decoder.context_mapping:
            with tf.variable_scope(scope_name):
                activation = tf.nn.tanh if decoder.context_mapping_activation == 'tanh' else None
                use_bias = not decoder.context_mapping_no_bias
                context = dense(context, decoder.context_mapping, use_bias=use_bias, activation=activation,
                                name='context_mapping')

        return context, new_weights[align_encoder_id]

    def update(state, input_, context=None, symbol=None):
        # one step of the recurrent cell; optionally skips the update for DEL symbols
        if context is not None and decoder.rnn_feed_attn:
            input_ = tf.concat([input_, context], axis=1)
        input_size = input_.get_shape()[1].value

        initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None
        with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
            try:
                output, new_state = get_cell(input_size)(input_, state)
            except ValueError:  # auto_reuse doesn't work with LSTM cells
                output, new_state = get_cell(input_size, reuse=True)(input_, state)

        if decoder.skip_update and decoder.pred_edits and symbol is not None:
            # keep the old state when the predicted edit operation is a deletion
            is_del = tf.equal(symbol, utils.DEL_ID)
            new_state = tf.where(is_del, state, new_state)

        if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
            output = new_state

        return output, new_state

    def update_pos(pos, symbol, max_pos=None):
        # advance the alignment position for KEEP/DEL (not INS) edit operations
        if not decoder.pred_edits:
            return pos

        is_keep = tf.equal(symbol, utils.KEEP_ID)
        is_del = tf.equal(symbol, utils.DEL_ID)
        is_not_ins = tf.logical_or(is_keep, is_del)

        pos = beam_search.resize_like(pos, symbol)
        max_pos = beam_search.resize_like(max_pos, symbol)

        pos += tf.to_float(is_not_ins)
        if max_pos is not None:
            pos = tf.minimum(pos, tf.to_float(max_pos))
        return pos

    def generate(state, input_, context):
        # output projection: (state, [input_,] context) -> vocabulary logits
        if decoder.pred_use_lstm_state is False:  # for back-compatibility
            state = state[:,-decoder.cell_size:]

        projection_input = [state, context]
        if decoder.use_previous_word:
            projection_input.insert(1, input_)  # for back-compatibility

        output_ = tf.concat(projection_input, axis=1)

        if decoder.pred_deep_layer:
            deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size
            if decoder.layer_norm:
                output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output')
                output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh, scope='output_layer_norm')
            else:
                output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True, name='deep_output')

            if decoder.use_dropout:
                size = tf.shape(output_)[1]
                noise_shape = [1, size] if decoder.pervasive_dropout else None
                output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
        else:
            if decoder.pred_maxout_layer:
                maxout_size = decoder.maxout_size or decoder.cell_size
                output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
                if decoder.old_maxout:  # for back-compatibility with old models
                    output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX',
                                         padding='SAME', strides=[2])
                    output_ = tf.squeeze(output_, axis=2)
                else:
                    output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1))

            if decoder.pred_embed_proj:
                # intermediate projection to embedding size (before projecting to vocabulary size)
                # this is useful to reduce the number of parameters, and
                # to use the output embeddings for output projection (tie_embeddings parameter)
                output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')

        if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
            bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
            output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
        else:
            output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
        return output_

    state_size = (decoder.cell_size * 2 if decoder.cell_type.lower() == 'lstm' else decoder.cell_size) * decoder.layers

    if decoder.use_dropout:
        initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)

    with tf.variable_scope(scope_name):
        # project the encoder's final state to the decoder's state size
        if decoder.layer_norm:
            initial_state = dense(initial_state, state_size, use_bias=False, name='initial_state_projection')
            initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=tf.nn.tanh,
                                                         scope='initial_state_layer_norm')
        else:
            initial_state = dense(initial_state, state_size, use_bias=True, name='initial_state_projection',
                                  activation=tf.nn.tanh)

    if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
        initial_output = initial_state
    else:
        initial_output = initial_state[:, -decoder.cell_size:]

    time = tf.constant(0, dtype=tf.int32, name='time')
    outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
    samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
    inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))

    states = tf.TensorArray(dtype=tf.float32, size=time_steps)
    weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
    attns = tf.TensorArray(dtype=tf.float32, size=time_steps)

    initial_symbol = inputs.read(0)  # first symbol is BOS
    initial_input = embed(initial_symbol)
    initial_pos = tf.zeros([batch_size], tf.float32)
    initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
    initial_context, _ = look(initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights)
    initial_data = tf.concat([initial_state, initial_context, tf.expand_dims(initial_pos, axis=1), initial_weights],
                             axis=1)
    context_size = initial_context.shape[1].value

    def get_logits(state, ids, time):  # for beam-search decoding
        with tf.variable_scope('decoder_{}'.format(decoder.name)):
            # the flat beam-search state packs (rnn state, context, pos, prev_weights)
            state, context, pos, prev_weights = tf.split(state, [state_size, context_size, 1, -1], axis=1)

            input_ = embed(ids)
            pos = tf.squeeze(pos, axis=1)

            pos = tf.cond(tf.equal(time, 0),
                          lambda: pos,
                          lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))

            if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
                output = state
            else:
                # output is always the right-most part of state. However, this only works at test time,
                # because different dropout operations can be used on state and output.
                output = state[:, -decoder.cell_size:]

            if decoder.conditional_rnn:
                with tf.variable_scope('conditional_1'):
                    output, state = update(state, input_)
            elif decoder.update_first:
                output, state = update(state, input_, None, ids)
            elif decoder.generate_first:
                output, state = tf.cond(tf.equal(time, 0),
                                        lambda: (output, state),
                                        lambda: update(state, input_, context, ids))

            context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)

            if decoder.conditional_rnn:
                with tf.variable_scope('conditional_2'):
                    output, state = update(state, context)
            elif not decoder.generate_first:
                output, state = update(state, input_, context, ids)

            logits = generate(output, input_, context)

            pos = tf.expand_dims(pos, axis=1)
            state = tf.concat([state, context, pos, new_weights], axis=1)
            return state, logits

    def _time_step(time, input_, input_symbol, pos, state, output, outputs, states, weights, attns, prev_weights,
                   samples):
        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_1'):
                output, state = update(state, input_)
        elif decoder.update_first:
            output, state = update(state, input_, None, input_symbol)

        context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)

        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_2'):
                output, state = update(state, context)
        elif not decoder.generate_first:
            output, state = update(state, input_, context, input_symbol)

        output_ = generate(output, input_, context)

        argmax = lambda: tf.argmax(output_, 1)
        target = lambda: inputs.read(time + 1)
        # NOTE: this lambda intentionally shadows the module-level softmax() inside this function
        softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
                                     axis=1)

        # scheduled sampling: use the ground truth with probability (1 - feed_previous)
        use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
        predicted_symbol = tf.case([
            (use_target, target),
            (tf.logical_not(feed_argmax), softmax)],
            default=argmax)   # default case is useful for beam-search

        predicted_symbol.set_shape([None])
        predicted_symbol = tf.stop_gradient(predicted_symbol)

        samples = samples.write(time, predicted_symbol)
        input_ = embed(predicted_symbol)
        pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])

        attns = attns.write(time, context)
        weights = weights.write(time, new_weights)
        states = states.write(time, state)
        outputs = outputs.write(time, output_)

        if not decoder.conditional_rnn and not decoder.update_first and decoder.generate_first:
            output, state = update(state, input_, context, predicted_symbol)

        return (time + 1, input_, predicted_symbol, pos, state, output, outputs, states, weights, attns, new_weights,
                samples)

    with tf.variable_scope('decoder_{}'.format(decoder.name)):
        # FIX: loop_vars now follows the `_time_step` parameter order
        # (outputs, states, weights, attns). The previous ordering passed the
        # `weights` TensorArray into the `states` slot and vice versa; both arrays
        # are constructed identically so results were unaffected, but the mismatch
        # was misleading and fragile.
        _, _, _, new_pos, new_state, _, outputs, states, weights, attns, new_weights, samples = tf.while_loop(
            cond=lambda time, *_: time < time_steps,
            body=_time_step,
            loop_vars=(time, initial_input, initial_symbol, initial_pos, initial_state, initial_output, outputs,
                       states, weights, attns, initial_weights, samples),
            parallel_iterations=decoder.parallel_iterations,
            swap_memory=decoder.swap_memory)

    outputs = outputs.stack()
    weights = weights.stack()  # batch_size, encoders, output time, input time
    states = states.stack()
    attns = attns.stack()
    samples = samples.stack()

    # put batch_size as first dimension
    outputs = tf.transpose(outputs, perm=(1, 0, 2))
    weights = tf.transpose(weights, perm=(1, 0, 2))
    states = tf.transpose(states, perm=(1, 0, 2))
    attns = tf.transpose(attns, perm=(1, 0, 2))
    samples = tf.transpose(samples)

    return outputs, weights, states, attns, samples, get_logits, initial_data
def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0,
                    encoder_input_length=None, feed_argmax=True, **kwargs):
    """Build the full encoder-decoder graph and its cross-entropy training loss.

    :param encoders: list of encoder configurations
    :param decoders: list of decoder configurations (only the first one is used)
    :param encoder_inputs: list of int32 tensors (batch_size x input_length), one per encoder
    :param targets: list of int32 target tensors; only targets[0] is used
    :param feed_previous: probability of feeding the decoder its own previous output
        instead of the ground truth
    :param align_encoder_id: index of the encoder used for alignment (forwarded)
    :param encoder_input_length: optional list of per-sequence lengths; inferred
        from the EOS positions when None
    :param feed_argmax: use argmax (True) or sampling (False) for greedy decoding
    :return: (losses, [outputs], encoder_state, attention_states, attention_weights,
        samples, beam_fun, initial_data)
    """
    decoder = decoders[0]
    targets = targets[0]  # single decoder

    if encoder_input_length is None:
        # infer true sequence lengths from the EOS markers
        encoder_input_length = []
        for encoder_inputs_ in encoder_inputs:
            weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
            encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))

    parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs,
                      feed_argmax=feed_argmax)

    target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)

    attention_states, encoder_state, encoder_input_length = multi_encoder(
        encoder_input_length=encoder_input_length, **parameters)

    # decoder reads targets without the last token; the loss compares against
    # targets shifted by one (without the leading symbol)
    outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
        decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length,
        **parameters
    )

    xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=target_weights)
    losses = xent_loss
    return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def chained_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
                            chaining_strategy=None, align_encoder_id=0, chaining_non_linearity=False,
                            chaining_loss_ratio=1.0, chaining_stop_gradient=False, **kwargs):
    """Two-stage (chained) encoder-decoder: a first decoder reconstructs
    encoder_inputs[0] from encoder_inputs[1], and its internal states/attention
    are injected into a second encoder-decoder that predicts `targets`.

    :param encoders: exactly two encoder configurations
    :param decoders: list of decoder configurations (only the first one is used)
    :param encoder_inputs: list of two int32 input tensors
    :param targets: list of int32 target tensors; only targets[0] is used
    :param feed_previous: probability of feeding the decoder its own previous output
    :param chaining_strategy: how to pass stage-1 information to stage 2
        ('share_states', 'share_outputs', 'concat_attns', 'concat_states',
        'sum_attns', 'map_attns', 'map_states', 'map_outputs', or None)
    :param align_encoder_id: index of the encoder used for alignment (forwarded)
    :param chaining_non_linearity: apply tanh after the 'map_*' projection
    :param chaining_loss_ratio: weight of the stage-1 reconstruction loss in the total loss
    :param chaining_stop_gradient: stop gradients from flowing into stage 1
    :return: (losses, [outputs], encoder_state, attention_states, attention_weights,
        samples, beam_fun, initial_data)
    """
    decoder = decoders[0]
    targets = targets[0]  # single decoder

    assert len(encoders) == 2

    encoder_input_length = []
    input_weights = []
    for encoder_inputs_ in encoder_inputs:
        weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
        input_weights.append(weights)
        encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))

    target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)

    # stage 1: encoders[0] acts as the decoder configuration, reading encoders[1:]
    parameters = dict(encoders=encoders[1:], decoder=encoders[0])

    attention_states, encoder_state, encoder_input_length[1:] = multi_encoder(
        encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters)

    # build stage-1 decoder inputs: BOS followed by encoder_inputs[0] minus its last token
    decoder_inputs = encoder_inputs[0][:, :-1]
    batch_size = tf.shape(decoder_inputs)[0]
    pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
    decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)

    outputs, _, states, attns, _, _, _ = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs,
        encoder_input_length=encoder_input_length[1:], **parameters
    )

    # reconstruction loss of the first (chained) decoder
    chaining_loss = sequence_loss(logits=outputs, targets=encoder_inputs[0], weights=input_weights[0])

    if decoder.cell_type.lower() == 'lstm':
        # keep only the output half of the LSTM (cell state | output) pair
        size = states.get_shape()[2].value
        decoder_outputs = states[:, :, size // 2:]
    else:
        decoder_outputs = states

    if chaining_strategy == 'share_states':
        other_inputs = states
    elif chaining_strategy == 'share_outputs':
        other_inputs = decoder_outputs
    else:
        other_inputs = None

    if other_inputs is not None and chaining_stop_gradient:
        other_inputs = tf.stop_gradient(other_inputs)

    # stage 2: the real decoder reads encoders[:1], optionally fed stage-1 states
    parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1],
                      other_inputs=other_inputs)

    attention_states, encoder_state, encoder_input_length[:1] = multi_encoder(
        encoder_input_length=encoder_input_length[:1], **parameters)

    if chaining_stop_gradient:
        attns = tf.stop_gradient(attns)
        states = tf.stop_gradient(states)
        decoder_outputs = tf.stop_gradient(decoder_outputs)

    # inject stage-1 information into the stage-2 attention states
    if chaining_strategy == 'concat_attns':
        attention_states[0] = tf.concat([attention_states[0], attns], axis=2)
    elif chaining_strategy == 'concat_states':
        attention_states[0] = tf.concat([attention_states[0], states], axis=2)
    elif chaining_strategy == 'sum_attns':
        attention_states[0] += attns
    elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'):
        if chaining_strategy == 'map_attns':
            x = attns
        elif chaining_strategy == 'map_outputs':
            x = decoder_outputs
        else:
            x = states

        shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]]

        w = tf.get_variable("map_attns/matrix", shape=shape)
        b = tf.get_variable("map_attns/bias", shape=shape[-1:])

        x = tf.einsum('ijk,kl->ijl', x, w) + b

        if chaining_non_linearity:
            x = tf.nn.tanh(x)

        attention_states[0] += x

    outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state,
        feed_previous=feed_previous, decoder_inputs=targets[:,:-1],
        align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length[:1],
        **parameters
    )

    xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:],
                              weights=target_weights)

    if chaining_loss is not None and chaining_loss_ratio:
        xent_loss += chaining_loss_ratio * chaining_loss

    losses = [xent_loss, None, None]
    return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def softmax(logits, dim=-1, mask=None):
    """Softmax over `dim`, optionally zeroing masked positions before normalizing.

    The denominator is clipped away from zero so that a fully-masked row does
    not produce a division by zero.
    """
    exp_logits = tf.exp(logits)
    if mask is not None:
        exp_logits = exp_logits * mask
    denominator = tf.clip_by_value(tf.reduce_sum(exp_logits, axis=dim, keep_dims=True), 10e-37, 10e+37)
    return exp_logits / denominator
def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True):
    """Weighted cross-entropy loss over a batch of token sequences.

    :param logits: float tensor (batch x time x vocab)
    :param targets: int tensor (batch x time) of gold token ids
    :param weights: float tensor (batch x time), typically 1 on real tokens, 0 on padding
    :param average_across_timesteps: divide each sequence loss by its total weight
    :param average_across_batch: divide the summed loss by the batch size
    :return: scalar loss tensor
    """
    n_batch = tf.shape(targets)[0]
    n_time = tf.shape(targets)[1]
    vocab_size = logits.get_shape()[2].value

    # flatten to (batch*time) so the per-token cross-entropy op can be applied once
    flat_logits = tf.reshape(logits, tf.stack([n_time * n_batch, vocab_size]))
    flat_targets = tf.reshape(targets, tf.stack([n_time * n_batch]))
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=flat_logits, labels=flat_targets)
    xent = tf.reshape(xent, tf.stack([n_batch, n_time]))

    log_perp = tf.reduce_sum(xent * weights, axis=1)
    if average_across_timesteps:
        # epsilon avoids division by 0 for all-zero weight rows
        denom = tf.reduce_sum(weights, axis=1) + 1e-12
        log_perp = log_perp / denom

    cost = tf.reduce_sum(log_perp)
    if not average_across_batch:
        return cost
    return cost / tf.to_float(n_batch)
def get_weights(sequence, eos_id, include_first_eos=True):
    """Binary float mask marking the valid prefix (tokens before EOS) of each sequence.

    A position is valid while every token up to and including it differs from
    `eos_id`. With `include_first_eos`, the mask is shifted right by one column
    and a column of ones is prepended, so the first EOS token itself also
    receives weight 1. Gradients are stopped through the result.
    """
    non_eos = tf.to_float(tf.not_equal(sequence, eos_id))
    running_count = tf.cumsum(non_eos, axis=1)

    n_batch = tf.shape(sequence)[0]
    max_time = tf.shape(sequence)[1]
    positions = tf.range(start=1, limit=max_time + 1)
    positions = tf.tile(tf.expand_dims(positions, axis=0), [n_batch, 1])

    # position i (1-based) is valid iff all i tokens so far are non-EOS
    mask = tf.to_float(tf.equal(running_count, tf.to_float(positions)))

    if include_first_eos:
        shifted = mask[:, :-1]
        ones_column = tf.ones(tf.stack([tf.shape(shifted)[0], 1]))
        mask = tf.concat([ones_column, shifted], axis=1)

    return tf.stop_gradient(mask)
| [
"tensorflow.truediv",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.tanh",
"tensorflow.multiply",
"math.sqrt",
"tensorflow.logical_not",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits... | [((12334, 12362), 'tensorflow.concat', 'tf.concat', (['encoder_states', '(1)'], {}), '(encoder_states, 1)\n', (12343, 12362), True, 'import tensorflow as tf\n'), ((13854, 13879), 'tensorflow.expand_dims', 'tf.expand_dims', (['hidden', '(2)'], {}), '(hidden, 2)\n', (13868, 13879), True, 'import tensorflow as tf\n'), ((14269, 14326), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['prev_weights', 'filter_', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(prev_weights, filter_, [1, 1, 1, 1], 'SAME')\n", (14281, 14326), True, 'import tensorflow as tf\n'), ((14412, 14435), 'tensorflow.reshape', 'tf.reshape', (['conv', 'shape'], {}), '(conv, shape)\n', (14422, 14435), True, 'import tensorflow as tf\n'), ((14444, 14462), 'tensorflow.matmul', 'tf.matmul', (['conv', 'u'], {}), '(conv, u)\n', (14453, 14462), True, 'import tensorflow as tf\n'), ((14601, 14637), 'tensorflow.reshape', 'tf.reshape', (['y', '[-1, 1, 1, attn_size]'], {}), '(y, [-1, 1, 1, attn_size])\n', (14611, 14637), True, 'import tensorflow as tf\n'), ((14842, 14862), 'tensorflow.matmul', 'tf.matmul', (['hidden', 'k'], {}), '(hidden, k)\n', (14851, 14862), True, 'import tensorflow as tf\n'), ((17322, 17342), 'tensorflow.to_float', 'tf.to_float', (['weights'], {}), '(weights)\n', (17333, 17342), True, 'import tensorflow as tf\n'), ((25159, 25183), 'tensorflow.shape', 'tf.shape', (['decoder_inputs'], {}), '(decoder_inputs)\n', (25167, 25183), True, 'import tensorflow as tf\n'), ((33809, 33852), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32', 'name': '"""time"""'}), "(0, dtype=tf.int32, name='time')\n", (33820, 33852), True, 'import tensorflow as tf\n'), ((33867, 33916), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), '(dtype=tf.float32, size=time_steps)\n', (33881, 33916), True, 'import tensorflow as tf\n'), ((33931, 33978), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.int64', 
'size': 'time_steps'}), '(dtype=tf.int64, size=time_steps)\n', (33945, 33978), True, 'import tensorflow as tf\n'), ((34105, 34154), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), '(dtype=tf.float32, size=time_steps)\n', (34119, 34154), True, 'import tensorflow as tf\n'), ((34169, 34218), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), '(dtype=tf.float32, size=time_steps)\n', (34183, 34218), True, 'import tensorflow as tf\n'), ((34231, 34280), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), '(dtype=tf.float32, size=time_steps)\n', (34245, 34280), True, 'import tensorflow as tf\n'), ((34401, 34435), 'tensorflow.zeros', 'tf.zeros', (['[batch_size]', 'tf.float32'], {}), '([batch_size], tf.float32)\n', (34409, 34435), True, 'import tensorflow as tf\n'), ((39711, 39748), 'tensorflow.transpose', 'tf.transpose', (['outputs'], {'perm': '(1, 0, 2)'}), '(outputs, perm=(1, 0, 2))\n', (39723, 39748), True, 'import tensorflow as tf\n'), ((39763, 39800), 'tensorflow.transpose', 'tf.transpose', (['weights'], {'perm': '(1, 0, 2)'}), '(weights, perm=(1, 0, 2))\n', (39775, 39800), True, 'import tensorflow as tf\n'), ((39814, 39850), 'tensorflow.transpose', 'tf.transpose', (['states'], {'perm': '(1, 0, 2)'}), '(states, perm=(1, 0, 2))\n', (39826, 39850), True, 'import tensorflow as tf\n'), ((39863, 39898), 'tensorflow.transpose', 'tf.transpose', (['attns'], {'perm': '(1, 0, 2)'}), '(attns, perm=(1, 0, 2))\n', (39875, 39898), True, 'import tensorflow as tf\n'), ((39913, 39934), 'tensorflow.transpose', 'tf.transpose', (['samples'], {}), '(samples)\n', (39925, 39934), True, 'import tensorflow as tf\n'), ((42699, 42739), 'tensorflow.concat', 'tf.concat', (['[pad, decoder_inputs]'], {'axis': '(1)'}), '([pad, decoder_inputs], axis=1)\n', (42708, 42739), True, 'import tensorflow as tf\n'), ((45776, 45790), 'tensorflow.exp', 'tf.exp', (['logits'], 
{}), '(logits)\n', (45782, 45790), True, 'import tensorflow as tf\n'), ((46297, 46376), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits_', 'labels': 'targets_'}), '(logits=logits_, labels=targets_)\n', (46343, 46376), True, 'import tensorflow as tf\n'), ((46466, 46507), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(crossent * weights)'], {'axis': '(1)'}), '(crossent * weights, axis=1)\n', (46479, 46507), True, 'import tensorflow as tf\n'), ((46714, 46737), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_perp'], {}), '(log_perp)\n', (46727, 46737), True, 'import tensorflow as tf\n'), ((47378, 47403), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['weights'], {}), '(weights)\n', (47394, 47403), True, 'import tensorflow as tf\n'), ((1444, 1509), 'tensorflow.split', 'tf.split', ([], {'value': 'state', 'num_or_size_splits': 'self.num_splits', 'axis': '(1)'}), '(value=state, num_or_size_splits=self.num_splits, axis=1)\n', (1452, 1509), True, 'import tensorflow as tf\n'), ((12723, 12800), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['state'], {'keep_prob': 'attn_keep_prob', 'noise_shape': 'state_noise_shape'}), '(state, keep_prob=attn_keep_prob, noise_shape=state_noise_shape)\n', (12736, 12800), True, 'import tensorflow as tf\n'), ((12906, 12985), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden'], {'keep_prob': 'attn_keep_prob', 'noise_shape': 'hidden_noise_shape'}), '(hidden, keep_prob=attn_keep_prob, noise_shape=hidden_noise_shape)\n', (12919, 12985), True, 'import tensorflow as tf\n'), ((13163, 13201), 'tensorflow.einsum', 'tf.einsum', (['"""ijk,ik->ij"""', 'hidden', 'state'], {}), "('ijk,ik->ij', hidden, state)\n", (13172, 13201), True, 'import tensorflow as tf\n'), ((13297, 13322), 'tensorflow.expand_dims', 'tf.expand_dims', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (13311, 13322), True, 'import tensorflow as tf\n'), ((13898, 13914), 'tensorflow.shape', 'tf.shape', 
(['hidden'], {}), '(hidden)\n', (13906, 13914), True, 'import tensorflow as tf\n'), ((13935, 13951), 'tensorflow.shape', 'tf.shape', (['hidden'], {}), '(hidden)\n', (13943, 13951), True, 'import tensorflow as tf\n'), ((14216, 14256), 'tensorflow.stack', 'tf.stack', (['[batch_size, time_steps, 1, 1]'], {}), '([batch_size, time_steps, 1, 1])\n', (14224, 14256), True, 'import tensorflow as tf\n'), ((14485, 14533), 'tensorflow.stack', 'tf.stack', (['[batch_size, time_steps, 1, attn_size]'], {}), '([batch_size, time_steps, 1, attn_size])\n', (14493, 14533), True, 'import tensorflow as tf\n'), ((14885, 14933), 'tensorflow.stack', 'tf.stack', (['[batch_size, time_steps, 1, attn_size]'], {}), '([batch_size, time_steps, 1, attn_size])\n', (14893, 14933), True, 'import tensorflow as tf\n'), ((15973, 16013), 'tensorflow.reduce_max', 'tf.reduce_max', (['e'], {'axis': '(1)', 'keep_dims': '(True)'}), '(e, axis=1, keep_dims=True)\n', (15986, 16013), True, 'import tensorflow as tf\n'), ((16480, 16495), 'tensorflow.shape', 'tf.shape', (['state'], {}), '(state)\n', (16488, 16495), True, 'import tensorflow as tf\n'), ((16843, 16887), 'tensorflow.expand_dims', 'tf.expand_dims', (['encoder_input_length'], {'axis': '(1)'}), '(encoder_input_length, axis=1)\n', (16857, 16887), True, 'import tensorflow as tf\n'), ((16988, 17005), 'tensorflow.to_float', 'tf.to_float', (['mask'], {}), '(mask)\n', (16999, 17005), True, 'import tensorflow as tf\n'), ((17634, 17649), 'tensorflow.shape', 'tf.shape', (['state'], {}), '(state)\n', (17642, 17649), True, 'import tensorflow as tf\n'), ((17671, 17694), 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), '(hidden_states)\n', (17679, 17694), True, 'import tensorflow as tf\n'), ((17767, 17802), 'tensorflow.concat', 'tf.concat', (['[state, context]'], {'axis': '(1)'}), '([state, context], axis=1)\n', (17776, 17802), True, 'import tensorflow as tf\n'), ((21985, 22023), 'beam_search.resize_like', 'beam_search.resize_like', (['hidden', 'state'], {}), 
'(hidden, state)\n', (22008, 22023), False, 'import utils, beam_search\n'), ((22047, 22091), 'beam_search.resize_like', 'beam_search.resize_like', (['input_length', 'state'], {}), '(input_length, state)\n', (22070, 22091), False, 'import utils, beam_search\n'), ((22601, 22625), 'tensorflow.concat', 'tf.concat', (['attns'], {'axis': '(1)'}), '(attns, axis=1)\n', (22610, 22625), True, 'import tensorflow as tf\n'), ((25003, 25020), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (25012, 25020), True, 'import tensorflow as tf\n'), ((25420, 25461), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'input_'], {}), '(embedding, input_)\n', (25442, 25461), True, 'import tensorflow as tf\n'), ((29849, 29880), 'tensorflow.equal', 'tf.equal', (['symbol', 'utils.KEEP_ID'], {}), '(symbol, utils.KEEP_ID)\n', (29857, 29880), True, 'import tensorflow as tf\n'), ((29898, 29928), 'tensorflow.equal', 'tf.equal', (['symbol', 'utils.DEL_ID'], {}), '(symbol, utils.DEL_ID)\n', (29906, 29928), True, 'import tensorflow as tf\n'), ((29950, 29980), 'tensorflow.logical_or', 'tf.logical_or', (['is_keep', 'is_del'], {}), '(is_keep, is_del)\n', (29963, 29980), True, 'import tensorflow as tf\n'), ((29996, 30032), 'beam_search.resize_like', 'beam_search.resize_like', (['pos', 'symbol'], {}), '(pos, symbol)\n', (30019, 30032), False, 'import utils, beam_search\n'), ((30051, 30091), 'beam_search.resize_like', 'beam_search.resize_like', (['max_pos', 'symbol'], {}), '(max_pos, symbol)\n', (30074, 30091), False, 'import utils, beam_search\n'), ((30108, 30131), 'tensorflow.to_float', 'tf.to_float', (['is_not_ins'], {}), '(is_not_ins)\n', (30119, 30131), True, 'import tensorflow as tf\n'), ((30581, 30616), 'tensorflow.concat', 'tf.concat', (['projection_input'], {'axis': '(1)'}), '(projection_input, axis=1)\n', (30590, 30616), True, 'import tensorflow as tf\n'), ((32985, 33056), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['initial_state'], {'keep_prob': 
'decoder.initial_state_keep_prob'}), '(initial_state, keep_prob=decoder.initial_state_keep_prob)\n', (32998, 33056), True, 'import tensorflow as tf\n'), ((33067, 33096), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (33084, 33096), True, 'import tensorflow as tf\n'), ((38188, 38222), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['predicted_symbol'], {}), '(predicted_symbol)\n', (38204, 38222), True, 'import tensorflow as tf\n'), ((39072, 39394), 'tensorflow.while_loop', 'tf.while_loop', ([], {'cond': '(lambda time, *_: time < time_steps)', 'body': '_time_step', 'loop_vars': '(time, initial_input, initial_symbol, initial_pos, initial_state,\n initial_output, outputs, weights, states, attns, initial_weights, samples)', 'parallel_iterations': 'decoder.parallel_iterations', 'swap_memory': 'decoder.swap_memory'}), '(cond=lambda time, *_: time < time_steps, body=_time_step,\n loop_vars=(time, initial_input, initial_symbol, initial_pos,\n initial_state, initial_output, outputs, weights, states, attns,\n initial_weights, samples), parallel_iterations=decoder.\n parallel_iterations, swap_memory=decoder.swap_memory)\n', (39085, 39394), True, 'import tensorflow as tf\n'), ((42567, 42591), 'tensorflow.shape', 'tf.shape', (['decoder_inputs'], {}), '(decoder_inputs)\n', (42575, 42591), True, 'import tensorflow as tf\n'), ((43547, 43577), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['other_inputs'], {}), '(other_inputs)\n', (43563, 43577), True, 'import tensorflow as tf\n'), ((43922, 43945), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['attns'], {}), '(attns)\n', (43938, 43945), True, 'import tensorflow as tf\n'), ((43963, 43987), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['states'], {}), '(states)\n', (43979, 43987), True, 'import tensorflow as tf\n'), ((44014, 44047), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['decoder_outputs'], {}), '(decoder_outputs)\n', (44030, 44047), True, 'import tensorflow as 
tf\n'), ((44123, 44170), 'tensorflow.concat', 'tf.concat', (['[attention_states[0], attns]'], {'axis': '(2)'}), '([attention_states[0], attns], axis=2)\n', (44132, 44170), True, 'import tensorflow as tf\n'), ((46050, 46067), 'tensorflow.shape', 'tf.shape', (['targets'], {}), '(targets)\n', (46058, 46067), True, 'import tensorflow as tf\n'), ((46088, 46105), 'tensorflow.shape', 'tf.shape', (['targets'], {}), '(targets)\n', (46096, 46105), True, 'import tensorflow as tf\n'), ((46244, 46279), 'tensorflow.stack', 'tf.stack', (['[time_steps * batch_size]'], {}), '([time_steps * batch_size])\n', (46252, 46279), True, 'import tensorflow as tf\n'), ((46413, 46447), 'tensorflow.stack', 'tf.stack', (['[batch_size, time_steps]'], {}), '([batch_size, time_steps])\n', (46421, 46447), True, 'import tensorflow as tf\n'), ((46563, 46593), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), '(weights, axis=1)\n', (46576, 46593), True, 'import tensorflow as tf\n'), ((47066, 47096), 'tensorflow.expand_dims', 'tf.expand_dims', (['range_'], {'axis': '(0)'}), '(range_, axis=0)\n', (47080, 47096), True, 'import tensorflow as tf\n'), ((1597, 1620), 'tensorflow.concat', 'tf.concat', (['new_state', '(1)'], {}), '(new_state, 1)\n', (1606, 1620), True, 'import tensorflow as tf\n'), ((3404, 3421), 'tensorflow.device', 'tf.device', (['device'], {}), '(device)\n', (3413, 3421), True, 'import tensorflow as tf\n'), ((10578, 10643), 'tensorflow.gather_nd', 'tf.gather_nd', (['encoder_outputs_[:, :, :encoder.cell_size]', 'indices'], {}), '(encoder_outputs_[:, :, :encoder.cell_size], indices)\n', (10590, 10643), True, 'import tensorflow as tf\n'), ((13363, 13420), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['y'], {'scope': '"""layer_norm_state"""'}), "(y, scope='layer_norm_state')\n", (13391, 13420), True, 'import tensorflow as tf\n'), ((13442, 13519), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['hidden'], {'center': 
'(False)', 'scope': '"""layer_norm_hidden"""'}), "(hidden, center=False, scope='layer_norm_hidden')\n", (13470, 13519), True, 'import tensorflow as tf\n'), ((14349, 14384), 'tensorflow.multiply', 'tf.multiply', (['batch_size', 'time_steps'], {}), '(batch_size, time_steps)\n', (14360, 14384), True, 'import tensorflow as tf\n'), ((15022, 15032), 'tensorflow.tanh', 'tf.tanh', (['s'], {}), '(s)\n', (15029, 15032), True, 'import tensorflow as tf\n'), ((15305, 15340), 'tensorflow.concat', 'tf.concat', (['[state, context]'], {'axis': '(1)'}), '([state, context], axis=1)\n', (15314, 15340), True, 'import tensorflow as tf\n'), ((16180, 16193), 'tensorflow.exp', 'tf.exp', (['(e / T)'], {}), '(e / T)\n', (16186, 16193), True, 'import tensorflow as tf\n'), ((16225, 16268), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exp'], {'axis': '(-1)', 'keep_dims': '(True)'}), '(exp, axis=-1, keep_dims=True)\n', (16238, 16268), True, 'import tensorflow as tf\n'), ((16537, 16562), 'tensorflow.stack', 'tf.stack', (['[batch_size, 0]'], {}), '([batch_size, 0])\n', (16545, 16562), True, 'import tensorflow as tf\n'), ((17069, 17100), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), '(weights, axis=2)\n', (17083, 17100), True, 'import tensorflow as tf\n'), ((17280, 17303), 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), '(hidden_states)\n', (17288, 17303), True, 'import tensorflow as tf\n'), ((17397, 17428), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), '(weights, axis=2)\n', (17411, 17428), True, 'import tensorflow as tf\n'), ((17966, 18010), 'tensorflow.expand_dims', 'tf.expand_dims', (['encoder_input_length'], {'axis': '(1)'}), '(encoder_input_length, axis=1)\n', (17980, 18010), True, 'import tensorflow as tf\n'), ((18059, 18083), 'tensorflow.reshape', 'tf.reshape', (['pos', '[-1, 1]'], {}), '(pos, [-1, 1])\n', (18069, 18083), True, 'import tensorflow as tf\n'), ((18102, 18143), 'tensorflow.minimum', 'tf.minimum', (['pos', 
'(encoder_input_length - 1)'], {}), '(pos, encoder_input_length - 1)\n', (18112, 18143), True, 'import tensorflow as tf\n'), ((19148, 19183), 'tensorflow.concat', 'tf.concat', (['weighted_average'], {'axis': '(1)'}), '(weighted_average, axis=1)\n', (19157, 19183), True, 'import tensorflow as tf\n'), ((22533, 22556), 'tensorflow.stack', 'tf.stack', (['attns'], {'axis': '(2)'}), '(attns, axis=2)\n', (22541, 22556), True, 'import tensorflow as tf\n'), ((24881, 24893), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (24890, 24893), False, 'import math\n'), ((25646, 25739), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['embedded_input'], {'keep_prob': 'decoder.word_keep_prob', 'noise_shape': 'noise_shape'}), '(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape\n =noise_shape)\n', (25659, 25739), True, 'import tensorflow as tf\n'), ((25976, 26073), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['embedded_input'], {'keep_prob': 'decoder.embedding_keep_prob', 'noise_shape': 'noise_shape'}), '(embedded_input, keep_prob=decoder.embedding_keep_prob,\n noise_shape=noise_shape)\n', (25989, 26073), True, 'import tensorflow as tf\n'), ((27941, 27975), 'tensorflow.concat', 'tf.concat', (['[state, input_]'], {'axis': '(1)'}), '([state, input_], axis=1)\n', (27950, 27975), True, 'import tensorflow as tf\n'), ((28878, 28914), 'tensorflow.concat', 'tf.concat', (['[input_, context]'], {'axis': '(1)'}), '([input_, context], axis=1)\n', (28887, 28914), True, 'import tensorflow as tf\n'), ((28987, 29021), 'rnn.CellInitializer', 'CellInitializer', (['decoder.cell_size'], {}), '(decoder.cell_size)\n', (29002, 29021), False, 'from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell\n'), ((29488, 29518), 'tensorflow.equal', 'tf.equal', (['symbol', 'utils.DEL_ID'], {}), '(symbol, utils.DEL_ID)\n', (29496, 29518), True, 'import tensorflow as tf\n'), ((29543, 29577), 'tensorflow.where', 'tf.where', (['is_del', 'state', 'new_state'], {}), '(is_del, state, 
new_state)\n', (29551, 29577), True, 'import tensorflow as tf\n'), ((33267, 33375), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['initial_state'], {'activation_fn': 'tf.nn.tanh', 'scope': '"""initial_state_layer_norm"""'}), "(initial_state, activation_fn=tf.nn.tanh, scope\n ='initial_state_layer_norm')\n", (33295, 33375), True, 'import tensorflow as tf\n'), ((33992, 34039), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.int64', 'size': 'time_steps'}), '(dtype=tf.int64, size=time_steps)\n', (34006, 34039), True, 'import tensorflow as tf\n'), ((34060, 34088), 'tensorflow.transpose', 'tf.transpose', (['decoder_inputs'], {}), '(decoder_inputs)\n', (34072, 34088), True, 'import tensorflow as tf\n'), ((34467, 34511), 'tensorflow.shape', 'tf.shape', (['attention_states[align_encoder_id]'], {}), '(attention_states[align_encoder_id])\n', (34475, 34511), True, 'import tensorflow as tf\n'), ((34687, 34722), 'tensorflow.expand_dims', 'tf.expand_dims', (['initial_pos'], {'axis': '(1)'}), '(initial_pos, axis=1)\n', (34701, 34722), True, 'import tensorflow as tf\n'), ((35011, 35069), 'tensorflow.split', 'tf.split', (['state', '[state_size, context_size, 1, -1]'], {'axis': '(1)'}), '(state, [state_size, context_size, 1, -1], axis=1)\n', (35019, 35069), True, 'import tensorflow as tf\n'), ((35121, 35144), 'tensorflow.squeeze', 'tf.squeeze', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (35131, 35144), True, 'import tensorflow as tf\n'), ((36654, 36681), 'tensorflow.expand_dims', 'tf.expand_dims', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (36668, 36681), True, 'import tensorflow as tf\n'), ((36702, 36755), 'tensorflow.concat', 'tf.concat', (['[state, context, pos, new_weights]'], {'axis': '(1)'}), '([state, context, pos, new_weights], axis=1)\n', (36711, 36755), True, 'import tensorflow as tf\n'), ((37608, 37629), 'tensorflow.argmax', 'tf.argmax', (['output_', '(1)'], {}), '(output_, 1)\n', (37617, 37629), True, 'import tensorflow 
as tf\n'), ((44248, 44296), 'tensorflow.concat', 'tf.concat', (['[attention_states[0], states]'], {'axis': '(2)'}), '([attention_states[0], states], axis=2)\n', (44257, 44296), True, 'import tensorflow as tf\n'), ((45867, 45909), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['e'], {'axis': 'dim', 'keep_dims': '(True)'}), '(e, axis=dim, keep_dims=True)\n', (45880, 45909), True, 'import tensorflow as tf\n'), ((46790, 46813), 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), '(batch_size)\n', (46801, 46813), True, 'import tensorflow as tf\n'), ((46940, 46970), 'tensorflow.not_equal', 'tf.not_equal', (['sequence', 'eos_id'], {}), '(sequence, eos_id)\n', (46952, 46970), True, 'import tensorflow as tf\n'), ((47169, 47188), 'tensorflow.to_float', 'tf.to_float', (['range_'], {}), '(range_)\n', (47180, 47188), True, 'import tensorflow as tf\n'), ((3266, 3278), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (3275, 3278), False, 'import math\n'), ((5393, 5418), 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), '(encoder_inputs_)\n', (5401, 5418), True, 'import tensorflow as tf\n'), ((5447, 5472), 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), '(encoder_inputs_)\n', (5455, 5472), True, 'import tensorflow as tf\n'), ((5642, 5688), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'flat_inputs'], {}), '(embedding, flat_inputs)\n', (5664, 5688), True, 'import tensorflow as tf\n'), ((5938, 5988), 'tensorflow.concat', 'tf.concat', (['[encoder_inputs_, other_inputs]'], {'axis': '(2)'}), '([encoder_inputs_, other_inputs], axis=2)\n', (5947, 5988), True, 'import tensorflow as tf\n'), ((6171, 6264), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_inputs_'], {'keep_prob': 'encoder.word_keep_prob', 'noise_shape': 'noise_shape'}), '(encoder_inputs_, keep_prob=encoder.word_keep_prob,\n noise_shape=noise_shape)\n', (6184, 6264), True, 'import tensorflow as tf\n'), ((6504, 6602), 'tensorflow.nn.dropout', 'tf.nn.dropout', 
(['encoder_inputs_'], {'keep_prob': 'encoder.embedding_keep_prob', 'noise_shape': 'noise_shape'}), '(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,\n noise_shape=noise_shape)\n', (6517, 6602), True, 'import tensorflow as tf\n'), ((10857, 10891), 'tensorflow.concat', 'tf.concat', (['encoder_states_'], {'axis': '(1)'}), '(encoder_states_, axis=1)\n', (10866, 10891), True, 'import tensorflow as tf\n'), ((13683, 13693), 'tensorflow.tanh', 'tf.tanh', (['s'], {}), '(s)\n', (13690, 13693), True, 'import tensorflow as tf\n'), ((14784, 14819), 'tensorflow.multiply', 'tf.multiply', (['batch_size', 'time_steps'], {}), '(batch_size, time_steps)\n', (14795, 14819), True, 'import tensorflow as tf\n'), ((16310, 16336), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights', '(2)'], {}), '(weights, 2)\n', (16324, 16336), True, 'import tensorflow as tf\n'), ((16946, 16969), 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), '(hidden_states)\n', (16954, 16969), True, 'import tensorflow as tf\n'), ((18712, 18754), 'tensorflow.minimum', 'tf.minimum', (['pos_', '(encoder_input_length - 1)'], {}), '(pos_, encoder_input_length - 1)\n', (18722, 18754), True, 'import tensorflow as tf\n'), ((18778, 18797), 'tensorflow.maximum', 'tf.maximum', (['pos_', '(0)'], {}), '(pos_, 0)\n', (18788, 18797), True, 'import tensorflow as tf\n'), ((19802, 19838), 'tensorflow.floor', 'tf.floor', (['(encoder_input_length * pos)'], {}), '(encoder_input_length * pos)\n', (19810, 19838), True, 'import tensorflow as tf\n'), ((19857, 19881), 'tensorflow.reshape', 'tf.reshape', (['pos', '[-1, 1]'], {}), '(pos, [-1, 1])\n', (19867, 19881), True, 'import tensorflow as tf\n'), ((19900, 19941), 'tensorflow.minimum', 'tf.minimum', (['pos', '(encoder_input_length - 1)'], {}), '(pos, encoder_input_length - 1)\n', (19910, 19941), True, 'import tensorflow as tf\n'), ((20047, 20081), 'tensorflow.reshape', 'tf.reshape', (['idx', '[-1, attn_length]'], {}), '(idx, [-1, attn_length])\n', (20057, 20081), True, 
'import tensorflow as tf\n'), ((20202, 20224), 'tensorflow.to_float', 'tf.to_float', (['(idx < low)'], {}), '(idx < low)\n', (20213, 20224), True, 'import tensorflow as tf\n'), ((20245, 20268), 'tensorflow.to_float', 'tf.to_float', (['(idx > high)'], {}), '(idx > high)\n', (20256, 20268), True, 'import tensorflow as tf\n'), ((20315, 20355), 'tensorflow.to_float', 'tf.to_float', (['(idx >= encoder_input_length)'], {}), '(idx >= encoder_input_length)\n', (20326, 20355), True, 'import tensorflow as tf\n'), ((20700, 20737), 'tensorflow.truediv', 'tf.truediv', (['numerator', '(2 * sigma ** 2)'], {}), '(numerator, 2 * sigma ** 2)\n', (20710, 20737), True, 'import tensorflow as tf\n'), ((20761, 20772), 'tensorflow.exp', 'tf.exp', (['div'], {}), '(div)\n', (20767, 20772), True, 'import tensorflow as tf\n'), ((24867, 24879), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (24876, 24879), False, 'import math\n'), ((25830, 25854), 'tensorflow.shape', 'tf.shape', (['embedded_input'], {}), '(embedded_input)\n', (25838, 25854), True, 'import tensorflow as tf\n'), ((27027, 27288), 'tensorflow.contrib.rnn.DropoutWrapper', 'DropoutWrapper', (['cell'], {'input_keep_prob': 'decoder.rnn_input_keep_prob', 'output_keep_prob': 'decoder.rnn_output_keep_prob', 'state_keep_prob': 'decoder.rnn_state_keep_prob', 'variational_recurrent': 'decoder.pervasive_dropout', 'dtype': 'tf.float32', 'input_size': 'input_size_'}), '(cell, input_keep_prob=decoder.rnn_input_keep_prob,\n output_keep_prob=decoder.rnn_output_keep_prob, state_keep_prob=decoder.\n rnn_state_keep_prob, variational_recurrent=decoder.pervasive_dropout,\n dtype=tf.float32, input_size=input_size_)\n', (27041, 27288), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), ((27561, 27580), 'tensorflow.contrib.rnn.MultiRNNCell', 'MultiRNNCell', (['cells'], {}), '(cells)\n', (27573, 27580), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), 
((28329, 28358), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (28346, 28358), True, 'import tensorflow as tf\n'), ((29090, 29113), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (29111, 29113), True, 'import tensorflow as tf\n'), ((30198, 30218), 'tensorflow.to_float', 'tf.to_float', (['max_pos'], {}), '(max_pos)\n', (30209, 30218), True, 'import tensorflow as tf\n'), ((30894, 30989), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['output_'], {'activation_fn': 'tf.nn.tanh', 'scope': '"""output_layer_norm"""'}), "(output_, activation_fn=tf.nn.tanh, scope=\n 'output_layer_norm')\n", (30922, 30989), True, 'import tensorflow as tf\n'), ((31302, 31394), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_'], {'keep_prob': 'decoder.deep_layer_keep_prob', 'noise_shape': 'noise_shape'}), '(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=\n noise_shape)\n', (31315, 31394), True, 'import tensorflow as tf\n'), ((35171, 35188), 'tensorflow.equal', 'tf.equal', (['time', '(0)'], {}), '(time, 0)\n', (35179, 35188), True, 'import tensorflow as tf\n'), ((36986, 37020), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_1"""'], {}), "('conditional_1')\n", (37003, 37020), True, 'import tensorflow as tf\n'), ((37324, 37358), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_2"""'], {}), "('conditional_2')\n", (37341, 37358), True, 'import tensorflow as tf\n'), ((37882, 37903), 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {}), '([])\n', (37899, 37903), True, 'import tensorflow as tf\n'), ((42145, 42175), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), '(weights, axis=1)\n', (42158, 42175), True, 'import tensorflow as tf\n'), ((42620, 42645), 'tensorflow.stack', 'tf.stack', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (42628, 42645), True, 'import tensorflow as tf\n'), ((47099, 47117), 
'tensorflow.shape', 'tf.shape', (['sequence'], {}), '(sequence)\n', (47107, 47117), True, 'import tensorflow as tf\n'), ((47268, 47285), 'tensorflow.shape', 'tf.shape', (['weights'], {}), '(weights)\n', (47276, 47285), True, 'import tensorflow as tf\n'), ((3252, 3264), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (3261, 3264), False, 'import math\n'), ((4875, 5135), 'tensorflow.contrib.rnn.DropoutWrapper', 'DropoutWrapper', (['cell'], {'input_keep_prob': 'encoder.rnn_input_keep_prob', 'output_keep_prob': 'encoder.rnn_output_keep_prob', 'state_keep_prob': 'encoder.rnn_state_keep_prob', 'variational_recurrent': 'encoder.pervasive_dropout', 'dtype': 'tf.float32', 'input_size': 'input_size'}), '(cell, input_keep_prob=encoder.rnn_input_keep_prob,\n output_keep_prob=encoder.rnn_output_keep_prob, state_keep_prob=encoder.\n rnn_state_keep_prob, variational_recurrent=encoder.pervasive_dropout,\n dtype=tf.float32, input_size=input_size)\n', (4889, 5135), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), ((6333, 6358), 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), '(encoder_inputs_)\n', (6341, 6358), True, 'import tensorflow as tf\n'), ((9079, 9113), 'rnn.CellInitializer', 'CellInitializer', (['encoder.cell_size'], {}), '(encoder.cell_size)\n', (9094, 9113), False, 'from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell\n'), ((10493, 10513), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (10501, 10513), True, 'import tensorflow as tf\n'), ((11085, 11113), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (11099, 11113), True, 'import tensorflow as tf\n'), ((12656, 12671), 'tensorflow.shape', 'tf.shape', (['state'], {}), '(state)\n', (12664, 12671), True, 'import tensorflow as tf\n'), ((12837, 12853), 'tensorflow.shape', 'tf.shape', (['hidden'], {}), '(hidden)\n', (12845, 12853), True, 'import tensorflow as 
tf\n'), ((16075, 16098), 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), '(hidden_states)\n', (16083, 16098), True, 'import tensorflow as tf\n'), ((16606, 16629), 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), '(hidden_states)\n', (16614, 16629), True, 'import tensorflow as tf\n'), ((20005, 20027), 'tensorflow.stack', 'tf.stack', (['[batch_size]'], {}), '([batch_size])\n', (20013, 20027), True, 'import tensorflow as tf\n'), ((20388, 20404), 'tensorflow.equal', 'tf.equal', (['m', '(0.0)'], {}), '(m, 0.0)\n', (20396, 20404), True, 'import tensorflow as tf\n'), ((26411, 26456), 'tensorflow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', (['decoder.cell_size'], {'reuse': 'reuse'}), '(decoder.cell_size, reuse=reuse)\n', (26424, 26456), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), ((26541, 26742), 'rnn.DropoutGRUCell', 'DropoutGRUCell', (['decoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'decoder.layer_norm', 'input_size': 'input_size_', 'input_keep_prob': 'decoder.rnn_input_keep_prob', 'state_keep_prob': 'decoder.rnn_state_keep_prob'}), '(decoder.cell_size, reuse=reuse, layer_norm=decoder.\n layer_norm, input_size=input_size_, input_keep_prob=decoder.\n rnn_input_keep_prob, state_keep_prob=decoder.rnn_state_keep_prob)\n', (26555, 26742), False, 'from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell\n'), ((26850, 26920), 'rnn.GRUCell', 'GRUCell', (['decoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'decoder.layer_norm'}), '(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)\n', (26857, 26920), False, 'from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell\n'), ((31176, 31193), 'tensorflow.shape', 'tf.shape', (['output_'], {}), '(output_)\n', (31184, 31193), True, 'import tensorflow as tf\n'), ((31894, 31921), 'tensorflow.squeeze', 'tf.squeeze', (['output_'], {'axis': '(2)'}), '(output_, axis=2)\n', (31904, 
31921), True, 'import tensorflow as tf\n'), ((32653, 32676), 'tensorflow.transpose', 'tf.transpose', (['embedding'], {}), '(embedding)\n', (32665, 32676), True, 'import tensorflow as tf\n'), ((35768, 35802), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_1"""'], {}), "('conditional_1')\n", (35785, 35802), True, 'import tensorflow as tf\n'), ((36371, 36405), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_2"""'], {}), "('conditional_2')\n", (36388, 36405), True, 'import tensorflow as tf\n'), ((38006, 38033), 'tensorflow.logical_not', 'tf.logical_not', (['feed_argmax'], {}), '(feed_argmax)\n', (38020, 38033), True, 'import tensorflow as tf\n'), ((40521, 40551), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), '(weights, axis=1)\n', (40534, 40551), True, 'import tensorflow as tf\n'), ((44723, 44771), 'tensorflow.get_variable', 'tf.get_variable', (['"""map_attns/matrix"""'], {'shape': 'shape'}), "('map_attns/matrix', shape=shape)\n", (44738, 44771), True, 'import tensorflow as tf\n'), ((44784, 44835), 'tensorflow.get_variable', 'tf.get_variable', (['"""map_attns/bias"""'], {'shape': 'shape[-1:]'}), "('map_attns/bias', shape=shape[-1:])\n", (44799, 44835), True, 'import tensorflow as tf\n'), ((47018, 47036), 'tensorflow.shape', 'tf.shape', (['sequence'], {}), '(sequence)\n', (47026, 47036), True, 'import tensorflow as tf\n'), ((47330, 47345), 'tensorflow.stack', 'tf.stack', (['shape'], {}), '(shape)\n', (47338, 47345), True, 'import tensorflow as tf\n'), ((4228, 4273), 'tensorflow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', (['encoder.cell_size'], {'reuse': 'reuse'}), '(encoder.cell_size, reuse=reuse)\n', (4241, 4273), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), ((4366, 4566), 'rnn.DropoutGRUCell', 'DropoutGRUCell', (['encoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'encoder.layer_norm', 'input_size': 'input_size', 'input_keep_prob': 
'encoder.rnn_input_keep_prob', 'state_keep_prob': 'encoder.rnn_state_keep_prob'}), '(encoder.cell_size, reuse=reuse, layer_norm=encoder.\n layer_norm, input_size=input_size, input_keep_prob=encoder.\n rnn_input_keep_prob, state_keep_prob=encoder.rnn_state_keep_prob)\n', (4380, 4566), False, 'from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell\n'), ((4690, 4760), 'rnn.GRUCell', 'GRUCell', (['encoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'encoder.layer_norm'}), '(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm)\n', (4697, 4760), False, 'from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell\n'), ((5574, 5609), 'tensorflow.multiply', 'tf.multiply', (['batch_size', 'time_steps'], {}), '(batch_size, time_steps)\n', (5585, 5609), True, 'import tensorflow as tf\n'), ((7263, 7334), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_inputs_'], {'keep_prob': 'encoder.input_layer_keep_prob'}), '(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob)\n', (7276, 7334), True, 'import tensorflow as tf\n'), ((8196, 8233), 'tensorflow.expand_dims', 'tf.expand_dims', (['initial_state'], {'axis': '(0)'}), '(initial_state, axis=0)\n', (8210, 8233), True, 'import tensorflow as tf\n'), ((9190, 9213), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (9211, 9213), True, 'import tensorflow as tf\n'), ((11147, 11193), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mask * encoder_outputs_)'], {'axis': '(1)'}), '(mask * encoder_outputs_, axis=1)\n', (11160, 11193), True, 'import tensorflow as tf\n'), ((11196, 11223), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (11209, 11223), True, 'import tensorflow as tf\n'), ((11423, 11451), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (11437, 11451), True, 'import tensorflow as tf\n'), ((18482, 18505), 'tensorflow.squeeze', 'tf.squeeze', 
(['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (18492, 18505), True, 'import tensorflow as tf\n'), ((18999, 19031), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights_'], {'axis': '(2)'}), '(weights_, axis=2)\n', (19013, 19031), True, 'import tensorflow as tf\n'), ((19436, 19467), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), '(weights, axis=2)\n', (19450, 19467), True, 'import tensorflow as tf\n'), ((19981, 20002), 'tensorflow.range', 'tf.range', (['attn_length'], {}), '(attn_length)\n', (19989, 20002), True, 'import tensorflow as tf\n'), ((20639, 20680), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(2)'], {'dtype': 'tf.float32'}), '(2, dtype=tf.float32)\n', (20659, 20680), True, 'import tensorflow as tf\n'), ((21008, 21039), 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), '(weights, axis=2)\n', (21022, 21039), True, 'import tensorflow as tf\n'), ((31723, 31754), 'tensorflow.expand_dims', 'tf.expand_dims', (['output_'], {'axis': '(2)'}), '(output_, axis=2)\n', (31737, 31754), True, 'import tensorflow as tf\n'), ((37736, 37758), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output_'], {}), '(output_)\n', (37749, 37758), True, 'import tensorflow as tf\n'), ((44849, 44879), 'tensorflow.einsum', 'tf.einsum', (['"""ijk,kl->ijl"""', 'x', 'w'], {}), "('ijk,kl->ijl', x, w)\n", (44858, 44879), True, 'import tensorflow as tf\n'), ((44935, 44948), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['x'], {}), '(x)\n', (44945, 44948), True, 'import tensorflow as tf\n'), ((607, 630), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (628, 630), True, 'import tensorflow as tf\n'), ((8139, 8159), 'tensorflow.zeros', 'tf.zeros', (['state_size'], {}), '(state_size)\n', (8147, 8159), True, 'import tensorflow as tf\n'), ((11485, 11530), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mask * encoder_inputs_)'], {'axis': '(1)'}), '(mask * encoder_inputs_, axis=1)\n', (11498, 11530), True, 'import 
tensorflow as tf\n'), ((11533, 11560), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (11546, 11560), True, 'import tensorflow as tf\n'), ((11665, 11713), 'tensorflow.concat', 'tf.concat', (['[last_forward, last_backward]'], {'axis': '(1)'}), '([last_forward, last_backward], axis=1)\n', (11674, 11713), True, 'import tensorflow as tf\n'), ((18902, 18926), 'tensorflow.squeeze', 'tf.squeeze', (['pos_'], {'axis': '(1)'}), '(pos_, axis=1)\n', (18912, 18926), True, 'import tensorflow as tf\n'), ((19345, 19368), 'tensorflow.squeeze', 'tf.squeeze', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (19355, 19368), True, 'import tensorflow as tf\n'), ((19756, 19776), 'tensorflow.matmul', 'tf.matmul', (['state', 'wp'], {}), '(state, wp)\n', (19765, 19776), True, 'import tensorflow as tf\n'), ((31986, 32033), 'tensorflow.split', 'tf.split', (['output_'], {'num_or_size_splits': '(2)', 'axis': '(1)'}), '(output_, num_or_size_splits=2, axis=1)\n', (31994, 32033), True, 'import tensorflow as tf\n'), ((36047, 36064), 'tensorflow.equal', 'tf.equal', (['time', '(0)'], {}), '(time, 0)\n', (36055, 36064), True, 'import tensorflow as tf\n'), ((11013, 11039), 'tensorflow.shape', 'tf.shape', (['encoder_outputs_'], {}), '(encoder_outputs_)\n', (11021, 11039), True, 'import tensorflow as tf\n'), ((11352, 11377), 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), '(encoder_inputs_)\n', (11360, 11377), True, 'import tensorflow as tf\n')] |
from functools import lru_cache
class Solution:
    def shoppingOffers(self, price, special, needs):
        """Return the minimum cost to buy exactly `needs` of each item.

        `price[i]` is the unit price of item i; each entry of `special` is a
        bundle: the counts of each item followed by the bundle price.  Bundles
        may be used any number of times but must never overshoot `needs`.
        """
        n = len(price)
        # Keep only bundles that contain at least one item and that are
        # strictly cheaper than buying their contents at unit price.
        useful = [
            offer for offer in special
            if sum(offer[:n]) > 0
            and sum(count * price[i] for i, count in enumerate(offer[:n])) > offer[-1]
        ]

        @lru_cache(None)
        def cheapest(remaining):
            # Baseline: buy every remaining item at its list price.
            best = sum(count * unit for count, unit in zip(remaining, price))
            for offer in useful:
                leftover = []
                for have, take in zip(remaining, offer):
                    if take > have:  # bundle would exceed the shopping list
                        break
                    leftover.append(have - take)
                else:
                    best = min(best, offer[-1] + cheapest(tuple(leftover)))
            return best

        return cheapest(tuple(needs))
# 作者:LeetCode-Solution
# 链接:https://leetcode-cn.com/problems/shopping-offers/solution/da-li-bao-by-leetcode-solution-p1ww/
# 来源:力扣(LeetCode)
# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。 | [
"functools.lru_cache"
] | [((402, 417), 'functools.lru_cache', 'lru_cache', (['None'], {}), '(None)\n', (411, 417), False, 'from functools import lru_cache\n')] |
"""Custom client handling, including CSVStream base class."""
import csv
import os
from typing import Iterable, List, Optional
from singer_sdk import typing as th
from singer_sdk.streams import Stream
class CSVStream(Stream):
    """Stream class for CSV streams.

    Reads one or more ``.csv`` files (a single file or every ``.csv`` file in
    a directory) and yields each data row as a dict keyed by the header row.
    """

    # Lazily-populated cache of resolved file paths (see get_file_paths).
    file_paths: List[str] = []

    def __init__(self, *args, **kwargs):
        """Init CSVStream."""
        # cache file_config so we dont need to go iterating the config list again later
        self.file_config = kwargs.pop("file_config")
        super().__init__(*args, **kwargs)

    def get_records(self, context: Optional[dict]) -> Iterable[dict]:
        """Return a generator of row-type dictionary objects.

        The optional `context` argument is used to identify a specific slice of the
        stream if partitioning is required for the stream. Most implementations do not
        require partitioning and should ignore the `context` argument.
        """
        for file_path in self.get_file_paths():
            headers: List[str] = []
            for row in self.get_rows(file_path):
                if not headers:
                    # The first row of each file is treated as its header row.
                    headers = row
                    continue
                yield dict(zip(headers, row))

    def get_file_paths(self) -> list:
        """Return a list of file paths to read.

        This tap accepts file names and directories so it will detect
        directories and iterate files inside.

        Raises:
            Exception: if the configured path does not exist, or if no
                acceptable ``.csv`` files are found.
        """
        # Cache file paths so we dont have to iterate multiple times
        if self.file_paths:
            return self.file_paths
        file_path = self.file_config["path"]
        if not os.path.exists(file_path):
            raise Exception(f"File path does not exist {file_path}")
        file_paths = []
        if os.path.isdir(file_path):
            clean_file_path = os.path.normpath(file_path) + os.sep
            for filename in os.listdir(clean_file_path):
                file_path = clean_file_path + filename
                if self.is_valid_filename(file_path):
                    file_paths.append(file_path)
        else:
            if self.is_valid_filename(file_path):
                file_paths.append(file_path)
        if not file_paths:
            raise Exception(
                f"Stream '{self.name}' has no acceptable files. \
                    See warning for more detail."
            )
        self.file_paths = file_paths
        return file_paths

    def is_valid_filename(self, file_path: str) -> bool:
        """Return a boolean of whether the file includes CSV extension."""
        is_valid = True
        if file_path[-4:] != ".csv":
            is_valid = False
            self.logger.warning(f"Skipping non-csv file '{file_path}'")
            self.logger.warning(
                "Please provide a CSV file that ends with '.csv'; e.g. 'users.csv'"
            )
        return is_valid

    def get_rows(self, file_path: str) -> Iterable[list]:
        """Return a generator of the rows in a particular CSV file."""
        # newline="" is required by the csv module docs: without it, quoted
        # fields containing embedded newlines are corrupted by universal
        # newline translation.
        with open(file_path, "r", newline="") as f:
            reader = csv.reader(f)
            for row in reader:
                yield row

    @property
    def schema(self) -> dict:
        """Return dictionary of record schema.

        Dynamically detect the json schema for the stream.
        This is evaluated prior to any records being retrieved.
        """
        properties: List[th.Property] = []
        self.primary_keys = self.file_config.get("keys", [])
        # Guard against a completely empty first file: previously `header`
        # would be unbound and reading it raised NameError.
        header: List[str] = []
        for file_path in self.get_file_paths():
            for header in self.get_rows(file_path):
                break
            break
        for column in header:
            # Set all types to string
            # TODO: Try to be smarter about inferring types.
            properties.append(th.Property(column, th.StringType()))
        return th.PropertiesList(*properties).to_dict()
| [
"os.path.exists",
"os.listdir",
"os.path.normpath",
"singer_sdk.typing.PropertiesList",
"os.path.isdir",
"singer_sdk.typing.StringType",
"csv.reader"
] | [((1759, 1783), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (1772, 1783), False, 'import os\n'), ((1627, 1652), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1641, 1652), False, 'import os\n'), ((1880, 1907), 'os.listdir', 'os.listdir', (['clean_file_path'], {}), '(clean_file_path)\n', (1890, 1907), False, 'import os\n'), ((3067, 3080), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3077, 3080), False, 'import csv\n'), ((1815, 1842), 'os.path.normpath', 'os.path.normpath', (['file_path'], {}), '(file_path)\n', (1831, 1842), False, 'import os\n'), ((3824, 3854), 'singer_sdk.typing.PropertiesList', 'th.PropertiesList', (['*properties'], {}), '(*properties)\n', (3841, 3854), True, 'from singer_sdk import typing as th\n'), ((3791, 3806), 'singer_sdk.typing.StringType', 'th.StringType', ([], {}), '()\n', (3804, 3806), True, 'from singer_sdk import typing as th\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Optional
from ax.exceptions.storage import SQADecodeError
from ax.utils.common.base import Base, SortableBase
def is_foreign_key_field(field: str) -> bool:
    """Return true if field name is a foreign key field, i.e. ends in `_id`.

    A bare "_id" does not count: something must precede the suffix.
    """
    return field.endswith("_id") and len(field) > 3
def copy_db_ids(source: Any, target: Any, path: Optional[List[str]] = None) -> None:
    """Takes as input two objects, `source` and `target`, that should be identical,
    except that `source` has _db_ids set and `target` doesn't. Recursively copies the
    _db_ids from `source` to `target`.

    Raise a SQADecodeError when the assumption of equality on `source` and `target`
    is violated, since this method is meant to be used when returning a new
    user-facing object after saving.

    Args:
        source: object (Base instance, list, set, dict, or leaf value) that
            carries ``_db_id`` attributes.
        target: structurally-equal object that should receive those ``_db_id``s.
        path: attribute/index trail accumulated from the original call site;
            used both for error messages and as a recursion-depth bound.
    """
    if not path:
        path = []

    # Built eagerly so every error raised below can report where in the
    # object graph the mismatch occurred.
    error_message_prefix = (
        f"Error encountered while traversing source {path + [str(source)]} and "
        f"target {path + [str(target)]}: "
    )

    if len(path) > 10:
        # this shouldn't happen, but is a precaution against accidentally
        # introducing infinite loops
        return

    if type(source) != type(target):
        raise SQADecodeError(
            error_message_prefix + "Encountered two objects of different "
            f"types: {type(source)} and {type(target)}."
        )

    if isinstance(source, Base):
        for attr, val in source.__dict__.items():
            if attr.endswith("_db_id"):
                # we're at a "leaf" node; copy the db_id and return
                setattr(target, attr, val)
                continue

            # skip over _experiment to prevent infinite loops,
            # and ignore doubly private attributes
            if attr == "_experiment" or attr.startswith("__"):
                continue

            copy_db_ids(val, getattr(target, attr), path + [attr])

    elif isinstance(source, (list, set)):
        source = list(source)
        target = list(target)

        if len(source) != len(target):
            raise SQADecodeError(
                error_message_prefix + "Encountered lists of different lengths."
            )

        # Safe to skip over lists of types (e.g. transforms)
        if len(source) == 0 or isinstance(source[0], type):
            return

        if isinstance(source[0], Base) and not isinstance(source[0], SortableBase):
            raise SQADecodeError(
                error_message_prefix + f"Cannot sort instances of {type(source[0])}; "
                "sorting is only defined on instances of SortableBase."
            )

        # Sort both sides so structurally-equal elements line up index-by-index
        # even when the containers were unordered (sets).
        source = sorted(source)
        target = sorted(target)

        for index, x in enumerate(source):
            copy_db_ids(x, target[index], path + [str(index)])

    elif isinstance(source, dict):
        for k, v in source.items():
            if k not in target:
                raise SQADecodeError(
                    error_message_prefix + "Encountered key only present "
                    f"in source dictionary: {k}."
                )

            copy_db_ids(v, target[k], path + [k])

    else:
        # Primitive / unhandled leaf value: nothing to copy.
        return
| [
"ax.exceptions.storage.SQADecodeError"
] | [((2269, 2354), 'ax.exceptions.storage.SQADecodeError', 'SQADecodeError', (["(error_message_prefix + 'Encountered lists of different lengths.')"], {}), "(error_message_prefix + 'Encountered lists of different lengths.'\n )\n", (2283, 2354), False, 'from ax.exceptions.storage import SQADecodeError\n'), ((3110, 3211), 'ax.exceptions.storage.SQADecodeError', 'SQADecodeError', (["(error_message_prefix +\n f'Encountered key only present in source dictionary: {k}.')"], {}), "(error_message_prefix +\n f'Encountered key only present in source dictionary: {k}.')\n", (3124, 3211), False, 'from ax.exceptions.storage import SQADecodeError\n')] |
from cluster import *
import optparse
import sys
##########################################
## Options and defaults
##########################################
def getOptions():
    """Parse command-line arguments for the clustering script.

    Exits with usage information when either the input SDF file or the
    output file is missing.
    """
    opt_parser = optparse.OptionParser('python *.py [option]')
    option_specs = [
        ('--sdf', dict(dest='input', help='intput sdf file', default='')),
        ('--fp', dict(dest='fp', help='fingerprint type: tp,mc,mo (Topological Fingerprints, MACCS Keys, Morgan Fingerprints), default is mc', default='mc')),
        ('--radius', dict(dest='radius', help=' the radius of the Morgan fingerprint, default is 2', type='int', default=2)),
        ('--algorithm', dict(dest='algorithm', help='cluster algorithm :b,m (Butina, Murtagh), default is b', default='b')),
        ('--cutoff', dict(dest='cutoff', help='distThresh(0-1),elements within this range of each other are considered to be neighbors, needed for Butina cluster algorithm, default is 0.5', type='float', default=0.5)),
        ('--nclusts', dict(dest='nclusts', help='number of clusters, needed for Murtagh cluster algorithm, default is 1', type='int', default=1)),
        ('--murtype', dict(dest='Murtype', help='Method for Murtagh:Wards, SLINK, CLINK, UPGMA, needed when Murtagh is set as algorithm, default is Wards', default='Wards')),
        ('--out', dict(dest='output', help='output sdf file', default='')),
    ]
    for flag, spec in option_specs:
        opt_parser.add_option(flag, **spec)
    opts, _args = opt_parser.parse_args()
    if opts.input == '' or opts.output == '':
        # Both an input and an output file are mandatory.
        opt_parser.print_help()
        print("No input or output is provided")
        sys.exit(1)
    return opts
def main():
    """Drive the clustering pipeline: parse options, read the SDF file,
    compute fingerprints, cluster them and write the annotated output."""
    options = getOptions()
    fp_names = {'tp': 'Topological Fingerprints', 'mc': 'MACCS Keys', 'mo': 'Morgan Fingerprints'}
    algorithm_names = {'b': 'Butina', 'm': 'Murtagh'}
    # Replace the one-letter code with the full algorithm name used downstream.
    options.algorithm = algorithm_names[options.algorithm]
    print(f"fingerprint type: {fp_names[options.fp]}")
    if options.fp == 'mo':
        print(f"radius: {options.radius}")
    print(f"cluster algorithm: {options.algorithm}")
    if options.algorithm == "Murtagh":
        print(f"Murtagh method: {options.Murtype}")
        print(f"Murtagh cluster number set: {options.nclusts}")
    elif options.algorithm == "Butina":
        print(f"cutoff(distThresh) : {options.cutoff}")
    print('sdf reading...')
    sdf = ChemParse(options.input)
    sdf.sdf_reader()
    print('fingerprint calculating...')
    sdf.get_fps(options.fp, options.radius)
    print('clustering...')
    clusterer = Fingerprint_Cluster(sdf.fps)
    clusterer.distance_matrix()
    clusterer.cluster_dict(options.algorithm, options.cutoff, options.Murtype, options.nclusts)
    print(f'done, output to {options.output}')
    sdf.clusterOutput(options.output, clusterer.cdict)
    for cluster_id in clusterer.clustdict:
        print(f"cluster{cluster_id}: {clusterer.clustdict[cluster_id]}")


if __name__ == "__main__":
    main()
| [
"optparse.OptionParser",
"sys.exit"
] | [((193, 238), 'optparse.OptionParser', 'optparse.OptionParser', (['"""python *.py [option]"""'], {}), "('python *.py [option]')\n", (214, 238), False, 'import optparse\n'), ((1568, 1579), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1576, 1579), False, 'import sys\n')] |
from knotHash import knotHash
from AOCClasses import Position, SolidPosition, solid, empty
# Build a 128x128 bit grid from the knot hashes of "<key>-<row>" and count
# the connected regions of non-solid cells via flood fill.
key = "jzgqcdpd"
# key = "flqrgnkx"
rows = []
for row_index in range(128):
    digest = knotHash(f"{key}-{row_index}")
    # Expand the hex digest into a zero-padded 128-bit binary string.
    rows.append(f"{int(digest, 16):0128b}")


def isSolid(p):
    """A cell is treated as solid by the maze when its hash bit is '0'."""
    return rows[p.y][p.x] == "0"


maze = SolidPosition(0, 0, xmin=0, ymin=0, xmax=127, ymax=127, solid=isSolid)
seen = {}
result = 0
for col in range(128):
    for row in range(128):
        cell = maze + Position(col, row)
        if cell in seen or cell.isSolid():
            continue
        # New region discovered: flood-fill it so it is never counted again.
        result += 1
        stack = [cell]
        while stack:
            current = stack.pop()
            seen[current] = 1
            for neighbor in current.gridAdj():
                if neighbor not in seen:
                    stack.append(neighbor)

with open("output2.txt", "w") as output:
    output.write(str(result))
print(str(result))
| [
"AOCClasses.Position",
"AOCClasses.SolidPosition"
] | [((390, 460), 'AOCClasses.SolidPosition', 'SolidPosition', (['(0)', '(0)'], {'xmin': '(0)', 'ymin': '(0)', 'xmax': '(127)', 'ymax': '(127)', 'solid': 'isSolid'}), '(0, 0, xmin=0, ymin=0, xmax=127, ymax=127, solid=isSolid)\n', (403, 460), False, 'from AOCClasses import Position, SolidPosition, solid, empty\n'), ((532, 546), 'AOCClasses.Position', 'Position', (['i', 'j'], {}), '(i, j)\n', (540, 546), False, 'from AOCClasses import Position, SolidPosition, solid, empty\n')] |
import math, time, os, argparse, logging, json
from wand.image import Image
parser = argparse.ArgumentParser(
    prog='tile_cutter',
    description='Cuts large images into tiles.')
parser.add_argument('--tile-size', metavar='SIZE', type=int, default=512,
    help='Tile size (width and height)')
parser.add_argument('-v', '--verbose', action='store_true',
    help='Log debugging information')
parser.add_argument('image', type=argparse.FileType('rb'),
    help='Source image')
args = parser.parse_args()

if args.verbose:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

layers = []
tile_size = args.tile_size
logging.info("tile size: %dx%d", tile_size, tile_size)

with Image(file=args.image) as source:
    logging.info("image size: %dx%d", source.width, source.height)
    # every zoom level has 2x more tiles
    max_zoom = math.ceil(math.log(max(source.size) / args.tile_size, 2))
    logging.info("zoom levels: 1-%d", max_zoom)
    # Pad the image to a square whose side is tile_size * 2**max_zoom.
    image_size = args.tile_size * (2 ** max_zoom)
    offset_x, offset_y = tuple((image_size - orig) // 2 for orig in source.size)
    logging.info("tiled size: %dx%d-%d-%d", image_size, image_size, offset_x, offset_y)

    layers.append({
        "name": "???",
        "URL": os.path.basename(args.image.name),
        "width": source.width,
        "height": source.height,
        "tileSize": args.tile_size,
        "imageSize": image_size
    })

    # Center the original image inside the padded square canvas.
    square_source = Image(width=image_size, height=image_size)
    square_source.composite(source,
        (square_source.width - source.width) // 2,
        (square_source.height - source.height) // 2)

for z in range(1, max_zoom + 1):
    # Edge length (in padded-image pixels) covered by one tile at this zoom.
    source_size = int(args.tile_size * (2 ** (max_zoom - z)))
    logging.info("zoom level %d: source %dx%d", z, source_size, source_size)
    current_image = 0
    total_images = (image_size // source_size) ** 2
    # time.clock() was removed in Python 3.8; time.perf_counter() is the
    # recommended monotonic replacement for interval timing.
    start_time = last_report_time = time.perf_counter()
    for y in range(0, image_size // source_size):
        for x in range(0, image_size // source_size):
            crop_x, crop_y = x * source_size, y * source_size
            path = "%s-tiles/%d/%d/%d.png" % (args.image.name, z, x, y)
            logging.debug("tile %s: source %dx%d%+d%+d",
                path, source_size, source_size, crop_x, crop_y)
            with square_source.clone() as tile:
                tile.crop(crop_x, crop_y, width=source_size, height=source_size)
                tile.resize(tile_size, tile_size)
                os.makedirs(os.path.dirname(path), exist_ok=True)
                tile.save(filename=path)
            current_image += 1
            # Report progress (with an ETA estimate) at most once per second.
            if time.perf_counter() - last_report_time > 1:
                last_report_time = time.perf_counter()
                eta = (last_report_time - start_time) / current_image * \
                    (total_images - current_image)
                logging.info("completion: %.2f%% (ETA: %dh%dm%ds)",
                    current_image / total_images * 100,
                    eta // 3600, (eta % 3600) // 60, eta % 60)

with open("%s.json" % args.image.name, "w") as descr:
    descr.write(json.dumps({
        "name": "???",
        "scale": None,
        "layers": layers
    }))
    logging.info("image description written to: %s" % descr.name)

logging.info("done")
| [
"logging.basicConfig",
"argparse.FileType",
"logging.debug",
"time.clock",
"argparse.ArgumentParser",
"wand.image.Image",
"json.dumps",
"os.path.dirname",
"os.path.basename",
"logging.info"
] | [((86, 179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""tile_cutter"""', 'description': '"""Cuts large images into tiles."""'}), "(prog='tile_cutter', description=\n 'Cuts large images into tiles.')\n", (109, 179), False, 'import math, time, os, argparse, logging, json\n'), ((710, 764), 'logging.info', 'logging.info', (['"""tile size: %dx%d"""', 'tile_size', 'tile_size'], {}), "('tile size: %dx%d', tile_size, tile_size)\n", (722, 764), False, 'import math, time, os, argparse, logging, json\n'), ((3342, 3362), 'logging.info', 'logging.info', (['"""done"""'], {}), "('done')\n", (3354, 3362), False, 'import math, time, os, argparse, logging, json\n'), ((578, 618), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (597, 618), False, 'import math, time, os, argparse, logging, json\n'), ((629, 668), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (648, 668), False, 'import math, time, os, argparse, logging, json\n'), ((771, 793), 'wand.image.Image', 'Image', ([], {'file': 'args.image'}), '(file=args.image)\n', (776, 793), False, 'from wand.image import Image\n'), ((809, 871), 'logging.info', 'logging.info', (['"""image size: %dx%d"""', 'source.width', 'source.height'], {}), "('image size: %dx%d', source.width, source.height)\n", (821, 871), False, 'import math, time, os, argparse, logging, json\n'), ((991, 1034), 'logging.info', 'logging.info', (['"""zoom levels: 1-%d"""', 'max_zoom'], {}), "('zoom levels: 1-%d', max_zoom)\n", (1003, 1034), False, 'import math, time, os, argparse, logging, json\n'), ((1171, 1258), 'logging.info', 'logging.info', (['"""tiled size: %dx%d-%d-%d"""', 'image_size', 'image_size', 'offset_x', 'offset_y'], {}), "('tiled size: %dx%d-%d-%d', image_size, image_size, offset_x,\n offset_y)\n", (1183, 1258), False, 'import math, time, os, argparse, logging, json\n'), ((1509, 1551), 'wand.image.Image', 
'Image', ([], {'width': 'image_size', 'height': 'image_size'}), '(width=image_size, height=image_size)\n', (1514, 1551), False, 'from wand.image import Image\n'), ((1792, 1864), 'logging.info', 'logging.info', (['"""zoom level %d: source %dx%d"""', 'z', 'source_size', 'source_size'], {}), "('zoom level %d: source %dx%d', z, source_size, source_size)\n", (1804, 1864), False, 'import math, time, os, argparse, logging, json\n'), ((1976, 1988), 'time.clock', 'time.clock', ([], {}), '()\n', (1986, 1988), False, 'import math, time, os, argparse, logging, json\n'), ((3279, 3340), 'logging.info', 'logging.info', (["('image description written to: %s' % descr.name)"], {}), "('image description written to: %s' % descr.name)\n", (3291, 3340), False, 'import math, time, os, argparse, logging, json\n'), ((463, 486), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (480, 486), False, 'import math, time, os, argparse, logging, json\n'), ((3183, 3243), 'json.dumps', 'json.dumps', (["{'name': '???', 'scale': None, 'layers': layers}"], {}), "({'name': '???', 'scale': None, 'layers': layers})\n", (3193, 3243), False, 'import math, time, os, argparse, logging, json\n'), ((1314, 1347), 'os.path.basename', 'os.path.basename', (['args.image.name'], {}), '(args.image.name)\n', (1330, 1347), False, 'import math, time, os, argparse, logging, json\n'), ((2240, 2336), 'logging.debug', 'logging.debug', (['"""tile %s: source %dx%d%+d%+d"""', 'path', 'source_size', 'source_size', 'crop_x', 'crop_y'], {}), "('tile %s: source %dx%d%+d%+d', path, source_size, source_size,\n crop_x, crop_y)\n", (2253, 2336), False, 'import math, time, os, argparse, logging, json\n'), ((2765, 2777), 'time.clock', 'time.clock', ([], {}), '()\n', (2775, 2777), False, 'import math, time, os, argparse, logging, json\n'), ((2923, 3055), 'logging.info', 'logging.info', (['"""completion: %.2f%% (ETA: %dh%dm%ds)"""', '(current_image / total_images * 100)', '(eta // 3600)', '(eta % 3600 // 60)', '(eta 
% 60)'], {}), "('completion: %.2f%% (ETA: %dh%dm%ds)', current_image /\n total_images * 100, eta // 3600, eta % 3600 // 60, eta % 60)\n", (2935, 3055), False, 'import math, time, os, argparse, logging, json\n'), ((2567, 2588), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2582, 2588), False, 'import math, time, os, argparse, logging, json\n'), ((2693, 2705), 'time.clock', 'time.clock', ([], {}), '()\n', (2703, 2705), False, 'import math, time, os, argparse, logging, json\n')] |
# Copyright (c) 2013, Homzhub and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import today,getdate
def execute(filters=None):
    """Script-report entry point: return the column layout and the data rows."""
    return get_columns(), get_data(filters)
def get_columns():
    """Describe the report columns.

    Each column is a dict with label, fieldtype, fieldname, an optional
    `options` doctype (for Link fields) and a display width.
    """
    # (label, fieldtype, fieldname, options-or-None, width)
    specs = [
        ("Owner", "Link", "prop_owner", "Customer", 100),
        ("Owner Name", "Data", "owner_name", None, 140),
        ("Tenant", "Link", "tenant", "Customer", 100),
        ("Tenant Name", "Data", "tenant_name", None, 140),
        ("Expected Start", "Data", "expected_start_date", None, 100),
        ("Expected End", "Data", "expected_end_date", None, 100),
        ("Project", "Link", "name", "Project", 100),
        ("Property Address", "Link", "property_address", "Address", 160),
        ("Item Code", "Link", "item_code", "Item", 100),
        ("End Date", "Data", "end_date", None, 100),
        ("Due Days", "Data", "due_days", None, 100),
    ]
    columns = []
    for label, fieldtype, fieldname, options, width in specs:
        column = {
            "label": label,
            "fieldtype": fieldtype,
            "fieldname": fieldname,
        }
        if options:
            column["options"] = options
        column["width"] = width
        columns.append(column)
    return columns
def get_data(filters):
    """Build the report rows.

    Joins open projects with their owner and tenant child rows, attaches
    subscription item / end-date details per project, then applies the
    `item_code` and `due_days` filters in Python.
    """
    condition="Where pr.status='Open'"
    item=''
    due_days=''
    # `item_code` and `due_days` are not columns of the SQL query below, so
    # they are popped here and applied as Python-side filters at the end.
    if filters.get('item_code'):
        item=filters.get('item_code')
        filters.pop('item_code')
    if filters.get('due_days'):
        due_days=filters.get('due_days')
        filters.pop('due_days')
    # NOTE(review): the WHERE clause is assembled via str.format from
    # user-supplied filter values — vulnerable to SQL injection; should use
    # frappe.db.sql's parameterized values instead. Flagged, not changed.
    # NOTE(review): when 'owner' (or 'tenant') is among the filters, `row` is
    # overridden for EVERY filter key in this loop, so combining owner/tenant
    # with other filters conditions the wrong column — verify intent.
    for i,d in enumerate(filters):
        row=d
        if filters.get('owner'):
            row="ol.prop_owner"
        if filters.get('tenant'):
            row="tl.tenant"
        condition+=" And {0}='{1}'".format(row,filters.get(d))
    project_list=frappe.db.sql("""SELECT
        pr.name,
        pr.expected_start_date,
        pr.expected_end_date,
        pr.property_address,
        ol.prop_owner,
        ol.owner_name,
        tl.tenant,
        tl.tenant_name
        FROM `tabProject` pr
        Left Join `tabOwner List` ol ON ol.parent=pr.name
        Left Join `tabTenant List` tl ON tl.parent=pr.name
        {0}""".format(condition),as_dict=True)
    data=[]
    for row in project_list:
        # Subscription details (plan item + current invoice end) per project.
        subscription=frappe.db.sql("""SELECT
            sl.subscription_plan,
            sub.current_invoice_end,
            sp.item
            From `tabSubscription List` sl
            Left Join `tabSubscription Plan` sp ON sp.name=sl.subscription_plan
            Left Join `tabSubscription` sub ON sl.subscription=sub.name
            WHERE sl.parent='{0}'""".format(row.get('name')),as_dict=True)
        if len(subscription):
            # NOTE(review): row.update inside this loop means only the LAST
            # subscription's values survive on the row — confirm intended.
            for sub in subscription:
                row.update({
                    'item_code':sub.get('item'),
                    'end_date':sub.get('current_invoice_end').strftime("%d-%m-%Y"),
                    'due_days':(sub.get('current_invoice_end')-getdate(today())).days
                })
        else:
            # No subscription: fall back to invoice items linked to the project.
            for ip in frappe.get_all('Invoice Item In Project',{'parent':row.get('name')},['item']):
                row.update({'item_code':ip.get('item')})
        data.append(row)
    data1=data
    # Python-side filter by item code.
    if item:
        data1=[]
        for d in data:
            if d.get('item_code')==item:
                data1.append(d)
        data=data1
    # Python-side filter by due days (string comparison against the filter value).
    if due_days:
        data=[]
        for d in data1:
            if str(d.get('due_days'))==due_days:
                data.append(d)
    return data
| [
"frappe.utils.today"
] | [((3087, 3094), 'frappe.utils.today', 'today', ([], {}), '()\n', (3092, 3094), False, 'from frappe.utils import today, getdate\n')] |
import matplotlib.pyplot as plt
from time import time
import numpy as np
from .plotter_utils import figure_ratio, xarray_set_axes_labels, retrieve_or_create_fig_ax
# Change the bands (RGB) here if you want other false color combinations
def rgb(dataset, at_index=0, x_coord='longitude', y_coord='latitude',
        bands=('red', 'green', 'blue'), paint_on_mask=(),
        min_possible=0, max_possible=10000, use_data_min=False,
        use_data_max=False, min_inten=0.15, max_inten=1.0,
        width=10, fig=None, ax=None, imshow_kwargs=None):
    """
    Creates a figure showing an area, using three specified bands as the rgb componenets.

    Parameters
    ----------
    dataset: xarray.Dataset
        A Dataset containing at least latitude and longitude coordinates and optionally time.
        The coordinate order should be time, latitude, and finally longitude.
        Must contain the data variables specified in the `bands` parameter.
    at_index: int
        The time index to show.
    x_coord, y_coord: str
        Names of DataArrays in `dataset` to use as x and y coordinates.
    bands: list-like
        A list-like containing 3 names of data variables in `dataset` to use as the red, green, and blue
        bands, respectively.
    paint_on_mask: list-like of (mask, color) pairs
        For each pair, pixels where the boolean `mask` is True are painted
        with `color` (an RGB triple with components in 0-255).
    min_possible, max_possible: int
        The minimum and maximum valid values for the selected bands according to
        the platform used to retrieve the data in `dataset`.
        For example, for Landsat these are generally 0 and 10000, respectively.
    use_data_min: bool
        Whether to use `min_possible` or the minimum among all selected bands
        as the band value which has a minimal intensity.
    use_data_max: bool
        Whether to use `max_possible` or the maximum among all selected bands
        as the band value which has a maximal intensity.
    min_inten, max_inten: float
        The min and max intensities for any band. These can be in range [0,1].
        These can be used to brighten or darken the image.
    width: int
        The width of the figure in inches.
    fig: matplotlib.figure.Figure
        The figure to use for the plot.
        If only `fig` is supplied, the Axes object used will be the first.
    ax: matplotlib.axes.Axes
        The axes to use for the plot.
    imshow_kwargs: dict
        The dictionary of keyword arguments passed to `ax.imshow()`.
        You can pass a colormap here with the key 'cmap'.

    Returns
    -------
    fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
        The figure and axes used for the plot.
    """
    # `bands` and `paint_on_mask` default to immutable tuples to avoid the
    # shared-mutable-default-argument pitfall (flake8-bugbear B006).
    imshow_kwargs = {} if imshow_kwargs is None else imshow_kwargs

    ### < Dataset to RGB Format, needs float values between 0-1
    rgb = np.stack([dataset[bands[0]],
                    dataset[bands[1]],
                    dataset[bands[2]]], axis = -1)
    # Interpolate values to be in the range [0,1] for creating the image.
    min_rgb = np.nanmin(rgb) if use_data_min else min_possible
    max_rgb = np.nanmax(rgb) if use_data_max else max_possible
    rgb = np.interp(rgb, (min_rgb, max_rgb), [min_inten,max_inten])
    rgb = rgb.astype(float)
    ### >

    ### < takes a T/F mask, apply a color to T areas
    for mask, color in paint_on_mask:
        rgb[mask] = np.array(color)/ 255.0
    ### >

    fig, ax = retrieve_or_create_fig_ax(fig, ax, figsize=figure_ratio(rgb.shape[:2], fixed_width = width))

    xarray_set_axes_labels(dataset, ax, x_coord, y_coord)

    if 'time' in dataset.dims:
        ax.imshow(rgb[at_index], **imshow_kwargs)
    else:
        ax.imshow(rgb, **imshow_kwargs)

    return fig, ax
"numpy.stack",
"numpy.array",
"numpy.nanmax",
"numpy.interp",
"numpy.nanmin"
] | [((2732, 2808), 'numpy.stack', 'np.stack', (['[dataset[bands[0]], dataset[bands[1]], dataset[bands[2]]]'], {'axis': '(-1)'}), '([dataset[bands[0]], dataset[bands[1]], dataset[bands[2]]], axis=-1)\n', (2740, 2808), True, 'import numpy as np\n'), ((3061, 3119), 'numpy.interp', 'np.interp', (['rgb', '(min_rgb, max_rgb)', '[min_inten, max_inten]'], {}), '(rgb, (min_rgb, max_rgb), [min_inten, max_inten])\n', (3070, 3119), True, 'import numpy as np\n'), ((2939, 2953), 'numpy.nanmin', 'np.nanmin', (['rgb'], {}), '(rgb)\n', (2948, 2953), True, 'import numpy as np\n'), ((3002, 3016), 'numpy.nanmax', 'np.nanmax', (['rgb'], {}), '(rgb)\n', (3011, 3016), True, 'import numpy as np\n'), ((3284, 3299), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (3292, 3299), True, 'import numpy as np\n')] |
import logging
import os
import numpy as np
import xml.etree.ElementTree as ET
from PIL import Image
from paths import DATASETS_ROOT
log = logging.getLogger()
VOC_CATS = ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
class VOCLoader():
    """Loads PASCAL VOC 2007/2012 images, detection annotations and
    (optionally) segmentation masks from the VOCdevkit directory layout."""

    def __init__(self, year, split, segmentation=False, augmented_seg=False):
        """Create a loader.

        Args:
            year: dataset year, '07' or '12'.
            split: one of 'train', 'val', 'trainval', 'test'.
            segmentation: if True, load segmentation masks (VOC12 only).
            augmented_seg: if True, use the augmented segmentation file list.
        """
        assert year in ['07', '12']
        self.dataset = 'voc'
        self.year = year
        self.root = os.path.join(DATASETS_ROOT, 'VOCdevkit/VOC20%s/' % year)
        self.split = split
        assert split in ['train', 'val', 'trainval', 'test']

        cats = VOC_CATS
        self.cats_to_ids = dict(map(reversed, enumerate(cats)))
        self.ids_to_cats = dict(enumerate(cats))
        self.num_classes = len(cats)
        self.categories = cats[1:]  # all classes except '__background__'

        self.segmentation = segmentation
        self.augmented_seg = augmented_seg

        # Segmentation labels only exist for VOC12.
        assert not self.segmentation or self.segmentation and self.year == '12'

        if self.augmented_seg:
            filelist = 'ImageSets/SegmentationAug/%s.txt'
        elif self.segmentation:
            filelist = 'ImageSets/Segmentation/%s.txt'
        else:
            filelist = 'ImageSets/Main/%s.txt'

        with open(os.path.join(self.root, filelist % self.split), 'r') as f:
            # Drop the trailing empty entry produced by the final newline.
            self.filenames = f.read().split('\n')[:-1]

        log.info("Created a loader VOC%s %s with %i images", year, split, len(self.filenames))

    def load_image(self, name):
        """Return image `name` as a float32 RGB array with values in [0, 1]."""
        im = Image.open('%sJPEGImages/%s.jpg' % (self.root, name)).convert('RGB')
        im = np.array(im) / 255.0
        im = im.astype(np.float32)
        return im

    def get_filenames(self):
        """Return the list of image names for the configured split."""
        return self.filenames

    def read_annotations(self, name):
        """Parse the detection annotation XML for image `name`.

        Returns:
            (gt_bboxes, seg_gt, gt_cats, width, height, difficulty) where
            gt_bboxes is an (N, 4) array of (x, y, w, h) boxes, gt_cats the
            matching category ids and difficulty the per-object flags.
        """
        bboxes = []
        cats = []

        tree = ET.parse('%sAnnotations/%s.xml' % (self.root, name))
        root = tree.getroot()
        width = int(root.find('size/width').text)
        height = int(root.find('size/height').text)
        difficulty = []
        for obj in root.findall('object'):
            cat = self.cats_to_ids[obj.find('name').text]
            difficult = (int(obj.find('difficult').text) != 0)
            difficulty.append(difficult)
            cats.append(cat)
            bbox_tag = obj.find('bndbox')
            x = int(bbox_tag.find('xmin').text)
            y = int(bbox_tag.find('ymin').text)
            w = int(bbox_tag.find('xmax').text)-x
            h = int(bbox_tag.find('ymax').text)-y
            bboxes.append((x, y, w, h))

        gt_cats = np.array(cats)
        gt_bboxes = np.array(bboxes).reshape((len(bboxes), 4))
        difficulty = np.array(difficulty)

        seg_gt = self.read_segmentations(name, height, width)

        output = gt_bboxes, seg_gt, gt_cats, width, height, difficulty
        return output

    def read_segmentations(self, name, height, width):
        """Return the segmentation class mask for image `name` as uint8 (HxW)."""
        if self.segmentation:
            try:
                seg_folder = self.root + 'SegmentationClass/'
                seg_file = seg_folder + name + '.png'
                seg_map = Image.open(seg_file)
            except OSError:
                # Mask missing from the standard folder: fall back to the
                # augmented set. (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; PIL file errors are OSError.)
                assert self.augmented_seg
                seg_folder = self.root + 'SegmentationClassAug/'
                seg_file = seg_folder + name + '.png'
                seg_map = Image.open(seg_file)
            segmentation = np.array(seg_map, dtype=np.uint8)
        else:
            # if there is no segmentation for a particular image we fill the
            # mask to keep the same amount of tensors but don't learn from it.
            # Note: the result is all 255 (not zeros) — presumably the
            # "ignore" label downstream; TODO confirm.
            segmentation = np.zeros([height, width], dtype=np.uint8) + 255
        return segmentation
| [
"logging.getLogger",
"PIL.Image.open",
"xml.etree.ElementTree.parse",
"os.path.join",
"numpy.array",
"numpy.zeros"
] | [((143, 162), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (160, 162), False, 'import logging\n'), ((634, 690), 'os.path.join', 'os.path.join', (['DATASETS_ROOT', "('VOCdevkit/VOC20%s/' % year)"], {}), "(DATASETS_ROOT, 'VOCdevkit/VOC20%s/' % year)\n", (646, 690), False, 'import os\n'), ((1978, 2030), 'xml.etree.ElementTree.parse', 'ET.parse', (["('%sAnnotations/%s.xml' % (self.root, name))"], {}), "('%sAnnotations/%s.xml' % (self.root, name))\n", (1986, 2030), True, 'import xml.etree.ElementTree as ET\n'), ((2718, 2732), 'numpy.array', 'np.array', (['cats'], {}), '(cats)\n', (2726, 2732), True, 'import numpy as np\n'), ((2817, 2837), 'numpy.array', 'np.array', (['difficulty'], {}), '(difficulty)\n', (2825, 2837), True, 'import numpy as np\n'), ((1751, 1763), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1759, 1763), True, 'import numpy as np\n'), ((3516, 3549), 'numpy.array', 'np.array', (['seg_map'], {'dtype': 'np.uint8'}), '(seg_map, dtype=np.uint8)\n', (3524, 3549), True, 'import numpy as np\n'), ((1411, 1457), 'os.path.join', 'os.path.join', (['self.root', '(filelist % self.split)'], {}), '(self.root, filelist % self.split)\n', (1423, 1457), False, 'import os\n'), ((1669, 1722), 'PIL.Image.open', 'Image.open', (["('%sJPEGImages/%s.jpg' % (self.root, name))"], {}), "('%sJPEGImages/%s.jpg' % (self.root, name))\n", (1679, 1722), False, 'from PIL import Image\n'), ((2753, 2769), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (2761, 2769), True, 'import numpy as np\n'), ((3240, 3260), 'PIL.Image.open', 'Image.open', (['seg_file'], {}), '(seg_file)\n', (3250, 3260), False, 'from PIL import Image\n'), ((3757, 3798), 'numpy.zeros', 'np.zeros', (['[height, width]'], {'dtype': 'np.uint8'}), '([height, width], dtype=np.uint8)\n', (3765, 3798), True, 'import numpy as np\n'), ((3468, 3488), 'PIL.Image.open', 'Image.open', (['seg_file'], {}), '(seg_file)\n', (3478, 3488), False, 'from PIL import Image\n')] |
import numpy as np
from skmultiflow.drift_detection import ADWIN
def demo():
    """Run an ADWIN change detector over a stream with one concept drift.

    The first ~1000 samples are uniform random draws from {0, 1}; the
    remaining samples are uniform random draws from {0, ..., 7}.  Every
    sample is fed to the detector, and a message is printed for each index
    at which ADWIN reports a change.
    """
    detector = ADWIN()
    n_samples = 2000
    drift_start = 999
    np.random.seed(1)
    stream = np.random.randint(2, size=n_samples)
    # Overwrite the tail with draws from a wider range so that the detector
    # has a genuine distribution change to find.
    stream[drift_start:] = np.random.randint(8, size=n_samples - drift_start)
    for idx, sample in enumerate(stream):
        detector.add_element(sample)
        if detector.detected_change():
            print('Change has been detected in data: ' + str(sample) + ' - of index: ' + str(idx))


if __name__ == '__main__':
    demo()
| [
"skmultiflow.drift_detection.ADWIN",
"numpy.random.randint",
"numpy.random.seed"
] | [((463, 470), 'skmultiflow.drift_detection.ADWIN', 'ADWIN', ([], {}), '()\n', (468, 470), False, 'from skmultiflow.drift_detection import ADWIN\n'), ((514, 531), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (528, 531), True, 'import numpy as np\n'), ((550, 581), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'size'}), '(2, size=size)\n', (567, 581), True, 'import numpy as np\n'), ((615, 661), 'numpy.random.randint', 'np.random.randint', (['(8)'], {'size': '(size - change_start)'}), '(8, size=size - change_start)\n', (632, 661), True, 'import numpy as np\n')] |
"""Common components for styled output.
This modules contains things that would be shared across outputters if there
were any besides Tabular. The Tabular class, though, still contains a good
amount of general logic that should be extracted if any other outputter is
actually added.
"""
from collections import defaultdict
from collections import namedtuple
from collections import OrderedDict
from collections.abc import Mapping
from collections.abc import Sequence
from functools import partial
import inspect
from logging import getLogger
from pyout import elements
from pyout.field import Field
from pyout.field import Nothing
from pyout.truncate import Truncater
from pyout.summary import Summary
# Module-level logger used for the debug tracing throughout this module.
lgr = getLogger(__name__)
# Shared placeholder for a missing column value when the column does not
# configure its own "missing" string (see RowNormalizer.__init__).
NOTHING = Nothing()
class UnknownColumns(Exception):
    """Raised when a row contains columns that were never declared.

    Parameters
    ----------
    unknown_columns : list
        Names of the columns that are not known to the writer.
    """

    def __init__(self, unknown_columns):
        self.unknown_columns = unknown_columns
        message = "Unknown columns: {}".format(unknown_columns)
        super(UnknownColumns, self).__init__(message)
class RowNormalizer(object):
    """Transform various input data forms to a common form.
    An un-normalized row can be one of three kinds:
    * a mapping from column names to keys
    * a sequence of values in the same order as `columns`
    * any other value will be taken as an object where the column values can
      be accessed via an attribute with the same name
    To normalize a row, it is
    * converted to a dict that maps from column names to values
    * all callables are stripped out and replaced with their initial values
    * if the value for a column is missing, it is replaced with a Nothing
      instance whose value is specified by the column's style (an empty
      string by default)
    Parameters
    ----------
    columns : sequence of str
        Column names.
    style : dict, optional
        Column styles.
    Attributes
    ----------
    method : callable
        A function that takes a row and returns a normalized one. This is
        chosen at time of the first call. All subsequent calls should use the
        same kind of row.
    nothings : dict
        Maps column name to the placeholder value to use if that column is
        missing.
    """
    def __init__(self, columns, style):
        self._columns = columns
        self.method = None
        # Maps a delay "group" to the list of columns in it.  A column whose
        # style has "delayed": True forms its own group; any other value is
        # used as a shared group name.
        self.delayed = defaultdict(list)
        self.delayed_columns = set()
        self.nothings = {} # column => missing value
        self._known_columns = set()
        for column in columns:
            cstyle = style[column]
            if "delayed" in cstyle:
                lgr.debug("Registered delay for column %r", column)
                value = cstyle["delayed"]
                group = column if value is True else value
                self.delayed[group].append(column)
                self.delayed_columns.add(column)
            if "missing" in cstyle:
                self.nothings[column] = Nothing(cstyle["missing"])
            else:
                self.nothings[column] = NOTHING
    def __call__(self, row):
        """Normalize `row`
        Parameters
        ----------
        row : mapping, sequence, or other
            Data to normalize.
        Returns
        -------
        A tuple (callables, row), where `callables` is a list (as returned by
        `strip_callables`) and `row` is the normalized row.
        """
        # The getter strategy is picked from the first row seen and reused
        # for all subsequent rows.
        if self.method is None:
            self.method = self._choose_normalizer(row)
        return self.method(row)
    def _choose_normalizer(self, row):
        # Select the accessor that matches the row's shape (dict-like,
        # sequence, or arbitrary object with attributes).
        if isinstance(row, Mapping):
            getter = self.getter_dict
        elif isinstance(row, Sequence):
            getter = self.getter_seq
        else:
            getter = self.getter_attrs
        lgr.debug("Selecting %s as normalizer", getter.__name__)
        return partial(self._normalize, getter)
    def _normalize(self, getter, row):
        columns = self._columns
        if isinstance(row, Mapping):
            callables0 = self.strip_callables(row)
            # The row may have new columns. All we're doing here is keeping
            # them around in the normalized row so that downstream code can
            # react to them.
            known = self._known_columns
            new_cols = [c for c in row.keys() if c not in known]
            if new_cols:
                if isinstance(self._columns, OrderedDict):
                    columns = list(self._columns)
                columns = columns + new_cols
        else:
            callables0 = []
        norm_row = self._maybe_delay(getter, row, columns)
        # We need a second pass with strip_callables because norm_row will
        # contain new callables for any delayed values.
        callables1 = self.strip_callables(norm_row)
        return callables0 + callables1, norm_row
    def _maybe_delay(self, getter, row, columns):
        # Non-delayed columns are fetched eagerly; each delay group becomes a
        # single zero-argument callable keyed by the column name (or a tuple
        # of names for a multi-column group).
        row_norm = {}
        for column in columns:
            if column not in self.delayed_columns:
                row_norm[column] = getter(row, column)
        def delay(cols):
            return lambda: {c: getter(row, c) for c in cols}
        for cols in self.delayed.values():
            key = cols[0] if len(cols) == 1 else tuple(cols)
            lgr.debug("Delaying %r for row %r", cols, row)
            row_norm[key] = delay(cols)
        return row_norm
    def strip_callables(self, row):
        """Extract callable values from `row`.
        Replace the callable values with the initial value (if specified) or
        an empty string.

        Note that `row` is modified in place: callables are replaced by
        their initial values, and tuple keys (multi-column groups) are
        expanded into one entry per column.
        Parameters
        ----------
        row : mapping
            A data row. The keys are either a single column name or a tuple of
            column names. The values take one of three forms: 1) a
            non-callable value, 2) a tuple (initial_value, callable), 3) or a
            single callable (in which case the initial value is set to an empty
            string).
        Returns
        -------
        list of (column, callable)
        """
        callables = []
        to_delete = []
        to_add = []
        for columns, value in row.items():
            if isinstance(value, tuple):
                initial, fn = value
            else:
                initial = NOTHING
                # Value could be a normal (non-callable) value or a
                # callable with no initial value.
                fn = value
            if callable(fn) or inspect.isgenerator(fn):
                lgr.debug("Using %r as the initial value "
                          "for columns %r in row %r",
                          initial, columns, row)
                if not isinstance(columns, tuple):
                    columns = columns,
                else:
                    to_delete.append(columns)
                for column in columns:
                    to_add.append((column, initial))
                callables.append((columns, fn))
        # Mutations are applied after iteration to avoid changing the dict
        # while it is being traversed.
        for column, value in to_add:
            row[column] = value
        for multi_columns in to_delete:
            del row[multi_columns]
        return callables
    # Input-specific getters. These exist as their own methods so that they
    # can be wrapped in a callable and delayed.
    def getter_dict(self, row, column):
        # Note: We .get() from `nothings` because `row` is permitted to have an
        # unknown column.
        return row.get(column, self.nothings.get(column, NOTHING))
    def getter_seq(self, row, column):
        # NOTE(review): the name->index map is rebuilt on every call; could be
        # cached if sequence rows turn out to be common.
        col_to_idx = {c: idx for idx, c in enumerate(self._columns)}
        return row[col_to_idx[column]]
    def getter_attrs(self, row, column):
        return getattr(row, column, self.nothings[column])
class StyleFields(object):
    """Generate Fields based on the specified style and processors.
    Parameters
    ----------
    style : dict
        A style that follows the schema defined in pyout.elements.
    procgen : StyleProcessors instance
        This instance is used to generate the fields from `style`.
    """
    def __init__(self, style, procgen):
        self.init_style = style
        self.procgen = procgen
        self.style = None
        self.columns = None
        # column => {"min": ..., "max": ..., "weight": ...} for columns whose
        # width is negotiated at render time (see _setup_fields).
        self.autowidth_columns = {}
        self._known_columns = set()
        # Total width consumed by fixed-width columns plus separators.
        self.width_fixed = None
        # NOTE(review): "separtor" is a long-standing misspelling; kept as-is
        # because the attribute name is part of this class's surface.
        self.width_separtor = None
        self.fields = None
        self._truncaters = {}
        # column => the column's "hide" style value; render() compares it to
        # "if_missing" and to plain truthiness.
        self.hidden = {}
        self._visible_columns = None # cached list of visible columns
        self._table_width = None
    def build(self, columns, table_width=None):
        """Build the style and fields.
        Parameters
        ----------
        columns : list of str
            Column names.
        table_width : int, optional
            Table width to use instead of the previously specified width.
        """
        self.columns = columns
        self._known_columns = set(columns)
        default = dict(elements.default("default_"),
                       **self.init_style.get("default_", {}))
        self.style = elements.adopt({c: default for c in columns},
                                    self.init_style)
        # Store special keys in _style so that they can be validated.
        self.style["default_"] = default
        self.style["header_"] = self._compose("header_", {"align", "width"})
        self.style["aggregate_"] = self._compose("aggregate_",
                                                 {"align", "width"})
        self.style["separator_"] = self.init_style.get(
            "separator_", elements.default("separator_"))
        lgr.debug("Validating style %r", self.style)
        if table_width is not None:
            self._table_width = table_width
        elif self._table_width is None:
            self._table_width = self.init_style.get(
                "width_", elements.default("width_"))
        self.style["width_"] = self._table_width
        elements.validate(self.style)
        self._setup_fields()
        self.hidden = {c: self.style[c]["hide"] for c in columns}
        self._reset_width_info()
    def _compose(self, name, attributes):
        """Construct a style taking `attributes` from the column styles.
        Parameters
        ----------
        name : str
            Name of main style (e.g., "header_").
        attributes : set of str
            Adopt these elements from the column styles.
        Returns
        -------
        The composite style for `name`, or None when the style does not
        define `name` (and no default exists for it).
        """
        name_style = self.init_style.get(name, elements.default(name))
        if self.init_style is not None and name_style is not None:
            result = {}
            for col in self.columns:
                cstyle = {k: v for k, v in self.style[col].items()
                          if k in attributes}
                result[col] = dict(cstyle, **name_style)
            return result
    def _setup_fields(self):
        # Create a Field (with pre/post processors and a truncater) for every
        # column, registering auto-width columns in self.autowidth_columns.
        fields = {}
        style = self.style
        width_table = style["width_"]
        def frac_to_int(x):
            # A width in (0, 1) is interpreted as a fraction of the table.
            if x and 0 < x < 1:
                result = int(x * width_table)
                lgr.debug("Converted fraction %f to %d", x, result)
            else:
                result = x
            return result
        for column in self.columns:
            lgr.debug("Setting up field for column %r", column)
            cstyle = style[column]
            style_width = cstyle["width"]
            # Convert atomic values into the equivalent complex form.
            if style_width == "auto":
                style_width = {}
            elif not isinstance(style_width, Mapping):
                style_width = {"width": style_width}
            is_auto = "width" not in style_width
            if is_auto:
                lgr.debug("Automatically adjusting width for %s", column)
                width = frac_to_int(style_width.get("min", 0))
                wmax = frac_to_int(style_width.get("max"))
                autoval = {"max": wmax, "min": width,
                           "weight": style_width.get("weight", 1)}
                self.autowidth_columns[column] = autoval
                lgr.debug("Stored auto-width value for column %r: %s",
                          column, autoval)
            else:
                if "min" in style_width or "max" in style_width:
                    raise ValueError(
                        "'min' and 'max' are incompatible with 'width'")
                width = frac_to_int(style_width["width"])
                lgr.debug("Setting width of column %r to %d",
                          column, width)
            # We are creating a distinction between "width" processors, that we
            # always want to be active and "default" processors that we want to
            # be active unless there's an overriding style (i.e., a header is
            # being written or the `style` argument to __call__ is specified).
            field = Field(width=width, align=cstyle["align"],
                          default_keys=["width", "default"],
                          other_keys=["override"])
            field.add("pre", "default",
                      *(self.procgen.pre_from_style(cstyle)))
            truncater = Truncater(
                width,
                style_width.get("marker", True),
                style_width.get("truncate", "right"))
            field.add("post", "width", truncater.truncate)
            field.add("post", "default",
                      *(self.procgen.post_from_style(cstyle)))
            fields[column] = field
            self._truncaters[column] = truncater
        self.fields = fields
    @property
    def has_header(self):
        """Whether the style specifies a header.
        """
        return self.style["header_"] is not None
    @property
    def visible_columns(self):
        """List of columns that are not marked as hidden.
        This value is cached and becomes invalid if column visibility has
        changed since the last `render` call.
        """
        if self._visible_columns is None:
            hidden = self.hidden
            self._visible_columns = [c for c in self.columns if not hidden[c]]
        return self._visible_columns
    def _check_widths(self):
        # Sanity-check that the visible columns can possibly fit into the
        # configured table width; raises StyleError otherwise.
        visible = self.visible_columns
        autowidth_columns = self.autowidth_columns
        width_table = self.style["width_"]
        if width_table is None:
            # The table is unbounded (non-interactive).
            return
        if len(visible) > width_table:
            raise elements.StyleError(
                "Number of visible columns exceeds available table width")
        width_fixed = self.width_fixed
        width_auto = width_table - width_fixed
        if width_auto < len(set(autowidth_columns).intersection(visible)):
            raise elements.StyleError(
                "The number of visible auto-width columns ({}) "
                "exceeds the available width ({})"
                .format(len(autowidth_columns), width_auto))
    def _set_fixed_widths(self):
        """Set fixed-width attributes.
        Previously calculated values are invalid if the number of visible
        columns changes. Call _reset_width_info() in that case.
        """
        visible = self.visible_columns
        ngaps = len(visible) - 1
        width_separtor = len(self.style["separator_"]) * ngaps
        lgr.debug("Calculated separator width as %d", width_separtor)
        autowidth_columns = self.autowidth_columns
        fields = self.fields
        # Fixed width = widths of all non-auto visible columns + separators.
        width_fixed = sum([sum(fields[c].width for c in visible
                               if c not in autowidth_columns),
                           width_separtor])
        lgr.debug("Calculated fixed width as %d", width_fixed)
        self.width_separtor = width_separtor
        self.width_fixed = width_fixed
    def _reset_width_info(self):
        """Reset visibility-dependent information.
        """
        self._visible_columns = None
        self._set_fixed_widths()
        self._check_widths()
    def _set_widths(self, row, proc_group):
        """Update auto-width Fields based on `row`.
        Parameters
        ----------
        row : dict
        proc_group : {'default', 'override'}
            Whether to consider 'default' or 'override' key for pre- and
            post-format processors.
        Returns
        -------
        True if any widths required adjustment.
        """
        autowidth_columns = self.autowidth_columns
        fields = self.fields
        width_table = self.style["width_"]
        width_fixed = self.width_fixed
        if width_table is None:
            width_auto = float("inf")
        else:
            width_auto = width_table - width_fixed
        if not autowidth_columns:
            return False
        # Check what width each row wants.
        lgr.debug("Checking width for row %r", row)
        hidden = self.hidden
        for column in autowidth_columns:
            if hidden[column]:
                lgr.debug("%r is hidden; setting width to 0",
                          column)
                autowidth_columns[column]["wants"] = 0
                continue
            field = fields[column]
            lgr.debug("Checking width of column %r (current field width: %d)",
                      column, field.width)
            # If we've added any style transform functions as pre-format
            # processors, we want to measure the width of their result rather
            # than the raw value.
            if field.pre[proc_group]:
                value = field(row[column], keys=[proc_group],
                              exclude_post=True)
            else:
                value = row[column]
            value = str(value)
            value_width = len(value)
            wmax = autowidth_columns[column]["max"]
            wmin = autowidth_columns[column]["min"]
            # A field never shrinks: take the max of the current width and
            # this value's width, clamp to [wmin, wmax].
            max_seen = max(value_width, field.width)
            requested_floor = max(max_seen, wmin)
            wants = min(requested_floor, wmax or requested_floor)
            lgr.debug("value=%r, value width=%d, old field length=%d, "
                      "min width=%s, max width=%s => wants=%d",
                      value, value_width, field.width, wmin, wmax, wants)
            autowidth_columns[column]["wants"] = wants
        # Considering those wants and the available width, assign widths to
        # each column.
        assigned = self._assign_widths(autowidth_columns, width_auto)
        # Set the assigned widths.
        adjusted = False
        for column, width_assigned in assigned.items():
            field = fields[column]
            width_current = field.width
            if width_assigned != width_current:
                adjusted = True
                field.width = width_assigned
                lgr.debug("Adjusting width of %r column from %d to %d ",
                          column, width_current, field.width)
                self._truncaters[column].length = field.width
        return adjusted
    @staticmethod
    def _assign_widths(columns, available):
        """Assign widths to auto-width columns.
        Parameters
        ----------
        columns : dict
            A dictionary where each key is an auto-width column. The value
            should be a dictionary with the following information:
              - wants: how much width the column wants
              - min: the minimum that the width should set to, provided there
                is enough room
              - weight: if present, a "weight" key indicates the number of
                available characters the column should claim at a time. This is
                only in effect after each column has claimed one, and the
                specific column has claimed its minimum.
        available : int or float('inf')
            Width available to be assigned.
        Returns
        -------
        Dictionary mapping each auto-width column to the assigned width.
        """
        # NOTE: The method below is not very clever and does unnecessary
        # iteration. It may end up being too slow, but at least it should
        # serve to establish the baseline (along with tests) that show the
        # desired behavior.
        assigned = {}
        # Make sure every column gets at least one.
        for column in columns:
            col_wants = columns[column]["wants"]
            if col_wants > 0:
                available -= 1
                assigned[column] = 1
        assert available >= 0, "bug: upstream checks should make impossible"
        weights = {c: columns[c].get("weight", 1) for c in columns}
        # ATTN: The sorting here needs to be stable across calls with the same
        # row so that the same assignments come out.
        colnames = sorted(assigned.keys(), reverse=True,
                          key=lambda c: (columns[c]["min"], weights[c], c))
        columns_in_need = set(assigned.keys())
        # Round-robin over still-needy columns, each claiming up to its
        # weight (or up to its minimum, if not yet reached) per pass.
        while available > 0 and columns_in_need:
            for column in colnames:
                if column not in columns_in_need:
                    continue
                col_wants = columns[column]["wants"] - assigned[column]
                if col_wants < 1:
                    columns_in_need.remove(column)
                    continue
                wmin = columns[column]["min"]
                has = assigned[column]
                claim = min(weights[column] if has >= wmin else wmin - has,
                            col_wants,
                            available)
                available -= claim
                assigned[column] += claim
                lgr.log(9, "Claiming %d characters (of %d available) for %s",
                        claim, available, column)
                if available == 0:
                    break
        lgr.debug("Available width after assigned: %s", available)
        lgr.debug("Assigned widths: %r", assigned)
        return assigned
    def _proc_group(self, style, adopt=True):
        """Return whether group is "default" or "override".
        In the case of "override", the self.fields pre-format and post-format
        processors will be set under the "override" key.
        Parameters
        ----------
        style : dict
            A style that follows the schema defined in pyout.elements.
        adopt : bool, optional
            Merge `self.style` and `style`, giving priority to the latter's
            keys when there are conflicts. If False, treat `style` as a
            standalone style.
        """
        fields = self.fields
        if style is not None:
            if adopt:
                style = elements.adopt(self.style, style)
            elements.validate(style)
            for column in self.columns:
                fields[column].add(
                    "pre", "override",
                    *(self.procgen.pre_from_style(style[column])))
                fields[column].add(
                    "post", "override",
                    *(self.procgen.post_from_style(style[column])))
            return "override"
        else:
            return "default"
    def _check_for_unknown_columns(self, row):
        # Raise UnknownColumns for any column not registered via build().
        known = self._known_columns
        # The sorted() call here isn't necessary, but it makes testing the
        # expected output easier without relying on the order-preserving
        # implementation detail of the new dict implementation introduced in
        # Python 3.6.
        cols_new = sorted(c for c in row if c not in known)
        if cols_new:
            raise UnknownColumns(cols_new)
    def render(self, row, style=None, adopt=True, can_unhide=True):
        """Render fields with values from `row`.
        Parameters
        ----------
        row : dict
            A normalized row.
        style : dict, optional
            A style that follows the schema defined in pyout.elements. If
            None, `self.style` is used.
        adopt : bool, optional
            Merge `self.style` and `style`, using the latter's keys
            when there are conflicts. If False, treat `style` as a
            standalone style.
        can_unhide : bool, optional
            Whether a non-missing value within `row` is able to unhide a column
            that is marked with "if_missing".
        Returns
        -------
        A tuple with the rendered value (str) and a flag that indicates whether
        the field widths required adjustment (bool).
        """
        self._check_for_unknown_columns(row)
        hidden = self.hidden
        any_unhidden = False
        if can_unhide:
            for c in row:
                val = row[c]
                if hidden[c] == "if_missing" and not isinstance(val, Nothing):
                    lgr.debug("Unhiding column %r after encountering %r",
                              c, val)
                    hidden[c] = False
                    any_unhidden = True
        if any_unhidden:
            self._reset_width_info()
        group = self._proc_group(style, adopt=adopt)
        if group == "override":
            # Override the "default" processor key.
            proc_keys = ["width", "override"]
        else:
            # Use the set of processors defined by _setup_fields.
            proc_keys = None
        adjusted = self._set_widths(row, group)
        cols = self.visible_columns
        proc_fields = ((self.fields[c], row[c]) for c in cols)
        # Exclude fields that weren't able to claim any width to avoid
        # surrounding empty values with separators.
        proc_fields = filter(lambda x: x[0].width > 0, proc_fields)
        proc_fields = (fld(val, keys=proc_keys) for fld, val in proc_fields)
        return self.style["separator_"].join(proc_fields) + "\n", adjusted
class RedoContent(Exception):
    """Signal that already-rendered content is stale and must be regenerated.

    Raised internally when rendering a row adjusted field widths, which
    invalidates lines rendered earlier with the old widths.
    """
class ContentError(Exception):
    """An error occurred while building the content representation.

    For example, raised when the row ID columns are not hashable.
    """
ContentRow = namedtuple("ContentRow", ["row", "kwds"])
class Content(object):
    """Concatenation of rendered fields.
    Parameters
    ----------
    fields : StyleField instance
    """
    def __init__(self, fields):
        self.fields = fields
        self.summary = None
        self.columns = None
        # Column names whose values uniquely identify a row.
        self.ids = None
        # ContentRow for the header line, if the style defines one.
        self._header = None
        self._rows = []
        # Bidirectional bookkeeping between a row's ID-key tuple and its
        # positional index in _rows.
        self._idkey_to_idx = {}
        self._idx_to_idkey = {}
    def init_columns(self, columns, ids, table_width=None):
        """Set up the fields for `columns`.
        Parameters
        ----------
        columns : sequence or OrderedDict
            Names of the column. In the case of an OrderedDict, a map between
            short and long names.
        ids : sequence
            A collection of column names that uniquely identify a column.
        table_width : int, optional
            Update the table width to this value.
        """
        self.fields.build(columns, table_width=table_width)
        self.columns = columns
        self.ids = ids
        if self._rows:
            # There are pre-existing rows, so this init_columns() call was due
            # to encountering unknown columns. Fill in the previous rows.
            style = self.fields.style
            for row in self._rows:
                for col in columns:
                    if col not in row.row:
                        cstyle = style[col]
                        if "missing" in cstyle:
                            missing = Nothing(cstyle["missing"])
                        else:
                            missing = NOTHING
                        row.row[col] = missing
        if self.fields.has_header:
            self._add_header()
    def __len__(self):
        return len(list(self.rows))
    def __bool__(self):
        # True once at least one data row has been added (header excluded).
        return bool(self._rows)
    def __getitem__(self, key):
        # `key` is an ID-key tuple; returns the stored (mutable) row dict.
        idx = self._idkey_to_idx[key]
        return self._rows[idx].row
    @property
    def rows(self):
        """Data and summary rows.
        """
        if self._header:
            yield self._header
        for i in self._rows:
            yield i
    def _render(self, rows):
        # Generator of rendered lines; raises RedoContent at the end if any
        # line triggered a width adjustment (making earlier lines stale).
        adjusted = []
        for row, kwds in rows:
            line, adj = self.fields.render(row, **kwds)
            yield line
            # Continue processing so that we get all the adjustments out of
            # the way.
            adjusted.append(adj)
        if any(adjusted):
            raise RedoContent
    def __str__(self):
        # Retry rendering up to three times: each RedoContent pass settles
        # the widths further; re-raise if that still isn't enough.
        for redo in range(3):
            try:
                return "".join(self._render(self.rows))
            except RedoContent:
                # FIXME: Only one additional _render() call is supposed to
                # be necessary, but as f34696a7 (Detect changes in the
                # terminal width, 2020-08-17), it's not sufficient in some
                # cases (see gh-114). Until that is figured out, allow one
                # more (i.e., three in total), which appears to get rid of
                # the issue.
                if redo:
                    if redo == 1:
                        lgr.debug("One redo was not enough. Trying again")
                    else:
                        raise
    def get_idkey(self, idx):
        """Return ID keys for a row.
        Parameters
        ----------
        idx : int
            Index of row (determined by order it came in to `update`).
        Returns
        -------
        ID key (tuple) matching row. If there is a header, None is return as
        its ID key.
        Raises
        ------
        IndexError if `idx` does not match known row.
        """
        if self._header:
            idx -= 1
            if idx == -1:
                return None
        try:
            return self._idx_to_idkey[idx]
        except KeyError:
            msg = ("Index {!r} outside of current range: [0, {})"
                   .format(idx, len(self._idkey_to_idx)))
            raise IndexError(msg) from None
    def update(self, row, style):
        """Modify the content.
        Parameters
        ----------
        row : dict
            A normalized row. If the names specified by `self.ids` have
            already been seen in a previous call, the entry for the previous
            row is updated. Otherwise, a new entry is appended.
        style :
            Passed to `StyleFields.render`.
        Returns
        -------
        A tuple of (content, status), where status is 'append', an integer, or
        'repaint'.
        * append: the only change in the content is the addition of a line,
          and the returned content will consist of just this line.
        * an integer, N: the Nth line of the output needs to be update, and
          the returned content will consist of just this line.
        * repaint: all lines need to be updated, and the returned content
          will consist of all the lines.
        """
        called_before = bool(self)
        idkey = tuple(row[idx] for idx in self.ids)
        if not called_before and self.fields.has_header:
            # First row with a header: render header + row together.
            lgr.debug("Registering header")
            self._add_header()
            self._rows.append(ContentRow(row, kwds={"style": style}))
            self._idkey_to_idx[idkey] = 0
            self._idx_to_idkey[0] = idkey
            return str(self), "append"
        try:
            prev_idx = self._idkey_to_idx[idkey]
        except KeyError:
            prev_idx = None
        except TypeError:
            raise ContentError("ID columns must be hashable")
        if prev_idx is not None:
            lgr.debug("Updating content for row %r", idkey)
            # Only non-missing values overwrite the stored row.
            row_update = {k: v for k, v in row.items()
                          if not isinstance(v, Nothing)}
            self._rows[prev_idx].row.update(row_update)
            self._rows[prev_idx].kwds.update({"style": style})
            # Replace the passed-in row since it may not have all the columns.
            row = self._rows[prev_idx][0]
        else:
            lgr.debug("Adding row %r to content for first time", idkey)
            nrows = len(self._rows)
            self._idkey_to_idx[idkey] = nrows
            self._idx_to_idkey[nrows] = idkey
            self._rows.append(ContentRow(row, kwds={"style": style}))
        line, adjusted = self.fields.render(row, style)
        lgr.log(9, "Rendered line as %r", line)
        if called_before and adjusted:
            return str(self), "repaint"
        if not adjusted and prev_idx is not None:
            # has_header is a bool; adding it offsets for the header line.
            return line, prev_idx + self.fields.has_header
        return line, "append"
    def _add_header(self):
        # The header row maps each column name to its display label; it is
        # rendered with the "header_" style and can never be unhidden.
        if isinstance(self.columns, OrderedDict):
            row = self.columns
        else:
            row = dict(zip(self.columns, self.columns))
        self._header = ContentRow(row,
                                  kwds={"style": self.fields.style["header_"],
                                        "can_unhide": False,
                                        "adopt": False})
class ContentWithSummary(Content):
    """A `Content` variant whose `update` also returns rendered summary lines.
    """

    def __init__(self, fields):
        super(ContentWithSummary, self).__init__(fields)
        self.summary = None

    def init_columns(self, columns, ids, table_width=None):
        """Set up fields for `columns` and create the style-driven summary."""
        super(ContentWithSummary, self).init_columns(
            columns, ids, table_width=table_width)
        self.summary = Summary(self.fields.style)

    def update(self, row, style):
        """Update content with `row`, returning (content, status, summary).

        The first two elements match `Content.update`; the third is the
        rendered summary text, or None when no summary is configured.
        """
        lgr.log(9, "Updating with .summary set to %s", self.summary)
        content, status = super(ContentWithSummary, self).update(row, style)
        if not self.summary:
            return content, status, None
        data_rows = [entry.row for entry in self._rows]
        summ_rows = self.summary.summarize(
            self.fields.visible_columns, data_rows)

        def render_summary():
            return "".join(self._render(summ_rows))

        try:
            summ_content = render_summary()
        except RedoContent:
            # Rendering the summary adjusted field widths, so the main
            # content is stale and must be repainted along with the summary.
            return str(self), "repaint", render_summary()
        return content, status, summ_content
| [
"logging.getLogger",
"pyout.summary.Summary",
"collections.namedtuple",
"pyout.elements.StyleError",
"pyout.elements.default",
"pyout.elements.adopt",
"inspect.isgenerator",
"functools.partial",
"pyout.field.Nothing",
"collections.defaultdict",
"pyout.elements.validate",
"pyout.field.Field"
] | [((713, 732), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (722, 732), False, 'from logging import getLogger\n'), ((743, 752), 'pyout.field.Nothing', 'Nothing', ([], {}), '()\n', (750, 752), False, 'from pyout.field import Nothing\n'), ((26068, 26109), 'collections.namedtuple', 'namedtuple', (['"""ContentRow"""', "['row', 'kwds']"], {}), "('ContentRow', ['row', 'kwds'])\n", (26078, 26109), False, 'from collections import namedtuple\n'), ((2435, 2452), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2446, 2452), False, 'from collections import defaultdict\n'), ((3919, 3951), 'functools.partial', 'partial', (['self._normalize', 'getter'], {}), '(self._normalize, getter)\n', (3926, 3951), False, 'from functools import partial\n'), ((9119, 9181), 'pyout.elements.adopt', 'elements.adopt', (['{c: default for c in columns}', 'self.init_style'], {}), '({c: default for c in columns}, self.init_style)\n', (9133, 9181), False, 'from pyout import elements\n'), ((9990, 10019), 'pyout.elements.validate', 'elements.validate', (['self.style'], {}), '(self.style)\n', (10007, 10019), False, 'from pyout import elements\n'), ((33549, 33575), 'pyout.summary.Summary', 'Summary', (['self.fields.style'], {}), '(self.fields.style)\n', (33556, 33575), False, 'from pyout.summary import Summary\n'), ((9006, 9034), 'pyout.elements.default', 'elements.default', (['"""default_"""'], {}), "('default_')\n", (9022, 9034), False, 'from pyout import elements\n'), ((9621, 9651), 'pyout.elements.default', 'elements.default', (['"""separator_"""'], {}), "('separator_')\n", (9637, 9651), False, 'from pyout import elements\n'), ((10595, 10617), 'pyout.elements.default', 'elements.default', (['name'], {}), '(name)\n', (10611, 10617), False, 'from pyout import elements\n'), ((12987, 13092), 'pyout.field.Field', 'Field', ([], {'width': 'width', 'align': "cstyle['align']", 'default_keys': "['width', 'default']", 'other_keys': "['override']"}), "(width=width, 
align=cstyle['align'], default_keys=['width', 'default'],\n other_keys=['override'])\n", (12992, 13092), False, 'from pyout.field import Field\n'), ((14587, 14665), 'pyout.elements.StyleError', 'elements.StyleError', (['"""Number of visible columns exceeds available table width"""'], {}), "('Number of visible columns exceeds available table width')\n", (14606, 14665), False, 'from pyout import elements\n'), ((22755, 22779), 'pyout.elements.validate', 'elements.validate', (['style'], {}), '(style)\n', (22772, 22779), False, 'from pyout import elements\n'), ((3030, 3056), 'pyout.field.Nothing', 'Nothing', (["cstyle['missing']"], {}), "(cstyle['missing'])\n", (3037, 3056), False, 'from pyout.field import Nothing\n'), ((6507, 6530), 'inspect.isgenerator', 'inspect.isgenerator', (['fn'], {}), '(fn)\n', (6526, 6530), False, 'import inspect\n'), ((22709, 22742), 'pyout.elements.adopt', 'elements.adopt', (['self.style', 'style'], {}), '(self.style, style)\n', (22723, 22742), False, 'from pyout import elements\n'), ((9905, 9931), 'pyout.elements.default', 'elements.default', (['"""width_"""'], {}), "('width_')\n", (9921, 9931), False, 'from pyout import elements\n'), ((27576, 27602), 'pyout.field.Nothing', 'Nothing', (["cstyle['missing']"], {}), "(cstyle['missing'])\n", (27583, 27602), False, 'from pyout.field import Nothing\n')] |
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import re
from typing import Optional, Union, Dict, Any, List, Tuple, Sequence, TYPE_CHECKING
from sys import stderr as system_error_stream
import numpy as np
try:
from numpy.typing import ArrayLike
except ImportError:
from numpy import ndarray as ArrayLike
import warnings
from sys import stderr as system_error_stream
import os
import builtins
fileiotype = Union[str, bytes, os.PathLike]
import itk.support.types as itkt
from .helpers import wasm_type_from_image_type, image_type_from_wasm_type
from .helpers import wasm_type_from_mesh_type, mesh_type_from_wasm_type, python_to_js
from .helpers import wasm_type_from_pointset_type, pointset_type_from_wasm_type
if TYPE_CHECKING:
try:
import xarray as xr
except ImportError:
pass
try:
import vtk
except ImportError:
pass
# Public API of this support module: NumPy/xarray/VTK bridge functions,
# image/mesh/point-set (de)serialization helpers, and file I/O entry points.
# Both the legacy CamelCase names and their snake_case aliases are exported.
__all__ = [
    "output",
    "image",
    "set_nthreads",
    "get_nthreads",
    "echo",
    "size",
    "physical_size",
    "spacing",
    "origin",
    "index",
    "region",
    "GetArrayFromImage",
    "array_from_image",
    "GetArrayViewFromImage",
    "array_view_from_image",
    "GetImageFromArray",
    "image_from_array",
    "GetImageViewFromArray",
    "image_view_from_array",
    "array_from_vector_container",
    "array_view_from_vector_container",
    "vector_container_from_array",
    "GetArrayFromVnlVector",
    "array_from_vnl_vector",
    "GetVnlVectorFromArray",
    "vnl_vector_from_array",
    "GetArrayViewFromVnlVector",
    "array_view_from_vnl_vector",
    "GetVnlMatrixFromArray",
    "vnl_matrix_from_array",
    "GetArrayFromVnlMatrix",
    "array_from_vnl_matrix",
    "GetArrayViewFromVnlMatrix",
    "array_view_from_vnl_matrix",
    "GetArrayFromMatrix",
    "array_from_matrix",
    "GetMatrixFromArray",
    "matrix_from_array",
    "xarray_from_image",
    "image_from_xarray",
    "vtk_image_from_image",
    "image_from_vtk_image",
    "dict_from_image",
    "image_from_dict",
    "image_intensity_min_max",
    "imwrite",
    "imread",
    "meshwrite",
    "meshread",
    "mesh_from_dict",
    "dict_from_mesh",
    "pointset_from_dict",
    "dict_from_pointset",
    "transformwrite",
    "transformread",
    "search",
    "set_inputs",
    "templated_class",
    "pipeline",
    "auto_pipeline",
    "down_cast",
    "template",
    "class_",
    "ctype",
    "python_type",
    "range",
    "TemplateTypeError",
]
def output(input):
    """Return the output image of *input* if it is a process object.

    Objects exposing a ``GetOutput()`` method (filters/sources) are unwrapped
    to their output; any other value is returned unchanged.
    """
    if not hasattr(input, "GetOutput"):
        return input
    return input.GetOutput()
def image(input):
    """Deprecated alias for ``itk.output()``; warns, then delegates."""
    warnings.warn(
        "WrapITK warning: itk.image() is deprecated. "
        "Use itk.output() instead."
    )
    return output(input)
def set_nthreads(number_of_threads: int) -> None:
    """Set ITK's global default number of threads.

    Use example (in python):
        import itk
        itk.set_nthreads(4)  ## use 4 threads
    """
    # Guard against nonsensical thread counts before touching ITK at all.
    assert number_of_threads > 0, (
        "Please set a positive number of threads instead of %d" % number_of_threads
    )
    import itk

    itk.MultiThreaderBase.New().SetGlobalDefaultNumberOfThreads(number_of_threads)
def get_nthreads() -> int:
    """Return ITK's global default number of threads."""
    import itk

    return itk.MultiThreaderBase.New().GetGlobalDefaultNumberOfThreads()
def echo(obj, f=system_error_stream) -> None:
    """Print *obj* to the stream *f* (standard error by default).

    BUG FIX: the previous implementation called ``print(f, obj)``, which
    printed the stream object itself followed by *obj* to standard output
    instead of writing *obj* to *f*.
    """
    print(obj, file=f)
def size(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[int]:
    """Return the size of an image, or of the output image of a filter.

    Only the output information is refreshed; the pixel data itself is
    not computed.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetLargestPossibleRegion().GetSize()
def physical_size(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
    """Return the physical extent (size * spacing) per axis of an image,
    or of the output image of a filter.

    This method takes care of updating the needed information.
    """
    # `range` is shadowed by a function in this module; use the builtin.
    from builtins import range

    l_spacing = spacing(image_or_filter)
    l_size = size(image_or_filter)
    return [
        l_spacing.GetElement(axis) * l_size.GetElement(axis)
        for axis in range(0, l_spacing.Size())
    ]
def spacing(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
    """Return the spacing of an image, or of the output image of a filter.

    Only the output information is refreshed, not the pixel data.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetSpacing()
def origin(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[float]:
    """Return the origin of an image, or of the output image of a filter.

    Only the output information is refreshed, not the pixel data.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetOrigin()
def index(image_or_filter: "itkt.ImageOrImageSource") -> Sequence[int]:
    """Return the start index of an image, or of the output image of a filter.

    Only the output information is refreshed, not the pixel data.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetLargestPossibleRegion().GetIndex()
def region(image_or_filter: "itkt.ImageOrImageSource") -> "itkt.ImageRegion":
    """Return the largest possible region of an image, or of the output
    image of a filter.

    Only the output information is refreshed, not the pixel data.
    """
    import itk

    image_or_filter.UpdateOutputInformation()
    return itk.output(image_or_filter).GetLargestPossibleRegion()
def _get_itk_pixelid(numpy_array_type):
    """Return the ITK pixel type matching the dtype of a numpy array."""
    import itk

    def _long_type():
        # 64-bit unsigned maps to a different ITK alias on Windows.
        if os.name == "nt":
            return itk.ULL
        return itk.UL

    # Mapping from numpy scalar types to itk pixel types.
    numpy_to_itk = {
        np.uint8: itk.UC,
        np.uint16: itk.US,
        np.uint32: itk.UI,
        np.uint64: _long_type(),
        np.int8: itk.SC,
        np.int16: itk.SS,
        np.int32: itk.SI,
        np.int64: itk.SL,
        np.float32: itk.F,
        np.float64: itk.D,
        np.complex64: itk.complex[itk.F],
        np.complex128: itk.complex[itk.D],
    }
    try:
        return numpy_to_itk[numpy_array_type.dtype.type]
    except KeyError as err:
        # Fall back to the first mapped dtype the array's dtype derives from.
        for candidate in numpy_to_itk:
            if np.issubdtype(numpy_array_type.dtype.type, candidate):
                return numpy_to_itk[candidate]
        raise err
def _GetArrayFromImage(
    image_or_filter, function_name: str, keep_axes: bool, update: bool, ttype
) -> np.ndarray:
    """Invoke the itk.PyBuffer bridge function *function_name* on an image.

    The image type is taken from *ttype* when given, otherwise from the
    (filter) output object itself.
    """
    import itk

    img = itk.output(image_or_filter)
    if ttype is None:
        ImageType = img.__class__
    elif isinstance(ttype, (tuple, list)):
        if len(ttype) != 1:
            raise RuntimeError("Expected 1 component in ttype tuple.")
        ImageType = ttype[0]
    else:
        ImageType = ttype
    matches = [k for k in itk.PyBuffer.keys() if k[0] == ImageType]
    if not matches:
        raise RuntimeError("No suitable template parameter can be found.")
    # Dispatch to the templated NumPy-bridge function for this image type.
    bridge = getattr(itk.PyBuffer[matches[0]], function_name)
    return bridge(img, keep_axes, update)
def GetArrayFromImage(
    image_or_filter: "itkt.ImageOrImageSource",
    keep_axes: bool = False,
    update: bool = True,
    ttype=None,
) -> np.ndarray:
    """Return a NumPy array copy of the image buffer contents."""
    return _GetArrayFromImage(
        image_or_filter, "GetArrayFromImage", keep_axes, update, ttype
    )


array_from_image = GetArrayFromImage
def GetArrayViewFromImage(
    image_or_filter: "itkt.ImageOrImageSource",
    keep_axes: bool = False,
    update: bool = True,
    ttype=None,
) -> np.ndarray:
    """Return a NumPy array view of the image buffer contents."""
    return _GetArrayFromImage(
        image_or_filter, "GetArrayViewFromImage", keep_axes, update, ttype
    )


array_view_from_image = GetArrayViewFromImage
def _GetImageFromArray(arr: ArrayLike, function_name: str, is_vector: bool, ttype):
    """Get an ITK image from a Python array.

    Resolves the target itk image type either from the explicit `ttype`
    (mutually exclusive with `is_vector`) or from the array's dtype and
    dimensionality, then dispatches to the itk.PyBuffer bridge function
    named by `function_name`.
    """
    import itk

    # Verify inputs
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    if ttype is not None:
        # An explicit image type fully determines vector-ness; passing
        # both would be ambiguous.
        if is_vector:
            raise RuntimeError("Cannot specify both `is_vector` and `ttype`.")
        if isinstance(ttype, (tuple, list)):
            if len(ttype) != 1:
                raise RuntimeError("Expected 1 component in ttype tuple.")
            ImageType = ttype[0]
        else:
            ImageType = ttype
        if type(itk.template(ImageType)) != tuple or len(itk.template(ImageType)) < 2:
            raise RuntimeError("Cannot determine pixel type from supplied ttype.")
        # Vector image if the pixel type is itself templated (e.g. RGBPixel)
        # or the image template is itk.VectorImage.
        is_vector = (
            type(itk.template(ImageType)[1][0]) != itk.support.types.itkCType
            or itk.template(ImageType)[0] == itk.VectorImage
        )
    else:
        PixelType = _get_itk_pixelid(arr)
        Dimension = arr.ndim
        if is_vector:
            # The component axis does not count as an image dimension.
            Dimension = arr.ndim - 1
            # Component axis position depends on memory layout:
            # last axis for C-contiguous arrays, first otherwise.
            if arr.flags["C_CONTIGUOUS"]:
                VectorDimension = arr.shape[-1]
            else:
                VectorDimension = arr.shape[0]
            if PixelType == itk.UC:
                # 3/4-component uint8 arrays map to RGB/RGBA images.
                if VectorDimension == 3:
                    ImageType = itk.Image[itk.RGBPixel[itk.UC], Dimension]
                elif VectorDimension == 4:
                    ImageType = itk.Image[itk.RGBAPixel[itk.UC], Dimension]
                else:
                    ImageType = itk.VectorImage[PixelType, Dimension]
            else:
                ImageType = itk.VectorImage[PixelType, Dimension]
        else:
            ImageType = itk.Image[PixelType, Dimension]
    keys = [k for k in itk.PyBuffer.keys() if k[0] == ImageType]
    if len(keys) == 0:
        raise RuntimeError(
            """No suitable template parameter can be found.
Please specify an output type via the 'ttype' keyword parameter."""
        )
    templatedFunction = getattr(itk.PyBuffer[keys[0]], function_name)
    return templatedFunction(arr, is_vector)
def GetImageFromArray(
    arr: ArrayLike, is_vector: bool = False, ttype=None
) -> "itkt.ImageBase":
    """Build an ITK image whose buffer is a copy of the given array."""
    return _GetImageFromArray(arr, "GetImageFromArray", is_vector, ttype)


image_from_array = GetImageFromArray
def GetImageViewFromArray(
    arr: ArrayLike, is_vector: bool = False, ttype=None
) -> "itkt.ImageBase":
    """Build an ITK image view of the given array (no copy)."""
    return _GetImageFromArray(arr, "GetImageViewFromArray", is_vector, ttype)


image_view_from_array = GetImageViewFromArray
def array_from_vector_container(
    container: "itkt.VectorContainer", ttype=None
) -> np.ndarray:
    """Return a NumPy array copy of the vector container's contents."""
    import itk

    container_template = itk.template(container)
    IndexType = container_template[1][0]
    # The element data type comes from ttype when given, otherwise from
    # the container's own template arguments.
    if ttype is None:
        DataType = container_template[1][1]
    elif isinstance(ttype, (tuple, list)):
        if len(ttype) != 1:
            raise RuntimeError("Expected 1 component in ttype tuple.")
        DataType = ttype[0]
    else:
        DataType = ttype
    matches = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
    if not matches:
        raise RuntimeError("No suitable template parameter can be found.")
    return itk.PyVectorContainer[matches[0]].array_from_vector_container(container)
def array_view_from_vector_container(
    container: "itkt.VectorContainer", ttype=None
) -> np.ndarray:
    """Return a NumPy array view of the vector container's contents."""
    import itk

    container_template = itk.template(container)
    IndexType = container_template[1][0]
    # The element data type comes from ttype when given, otherwise from
    # the container's own template arguments.
    if ttype is None:
        DataType = container_template[1][1]
    elif isinstance(ttype, (tuple, list)):
        if len(ttype) != 1:
            raise RuntimeError("Expected 1 component in ttype tuple.")
        DataType = ttype[0]
    else:
        DataType = ttype
    matches = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
    if not matches:
        raise RuntimeError("No suitable template parameter can be found.")
    return itk.PyVectorContainer[matches[0]].array_view_from_vector_container(container)
def vector_container_from_array(arr: ArrayLike, ttype=None) -> "itkt.VectorContainer":
    """Build an itk.VectorContainer from a Python array."""
    import itk

    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    # Containers use a 64-bit index type; the ITK alias differs on Windows.
    IndexType = itk.ULL if os.name == "nt" else itk.UL
    # Element data type: explicit ttype wins, otherwise deduce from dtype.
    if ttype is None:
        DataType = _get_itk_pixelid(arr)
    elif isinstance(ttype, (tuple, list)):
        if len(ttype) != 1:
            raise RuntimeError("Expected 1 component in ttype tuple.")
        DataType = ttype[0]
    else:
        DataType = ttype
    matches = [k for k in itk.PyVectorContainer.keys() if k == (IndexType, DataType)]
    if not matches:
        raise RuntimeError("No suitable template parameter can be found.")
    return itk.PyVectorContainer[matches[0]].vector_container_from_array(arr)
def _GetArrayFromVnlObject(vnl_object, function_name: str, ttype) -> np.ndarray:
    """Invoke the itk.PyVnl bridge function *function_name* on a vnl object."""
    import itk

    # Pixel type: explicit ttype wins, otherwise read the object's template.
    if ttype is None:
        PixelType = itk.template(vnl_object)[1][0]
    elif isinstance(ttype, (tuple, list)):
        if len(ttype) != 1:
            raise RuntimeError("Expected 1 component in ttype tuple.")
        PixelType = ttype[0]
    else:
        PixelType = ttype
    matches = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
    if not matches:
        raise RuntimeError("No suitable template parameter can be found.")
    helper = getattr(itk.PyVnl[matches[0]], function_name)
    return helper(vnl_object)
def GetArrayFromVnlVector(vnl_vector, ttype=None) -> np.ndarray:
    """Return a NumPy array copy of the contents of *vnl_vector*."""
    return _GetArrayFromVnlObject(vnl_vector, "GetArrayFromVnlVector", ttype)


array_from_vnl_vector = GetArrayFromVnlVector
def GetArrayViewFromVnlVector(vnl_vector, ttype=None) -> np.ndarray:
    """Get an array view of vnl_vector"""
    return _GetArrayFromVnlObject(vnl_vector, "GetArrayViewFromVnlVector", ttype)


# BUG FIX: this snake_case alias previously pointed at GetArrayFromVnlVector
# (the copying variant), so callers silently received a copy instead of a view.
array_view_from_vnl_vector = GetArrayViewFromVnlVector
def GetArrayFromVnlMatrix(vnl_matrix, ttype=None) -> np.ndarray:
    """Return a NumPy array copy of the contents of *vnl_matrix*."""
    return _GetArrayFromVnlObject(vnl_matrix, "GetArrayFromVnlMatrix", ttype)


array_from_vnl_matrix = GetArrayFromVnlMatrix
def GetArrayViewFromVnlMatrix(vnl_matrix, ttype=None) -> np.ndarray:
    """Return a NumPy array view of the contents of *vnl_matrix*."""
    return _GetArrayFromVnlObject(vnl_matrix, "GetArrayViewFromVnlMatrix", ttype)


array_view_from_vnl_matrix = GetArrayViewFromVnlMatrix
def _GetVnlObjectFromArray(arr: ArrayLike, function_name: str, ttype):
    """Build a vnl object from *arr* via the itk.PyVnl helper *function_name*."""
    import itk

    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    # Pixel type: explicit ttype wins, otherwise deduce from the dtype.
    if ttype is None:
        PixelType = _get_itk_pixelid(arr)
    elif isinstance(ttype, (tuple, list)):
        if len(ttype) != 1:
            raise RuntimeError("Expected 1 component in ttype tuple.")
        PixelType = ttype[0]
    else:
        PixelType = ttype
    matches = [k for k in itk.PyVnl.keys() if k[0] == PixelType]
    if not matches:
        raise RuntimeError("No suitable template parameter can be found.")
    return getattr(itk.PyVnl[matches[0]], function_name)(arr)
def GetVnlVectorFromArray(arr: ArrayLike, ttype=None):
    """Build a vnl vector from a Python array."""
    return _GetVnlObjectFromArray(arr, "GetVnlVectorFromArray", ttype)


vnl_vector_from_array = GetVnlVectorFromArray
def GetVnlMatrixFromArray(arr: ArrayLike, ttype=None):
    """Build a vnl matrix from a Python array."""
    return _GetVnlObjectFromArray(arr, "GetVnlMatrixFromArray", ttype)


vnl_matrix_from_array = GetVnlMatrixFromArray
def GetArrayFromMatrix(itk_matrix) -> np.ndarray:
    """Return a NumPy array copy of an itk.Matrix's contents."""
    as_vnl = itk_matrix.GetVnlMatrix().as_matrix()
    return GetArrayFromVnlMatrix(as_vnl)


array_from_matrix = GetArrayFromMatrix
def GetMatrixFromArray(arr: ArrayLike) -> "itkt.Matrix":
    """Build an itk.Matrix from a 2-D Python array."""
    import itk

    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    vnl_matrix = GetVnlMatrixFromArray(arr)
    PixelType = _get_itk_pixelid(arr)
    rows, cols = arr.shape[0], arr.shape[1]
    return itk.Matrix[PixelType, rows, cols](vnl_matrix)


matrix_from_array = GetMatrixFromArray
def xarray_from_image(l_image: "itkt.ImageOrImageSource") -> "xr.DataArray":
    """Convert an itk.Image to an xarray.DataArray.

    Origin and spacing metadata is preserved in the xarray's coords. The
    Direction is set in the `direction` attribute.
    Dims are labeled as `x`, `y`, `z`, `t`, and `c`.

    This interface is and behavior is experimental and is subject to possible
    future changes.

    BUG FIX (cleanup): `direction` was computed twice; the first result was
    never used and has been removed.
    """
    import xarray as xr
    import itk
    import numpy as np

    array_view = itk.array_view_from_image(l_image)
    l_spacing = itk.spacing(l_image)
    l_origin = itk.origin(l_image)
    l_size = itk.size(l_image)
    image_dimension = l_image.GetImageDimension()
    image_dims: Tuple[str, str, str] = ("x", "y", "z", "t")
    coords = {}
    # Physical coordinates per axis: origin + index * spacing.
    for l_index, dim in enumerate(image_dims[:image_dimension]):
        coords[dim] = np.linspace(
            l_origin[l_index],
            l_origin[l_index] + (l_size[l_index] - 1) * l_spacing[l_index],
            l_size[l_index],
            dtype=np.float64,
        )
    # NumPy buffers are indexed slowest-varying first, so reverse the dims.
    dims = list(reversed(image_dims[:image_dimension]))
    components = l_image.GetNumberOfComponentsPerPixel()
    if components > 1:
        # Multi-component pixels get a trailing component axis "c".
        dims.append("c")
        coords["c"] = np.arange(components, dtype=np.uint32)
    direction = np.flip(itk.array_from_matrix(l_image.GetDirection()))
    attrs = {"direction": direction}
    metadata = dict(l_image)
    # origin/spacing/direction are already encoded above; copy the rest.
    ignore_keys = {"direction", "origin", "spacing"}
    for key in metadata:
        if not key in ignore_keys:
            attrs[key] = metadata[key]
    data_array = xr.DataArray(array_view, dims=dims, coords=coords, attrs=attrs)
    return data_array
def image_from_xarray(data_array: "xr.DataArray") -> "itkt.ImageBase":
    """Convert an xarray.DataArray to an itk.Image.
    Metadata encoded with xarray_from_image is applied to the itk.Image.
    This interface is and behavior is experimental and is subject to possible
    future changes."""
    import numpy as np
    import itk

    # Only the canonical dim labels produced by xarray_from_image are accepted.
    if not {"t", "z", "y", "x", "c"}.issuperset(data_array.dims):
        raise ValueError('Unsupported dims, supported dims: "t", "z", "y", "x", "c".')
    image_dims = list({"t", "z", "y", "x"}.intersection(set(data_array.dims)))
    # Reverse-alphabetical happens to give the spatial order z, y, x (t first).
    image_dims.sort(reverse=True)
    image_dimension = len(image_dims)
    # Canonical axis order expected by the ITK NumPy bridge.
    ordered_dims = ("t", "z", "y", "x")[-image_dimension:]
    is_vector = "c" in data_array.dims
    if is_vector:
        # The component axis must be the fastest-varying (last) one.
        ordered_dims = ordered_dims + ("c",)
    values = data_array.values
    if ordered_dims != data_array.dims:
        # Reorder the array axes to the canonical order before wrapping.
        # `range` is shadowed in this module, hence builtins.range.
        dest = list(builtins.range(len(ordered_dims)))
        source = dest.copy()
        for ii in builtins.range(len(ordered_dims)):
            source[ii] = data_array.dims.index(ordered_dims[ii])
        values = np.moveaxis(values, source, dest).copy()
    itk_image = itk.image_view_from_array(values, is_vector=is_vector)
    l_origin = [0.0] * image_dimension
    l_spacing = [1.0] * image_dimension
    for l_index, dim in enumerate(image_dims):
        coords = data_array.coords[dim]
        if coords.shape[0] > 1:
            # Recover origin and (uniform) spacing from the coordinate values.
            # NOTE(review): assumes evenly spaced coords; only the first two
            # samples are inspected.
            l_origin[l_index] = float(coords[0])
            l_spacing[l_index] = float(coords[1]) - float(coords[0])
    # image_dims is slowest-first; ITK wants fastest-first (x, y, z).
    l_spacing.reverse()
    itk_image.SetSpacing(l_spacing)
    l_origin.reverse()
    itk_image.SetOrigin(l_origin)
    if "direction" in data_array.attrs:
        direction = data_array.attrs["direction"]
        # Undo the flip applied by xarray_from_image.
        itk_image.SetDirection(np.flip(direction))
    # Copy any remaining metadata attributes onto the image.
    ignore_keys = {"direction", "origin", "spacing"}
    for key in data_array.attrs:
        if not key in ignore_keys:
            itk_image[key] = data_array.attrs[key]
    return itk_image
def vtk_image_from_image(l_image: "itkt.ImageOrImageSource") -> "vtk.vtkImageData":
    """Convert an itk.Image to a vtk.vtkImageData.

    The pixel buffer is shared (via a flat view), and spacing, origin,
    dimensions and (on VTK >= 9) the direction matrix are copied over.
    """
    import itk
    import vtk
    from vtk.util.numpy_support import numpy_to_vtk

    array = itk.array_view_from_image(l_image)
    vtk_image = vtk.vtkImageData()
    # vtkImageData stores pixel data as a flat array with N components.
    data_array = numpy_to_vtk(array.reshape(-1))
    data_array.SetNumberOfComponents(l_image.GetNumberOfComponentsPerPixel())
    data_array.SetName("Scalars")
    # Always set Scalars for (future?) multi-component volume rendering
    vtk_image.GetPointData().SetScalars(data_array)
    dim = l_image.GetImageDimension()
    # vtkImageData is always 3-D; pad 2-D metadata with identity values.
    l_spacing = [1.0] * 3
    l_spacing[:dim] = l_image.GetSpacing()
    vtk_image.SetSpacing(l_spacing)
    l_origin = [0.0] * 3
    l_origin[:dim] = l_image.GetOrigin()
    vtk_image.SetOrigin(l_origin)
    dims = [1] * 3
    dims[:dim] = itk.size(l_image)
    vtk_image.SetDimensions(dims)
    # Copy direction matrix for VTK>=9
    import vtk
    if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
        l_direction = l_image.GetDirection()
        direction = itk.array_from_matrix(l_direction).flatten().tolist()
        if len(direction) == 4:
            # Change 2d matrix to 3d
            direction = [
                direction[0],
                direction[1],
                0.0,
                direction[2],
                direction[3],
                0.0,
                0.0,
                0.0,
                1.0,
            ]
        vtk_image.SetDirectionMatrix(direction)
    if l_image.GetImageDimension() == 3:
        # For 3-D vector/tensor pixel types, also register the buffer under
        # the matching VTK point-data attribute.
        PixelType = itk.template(l_image)[1][0]
        if PixelType == itk.Vector:
            vtk_image.GetPointData().SetVectors(data_array)
        elif PixelType == itk.CovariantVector:
            vtk_image.GetPointData().SetVectors(data_array)
        elif PixelType == itk.SymmetricSecondRankTensor:
            vtk_image.GetPointData().SetTensors(data_array)
        elif PixelType == itk.DiffusionTensor3D:
            vtk_image.GetPointData().SetTensors(data_array)
    return vtk_image
def image_from_vtk_image(vtk_image: "vtk.vtkImageData") -> "itkt.ImageBase":
    """Convert a vtk.vtkImageData to an itk.Image.

    The scalar buffer is wrapped as an ITK image view; spacing, origin and
    (on VTK >= 9) the direction matrix are copied over.
    """
    import itk
    from vtk.util.numpy_support import vtk_to_numpy

    point_data = vtk_image.GetPointData()
    array = vtk_to_numpy(point_data.GetScalars())
    array = array.reshape(-1)
    is_vector = point_data.GetScalars().GetNumberOfComponents() != 1
    dims = list(vtk_image.GetDimensions())
    if is_vector and dims[-1] == 1:
        # 2D
        dims = dims[:2]
        dims.reverse()
        dims.append(point_data.GetScalars().GetNumberOfComponents())
    else:
        # NumPy indexes slowest-varying axis first, so reverse VTK's order.
        dims.reverse()
    array.shape = tuple(dims)
    l_image = itk.image_view_from_array(array, is_vector)
    dim = l_image.GetImageDimension()
    l_spacing = [1.0] * dim
    l_spacing[:dim] = vtk_image.GetSpacing()[:dim]
    l_image.SetSpacing(l_spacing)
    l_origin = [0.0] * dim
    l_origin[:dim] = vtk_image.GetOrigin()[:dim]
    l_image.SetOrigin(l_origin)
    # Direction support with VTK 9
    import vtk
    if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
        direction = vtk_image.GetDirectionMatrix()
        # NOTE(review): only dim 2 and 3 are handled here; a 1-D image on
        # VTK >= 9 would leave direction_array unbound — confirm upstream.
        if dim == 3:
            direction_array = np.identity(3)
            for y in (0, 1, 2):
                for x in (0, 1, 2):
                    direction_array[x, y] = direction.GetElement(x, y)
        elif dim == 2:
            direction_array = np.identity(2)
            for y in (0, 1):
                for x in (0, 1):
                    direction_array[x, y] = direction.GetElement(x, y)
        l_direction = itk.matrix_from_array(direction_array)
        l_image.SetDirection(l_direction)
    return l_image
def dict_from_image(image: "itkt.Image") -> Dict:
    """Serialize a Python itk.Image object to a pickable Python dictionary."""
    import itk

    pixel_data = itk.array_view_from_image(image)
    return {
        "imageType": wasm_type_from_image_type(image),
        "origin": tuple(image.GetOrigin()),
        "spacing": tuple(image.GetSpacing()),
        "size": tuple(image.GetBufferedRegion().GetSize()),
        "direction": np.asarray(image.GetDirection()),
        "data": pixel_data,
    }
def image_from_dict(image_dict: Dict) -> "itkt.Image":
    """Deserialize a dictionary produced by dict_from_image back to an itk.Image."""
    import itk

    ImageType = image_type_from_wasm_type(image_dict["imageType"])
    deserialized = itk.PyBuffer[ImageType].GetImageViewFromArray(image_dict["data"])
    deserialized.SetOrigin(image_dict["origin"])
    deserialized.SetSpacing(image_dict["spacing"])
    deserialized.SetDirection(image_dict["direction"])
    return deserialized
def mesh_from_dict(mesh_dict: Dict) -> "itkt.Mesh":
    """Deserialize a dictionary produced by dict_from_mesh back to an itk.Mesh."""
    import itk

    MeshType = mesh_type_from_wasm_type(mesh_dict["meshType"])
    mesh = MeshType.New()
    mesh.SetObjectName(mesh_dict["name"])
    # Each flat array is rehydrated into an itk.VectorContainer.
    mesh.SetPoints(itk.vector_container_from_array(mesh_dict["points"]))
    mesh.SetPointData(itk.vector_container_from_array(mesh_dict["pointData"]))
    mesh.SetCellsArray(itk.vector_container_from_array(mesh_dict["cells"]))
    mesh.SetCellData(itk.vector_container_from_array(mesh_dict["cellData"]))
    return mesh
def dict_from_mesh(mesh: "itkt.Mesh") -> Dict:
    """Serialize a Python itk.Mesh object to a pickable Python dictionary.

    Points, point data, cells and cell data are exported as flat NumPy
    arrays; empty containers become empty arrays of the matching dtype.
    """
    import itk

    mesh_template = itk.template(mesh)
    pixel_type, mangle, pixel_type_components = wasm_type_from_mesh_type(mesh)
    number_of_points = mesh.GetNumberOfPoints()
    number_of_cells = mesh.GetNumberOfCells()
    # Empty containers cannot be viewed; substitute empty typed arrays.
    if number_of_cells == 0:
        cells_array = np.array([], np.uint)
    else:
        cells_array = itk.array_view_from_vector_container(mesh.GetCellsArray())
    if number_of_points == 0:
        points_array = np.array([], np.float32)
    else:
        points_array = itk.array_view_from_vector_container(mesh.GetPoints()).flatten()
    point_data = mesh.GetPointData()
    if point_data.Size() == 0:
        point_data_numpy = np.array([], mangle)
    else:
        point_data_numpy = itk.array_view_from_vector_container(point_data)
    cell_data = mesh.GetCellData()
    if cell_data.Size() == 0:
        cell_data_numpy = np.array([], mangle)
    else:
        cell_data_numpy = itk.array_view_from_vector_container(cell_data)
    # Cell index component type is a 64-bit unsigned whose ITK alias
    # differs between Windows and other platforms.
    if os.name == "nt":
        cell_component_type = python_to_js(itk.ULL)
    else:
        cell_component_type = python_to_js(itk.UL)
    point_component_type = python_to_js(itk.F)
    # Currently use the same data type for point and cell data
    mesh_type = dict()
    mesh_type["dimension"] = mesh_template[1][1]
    mesh_type["pointComponentType"] = point_component_type
    mesh_type["pointPixelComponentType"] = mangle
    mesh_type["pointPixelType"] = pixel_type
    mesh_type["pointPixelComponents"] = pixel_type_components
    mesh_type["cellComponentType"] = cell_component_type
    mesh_type["cellPixelComponentType"] = mangle
    mesh_type["cellPixelType"] = pixel_type
    mesh_type["cellPixelComponents"] = pixel_type_components
    cell_buffer_size = cells_array.size
    return dict(
        meshType=mesh_type,
        name=mesh.GetObjectName(),
        dimension=mesh_template[1][1],
        numberOfPoints=number_of_points,
        points=points_array,
        numberOfPointPixels=point_data.Size(),
        pointData=point_data_numpy,
        numberOfCells=number_of_cells,
        cells=cells_array,
        numberOfCellPixels=cell_data.Size(),
        cellData=cell_data_numpy,
        cellBufferSize=cell_buffer_size,
    )
def pointset_from_dict(pointset_dict: Dict) -> "itkt.PointSet":
    """Deserialize a dictionary produced by dict_from_pointset back to an
    itk.PointSet."""
    import itk

    MeshType = pointset_type_from_wasm_type(pointset_dict["pointSetType"])
    mesh = MeshType.New()
    mesh.SetObjectName(pointset_dict["name"])
    # Rehydrate the flat arrays into itk.VectorContainer objects.
    mesh.SetPoints(itk.vector_container_from_array(pointset_dict["points"]))
    mesh.SetPointData(itk.vector_container_from_array(pointset_dict["pointData"]))
    return mesh
def dict_from_pointset(pointset: "itkt.PointSet") -> Dict:
    """Serialize a Python itk.PointSet object to a pickable Python dictionary.

    Points and point data are exported as flat NumPy arrays; empty
    containers become empty arrays of the matching dtype.
    """
    import itk

    pointset_template = itk.template(pointset)
    pixel_type, mangle, pixel_type_components = wasm_type_from_pointset_type(pointset)
    number_of_points = pointset.GetNumberOfPoints()
    # Empty containers cannot be viewed; substitute empty typed arrays.
    if number_of_points == 0:
        points_array = np.array([], np.float32)
    else:
        points_array = itk.array_view_from_vector_container(pointset.GetPoints()).flatten()
    point_data = pointset.GetPointData()
    if point_data.Size() == 0:
        point_data_numpy = np.array([], mangle)
    else:
        point_data_numpy = itk.array_view_from_vector_container(point_data)
    # 64-bit unsigned index type whose ITK alias differs on Windows.
    if os.name == "nt":
        cell_component_type = python_to_js(itk.ULL)
    else:
        cell_component_type = python_to_js(itk.UL)
    point_component_type = python_to_js(itk.F)
    # Currently use the same data type for point and cell data
    pointset_type = dict()
    pointset_type["dimension"] = pointset_template[1][1]
    pointset_type["pointComponentType"] = point_component_type
    pointset_type["pointPixelComponentType"] = mangle
    pointset_type["pointPixelType"] = pixel_type
    pointset_type["pointPixelComponents"] = pixel_type_components
    return dict(
        pointSetType=pointset_type,
        name=pointset.GetObjectName(),
        dimension=pointset_template[1][1],
        numberOfPoints=number_of_points,
        points=points_array,
        numberOfPointPixels=point_data.Size(),
        pointData=point_data_numpy,
    )
def image_intensity_min_max(image_or_filter: "itkt.ImageOrImageSource"):
    """Return the minimum and maximum pixel values of an image (or of the
    output image of a filter) as a ``(min, max)`` tuple.

    The pipeline is updated before the values are computed.
    """
    import itk

    img = itk.output(image_or_filter)
    img.UpdateOutputInformation()
    img.Update()
    # Keep the calculator out of the automatic pipeline.
    saved_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    calculator = itk.MinimumMaximumImageCalculator[img].New(Image=img)
    auto_pipeline.current = saved_pipeline
    calculator.Compute()
    return calculator.GetMinimum(), calculator.GetMaximum()
# range is a python function, and should not be overridden
# the current use of the function name "range" is for backward
# compatibility, but should be considered for removal in the future
def range(image_or_filter):
    """Deprecated alias for :func:`image_intensity_min_max`.

    Shadows the ``range`` builtin inside this module; use
    ``builtins.range`` where the builtin is needed.
    """
    return image_intensity_min_max(image_or_filter)
def imwrite(
    image_or_filter: "itkt.ImageOrImageSource",
    filename: fileiotype,
    compression: bool = False,
    imageio: Optional["itkt.ImageIOBase"] = None,
) -> None:
    """Write a image or the output image of a filter to a file.

    Parameters
    ----------
    image_or_filter :
        Image or filter that produces an image to write to the file.
    filename :
        Target output file path.
    compression :
        Use compression when writing if the format supports it.
    imageio :
        Use the provided itk.ImageIOBase derived instance to write the file.

    The writer is instantiated with the image type of the image in
    parameter (or, again, with the output image of the filter in parameter).
    """
    import itk

    img = itk.output(image_or_filter)
    img.UpdateOutputInformation()
    # don't put that writer in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    # BUG FIX: the `filename` argument was previously ignored (a placeholder
    # literal was passed as FileName); write to the requested path.
    writer = itk.ImageFileWriter[type(img)].New(
        Input=img, FileName=f"{filename}", UseCompression=compression
    )
    auto_pipeline.current = tmp_auto_pipeline
    if imageio:
        writer.SetImageIO(imageio)
    writer.Update()
def imread(
    filename: fileiotype,
    pixel_type: Optional["itkt.PixelTypes"] = None,
    fallback_only: bool = False,
    imageio: Optional["itkt.ImageIOBase"] = None,
) -> "itkt.ImageBase":
    """Read an image from a file or series of files and return an itk.Image.

    Parameters
    ----------
    filename :
        File path for a single file, a list of files for an image series, or a
        directory for a DICOM image series.
    pixel_type :
        Image pixel type to cast to when loading.
    fallback_only :
        If true, first try to automatically deduce the image pixel type, and
        only use the given `pixel_type` if automatic deduction fails.
    imageio :
        Use the provided itk.ImageIOBase derived instance to read the file.

    Returns
    -------
    image :
        The resulting itk.Image.

    The reader is instantiated with the image type of the image file if
    `pixel_type` is not provided (default). The dimension of the image is
    automatically deduced from the dimension stored on disk.

    If the filename provided is a directory then the directory is assumed to
    be for a DICOM series volume. If there is exactly one DICOM series
    volume in that directory, the reader will use an itk.ImageSeriesReader
    object to read the DICOM filenames within that directory.

    If the given filename is a list or a tuple of file names, the reader
    will use an itk.ImageSeriesReader object to read the files.

    If `fallback_only` is set to `True`, `imread()` will first try to
    automatically deduce the image pixel_type, and only use the given
    `pixel_type` if automatic deduction fails. Failures typically happen if
    the pixel type is not supported (e.g. it is not currently wrapped).
    """
    import itk
    from itk.support.extras import TemplateTypeError

    if fallback_only:
        if pixel_type is None:
            raise Exception(
                "pixel_type must be set when using the fallback_only option"
            )
        try:
            return imread(filename)
        except (KeyError, TemplateTypeError):
            # Automatic deduction failed; fall through to the explicit
            # pixel_type path below.
            pass
    if type(filename) not in [list, tuple]:
        if os.path.isdir(filename):
            # read DICOM series of 1 image in a folder, refer to: https://github.com/RSIP-Vision/medio
            names_generator = itk.GDCMSeriesFileNames.New()
            names_generator.SetUseSeriesDetails(True)
            names_generator.AddSeriesRestriction("0008|0021")  # Series Date
            # BUG FIX: the caller-supplied path was previously ignored (a
            # placeholder literal was used here and in the messages below).
            names_generator.SetDirectory(f"{filename}")
            series_uid = names_generator.GetSeriesUIDs()
            if len(series_uid) == 0:
                raise FileNotFoundError(f"no DICOMs in: {filename}.")
            if len(series_uid) > 1:
                raise OSError(
                    f"the directory: {filename} contains more than one DICOM series."
                )
            series_identifier = series_uid[0]
            filename = names_generator.GetFileNames(series_identifier)
    if type(filename) in [list, tuple]:
        template_reader_type = itk.ImageSeriesReader
        io_filename = f"{filename[0]}"
        increase_dimension = True
        kwargs = {"FileNames": [f"{f}" for f in filename]}
    else:
        template_reader_type = itk.ImageFileReader
        io_filename = f"{filename}"
        increase_dimension = False
        kwargs = {"FileName": f"{filename}"}
    if imageio:
        kwargs["ImageIO"] = imageio
    if pixel_type:
        # Probe the file to determine its on-disk dimensionality.
        image_IO = itk.ImageIOFactory.CreateImageIO(
            io_filename, itk.CommonEnums.IOFileMode_ReadMode
        )
        if not image_IO:
            raise RuntimeError("No ImageIO is registered to handle the given file.")
        image_IO.SetFileName(io_filename)
        image_IO.ReadImageInformation()
        dimension = image_IO.GetNumberOfDimensions()
        # Increase dimension if last dimension is not of size one.
        if increase_dimension and image_IO.GetDimensions(dimension - 1) != 1:
            dimension += 1
        is_vlv = False
        try:
            is_vlv = itk.template(pixel_type)[0] is itk.VariableLengthVector
        except KeyError:
            # pixel_type is not a templated type; treat as a plain scalar.
            pass
        if is_vlv:
            ImageType = itk.VectorImage[itk.template(pixel_type)[1][0], dimension]
        else:
            ImageType = itk.Image[pixel_type, dimension]
        reader = template_reader_type[ImageType].New(**kwargs)
    else:
        reader = template_reader_type.New(**kwargs)
    reader.Update()
    return reader.GetOutput()
def meshwrite(
    mesh: "itkt.Mesh", filename: fileiotype, compression: bool = False
) -> None:
    """Write a mesh to a file.

    The writer is instantiated according to the type of the input mesh.

    mesh: the itk.Mesh to write.
    filename: path of the output file.
    compression: use compression if the file format supports it.
    """
    import itk
    mesh.UpdateOutputInformation()
    # don't put that writer in the automatic pipeline
    tmp_auto_pipeline = auto_pipeline.current
    auto_pipeline.current = None
    # BUG FIX: the writer must target the `filename` argument; the original
    # passed a placeholder string, so the argument was silently ignored.
    writer = itk.MeshFileWriter[type(mesh)].New(
        Input=mesh, FileName=f"{filename}", UseCompression=compression
    )
    auto_pipeline.current = tmp_auto_pipeline
    writer.Update()
def meshread(
    filename: fileiotype,
    pixel_type: Optional["itkt.PixelTypes"] = None,
    fallback_only: bool = False,
) -> "itkt.Mesh":
    """Read a mesh from a file and return an itk.Mesh.

    The reader is instantiated with the mesh type of the mesh file if
    `pixel_type` is not provided (default). The dimension of the mesh is
    automatically found.

    If `fallback_only` is set to `True`, `meshread()` will first try to
    automatically deduce the mesh pixel_type, and only use the given
    `pixel_type` if automatic deduction fails. Failures typically
    happen if the pixel type is not supported (e.g. it is not currently
    wrapped).
    """
    import itk
    if fallback_only:
        if pixel_type is None:
            raise Exception(
                "pixel_type must be set when using the fallback_only option"
            )
        try:
            return meshread(filename)
        except (KeyError, itk.TemplateTypeError):
            pass
    TemplateReaderType = itk.MeshFileReader
    # BUG FIX: use the `filename` argument; the original used a placeholder
    # string, so the requested file was never opened.
    io_filename = f"{filename}"
    increase_dimension = False
    kwargs = {"FileName": f"{filename}"}
    if pixel_type:
        meshIO = itk.MeshIOFactory.CreateMeshIO(
            io_filename, itk.CommonEnums.IOFileMode_ReadMode
        )
        if not meshIO:
            raise RuntimeError("No MeshIO is registered to handle the given file.")
        meshIO.SetFileName(io_filename)
        meshIO.ReadMeshInformation()
        dimension = meshIO.GetPointDimension()
        # Increase dimension if last dimension is not of size one.
        if increase_dimension and meshIO.GetDimensions(dimension - 1) != 1:
            dimension += 1
        MeshType = itk.Mesh[pixel_type, dimension]
        reader = TemplateReaderType[MeshType].New(**kwargs)
    else:
        reader = TemplateReaderType.New(**kwargs)
    reader.Update()
    return reader.GetOutput()
def transformread(filename: fileiotype) -> List["itkt.TransformBase"]:
    """Read an itk Transform file.

    Parameters
    ----------
    filename:
        Path to the transform file (typically a .h5 file).

    Returns
    -------
    A Python list containing the transforms in the file, in file order.
    """
    import itk
    reader = itk.TransformFileReaderTemplate[itk.D].New()
    # BUG FIX: point the reader at the requested file; the original passed a
    # placeholder string instead of the `filename` argument.
    reader.SetFileName(f"{filename}")
    reader.Update()
    transforms = []
    transform_list = reader.GetModifiableTransformList()
    # The list pops from the back, so collect then reverse to restore
    # the on-disk order.
    while not transform_list.empty():
        transform = transform_list.pop()
        transforms.append(itk.down_cast(transform))
    transforms.reverse()
    return transforms
def transformwrite(
    transforms: List["itkt.TransformBase"],
    filename: fileiotype,
    compression: bool = False,
) -> None:
    """Write an itk Transform file.

    Parameters
    ----------
    transforms: list of itk.TransformBaseTemplate[itk.D]
        Python list of the transforms to write.
    filename:
        Path to the transform file (typically a .h5 file).
    compression:
        Use compression, if the file format supports it.
    """
    import itk
    writer = itk.TransformFileWriterTemplate[itk.D].New()
    # BUG FIX: write to the `filename` argument; the original passed a
    # placeholder string instead.
    writer.SetFileName(f"{filename}")
    writer.SetUseCompression(compression)
    for transform in transforms:
        writer.AddTransform(transform)
    writer.Update()
def search(s: str, case_sensitive: bool = False) -> List[str]:  # , fuzzy=True):
    """Search for a class name in the itk module.

    Exact matches are returned first, followed by names that merely
    contain the query string. Whitespace in the query is ignored.
    """
    query = s.replace(" ", "")
    if not case_sensitive:
        query = query.lower()
    import itk
    names = sorted(dir(itk))
    if case_sensitive:
        exact = [name for name in names if query == name]
        partial = [name for name in names if query in name and query != name]
    else:
        exact = [name for name in names if query == name.lower()]
        partial = [
            name
            for name in names
            if query in name.lower() and query != name.lower()
        ]
    return exact + partial
def _snake_to_camel(keyword: str):
# Helpers for set_inputs snake case to CamelCase keyword argument conversion
_snake_underscore_re = re.compile("(_)([a-z0-9A-Z])")
def _underscore_upper(match_obj):
return match_obj.group(2).upper()
camel = keyword[0].upper()
if _snake_underscore_re.search(keyword[1:]):
return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])
return camel + keyword[1:]
def set_inputs(
    new_itk_object,
    inargs: Optional[Sequence[Any]] = None,
    inkargs: Optional[Dict[str, Any]] = None,
):
    """Set the inputs of the given object, according to the non-named or the
    named parameters in inargs and inkargs.

    This function tries to assign all the non-named parameters as inputs of
    the new_itk_object: the first non-named parameter becomes the first
    input, etc.
    The named parameters are used by calling the method with the same name
    prefixed by 'Set'.
    set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)

    This is the function used in the enhanced New() method to manage the
    inputs. It can be used to produce a similar behavior:

    def SetInputs(self, *args, **kargs):
        import itk
        itk.set_inputs(self, *args, **kargs)
    """
    # Fix bug with Mutable Default Arguments
    # https://docs.python-guide.org/writing/gotchas/
    args: List[Any] = inargs if inargs else []
    kargs: Dict[str, Any] = inkargs if inkargs else {}
    # try to get the images from the filters in args
    args = [output(arg) for arg in args]
    # args without name are filter used to set input image
    #
    # count SetInput calls to call SetInput, SetInput2, SetInput3, ...
    # useful with filter which take 2 input (or more) like SubtractImageFiler
    # Ex: subtract image2.png to image1.png and save the result in result.png
    # r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
    # r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
    # s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
    # itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
    setInputNb: int = -1
    try:
        for setInputNb, arg in enumerate(args):
            methodName = "SetInput%i" % (setInputNb + 1)
            if methodName in dir(new_itk_object):
                # first try to use methods called SetInput1, SetInput2, ...
                # those methods should have more chances to work in case of
                # multiple input types
                getattr(new_itk_object, methodName)(arg)
            else:
                # no method called SetInput?
                # try with the standard SetInput(nb, input)
                new_itk_object.SetInput(setInputNb, arg)
    except TypeError as e:
        # the exception has (at least) two possible reasons:
        # + the filter doesn't take the input number as first argument
        # + arg is an object of wrong type
        #
        # if it's not the first input, re-raise the exception
        if setInputNb != 0:
            raise e
        # it's the first input, try to use the SetInput() method without input
        # number
        new_itk_object.SetInput(args[0])
        # but raise an exception if there is more than 1 argument
        if len(args) > 1:
            raise TypeError("Object accepts only 1 input.")
    except AttributeError:
        # There is no SetInput() method, try SetImage
        # but before, check the number of inputs
        if len(args) > 1:
            raise TypeError("Object accepts only 1 input.")
        methodList = ["SetImage", "SetInputImage"]
        methodName = None
        for m in methodList:
            if m in dir(new_itk_object):
                methodName = m
        if methodName:
            getattr(new_itk_object, methodName)(args[0])
        else:
            raise AttributeError("No method found to set the input.")
    # named args : name is the function name, value is argument(s)
    for attribName, value in kargs.items():
        # use Set as prefix. It allows to use a shorter and more intuitive
        # call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
        # with the full name
        # (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
        if attribName not in ["auto_progress", "template_parameters"]:
            if attribName.islower():
                # snake_case keyword -> CamelCase ITK method suffix
                attribName = _snake_to_camel(attribName)
            attrib = getattr(new_itk_object, "Set" + attribName)
            # Do not use try-except mechanism as this leads to
            # segfaults. Instead limit the number of types that are
            # tested. The list of tested type could maybe be replaced by
            # a test that would check for iterables.
            import itk
            if type(value) in [list, tuple]:
                try:
                    output_value = [itk.output(x) for x in value]
                    attrib(*output_value)
                except Exception:
                    attrib(itk.output(value))
            else:
                attrib(itk.output(value))
class templated_class:
    """This class is used to mimic the behavior of the templated C++ classes.

    It is used this way:

        class CustomClass:
            # class definition here

        CustomClass = templated_class(CustomClass)
        customObject = CustomClass[template, parameters].New()

    The template parameters are passed to the custom class constructor as a
    named parameter 'template_parameters' in a tuple.

    The custom class may implement a static method
    check_template_parameters(parameters) which should raise an exception if
    the template parameters provided are not suitable to instantiate the custom
    class.
    """

    def __init__(self, cls) -> None:
        """cls is the custom class"""
        self.__cls__ = cls
        # Maps template-parameter tuples to the pair objects registered
        # through add_template().
        self.__templates__ = {}

    def New(self, *args, **kargs):
        """Use the parameters to infer the types of the template parameters."""
        # extract the types from the arguments to instantiate the class
        import itk
        types = tuple(class_(o) for o in args)
        return self[types].New(*args, **kargs)

    def __getitem__(self, template_parameters):
        """Return a pair class-template parameters ready to be instantiated.

        The template parameters may be validated if the custom class provides
        the static method check_template_parameters(parameters).
        """
        if not isinstance(template_parameters, tuple):
            template_parameters = (template_parameters,)
        return templated_class.__templated_class_and_parameters__(
            self, template_parameters
        )

    def check_template_parameters(self, template_parameters) -> None:
        """Check the template parameters passed in parameter."""
        # this method is there mainly to make possible to reuse it in the
        # custom class constructor after having used templated_class().
        # Without that, the following example doesn't work:
        #
        # class CustomClass:
        #     def __init__(self, *args, **kargs):
        #         template_parameters = kargs["template_parameters"]
        #         CustomClass.check_template_parameters(template_parameters)
        #         # other init stuff
        #     def check_template_parameters(template_parameters):
        #         # check, really
        #         pass
        # CustomClass = templated_class(CustomClass)
        #
        self.__cls__.check_template_parameters(template_parameters)

    def add_template(self, name: str, params):
        """Register an instantiation under `name` for the given parameters."""
        if not isinstance(params, list) and not isinstance(params, tuple):
            params = (params,)
        params = tuple(params)
        val = self[params]
        self.__templates__[params] = val
        setattr(self, name, val)

    def add_image_templates(self, *args) -> None:
        """Register instantiations for every combination of the given pixel
        types across all wrapped image dimensions (itk.DIMS)."""
        import itk
        if not args:
            return
        combinations = [[t] for t in args[0]]
        for types in args[1:]:
            temp = []
            for t in types:
                for c in combinations:
                    temp.append(c + [t])
            combinations = temp
        for d in itk.DIMS:
            for c in combinations:
                parameters = []
                name = ""
                for t in c:
                    parameters.append(itk.Image[t, d])
                    name += "I" + t.short_name + str(d)
                self.add_template(name, tuple(parameters))

    class __templated_class_and_parameters__:
        """Inner class used to store the pair class-template parameters ready
        to instantiate.
        """

        def __init__(self, l_templated_class, l_template_parameters) -> None:
            self.__templated_class__ = l_templated_class
            self.__template_parameters__ = l_template_parameters
            if "check_template_parameters" in dir(l_templated_class.__cls__):
                l_templated_class.__cls__.check_template_parameters(
                    l_template_parameters
                )

        def New(self, *args, **kargs):
            """A New() method to mimic the ITK default behavior, even if the
            class doesn't provide any New() method.
            """
            kargs["template_parameters"] = self.__template_parameters__
            if "New" in dir(self.__templated_class__.__cls__):
                obj = self.__templated_class__.__cls__.New(*args, **kargs)
            else:
                obj = self.__templated_class__.__cls__(*args, **kargs)
            setattr(obj, "__template_parameters__", self.__template_parameters__)
            setattr(obj, "__templated_class__", self.__templated_class__)
            return obj

        def __call__(self, *args, **kargs):
            return self.New(*args, **kargs)

    def keys(self):
        return self.__templates__.keys()

    def values(self):
        return list(self.__templates__.values())

    def items(self):
        return list(self.__templates__.items())

    # everything after this comment is for dict interface
    # and is a copy/paste from DictMixin
    # only methods to edit dictionary are not there
    def __iter__(self):
        yield from self.keys()

    def has_key(self, key: str):
        return key in self.__templates__

    def __contains__(self, key: str):
        # BUG FIX: the original returned `key in self`, which invokes
        # __contains__ again and recurses until RecursionError.
        return key in self.__templates__

    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        # BUG FIX: the original returned `self.get(key, default)`, an
        # unconditional infinite recursion. Delegate to the backing dict.
        return self.__templates__.get(key, default)

    def __len__(self):
        return len(self.keys())
class pipeline:
    """A convenient class to store the reference to the filters of a pipeline

    With this class, a method can create a pipeline of several filters and
    return it without losing the references to the filters in this pipeline.
    The pipeline object acts almost like a filter (it has a GetOutput() method)
    and thus can be simply integrated in another pipeline.
    """

    def __init__(self, *args, **kargs) -> None:
        self.clear()
        self.input = None
        self.filters: List[Any] = []
        set_inputs(self, args, kargs)

    def connect(self, l_filter) -> None:
        """Connect a new l_filter to the pipeline

        The output of the first l_filter will be used as the input of this
        one and the l_filter passed as parameter will be added to the list
        """
        if self.GetOutput() is not None:
            set_inputs(l_filter, [self.GetOutput()])
        self.append(l_filter)

    def append(self, l_filter) -> None:
        """Add a new l_filter to the pipeline

        The new l_filter will not be connected. The user must connect it.
        """
        self.filters.append(l_filter)

    def clear(self) -> None:
        """Clear the filter list"""
        self.filters = []

    def GetOutput(self, l_index: int = 0):
        """Return the output of the pipeline

        If another output is needed, use
        pipeline.filters[-1].GetAnotherOutput() instead of this method,
        subclass pipeline to implement another GetOutput() method, or use
        expose()
        """
        if len(self.filters) == 0:
            return self.GetInput()
        else:
            l_filter = self.filters[-1]
            if hasattr(l_filter, "__getitem__"):
                return l_filter[l_index]
            try:
                return l_filter.GetOutput(l_index)
            except Exception:
                if l_index == 0:
                    return l_filter.GetOutput()
                else:
                    raise ValueError("Index can only be 0 on that object")

    def GetNumberOfOutputs(self) -> int:
        """Return the number of outputs"""
        if len(self.filters) == 0:
            return 1
        else:
            return self.filters[-1].GetNumberOfOutputs()

    def SetInput(self, l_input) -> None:
        """Set the l_input of the pipeline"""
        if len(self.filters) != 0:
            set_inputs(self.filters[0], [l_input])
        # BUG FIX: the original assigned `self.l_input`, an attribute that
        # GetInput() never reads, so GetInput() kept returning the stale
        # value. Store in `self.input`, which GetInput() returns.
        self.input = l_input

    def GetInput(self):
        """Get the input of the pipeline"""
        return self.input

    def Update(self):
        """Update the pipeline"""
        if len(self.filters) > 0:
            return self.filters[-1].Update()

    def UpdateLargestPossibleRegion(self):
        """Update the pipeline"""
        if len(self.filters) > 0:
            return self.filters[-1].UpdateLargestPossibleRegion()

    def UpdateOutputInformation(self) -> None:
        # Forward to the last filter when it supports it; otherwise fall
        # back to a full Update().
        if "UpdateOutputInformation" in dir(self.filters[-1]):
            self.filters[-1].UpdateOutputInformation()
        else:
            self.Update()

    def __len__(self):
        return self.GetNumberOfOutputs()

    def __getitem__(self, item):
        return self.GetOutput(item)

    def __call__(self, *args, **kargs):
        set_inputs(self, args, kargs)
        self.UpdateLargestPossibleRegion()
        return self

    def expose(self, name: str, new_name: Optional[str] = None, position: int = -1):
        """Expose an attribute from a filter of the mini-pipeline.

        Once called, the pipeline instance has a new Set/Get set of methods to
        access directly the corresponding method of one of the filter of the
        pipeline.
        Ex: p.expose( "Radius" )
            p.SetRadius( 5 )
            p.GetRadius( 5 )
        By default, the attribute usable on the pipeline instance has the same
        name than the one of the filter, but it can be changed by providing a
        value to new_name.
        The last filter of the pipeline is used by default, but another one may
        be used by giving its position.
        Ex: p.expose("Radius", "SmoothingNeighborhood", 2)
            p.GetSmoothingNeighborhood()
        """
        if new_name is None:
            new_name = name
        src = self.filters[position]
        ok: bool = False
        set_name: str = "Set" + name
        if set_name in dir(src):
            setattr(self, "Set" + new_name, getattr(src, set_name))
            ok = True
        get_name = "Get" + name
        if get_name in dir(src):
            setattr(self, "Get" + new_name, getattr(src, get_name))
            ok = True
        if not ok:
            raise RuntimeError(f"No attribute {name} at position {position}.")
class auto_pipeline(pipeline):
    """Pipeline that registers itself as the globally "current" pipeline.

    While an auto_pipeline is current, newly created filters are attached to
    it automatically; Stop() clears the registration.
    """

    current = None

    def __init__(self, *args, **kargs) -> None:
        super().__init__(*args, **kargs)
        self.Start()

    def Start(self) -> None:
        """Make this instance the current automatic pipeline."""
        auto_pipeline.current = self

    @staticmethod
    def Stop() -> None:
        """Deactivate automatic pipelining."""
        auto_pipeline.current = None
def down_cast(obj: "itkt.LightObject"):
    """Down cast an itk.LightObject (or a object of a subclass) to its most
    specialized type.
    """
    import itk
    from itk.support.template_class import itkTemplate

    class_name: str = obj.GetNameOfClass()
    target = getattr(itk, class_name)
    # Non-templated types can be cast directly.
    if not isinstance(target, itkTemplate):
        return target.cast(obj)
    # Templated type: try each instantiation until one accepts the object.
    for candidate in target.values():
        try:
            return candidate.cast(obj)
        except Exception:
            # fail silently for now
            continue
    raise RuntimeError(f"Can't downcast to a specialization of {class_name}")
def attribute_list(inputobject, name: str):
    """Returns a list of the specified attributes for the objects in the image.

    inputobject: the input LabelImage (or a filter producing one)
    name: the attribute name (e.g. "PhysicalSize" reads GetPhysicalSize())
    """
    import itk
    img = itk.output(inputobject)
    # Relabel by the attribute so the values come back sorted (descending).
    relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
        img, Attribute=name, ReverseOrdering=True, InPlace=False
    )
    relabel.UpdateLargestPossibleRegion()
    r = relabel.GetOutput()
    # required because range is overloaded in this module
    # (the original also imported `sys` here, which was never used)
    from builtins import range
    getter = "Get" + name
    l_list: List[Any] = [
        getattr(r.GetLabelObject(i), getter)()
        for i in range(1, r.GetNumberOfLabelObjects() + 1)
    ]
    return l_list
def attributes_list(inputObject, names: List[str]):
    """Returns a list of tuples of the specified attributes for the objects
    in the image.

    inputObject: the input LabelImage
    names: the attribute names
    """
    import itk
    img = itk.output(inputObject)
    relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
        img, Attribute=names[0], ReverseOrdering=True, InPlace=False
    )
    relabel.UpdateLargestPossibleRegion()
    label_map = relabel.GetOutput()
    # required because range is overloaded in this module
    from builtins import range
    result: List[Any] = []
    for label_index in range(1, label_map.GetNumberOfLabelObjects() + 1):
        label_object = label_map.GetLabelObject(label_index)
        result.append(
            tuple(getattr(label_object, "Get" + n)() for n in names)
        )
    return result
def attribute_dict(inputobject, name: str):
    """Returns a dict with the attribute values in keys and a list of the
    corresponding objects in value

    inputobject: the input LabelImage
    name: the name of the attribute
    """
    import itk
    img = itk.output(inputobject)
    relabel = itk.StatisticsRelabelLabelMapFilter[img].New(
        img, Attribute=name, ReverseOrdering=True, InPlace=False
    )
    relabel.UpdateLargestPossibleRegion()
    label_map = relabel.GetOutput()
    grouped = {}
    # required because range is overloaded in this module
    from builtins import range
    for label_index in range(1, label_map.GetNumberOfLabelObjects() + 1):
        label_object = label_map.GetLabelObject(label_index)
        value = getattr(label_object, "Get" + name)()
        # Group the label objects by their attribute value.
        grouped.setdefault(value, []).append(label_object)
    return grouped
def number_of_objects(image_or_filter) -> int:
    """Returns the number of objects in the image.

    image_or_filter: the input LabelImage (or a filter producing one)
    """
    import itk
    # Make sure the upstream pipeline has executed before counting.
    image_or_filter.UpdateLargestPossibleRegion()
    return itk.output(image_or_filter).GetNumberOfLabelObjects()
def ipython_kw_matches(text: str):
    """Match named ITK object's named parameters"""
    import IPython
    import itk
    import re
    import inspect
    from itk.support import template_class

    regexp = re.compile(
        r"""
        '.*?' | # single quoted strings or
        ".*?" | # double quoted strings or
        \w+ | # identifier
        \S # other characters
        """,
        re.VERBOSE | re.DOTALL,
    )
    ip = IPython.get_ipython()
    if "." in text:  # a parameter cannot be dotted
        return []
    # 1. Find the nearest identifier that comes before an unclosed
    # parenthesis e.g. for "foo (1+bar(x), pa", the candidate is "foo".
    if ip.Completer.readline:
        text_until_cursor = ip.Completer.readline.get_line_buffer()[
            : ip.Completer.readline.get_endidx()
        ]
    else:
        # IPython >= 5.0.0, which is based on the Python Prompt Toolkit
        text_until_cursor = ip.Completer.text_until_cursor
    tokens = regexp.findall(text_until_cursor)
    tokens.reverse()
    iter_tokens = iter(tokens)
    open_par = 0
    for token in iter_tokens:
        if token == ")":
            open_par -= 1
        elif token == "(":
            open_par += 1
            if open_par > 0:
                # found the last unclosed parenthesis
                break
    else:
        return []
    # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
    ids = []
    is_id = re.compile(r"\w+$").match
    while True:
        try:
            # BUG FIX: the original called `iter_tokens.next()`, the Python 2
            # iterator protocol; Python 3 uses the next() builtin.
            ids.append(next(iter_tokens))
            if not is_id(ids[-1]):
                ids.pop()
                break
            if not next(iter_tokens) == ".":
                break
        except StopIteration:
            break
    # lookup the candidate callable matches either using global_matches
    # or attr_matches for dotted names
    if len(ids) == 1:
        callable_matches = ip.Completer.global_matches(ids[0])
    else:
        callable_matches = ip.Completer.attr_matches(".".join(ids[::-1]))
    arg_matches = []
    for callable_match in callable_matches:
        # drop the .New at this end, so we can search in the class members
        if callable_match.endswith(".New"):
            callable_match = callable_match[:-4]
        elif not re.findall("([A-Z])", callable_match):  # True if snake case
            # Split at the last '.' occurrence
            split_name_parts = callable_match.split(".")
            namespace = split_name_parts[:-1]
            function_name = split_name_parts[-1]
            # Find corresponding object name
            object_name = _snake_to_camel(function_name)
            # Check that this object actually exists
            try:
                object_callable_match = ".".join(namespace + [object_name])
                eval(object_callable_match, ip.Completer.namespace)
                # Reconstruct full object name
                callable_match = object_callable_match
            except AttributeError:
                # callable_match is not a snake case function with a
                # corresponding object.
                pass
        try:
            l_object = eval(callable_match, ip.Completer.namespace)
            if isinstance(l_object, template_class.itkTemplate):
                # this is a template - lets grab the first entry to search for
                # the methods
                l_object = l_object.values()[0]
            named_args = []
            is_in: bool = isinstance(l_object, itk.LightObject)
            if is_in or (
                inspect.isclass(l_object) and issubclass(l_object, itk.LightObject)
            ):
                named_args = [n[3:] for n in dir(l_object) if n.startswith("Set")]
        except Exception as e:
            print(e)
            continue
        for namedArg in named_args:
            if namedArg.startswith(text):
                arg_matches.append(f"{namedArg}=")
    return arg_matches
def template(cl):
    """Return the template of a class (or of the class of an object) and
    its parameters

    template() returns a tuple with 2 elements:
    - the first one is the itkTemplate object
    - the second is a tuple containing the template parameters
    """
    from itk.support.template_class import itkTemplateBase

    key = class_(cl)
    return itkTemplateBase.__template_instantiations_object_to_name__[key]
def ctype(s: str) -> "itkt.itkCType":
    """Return the c type corresponding to the string passed in parameter

    The string can contain some extra spaces.
    see also itkCType
    """
    from itk.support.types import itkCType

    # Collapse any run of whitespace to a single space before the lookup.
    normalized = " ".join(s.split())
    result = itkCType.GetCType(normalized)
    if result is None:
        raise KeyError(f"Unrecognized C type '{s}'")
    return result
def class_(obj):
    """Return a class from an object

    Often in itk, the __class__ is not what the user is expecting.
    class_() should do a better job
    """
    import inspect

    # Classes pass through unchanged; instances yield their class.
    return obj if inspect.isclass(obj) else obj.__class__
def python_type(object_ref) -> str:
    """Returns the Python type name of an object

    The Python name corresponding to the given instantiated object is printed.
    This includes both the Python name and the parameters of the object. A user
    can copy and paste the printed value to instantiate a new object of the
    same type."""
    from itk.support.template_class import itkTemplate
    from itk.support.types import itkCType

    def in_itk(name):
        # Map a (possibly C++-qualified) type name to its "itk.<Name>" form
        # when the name exists in the itk module; otherwise return it as-is.
        import itk
        # Remove "itk::" and "std::" from template name.
        # Only happens for ITK objects.
        shortname: str = name.split("::")[-1]
        shortname = shortname.split("itk")[-1]
        namespace = itk
        # A type cannot be part of ITK if its name was not modified above. This
        # check avoids having an input of type `list` and return `itk.list` that
        # also exists.
        likely_itk: bool = shortname != name or name[:3] == "vnl"
        if likely_itk and hasattr(namespace, shortname):
            return namespace.__name__ + "." + shortname  # Prepend name with 'itk.'
        else:
            return name

    def recursive(l_obj, level: int):
        # Render the object's templated name, recursing into template
        # parameters; `level` distinguishes top-level objects from nested
        # template parameters (which may be plain values such as ints).
        try:
            type_name, param_list = template(l_obj)
            name = in_itk(type_name.__name__)
            parameters = []
            for t in param_list:
                parameters.append(recursive(t, level + 1))
            return name + "[" + ",".join(parameters) + "]"
        except KeyError:
            # Not a registered template instantiation.
            if isinstance(l_obj, itkCType):  # Handles CTypes differently
                return "itk." + l_obj.short_name
            elif hasattr(l_obj, "__name__"):
                # This should be where most ITK types end up.
                return in_itk(l_obj.__name__)
            elif (
                not isinstance(l_obj, type)
                and type(l_obj) != itkTemplate
                and level != 0
            ):
                # l_obj should actually be considered a value, not a type,
                # or it is already an itkTemplate type.
                # A value can be an integer that is a template parameter.
                # This does not happen at the first level of the recursion
                # as it is not possible that this object would be a template
                # parameter. Checking the level `0` allows e.g. to find the
                # type of an object that is a `list` or an `int`.
                return str(l_obj)
            else:
                return in_itk(type(l_obj).__name__)
    return recursive(object_ref, 0)
class TemplateTypeError(TypeError):
    """TypeError raised when an itkTemplate (e.g. a reader) is not wrapped
    for the requested input type; the message lists the supported types."""

    def __init__(self, template_type, input_type):
        def tuple_to_string_type(t):
            # Render a tuple of template parameters as "T1, T2"; a single
            # parameter is rendered on its own.
            if type(t) == tuple:
                return ", ".join(python_type(x) for x in t)
            else:
                # BUG FIX: the original was missing this `return`, so a
                # non-tuple input produced "input type `None`" in the message.
                return python_type(t)

        import itk
        # Special case for ITK readers: Add extra information.
        extra_eg: str = ""
        if template_type in [
            itk.ImageFileReader,
            itk.ImageSeriesReader,
            itk.MeshFileReader,
        ]:
            extra_eg = """
or
e.g.: image = itk.imread(my_input_filename, itk.F)
"""
        python_template_type = python_type(template_type)
        python_input_type = tuple_to_string_type(input_type)
        type_list = "\n".join([python_type(x[0]) for x in template_type.keys()])
        eg_type = ", ".join([python_type(x) for x in list(template_type.keys())[0]])
        msg: str = """{template_type} is not wrapped for input type `{input_type}`.
To limit the size of the package, only a limited number of
types are available in ITK Python. To print the supported
types, run the following command in your python environment:
{template_type}.GetTypes()
Possible solutions:
* If you are an application user:
** Convert your input image into a supported format (see below).
** Contact developer to report the issue.
* If you are an application developer, force input images to be
loaded in a supported pixel type.
e.g.: instance = {template_type}[{eg_type}].New(my_input){extra_eg}
* (Advanced) If you are an application developer, build ITK Python yourself and
turned to `ON` the corresponding CMake option to wrap the pixel type or image
dimension you need. When configuring ITK with CMake, you can set
`ITK_WRAP_${{type}}` (replace ${{type}} with appropriate pixel type such as
`double`). If you need to support images with 4 or 5 dimensions, you can add
these dimensions to the list of dimensions in the CMake variable
`ITK_WRAP_IMAGE_DIMS`.
Supported input types:
{type_list}
""".format(
            template_type=python_template_type,
            input_type=python_input_type,
            type_list=type_list,
            eg_type=eg_type,
            extra_eg=extra_eg,
        )
        TypeError.__init__(self, msg)
# install progress callback and custom completer if we are in ipython
# interpreter
try:
    import itkConfig
    import IPython
    # Register the ITK keyword-argument completer with highest priority,
    # so it is consulted before IPython's default matchers.
    if IPython.get_ipython():
        IPython.get_ipython().Completer.matchers.insert(0, ipython_kw_matches)
    # some cleanup
    del itkConfig, IPython
except (ImportError, AttributeError):
    # fail silently
    pass
| [
"itk.PyBuffer.keys",
"re.compile",
"itk.down_cast",
"itk.output",
"numpy.array",
"numpy.moveaxis",
"numpy.arange",
"itk.array_from_matrix",
"numpy.flip",
"itk.origin",
"numpy.asarray",
"itk.ImageIOFactory.CreateImageIO",
"itk.MeshIOFactory.CreateMeshIO",
"numpy.issubdtype",
"numpy.linspa... | [((3438, 3529), 'warnings.warn', 'warnings.warn', (['"""WrapITK warning: itk.image() is deprecated. Use itk.output() instead."""'], {}), "(\n 'WrapITK warning: itk.image() is deprecated. Use itk.output() instead.')\n", (3451, 3529), False, 'import warnings\n'), ((3940, 3967), 'itk.MultiThreaderBase.New', 'itk.MultiThreaderBase.New', ([], {}), '()\n', (3965, 3967), False, 'import itk\n'), ((4138, 4165), 'itk.MultiThreaderBase.New', 'itk.MultiThreaderBase.New', ([], {}), '()\n', (4163, 4165), False, 'import itk\n'), ((4761, 4788), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (4771, 4788), False, 'import itk\n'), ((5728, 5755), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (5738, 5755), False, 'import itk\n'), ((6129, 6156), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (6139, 6156), False, 'import itk\n'), ((6525, 6552), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (6535, 6552), False, 'import itk\n'), ((6954, 6981), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (6964, 6981), False, 'import itk\n'), ((8166, 8193), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (8176, 8193), False, 'import itk\n'), ((12472, 12495), 'itk.template', 'itk.template', (['container'], {}), '(container)\n', (12484, 12495), False, 'import itk\n'), ((13415, 13438), 'itk.template', 'itk.template', (['container'], {}), '(container)\n', (13427, 13438), False, 'import itk\n'), ((19282, 19316), 'itk.array_view_from_image', 'itk.array_view_from_image', (['l_image'], {}), '(l_image)\n', (19307, 19316), False, 'import itk\n'), ((19333, 19353), 'itk.spacing', 'itk.spacing', (['l_image'], {}), '(l_image)\n', (19344, 19353), False, 'import itk\n'), ((19369, 19388), 'itk.origin', 'itk.origin', (['l_image'], {}), '(l_image)\n', (19379, 19388), False, 'import itk\n'), ((19402, 19419), 'itk.size', 
'itk.size', (['l_image'], {}), '(l_image)\n', (19410, 19419), False, 'import itk\n'), ((20424, 20487), 'xarray.DataArray', 'xr.DataArray', (['array_view'], {'dims': 'dims', 'coords': 'coords', 'attrs': 'attrs'}), '(array_view, dims=dims, coords=coords, attrs=attrs)\n', (20436, 20487), True, 'import xarray as xr\n'), ((21664, 21718), 'itk.image_view_from_array', 'itk.image_view_from_array', (['values'], {'is_vector': 'is_vector'}), '(values, is_vector=is_vector)\n', (21689, 21718), False, 'import itk\n'), ((22723, 22757), 'itk.array_view_from_image', 'itk.array_view_from_image', (['l_image'], {}), '(l_image)\n', (22748, 22757), False, 'import itk\n'), ((22775, 22793), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (22791, 22793), False, 'import vtk\n'), ((23358, 23375), 'itk.size', 'itk.size', (['l_image'], {}), '(l_image)\n', (23366, 23375), False, 'import itk\n'), ((25231, 25274), 'itk.image_view_from_array', 'itk.image_view_from_array', (['array', 'is_vector'], {}), '(array, is_vector)\n', (25256, 25274), False, 'import itk\n'), ((26377, 26409), 'itk.array_view_from_image', 'itk.array_view_from_image', (['image'], {}), '(image)\n', (26402, 26409), False, 'import itk\n'), ((27485, 27524), 'itk.vector_container_from_array', 'itk.vector_container_from_array', (['points'], {}), '(points)\n', (27516, 27524), False, 'import itk\n'), ((27610, 27653), 'itk.vector_container_from_array', 'itk.vector_container_from_array', (['point_data'], {}), '(point_data)\n', (27641, 27653), False, 'import itk\n'), ((27732, 27770), 'itk.vector_container_from_array', 'itk.vector_container_from_array', (['cells'], {}), '(cells)\n', (27763, 27770), False, 'import itk\n'), ((27856, 27898), 'itk.vector_container_from_array', 'itk.vector_container_from_array', (['cell_data'], {}), '(cell_data)\n', (27887, 27898), False, 'import itk\n'), ((28111, 28129), 'itk.template', 'itk.template', (['mesh'], {}), '(mesh)\n', (28123, 28129), False, 'import itk\n'), ((30654, 30693), 
'itk.vector_container_from_array', 'itk.vector_container_from_array', (['points'], {}), '(points)\n', (30685, 30693), False, 'import itk\n'), ((30783, 30826), 'itk.vector_container_from_array', 'itk.vector_container_from_array', (['point_data'], {}), '(point_data)\n', (30814, 30826), False, 'import itk\n'), ((31060, 31082), 'itk.template', 'itk.template', (['pointset'], {}), '(pointset)\n', (31072, 31082), False, 'import itk\n'), ((32811, 32838), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (32821, 32838), False, 'import itk\n'), ((34248, 34275), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (34258, 34275), False, 'import itk\n'), ((44250, 44280), 're.compile', 're.compile', (['"""(_)([a-z0-9A-Z])"""'], {}), "('(_)([a-z0-9A-Z])')\n", (44260, 44280), False, 'import re\n'), ((60532, 60555), 'itk.output', 'itk.output', (['inputobject'], {}), '(inputobject)\n', (60542, 60555), False, 'import itk\n'), ((61265, 61288), 'itk.output', 'itk.output', (['inputObject'], {}), '(inputObject)\n', (61275, 61288), False, 'import itk\n'), ((62100, 62123), 'itk.output', 'itk.output', (['inputobject'], {}), '(inputobject)\n', (62110, 62123), False, 'import itk\n'), ((62868, 62895), 'itk.output', 'itk.output', (['image_or_filter'], {}), '(image_or_filter)\n', (62878, 62895), False, 'import itk\n'), ((63150, 63424), 're.compile', 're.compile', (['"""\n \'.*?\' | # single quoted strings or\n ".*?" | # double quoted strings or\n \\\\w+ | # identifier\n \\\\S # other characters\n """', '(re.VERBOSE | re.DOTALL)'], {}), '(\n """\n \'.*?\' | # single quoted strings or\n ".*?" 
| # double quoted strings or\n \\\\w+ | # identifier\n \\\\S # other characters\n """\n , re.VERBOSE | re.DOTALL)\n', (63160, 63424), False, 'import re\n'), ((63446, 63467), 'IPython.get_ipython', 'IPython.get_ipython', ([], {}), '()\n', (63465, 63467), False, 'import IPython\n'), ((67944, 67964), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (67959, 67964), False, 'import inspect\n'), ((73021, 73042), 'IPython.get_ipython', 'IPython.get_ipython', ([], {}), '()\n', (73040, 73042), False, 'import IPython\n'), ((9817, 9832), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (9827, 9832), True, 'import numpy as np\n'), ((14373, 14388), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (14383, 14388), True, 'import numpy as np\n'), ((17209, 17224), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (17219, 17224), True, 'import numpy as np\n'), ((18556, 18571), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (18566, 18571), True, 'import numpy as np\n'), ((19705, 19838), 'numpy.linspace', 'np.linspace', (['l_origin[l_index]', '(l_origin[l_index] + (l_size[l_index] - 1) * l_spacing[l_index])', 'l_size[l_index]'], {'dtype': 'np.float64'}), '(l_origin[l_index], l_origin[l_index] + (l_size[l_index] - 1) *\n l_spacing[l_index], l_size[l_index], dtype=np.float64)\n', (19716, 19838), True, 'import numpy as np\n'), ((20078, 20116), 'numpy.arange', 'np.arange', (['components'], {'dtype': 'np.uint32'}), '(components, dtype=np.uint32)\n', (20087, 20116), True, 'import numpy as np\n'), ((23472, 23507), 'vtk.vtkVersion.GetVTKMajorVersion', 'vtk.vtkVersion.GetVTKMajorVersion', ([], {}), '()\n', (23505, 23507), False, 'import vtk\n'), ((25593, 25628), 'vtk.vtkVersion.GetVTKMajorVersion', 'vtk.vtkVersion.GetVTKMajorVersion', ([], {}), '()\n', (25626, 25628), False, 'import vtk\n'), ((26114, 26152), 'itk.matrix_from_array', 'itk.matrix_from_array', (['direction_array'], {}), '(direction_array)\n', (26135, 26152), False, 'import itk\n'), 
((28356, 28377), 'numpy.array', 'np.array', (['[]', 'np.uint'], {}), '([], np.uint)\n', (28364, 28377), True, 'import numpy as np\n'), ((28523, 28547), 'numpy.array', 'np.array', (['[]', 'np.float32'], {}), '([], np.float32)\n', (28531, 28547), True, 'import numpy as np\n'), ((28742, 28762), 'numpy.array', 'np.array', (['[]', 'mangle'], {}), '([], mangle)\n', (28750, 28762), True, 'import numpy as np\n'), ((28800, 28848), 'itk.array_view_from_vector_container', 'itk.array_view_from_vector_container', (['point_data'], {}), '(point_data)\n', (28836, 28848), False, 'import itk\n'), ((28941, 28961), 'numpy.array', 'np.array', (['[]', 'mangle'], {}), '([], mangle)\n', (28949, 28961), True, 'import numpy as np\n'), ((28998, 29045), 'itk.array_view_from_vector_container', 'itk.array_view_from_vector_container', (['cell_data'], {}), '(cell_data)\n', (29034, 29045), False, 'import itk\n'), ((31277, 31301), 'numpy.array', 'np.array', (['[]', 'np.float32'], {}), '([], np.float32)\n', (31285, 31301), True, 'import numpy as np\n'), ((31504, 31524), 'numpy.array', 'np.array', (['[]', 'mangle'], {}), '([], mangle)\n', (31512, 31524), True, 'import numpy as np\n'), ((31562, 31610), 'itk.array_view_from_vector_container', 'itk.array_view_from_vector_container', (['point_data'], {}), '(point_data)\n', (31598, 31610), False, 'import itk\n'), ((36891, 36914), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (36904, 36914), False, 'import os\n'), ((38210, 38297), 'itk.ImageIOFactory.CreateImageIO', 'itk.ImageIOFactory.CreateImageIO', (['io_filename', 'itk.CommonEnums.IOFileMode_ReadMode'], {}), '(io_filename, itk.CommonEnums.\n IOFileMode_ReadMode)\n', (38242, 38297), False, 'import itk\n'), ((40985, 41070), 'itk.MeshIOFactory.CreateMeshIO', 'itk.MeshIOFactory.CreateMeshIO', (['io_filename', 'itk.CommonEnums.IOFileMode_ReadMode'], {}), '(io_filename, itk.CommonEnums.IOFileMode_ReadMode\n )\n', (41015, 41070), False, 'import itk\n'), ((64452, 64471), 're.compile', 
're.compile', (['"""\\\\w+$"""'], {}), "('\\\\w+$')\n", (64462, 64471), False, 'import re\n'), ((8516, 8535), 'itk.PyBuffer.keys', 'itk.PyBuffer.keys', ([], {}), '()\n', (8533, 8535), False, 'import itk\n'), ((11366, 11385), 'itk.PyBuffer.keys', 'itk.PyBuffer.keys', ([], {}), '()\n', (11383, 11385), False, 'import itk\n'), ((12899, 12927), 'itk.PyVectorContainer.keys', 'itk.PyVectorContainer.keys', ([], {}), '()\n', (12925, 12927), False, 'import itk\n'), ((13837, 13865), 'itk.PyVectorContainer.keys', 'itk.PyVectorContainer.keys', ([], {}), '()\n', (13863, 13865), False, 'import itk\n'), ((14885, 14913), 'itk.PyVectorContainer.keys', 'itk.PyVectorContainer.keys', ([], {}), '()\n', (14911, 14913), False, 'import itk\n'), ((15702, 15718), 'itk.PyVnl.keys', 'itk.PyVnl.keys', ([], {}), '()\n', (15716, 15718), False, 'import itk\n'), ((17556, 17572), 'itk.PyVnl.keys', 'itk.PyVnl.keys', ([], {}), '()\n', (17570, 17572), False, 'import itk\n'), ((22274, 22292), 'numpy.flip', 'np.flip', (['direction'], {}), '(direction)\n', (22281, 22292), True, 'import numpy as np\n'), ((25737, 25751), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (25748, 25751), True, 'import numpy as np\n'), ((37049, 37078), 'itk.GDCMSeriesFileNames.New', 'itk.GDCMSeriesFileNames.New', ([], {}), '()\n', (37076, 37078), False, 'import itk\n'), ((42329, 42353), 'itk.down_cast', 'itk.down_cast', (['transform'], {}), '(transform)\n', (42342, 42353), False, 'import itk\n'), ((7827, 7874), 'numpy.issubdtype', 'np.issubdtype', (['numpy_array_type.dtype.type', 'key'], {}), '(numpy_array_type.dtype.type, key)\n', (7840, 7874), True, 'import numpy as np\n'), ((15648, 15672), 'itk.template', 'itk.template', (['vnl_object'], {}), '(vnl_object)\n', (15660, 15672), False, 'import itk\n'), ((21607, 21640), 'numpy.moveaxis', 'np.moveaxis', (['values', 'source', 'dest'], {}), '(values, source, dest)\n', (21618, 21640), True, 'import numpy as np\n'), ((24076, 24097), 'itk.template', 'itk.template', 
(['l_image'], {}), '(l_image)\n', (24088, 24097), False, 'import itk\n'), ((25944, 25958), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (25955, 25958), True, 'import numpy as np\n'), ((65279, 65316), 're.findall', 're.findall', (['"""([A-Z])"""', 'callable_match'], {}), "('([A-Z])', callable_match)\n", (65289, 65316), False, 'import re\n'), ((10206, 10229), 'itk.template', 'itk.template', (['ImageType'], {}), '(ImageType)\n', (10218, 10229), False, 'import itk\n'), ((10247, 10270), 'itk.template', 'itk.template', (['ImageType'], {}), '(ImageType)\n', (10259, 10270), False, 'import itk\n'), ((10475, 10498), 'itk.template', 'itk.template', (['ImageType'], {}), '(ImageType)\n', (10487, 10498), False, 'import itk\n'), ((38789, 38813), 'itk.template', 'itk.template', (['pixel_type'], {}), '(pixel_type)\n', (38801, 38813), False, 'import itk\n'), ((49172, 49189), 'itk.output', 'itk.output', (['value'], {}), '(value)\n', (49182, 49189), False, 'import itk\n'), ((66559, 66584), 'inspect.isclass', 'inspect.isclass', (['l_object'], {}), '(l_object)\n', (66574, 66584), False, 'import inspect\n'), ((23579, 23613), 'itk.array_from_matrix', 'itk.array_from_matrix', (['l_direction'], {}), '(l_direction)\n', (23600, 23613), False, 'import itk\n'), ((48979, 48992), 'itk.output', 'itk.output', (['x'], {}), '(x)\n', (48989, 48992), False, 'import itk\n'), ((73052, 73073), 'IPython.get_ipython', 'IPython.get_ipython', ([], {}), '()\n', (73071, 73073), False, 'import IPython\n'), ((10399, 10422), 'itk.template', 'itk.template', (['ImageType'], {}), '(ImageType)\n', (10411, 10422), False, 'import itk\n'), ((38946, 38970), 'itk.template', 'itk.template', (['pixel_type'], {}), '(pixel_type)\n', (38958, 38970), False, 'import itk\n'), ((49112, 49129), 'itk.output', 'itk.output', (['value'], {}), '(value)\n', (49122, 49129), False, 'import itk\n')] |
import socket
import threading
import select
import queue
import time
class ThreadedServer(object):
    """Non-blocking TCP server multiplexed with select() on a daemon thread.

    The constructor binds the listening socket and starts run() in the
    background, so the server works until the application exits.  Clients
    drive a small text protocol over the connection:

    * ``b'HELLO'``   -> mark the client as connected
    * ``b'READY'``   -> mark the client as ready to receive data
    * ``b'GOODBYE'`` -> remove the client from the select sets

    Data queued via :meth:`appendToMessageBuff` is drained to writable,
    ready clients inside :meth:`run`.
    """
    def __init__(self, host, port, interval=1):
        """Bind the listening socket on (host, port) and start run().

        :type host: str
        :param host: Interface address to bind to.
        :type port: int
        :param port: TCP port to listen on.
        :type interval: int
        :param interval: Check interval, in seconds (stored but not read
            anywhere in this class).
        """
        self.interval = interval
        self.host = host
        self.port = port
        # Protocol state: a client must send HELLO and READY before any
        # queued message is actually transmitted (see the write loop in run()).
        self.isClientReady = False
        self.isClientConnected = False
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Non-blocking mode: all I/O readiness is driven by select() in run().
        self.sock.setblocking(0)
        # Allow immediate rebinding of the port after a restart.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        # select() bookkeeping: sockets watched for reads / writes, plus one
        # bounded outgoing-message queue per connected client socket.
        self.inputs = [self.sock]
        self.outputs = []
        self.message_queues = {}
        # NOTE(review): oldData is never read anywhere in this class.
        self.oldData = ''
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True  # Daemonize thread
        thread.start()  # Start the execution
    def run(self):
        """Event loop: accept clients, read protocol messages, flush queues."""
        self.sock.listen(5)
        # print("listening")
        while self.inputs:
            # print(self.inputs[0].getsockname())
            readable, writeable, errored = select.select(
                self.inputs, self.outputs, self.inputs)
            for s in readable:
                if s is self.sock:
                    # The listening socket is readable: a new client is waiting.
                    client, address = self.sock.accept()
                    self.inputs.append(client)
                    print("connection from:", address)
                    client.settimeout(5)
                    client.setblocking(0)
                    # Bounded queue: at most 2 pending outgoing messages per client.
                    self.message_queues[client] = queue.Queue(maxsize=2)
                    print(len(self.inputs))
                    print(len(self.outputs))
                    print(len(self.message_queues))
                else:
                    data = s.recv(64)
                    if data:
                        print(data.decode('utf-8'))
                        # A readable client socket has data
                        if b'HELLO' in data:
                            self.isClientConnected = True
                            print("Hello Client at: " + str(s.getpeername()
                                                            [0]) + ':' + str(s.getpeername()[1]))
                            # print(self.inputs, self.outputs)
                            if s not in self.outputs:
                                self.outputs.append(s)
                                # Add output channel for response
                            self.message_queues[s].put(data)
                        elif b'READY' in data:
                            self.isClientReady = True
                            print("Client at: " + str(s.getpeername()
                                                      [0]) + ':' + str(s.getpeername()[1]) + " is ready")
                            # Add output channel for response
                            self.message_queues[s].put(data)
                        elif b'GOODBYE' in data:
                            self.isClientReady = False
                            self.isClientConnected = False
                            print("Removing client at " + str(s.getpeername()
                                                              [0]) + ':' + str(s.getpeername()[1]))
                            if s in self.inputs:
                                self.inputs.remove(s)
                                print("removed from inputs")
                            if s in self.outputs:
                                self.outputs.remove(s)
                                print("removed from outputs")
                            # NOTE(review): the socket is not closed here and its
                            # entry in message_queues is never deleted (the line
                            # below is commented out), so resources leak per client.
                            for k in self.message_queues.keys():
                                print(k)
                                print(type(k))
                            # del self.message_queues[s.getpeername()[0]]
            for s in writeable:
                try:
                    next_msg = self.message_queues[s].get_nowait()
                except queue.Empty:
                    pass
                else:
                    # Send the whole message; only transmit while the client has
                    # completed the HELLO/READY handshake.
                    totalsent = 0
                    sizeObj = len(next_msg)
                    while (totalsent < sizeObj and self.isClientReady and self.isClientConnected):
                        sent = s.send(next_msg[totalsent:])
                        # NOTE(review): a newline is sent after every partial
                        # chunk, not once per message — presumably intended as a
                        # message delimiter; confirm against the client parser.
                        s.send(b'\n')
                        if sent == 0:
                            raise RuntimeError('Socket is broke')
                        totalsent += sent
            for s in errored:
                # Sockets reported in the exceptional set are dropped entirely.
                print('>>handling exceptional condition for')
                print(s.getpeername())
                self.inputs.remove(s)
                if s in self.outputs:
                    self.outputs.remove(s)
                s.close()
                del self.message_queues[s]
    def appendToMessageBuff(self, data):
        """Queue `data` for every client currently in the write set.

        Messages are dropped (with a console note) when a client's bounded
        queue is already full.
        """
        for s in self.outputs:
            if self.message_queues[s].full() == False:
                self.message_queues[s].put(data)
            else:
                print("Msg Queue is full")
        # print("appended to obuff for " + s.getpeername()[0])
| [
"threading.Thread",
"select.select",
"queue.Queue",
"socket.socket"
] | [((588, 637), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (601, 637), False, 'import socket\n'), ((926, 968), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run', 'args': '()'}), '(target=self.run, args=())\n', (942, 968), False, 'import threading\n'), ((1320, 1373), 'select.select', 'select.select', (['self.inputs', 'self.outputs', 'self.inputs'], {}), '(self.inputs, self.outputs, self.inputs)\n', (1333, 1373), False, 'import select\n'), ((1750, 1772), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(2)'}), '(maxsize=2)\n', (1761, 1772), False, 'import queue\n')] |
import pandas as pd
from scipy.stats import t
import numpy as np
import requests
def make_dataframe(r):
    """Build a DataFrame of station readings from an API response dict.

    Each element of ``r['data']`` contributes one row with columns
    ``lat``, ``lon``, ``aqi`` and ``name`` (the station name).  The ``aqi``
    column is coerced to numeric; unparsable readings become NaN.
    """
    records = [
        [entry['lat'], entry['lon'], entry['aqi'], entry['station']['name']]
        for entry in r['data']
    ]
    frame = pd.DataFrame(records, columns=['lat', 'lon', 'aqi', 'name'])
    frame['aqi'] = pd.to_numeric(frame['aqi'], errors='coerce')
    return frame
def one_samp_t_test(df, diff):
    """Return the one-sample t statistic for `diff` on the 'aqi' column.

    Computed as diff / sqrt(var / n); the result is a Series because
    ``df.count()`` counts per column.
    """
    standard_error = (df['aqi'].var() / df.count()) ** 0.5
    return diff / standard_error
def get_request_data(url):
    """Fetch `url` over HTTP and return the decoded JSON payload."""
    response = requests.get(url)
    return response.json()
class Air_Quality_Analytics():
    """Query the WAQI REST API for a city's air-quality index and compare it
    with nearby stations inside a latitude/longitude bounding box."""
    def __init__(self):
        # Base endpoint of the WAQI feed API; city_str is appended per query.
        self.base_url = "https://api.waqi.info/feed/"
        self.city_str = ""
        # NOTE(review): token placeholder — must be replaced with a real API key.
        self.url = self.base_url + self.city_str + "/?token=<PASSWORD>"
    def get_local_air_quality_comparison(self, city_str, tolerance=2.0):
        """Compare the city's mean AQI against all stations within a
        `tolerance`-degree bounding box anchored at the city's coordinates.

        Returns a dict with 'deviation' (city mean minus local mean) and
        'probability' (one-sided p-value from a one-sample t test), or the
        string 'Not found' for values that could not be computed.  Returns
        None implicitly when the bounding-box query yields no stations.
        """
        self.city_str = city_str
        token = "<PASSWORD>"
        req_data = get_request_data(self.base_url + self.city_str + "/?token=" + token)
        lat, lng = req_data['data']['city']['geo']
        # Bounding box from (lat, lng) to (lat+tolerance, lng+tolerance).
        # NOTE(review): the box extends only north-east of the city — presumably
        # a symmetric box was intended; confirm.
        latlngbx = str(lat) + "," + str(lng) + "," + str(lat + tolerance) + "," + str(lng + tolerance)
        r = requests.get("https://api.waqi.info/" + f"/map/bounds/?latlng={latlngbx}&token={token}").json()
        if len(r['data']) > 0:
            local_df = make_dataframe(r)
            air_quality_comp = {
                'deviation': 'Not found',
                'probability': 'Not found'
            }
            # Mean AQI of stations whose name mentions the city, minus the
            # mean over every station in the box.
            deviation = local_df[local_df['name'].str.contains(city_str)]['aqi'].mean() - local_df['aqi'].mean()
            if not np.isnan(deviation):
                air_quality_comp['deviation'] = deviation
            # One-sample t statistic, converted to a one-sided p-value via the
            # survival function with n - 1 degrees of freedom.
            probability = one_samp_t_test(local_df[local_df['name'].str.contains(city_str)], deviation)
            probability = t.sf(np.abs(probability), local_df.count() - 1)[0]
            if not np.isnan(probability):
                air_quality_comp['probability'] = probability
            return air_quality_comp
    def get_air_quality_index(self, city_str):
        """Return the current AQI reported for `city_str`, or None on any
        failure (network error, unknown city, malformed payload)."""
        self.city_str = city_str
        try:
            return get_request_data(self.base_url + self.city_str + "/?token=<PASSWORD>")[
                'data']["aqi"]
        except:
            # Deliberate best-effort: any failure yields an implicit None.
            pass
# AQA = Air_Quality_Analytics()
# print(AQA.get_local_air_quality_comparison('Los Angeles'))
| [
"numpy.abs",
"requests.get",
"pandas.to_numeric",
"numpy.isnan",
"pandas.DataFrame"
] | [((242, 299), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': "['lat', 'lon', 'aqi', 'name']"}), "(rows, columns=['lat', 'lon', 'aqi', 'name'])\n", (254, 299), True, 'import pandas as pd\n'), ((316, 354), 'pandas.to_numeric', 'pd.to_numeric', (['df.aqi'], {'errors': '"""coerce"""'}), "(df.aqi, errors='coerce')\n", (329, 354), True, 'import pandas as pd\n'), ((511, 528), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (523, 528), False, 'import requests\n'), ((1138, 1230), 'requests.get', 'requests.get', (["('https://api.waqi.info/' + f'/map/bounds/?latlng={latlngbx}&token={token}')"], {}), "('https://api.waqi.info/' +\n f'/map/bounds/?latlng={latlngbx}&token={token}')\n", (1150, 1230), False, 'import requests\n'), ((1572, 1591), 'numpy.isnan', 'np.isnan', (['deviation'], {}), '(deviation)\n', (1580, 1591), True, 'import numpy as np\n'), ((1853, 1874), 'numpy.isnan', 'np.isnan', (['probability'], {}), '(probability)\n', (1861, 1874), True, 'import numpy as np\n'), ((1787, 1806), 'numpy.abs', 'np.abs', (['probability'], {}), '(probability)\n', (1793, 1806), True, 'import numpy as np\n')] |
import streamlit as st
text = """\
## Custom CSS does not play nicely with Bokeh HTML, CSS and Javascipt
I've experienced numerous problems when using css.
I have a feeling that the Bokeh Javascript on elements does not take everything like images and inline css into account. But it's difficult for me to catch and understand.
For example I struggled with the below scrollbar until I found out it was because i had a `margin-bottom: 1rem;` in the css for the info box. When I removed that the problem was solved.
<img src="https://github.com/MarcSkovMadsen/awesome-panel/blob/master/gallery/bootstrap_dashboard/assets/images/info_alert_scrollbar_problem.png?raw=true" width="200" height="400" />
"""
st.write(text)
| [
"streamlit.write"
] | [((707, 721), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (715, 721), True, 'import streamlit as st\n')] |
from RnaseqDiffExpressionReport import ProjectTracker
from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC
class TopDifferentiallyExpressedGenes(ProjectTracker):
    '''output differentially expressed genes.'''
    # Maximum number of rows returned per track.
    limit = 10
    # Tracks are discovered from tables named <track>_gene_diff.
    pattern = '(.*)_gene_diff'
    # SQL ORDER BY expression; empty in this base class.
    # NOTE(review): with sort == '' the rendered statement contains a bare
    # "ORDER BY" — presumably only the subclasses (which set sort) are used
    # directly; confirm.
    sort = ''
    def __call__(self, track, slice=None):
        # NOTE(review): the %(track)s / %(sort)s / %(limit)i placeholders are
        # presumably interpolated by self.getAll from the caller's local
        # variables and class attributes — do not rename these names.
        statement = '''SELECT DISTINCT a.gene_name,
        a.gene_id,
        a.gene_biotype,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue,
        s.contig, s.start, s.end
        FROM %(track)s_gene_diff as t,
        annotations.transcript_info as a,
        annotations.gene_stats as s
        WHERE a.gene_id = t.test_id AND
        s.gene_id = t.test_id AND
        t.significant
        ORDER BY %(sort)s
        LIMIT %(limit)i'''
        data = self.getAll(statement)
        if data:
            # Annotation join succeeded: decorate identifiers with hyperlinks
            # and assemble a genomic locus column for the UCSC browser.
            data['gene_id'] = [linkToEnsembl(x) for x in data["gene_id"]]
            data["locus"] = [linkToUCSC(*x) for x in zip(
                data["contig"],
                data["start"],
                data["end"])]
            return data
        # Fallback without the annotation tables: return the bare test results.
        statement = '''SELECT DISTINCT t.test_id,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue
        FROM %(track)s_gene_diff as t
        WHERE
        t.significant
        ORDER BY %(sort)s
        LIMIT %(limit)i'''
        return self.getAll(statement)
class TopUpRegulatedGenes(TopDifferentiallyExpressedGenes):
    # Strongest up-regulated genes first (largest log2 fold change).
    sort = 't.l2fold DESC'
class TopDownRegulatedGenes(TopDifferentiallyExpressedGenes):
    # Strongest down-regulated genes first (smallest log2 fold change);
    # 'Asc' is valid SQL (keywords are case-insensitive).
    sort = 't.l2fold Asc'
class AllDifferentiallyExpressedGenes(ProjectTracker):
    '''output differentially expressed genes.'''
    # Higher cap than TopDifferentiallyExpressedGenes; ordering is fixed to
    # descending log2 fold change instead of a configurable `sort` attribute.
    limit = 1000
    pattern = '(.*)_gene_diff'
    def __call__(self, track, slice=None):
        # NOTE(review): %(track)s and %(limit)i are presumably interpolated by
        # self.getAll from the caller's locals/attributes — do not rename.
        statement = '''SELECT DISTINCT a.gene_name,
        a.gene_id,
        a.gene_biotype,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue,
        s.contig, s.start, s.end
        FROM %(track)s_gene_diff as t,
        annotations.transcript_info as a,
        annotations.gene_stats as s
        WHERE a.gene_id = t.test_id AND
        s.gene_id = t.test_id AND
        t.significant
        ORDER BY t.l2fold DESC LIMIT %(limit)i'''
        data = self.getAll(statement)
        if data:
            # Decorate identifiers with Ensembl links and build a UCSC locus.
            data['gene_id'] = [linkToEnsembl(x) for x in data["gene_id"]]
            data["locus"] = [linkToUCSC(*x) for x in zip(
                data["contig"],
                data["start"],
                data["end"])]
            return data
        # Fallback when the annotation join yields nothing: bare test results.
        statement = '''SELECT DISTINCT t.test_id,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue
        FROM %(track)s_gene_diff as t
        WHERE
        t.significant
        ORDER BY t.l2fold DESC LIMIT %(limit)i'''
        return self.getAll(statement)
| [
"RnaseqDiffExpressionReport.linkToUCSC",
"RnaseqDiffExpressionReport.linkToEnsembl"
] | [((915, 931), 'RnaseqDiffExpressionReport.linkToEnsembl', 'linkToEnsembl', (['x'], {}), '(x)\n', (928, 931), False, 'from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC\n'), ((987, 1001), 'RnaseqDiffExpressionReport.linkToUCSC', 'linkToUCSC', (['*x'], {}), '(*x)\n', (997, 1001), False, 'from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC\n'), ((2804, 2820), 'RnaseqDiffExpressionReport.linkToEnsembl', 'linkToEnsembl', (['x'], {}), '(x)\n', (2817, 2820), False, 'from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC\n'), ((2876, 2890), 'RnaseqDiffExpressionReport.linkToUCSC', 'linkToUCSC', (['*x'], {}), '(*x)\n', (2886, 2890), False, 'from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC\n')] |
"""Abstract Timeseries Factory Interface."""
from __future__ import absolute_import
from obspy.core import Stream
from .TimeseriesFactory import TimeseriesFactory
class PlotTimeseriesFactory(TimeseriesFactory):
    """TimeseriesFactory whose only output is a plot of the stored stream."""

    def __init__(self, *args, **kwargs):
        TimeseriesFactory.__init__(self, *args, **kwargs)

    def get_timeseries(
        self,
        starttime,
        endtime,
        observatory=None,
        channels=None,
        type=None,
        interval=None,
    ):
        """This factory does not support get_timeseries."""
        raise NotImplementedError('"get_timeseries" not implemented')

    def put_timeseries(
        self,
        timeseries,
        starttime=None,
        endtime=None,
        channels=None,
        type=None,
        interval=None,
    ):
        """Plot timeseries data.

        Parameters
        ----------
        timeseries : obspy.core.Stream
            stream containing traces to plot.
        starttime : UTCDateTime
            time of first sample to include;
            uses first sample if unspecified.
        endtime : UTCDateTime
            time of last sample to include;
            uses last sample if unspecified.
        channels : array_like
            list of channels to include, optional.
            uses default if unspecified.
        type : {'definitive', 'provisional', 'quasi-definitive', 'variation'}
            data type, optional.
            uses default if unspecified.
        interval : {'day', 'hour', 'minute', 'month', 'second'}
            data interval, optional.
            uses default if unspecified.

        Raises
        ------
        TimeseriesFactoryException
            if any errors occur.
        """
        # Trim on a copy so the caller's stream is left untouched.
        if not (starttime is None and endtime is None):
            timeseries = timeseries.copy()
            timeseries.trim(starttime=starttime, endtime=endtime)
        # Restrict to the requested channels, if any were given.
        if channels is not None:
            selected = Stream()
            for name in channels:
                selected += timeseries.select(channel=name)
            timeseries = selected
        timeseries.plot()
| [
"obspy.core.Stream"
] | [((2016, 2024), 'obspy.core.Stream', 'Stream', ([], {}), '()\n', (2022, 2024), False, 'from obspy.core import Stream\n')] |
from tools.profilers.net_flops import net_flops
import matplotlib.pyplot as plt
import tensorflow as tf
# Matplotlib annotation styles for the layer-graph plot: branch/decision nodes
# use a sawtooth box, leaf nodes a rounded box, and edges a reversed arrow.
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def get_layer_index(dic, layer_target):
    """Return the key under which `layer_target` appears in `dic`.

    Returns the first matching key (insertion order), or None when the
    layer is not present.
    """
    return next(
        (key for key, value in dic.items() if value == layer_target),
        None,
    )
def plotNode(nodeText, centerPt, parentPt, nodeType, ax):
    """Annotate `ax` with `nodeText` at `centerPt`, arrowed from `parentPt`.

    `nodeType` is a bbox style dict (decisionNode or leafNode); the arrow
    style comes from the module-level `arrow_args`.
    """
    ax.annotate(
        nodeText,
        xy=parentPt,
        xycoords='data',
        xytext=centerPt,
        textcoords='data',
        va='center',
        ha='center',
        bbox=nodeType,
        arrowprops=arrow_args,
    )
# This parameter is a bit scary. did not understand
def plot_sequence(layer, old_end_point, ax, pruned_ratio, layer_index_dic):
    """Plot a linear chain of layers downward from `old_end_point`.

    Walks single-input/single-output layers, drawing one leaf node per layer
    (annotated with its prune ratio when available), until a layer with two
    outbound nodes (a branch point) or no outbound nodes is reached.
    Returns the layer where the walk stopped and the last drawn point.

    NOTE(review): assumes a Keras functional-API graph where layers expose
    `outbound_nodes` / `inbound_nodes[0].flat_input_ids` — confirm for the
    TF version in use.
    """
    while len(layer.outbound_nodes) == 1 and len(layer.inbound_nodes[0].flat_input_ids) == 1:
        layer_index = get_layer_index(layer_index_dic, layer)
        # Only Conv2D layers carry a prune ratio in the annotation text.
        if isinstance(layer, tf.keras.layers.Conv2D) and pruned_ratio is not None and\
                layer_index in pruned_ratio.keys():
            text = f'{layer.name} + prune ratio is {pruned_ratio[layer_index]}'
        else:
            text = f'{layer.name}'
        # Advance to the sole successor before drawing the current node.
        layer = layer.outbound_nodes[0].layer
        # Each node is drawn 0.2 below the previous one, arrow starting 0.05 below.
        start_point = (old_end_point[0], old_end_point[1]-0.05)
        end_point = (old_end_point[0], old_end_point[1]-0.2)
        plotNode(text, end_point, start_point, leafNode, ax)
        old_end_point = end_point
    if len(layer.outbound_nodes) == 2:
        # Branch point: draw it once here, then let the caller recurse into
        # both outgoing paths.
        if isinstance(layer, tf.keras.layers.Conv2D) and pruned_ratio is not None:
            layer_index = get_layer_index(layer_index_dic, layer)
            text = f'{layer.name} + prune ratio is {pruned_ratio[layer_index]}'
        else:
            text = f'{layer.name}'
        start_point = (old_end_point[0], old_end_point[1]-0.05)
        end_point = (old_end_point[0], old_end_point[1]-0.2)
        plotNode(text, end_point, start_point, leafNode, ax)
        old_end_point = end_point
    return layer, old_end_point
def load_model_param(model):
    """Return a dict mapping each layer's position in `model.layers` to the
    layer object itself."""
    return dict(enumerate(model.layers))
def visualize_model(model, foldername, pruned_ratio=None):
    """Render the model's layer graph top-down and save it as a PNG.

    Draws linear chains via plot_sequence() and fans out left/right at
    two-way branch points, then rejoins.  When `pruned_ratio` (a dict of
    layer index -> ratio) is given, Conv2D nodes are annotated with it.
    The figure is written to <foldername>/model_plot.png.
    """
    # Figure height scales with the layer count (constants tuned for a
    # 181-layer reference model).
    ysize = len(model.layers)*90/181
    y_size2 = len(model.layers)*-35/181
    layer_index_dic = load_model_param(model)
    fig, ax = plt.subplots(figsize=(20, ysize))
    ax.set(xlim=(-5, 5), ylim=(y_size2, 3))
    # Horizontal offset used when splitting into two branches.
    distance = 3
    old_end_point = (0.5, 3)
    # Start at layers[1]; layers[0] is presumably the input layer — confirm.
    layer = model.layers[1]
    while len(layer.outbound_nodes) != 0:
        layer, old_end_point = plot_sequence(
            layer, old_end_point, ax, pruned_ratio, layer_index_dic)
        while len(layer.outbound_nodes) == 2:
            # Branch point: draw the left and right paths offset horizontally.
            left_start_point = (old_end_point[0]-distance, old_end_point[1])
            right_start_point = (old_end_point[0]+distance, old_end_point[1])
            _, old_end_point = plot_sequence(
                layer.outbound_nodes[0].layer, left_start_point, ax, pruned_ratio, layer_index_dic)
            layer, _ = plot_sequence(
                layer.outbound_nodes[1].layer, right_start_point, ax, pruned_ratio, layer_index_dic)
            # Re-center and draw the merge node below the branch.
            old_end_point = (old_end_point[0] + distance, old_end_point[1])
            text = f'{layer.name}'
            start_point = (old_end_point[0], old_end_point[1]-0.05)
            end_point = (old_end_point[0], old_end_point[1]-0.2)
            plotNode(text, end_point, start_point, leafNode, ax)
            old_end_point = end_point
            if len(layer.outbound_nodes) != 0:
                layer = layer.outbound_nodes[0].layer
    plt.savefig(foldername+"/model_plot.png")
    plt.clf()
def plot_model_decompose(model, foldername):
    """Save a Keras architecture diagram of `model` (with layer shapes) to
    <foldername>/decomposed_plot.png."""
    output_path = foldername + "/decomposed_plot.png"
    tf.keras.utils.plot_model(model, to_file=output_path, show_shapes=True)
def model_cmp_flops_plot(original_model, compressed_model, foldername):
    """Save a stacked-bar chart comparing FLOPs of two models by op category.

    One bar per model, stacked bottom-to-top as: others, depthwise conv,
    rectangular conv, small-kernel conv, large-kernel conv.  Written to
    <foldername>/FLOPs_comparison.png.

    NOTE(review): assumes net_flops() returns a sequence whose indices 0-3
    are the large-kernel / small-kernel / depthwise / rectangular conv FLOPs
    and whose last element is the total — confirm against net_flops.
    """
    result_original = net_flops(original_model)
    result_compressed = net_flops(compressed_model)
    parameter_list = [result_original, result_compressed]
    names = ['Original Model', 'Compressed model']
    # Bar x-positions: one per model.
    r = [0, 1]
    big_k = []
    small_k = []
    depth = []
    rect = []
    others = []
    for l in parameter_list:
        big_k.append(l[0])
        small_k.append(l[1])
        depth.append(l[2])
        rect.append(l[3])
        # "Others" is the total minus the four convolution categories.
        others.append(l[-1]-l[0]-l[2]-l[1]-l[3])
    barWidth = 0.5
    # Stack the segments; each `bottom` is the running sum of those below it.
    plt.bar(r, others, color='tab:purple', width=barWidth)
    plt.bar(r, depth, bottom=others, color='tab:red', width=barWidth)
    plt.bar(r, rect, bottom=[depth[i]+others[i] for i in range(len(depth))], color='tab:green', width=barWidth)
    plt.bar(r, small_k, bottom=[depth[i]+others[i]+rect[i] for i in range(len(depth))], color='tab:orange', width=barWidth)
    plt.bar(r, big_k, bottom=[depth[i]+others[i]+rect[i]+small_k[i] for i in range(len(depth))], color='tab:blue', width=barWidth)
    plt.xticks(r, names, fontweight='bold')
    plt.xlabel("group")
    # Show graphic
    # Legend entries must mirror the segment colors used above.
    colors = {
        '2D Convolution with large kernel': 'tab:blue',
        '2D Convolution with small kernel': 'tab:orange',
        'Rectangular 2D Convolution': 'tab:green',
        'Depthwise 2D Convolution': 'tab:red',
        'Others': 'tab:purple'}
    labels = list(colors.keys())
    handles = [plt.Rectangle((0, 0), 1, 1, color=colors[label]) for label in labels]
    plt.legend(handles, labels)
    plt.ylabel("FLOPs")
    plt.grid()
    plt.savefig(foldername+"/FLOPs_comparison.png")
    plt.clf()
# plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"tensorflow.keras.utils.plot_model",
"matplotlib.pyplot.bar",
"tools.profilers.net_flops.net_flops",
"matplotlib.pyplot.Rectangle",
... | [((2502, 2535), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, ysize)'}), '(figsize=(20, ysize))\n', (2514, 2535), True, 'import matplotlib.pyplot as plt\n'), ((3745, 3788), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(foldername + '/model_plot.png')"], {}), "(foldername + '/model_plot.png')\n", (3756, 3788), True, 'import matplotlib.pyplot as plt\n'), ((3791, 3800), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3798, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3977), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': 'dot_img_file', 'show_shapes': '(True)'}), '(model, to_file=dot_img_file, show_shapes=True)\n', (3930, 3977), True, 'import tensorflow as tf\n'), ((4073, 4098), 'tools.profilers.net_flops.net_flops', 'net_flops', (['original_model'], {}), '(original_model)\n', (4082, 4098), False, 'from tools.profilers.net_flops import net_flops\n'), ((4123, 4150), 'tools.profilers.net_flops.net_flops', 'net_flops', (['compressed_model'], {}), '(compressed_model)\n', (4132, 4150), False, 'from tools.profilers.net_flops import net_flops\n'), ((4563, 4617), 'matplotlib.pyplot.bar', 'plt.bar', (['r', 'others'], {'color': '"""tab:purple"""', 'width': 'barWidth'}), "(r, others, color='tab:purple', width=barWidth)\n", (4570, 4617), True, 'import matplotlib.pyplot as plt\n'), ((4623, 4688), 'matplotlib.pyplot.bar', 'plt.bar', (['r', 'depth'], {'bottom': 'others', 'color': '"""tab:red"""', 'width': 'barWidth'}), "(r, depth, bottom=others, color='tab:red', width=barWidth)\n", (4630, 4688), True, 'import matplotlib.pyplot as plt\n'), ((5065, 5104), 'matplotlib.pyplot.xticks', 'plt.xticks', (['r', 'names'], {'fontweight': '"""bold"""'}), "(r, names, fontweight='bold')\n", (5075, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5109, 5128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""group"""'], {}), "('group')\n", (5119, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5530, 
5557), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {}), '(handles, labels)\n', (5540, 5557), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5581), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FLOPs"""'], {}), "('FLOPs')\n", (5572, 5581), True, 'import matplotlib.pyplot as plt\n'), ((5586, 5596), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5594, 5596), True, 'import matplotlib.pyplot as plt\n'), ((5601, 5650), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(foldername + '/FLOPs_comparison.png')"], {}), "(foldername + '/FLOPs_comparison.png')\n", (5612, 5650), True, 'import matplotlib.pyplot as plt\n'), ((5653, 5662), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5660, 5662), True, 'import matplotlib.pyplot as plt\n'), ((5456, 5504), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', '(1)', '(1)'], {'color': 'colors[label]'}), '((0, 0), 1, 1, color=colors[label])\n', (5469, 5504), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.preprocessor."""
import numpy as np
import six
import tensorflow as tf
from object_detection.tensorflow_detect.core import standard_fields as fields, \
preprocessor, preprocessor_cache
# `mock` moved into the standard library (unittest.mock) in Python 3;
# fall back to the standalone backport on Python 2.
if six.PY2:
  import mock  # pylint: disable=g-import-not-at-top
else:
  from unittest import mock  # pylint: disable=g-import-not-at-top
class PreprocessorTest(tf.test.TestCase):
def createColorfulTestImage(self):
ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
imr = tf.concat([ch255, ch0, ch0], 3)
img = tf.concat([ch255, ch255, ch0], 3)
imb = tf.concat([ch255, ch0, ch255], 3)
imw = tf.concat([ch128, ch128, ch128], 3)
imu = tf.concat([imr, img], 2)
imd = tf.concat([imb, imw], 2)
im = tf.concat([imu, imd], 1)
return im
def createTestImages(self):
images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128],
[0, 128, 128, 128], [192, 192, 128, 128]]],
dtype=tf.uint8)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128],
[0, 128, 192, 192], [192, 192, 128, 192]]],
dtype=tf.uint8)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192],
[0, 128, 128, 0], [192, 192, 192, 128]]],
dtype=tf.uint8)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
  def createEmptyTestBoxes(self):
    """Returns a float32 box tensor containing no boxes."""
    boxes = tf.constant([[]], dtype=tf.float32)
    return boxes
  def createTestBoxes(self):
    """Returns two normalized [ymin, xmin, ymax, xmax] boxes."""
    boxes = tf.constant(
        [[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
    return boxes
  def createTestLabelScores(self):
    """Per-box scores; the second (0.5) falls below the 0.6 threshold used
    in the thresholding tests."""
    return tf.constant([1.0, 0.5], dtype=tf.float32)
  def createTestLabelScoresWithMissingScore(self):
    """Per-box scores where the second box's score is missing (NaN)."""
    return tf.constant([0.5, np.nan], dtype=tf.float32)
  def createTestMasks(self):
    """Returns two 3x3 instance masks with 0/255 values."""
    mask = np.array([
        [[255.0, 0.0, 0.0],
         [255.0, 0.0, 0.0],
         [255.0, 0.0, 0.0]],
        [[255.0, 255.0, 0.0],
         [255.0, 255.0, 0.0],
         [255.0, 255.0, 0.0]]])
    return tf.constant(mask, dtype=tf.float32)
  def createTestKeypoints(self):
    """Returns [2, 3, 2] normalized keypoints for two instances."""
    keypoints = np.array([
        [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
        [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
    ])
    return tf.constant(keypoints, dtype=tf.float32)
  def createTestKeypointsInsideCrop(self):
    """Keypoints expected to survive the crop tests."""
    keypoints = np.array([
        [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
        [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
    ])
    return tf.constant(keypoints, dtype=tf.float32)
  def createTestKeypointsOutsideCrop(self):
    """Keypoints expected to fall outside the crop in the crop tests."""
    keypoints = np.array([
        [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
        [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
    ])
    return tf.constant(keypoints, dtype=tf.float32)
  def createKeypointFlipPermutation(self):
    """Keypoint index permutation handed to the flip options (swaps 1 and 2)."""
    return np.array([0, 2, 1], dtype=np.int32)
  def createTestLabels(self):
    """Returns int32 class labels for the two test boxes."""
    labels = tf.constant([1, 2], dtype=tf.int32)
    return labels
  def createTestBoxesOutOfImage(self):
    """Boxes whose coordinates extend past the normalized [0, 1] window."""
    boxes = tf.constant(
        [[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32)
    return boxes
  def createTestMultiClassScores(self):
    """Per-box class score distributions (one row per test box)."""
    return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32)
  def expectedImagesAfterNormalization(self):
    """createTestImages() mapped from [0, 256) to [-1, 1]."""
    images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0],
                             [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]],
                           dtype=tf.float32)
    images_r = tf.expand_dims(images_r, 3)
    images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0],
                             [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]],
                           dtype=tf.float32)
    images_g = tf.expand_dims(images_g, 3)
    images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5],
                             [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]],
                           dtype=tf.float32)
    images_b = tf.expand_dims(images_b, 3)
    images = tf.concat([images_r, images_g, images_b], 3)
    return images
  def expectedMaxImageAfterColorScale(self):
    """The normalized image shifted up by 0.1 (upper color-scale bound)."""
    images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
                             [-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]],
                           dtype=tf.float32)
    images_r = tf.expand_dims(images_r, 3)
    images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
                             [-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]],
                           dtype=tf.float32)
    images_g = tf.expand_dims(images_g, 3)
    images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6],
                             [-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]],
                           dtype=tf.float32)
    images_b = tf.expand_dims(images_b, 3)
    images = tf.concat([images_r, images_g, images_b], 3)
    return images
  def expectedMinImageAfterColorScale(self):
    """The normalized image shifted down by 0.1 (lower color-scale bound)."""
    images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
                             [-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]],
                           dtype=tf.float32)
    images_r = tf.expand_dims(images_r, 3)
    images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
                             [-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]],
                           dtype=tf.float32)
    images_g = tf.expand_dims(images_g, 3)
    images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4],
                             [-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]],
                           dtype=tf.float32)
    images_b = tf.expand_dims(images_b, 3)
    images = tf.concat([images_r, images_g, images_b], 3)
    return images
  def expectedImagesAfterLeftRightFlip(self):
    """The normalized image mirrored along its width."""
    images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1],
                             [0, 0, 0, -1], [0, 0, 0.5, 0.5]]],
                           dtype=tf.float32)
    images_r = tf.expand_dims(images_r, 3)
    images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1],
                             [0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]],
                           dtype=tf.float32)
    images_g = tf.expand_dims(images_g, 3)
    images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1],
                             [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]],
                           dtype=tf.float32)
    images_b = tf.expand_dims(images_b, 3)
    images = tf.concat([images_r, images_g, images_b], 3)
    return images
  def expectedImagesAfterUpDownFlip(self):
    """The normalized image mirrored along its height."""
    images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0],
                             [-1, -1, 0, 0], [0, 0, 0, 0]]],
                           dtype=tf.float32)
    images_r = tf.expand_dims(images_r, 3)
    images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5],
                             [-1, -1, 0, 0], [-1, -1, 0, 0]]],
                           dtype=tf.float32)
    images_g = tf.expand_dims(images_g, 3)
    images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1],
                             [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]],
                           dtype=tf.float32)
    images_b = tf.expand_dims(images_b, 3)
    images = tf.concat([images_r, images_g, images_b], 3)
    return images
  def expectedImagesAfterRot90(self):
    """The normalized image after the 90-degree rotation applied by
    random_rotation90."""
    images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0],
                             [0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
                           dtype=tf.float32)
    images_r = tf.expand_dims(images_r, 3)
    images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0],
                             [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]],
                           dtype=tf.float32)
    images_g = tf.expand_dims(images_g, 3)
    images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5],
                             [0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
                           dtype=tf.float32)
    images_b = tf.expand_dims(images_b, 3)
    images = tf.concat([images_r, images_g, images_b], 3)
    return images
  def expectedBoxesAfterLeftRightFlip(self):
    """createTestBoxes() with x-coordinates mirrored (x -> 1 - x)."""
    boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]],
                        dtype=tf.float32)
    return boxes
  def expectedBoxesAfterUpDownFlip(self):
    """createTestBoxes() with y-coordinates mirrored (y -> 1 - y)."""
    boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]],
                        dtype=tf.float32)
    return boxes
  def expectedBoxesAfterRot90(self):
    """createTestBoxes() after the rotation applied by random_rotation90."""
    boxes = tf.constant(
        [[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32)
    return boxes
  def expectedMasksAfterLeftRightFlip(self):
    """createTestMasks() mirrored along the width."""
    mask = np.array([
        [[0.0, 0.0, 255.0],
         [0.0, 0.0, 255.0],
         [0.0, 0.0, 255.0]],
        [[0.0, 255.0, 255.0],
         [0.0, 255.0, 255.0],
         [0.0, 255.0, 255.0]]])
    return tf.constant(mask, dtype=tf.float32)
  def expectedMasksAfterUpDownFlip(self):
    """createTestMasks() mirrored along the height; every mask row is
    identical, so the values are unchanged."""
    mask = np.array([
        [[255.0, 0.0, 0.0],
         [255.0, 0.0, 0.0],
         [255.0, 0.0, 0.0]],
        [[255.0, 255.0, 0.0],
         [255.0, 255.0, 0.0],
         [255.0, 255.0, 0.0]]])
    return tf.constant(mask, dtype=tf.float32)
  def expectedMasksAfterRot90(self):
    """createTestMasks() after the rotation applied by random_rotation90."""
    mask = np.array([
        [[0.0, 0.0, 0.0],
         [0.0, 0.0, 0.0],
         [255.0, 255.0, 255.0]],
        [[0.0, 0.0, 0.0],
         [255.0, 255.0, 255.0],
         [255.0, 255.0, 255.0]]])
    return tf.constant(mask, dtype=tf.float32)
  def expectedLabelScoresAfterThresholding(self):
    """Scores surviving retain_boxes_above_threshold at threshold 0.6."""
    return tf.constant([1.0], dtype=tf.float32)
  def expectedBoxesAfterThresholding(self):
    """Boxes surviving retain_boxes_above_threshold at threshold 0.6."""
    return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32)
  def expectedLabelsAfterThresholding(self):
    """Labels surviving retain_boxes_above_threshold at threshold 0.6."""
    return tf.constant([1], dtype=tf.float32)
  def expectedMultiClassScoresAfterThresholding(self):
    """Multiclass scores surviving thresholding at 0.6."""
    return tf.constant([[1.0, 0.0]], dtype=tf.float32)
  def expectedMasksAfterThresholding(self):
    """The single instance mask surviving thresholding at 0.6."""
    mask = np.array([
        [[255.0, 0.0, 0.0],
         [255.0, 0.0, 0.0],
         [255.0, 0.0, 0.0]]])
    return tf.constant(mask, dtype=tf.float32)
  def expectedKeypointsAfterThresholding(self):
    """Keypoints of the single box surviving thresholding at 0.6."""
    keypoints = np.array([
        [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]
    ])
    return tf.constant(keypoints, dtype=tf.float32)
  def expectedLabelScoresAfterThresholdingWithMissingScore(self):
    """With scores [0.5, NaN], the NaN-scored box is the one retained."""
    return tf.constant([np.nan], dtype=tf.float32)
  def expectedBoxesAfterThresholdingWithMissingScore(self):
    """Box retained when its score is missing (NaN)."""
    return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32)
  def expectedLabelsAfterThresholdingWithMissingScore(self):
    """Label retained when its score is missing (NaN)."""
    return tf.constant([2], dtype=tf.float32)
def testRgbToGrayscale(self):
images = self.createTestImages()
grayscale_images = preprocessor._rgb_to_grayscale(images)
expected_images = tf.image.rgb_to_grayscale(images)
with self.test_session() as sess:
(grayscale_images, expected_images) = sess.run(
[grayscale_images, expected_images])
self.assertAllEqual(expected_images, grayscale_images)
def testNormalizeImage(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 256,
'target_minval': -1,
'target_maxval': 1
})]
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
images = tensor_dict[fields.InputDataFields.image]
images_expected = self.expectedImagesAfterNormalization()
with self.test_session() as sess:
(images_, images_expected_) = sess.run(
[images, images_expected])
images_shape_ = images_.shape
images_expected_shape_ = images_expected_.shape
expected_shape = [1, 4, 4, 3]
self.assertAllEqual(images_expected_shape_, images_shape_)
self.assertAllEqual(images_shape_, expected_shape)
self.assertAllClose(images_, images_expected_)
  def testRetainBoxesAboveThreshold(self):
    """Boxes, labels and scores below the 0.6 threshold are dropped."""
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    (retained_boxes, retained_labels,
     retained_label_scores) = preprocessor.retain_boxes_above_threshold(
         boxes, labels, label_scores, threshold=0.6)
    with self.test_session() as sess:
      (retained_boxes_, retained_labels_, retained_label_scores_,
       expected_retained_boxes_, expected_retained_labels_,
       expected_retained_label_scores_) = sess.run([
           retained_boxes, retained_labels, retained_label_scores,
           self.expectedBoxesAfterThresholding(),
           self.expectedLabelsAfterThresholding(),
           self.expectedLabelScoresAfterThresholding()])
      self.assertAllClose(
          retained_boxes_, expected_retained_boxes_)
      self.assertAllClose(
          retained_labels_, expected_retained_labels_)
      self.assertAllClose(
          retained_label_scores_, expected_retained_label_scores_)
  def testRetainBoxesAboveThresholdWithMultiClassScores(self):
    """Multiclass scores are filtered consistently with the boxes."""
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    multiclass_scores = self.createTestMultiClassScores()
    (_, _, _,
     retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold(
         boxes,
         labels,
         label_scores,
         multiclass_scores=multiclass_scores,
         threshold=0.6)
    with self.test_session() as sess:
      (retained_multiclass_scores_,
       expected_retained_multiclass_scores_) = sess.run([
           retained_multiclass_scores,
           self.expectedMultiClassScoresAfterThresholding()
       ])
      self.assertAllClose(retained_multiclass_scores_,
                          expected_retained_multiclass_scores_)
  def testRetainBoxesAboveThresholdWithMasks(self):
    """Instance masks are filtered consistently with the boxes."""
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    masks = self.createTestMasks()
    _, _, _, retained_masks = preprocessor.retain_boxes_above_threshold(
        boxes, labels, label_scores, masks, threshold=0.6)
    with self.test_session() as sess:
      retained_masks_, expected_retained_masks_ = sess.run([
          retained_masks, self.expectedMasksAfterThresholding()])
      self.assertAllClose(
          retained_masks_, expected_retained_masks_)
  def testRetainBoxesAboveThresholdWithKeypoints(self):
    """Keypoints are filtered consistently with the boxes."""
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    keypoints = self.createTestKeypoints()
    (_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold(
        boxes, labels, label_scores, keypoints=keypoints, threshold=0.6)
    with self.test_session() as sess:
      (retained_keypoints_,
       expected_retained_keypoints_) = sess.run([
           retained_keypoints,
           self.expectedKeypointsAfterThresholding()])
      self.assertAllClose(
          retained_keypoints_, expected_retained_keypoints_)
  def testRetainBoxesAboveThresholdWithMissingScore(self):
    """A box whose score is missing (NaN) is retained by thresholding."""
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScoresWithMissingScore()
    (retained_boxes, retained_labels,
     retained_label_scores) = preprocessor.retain_boxes_above_threshold(
         boxes, labels, label_scores, threshold=0.6)
    with self.test_session() as sess:
      (retained_boxes_, retained_labels_, retained_label_scores_,
       expected_retained_boxes_, expected_retained_labels_,
       expected_retained_label_scores_) = sess.run([
           retained_boxes, retained_labels, retained_label_scores,
           self.expectedBoxesAfterThresholdingWithMissingScore(),
           self.expectedLabelsAfterThresholdingWithMissingScore(),
           self.expectedLabelScoresAfterThresholdingWithMissingScore()])
      self.assertAllClose(
          retained_boxes_, expected_retained_boxes_)
      self.assertAllClose(
          retained_labels_, expected_retained_labels_)
      self.assertAllClose(
          retained_label_scores_, expected_retained_label_scores_)
  def testFlipBoxesLeftRight(self):
    """_flip_boxes_left_right mirrors box x-coordinates."""
    boxes = self.createTestBoxes()
    flipped_boxes = preprocessor._flip_boxes_left_right(boxes)
    expected_boxes = self.expectedBoxesAfterLeftRightFlip()
    with self.test_session() as sess:
      flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
      self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
  def testFlipBoxesUpDown(self):
    """_flip_boxes_up_down mirrors box y-coordinates."""
    boxes = self.createTestBoxes()
    flipped_boxes = preprocessor._flip_boxes_up_down(boxes)
    expected_boxes = self.expectedBoxesAfterUpDownFlip()
    with self.test_session() as sess:
      flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
      self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
  def testRot90Boxes(self):
    """_rot90_boxes rotates boxes by 90 degrees."""
    boxes = self.createTestBoxes()
    rotated_boxes = preprocessor._rot90_boxes(boxes)
    expected_boxes = self.expectedBoxesAfterRot90()
    with self.test_session() as sess:
      rotated_boxes, expected_boxes = sess.run([rotated_boxes, expected_boxes])
      self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten())
  def testFlipMasksLeftRight(self):
    """_flip_masks_left_right mirrors masks along the width."""
    test_mask = self.createTestMasks()
    flipped_mask = preprocessor._flip_masks_left_right(test_mask)
    expected_mask = self.expectedMasksAfterLeftRightFlip()
    with self.test_session() as sess:
      flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
      self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
  def testFlipMasksUpDown(self):
    """_flip_masks_up_down mirrors masks along the height."""
    test_mask = self.createTestMasks()
    flipped_mask = preprocessor._flip_masks_up_down(test_mask)
    expected_mask = self.expectedMasksAfterUpDownFlip()
    with self.test_session() as sess:
      flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
      self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
  def testRot90Masks(self):
    """_rot90_masks rotates masks by 90 degrees."""
    test_mask = self.createTestMasks()
    rotated_mask = preprocessor._rot90_masks(test_mask)
    expected_mask = self.expectedMasksAfterRot90()
    with self.test_session() as sess:
      rotated_mask, expected_mask = sess.run([rotated_mask, expected_mask])
      self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten())
def _testPreprocessorCache(self,
preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False,
num_runs=4):
cache = preprocessor_cache.PreprocessorCache()
images = self.createTestImages()
boxes = self.createTestBoxes()
classes = self.createTestLabels()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=test_masks, include_keypoints=test_keypoints)
out = []
for i in range(num_runs):
tensor_dict = {
fields.InputDataFields.image: images,
}
num_outputs = 1
if test_boxes:
tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes
tensor_dict[fields.InputDataFields.groundtruth_classes] = classes
num_outputs += 1
if test_masks:
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
num_outputs += 1
if test_keypoints:
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
num_outputs += 1
out.append(preprocessor.preprocess(
tensor_dict, preprocess_options, preprocessor_arg_map, cache))
with self.test_session() as sess:
to_run = []
for i in range(num_runs):
to_run.append(out[i][fields.InputDataFields.image])
if test_boxes:
to_run.append(out[i][fields.InputDataFields.groundtruth_boxes])
if test_masks:
to_run.append(
out[i][fields.InputDataFields.groundtruth_instance_masks])
if test_keypoints:
to_run.append(out[i][fields.InputDataFields.groundtruth_keypoints])
out_array = sess.run(to_run)
for i in range(num_outputs, len(out_array)):
self.assertAllClose(out_array[i], out_array[i - num_outputs])
  def testRandomHorizontalFlip(self):
    """random_horizontal_flip yields either the original or mirrored tensors."""
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterLeftRightFlip()
    boxes_expected1 = self.expectedBoxesAfterLeftRightFlip()
    images_expected2 = images
    boxes_expected2 = boxes
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    # The flip decision is random, so compare against both candidates: the
    # product of squared differences is zero wherever the output matches
    # either the flipped or the unmodified version.
    boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
    boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
    boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
    boxes_diff_expected = tf.zeros_like(boxes_diff)
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_diff_,
       boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
                                         boxes_diff, boxes_diff_expected])
      self.assertAllClose(boxes_diff_, boxes_diff_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_)
  def testRandomHorizontalFlipWithEmptyBoxes(self):
    """random_horizontal_flip passes an empty box tensor through unchanged."""
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterLeftRightFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    # Zero wherever the output matches either the flipped or original image.
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_)
  def testRandomHorizontalFlipWithCache(self):
    """Cached horizontal-flip decisions replay identically across runs."""
    keypoint_flip_permutation = self.createKeypointFlipPermutation()
    preprocess_options = [
        (preprocessor.random_horizontal_flip,
         {'keypoint_flip_permutation': keypoint_flip_permutation})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)
def testRunRandomHorizontalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_horizontal_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocess_options = [
(preprocessor.random_horizontal_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
self.assertTrue(boxes is not None)
self.assertTrue(masks is not None)
self.assertTrue(keypoints is not None)
  def testRandomVerticalFlip(self):
    """random_vertical_flip yields either the original or up-down mirrored
    tensors."""
    preprocess_options = [(preprocessor.random_vertical_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterUpDownFlip()
    boxes_expected1 = self.expectedBoxesAfterUpDownFlip()
    images_expected2 = images
    boxes_expected2 = boxes
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    # The flip decision is random: the product of squared differences is
    # zero wherever the output matches either candidate.
    boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
    boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
    boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
    boxes_diff_expected = tf.zeros_like(boxes_diff)
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_diff_,
       boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
                                         boxes_diff, boxes_diff_expected])
      self.assertAllClose(boxes_diff_, boxes_diff_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_)
  def testRandomVerticalFlipWithEmptyBoxes(self):
    """random_vertical_flip passes an empty box tensor through unchanged."""
    preprocess_options = [(preprocessor.random_vertical_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterUpDownFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_)
  def testRandomVerticalFlipWithCache(self):
    """Cached vertical-flip decisions replay identically across runs."""
    keypoint_flip_permutation = self.createKeypointFlipPermutation()
    preprocess_options = [
        (preprocessor.random_vertical_flip,
         {'keypoint_flip_permutation': keypoint_flip_permutation})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)
def testRunRandomVerticalFlipWithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_vertical_flip, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
keypoint_flip_permutation = self.createKeypointFlipPermutation()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocess_options = [
(preprocessor.random_vertical_flip,
{'keypoint_flip_permutation': keypoint_flip_permutation})]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
self.assertTrue(boxes is not None)
self.assertTrue(masks is not None)
self.assertTrue(keypoints is not None)
  def testRandomRotation90(self):
    """random_rotation90 yields either the original or rotated tensors."""
    preprocess_options = [(preprocessor.random_rotation90, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterRot90()
    boxes_expected1 = self.expectedBoxesAfterRot90()
    images_expected2 = images
    boxes_expected2 = boxes
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    # The rotation decision is random: the product of squared differences
    # is zero wherever the output matches either candidate.
    boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
    boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
    boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
    boxes_diff_expected = tf.zeros_like(boxes_diff)
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_diff_,
       boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
                                         boxes_diff, boxes_diff_expected])
      self.assertAllClose(boxes_diff_, boxes_diff_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_)
  def testRandomRotation90WithEmptyBoxes(self):
    """random_rotation90 passes an empty box tensor through unchanged."""
    preprocess_options = [(preprocessor.random_rotation90, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterRot90()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_)
  def testRandomRotation90WithCache(self):
    """Cached rotation decisions replay identically across runs."""
    preprocess_options = [(preprocessor.random_rotation90, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)
def testRunRandomRotation90WithMaskAndKeypoints(self):
preprocess_options = [(preprocessor.random_rotation90, {})]
image_height = 3
image_width = 3
images = tf.random_uniform([1, image_height, image_width, 3])
boxes = self.createTestBoxes()
masks = self.createTestMasks()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_instance_masks: masks,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True, include_keypoints=True)
tensor_dict = preprocessor.preprocess(
tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
self.assertTrue(boxes is not None)
self.assertTrue(masks is not None)
self.assertTrue(keypoints is not None)
  def testRandomPixelValueScale(self):
    """random_pixel_value_scale keeps every pixel within [0.9x, 1.1x] of its
    normalized value."""
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    # Bounds derived from the original uint8 image, rescaled into [0, 1].
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_)
  def testRandomPixelValueScaleWithCache(self):
    """Cached pixel-value-scale decisions replay identically across runs."""
    preprocess_options = []
    preprocess_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocess_options.append((preprocessor.random_pixel_value_scale, {}))
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=False,
                                test_keypoints=False)
  def testRandomImageScale(self):
    """random_image_scale resizes each spatial dim by a factor in [0.5, 2]."""
    preprocess_options = [(preprocessor.random_image_scale, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_scaled = tensor_dict[fields.InputDataFields.image]
    images_original_shape = tf.shape(images_original)
    images_scaled_shape = tf.shape(images_scaled)
    with self.test_session() as sess:
      (images_original_shape_, images_scaled_shape_) = sess.run(
          [images_original_shape, images_scaled_shape])
      # Height (dim 1) and width (dim 2) must stay within [0.5x, 2x].
      self.assertTrue(
          images_original_shape_[1] * 0.5 <= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[1] * 2.0 >= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[2] * 0.5 <= images_scaled_shape_[2])
      self.assertTrue(
          images_original_shape_[2] * 2.0 >= images_scaled_shape_[2])
  def testRandomImageScaleWithCache(self):
    """Cached image-scale decisions replay identically across runs."""
    preprocess_options = [(preprocessor.random_image_scale, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=False,
                                test_masks=False,
                                test_keypoints=False)
  def testRandomRGBtoGray(self):
    """random_rgb_to_gray outputs the original image or a grayscale one."""
    preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_gray = tensor_dict[fields.InputDataFields.image]
    images_gray_r, images_gray_g, images_gray_b = tf.split(
        value=images_gray, num_or_size_splits=3, axis=3)
    images_r, images_g, images_b = tf.split(
        value=images_original, num_or_size_splits=3, axis=3)
    # For each channel: either the output equals the original channel (no
    # conversion) or all output channels are equal to each other
    # (grayscale), so the product of the two squared differences is zero
    # in both cases.
    images_r_diff1 = tf.squared_difference(tf.to_float(images_r),
                                           tf.to_float(images_gray_r))
    images_r_diff2 = tf.squared_difference(tf.to_float(images_gray_r),
                                           tf.to_float(images_gray_g))
    images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
    images_g_diff1 = tf.squared_difference(tf.to_float(images_g),
                                           tf.to_float(images_gray_g))
    images_g_diff2 = tf.squared_difference(tf.to_float(images_gray_g),
                                           tf.to_float(images_gray_b))
    images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
    images_b_diff1 = tf.squared_difference(tf.to_float(images_b),
                                           tf.to_float(images_gray_b))
    images_b_diff2 = tf.squared_difference(tf.to_float(images_gray_b),
                                           tf.to_float(images_gray_r))
    images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
    image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
    with self.test_session() as sess:
      (images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run(
          [images_r_diff, images_g_diff, images_b_diff, image_zero1])
      self.assertAllClose(images_r_diff_, image_zero1_)
      self.assertAllClose(images_g_diff_, image_zero1_)
      self.assertAllClose(images_b_diff_, image_zero1_)
  def testRandomRGBtoGrayWithCache(self):
    """Cached rgb-to-gray decisions replay identically across runs."""
    preprocess_options = [(
        preprocessor.random_rgb_to_gray, {'probability': 0.5})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=False,
                                test_masks=False,
                                test_keypoints=False)
def testRandomAdjustBrightness(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_bright = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_bright_shape = tf.shape(images_bright)
with self.test_session() as sess:
(image_original_shape_, image_bright_shape_) = sess.run(
[image_original_shape, image_bright_shape])
self.assertAllEqual(image_original_shape_, image_bright_shape_)
def testRandomAdjustBrightnessWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_brightness, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustContrast(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_contrast = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_contrast_shape = tf.shape(images_contrast)
with self.test_session() as sess:
(image_original_shape_, image_contrast_shape_) = sess.run(
[image_original_shape, image_contrast_shape])
self.assertAllEqual(image_original_shape_, image_contrast_shape_)
def testRandomAdjustContrastWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_contrast, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomAdjustHue(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_adjust_hue, {}))
images_original = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_hue = tensor_dict[fields.InputDataFields.image]
image_original_shape = tf.shape(images_original)
image_hue_shape = tf.shape(images_hue)
with self.test_session() as sess:
(image_original_shape_, image_hue_shape_) = sess.run(
[image_original_shape, image_hue_shape])
self.assertAllEqual(image_original_shape_, image_hue_shape_)
def testRandomAdjustHueWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_adjust_hue, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomDistortColor(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_distort_color, {}))
images_original = self.createTestImages()
images_original_shape = tf.shape(images_original)
tensor_dict = {fields.InputDataFields.image: images_original}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images_distorted_color = tensor_dict[fields.InputDataFields.image]
images_distorted_color_shape = tf.shape(images_distorted_color)
with self.test_session() as sess:
(images_original_shape_, images_distorted_color_shape_) = sess.run(
[images_original_shape, images_distorted_color_shape])
self.assertAllEqual(images_original_shape_, images_distorted_color_shape_)
def testRandomDistortColorWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_distort_color, {}))
self._testPreprocessorCache(preprocess_options,
test_boxes=False,
test_masks=False,
test_keypoints=False)
def testRandomJitterBoxes(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.random_jitter_boxes, {}))
boxes = self.createTestBoxes()
boxes_shape = tf.shape(boxes)
tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
distorted_boxes_shape = tf.shape(distorted_boxes)
with self.test_session() as sess:
(boxes_shape_, distorted_boxes_shape_) = sess.run(
[boxes_shape, distorted_boxes_shape])
self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
def testRandomCropImage(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
self.assertEqual(3, distorted_images.get_shape()[3])
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithCache(self):
preprocess_options = [(preprocessor.random_rgb_to_gray,
{'probability': 0.5}),
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,
}),
(preprocessor.random_crop_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testRandomCropImageGrayscale(self):
preprocessing_options = [(preprocessor.rgb_to_gray, {}),
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,
}),
(preprocessor.random_crop_image, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
self.assertEqual(1, distorted_images.get_shape()[3])
with self.test_session() as sess:
session_results = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = session_results
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testRandomCropImageWithBoxOutOfImage(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxesOutOfImage()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
  def testRandomCropImageWithRandomCoefOne(self):
    """Tests random_crop_image with random_coef=1.0.

    The assertions below require every output (image, boxes, labels, label
    scores) to equal the corresponding input exactly, i.e. with this
    coefficient the crop is expected to behave as an identity op.
    """
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_label_scores: label_scores
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    # NOTE(review): random_coef=1.0 presumably disables the crop entirely —
    # the identity assertions below are what this test pins down.
    preprocessing_options = [(preprocessor.random_crop_image, {
        'random_coef': 1.0
    })]
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_labels = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    distorted_label_scores = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_label_scores]
    boxes_shape = tf.shape(boxes)
    distorted_boxes_shape = tf.shape(distorted_boxes)
    images_shape = tf.shape(images)
    distorted_images_shape = tf.shape(distorted_images)
    with self.test_session() as sess:
      # Fetch inputs and outputs in one run; each input is paired with its
      # distorted counterpart in the tuple below.
      (boxes_shape_, distorted_boxes_shape_, images_shape_,
       distorted_images_shape_, images_, distorted_images_,
       boxes_, distorted_boxes_, labels_, distorted_labels_,
       label_scores_, distorted_label_scores_) = sess.run(
           [boxes_shape, distorted_boxes_shape, images_shape,
            distorted_images_shape, images, distorted_images,
            boxes, distorted_boxes, labels, distorted_labels,
            label_scores, distorted_label_scores])
      self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
      self.assertAllEqual(images_shape_, distorted_images_shape_)
      self.assertAllClose(images_, distorted_images_)
      self.assertAllClose(boxes_, distorted_boxes_)
      self.assertAllEqual(labels_, distorted_labels_)
      self.assertAllEqual(label_scores_, distorted_label_scores_)
  def testRandomCropWithMockSampleDistortedBoundingBox(self):
    """Tests random_crop_image box pruning with a deterministic crop window.

    tf.image.sample_distorted_bounding_box is patched to return a fixed
    window (begin [6, 143, 0], size [190, 237, -1]), so the cropped boxes
    and labels can be compared against precomputed constants.  The input
    has labels [1, 7, 11] and the expected output is [7, 11]: the first
    box is dropped by this particular crop.
    """
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createColorfulTestImage()
    boxes = tf.constant([[0.1, 0.1, 0.8, 0.3],
                         [0.2, 0.4, 0.75, 0.75],
                         [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32)
    labels = tf.constant([1, 7, 11], dtype=tf.int32)
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    # The crop ops must be built while the patch is active, so everything
    # below stays inside the mock context.
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box') as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (tf.constant(
          [6, 143, 0], dtype=tf.int32), tf.constant(
              [190, 237, -1], dtype=tf.int32), tf.constant(
                  [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                      preprocessing_options)
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      expected_boxes = tf.constant([[0.178947, 0.07173, 0.75789469, 0.66244733],
                                    [0.28421, 0.0, 0.38947365, 0.57805908]],
                                   dtype=tf.float32)
      expected_labels = tf.constant([7, 11], dtype=tf.int32)
      with self.test_session() as sess:
        (distorted_boxes_, distorted_labels_,
         expected_boxes_, expected_labels_) = sess.run(
             [distorted_boxes, distorted_labels,
              expected_boxes, expected_labels])
        self.assertAllClose(distorted_boxes_, expected_boxes_)
        self.assertAllEqual(distorted_labels_, expected_labels_)
  def testRandomCropImageWithMultiClassScores(self):
    """Tests random_crop_image in the presence of multiclass_scores.

    Asserts that ranks are preserved for all tensors and that the cropped
    multiclass_scores keeps the same leading dimension as the cropped
    boxes (one score row per retained box).
    """
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_crop_image, {}))
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    multiclass_scores = self.createTestMultiClassScores()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.multiclass_scores: multiclass_scores
    }
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_multiclass_scores = distorted_tensor_dict[
        fields.InputDataFields.multiclass_scores]
    boxes_rank = tf.rank(boxes)
    distorted_boxes_rank = tf.rank(distorted_boxes)
    images_rank = tf.rank(images)
    distorted_images_rank = tf.rank(distorted_images)
    multiclass_scores_rank = tf.rank(multiclass_scores)
    distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
    with self.test_session() as sess:
      # Fetches and the unpacked tuple below are position-paired; keep them
      # in sync when editing.
      (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
       distorted_images_rank_, multiclass_scores_rank_,
       distorted_multiclass_scores_rank_,
       distorted_multiclass_scores_) = sess.run([
           boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
           distorted_images_rank, multiclass_scores_rank,
           distorted_multiclass_scores_rank, distorted_multiclass_scores
       ])
      self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
      self.assertAllEqual(images_rank_, distorted_images_rank_)
      self.assertAllEqual(multiclass_scores_rank_,
                          distorted_multiclass_scores_rank_)
      self.assertAllEqual(distorted_boxes_.shape[0],
                          distorted_multiclass_scores_.shape[0])
  def testStrictRandomCropImageWithLabelScores(self):
    """Tests _strict_random_crop_image with label scores.

    The sampled crop window is mocked to a fixed begin/size, so the cropped
    image shape ([190, 237, 3]), the surviving label scores ([1.0, 0.5])
    and the remapped boxes can be checked against precomputed values.
    """
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_label_scores = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, label_scores))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_label_scores = (
            sess.run(
                [new_image, new_boxes, new_labels, new_label_scores])
        )
        expected_boxes = np.array(
            [[0.0, 0.0, 0.75789469, 1.0],
             [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllEqual(new_label_scores, [1.0, 0.5])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
  def testStrictRandomCropImageWithMasks(self):
    """Tests _strict_random_crop_image cropping of instance masks.

    The sampled crop window is mocked to a fixed begin/size so the cropped
    image ([190, 237, 3]), masks ([2, 190, 237]) and boxes can be checked
    against precomputed values.
    """
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_masks = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, masks=masks))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_masks = sess.run(
            [new_image, new_boxes, new_labels, new_masks])
        expected_boxes = np.array(
            [[0.0, 0.0, 0.75789469, 1.0],
             [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllEqual(new_masks.shape, [2, 190, 237])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
  def testStrictRandomCropImageWithKeypoints(self):
    """Tests _strict_random_crop_image cropping of keypoints.

    The sampled crop window is mocked to a fixed begin/size.  The expected
    output has NaN keypoints for the first instance and remapped (finite)
    keypoint coordinates for the second.
    """
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_keypoints = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, keypoints=keypoints))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_keypoints = sess.run(
            [new_image, new_boxes, new_labels, new_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32)
        expected_keypoints = np.array([
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            new_keypoints.flatten(), expected_keypoints.flatten())
  def testRunRandomCropImageWithMasks(self):
    """Runs random_crop_image end-to-end with instance masks.

    sample_distorted_bounding_box is mocked to a fixed window, so the
    cropped image ([1, 190, 237, 3]), masks ([2, 190, 237]), boxes and
    labels can be checked against precomputed values.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_instance_masks: masks,
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_masks = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_instance_masks]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_masks_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_masks])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_masks_.shape, [2, 190, 237])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
  def testRunRandomCropImageWithKeypointsInsideCrop(self):
    """Runs random_crop_image with keypoints inside the crop window.

    The crop window is mocked to a fixed begin/size; the fixture keypoints
    (createTestKeypointsInsideCrop) all map to finite coordinates, which
    are compared to a precomputed array.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypointsInsideCrop()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        expected_keypoints = np.array([
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]],
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ])
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            distorted_keypoints_.flatten(), expected_keypoints.flatten())
  def testRunRandomCropImageWithKeypointsOutsideCrop(self):
    """Runs random_crop_image with keypoints outside the crop window.

    The crop window is mocked to a fixed begin/size; the fixture keypoints
    (createTestKeypointsOutsideCrop) are all expected to come out as NaN
    after cropping.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypointsOutsideCrop()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        expected_keypoints = np.array([
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
        ])
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            distorted_keypoints_.flatten(), expected_keypoints.flatten())
def testRunRetainBoxesAboveThreshold(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores
}
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True)
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_boxes = retained_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
retained_labels = retained_tensor_dict[
fields.InputDataFields.groundtruth_classes]
retained_label_scores = retained_tensor_dict[
fields.InputDataFields.groundtruth_label_scores]
with self.test_session() as sess:
(retained_boxes_, retained_labels_,
retained_label_scores_, expected_retained_boxes_,
expected_retained_labels_, expected_retained_label_scores_) = sess.run(
[retained_boxes, retained_labels, retained_label_scores,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding()])
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_label_scores_, expected_retained_label_scores_)
def testRunRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True,
include_instance_masks=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_masks = retained_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(retained_masks_, expected_masks_) = sess.run(
[retained_masks,
self.expectedMasksAfterThresholding()])
self.assertAllClose(retained_masks_, expected_masks_)
def testRunRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True,
include_keypoints=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_keypoints = retained_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(retained_keypoints_, expected_keypoints_) = sess.run(
[retained_keypoints,
self.expectedKeypointsAfterThresholding()])
self.assertAllClose(retained_keypoints_, expected_keypoints_)
def testRandomCropToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
  def testRunRandomCropToAspectRatioWithMasks(self):
    """Runs random_crop_to_aspect_ratio with masks and a fixed crop offset.

    preprocessor._random_integer is patched to return 0 so the crop is
    deterministic; the assertions expect a [1, 200, 200, 3] image, a
    single remaining box/label, and [1, 200, 200] masks.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_instance_masks: masks
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    with mock.patch.object(preprocessor,
                           '_random_integer') as mock_random_integer:
      mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_masks = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_instance_masks]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_masks_) = sess.run([
             distorted_image, distorted_boxes, distorted_labels, distorted_masks
         ])
        expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
        self.assertAllEqual(distorted_labels_, [1])
        self.assertAllClose(distorted_boxes_.flatten(),
                            expected_boxes.flatten())
        self.assertAllEqual(distorted_masks_.shape, [1, 200, 200])
  def testRunRandomCropToAspectRatioWithKeypoints(self):
    """Runs random_crop_to_aspect_ratio with keypoints and a fixed offset.

    preprocessor._random_integer is patched to return 0; the assertions
    expect a [1, 200, 200, 3] image, a single remaining box/label, and
    keypoints remapped to precomputed coordinates.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    with mock.patch.object(preprocessor,
                           '_random_integer') as mock_random_integer:
      mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run([
             distorted_image, distorted_boxes, distorted_labels,
             distorted_keypoints
         ])
        expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
        expected_keypoints = np.array(
            [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
        self.assertAllEqual(distorted_labels_, [1])
        self.assertAllClose(distorted_boxes_.flatten(),
                            expected_boxes.flatten())
        self.assertAllClose(distorted_keypoints_.flatten(),
                            expected_keypoints.flatten())
def testRandomPadToAspectRatioWithCache(self):
  """Cache-determinism check for random_pad_to_aspect_ratio."""
  options = [(preprocessor.random_pad_to_aspect_ratio, {})]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self):
  """random_pad_to_aspect_ratio with pinned min/max padded-size ratios.

  Pinning both ratios to (4.0, 4.0) removes the randomness in the padded
  size, so the padded image shape (1, 800, 800, 3) and the re-normalized
  box coordinates can be asserted exactly.
  """
  image = self.createColorfulTestImage()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  tensor_dict = {
      fields.InputDataFields.image: image,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels
  }
  preprocessor_arg_map = preprocessor.get_default_func_arg_map()
  preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio,
                            {'min_padded_size_ratio': (4.0, 4.0),
                             'max_padded_size_ratio': (4.0, 4.0)})]
  distorted_tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
  distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
  distorted_boxes = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  distorted_labels = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_classes]
  with self.test_session() as sess:
    distorted_image_, distorted_boxes_, distorted_labels_ = sess.run([
        distorted_image, distorted_boxes, distorted_labels])
    # Boxes shrink in normalized coordinates because the canvas grew.
    expected_boxes = np.array(
        [[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]],
        dtype=np.float32)
    self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3])
    self.assertAllEqual(distorted_labels_, [1, 2])
    self.assertAllClose(distorted_boxes_.flatten(),
                        expected_boxes.flatten())
def testRunRandomPadToAspectRatioWithMasks(self):
  """random_pad_to_aspect_ratio pads image, boxes and instance masks together.

  Asserts the padded image shape (1, 400, 400, 3), the padded mask shape
  (2, 400, 400), and the boxes re-normalized to the padded canvas.
  """
  image = self.createColorfulTestImage()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
  tensor_dict = {
      fields.InputDataFields.image: image,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
      fields.InputDataFields.groundtruth_instance_masks: masks
  }
  # Instance masks are not in the default arg map; opt in explicitly.
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_instance_masks=True)
  preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
  distorted_tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
  distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
  distorted_boxes = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  distorted_labels = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_classes]
  distorted_masks = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_instance_masks]
  with self.test_session() as sess:
    (distorted_image_, distorted_boxes_, distorted_labels_,
     distorted_masks_) = sess.run([
         distorted_image, distorted_boxes, distorted_labels, distorted_masks
     ])
    expected_boxes = np.array(
        [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
    self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
    self.assertAllEqual(distorted_labels_, [1, 2])
    self.assertAllClose(distorted_boxes_.flatten(),
                        expected_boxes.flatten())
    self.assertAllEqual(distorted_masks_.shape, [2, 400, 400])
def testRunRandomPadToAspectRatioWithKeypoints(self):
  """random_pad_to_aspect_ratio pads image, boxes and keypoints together.

  Asserts the padded image shape and that both boxes and keypoints are
  re-normalized to the padded canvas with exact expected values.
  """
  image = self.createColorfulTestImage()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  keypoints = self.createTestKeypoints()
  tensor_dict = {
      fields.InputDataFields.image: image,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
      fields.InputDataFields.groundtruth_keypoints: keypoints
  }
  # Keypoints are not in the default arg map; opt in explicitly.
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_keypoints=True)
  preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
  distorted_tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
  distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
  distorted_boxes = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  distorted_labels = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_classes]
  distorted_keypoints = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_keypoints]
  with self.test_session() as sess:
    (distorted_image_, distorted_boxes_, distorted_labels_,
     distorted_keypoints_) = sess.run([
         distorted_image, distorted_boxes, distorted_labels,
         distorted_keypoints
     ])
    expected_boxes = np.array(
        [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
    expected_keypoints = np.array([
        [[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]],
        [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]],
    ], dtype=np.float32)
    self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
    self.assertAllEqual(distorted_labels_, [1, 2])
    self.assertAllClose(distorted_boxes_.flatten(),
                        expected_boxes.flatten())
    self.assertAllClose(distorted_keypoints_.flatten(),
                        expected_keypoints.flatten())
def testRandomPadImageWithCache(self):
  """Cache-determinism check for normalize_image + random_pad_image."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1,
      }),
      (preprocessor.random_pad_image, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRandomPadImage(self):
  """random_pad_image keeps box tensor shape and pads at most 2x per side.

  Also checks that each box's normalized extent shrinks (or stays equal)
  after padding, since the same pixel box covers a smaller fraction of the
  padded image.

  BUG FIX: the original wrote `self.assertTrue((a >= b).all)` — `.all`
  without parentheses is a bound-method object, which is always truthy, so
  those four size assertions could never fail.  They are replaced with
  assertGreaterEqual/assertLessEqual on the scalar shape entries so the
  bounds are actually enforced.
  """
  preprocessing_options = [(preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  })]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
  }
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images = tensor_dict[fields.InputDataFields.image]
  preprocessing_options = [(preprocessor.random_pad_image, {})]
  padded_tensor_dict = preprocessor.preprocess(tensor_dict,
                                               preprocessing_options)
  padded_images = padded_tensor_dict[fields.InputDataFields.image]
  padded_boxes = padded_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  boxes_shape = tf.shape(boxes)
  padded_boxes_shape = tf.shape(padded_boxes)
  images_shape = tf.shape(images)
  padded_images_shape = tf.shape(padded_images)
  with self.test_session() as sess:
    (boxes_shape_, padded_boxes_shape_, images_shape_,
     padded_images_shape_, boxes_, padded_boxes_) = sess.run(
         [boxes_shape, padded_boxes_shape, images_shape,
          padded_images_shape, boxes, padded_boxes])
    self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    # Padded dimensions must be between 1x and 2x the originals.
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
    # Normalized box extents must not grow when the canvas grows.
    self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
        padded_boxes_[:, 2] - padded_boxes_[:, 0])))
    self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
        padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropPadImageWithCache(self):
  """Cache-determinism check for normalize_image + random_crop_pad_image."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1,
      }),
      (preprocessor.random_crop_pad_image, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRandomCropPadImageWithRandomCoefOne(self):
  """random_crop_pad_image with random_coef=1.0 (crop disabled, pad only).

  BUG FIX: the original wrote `self.assertTrue((a >= b).all)` — `.all`
  without parentheses is a bound-method object, which is always truthy, so
  those four size assertions could never fail.  They are replaced with
  assertGreaterEqual/assertLessEqual on the scalar shape entries so the
  bounds are actually enforced.
  """
  preprocessing_options = [(preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  })]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
  }
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images = tensor_dict[fields.InputDataFields.image]
  # random_coef of 1.0 means the image is always retained uncropped.
  preprocessing_options = [(preprocessor.random_crop_pad_image, {
      'random_coef': 1.0
  })]
  padded_tensor_dict = preprocessor.preprocess(tensor_dict,
                                               preprocessing_options)
  padded_images = padded_tensor_dict[fields.InputDataFields.image]
  padded_boxes = padded_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  boxes_shape = tf.shape(boxes)
  padded_boxes_shape = tf.shape(padded_boxes)
  images_shape = tf.shape(images)
  padded_images_shape = tf.shape(padded_images)
  with self.test_session() as sess:
    (boxes_shape_, padded_boxes_shape_, images_shape_,
     padded_images_shape_, boxes_, padded_boxes_) = sess.run(
         [boxes_shape, padded_boxes_shape, images_shape,
          padded_images_shape, boxes, padded_boxes])
    self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    # Padded dimensions must be between 1x and 2x the originals.
    self.assertGreaterEqual(images_shape_[1], padded_images_shape_[1] * 0.5)
    self.assertGreaterEqual(images_shape_[2], padded_images_shape_[2] * 0.5)
    self.assertLessEqual(images_shape_[1], padded_images_shape_[1])
    self.assertLessEqual(images_shape_[2], padded_images_shape_[2])
    # Normalized box extents must not grow when the canvas grows.
    self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
        padded_boxes_[:, 2] - padded_boxes_[:, 0])))
    self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
        padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropToAspectRatio(self):
  """random_crop_to_aspect_ratio with aspect_ratio=2.0 halves the height.

  Asserts that the cropped height is exactly half the input height while
  the width is unchanged, and that the boxes tensor keeps its shape.
  """
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
  }
  tensor_dict = preprocessor.preprocess(tensor_dict, [])
  images = tensor_dict[fields.InputDataFields.image]
  preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
      'aspect_ratio': 2.0
  })]
  cropped_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                preprocessing_options)
  cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
  cropped_boxes = cropped_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  boxes_shape = tf.shape(boxes)
  cropped_boxes_shape = tf.shape(cropped_boxes)
  images_shape = tf.shape(images)
  cropped_images_shape = tf.shape(cropped_images)
  with self.test_session() as sess:
    (boxes_shape_, cropped_boxes_shape_, images_shape_,
     cropped_images_shape_) = sess.run([
         boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape
     ])
    self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
    # Height is halved to reach a 2:1 (w:h) aspect ratio; width untouched.
    self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
    self.assertEqual(images_shape_[2], cropped_images_shape_[2])
def testRandomPadToAspectRatio(self):
  """random_pad_to_aspect_ratio with aspect_ratio=2.0 doubles the width.

  Asserts that the padded width is exactly twice the input width while the
  height is unchanged, and that the boxes tensor keeps its shape.
  """
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
  }
  tensor_dict = preprocessor.preprocess(tensor_dict, [])
  images = tensor_dict[fields.InputDataFields.image]
  preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {
      'aspect_ratio': 2.0
  })]
  padded_tensor_dict = preprocessor.preprocess(tensor_dict,
                                               preprocessing_options)
  padded_images = padded_tensor_dict[fields.InputDataFields.image]
  padded_boxes = padded_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  boxes_shape = tf.shape(boxes)
  padded_boxes_shape = tf.shape(padded_boxes)
  images_shape = tf.shape(images)
  padded_images_shape = tf.shape(padded_images)
  with self.test_session() as sess:
    (boxes_shape_, padded_boxes_shape_, images_shape_,
     padded_images_shape_) = sess.run([
         boxes_shape, padded_boxes_shape, images_shape, padded_images_shape
     ])
    self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
    # Width is doubled to reach a 2:1 (w:h) aspect ratio; height untouched.
    self.assertEqual(images_shape_[1], padded_images_shape_[1])
    self.assertEqual(2 * images_shape_[2], padded_images_shape_[2])
def testRandomBlackPatchesWithCache(self):
  """Cache-determinism check for random_black_patches after normalization."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }),
      (preprocessor.random_black_patches, {'size_to_image_ratio': 0.5}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRandomBlackPatches(self):
  """random_black_patches must leave the image shape unchanged."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }),
      (preprocessor.random_black_patches, {'size_to_image_ratio': 0.5}),
  ]
  images = self.createTestImages()
  blacked_tensor_dict = preprocessor.preprocess(
      {fields.InputDataFields.image: images}, options)
  blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
  images_shape = tf.shape(images)
  blacked_images_shape = tf.shape(blacked_images)
  with self.test_session() as sess:
    images_shape_, blacked_images_shape_ = sess.run(
        [images_shape, blacked_images_shape])
  self.assertAllEqual(images_shape_, blacked_images_shape_)
def testRandomResizeMethodWithCache(self):
  """Cache-determinism check for random_resize_method after normalization."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }),
      (preprocessor.random_resize_method, {'target_size': (75, 150)}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRandomResizeMethod(self):
  """random_resize_method resizes images to the requested (75, 150) size."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }),
      (preprocessor.random_resize_method, {'target_size': (75, 150)}),
  ]
  images = self.createTestImages()
  resized_tensor_dict = preprocessor.preprocess(
      {fields.InputDataFields.image: images}, options)
  resized_images = resized_tensor_dict[fields.InputDataFields.image]
  resized_images_shape = tf.shape(resized_images)
  expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)
  with self.test_session() as sess:
    expected_images_shape_, resized_images_shape_ = sess.run(
        [expected_images_shape, resized_images_shape])
  self.assertAllEqual(expected_images_shape_, resized_images_shape_)
def testResizeImageWithMasks(self):
  """Tests image resizing, checking output sizes.

  resize_image with explicit new_height/new_width must resize both the
  image (H, W, 3) and the instance masks (N, H, W) to the same 50x100
  spatial size, preserving the number of masks N.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
  height = 50
  width = 100
  expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
  expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.random_uniform(in_image_shape)
    in_masks = tf.random_uniform(in_masks_shape)
    out_image, out_masks, _ = preprocessor.resize_image(
        in_image, in_masks, new_height=height, new_width=width)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      # Note: the evaluated numpy shapes deliberately rebind the tensor
      # names for the rest of this loop iteration.
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape])
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithMasksTensorInputHeightAndWidth(self):
  """Tests image resizing, checking output sizes.

  Same as testResizeImageWithMasks, but new_height/new_width are passed as
  scalar int32 tensors rather than Python ints.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
  height = tf.constant(50, dtype=tf.int32)
  width = tf.constant(100, dtype=tf.int32)
  expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
  expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.random_uniform(in_image_shape)
    in_masks = tf.random_uniform(in_masks_shape)
    out_image, out_masks, _ = preprocessor.resize_image(
        in_image, in_masks, new_height=height, new_width=width)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape])
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithNoInstanceMask(self):
  """Tests image resizing, checking output sizes.

  Exercises the degenerate case of a masks tensor with zero instances
  (leading dimension 0): resizing must still succeed and keep N == 0.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
  height = 50
  width = 100
  expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
  expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.random_uniform(in_image_shape)
    in_masks = tf.random_uniform(in_masks_shape)
    out_image, out_masks, _ = preprocessor.resize_image(
        in_image, in_masks, new_height=height, new_width=width)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape])
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangePreservesStaticSpatialShape(self):
  """resize_to_range on static-shape inputs reports exact static shapes."""
  cases = zip(
      [[60, 40, 3], [15, 30, 3], [15, 50, 3]],
      [[75, 50, 3], [50, 100, 3], [30, 100, 3]])
  for in_shape, expected_shape in cases:
    image = tf.random_uniform(in_shape)
    resized, _ = preprocessor.resize_to_range(
        image, min_dimension=50, max_dimension=100)
    self.assertAllEqual(resized.get_shape().as_list(), expected_shape)
def testResizeToRangeWithDynamicSpatialShape(self):
  """Tests image resizing, checking output sizes.

  Feeds images through a (None, None, 3) placeholder so resize_to_range
  must handle spatial dimensions unknown at graph-construction time.
  """
  in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
  min_dim = 50
  max_dim = 100
  expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
  for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
    in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    out_image, _ = preprocessor.resize_to_range(
        in_image, min_dimension=min_dim, max_dimension=max_dim)
    out_image_shape = tf.shape(out_image)
    with self.test_session() as sess:
      out_image_shape = sess.run(out_image_shape,
                                 feed_dict={in_image:
                                            np.random.randn(*in_shape)})
      self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self):
  """With pad_to_max_dimension=True every output is max_dim x max_dim.

  Checks both the static shape reported at graph-construction time and the
  dynamic shape evaluated in a session.
  """
  in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
  min_dim = 50
  max_dim = 100
  expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]]
  for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
    in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    out_image, _ = preprocessor.resize_to_range(
        in_image,
        min_dimension=min_dim,
        max_dimension=max_dim,
        pad_to_max_dimension=True)
    # Static shape is fully determined even with a dynamic-shape input.
    self.assertAllEqual(out_image.shape.as_list(), expected_shape)
    out_image_shape = tf.shape(out_image)
    with self.test_session() as sess:
      out_image_shape = sess.run(
          out_image_shape, feed_dict={in_image: np.random.randn(*in_shape)})
      self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self):
  """Padded pixels are filled with per_channel_pad_value.

  A 1x1 image resized/padded to 2x2 must keep its original pixel at
  position [0, 0] and fill the other three positions with the pad value
  (123.68, 116.779, 103.939).
  """
  in_image_np = np.array([[[0, 1, 2]]], np.float32)
  ex_image_np = np.array(
      [[[0, 1, 2], [123.68, 116.779, 103.939]],
       [[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32)
  min_dim = 1
  max_dim = 2
  in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
  out_image, _ = preprocessor.resize_to_range(
      in_image,
      min_dimension=min_dim,
      max_dimension=max_dim,
      pad_to_max_dimension=True,
      per_channel_pad_value=(123.68, 116.779, 103.939))
  with self.test_session() as sess:
    out_image_np = sess.run(out_image, feed_dict={in_image: in_image_np})
    self.assertAllClose(ex_image_np, out_image_np)
def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
  """Tests image resizing, checking output sizes.

  With fully static input shapes, resize_to_range must report matching
  static shapes for both the resized image and the resized masks.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
  min_dim = 50
  max_dim = 100
  expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
  expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.random_uniform(in_image_shape)
    in_masks = tf.random_uniform(in_masks_shape)
    out_image, out_masks, _ = preprocessor.resize_to_range(
        in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
    self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape)
    self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape)
def testResizeToRangeWithMasksAndPadToMaxDimension(self):
  """Tests image resizing, checking output sizes.

  With pad_to_max_dimension=True both image and masks (fed through
  dynamic-shape placeholders) come out padded to max_dim x max_dim.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
  min_dim = 50
  max_dim = 100
  expected_image_shape_list = [[100, 100, 3], [100, 100, 3]]
  expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]]
  for (in_image_shape,
       expected_image_shape, in_masks_shape, expected_mask_shape) in zip(
           in_image_shape_list, expected_image_shape_list,
           in_masks_shape_list, expected_masks_shape_list):
    in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
    out_image, out_masks, _ = preprocessor.resize_to_range(
        in_image,
        in_masks,
        min_dimension=min_dim,
        max_dimension=max_dim,
        pad_to_max_dimension=True)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape],
          feed_dict={
              in_image: np.random.randn(*in_image_shape),
              in_masks: np.random.randn(*in_masks_shape)
          })
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithMasksAndDynamicSpatialShape(self):
  """Tests image resizing, checking output sizes.

  Both image and masks are fed through dynamic-shape placeholders.

  BUG FIX: the original immediately overwrote the masks placeholder with a
  static-shape `tf.random_uniform(in_masks_shape)` tensor, leaving the
  placeholder dead and defeating the "dynamic spatial shape" purpose of
  this test (the feed_dict then fed the random tensor instead).  The
  overwrite is removed so masks really flow through the
  (None, None, None) placeholder, matching the sibling
  testResizeToRangeWithMasksAndPadToMaxDimension.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
  min_dim = 50
  max_dim = 100
  expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
  expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
    out_image, out_masks, _ = preprocessor.resize_to_range(
        in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape],
          feed_dict={
              in_image: np.random.randn(*in_image_shape),
              in_masks: np.random.randn(*in_masks_shape)
          })
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self):
  """Tests image resizing, checking output sizes.

  Degenerate case: a masks tensor with zero instances (leading dimension
  0) must still resize cleanly and keep N == 0.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
  min_dim = 50
  max_dim = 100
  expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
  expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.random_uniform(in_image_shape)
    in_masks = tf.random_uniform(in_masks_shape)
    out_image, out_masks, _ = preprocessor.resize_to_range(
        in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape])
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRange4DImageTensor(self):
  """resize_to_range rejects batched (4-D) image tensors."""
  batched_image = tf.random_uniform([1, 200, 300, 3])
  with self.assertRaises(ValueError):
    preprocessor.resize_to_range(batched_image, 500, 600)
def testResizeToRangeSameMinMax(self):
  """With min_dimension == max_dimension == 320, outputs are 320x320."""
  cases = zip(
      [[312, 312, 3], [299, 299, 3]],
      [[320, 320, 3], [320, 320, 3]])
  for in_shape, expected_shape in cases:
    image = tf.random_uniform(in_shape)
    resized, _ = preprocessor.resize_to_range(
        image, min_dimension=320, max_dimension=320)
    resized_shape = tf.shape(resized)
    with self.test_session() as sess:
      self.assertAllEqual(sess.run(resized_shape), expected_shape)
def testResizeToMinDimensionTensorShapes(self):
  """resize_to_min_dimension output shapes for dynamic-shape inputs.

  An image already meeting min_dimension (60x55) passes through unchanged;
  a smaller one (15x30) is upscaled so its smaller side reaches 50.

  BUG FIX: the original immediately overwrote the masks placeholder with a
  static-shape `tf.random_uniform(in_masks_shape)` tensor, leaving the
  placeholder dead and defeating the dynamic-shape intent of this test
  (the feed_dict then fed the random tensor instead).  The overwrite is
  removed so masks really flow through the (None, None, None) placeholder.
  """
  in_image_shape_list = [[60, 55, 3], [15, 30, 3]]
  in_masks_shape_list = [[15, 60, 55], [10, 15, 30]]
  min_dim = 50
  expected_image_shape_list = [[60, 55, 3], [50, 100, 3]]
  expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
    out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
        in_image, in_masks, min_dimension=min_dim)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape],
          feed_dict={
              in_image: np.random.randn(*in_image_shape),
              in_masks: np.random.randn(*in_masks_shape)
          })
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self):
  """Tests image resizing, checking output sizes.

  Degenerate case: resize_to_min_dimension with a zero-instance masks
  tensor (leading dimension 0) must still resize cleanly and keep N == 0.
  """
  in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
  in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
  min_dim = 50
  expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
  expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
  for (in_image_shape, expected_image_shape, in_masks_shape,
       expected_mask_shape) in zip(in_image_shape_list,
                                   expected_image_shape_list,
                                   in_masks_shape_list,
                                   expected_masks_shape_list):
    in_image = tf.random_uniform(in_image_shape)
    in_masks = tf.random_uniform(in_masks_shape)
    out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
        in_image, in_masks, min_dimension=min_dim)
    out_image_shape = tf.shape(out_image)
    out_masks_shape = tf.shape(out_masks)
    with self.test_session() as sess:
      out_image_shape, out_masks_shape = sess.run(
          [out_image_shape, out_masks_shape])
      self.assertAllEqual(out_image_shape, expected_image_shape)
      self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionRaisesErrorOn4DImage(self):
  """resize_to_min_dimension rejects batched (4-D) image tensors."""
  batched_image = tf.random_uniform([1, 200, 300, 3])
  with self.assertRaises(ValueError):
    preprocessor.resize_to_min_dimension(batched_image, 500)
def testScaleBoxesToPixelCoordinates(self):
  """Normalized boxes scale to pixel coordinates of a 60x40 image."""
  image = tf.random_uniform([60, 40, 3])
  normalized_boxes = tf.constant([[0.1, 0.2, 0.4, 0.6],
                                  [0.5, 0.3, 0.9, 0.7]])
  # [ymin, xmin, ymax, xmax] scaled by (height=60, width=40).
  expected_boxes = [[6., 8., 24., 24.],
                    [30., 12., 54., 28.]]
  _, scaled_boxes = preprocessor.scale_boxes_to_pixel_coordinates(
      image, boxes=normalized_boxes)
  with self.test_session() as sess:
    self.assertAllClose(sess.run(scaled_boxes), expected_boxes)
def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
  """Tests box and keypoint scaling, checking scaled values.

  Both boxes and keypoints are scaled from normalized coordinates by the
  image size (height=60, width=40).
  """
  in_shape = [60, 40, 3]
  in_boxes = self.createTestBoxes()
  in_keypoints = self.createTestKeypoints()
  expected_boxes = [[0., 10., 45., 40.],
                    [15., 20., 45., 40.]]
  expected_keypoints = [
      [[6., 4.], [12., 8.], [18., 12.]],
      [[24., 16.], [30., 20.], [36., 24.]],
  ]
  in_image = tf.random_uniform(in_shape)
  _, out_boxes, out_keypoints = preprocessor.scale_boxes_to_pixel_coordinates(
      in_image, boxes=in_boxes, keypoints=in_keypoints)
  with self.test_session() as sess:
    out_boxes_, out_keypoints_ = sess.run([out_boxes, out_keypoints])
    self.assertAllClose(out_boxes_, expected_boxes)
    self.assertAllClose(out_keypoints_, expected_keypoints)
def testSubtractChannelMean(self):
  """subtract_channel_mean shifts each channel by its supplied mean."""
  with self.test_session():
    image = tf.zeros((240, 320, 3))
    shifted = preprocessor.subtract_channel_mean(image, means=[1, 2, 3])
    shifted_ = shifted.eval()
    # A zero image minus the per-channel means is -mean everywhere.
    for channel, mean in enumerate([1, 2, 3]):
      self.assertTrue((shifted_[:, :, channel] == -mean).all())
def testOneHotEncoding(self):
  """one_hot_encoding folds multiclass labels into a single 0/1 vector."""
  with self.test_session():
    labels = tf.constant([1, 4, 2], dtype=tf.int32)
    encoded = preprocessor.one_hot_encoding(labels, num_classes=5)
    self.assertAllEqual([0, 1, 1, 0, 1], encoded.eval())
def testSSDRandomCropWithCache(self):
  """Cache-determinism check for ssd_random_crop after normalization."""
  options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }),
      (preprocessor.ssd_random_crop, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=False, test_keypoints=False)
def testSSDRandomCrop(self):
  """ssd_random_crop preserves the ranks of images and boxes.

  The crop itself is random, so only tensor ranks (not concrete shapes or
  values) are asserted.
  """
  preprocessing_options = [
      (preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }),
      (preprocessor.ssd_random_crop, {})]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
  }
  distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
  distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
  distorted_boxes = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  images_rank = tf.rank(images)
  distorted_images_rank = tf.rank(distorted_images)
  boxes_rank = tf.rank(boxes)
  distorted_boxes_rank = tf.rank(distorted_boxes)
  with self.test_session() as sess:
    (boxes_rank_, distorted_boxes_rank_, images_rank_,
     distorted_images_rank_) = sess.run(
         [boxes_rank, distorted_boxes_rank, images_rank,
          distorted_images_rank])
    self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
    self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropWithMultiClassScores(self):
  """ssd_random_crop keeps multiclass_scores aligned with cropped boxes.

  Only tensor ranks are asserted (the crop is random), plus the invariant
  that the number of surviving boxes equals the number of surviving
  multiclass-score rows.
  """
  preprocessing_options = [(preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  }), (preprocessor.ssd_random_crop, {})]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  multiclass_scores = self.createTestMultiClassScores()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
      fields.InputDataFields.multiclass_scores: multiclass_scores,
  }
  # multiclass_scores is not in the default arg map; opt in explicitly.
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_multiclass_scores=True)
  distorted_tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
  distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
  distorted_boxes = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  distorted_multiclass_scores = distorted_tensor_dict[
      fields.InputDataFields.multiclass_scores]
  images_rank = tf.rank(images)
  distorted_images_rank = tf.rank(distorted_images)
  boxes_rank = tf.rank(boxes)
  distorted_boxes_rank = tf.rank(distorted_boxes)
  multiclass_scores_rank = tf.rank(multiclass_scores)
  distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
  with self.test_session() as sess:
    (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
     distorted_images_rank_, multiclass_scores_rank_,
     distorted_multiclass_scores_,
     distorted_multiclass_scores_rank_) = sess.run([
         boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
         distorted_images_rank, multiclass_scores_rank,
         distorted_multiclass_scores, distorted_multiclass_scores_rank
     ])
    self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
    self.assertAllEqual(images_rank_, distorted_images_rank_)
    self.assertAllEqual(multiclass_scores_rank_,
                        distorted_multiclass_scores_rank_)
    # One score row must survive per surviving box.
    self.assertAllEqual(distorted_boxes_.shape[0],
                        distorted_multiclass_scores_.shape[0])
def testSSDRandomCropPad(self):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_pad, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatioWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def _testSSDRandomCropFixedAspectRatio(self,
include_label_scores,
include_multiclass_scores,
include_instance_masks,
include_keypoints):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
if include_label_scores:
label_scores = self.createTestLabelScores()
tensor_dict[fields.InputDataFields.groundtruth_label_scores] = (
label_scores)
if include_multiclass_scores:
multiclass_scores = self.createTestMultiClassScores()
tensor_dict[fields.InputDataFields.multiclass_scores] = (
multiclass_scores)
if include_instance_masks:
masks = self.createTestMasks()
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
if include_keypoints:
keypoints = self.createTestKeypoints()
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=include_label_scores,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatio(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
include_multiclass_scores=False,
include_instance_masks=False,
include_keypoints=False)
def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
include_multiclass_scores=True,
include_instance_masks=False,
include_keypoints=False)
def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
include_multiclass_scores=False,
include_instance_masks=True,
include_keypoints=True)
def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self):
self._testSSDRandomCropFixedAspectRatio(include_label_scores=True,
include_multiclass_scores=False,
include_instance_masks=True,
include_keypoints=True)
def testConvertClassLogitsToSoftmax(self):
multiclass_scores = tf.constant(
[[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32)
temperature = 2.0
converted_multiclass_scores = (
preprocessor.convert_class_logits_to_softmax(
multiclass_scores=multiclass_scores, temperature=temperature))
expected_converted_multiclass_scores = [[[0.62245935, 0.37754068],
[0.5, 0.5], [1, 0]]]
with self.test_session() as sess:
(converted_multiclass_scores_) = sess.run([converted_multiclass_scores])
self.assertAllClose(converted_multiclass_scores_,
expected_converted_multiclass_scores)
# Run every test case in this module when invoked as a script.
if __name__ == '__main__':
  tf.test.main()
| [
"tensorflow.shape",
"tensorflow.split",
"tensorflow.multiply",
"numpy.array",
"object_detection.tensorflow_detect.core.preprocessor.resize_image",
"object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map",
"object_detection.tensorflow_detect.core.preprocessor.scale_boxes_to_pixel_c... | [((125987, 126001), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (125999, 126001), True, 'import tensorflow as tf\n'), ((1360, 1391), 'tensorflow.concat', 'tf.concat', (['[ch255, ch0, ch0]', '(3)'], {}), '([ch255, ch0, ch0], 3)\n', (1369, 1391), True, 'import tensorflow as tf\n'), ((1402, 1435), 'tensorflow.concat', 'tf.concat', (['[ch255, ch255, ch0]', '(3)'], {}), '([ch255, ch255, ch0], 3)\n', (1411, 1435), True, 'import tensorflow as tf\n'), ((1446, 1479), 'tensorflow.concat', 'tf.concat', (['[ch255, ch0, ch255]', '(3)'], {}), '([ch255, ch0, ch255], 3)\n', (1455, 1479), True, 'import tensorflow as tf\n'), ((1490, 1525), 'tensorflow.concat', 'tf.concat', (['[ch128, ch128, ch128]', '(3)'], {}), '([ch128, ch128, ch128], 3)\n', (1499, 1525), True, 'import tensorflow as tf\n'), ((1536, 1560), 'tensorflow.concat', 'tf.concat', (['[imr, img]', '(2)'], {}), '([imr, img], 2)\n', (1545, 1560), True, 'import tensorflow as tf\n'), ((1571, 1595), 'tensorflow.concat', 'tf.concat', (['[imb, imw]', '(2)'], {}), '([imb, imw], 2)\n', (1580, 1595), True, 'import tensorflow as tf\n'), ((1605, 1629), 'tensorflow.concat', 'tf.concat', (['[imu, imd]', '(1)'], {}), '([imu, imd], 1)\n', (1614, 1629), True, 'import tensorflow as tf\n'), ((1690, 1808), 'tensorflow.constant', 'tf.constant', (['[[[128, 128, 128, 128], [0, 0, 128, 128], [0, 128, 128, 128], [192, 192, \n 128, 128]]]'], {'dtype': 'tf.uint8'}), '([[[128, 128, 128, 128], [0, 0, 128, 128], [0, 128, 128, 128], [\n 192, 192, 128, 128]]], dtype=tf.uint8)\n', (1701, 1808), True, 'import tensorflow as tf\n'), ((1875, 1902), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (1889, 1902), True, 'import tensorflow as tf\n'), ((1918, 2031), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, 128, 128], [0, 0, 128, 128], [0, 128, 192, 192], [192, 192, 128, 192]]\n ]'], {'dtype': 'tf.uint8'}), '([[[0, 0, 128, 
128], [0, 0, 128, 128], [0, 128, 192, 192], [192,\n 192, 128, 192]]], dtype=tf.uint8)\n', (1929, 2031), True, 'import tensorflow as tf\n'), ((2099, 2126), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (2113, 2126), True, 'import tensorflow as tf\n'), ((2142, 2255), 'tensorflow.constant', 'tf.constant', (['[[[128, 128, 192, 0], [0, 0, 128, 192], [0, 128, 128, 0], [192, 192, 192, 128]]\n ]'], {'dtype': 'tf.uint8'}), '([[[128, 128, 192, 0], [0, 0, 128, 192], [0, 128, 128, 0], [192,\n 192, 192, 128]]], dtype=tf.uint8)\n', (2153, 2255), True, 'import tensorflow as tf\n'), ((2323, 2350), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (2337, 2350), True, 'import tensorflow as tf\n'), ((2364, 2408), 'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (2373, 2408), True, 'import tensorflow as tf\n'), ((2474, 2509), 'tensorflow.constant', 'tf.constant', (['[[]]'], {'dtype': 'tf.float32'}), '([[]], dtype=tf.float32)\n', (2485, 2509), True, 'import tensorflow as tf\n'), ((2569, 2648), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)\n', (2580, 2648), True, 'import tensorflow as tf\n'), ((2722, 2763), 'tensorflow.constant', 'tf.constant', (['[1.0, 0.5]'], {'dtype': 'tf.float32'}), '([1.0, 0.5], dtype=tf.float32)\n', (2733, 2763), True, 'import tensorflow as tf\n'), ((2827, 2871), 'tensorflow.constant', 'tf.constant', (['[0.5, np.nan]'], {'dtype': 'tf.float32'}), '([0.5, np.nan], dtype=tf.float32)\n', (2838, 2871), True, 'import tensorflow as tf\n'), ((2913, 3052), 'numpy.array', 'np.array', (['[[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[255.0, 255.0,\n 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]'], {}), '([[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 
0.0]], [[\n 255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]])\n', (2921, 3052), True, 'import numpy as np\n'), ((3112, 3147), 'tensorflow.constant', 'tf.constant', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (3123, 3147), True, 'import tensorflow as tf\n'), ((3198, 3289), 'numpy.array', 'np.array', (['[[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]'], {}), '([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [\n 0.6, 0.6]]])\n', (3206, 3289), True, 'import numpy as np\n'), ((3319, 3359), 'tensorflow.constant', 'tf.constant', (['keypoints'], {'dtype': 'tf.float32'}), '(keypoints, dtype=tf.float32)\n', (3330, 3359), True, 'import tensorflow as tf\n'), ((3420, 3511), 'numpy.array', 'np.array', (['[[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]]'], {}), '([[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], [[0.4, 0.4], [0.5, 0.5], [\n 0.6, 0.6]]])\n', (3428, 3511), True, 'import numpy as np\n'), ((3541, 3581), 'tensorflow.constant', 'tf.constant', (['keypoints'], {'dtype': 'tf.float32'}), '(keypoints, dtype=tf.float32)\n', (3552, 3581), True, 'import tensorflow as tf\n'), ((3643, 3734), 'numpy.array', 'np.array', (['[[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]]'], {}), '([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.1, 0.1], [0.2, 0.2], [\n 0.3, 0.3]]])\n', (3651, 3734), True, 'import numpy as np\n'), ((3764, 3804), 'tensorflow.constant', 'tf.constant', (['keypoints'], {'dtype': 'tf.float32'}), '(keypoints, dtype=tf.float32)\n', (3775, 3804), True, 'import tensorflow as tf\n'), ((3860, 3895), 'numpy.array', 'np.array', (['[0, 2, 1]'], {'dtype': 'np.int32'}), '([0, 2, 1], dtype=np.int32)\n', (3868, 3895), True, 'import numpy as np\n'), ((3940, 3975), 'tensorflow.constant', 'tf.constant', (['[1, 2]'], {'dtype': 'tf.int32'}), '([1, 2], dtype=tf.int32)\n', (3951, 3975), True, 'import tensorflow as tf\n'), ((4046, 4124), 'tensorflow.constant', 
'tf.constant', (['[[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]]'], {'dtype': 'tf.float32'}), '([[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32)\n', (4057, 4124), True, 'import tensorflow as tf\n'), ((4203, 4258), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0], [0.5, 0.5]]'], {'dtype': 'tf.float32'}), '([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32)\n', (4214, 4258), True, 'import tensorflow as tf\n'), ((4321, 4422), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]\n ]], dtype=tf.float32)\n', (4332, 4422), True, 'import tensorflow as tf\n'), ((4489, 4516), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (4503, 4516), True, 'import tensorflow as tf\n'), ((4532, 4640), 'tensorflow.constant', 'tf.constant', (['[[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5,\n 0, 0.5]]], dtype=tf.float32)\n', (4543, 4640), True, 'import tensorflow as tf\n'), ((4708, 4735), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (4722, 4735), True, 'import tensorflow as tf\n'), ((4751, 4859), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5,\n 0.5, 0]]], dtype=tf.float32)\n', (4762, 4859), True, 'import tensorflow as tf\n'), ((4927, 4954), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (4941, 4954), True, 'import tensorflow as tf\n'), ((4968, 5012), 'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (4977, 5012), True, 'import tensorflow as tf\n'), 
((5092, 5220), 'tensorflow.constant', 'tf.constant', (['[[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.1, 0.1], [0.6,\n 0.6, 0.1, 0.1]]]'], {'dtype': 'tf.float32'}), '([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.1,\n 0.1], [0.6, 0.6, 0.1, 0.1]]], dtype=tf.float32)\n', (5103, 5220), True, 'import tensorflow as tf\n'), ((5288, 5315), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (5302, 5315), True, 'import tensorflow as tf\n'), ((5331, 5462), 'tensorflow.constant', 'tf.constant', (['[[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.6, 0.6], [\n 0.6, 0.6, 0.1, 0.6]]]'], {'dtype': 'tf.float32'}), '([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, \n 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]], dtype=tf.float32)\n', (5342, 5462), True, 'import tensorflow as tf\n'), ((5529, 5556), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (5543, 5556), True, 'import tensorflow as tf\n'), ((5572, 5703), 'tensorflow.constant', 'tf.constant', (['[[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6], [-0.9, 0.1, 0.1, -0.9], [\n 0.6, 0.6, 0.6, 0.1]]]'], {'dtype': 'tf.float32'}), '([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6], [-0.9, 0.1, \n 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]], dtype=tf.float32)\n', (5583, 5703), True, 'import tensorflow as tf\n'), ((5770, 5797), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (5784, 5797), True, 'import tensorflow as tf\n'), ((5811, 5855), 'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (5820, 5855), True, 'import tensorflow as tf\n'), ((5935, 6069), 'tensorflow.constant', 'tf.constant', (['[[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, -0.1, -0.1], [\n 0.4, 0.4, -0.1, -0.1]]]'], {'dtype': 'tf.float32'}), '([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, -\n 0.1, -0.1], [0.4, 0.4, 
-0.1, -0.1]]], dtype=tf.float32)\n', (5946, 6069), True, 'import tensorflow as tf\n'), ((6136, 6163), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (6150, 6163), True, 'import tensorflow as tf\n'), ((6179, 6306), 'tensorflow.constant', 'tf.constant', (['[[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, 0.4, 0.4], [0.4, \n 0.4, -0.1, 0.4]]]'], {'dtype': 'tf.float32'}), '([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, 0.4, \n 0.4], [0.4, 0.4, -0.1, 0.4]]], dtype=tf.float32)\n', (6190, 6306), True, 'import tensorflow as tf\n'), ((6373, 6400), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (6387, 6400), True, 'import tensorflow as tf\n'), ((6416, 6543), 'tensorflow.constant', 'tf.constant', (['[[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4], [-1, -0.1, -0.1, -1], [0.4, \n 0.4, 0.4, -0.1]]]'], {'dtype': 'tf.float32'}), '([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4], [-1, -0.1, -0.1, \n -1], [0.4, 0.4, 0.4, -0.1]]], dtype=tf.float32)\n', (6427, 6543), True, 'import tensorflow as tf\n'), ((6610, 6637), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (6624, 6637), True, 'import tensorflow as tf\n'), ((6651, 6695), 'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (6660, 6695), True, 'import tensorflow as tf\n'), ((6776, 6877), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, 0, 0], [0, 0, -1, -1], [0, 0, 0, -1], [0, 0, 0.5, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, 0, 0], [0, 0, -1, -1], [0, 0, 0, -1], [0, 0, 0.5, 0.5]\n ]], dtype=tf.float32)\n', (6787, 6877), True, 'import tensorflow as tf\n'), ((6944, 6971), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (6958, 6971), True, 'import tensorflow as tf\n'), ((6987, 7096), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, -1, -1], [0, 0, -1, -1], [0.5, 0.5, 0, -1], 
[0.5, 0, 0.5, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, -1, -1], [0, 0, -1, -1], [0.5, 0.5, 0, -1], [0.5, 0, \n 0.5, 0.5]]], dtype=tf.float32)\n', (6998, 7096), True, 'import tensorflow as tf\n'), ((7163, 7190), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (7177, 7190), True, 'import tensorflow as tf\n'), ((7206, 7315), 'tensorflow.constant', 'tf.constant', (['[[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], [-1, 0, 0, -1], [0, 0.5, \n 0.5, 0.5]]], dtype=tf.float32)\n', (7217, 7315), True, 'import tensorflow as tf\n'), ((7382, 7409), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (7396, 7409), True, 'import tensorflow as tf\n'), ((7423, 7467), 'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (7432, 7467), True, 'import tensorflow as tf\n'), ((7545, 7646), 'tensorflow.constant', 'tf.constant', (['[[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'tf.float32'}), '([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]\n ]], dtype=tf.float32)\n', (7556, 7646), True, 'import tensorflow as tf\n'), ((7713, 7740), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (7727, 7740), True, 'import tensorflow as tf\n'), ((7756, 7865), 'tensorflow.constant', 'tf.constant', (['[[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -1, 0, 0]]]'], {'dtype': 'tf.float32'}), '([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -\n 1, 0, 0]]], dtype=tf.float32)\n', (7767, 7865), True, 'import tensorflow as tf\n'), ((7932, 7959), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (7946, 7959), True, 'import tensorflow as tf\n'), ((7975, 8084), 'tensorflow.constant', 'tf.constant', (['[[[0.5, 0.5, 
0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]]'], {'dtype': 'tf.float32'}), '([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, \n 0.5, -1]]], dtype=tf.float32)\n', (7986, 8084), True, 'import tensorflow as tf\n'), ((8151, 8178), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (8165, 8178), True, 'import tensorflow as tf\n'), ((8192, 8236), 'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (8201, 8236), True, 'import tensorflow as tf\n'), ((8309, 8410), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, 0, 0], [0, 0, 0, 0], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, 0, 0], [0, 0, 0, 0], [0, -1, 0, 0.5], [0, -1, -1, 0.5]\n ]], dtype=tf.float32)\n', (8320, 8410), True, 'import tensorflow as tf\n'), ((8477, 8504), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_r', '(3)'], {}), '(images_r, 3)\n', (8491, 8504), True, 'import tensorflow as tf\n'), ((8520, 8629), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], [-1, -1, 0, 0.5], [-1, -1, \n -1, 0.5]]], dtype=tf.float32)\n', (8531, 8629), True, 'import tensorflow as tf\n'), ((8696, 8723), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_g', '(3)'], {}), '(images_g, 3)\n', (8710, 8723), True, 'import tensorflow as tf\n'), ((8739, 8848), 'tensorflow.constant', 'tf.constant', (['[[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], [0, -1, 0, 0.5], [0, -1, \n -1, 0.5]]], dtype=tf.float32)\n', (8750, 8848), True, 'import tensorflow as tf\n'), ((8915, 8942), 'tensorflow.expand_dims', 'tf.expand_dims', (['images_b', '(3)'], {}), '(images_b, 3)\n', (8929, 8942), True, 'import tensorflow as tf\n'), ((8956, 9000), 
'tensorflow.concat', 'tf.concat', (['[images_r, images_g, images_b]', '(3)'], {}), '([images_r, images_g, images_b], 3)\n', (8965, 9000), True, 'import tensorflow as tf\n'), ((9077, 9156), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]]'], {'dtype': 'tf.float32'}), '([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]], dtype=tf.float32)\n', (9088, 9156), True, 'import tensorflow as tf\n'), ((9253, 9332), 'tensorflow.constant', 'tf.constant', (['[[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]]'], {'dtype': 'tf.float32'}), '([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)\n', (9264, 9332), True, 'import tensorflow as tf\n'), ((9424, 9503), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]]'], {'dtype': 'tf.float32'}), '([[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32)\n', (9435, 9503), True, 'import tensorflow as tf\n'), ((9587, 9725), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0]], [[0.0, 255.0, \n 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]]]'], {}), '([[[0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0]], [[0.0,\n 255.0, 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]]])\n', (9595, 9725), True, 'import numpy as np\n'), ((9786, 9821), 'tensorflow.constant', 'tf.constant', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (9797, 9821), True, 'import tensorflow as tf\n'), ((9876, 10015), 'numpy.array', 'np.array', (['[[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[255.0, 255.0,\n 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]'], {}), '([[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[\n 255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]])\n', (9884, 10015), True, 'import numpy as np\n'), ((10075, 10110), 'tensorflow.constant', 'tf.constant', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (10086, 10110), True, 'import 
tensorflow as tf\n'), ((10160, 10298), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [255.0, 255.0, 255.0]], [[0.0, 0.0, 0.0\n ], [255.0, 255.0, 255.0], [255.0, 255.0, 255.0]]]'], {}), '([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [255.0, 255.0, 255.0]], [[0.0,\n 0.0, 0.0], [255.0, 255.0, 255.0], [255.0, 255.0, 255.0]]])\n', (10168, 10298), True, 'import numpy as np\n'), ((10359, 10394), 'tensorflow.constant', 'tf.constant', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (10370, 10394), True, 'import tensorflow as tf\n'), ((10457, 10493), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float32'}), '([1.0], dtype=tf.float32)\n', (10468, 10493), True, 'import tensorflow as tf\n'), ((10550, 10605), 'tensorflow.constant', 'tf.constant', (['[[0.0, 0.25, 0.75, 1.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32)\n', (10561, 10605), True, 'import tensorflow as tf\n'), ((10663, 10697), 'tensorflow.constant', 'tf.constant', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (10674, 10697), True, 'import tensorflow as tf\n'), ((10765, 10808), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[1.0, 0.0]], dtype=tf.float32)\n', (10776, 10808), True, 'import tensorflow as tf\n'), ((10865, 10934), 'numpy.array', 'np.array', (['[[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]]]'], {}), '([[[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]]])\n', (10873, 10934), True, 'import numpy as np\n'), ((10973, 11008), 'tensorflow.constant', 'tf.constant', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (10984, 11008), True, 'import tensorflow as tf\n'), ((11074, 11122), 'numpy.array', 'np.array', (['[[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]]'], {}), '([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]])\n', (11082, 11122), True, 'import numpy as np\n'), ((11148, 11188), 'tensorflow.constant', 'tf.constant', (['keypoints'], {'dtype': 'tf.float32'}), 
'(keypoints, dtype=tf.float32)\n', (11159, 11188), True, 'import tensorflow as tf\n'), ((11267, 11306), 'tensorflow.constant', 'tf.constant', (['[np.nan]'], {'dtype': 'tf.float32'}), '([np.nan], dtype=tf.float32)\n', (11278, 11306), True, 'import tensorflow as tf\n'), ((11379, 11432), 'tensorflow.constant', 'tf.constant', (['[[0.25, 0.5, 0.75, 1]]'], {'dtype': 'tf.float32'}), '([[0.25, 0.5, 0.75, 1]], dtype=tf.float32)\n', (11390, 11432), True, 'import tensorflow as tf\n'), ((11506, 11540), 'tensorflow.constant', 'tf.constant', (['[2]'], {'dtype': 'tf.float32'}), '([2], dtype=tf.float32)\n', (11517, 11540), True, 'import tensorflow as tf\n'), ((11634, 11672), 'object_detection.tensorflow_detect.core.preprocessor._rgb_to_grayscale', 'preprocessor._rgb_to_grayscale', (['images'], {}), '(images)\n', (11664, 11672), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((11695, 11728), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['images'], {}), '(images)\n', (11720, 11728), True, 'import tensorflow as tf\n'), ((12259, 12315), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (12282, 12315), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((13088, 13177), 'object_detection.tensorflow_detect.core.preprocessor.retain_boxes_above_threshold', 'preprocessor.retain_boxes_above_threshold', (['boxes', 'labels', 'label_scores'], {'threshold': '(0.6)'}), '(boxes, labels, label_scores,\n threshold=0.6)\n', (13129, 13177), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((14173, 14299), 'object_detection.tensorflow_detect.core.preprocessor.retain_boxes_above_threshold', 
'preprocessor.retain_boxes_above_threshold', (['boxes', 'labels', 'label_scores'], {'multiclass_scores': 'multiclass_scores', 'threshold': '(0.6)'}), '(boxes, labels, label_scores,\n multiclass_scores=multiclass_scores, threshold=0.6)\n', (14214, 14299), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((14941, 15037), 'object_detection.tensorflow_detect.core.preprocessor.retain_boxes_above_threshold', 'preprocessor.retain_boxes_above_threshold', (['boxes', 'labels', 'label_scores', 'masks'], {'threshold': '(0.6)'}), '(boxes, labels, label_scores,\n masks, threshold=0.6)\n', (14982, 15037), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((15545, 15655), 'object_detection.tensorflow_detect.core.preprocessor.retain_boxes_above_threshold', 'preprocessor.retain_boxes_above_threshold', (['boxes', 'labels', 'label_scores'], {'keypoints': 'keypoints', 'threshold': '(0.6)'}), '(boxes, labels, label_scores,\n keypoints=keypoints, threshold=0.6)\n', (15586, 15655), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((16216, 16305), 'object_detection.tensorflow_detect.core.preprocessor.retain_boxes_above_threshold', 'preprocessor.retain_boxes_above_threshold', (['boxes', 'labels', 'label_scores'], {'threshold': '(0.6)'}), '(boxes, labels, label_scores,\n threshold=0.6)\n', (16257, 16305), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((17150, 17192), 'object_detection.tensorflow_detect.core.preprocessor._flip_boxes_left_right', 'preprocessor._flip_boxes_left_right', (['boxes'], {}), '(boxes)\n', (17185, 17192), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((17537, 17576), 
'object_detection.tensorflow_detect.core.preprocessor._flip_boxes_up_down', 'preprocessor._flip_boxes_up_down', (['boxes'], {}), '(boxes)\n', (17569, 17576), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((17913, 17945), 'object_detection.tensorflow_detect.core.preprocessor._rot90_boxes', 'preprocessor._rot90_boxes', (['boxes'], {}), '(boxes)\n', (17938, 17945), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((18288, 18334), 'object_detection.tensorflow_detect.core.preprocessor._flip_masks_left_right', 'preprocessor._flip_masks_left_right', (['test_mask'], {}), '(test_mask)\n', (18323, 18334), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((18675, 18718), 'object_detection.tensorflow_detect.core.preprocessor._flip_masks_up_down', 'preprocessor._flip_masks_up_down', (['test_mask'], {}), '(test_mask)\n', (18707, 18718), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((19051, 19087), 'object_detection.tensorflow_detect.core.preprocessor._rot90_masks', 'preprocessor._rot90_masks', (['test_mask'], {}), '(test_mask)\n', (19076, 19087), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((19612, 19650), 'object_detection.tensorflow_detect.core.preprocessor_cache.PreprocessorCache', 'preprocessor_cache.PreprocessorCache', ([], {}), '()\n', (19648, 19650), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((19866, 19976), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': 'test_masks', 'include_keypoints': 
'test_keypoints'}), '(include_instance_masks=test_masks,\n include_keypoints=test_keypoints)\n', (19903, 19976), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((21833, 21889), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (21856, 21889), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((22030, 22075), 'tensorflow.squared_difference', 'tf.squared_difference', (['boxes', 'boxes_expected1'], {}), '(boxes, boxes_expected1)\n', (22051, 22075), True, 'import tensorflow as tf\n'), ((22094, 22139), 'tensorflow.squared_difference', 'tf.squared_difference', (['boxes', 'boxes_expected2'], {}), '(boxes, boxes_expected2)\n', (22115, 22139), True, 'import tensorflow as tf\n'), ((22157, 22194), 'tensorflow.multiply', 'tf.multiply', (['boxes_diff1', 'boxes_diff2'], {}), '(boxes_diff1, boxes_diff2)\n', (22168, 22194), True, 'import tensorflow as tf\n'), ((22221, 22246), 'tensorflow.zeros_like', 'tf.zeros_like', (['boxes_diff'], {}), '(boxes_diff)\n', (22234, 22246), True, 'import tensorflow as tf\n'), ((22267, 22314), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected1'], {}), '(images, images_expected1)\n', (22288, 22314), True, 'import tensorflow as tf\n'), ((22334, 22381), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected2'], {}), '(images, images_expected2)\n', (22355, 22381), True, 'import tensorflow as tf\n'), ((22400, 22439), 'tensorflow.multiply', 'tf.multiply', (['images_diff1', 'images_diff2'], {}), '(images_diff1, images_diff2)\n', (22411, 22439), True, 'import tensorflow as tf\n'), ((22467, 22493), 'tensorflow.zeros_like', 'tf.zeros_like', (['images_diff'], {}), '(images_diff)\n', (22480, 22493), True, 'import 
tensorflow as tf\n'), ((23365, 23421), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (23388, 23421), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((23563, 23610), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected1'], {}), '(images, images_expected1)\n', (23584, 23610), True, 'import tensorflow as tf\n'), ((23630, 23677), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected2'], {}), '(images, images_expected2)\n', (23651, 23677), True, 'import tensorflow as tf\n'), ((23696, 23735), 'tensorflow.multiply', 'tf.multiply', (['images_diff1', 'images_diff2'], {}), '(images_diff1, images_diff2)\n', (23707, 23735), True, 'import tensorflow as tf\n'), ((23763, 23789), 'tensorflow.zeros_like', 'tf.zeros_like', (['images_diff'], {}), '(images_diff)\n', (23776, 23789), True, 'import tensorflow as tf\n'), ((24772, 24824), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, image_height, image_width, 3]'], {}), '([1, image_height, image_width, 3])\n', (24789, 24824), True, 'import tensorflow as tf\n'), ((25434, 25528), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': '(True)', 'include_keypoints': '(True)'}), '(include_instance_masks=True,\n include_keypoints=True)\n', (25471, 25528), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((25552, 25648), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocess_options, func_arg_map=\n preprocessor_arg_map)\n', (25575, 25648), 
False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((26612, 26668), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (26635, 26668), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((26809, 26854), 'tensorflow.squared_difference', 'tf.squared_difference', (['boxes', 'boxes_expected1'], {}), '(boxes, boxes_expected1)\n', (26830, 26854), True, 'import tensorflow as tf\n'), ((26873, 26918), 'tensorflow.squared_difference', 'tf.squared_difference', (['boxes', 'boxes_expected2'], {}), '(boxes, boxes_expected2)\n', (26894, 26918), True, 'import tensorflow as tf\n'), ((26936, 26973), 'tensorflow.multiply', 'tf.multiply', (['boxes_diff1', 'boxes_diff2'], {}), '(boxes_diff1, boxes_diff2)\n', (26947, 26973), True, 'import tensorflow as tf\n'), ((27000, 27025), 'tensorflow.zeros_like', 'tf.zeros_like', (['boxes_diff'], {}), '(boxes_diff)\n', (27013, 27025), True, 'import tensorflow as tf\n'), ((27046, 27093), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected1'], {}), '(images, images_expected1)\n', (27067, 27093), True, 'import tensorflow as tf\n'), ((27113, 27160), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected2'], {}), '(images, images_expected2)\n', (27134, 27160), True, 'import tensorflow as tf\n'), ((27179, 27218), 'tensorflow.multiply', 'tf.multiply', (['images_diff1', 'images_diff2'], {}), '(images_diff1, images_diff2)\n', (27190, 27218), True, 'import tensorflow as tf\n'), ((27246, 27272), 'tensorflow.zeros_like', 'tf.zeros_like', (['images_diff'], {}), '(images_diff)\n', (27259, 27272), True, 'import tensorflow as tf\n'), ((28137, 28193), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 
'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (28160, 28193), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((28335, 28382), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected1'], {}), '(images, images_expected1)\n', (28356, 28382), True, 'import tensorflow as tf\n'), ((28402, 28449), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected2'], {}), '(images, images_expected2)\n', (28423, 28449), True, 'import tensorflow as tf\n'), ((28468, 28507), 'tensorflow.multiply', 'tf.multiply', (['images_diff1', 'images_diff2'], {}), '(images_diff1, images_diff2)\n', (28479, 28507), True, 'import tensorflow as tf\n'), ((28535, 28561), 'tensorflow.zeros_like', 'tf.zeros_like', (['images_diff'], {}), '(images_diff)\n', (28548, 28561), True, 'import tensorflow as tf\n'), ((29536, 29588), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, image_height, image_width, 3]'], {}), '([1, image_height, image_width, 3])\n', (29553, 29588), True, 'import tensorflow as tf\n'), ((30196, 30290), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': '(True)', 'include_keypoints': '(True)'}), '(include_instance_masks=True,\n include_keypoints=True)\n', (30233, 30290), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((30314, 30410), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocess_options, func_arg_map=\n preprocessor_arg_map)\n', (30337, 30410), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, 
preprocessor_cache\n'), ((31359, 31415), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (31382, 31415), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((31556, 31601), 'tensorflow.squared_difference', 'tf.squared_difference', (['boxes', 'boxes_expected1'], {}), '(boxes, boxes_expected1)\n', (31577, 31601), True, 'import tensorflow as tf\n'), ((31620, 31665), 'tensorflow.squared_difference', 'tf.squared_difference', (['boxes', 'boxes_expected2'], {}), '(boxes, boxes_expected2)\n', (31641, 31665), True, 'import tensorflow as tf\n'), ((31683, 31720), 'tensorflow.multiply', 'tf.multiply', (['boxes_diff1', 'boxes_diff2'], {}), '(boxes_diff1, boxes_diff2)\n', (31694, 31720), True, 'import tensorflow as tf\n'), ((31747, 31772), 'tensorflow.zeros_like', 'tf.zeros_like', (['boxes_diff'], {}), '(boxes_diff)\n', (31760, 31772), True, 'import tensorflow as tf\n'), ((31793, 31840), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected1'], {}), '(images, images_expected1)\n', (31814, 31840), True, 'import tensorflow as tf\n'), ((31860, 31907), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected2'], {}), '(images, images_expected2)\n', (31881, 31907), True, 'import tensorflow as tf\n'), ((31926, 31965), 'tensorflow.multiply', 'tf.multiply', (['images_diff1', 'images_diff2'], {}), '(images_diff1, images_diff2)\n', (31937, 31965), True, 'import tensorflow as tf\n'), ((31993, 32019), 'tensorflow.zeros_like', 'tf.zeros_like', (['images_diff'], {}), '(images_diff)\n', (32006, 32019), True, 'import tensorflow as tf\n'), ((32874, 32930), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', 
(32897, 32930), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((33072, 33119), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected1'], {}), '(images, images_expected1)\n', (33093, 33119), True, 'import tensorflow as tf\n'), ((33139, 33186), 'tensorflow.squared_difference', 'tf.squared_difference', (['images', 'images_expected2'], {}), '(images, images_expected2)\n', (33160, 33186), True, 'import tensorflow as tf\n'), ((33205, 33244), 'tensorflow.multiply', 'tf.multiply', (['images_diff1', 'images_diff2'], {}), '(images_diff1, images_diff2)\n', (33216, 33244), True, 'import tensorflow as tf\n'), ((33272, 33298), 'tensorflow.zeros_like', 'tf.zeros_like', (['images_diff'], {}), '(images_diff)\n', (33285, 33298), True, 'import tensorflow as tf\n'), ((34122, 34174), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, image_height, image_width, 3]'], {}), '([1, image_height, image_width, 3])\n', (34139, 34174), True, 'import tensorflow as tf\n'), ((34574, 34668), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': '(True)', 'include_keypoints': '(True)'}), '(include_instance_masks=True,\n include_keypoints=True)\n', (34611, 34668), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((34692, 34788), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocess_options, func_arg_map=\n preprocessor_arg_map)\n', (34715, 34788), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((35693, 35752), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 
'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (35716, 35752), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((35931, 35967), 'tensorflow.greater_equal', 'tf.greater_equal', (['images', 'images_min'], {}), '(images, images_min)\n', (35947, 35967), True, 'import tensorflow as tf\n'), ((35986, 36019), 'tensorflow.less_equal', 'tf.less_equal', (['images', 'images_max'], {}), '(images, images_max)\n', (35999, 36019), True, 'import tensorflow as tf\n'), ((36038, 36065), 'tensorflow.fill', 'tf.fill', (['[1, 4, 4, 3]', '(True)'], {}), '([1, 4, 4, 3], True)\n', (36045, 36065), True, 'import tensorflow as tf\n'), ((37108, 37164), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (37131, 37164), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((37255, 37280), 'tensorflow.shape', 'tf.shape', (['images_original'], {}), '(images_original)\n', (37263, 37280), True, 'import tensorflow as tf\n'), ((37307, 37330), 'tensorflow.shape', 'tf.shape', (['images_scaled'], {}), '(images_scaled)\n', (37315, 37330), True, 'import tensorflow as tf\n'), ((38406, 38462), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options'], {}), '(tensor_dict, preprocess_options)\n', (38429, 38462), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((38573, 38630), 'tensorflow.split', 'tf.split', ([], {'value': 'images_gray', 'num_or_size_splits': '(3)', 'axis': '(3)'}), '(value=images_gray, num_or_size_splits=3, axis=3)\n', (38581, 38630), True, 'import tensorflow as tf\n'), ((38675, 38736), 'tensorflow.split', 
'tf.split', ([], {'value': 'images_original', 'num_or_size_splits': '(3)', 'axis': '(3)'}), '(value=images_original, num_or_size_splits=3, axis=3)\n', (38683, 38736), True, 'import tensorflow as tf\n'), ((39045, 39088), 'tensorflow.multiply', 'tf.multiply', (['images_r_diff1', 'images_r_diff2'], {}), '(images_r_diff1, images_r_diff2)\n', (39056, 39088), True, 'import tensorflow as tf\n'), ((39388, 39431), 'tensorflow.multiply', 'tf.multiply', (['images_g_diff1', 'images_g_diff2'], {}), '(images_g_diff1, images_g_diff2)\n', (39399, 39431), True, 'import tensorflow as tf\n'), ((39731, 39774), 'tensorflow.multiply', 'tf.multiply', (['images_b_diff1', 'images_b_diff2'], {}), '(images_b_diff1, images_b_diff2)\n', (39742, 39774), True, 'import tensorflow as tf\n'), ((39793, 39845), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32', 'shape': '[1, 4, 4, 1]'}), '(0, dtype=tf.float32, shape=[1, 4, 4, 1])\n', (39804, 39845), True, 'import tensorflow as tf\n'), ((41015, 41074), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (41038, 41074), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((41164, 41189), 'tensorflow.shape', 'tf.shape', (['images_original'], {}), '(images_original)\n', (41172, 41189), True, 'import tensorflow as tf\n'), ((41215, 41238), 'tensorflow.shape', 'tf.shape', (['images_bright'], {}), '(images_bright)\n', (41223, 41238), True, 'import tensorflow as tf\n'), ((42478, 42537), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (42501, 42537), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((42629, 42654), 
'tensorflow.shape', 'tf.shape', (['images_original'], {}), '(images_original)\n', (42637, 42654), True, 'import tensorflow as tf\n'), ((42682, 42707), 'tensorflow.shape', 'tf.shape', (['images_contrast'], {}), '(images_contrast)\n', (42690, 42707), True, 'import tensorflow as tf\n'), ((43939, 43998), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (43962, 43998), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((44085, 44110), 'tensorflow.shape', 'tf.shape', (['images_original'], {}), '(images_original)\n', (44093, 44110), True, 'import tensorflow as tf\n'), ((44133, 44153), 'tensorflow.shape', 'tf.shape', (['images_hue'], {}), '(images_hue)\n', (44141, 44153), True, 'import tensorflow as tf\n'), ((45310, 45335), 'tensorflow.shape', 'tf.shape', (['images_original'], {}), '(images_original)\n', (45318, 45335), True, 'import tensorflow as tf\n'), ((45420, 45479), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (45443, 45479), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((45586, 45618), 'tensorflow.shape', 'tf.shape', (['images_distorted_color'], {}), '(images_distorted_color)\n', (45594, 45618), True, 'import tensorflow as tf\n'), ((46609, 46624), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (46617, 46624), True, 'import tensorflow as tf\n'), ((46711, 46770), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (46734, 46770), False, 'from object_detection.tensorflow_detect.core import 
standard_fields as fields, preprocessor, preprocessor_cache\n'), ((46875, 46900), 'tensorflow.shape', 'tf.shape', (['distorted_boxes'], {}), '(distorted_boxes)\n', (46883, 46900), True, 'import tensorflow as tf\n'), ((47762, 47821), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (47785, 47821), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((48061, 48075), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (48068, 48075), True, 'import tensorflow as tf\n'), ((48103, 48127), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (48110, 48127), True, 'import tensorflow as tf\n'), ((48146, 48161), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (48153, 48161), True, 'import tensorflow as tf\n'), ((48190, 48215), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (48197, 48215), True, 'import tensorflow as tf\n'), ((50156, 50215), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (50179, 50215), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((50412, 50426), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (50419, 50426), True, 'import tensorflow as tf\n'), ((50454, 50478), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (50461, 50478), True, 'import tensorflow as tf\n'), ((50497, 50512), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (50504, 50512), True, 'import tensorflow as tf\n'), ((50541, 50566), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (50548, 50566), True, 'import 
tensorflow as tf\n'), ((51702, 51761), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (51725, 51761), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((52001, 52015), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (52008, 52015), True, 'import tensorflow as tf\n'), ((52043, 52067), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (52050, 52067), True, 'import tensorflow as tf\n'), ((52086, 52101), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (52093, 52101), True, 'import tensorflow as tf\n'), ((52130, 52155), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (52137, 52155), True, 'import tensorflow as tf\n'), ((53189, 53248), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (53212, 53248), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((53432, 53491), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (53455, 53491), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((53940, 53955), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (53948, 53955), True, 'import tensorflow as tf\n'), ((53984, 54009), 'tensorflow.shape', 'tf.shape', (['distorted_boxes'], {}), '(distorted_boxes)\n', (53992, 54009), True, 'import tensorflow as tf\n'), ((54029, 54045), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (54037, 54045), True, 
'import tensorflow as tf\n'), ((54075, 54101), 'tensorflow.shape', 'tf.shape', (['distorted_images'], {}), '(distorted_images)\n', (54083, 54101), True, 'import tensorflow as tf\n'), ((55281, 55385), 'tensorflow.constant', 'tf.constant', (['[[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], [0.3, 0.1, 0.4, 0.7]]'], {'dtype': 'tf.float32'}), '([[0.1, 0.1, 0.8, 0.3], [0.2, 0.4, 0.75, 0.75], [0.3, 0.1, 0.4, \n 0.7]], dtype=tf.float32)\n', (55292, 55385), True, 'import tensorflow as tf\n'), ((55444, 55483), 'tensorflow.constant', 'tf.constant', (['[1, 7, 11]'], {'dtype': 'tf.int32'}), '([1, 7, 11], dtype=tf.int32)\n', (55455, 55483), True, 'import tensorflow as tf\n'), ((55692, 55751), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (55715, 55751), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((58040, 58099), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (58063, 58099), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((58446, 58460), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (58453, 58460), True, 'import tensorflow as tf\n'), ((58488, 58512), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (58495, 58512), True, 'import tensorflow as tf\n'), ((58531, 58546), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (58538, 58546), True, 'import tensorflow as tf\n'), ((58575, 58600), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (58582, 58600), True, 'import tensorflow as tf\n'), ((58630, 58656), 'tensorflow.rank', 'tf.rank', (['multiclass_scores'], {}), 
'(multiclass_scores)\n', (58637, 58656), True, 'import tensorflow as tf\n'), ((58696, 58732), 'tensorflow.rank', 'tf.rank', (['distorted_multiclass_scores'], {}), '(distorted_multiclass_scores)\n', (58703, 58732), True, 'import tensorflow as tf\n'), ((61062, 61112), 'tensorflow.random_uniform', 'tf.random_uniform', (['[2, 200, 400]'], {'dtype': 'tf.float32'}), '([2, 200, 400], dtype=tf.float32)\n', (61079, 61112), True, 'import tensorflow as tf\n'), ((63926, 63976), 'tensorflow.random_uniform', 'tf.random_uniform', (['[2, 200, 400]'], {'dtype': 'tf.float32'}), '([2, 200, 400], dtype=tf.float32)\n', (63943, 63976), True, 'import tensorflow as tf\n'), ((64260, 64326), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': '(True)'}), '(include_instance_masks=True)\n', (64297, 64326), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((66494, 66555), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_keypoints': '(True)'}), '(include_keypoints=True)\n', (66531, 66555), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((69048, 69109), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_keypoints': '(True)'}), '(include_keypoints=True)\n', (69085, 69109), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((71567, 71631), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_label_scores': '(True)'}), '(include_label_scores=True)\n', (71604, 71631), False, 'from object_detection.tensorflow_detect.core 
import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((71668, 71767), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (71691, 71767), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((73268, 73365), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_label_scores': '(True)', 'include_instance_masks': '(True)'}), '(include_label_scores=True,\n include_instance_masks=True)\n', (73305, 73365), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((73516, 73615), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (73539, 73615), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((74482, 74574), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_label_scores': '(True)', 'include_keypoints': '(True)'}), '(include_label_scores=True,\n include_keypoints=True)\n', (74519, 74574), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((74725, 74824), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n 
preprocessor_arg_map)\n', (74748, 74824), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((75695, 75745), 'tensorflow.random_uniform', 'tf.random_uniform', (['[2, 200, 400]'], {'dtype': 'tf.float32'}), '([2, 200, 400], dtype=tf.float32)\n', (75712, 75745), True, 'import tensorflow as tf\n'), ((76028, 76094), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': '(True)'}), '(include_instance_masks=True)\n', (76065, 76094), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((77996, 78057), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_keypoints': '(True)'}), '(include_keypoints=True)\n', (78033, 78057), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((80367, 80406), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {}), '()\n', (80404, 80406), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((80645, 80744), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (80668, 80744), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((81727, 81777), 'tensorflow.random_uniform', 'tf.random_uniform', (['[2, 200, 400]'], {'dtype': 'tf.float32'}), '([2, 200, 400], dtype=tf.float32)\n', (81744, 81777), True, 'import tensorflow as tf\n'), 
((82060, 82126), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_instance_masks': '(True)'}), '(include_instance_masks=True)\n', (82097, 82126), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((82242, 82341), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (82265, 82341), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((83847, 83908), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_keypoints': '(True)'}), '(include_keypoints=True)\n', (83884, 83908), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((84024, 84123), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (84047, 84123), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((86363, 86422), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (86386, 86422), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((86570, 86629), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', 
(['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (86593, 86629), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((86856, 86871), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (86864, 86871), True, 'import tensorflow as tf\n'), ((86897, 86919), 'tensorflow.shape', 'tf.shape', (['padded_boxes'], {}), '(padded_boxes)\n', (86905, 86919), True, 'import tensorflow as tf\n'), ((86939, 86955), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (86947, 86955), True, 'import tensorflow as tf\n'), ((86982, 87005), 'tensorflow.shape', 'tf.shape', (['padded_images'], {}), '(padded_images)\n', (86990, 87005), True, 'import tensorflow as tf\n'), ((88912, 88971), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (88935, 88971), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((89156, 89215), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (89179, 89215), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((89442, 89457), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (89450, 89457), True, 'import tensorflow as tf\n'), ((89483, 89505), 'tensorflow.shape', 'tf.shape', (['padded_boxes'], {}), '(padded_boxes)\n', (89491, 89505), True, 'import tensorflow as tf\n'), ((89525, 89541), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (89533, 89541), True, 'import tensorflow as tf\n'), ((89568, 89591), 'tensorflow.shape', 'tf.shape', (['padded_images'], {}), '(padded_images)\n', (89576, 89591), True, 
'import tensorflow as tf\n'), ((90827, 90867), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', '[]'], {}), '(tensor_dict, [])\n', (90850, 90867), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((91060, 91119), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (91083, 91119), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((91351, 91366), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (91359, 91366), True, 'import tensorflow as tf\n'), ((91393, 91416), 'tensorflow.shape', 'tf.shape', (['cropped_boxes'], {}), '(cropped_boxes)\n', (91401, 91416), True, 'import tensorflow as tf\n'), ((91436, 91452), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (91444, 91452), True, 'import tensorflow as tf\n'), ((91480, 91504), 'tensorflow.shape', 'tf.shape', (['cropped_images'], {}), '(cropped_images)\n', (91488, 91504), True, 'import tensorflow as tf\n'), ((92292, 92332), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', '[]'], {}), '(tensor_dict, [])\n', (92315, 92332), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((92523, 92582), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (92546, 92582), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((92809, 92824), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (92817, 92824), True, 'import 
tensorflow as tf\n'), ((92850, 92872), 'tensorflow.shape', 'tf.shape', (['padded_boxes'], {}), '(padded_boxes)\n', (92858, 92872), True, 'import tensorflow as tf\n'), ((92892, 92908), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (92900, 92908), True, 'import tensorflow as tf\n'), ((92935, 92958), 'tensorflow.shape', 'tf.shape', (['padded_images'], {}), '(padded_images)\n', (92943, 92958), True, 'import tensorflow as tf\n'), ((94451, 94510), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (94474, 94510), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((94651, 94667), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (94659, 94667), True, 'import tensorflow as tf\n'), ((94695, 94719), 'tensorflow.shape', 'tf.shape', (['blacked_images'], {}), '(blacked_images)\n', (94703, 94719), True, 'import tensorflow as tf\n'), ((95993, 96052), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (96016, 96052), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((96201, 96225), 'tensorflow.shape', 'tf.shape', (['resized_images'], {}), '(resized_images)\n', (96209, 96225), True, 'import tensorflow as tf\n'), ((96254, 96298), 'tensorflow.constant', 'tf.constant', (['[1, 75, 150, 3]'], {'dtype': 'tf.int32'}), '([1, 75, 150, 3], dtype=tf.int32)\n', (96265, 96298), True, 'import tensorflow as tf\n'), ((98057, 98088), 'tensorflow.constant', 'tf.constant', (['(50)'], {'dtype': 'tf.int32'}), '(50, dtype=tf.int32)\n', (98068, 98088), True, 'import tensorflow as tf\n'), ((98101, 98133), 'tensorflow.constant', 'tf.constant', (['(100)'], {'dtype': 
'tf.int32'}), '(100, dtype=tf.int32)\n', (98112, 98133), True, 'import tensorflow as tf\n'), ((102853, 102888), 'numpy.array', 'np.array', (['[[[0, 1, 2]]]', 'np.float32'], {}), '([[[0, 1, 2]]], np.float32)\n', (102861, 102888), True, 'import numpy as np\n'), ((102907, 103033), 'numpy.array', 'np.array', (['[[[0, 1, 2], [123.68, 116.779, 103.939]], [[123.68, 116.779, 103.939], [\n 123.68, 116.779, 103.939]]]', 'np.float32'], {}), '([[[0, 1, 2], [123.68, 116.779, 103.939]], [[123.68, 116.779, \n 103.939], [123.68, 116.779, 103.939]]], np.float32)\n', (102915, 103033), True, 'import numpy as np\n'), ((103095, 103144), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, 3)'}), '(tf.float32, shape=(None, None, 3))\n', (103109, 103144), True, 'import tensorflow as tf\n'), ((103164, 103335), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim', 'pad_to_max_dimension': '(True)', 'per_channel_pad_value': '(123.68, 116.779, 103.939)'}), '(in_image, min_dimension=min_dim, max_dimension\n =max_dim, pad_to_max_dimension=True, per_channel_pad_value=(123.68, \n 116.779, 103.939))\n', (103192, 103335), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((108997, 109032), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, 200, 300, 3]'], {}), '([1, 200, 300, 3])\n', (109014, 109032), True, 'import tensorflow as tf\n'), ((112581, 112616), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, 200, 300, 3]'], {}), '([1, 200, 300, 3])\n', (112598, 112616), True, 'import tensorflow as tf\n'), ((113018, 113045), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_shape'], {}), '(in_shape)\n', (113035, 113045), True, 'import tensorflow as tf\n'), ((113061, 113082), 'tensorflow.constant', 'tf.constant', (['in_boxes'], {}), '(in_boxes)\n', (113072, 
113082), True, 'import tensorflow as tf\n'), ((113102, 113173), 'object_detection.tensorflow_detect.core.preprocessor.scale_boxes_to_pixel_coordinates', 'preprocessor.scale_boxes_to_pixel_coordinates', (['in_image'], {'boxes': 'in_boxes'}), '(in_image, boxes=in_boxes)\n', (113147, 113173), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((113775, 113802), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_shape'], {}), '(in_shape)\n', (113792, 113802), True, 'import tensorflow as tf\n'), ((113837, 113936), 'object_detection.tensorflow_detect.core.preprocessor.scale_boxes_to_pixel_coordinates', 'preprocessor.scale_boxes_to_pixel_coordinates', (['in_image'], {'boxes': 'in_boxes', 'keypoints': 'in_keypoints'}), '(in_image, boxes=in_boxes,\n keypoints=in_keypoints)\n', (113882, 113936), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((116063, 116122), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (116086, 116122), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((116364, 116379), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (116371, 116379), True, 'import tensorflow as tf\n'), ((116408, 116433), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (116415, 116433), True, 'import tensorflow as tf\n'), ((116451, 116465), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (116458, 116465), True, 'import tensorflow as tf\n'), ((116493, 116517), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (116500, 116517), True, 'import tensorflow as tf\n'), ((117606, 117675), 
'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_multiclass_scores': '(True)'}), '(include_multiclass_scores=True)\n', (117643, 117675), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((117713, 117812), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (117736, 117812), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((118113, 118128), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (118120, 118128), True, 'import tensorflow as tf\n'), ((118157, 118182), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (118164, 118182), True, 'import tensorflow as tf\n'), ((118200, 118214), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (118207, 118214), True, 'import tensorflow as tf\n'), ((118242, 118266), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (118249, 118266), True, 'import tensorflow as tf\n'), ((118296, 118322), 'tensorflow.rank', 'tf.rank', (['multiclass_scores'], {}), '(multiclass_scores)\n', (118303, 118322), True, 'import tensorflow as tf\n'), ((118362, 118398), 'tensorflow.rank', 'tf.rank', (['distorted_multiclass_scores'], {}), '(distorted_multiclass_scores)\n', (118369, 118398), True, 'import tensorflow as tf\n'), ((119858, 119917), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (119881, 119917), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, 
preprocessor, preprocessor_cache\n'), ((120159, 120174), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (120166, 120174), True, 'import tensorflow as tf\n'), ((120203, 120228), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (120210, 120228), True, 'import tensorflow as tf\n'), ((120246, 120260), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (120253, 120260), True, 'import tensorflow as tf\n'), ((120288, 120312), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (120295, 120312), True, 'import tensorflow as tf\n'), ((122733, 122965), 'object_detection.tensorflow_detect.core.preprocessor.get_default_func_arg_map', 'preprocessor.get_default_func_arg_map', ([], {'include_label_scores': 'include_label_scores', 'include_multiclass_scores': 'include_multiclass_scores', 'include_instance_masks': 'include_instance_masks', 'include_keypoints': 'include_keypoints'}), '(include_label_scores=\n include_label_scores, include_multiclass_scores=\n include_multiclass_scores, include_instance_masks=\n include_instance_masks, include_keypoints=include_keypoints)\n', (122770, 122965), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((123012, 123111), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (123035, 123111), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((123304, 123319), 'tensorflow.rank', 'tf.rank', (['images'], {}), '(images)\n', (123311, 123319), True, 'import tensorflow as tf\n'), ((123348, 123373), 'tensorflow.rank', 'tf.rank', (['distorted_images'], {}), '(distorted_images)\n', (123355, 123373), True, 'import 
tensorflow as tf\n'), ((123391, 123405), 'tensorflow.rank', 'tf.rank', (['boxes'], {}), '(boxes)\n', (123398, 123405), True, 'import tensorflow as tf\n'), ((123433, 123457), 'tensorflow.rank', 'tf.rank', (['distorted_boxes'], {}), '(distorted_boxes)\n', (123440, 123457), True, 'import tensorflow as tf\n'), ((125315, 125381), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0], [0.5, 0.5], [1000, 1]]'], {'dtype': 'tf.float32'}), '([[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32)\n', (125326, 125381), True, 'import tensorflow as tf\n'), ((125458, 125569), 'object_detection.tensorflow_detect.core.preprocessor.convert_class_logits_to_softmax', 'preprocessor.convert_class_logits_to_softmax', ([], {'multiclass_scores': 'multiclass_scores', 'temperature': 'temperature'}), '(multiclass_scores=\n multiclass_scores, temperature=temperature)\n', (125502, 125569), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((1176, 1208), 'tensorflow.constant', 'tf.constant', (['(255)'], {'dtype': 'tf.uint8'}), '(255, dtype=tf.uint8)\n', (1187, 1208), True, 'import tensorflow as tf\n'), ((1248, 1280), 'tensorflow.constant', 'tf.constant', (['(128)'], {'dtype': 'tf.uint8'}), '(128, dtype=tf.uint8)\n', (1259, 1280), True, 'import tensorflow as tf\n'), ((1318, 1348), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.uint8'}), '(0, dtype=tf.uint8)\n', (1329, 1348), True, 'import tensorflow as tf\n'), ((38789, 38810), 'tensorflow.to_float', 'tf.to_float', (['images_r'], {}), '(images_r)\n', (38800, 38810), True, 'import tensorflow as tf\n'), ((38855, 38881), 'tensorflow.to_float', 'tf.to_float', (['images_gray_r'], {}), '(images_gray_r)\n', (38866, 38881), True, 'import tensorflow as tf\n'), ((38926, 38952), 'tensorflow.to_float', 'tf.to_float', (['images_gray_r'], {}), '(images_gray_r)\n', (38937, 38952), True, 'import tensorflow as tf\n'), ((38997, 39023), 'tensorflow.to_float', 'tf.to_float', 
(['images_gray_g'], {}), '(images_gray_g)\n', (39008, 39023), True, 'import tensorflow as tf\n'), ((39132, 39153), 'tensorflow.to_float', 'tf.to_float', (['images_g'], {}), '(images_g)\n', (39143, 39153), True, 'import tensorflow as tf\n'), ((39198, 39224), 'tensorflow.to_float', 'tf.to_float', (['images_gray_g'], {}), '(images_gray_g)\n', (39209, 39224), True, 'import tensorflow as tf\n'), ((39269, 39295), 'tensorflow.to_float', 'tf.to_float', (['images_gray_g'], {}), '(images_gray_g)\n', (39280, 39295), True, 'import tensorflow as tf\n'), ((39340, 39366), 'tensorflow.to_float', 'tf.to_float', (['images_gray_b'], {}), '(images_gray_b)\n', (39351, 39366), True, 'import tensorflow as tf\n'), ((39475, 39496), 'tensorflow.to_float', 'tf.to_float', (['images_b'], {}), '(images_b)\n', (39486, 39496), True, 'import tensorflow as tf\n'), ((39541, 39567), 'tensorflow.to_float', 'tf.to_float', (['images_gray_b'], {}), '(images_gray_b)\n', (39552, 39567), True, 'import tensorflow as tf\n'), ((39612, 39638), 'tensorflow.to_float', 'tf.to_float', (['images_gray_b'], {}), '(images_gray_b)\n', (39623, 39638), True, 'import tensorflow as tf\n'), ((39683, 39709), 'tensorflow.to_float', 'tf.to_float', (['images_gray_r'], {}), '(images_gray_r)\n', (39694, 39709), True, 'import tensorflow as tf\n'), ((55884, 55944), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (55901, 55944), False, 'from unittest import mock\n'), ((56284, 56343), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {}), '(tensor_dict, preprocessing_options)\n', (56307, 56343), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((56623, 56744), 'tensorflow.constant', 'tf.constant', (['[[0.178947, 0.07173, 0.75789469, 0.66244733], [0.28421, 0.0, 
0.38947365, \n 0.57805908]]'], {'dtype': 'tf.float32'}), '([[0.178947, 0.07173, 0.75789469, 0.66244733], [0.28421, 0.0, \n 0.38947365, 0.57805908]], dtype=tf.float32)\n', (56634, 56744), True, 'import tensorflow as tf\n'), ((56835, 56871), 'tensorflow.constant', 'tf.constant', (['[7, 11]'], {'dtype': 'tf.int32'}), '([7, 11], dtype=tf.int32)\n', (56846, 56871), True, 'import tensorflow as tf\n'), ((59797, 59857), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (59814, 59857), False, 'from unittest import mock\n'), ((60228, 60302), 'object_detection.tensorflow_detect.core.preprocessor._strict_random_crop_image', 'preprocessor._strict_random_crop_image', (['image', 'boxes', 'labels', 'label_scores'], {}), '(image, boxes, labels, label_scores)\n', (60266, 60302), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((61122, 61182), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (61139, 61182), False, 'from unittest import mock\n'), ((61546, 61619), 'object_detection.tensorflow_detect.core.preprocessor._strict_random_crop_image', 'preprocessor._strict_random_crop_image', (['image', 'boxes', 'labels'], {'masks': 'masks'}), '(image, boxes, labels, masks=masks)\n', (61584, 61619), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((62382, 62442), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (62399, 62442), False, 'from unittest import mock\n'), ((62810, 62896), 'object_detection.tensorflow_detect.core.preprocessor._strict_random_crop_image', 'preprocessor._strict_random_crop_image', (['image', 
'boxes', 'labels'], {'keypoints': 'keypoints'}), '(image, boxes, labels, keypoints=\n keypoints)\n', (62848, 62896), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((64414, 64474), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (64431, 64474), False, 'from unittest import mock\n'), ((64804, 64903), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (64827, 64903), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((66643, 66703), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (66660, 66703), False, 'from unittest import mock\n'), ((67033, 67132), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (67056, 67132), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((69197, 69257), 'unittest.mock.patch.object', 'mock.patch.object', (['tf.image', '"""sample_distorted_bounding_box"""'], {}), "(tf.image, 'sample_distorted_bounding_box')\n", (69214, 69257), False, 'from unittest import mock\n'), ((69587, 69686), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, 
preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (69610, 69686), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((76192, 76242), 'unittest.mock.patch.object', 'mock.patch.object', (['preprocessor', '"""_random_integer"""'], {}), "(preprocessor, '_random_integer')\n", (76209, 76242), False, 'from unittest import mock\n'), ((76335, 76365), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (76346, 76365), True, 'import tensorflow as tf\n'), ((76396, 76495), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (76419, 76495), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((78155, 78205), 'unittest.mock.patch.object', 'mock.patch.object', (['preprocessor', '"""_random_integer"""'], {}), "(preprocessor, '_random_integer')\n", (78172, 78205), False, 'from unittest import mock\n'), ((78298, 78328), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (78309, 78328), True, 'import tensorflow as tf\n'), ((78359, 78458), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocessing_options'], {'func_arg_map': 'preprocessor_arg_map'}), '(tensor_dict, preprocessing_options, func_arg_map=\n preprocessor_arg_map)\n', (78382, 78458), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((81214, 81303), 'numpy.array', 'np.array', (['[[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]]'], {'dtype': 'np.float32'}), '([[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]], dtype=np\n 
.float32)\n', (81222, 81303), True, 'import numpy as np\n'), ((82968, 83047), 'numpy.array', 'np.array', (['[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)\n', (82976, 83047), True, 'import numpy as np\n'), ((84768, 84847), 'numpy.array', 'np.array', (['[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)\n', (84776, 84847), True, 'import numpy as np\n'), ((84886, 84997), 'numpy.array', 'np.array', (['[[[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]], [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]]]'], {'dtype': 'np.float32'}), '([[[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]], [[0.2, 0.4], [0.25, 0.5],\n [0.3, 0.6]]], dtype=np.float32)\n', (84894, 84997), True, 'import numpy as np\n'), ((97244, 97277), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_image_shape'], {}), '(in_image_shape)\n', (97261, 97277), True, 'import tensorflow as tf\n'), ((97295, 97328), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (97312, 97328), True, 'import tensorflow as tf\n'), ((97361, 97447), 'object_detection.tensorflow_detect.core.preprocessor.resize_image', 'preprocessor.resize_image', (['in_image', 'in_masks'], {'new_height': 'height', 'new_width': 'width'}), '(in_image, in_masks, new_height=height, new_width=\n width)\n', (97386, 97447), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((97478, 97497), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (97486, 97497), True, 'import tensorflow as tf\n'), ((97522, 97541), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (97530, 97541), True, 'import tensorflow as tf\n'), ((98584, 98617), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_image_shape'], {}), '(in_image_shape)\n', 
(98601, 98617), True, 'import tensorflow as tf\n'), ((98635, 98668), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (98652, 98668), True, 'import tensorflow as tf\n'), ((98701, 98787), 'object_detection.tensorflow_detect.core.preprocessor.resize_image', 'preprocessor.resize_image', (['in_image', 'in_masks'], {'new_height': 'height', 'new_width': 'width'}), '(in_image, in_masks, new_height=height, new_width=\n width)\n', (98726, 98787), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((98818, 98837), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (98826, 98837), True, 'import tensorflow as tf\n'), ((98862, 98881), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (98870, 98881), True, 'import tensorflow as tf\n'), ((99846, 99879), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_image_shape'], {}), '(in_image_shape)\n', (99863, 99879), True, 'import tensorflow as tf\n'), ((99897, 99930), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (99914, 99930), True, 'import tensorflow as tf\n'), ((99963, 100049), 'object_detection.tensorflow_detect.core.preprocessor.resize_image', 'preprocessor.resize_image', (['in_image', 'in_masks'], {'new_height': 'height', 'new_width': 'width'}), '(in_image, in_masks, new_height=height, new_width=\n width)\n', (99988, 100049), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((100080, 100099), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (100088, 100099), True, 'import tensorflow as tf\n'), ((100124, 100143), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (100132, 100143), True, 'import tensorflow as tf\n'), ((100791, 100818), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_shape'], 
{}), '(in_shape)\n', (100808, 100818), True, 'import tensorflow as tf\n'), ((100840, 100929), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim'}), '(in_image, min_dimension=min_dim, max_dimension\n =max_dim)\n', (100868, 100929), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((101379, 101428), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, 3)'}), '(tf.float32, shape=(None, None, 3))\n', (101393, 101428), True, 'import tensorflow as tf\n'), ((101450, 101539), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim'}), '(in_image, min_dimension=min_dim, max_dimension\n =max_dim)\n', (101478, 101539), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((101570, 101589), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (101578, 101589), True, 'import tensorflow as tf\n'), ((102209, 102258), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, 3)'}), '(tf.float32, shape=(None, None, 3))\n', (102223, 102258), True, 'import tensorflow as tf\n'), ((102280, 102396), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim', 'pad_to_max_dimension': '(True)'}), '(in_image, min_dimension=min_dim, max_dimension\n =max_dim, pad_to_max_dimension=True)\n', (102308, 102396), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((102526, 102545), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', 
(102534, 102545), True, 'import tensorflow as tf\n'), ((104249, 104282), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_image_shape'], {}), '(in_image_shape)\n', (104266, 104282), True, 'import tensorflow as tf\n'), ((104300, 104333), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (104317, 104333), True, 'import tensorflow as tf\n'), ((104366, 104464), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image', 'in_masks'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim'}), '(in_image, in_masks, min_dimension=min_dim,\n max_dimension=max_dim)\n', (104394, 104464), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((105262, 105311), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, 3)'}), '(tf.float32, shape=(None, None, 3))\n', (105276, 105311), True, 'import tensorflow as tf\n'), ((105329, 105381), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, None)'}), '(tf.float32, shape=(None, None, None))\n', (105343, 105381), True, 'import tensorflow as tf\n'), ((105414, 105539), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image', 'in_masks'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim', 'pad_to_max_dimension': '(True)'}), '(in_image, in_masks, min_dimension=min_dim,\n max_dimension=max_dim, pad_to_max_dimension=True)\n', (105442, 105539), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((105611, 105630), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (105619, 105630), True, 'import tensorflow as tf\n'), ((105655, 105674), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (105663, 105674), True, 'import 
tensorflow as tf\n'), ((106817, 106866), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, 3)'}), '(tf.float32, shape=(None, None, 3))\n', (106831, 106866), True, 'import tensorflow as tf\n'), ((106884, 106936), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, None)'}), '(tf.float32, shape=(None, None, None))\n', (106898, 106936), True, 'import tensorflow as tf\n'), ((106954, 106987), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (106971, 106987), True, 'import tensorflow as tf\n'), ((107020, 107118), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image', 'in_masks'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim'}), '(in_image, in_masks, min_dimension=min_dim,\n max_dimension=max_dim)\n', (107048, 107118), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((107150, 107169), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (107158, 107169), True, 'import tensorflow as tf\n'), ((107194, 107213), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (107202, 107213), True, 'import tensorflow as tf\n'), ((108354, 108387), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_image_shape'], {}), '(in_image_shape)\n', (108371, 108387), True, 'import tensorflow as tf\n'), ((108405, 108438), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (108422, 108438), True, 'import tensorflow as tf\n'), ((108471, 108569), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image', 'in_masks'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim'}), '(in_image, in_masks, min_dimension=min_dim,\n max_dimension=max_dim)\n', (108499, 108569), False, 'from 
object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((108601, 108620), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (108609, 108620), True, 'import tensorflow as tf\n'), ((108645, 108664), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (108653, 108664), True, 'import tensorflow as tf\n'), ((109079, 109124), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['image', '(500)', '(600)'], {}), '(image, 500, 600)\n', (109107, 109124), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((109461, 109488), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_shape'], {}), '(in_shape)\n', (109478, 109488), True, 'import tensorflow as tf\n'), ((109510, 109599), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_range', 'preprocessor.resize_to_range', (['in_image'], {'min_dimension': 'min_dim', 'max_dimension': 'max_dim'}), '(in_image, min_dimension=min_dim, max_dimension\n =max_dim)\n', (109538, 109599), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((109630, 109649), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (109638, 109649), True, 'import tensorflow as tf\n'), ((110428, 110477), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, 3)'}), '(tf.float32, shape=(None, None, 3))\n', (110442, 110477), True, 'import tensorflow as tf\n'), ((110495, 110547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, None)'}), '(tf.float32, shape=(None, None, None))\n', (110509, 110547), True, 'import tensorflow as tf\n'), ((110565, 110598), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (110582, 110598), True, 'import 
tensorflow as tf\n'), ((110631, 110710), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_min_dimension', 'preprocessor.resize_to_min_dimension', (['in_image', 'in_masks'], {'min_dimension': 'min_dim'}), '(in_image, in_masks, min_dimension=min_dim)\n', (110667, 110710), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((110746, 110765), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (110754, 110765), True, 'import tensorflow as tf\n'), ((110790, 110809), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (110798, 110809), True, 'import tensorflow as tf\n'), ((111939, 111972), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_image_shape'], {}), '(in_image_shape)\n', (111956, 111972), True, 'import tensorflow as tf\n'), ((111990, 112023), 'tensorflow.random_uniform', 'tf.random_uniform', (['in_masks_shape'], {}), '(in_masks_shape)\n', (112007, 112023), True, 'import tensorflow as tf\n'), ((112056, 112135), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_min_dimension', 'preprocessor.resize_to_min_dimension', (['in_image', 'in_masks'], {'min_dimension': 'min_dim'}), '(in_image, in_masks, min_dimension=min_dim)\n', (112092, 112135), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((112171, 112190), 'tensorflow.shape', 'tf.shape', (['out_image'], {}), '(out_image)\n', (112179, 112190), True, 'import tensorflow as tf\n'), ((112215, 112234), 'tensorflow.shape', 'tf.shape', (['out_masks'], {}), '(out_masks)\n', (112223, 112234), True, 'import tensorflow as tf\n'), ((112663, 112711), 'object_detection.tensorflow_detect.core.preprocessor.resize_to_min_dimension', 'preprocessor.resize_to_min_dimension', (['image', '(500)'], {}), '(image, 500)\n', (112699, 112711), False, 'from object_detection.tensorflow_detect.core import standard_fields as 
fields, preprocessor, preprocessor_cache\n'), ((114310, 114333), 'tensorflow.zeros', 'tf.zeros', (['(240, 320, 3)'], {}), '((240, 320, 3))\n', (114318, 114333), True, 'import tensorflow as tf\n'), ((114373, 114427), 'object_detection.tensorflow_detect.core.preprocessor.subtract_channel_mean', 'preprocessor.subtract_channel_mean', (['image'], {'means': 'means'}), '(image, means=means)\n', (114407, 114427), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((114750, 114788), 'tensorflow.constant', 'tf.constant', (['[1, 4, 2]'], {'dtype': 'tf.int32'}), '([1, 4, 2], dtype=tf.int32)\n', (114761, 114788), True, 'import tensorflow as tf\n'), ((114805, 114857), 'object_detection.tensorflow_detect.core.preprocessor.one_hot_encoding', 'preprocessor.one_hot_encoding', (['labels'], {'num_classes': '(5)'}), '(labels, num_classes=5)\n', (114834, 114857), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((20585, 20674), 'object_detection.tensorflow_detect.core.preprocessor.preprocess', 'preprocessor.preprocess', (['tensor_dict', 'preprocess_options', 'preprocessor_arg_map', 'cache'], {}), '(tensor_dict, preprocess_options,\n preprocessor_arg_map, cache)\n', (20608, 20674), False, 'from object_detection.tensorflow_detect.core import standard_fields as fields, preprocessor, preprocessor_cache\n'), ((35770, 35789), 'tensorflow.to_float', 'tf.to_float', (['images'], {}), '(images)\n', (35781, 35789), True, 'import tensorflow as tf\n'), ((35821, 35840), 'tensorflow.to_float', 'tf.to_float', (['images'], {}), '(images)\n', (35832, 35840), True, 'import tensorflow as tf\n'), ((56058, 56098), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (56069, 56098), True, 'import tensorflow as tf\n'), ((56111, 56154), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], 
{'dtype': 'tf.int32'}), '([190, 237, -1], dtype=tf.int32)\n', (56122, 56154), True, 'import tensorflow as tf\n'), ((56171, 56232), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)\n', (56182, 56232), True, 'import tensorflow as tf\n'), ((59987, 60027), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (59998, 60027), True, 'import tensorflow as tf\n'), ((60039, 60082), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], {'dtype': 'tf.int32'}), '([190, 237, -1], dtype=tf.int32)\n', (60050, 60082), True, 'import tensorflow as tf\n'), ((60094, 60155), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)\n', (60105, 60155), True, 'import tensorflow as tf\n'), ((60550, 60654), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469,\n 1.0]], dtype=np.float32)\n', (60558, 60654), True, 'import numpy as np\n'), ((61312, 61352), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (61323, 61352), True, 'import tensorflow as tf\n'), ((61364, 61407), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], {'dtype': 'tf.int32'}), '([190, 237, -1], dtype=tf.int32)\n', (61375, 61407), True, 'import tensorflow as tf\n'), ((61419, 61480), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)\n', (61430, 61480), True, 'import tensorflow as tf\n'), ((61824, 61928), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 
0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469,\n 1.0]], dtype=np.float32)\n', (61832, 61928), True, 'import numpy as np\n'), ((62572, 62612), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (62583, 62612), True, 'import tensorflow as tf\n'), ((62624, 62667), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], {'dtype': 'tf.int32'}), '([190, 237, -1], dtype=tf.int32)\n', (62635, 62667), True, 'import tensorflow as tf\n'), ((62679, 62740), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)\n', (62690, 62740), True, 'import tensorflow as tf\n'), ((63105, 63209), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469,\n 1.0]], dtype=np.float32)\n', (63113, 63209), True, 'import numpy as np\n'), ((63261, 63432), 'numpy.array', 'np.array', (['[[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], [[0.38947368, \n 0.07173], [0.49473682, 0.24050637], [0.60000002, 0.40928277]]]'], {'dtype': 'np.float32'}), '([[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], [[\n 0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, 0.40928277\n ]]], dtype=np.float32)\n', (63269, 63432), True, 'import numpy as np\n'), ((64604, 64644), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (64615, 64644), True, 'import tensorflow as tf\n'), ((64656, 64699), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], {'dtype': 'tf.int32'}), '([190, 237, -1], dtype=tf.int32)\n', (64667, 64699), True, 'import tensorflow as tf\n'), ((64711, 64772), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], 
dtype=tf.float32)\n', (64722, 64772), True, 'import tensorflow as tf\n'), ((65562, 65666), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469,\n 1.0]], dtype=np.float32)\n', (65570, 65666), True, 'import numpy as np\n'), ((66833, 66873), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (66844, 66873), True, 'import tensorflow as tf\n'), ((66885, 66928), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], {'dtype': 'tf.int32'}), '([190, 237, -1], dtype=tf.int32)\n', (66896, 66928), True, 'import tensorflow as tf\n'), ((66940, 67001), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)\n', (66951, 67001), True, 'import tensorflow as tf\n'), ((67798, 67902), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469,\n 1.0]], dtype=np.float32)\n', (67806, 67902), True, 'import numpy as np\n'), ((67963, 68137), 'numpy.array', 'np.array', (['[[[0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, 0.40928277]\n ], [[0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, \n 0.40928277]]]'], {}), '([[[0.38947368, 0.07173], [0.49473682, 0.24050637], [0.60000002, \n 0.40928277]], [[0.38947368, 0.07173], [0.49473682, 0.24050637], [\n 0.60000002, 0.40928277]]])\n', (67971, 68137), True, 'import numpy as np\n'), ((69387, 69427), 'tensorflow.constant', 'tf.constant', (['[6, 143, 0]'], {'dtype': 'tf.int32'}), '([6, 143, 0], dtype=tf.int32)\n', (69398, 69427), True, 'import tensorflow as tf\n'), ((69439, 69482), 'tensorflow.constant', 'tf.constant', (['[190, 237, -1]'], {'dtype': 'tf.int32'}), '([190, 237, -1], 
dtype=tf.int32)\n', (69450, 69482), True, 'import tensorflow as tf\n'), ((69494, 69555), 'tensorflow.constant', 'tf.constant', (['[[[0.03, 0.3575, 0.98, 0.95]]]'], {'dtype': 'tf.float32'}), '([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32)\n', (69505, 69555), True, 'import tensorflow as tf\n'), ((70352, 70456), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469, 1.0]]'], {'dtype': 'np.float32'}), '([[0.0, 0.0, 0.75789469, 1.0], [0.23157893, 0.24050637, 0.75789469,\n 1.0]], dtype=np.float32)\n', (70360, 70456), True, 'import numpy as np\n'), ((70517, 70643), 'numpy.array', 'np.array', (['[[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], [[np.nan, np.nan],\n [np.nan, np.nan], [np.nan, np.nan]]]'], {}), '([[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], [[np.nan,\n np.nan], [np.nan, np.nan], [np.nan, np.nan]]])\n', (70525, 70643), True, 'import numpy as np\n'), ((77150, 77199), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.75, 1.0]'], {'dtype': 'np.float32'}), '([0.0, 0.5, 0.75, 1.0], dtype=np.float32)\n', (77158, 77199), True, 'import numpy as np\n'), ((79133, 79182), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.75, 1.0]'], {'dtype': 'np.float32'}), '([0.0, 0.5, 0.75, 1.0], dtype=np.float32)\n', (79141, 79182), True, 'import numpy as np\n'), ((79212, 79276), 'numpy.array', 'np.array', (['[[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]]'], {'dtype': 'np.float32'}), '([[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32)\n', (79220, 79276), True, 'import numpy as np\n'), ((87667, 87752), 'numpy.all', 'np.all', (['(boxes_[:, 2] - boxes_[:, 0] >= padded_boxes_[:, 2] - padded_boxes_[:, 0])'], {}), '(boxes_[:, 2] - boxes_[:, 0] >= padded_boxes_[:, 2] - padded_boxes_[:, 0]\n )\n', (87673, 87752), True, 'import numpy as np\n'), ((87786, 87871), 'numpy.all', 'np.all', (['(boxes_[:, 3] - boxes_[:, 1] >= padded_boxes_[:, 3] - padded_boxes_[:, 1])'], {}), '(boxes_[:, 3] - boxes_[:, 1] >= padded_boxes_[:, 3] - padded_boxes_[:, 1]\n 
)\n', (87792, 87871), True, 'import numpy as np\n'), ((90253, 90338), 'numpy.all', 'np.all', (['(boxes_[:, 2] - boxes_[:, 0] >= padded_boxes_[:, 2] - padded_boxes_[:, 0])'], {}), '(boxes_[:, 2] - boxes_[:, 0] >= padded_boxes_[:, 2] - padded_boxes_[:, 0]\n )\n', (90259, 90338), True, 'import numpy as np\n'), ((90372, 90457), 'numpy.all', 'np.all', (['(boxes_[:, 3] - boxes_[:, 1] >= padded_boxes_[:, 3] - padded_boxes_[:, 1])'], {}), '(boxes_[:, 3] - boxes_[:, 1] >= padded_boxes_[:, 3] - padded_boxes_[:, 1]\n )\n', (90378, 90457), True, 'import numpy as np\n'), ((101784, 101810), 'numpy.random.randn', 'np.random.randn', (['*in_shape'], {}), '(*in_shape)\n', (101799, 101810), True, 'import numpy as np\n'), ((102672, 102698), 'numpy.random.randn', 'np.random.randn', (['*in_shape'], {}), '(*in_shape)\n', (102687, 102698), True, 'import numpy as np\n'), ((105867, 105899), 'numpy.random.randn', 'np.random.randn', (['*in_image_shape'], {}), '(*in_image_shape)\n', (105882, 105899), True, 'import numpy as np\n'), ((105927, 105959), 'numpy.random.randn', 'np.random.randn', (['*in_masks_shape'], {}), '(*in_masks_shape)\n', (105942, 105959), True, 'import numpy as np\n'), ((107406, 107438), 'numpy.random.randn', 'np.random.randn', (['*in_image_shape'], {}), '(*in_image_shape)\n', (107421, 107438), True, 'import numpy as np\n'), ((107466, 107498), 'numpy.random.randn', 'np.random.randn', (['*in_masks_shape'], {}), '(*in_masks_shape)\n', (107481, 107498), True, 'import numpy as np\n'), ((111002, 111034), 'numpy.random.randn', 'np.random.randn', (['*in_image_shape'], {}), '(*in_image_shape)\n', (111017, 111034), True, 'import numpy as np\n'), ((111062, 111094), 'numpy.random.randn', 'np.random.randn', (['*in_masks_shape'], {}), '(*in_masks_shape)\n', (111077, 111094), True, 'import numpy as np\n')] |
import scrapy
from activesport.items import ActivesportItem
import logging
from scrapy.utils.log import configure_logging
# Disable scrapy's default root log handler so the basicConfig below
# controls all log output instead.
configure_logging(install_root_handler=False)
# Send every record at INFO level or above to log.txt.
logging.basicConfig(
    filename='log.txt',
    format='%(levelname)s: %(message)s',
    level=logging.INFO
)
"""
Go to the first categories page (such as Bikes and Frames)
Parse data out from categories page
Go to the each link to subcategories (such as Bikes, Frames, Scooters)
Parse data out from subcategories page
Go to the each link to item's page
Parse data out from item's page
Go to the next subcategories page if it exists
"""
class ActivesportSpider(scrapy.Spider):
    """Spider for activesport.co.

    Crawl flow:
      1. ``parse`` -- a top-level category page; follow subcategory links.
      2. ``parse_subcategories_level`` -- follow sub-subcategory links.
      3. ``parse_item_follow_next_page`` -- follow item links and the
         "Next" pagination link.
      4. ``parse_item_content`` -- yield one ``ActivesportItem`` with
         title, price, link and description.
    """
    name = "activesport"
    allowed_domains = ["activesport.co"]
    # list of categories (only Bikes_Frames is enabled; move any of the
    # commented-out URLs into start_urls to crawl those categories too)
    start_urls = [
        "http://activesport.co/epages/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e.sf/en_GB/?ObjectPath=/Shops/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e/Categories/Bikes_Frames", ]
    # "http://activesport.co/epages/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e.sf/en_GB/?ObjectPath=/Shops/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e/Categories/Accessories",
    # "http://activesport.co/epages/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e.sf/en_GB/?ObjectPath=/Shops/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e/Categories/Components",
    # "http://activesport.co/epages/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e.sf/en_GB/?ObjectPath=/Shops/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e/Categories/Apparel",
    # "http://activesport.co/epages/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e.sf/en_GB/?ObjectPath=/Shops/80c85f8f-7a95-4b1c-9c30-e64b314f3f2e/Categories/Tech"
    # ]
    # Running total of subcategory links discovered, accumulated on the
    # class so every parse() call contributes to it.
    sub_categories_number = 0
    # XPath queries tried in order for the item description; the first
    # one yielding any text wins.
    _DESCRIPTION_XPATHS = (
        '//div[@id="tab-additional_information"]//text()[normalize-space()]',
        '//div[@class="description"]//ul[not(@id="list1")]/li//text()[normalize-space()]',
        '//div[contains(@style, "color")]/text()[normalize-space()]',
        '//div[@class="description"]//td//text()[normalize-space()]',
        '//div[@class="description"]//p//text()[normalize-space()]',
    )
    def parse(self, response):
        """Retrieve data out from section's catalogue - the subcategories level"""
        self.logger.info('Visited on sections catalogue %s', response.url)
        # Extract link to the second level section, follow them
        list_of_subcatalogues = response.xpath(
            '//div[@class="InfoArea"]/h3/a/@href').extract()[:1]
        length = len(list_of_subcatalogues)
        # BUG FIX: the original `sub_categories_number = + length` only
        # bound a throwaway local to `+length`; accumulate on the class
        # counter as intended.
        ActivesportSpider.sub_categories_number += length
        self.logger.debug('List of subcategories has %s links' % (length,))
        for link in list_of_subcatalogues:
            self.logger.debug('The subcategories url %s' % (link, ))
            url = response.urljoin(link)
            self.logger.debug('The absolute subcategoties url is %s' % (url, ))
            yield scrapy.Request(url, callback=self.parse_subcategories_level)
        self.logger.debug('There are %s subcategories there' %
                          (ActivesportSpider.sub_categories_number, ))
    def parse_subcategories_level(self, response):
        """Retrieve data out from subcategories levels"""
        self.logger.info('We are in subcategories level %s', response.url)
        # Extract link to the sub_subcategories, follow them.
        # `extract()` returns [] when nothing matches, so the bare
        # `try/except: pass` that used to wrap this could only hide real
        # programming errors and has been removed.
        list_of_sub_subcategories = response.xpath(
            '//h3[not(@class)]/a/@href').extract()
        length = len(list_of_sub_subcategories)
        self.logger.debug(
            'List of sub-subcategories has %s links' % (length,))
        for link in list_of_sub_subcategories:
            self.logger.debug(
                'The sub-subcategories url is %s' % (link, ))
            url = response.urljoin(link)
            self.logger.debug(
                'The sub-subcategories absolute url is %s' % (url, ))
            yield scrapy.Request(url, callback=self.parse_item_follow_next_page)
    def parse_item_follow_next_page(self, response):
        """" Retrieve data from sub-subcategories level - items link, next page link"""
        self.logger.info('We are in sub-subcategories level %s', response.url)
        items_link = response.xpath(
            '//div[@class="InfoArea"]/h3[@class="TopPaddingWide"]/a/@href').extract()
        for link in items_link:
            self.logger.debug('The items link is %s' % (link, ))
            url = response.urljoin(link)
            self.logger.debug('The items pages absolute url is %s' % (url))
            yield scrapy.Request(url, callback=self.parse_item_content)
        # Going to the next items page if it exists.  Test the extracted
        # list explicitly instead of indexing inside a bare try/except,
        # which silently swallowed *every* error, not just the expected
        # missing-link IndexError.
        next_page = response.xpath(
            '//ul[@class="PagerSizeContainer"]/li/a[@title="Next"]/@href').extract()
        if next_page:
            next_page_url = response.urljoin(next_page[0])
            self.logger.debug(
                'This is next items pages absolute url %s' % (next_page_url))
            yield scrapy.Request(next_page_url, self.parse_item_follow_next_page)
    def parse_item_content(self, response):
        """Extract all data about the particular item"""
        self.logger.info('Item parse function on %s' % (response.url, ))
        item = ActivesportItem()
        item['title'] = response.xpath(
            '//div[contains(@class, "InfoArea")]/h1/text()').extract()
        price = response.xpath(
            '//span[(@itemprop="price")]/text()').extract()
        item['price'] = price[0] if price else None
        link = response.xpath(
            '//li[contains(@style, "margin-bottom")]/a/@href').extract()
        item['link'] = link[0] if link else None
        # Items description can have several values: try each query in
        # turn and keep the first non-empty result (Python 2: map()
        # returns a list, so truth-testing it is fine).
        item['description'] = None
        for query in self._DESCRIPTION_XPATHS:
            desc = map(unicode.strip,
                       response.xpath(query).extract())
            if desc:
                item['description'] = str(desc).split(']')[0]
                break
        yield item
| [
"scrapy.utils.log.configure_logging",
"scrapy.Request",
"activesport.items.ActivesportItem",
"logging.basicConfig"
] | [((124, 169), 'scrapy.utils.log.configure_logging', 'configure_logging', ([], {'install_root_handler': '(False)'}), '(install_root_handler=False)\n', (141, 169), False, 'from scrapy.utils.log import configure_logging\n'), ((170, 270), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""log.txt"""', 'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(filename='log.txt', format='%(levelname)s: %(message)s',\n level=logging.INFO)\n", (189, 270), False, 'import logging\n'), ((4828, 4845), 'activesport.items.ActivesportItem', 'ActivesportItem', ([], {}), '()\n', (4843, 4845), False, 'from activesport.items import ActivesportItem\n'), ((2402, 2462), 'scrapy.Request', 'scrapy.Request', (['url'], {'callback': 'self.parse_subcategories_level'}), '(url, callback=self.parse_subcategories_level)\n', (2416, 2462), False, 'import scrapy\n'), ((4103, 4156), 'scrapy.Request', 'scrapy.Request', (['url'], {'callback': 'self.parse_item_content'}), '(url, callback=self.parse_item_content)\n', (4117, 4156), False, 'import scrapy\n'), ((4541, 4604), 'scrapy.Request', 'scrapy.Request', (['next_page_url', 'self.parse_item_follow_next_page'], {}), '(next_page_url, self.parse_item_follow_next_page)\n', (4555, 4604), False, 'import scrapy\n'), ((3431, 3493), 'scrapy.Request', 'scrapy.Request', (['url'], {'callback': 'self.parse_item_follow_next_page'}), '(url, callback=self.parse_item_follow_next_page)\n', (3445, 3493), False, 'import scrapy\n')] |
# -*- coding: utf-8 -*-
# pip install pdfminer.six -i https://pypi.doubanio.com/simple
import io
import sys

# BUG FIX: explicit imports.  The original relied on
# `from pdfminer.high_level import *` to (implicitly) provide `sys`,
# which breaks if pdfminer ever stops re-exporting it.
from pdfminer.high_level import extract_text

# Re-wrap stdout so the extracted text prints as UTF-8 regardless of
# the console's locale.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')


def return_txt():
    """Extract the text of the PDF named on the command line and print it."""
    name = sys.argv[1]
    text = extract_text(name)
    print(text)


if __name__ == '__main__':
    return_txt()
"io.TextIOWrapper"
] | [((146, 199), 'io.TextIOWrapper', 'io.TextIOWrapper', (['sys.stdout.buffer'], {'encoding': '"""utf-8"""'}), "(sys.stdout.buffer, encoding='utf-8')\n", (162, 199), False, 'import io\n')] |
# $Id: misc.py 8595 2020-12-15 23:06:58Z milde $
# Authors: <NAME> <<EMAIL>>; <NAME>
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
__docformat__ = 'reStructuredText'
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.utils.error_reporting import locale_encoding
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
class Include(Directive):
"""
Include content read from a separate source file.
Content may be parsed by the parser, or included as a literal
block. The encoding of the included file can be specified. Only
a part of the given file argument may be included by specifying
start and end line or text to match before and/or after the text
to be used.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'literal': directives.flag,
'code': directives.unchanged,
'encoding': directives.encoding,
'parser': directives.parser_name,
'tab-width': int,
'start-line': int,
'end-line': int,
'start-after': directives.unchanged_required,
'end-before': directives.unchanged_required,
# ignored except for 'literal' or 'code':
'number-lines': directives.unchanged, # integer or None
'class': directives.class_option,
'name': directives.unchanged}
standard_include_path = os.path.join(os.path.dirname(states.__file__),
'include')
def run(self):
"""Include a file as part of the content of this reST file."""
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
path = directives.path(self.arguments[0])
if path.startswith('<') and path.endswith('>'):
path = os.path.join(self.standard_include_path, path[1:-1])
path = os.path.normpath(os.path.join(source_dir, path))
path = utils.relative_path(None, path)
path = nodes.reprunicode(path)
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
tab_width = self.options.get(
'tab-width', self.state.document.settings.tab_width)
try:
self.state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe(u'Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe(u'Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
# Get to-be-included content
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if startline or (endline is not None):
lines = include_file.readlines()
rawtext = ''.join(lines[startline:endline])
else:
rawtext = include_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s' %
(self.name, ErrorString(error)))
# start-after/end-before: no restrictions on newlines in match-text,
# and no restrictions on matching inside lines vs. line boundaries
after_text = self.options.get('start-after', None)
if after_text:
# skip content in rawtext before *and incl.* a matching text
after_index = rawtext.find(after_text)
if after_index < 0:
raise self.severe('Problem with "start-after" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[after_index + len(after_text):]
before_text = self.options.get('end-before', None)
if before_text:
# skip content in rawtext after *and incl.* a matching text
before_index = rawtext.find(before_text)
if before_index < 0:
raise self.severe('Problem with "end-before" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[:before_index]
include_lines = statemachine.string2lines(rawtext, tab_width,
convert_whitespace=True)
for i, line in enumerate(include_lines):
if len(line) > self.state.document.settings.line_length_limit:
raise self.warning('"%s": line %d exceeds the'
' line-length-limit.' % (path, i+1))
if 'literal' in self.options:
# Don't convert tabs to spaces, if `tab_width` is negative.
if tab_width >= 0:
text = rawtext.expandtabs(tab_width)
else:
text = rawtext
literal_block = nodes.literal_block(rawtext, source=path,
classes=self.options.get('class', []))
literal_block.line = 1
self.add_name(literal_block)
if 'number-lines' in self.options:
try:
startline = int(self.options['number-lines'] or 1)
except ValueError:
raise self.error(':number-lines: with non-integer '
'start value')
endline = startline + len(include_lines)
if text.endswith('\n'):
text = text[:-1]
tokens = NumberLines([([], text)], startline, endline)
for classes, value in tokens:
if classes:
literal_block += nodes.inline(value, value,
classes=classes)
else:
literal_block += nodes.Text(value)
else:
literal_block += nodes.Text(text)
return [literal_block]
if 'code' in self.options:
self.options['source'] = path
# Don't convert tabs to spaces, if `tab_width` is negative:
if tab_width < 0:
include_lines = rawtext.splitlines()
codeblock = CodeBlock(self.name,
[self.options.pop('code')], # arguments
self.options,
include_lines, # content
self.lineno,
self.content_offset,
self.block_text,
self.state,
self.state_machine)
return codeblock.run()
if 'parser' in self.options:
parser = self.options['parser']()
# parse into a new (dummy) document
document = utils.new_document(path, self.state.document.settings)
parser.parse('\n'.join(include_lines), document)
return document.children
# include as rST source
#
# Prevent circular inclusion:
source = utils.relative_path(None, source)
clip_options = (startline, endline, before_text, after_text)
include_log = self.state.document.include_log
if not include_log: # new document:
# log entries: (<source>, <clip-options>, <insertion end index>)
include_log = [(source, (None,None,None,None), sys.maxsize/2)]
# cleanup: we may have passed the last inclusion(s):
include_log = [entry for entry in include_log
if entry[2] >= self.lineno]
if (path, clip_options) in [(pth, opt)
for (pth, opt, e) in include_log]:
raise self.warning('circular inclusion in "%s" directive: %s'
% (self.name, ' < '.join([path] + [pth for (pth, opt, e)
in include_log[::-1]])))
# include as input
self.state_machine.insert_input(include_lines, path)
# update include-log
include_log.append((path, clip_options, self.lineno))
self.state.document.include_log = [(pth, opt, e+len(include_lines)+2)
for (pth, opt, e) in include_log]
return []
class Raw(Directive):
"""
Pass through content unchanged
Content is included in output based on type argument
Content may be included inline (content section of directive) or
imported from a file or url.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding}
has_content = True
def run(self):
if (not self.state.document.settings.raw_enabled
or (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options))):
raise self.warning('"%s" directive disabled.' % self.name)
attributes = {'format': ' '.join(self.arguments[0].lower().split())}
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
if self.content:
if 'file' in self.options or 'url' in self.options:
raise self.error(
'"%s" directive may not both specify an external file '
'and have content.' % self.name)
text = '\n'.join(self.content)
elif 'file' in self.options:
if 'url' in self.options:
raise self.error(
'The "file" and "url" options may not be simultaneously '
'specified for the "%s" directive.' % self.name)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
path = os.path.normpath(os.path.join(source_dir,
self.options['file']))
path = utils.relative_path(None, path)
try:
raw_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
# TODO: currently, raw input files are recorded as
# dependencies even if not used for the chosen output format.
self.state.document.settings.record_dependencies.add(path)
except IOError as error:
raise self.severe(u'Problems with "%s" directive path:\n%s.'
% (self.name, ErrorString(error)))
try:
text = raw_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s'
% (self.name, ErrorString(error)))
attributes['source'] = path
elif 'url' in self.options:
source = self.options['url']
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
if sys.version_info >= (3, 0):
from urllib.request import urlopen
from urllib.error import URLError
else:
from urllib2 import urlopen, URLError
try:
raw_text = urlopen(source).read()
except (URLError, IOError, OSError) as error:
raise self.severe(u'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], ErrorString(error)))
raw_file = io.StringInput(source=raw_text, source_path=source,
encoding=encoding,
error_handler=e_handler)
try:
text = raw_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s'
% (self.name, ErrorString(error)))
attributes['source'] = source
else:
# This will always fail because there is no content.
self.assert_has_content()
raw_node = nodes.raw('', text, **attributes)
(raw_node.source,
raw_node.line) = self.state_machine.get_source_and_line(self.lineno)
return [raw_node]
class Replace(Directive):
has_content = True
def run(self):
if not isinstance(self.state, states.SubstitutionDef):
raise self.error(
'Invalid context: the "%s" directive can only be used within '
'a substitution definition.' % self.name)
self.assert_has_content()
text = '\n'.join(self.content)
element = nodes.Element(text)
self.state.nested_parse(self.content, self.content_offset,
element)
# element might contain [paragraph] + system_message(s)
node = None
messages = []
for elem in element:
if not node and isinstance(elem, nodes.paragraph):
node = elem
elif isinstance(elem, nodes.system_message):
elem['backrefs'] = []
messages.append(elem)
else:
return [
self.state_machine.reporter.error(
'Error in "%s" directive: may contain a single paragraph '
'only.' % (self.name), line=self.lineno) ]
if node:
return messages + node.children
return messages
class Unicode(Directive):
r"""
Convert Unicode character codes (numbers) to characters. Codes may be
decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\x``,
``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style numeric character
entities (e.g. ``☮``). Text following ".." is a comment and is
ignored. Spaces are ignored, and any other text remains as-is.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'trim': directives.flag,
'ltrim': directives.flag,
'rtrim': directives.flag}
comment_pattern = re.compile(r'( |\n|^)\.\. ')
def run(self):
if not isinstance(self.state, states.SubstitutionDef):
raise self.error(
'Invalid context: the "%s" directive can only be used within '
'a substitution definition.' % self.name)
substitution_definition = self.state_machine.node
if 'trim' in self.options:
substitution_definition.attributes['ltrim'] = 1
substitution_definition.attributes['rtrim'] = 1
if 'ltrim' in self.options:
substitution_definition.attributes['ltrim'] = 1
if 'rtrim' in self.options:
substitution_definition.attributes['rtrim'] = 1
codes = self.comment_pattern.split(self.arguments[0])[0].split()
element = nodes.Element()
for code in codes:
try:
decoded = directives.unicode_code(code)
except ValueError as error:
raise self.error(u'Invalid character code: %s\n%s'
% (code, ErrorString(error)))
element += nodes.Text(decoded)
return element.children
class Class(Directive):
"""
Set a "class" attribute on the directive content or the next element.
When applied to the next element, a "pending" element is inserted, and a
transform does the work later.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
has_content = True
def run(self):
try:
class_value = directives.class_option(self.arguments[0])
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node_list = []
if self.content:
container = nodes.Element()
self.state.nested_parse(self.content, self.content_offset,
container)
for node in container:
node['classes'].extend(class_value)
node_list.extend(container.children)
else:
pending = nodes.pending(
misc.ClassAttribute,
{'class': class_value, 'directive': self.name},
self.block_text)
self.state_machine.document.note_pending(pending)
node_list.append(pending)
return node_list
class Role(Directive):
has_content = True
argument_pattern = re.compile(r'(%s)\s*(\(\s*(%s)\s*\)\s*)?$'
% ((states.Inliner.simplename,) * 2))
def run(self):
"""Dynamically create and register a custom interpreted text role."""
if self.content_offset > self.lineno or not self.content:
raise self.error('"%s" directive requires arguments on the first '
'line.' % self.name)
args = self.content[0]
match = self.argument_pattern.match(args)
if not match:
raise self.error('"%s" directive arguments not valid role names: '
'"%s".' % (self.name, args))
new_role_name = match.group(1)
base_role_name = match.group(3)
messages = []
if base_role_name:
base_role, messages = roles.role(
base_role_name, self.state_machine.language, self.lineno,
self.state.reporter)
if base_role is None:
error = self.state.reporter.error(
'Unknown interpreted text role "%s".' % base_role_name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
else:
base_role = roles.generic_custom_role
assert not hasattr(base_role, 'arguments'), (
'Supplemental directive arguments for "%s" directive not '
'supported (specified by "%r" role).' % (self.name, base_role))
try:
converted_role = convert_directive_function(base_role)
(arguments, options, content, content_offset) = (
self.state.parse_directive_block(
self.content[1:], self.content_offset, converted_role,
option_presets={}))
except states.MarkupError as detail:
error = self.state_machine.reporter.error(
'Error in "%s" directive:\n%s.' % (self.name, detail),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
if 'class' not in options:
try:
options['class'] = directives.class_option(new_role_name)
except ValueError as detail:
error = self.state_machine.reporter.error(
u'Invalid argument for "%s" directive:\n%s.'
% (self.name, SafeString(detail)), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return messages + [error]
role = roles.CustomRole(new_role_name, base_role, options, content)
roles.register_local_role(new_role_name, role)
return messages
class DefaultRole(Directive):
"""Set the default interpreted text role."""
optional_arguments = 1
final_argument_whitespace = False
def run(self):
if not self.arguments:
if '' in roles._roles:
# restore the "default" default role
del roles._roles['']
return []
role_name = self.arguments[0]
role, messages = roles.role(role_name, self.state_machine.language,
self.lineno, self.state.reporter)
if role is None:
error = self.state.reporter.error(
'Unknown interpreted text role "%s".' % role_name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
roles._roles[''] = role
return messages
class Title(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
def run(self):
self.state_machine.document['title'] = self.arguments[0]
return []
class Date(Directive):
has_content = True
def run(self):
if not isinstance(self.state, states.SubstitutionDef):
raise self.error(
'Invalid context: the "%s" directive can only be used within '
'a substitution definition.' % self.name)
format_str = '\n'.join(self.content) or '%Y-%m-%d'
if sys.version_info< (3, 0):
try:
format_str = format_str.encode(locale_encoding or 'utf-8')
except UnicodeEncodeError:
raise self.warning(u'Cannot encode date format string '
u'with locale encoding "%s".' % locale_encoding)
# @@@
# Use timestamp from the `SOURCE_DATE_EPOCH`_ environment variable?
# Pro: Docutils-generated documentation
# can easily be part of `reproducible software builds`__
#
# __ https://reproducible-builds.org/
#
# Con: Changes the specs, hard to predict behaviour,
#
# See also the discussion about \date \time \year in TeX
# http://tug.org/pipermail/tex-k/2016-May/002704.html
# source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
# if (source_date_epoch):
# text = time.strftime(format_str,
# time.gmtime(int(source_date_epoch)))
# else:
text = time.strftime(format_str)
if sys.version_info< (3, 0):
# `text` is a byte string that may contain non-ASCII characters:
try:
text = text.decode(locale_encoding or 'utf-8')
except UnicodeDecodeError:
text = text.decode(locale_encoding or 'utf-8', 'replace')
raise self.warning(u'Error decoding "%s"'
u'with locale encoding "%s".' % (text, locale_encoding))
return [nodes.Text(text)]
class TestDirective(Directive):
"""This directive is useful only for testing purposes."""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'option': directives.unchanged_required}
has_content = True
def run(self):
if self.content:
text = '\n'.join(self.content)
info = self.state_machine.reporter.info(
'Directive processed. Type="%s", arguments=%r, options=%r, '
'content:' % (self.name, self.arguments, self.options),
nodes.literal_block(text, text), line=self.lineno)
else:
info = self.state_machine.reporter.info(
'Directive processed. Type="%s", arguments=%r, options=%r, '
'content: None' % (self.name, self.arguments, self.options),
line=self.lineno)
return [info]
# Old-style, functional definition:
#
# def directive_test_function(name, arguments, options, content, lineno,
# content_offset, block_text, state, state_machine):
# """This directive is useful only for testing purposes."""
# if content:
# text = '\n'.join(content)
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content:' % (name, arguments, options),
# nodes.literal_block(text, text), line=lineno)
# else:
# info = state_machine.reporter.info(
# 'Directive processed. Type="%s", arguments=%r, options=%r, '
# 'content: None' % (name, arguments, options), line=lineno)
# return [info]
#
# directive_test_function.arguments = (0, 1, 1)
# directive_test_function.options = {'option': directives.unchanged_required}
# directive_test_function.content = 1
| [
"docutils.parsers.rst.directives.path",
"docutils.nodes.inline",
"re.compile",
"docutils.nodes.literal_block",
"docutils.nodes.Text",
"docutils.parsers.rst.directives.body.NumberLines",
"docutils.statemachine.string2lines",
"docutils.parsers.rst.directives.class_option",
"docutils.parsers.rst.direct... | [((16044, 16074), 're.compile', 're.compile', (['"""( |\\\\n|^)\\\\.\\\\. """'], {}), "('( |\\\\n|^)\\\\.\\\\. ')\n", (16054, 16074), False, 'import re\n'), ((18588, 18678), 're.compile', 're.compile', (["('(%s)\\\\s*(\\\\(\\\\s*(%s)\\\\s*\\\\)\\\\s*)?$' % ((states.Inliner.simplename,) * 2))"], {}), "('(%s)\\\\s*(\\\\(\\\\s*(%s)\\\\s*\\\\)\\\\s*)?$' % ((states.Inliner.\n simplename,) * 2))\n", (18598, 18678), False, 'import re\n'), ((2507, 2541), 'docutils.parsers.rst.directives.path', 'directives.path', (['self.arguments[0]'], {}), '(self.arguments[0])\n', (2522, 2541), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((2753, 2784), 'docutils.utils.relative_path', 'utils.relative_path', (['None', 'path'], {}), '(None, path)\n', (2772, 2784), False, 'from docutils import io, nodes, statemachine, utils\n'), ((2801, 2824), 'docutils.nodes.reprunicode', 'nodes.reprunicode', (['path'], {}), '(path)\n', (2818, 2824), False, 'from docutils import io, nodes, statemachine, utils\n'), ((5512, 5582), 'docutils.statemachine.string2lines', 'statemachine.string2lines', (['rawtext', 'tab_width'], {'convert_whitespace': '(True)'}), '(rawtext, tab_width, convert_whitespace=True)\n', (5537, 5582), False, 'from docutils import io, nodes, statemachine, utils\n'), ((8481, 8514), 'docutils.utils.relative_path', 'utils.relative_path', (['None', 'source'], {}), '(None, source)\n', (8500, 8514), False, 'from docutils import io, nodes, statemachine, utils\n'), ((13945, 13978), 'docutils.nodes.raw', 'nodes.raw', (['""""""', 'text'], {}), "('', text, **attributes)\n", (13954, 13978), False, 'from docutils import io, nodes, statemachine, utils\n'), ((14518, 14537), 'docutils.nodes.Element', 'nodes.Element', (['text'], {}), '(text)\n', (14531, 14537), False, 'from docutils import io, nodes, statemachine, utils\n'), ((16835, 16850), 'docutils.nodes.Element', 'nodes.Element', ([], {}), '()\n', (16848, 16850), False, 'from docutils import 
io, nodes, statemachine, utils\n'), ((21264, 21324), 'docutils.parsers.rst.roles.CustomRole', 'roles.CustomRole', (['new_role_name', 'base_role', 'options', 'content'], {}), '(new_role_name, base_role, options, content)\n', (21280, 21324), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((21334, 21380), 'docutils.parsers.rst.roles.register_local_role', 'roles.register_local_role', (['new_role_name', 'role'], {}), '(new_role_name, role)\n', (21359, 21380), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((21832, 21921), 'docutils.parsers.rst.roles.role', 'roles.role', (['role_name', 'self.state_machine.language', 'self.lineno', 'self.state.reporter'], {}), '(role_name, self.state_machine.language, self.lineno, self.state.\n reporter)\n', (21842, 21921), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((23961, 23986), 'time.strftime', 'time.strftime', (['format_str'], {}), '(format_str)\n', (23974, 23986), False, 'import time\n'), ((3229, 3303), 'docutils.io.FileInput', 'io.FileInput', ([], {'source_path': 'path', 'encoding': 'encoding', 'error_handler': 'e_handler'}), '(source_path=path, encoding=encoding, error_handler=e_handler)\n', (3241, 3303), False, 'from docutils import io, nodes, statemachine, utils\n'), ((8223, 8277), 'docutils.utils.new_document', 'utils.new_document', (['path', 'self.state.document.settings'], {}), '(path, self.state.document.settings)\n', (8241, 8277), False, 'from docutils import io, nodes, statemachine, utils\n'), ((17138, 17157), 'docutils.nodes.Text', 'nodes.Text', (['decoded'], {}), '(decoded)\n', (17148, 17157), False, 'from docutils import io, nodes, statemachine, utils\n'), ((17612, 17654), 'docutils.parsers.rst.directives.class_option', 'directives.class_option', (['self.arguments[0]'], {}), '(self.arguments[0])\n', (17635, 17654), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((17915, 17930), 'docutils.nodes.Element', 
'nodes.Element', ([], {}), '()\n', (17928, 17930), False, 'from docutils import io, nodes, statemachine, utils\n'), ((18228, 18332), 'docutils.nodes.pending', 'nodes.pending', (['misc.ClassAttribute', "{'class': class_value, 'directive': self.name}", 'self.block_text'], {}), "(misc.ClassAttribute, {'class': class_value, 'directive': self\n .name}, self.block_text)\n", (18241, 18332), False, 'from docutils import io, nodes, statemachine, utils\n'), ((19415, 19509), 'docutils.parsers.rst.roles.role', 'roles.role', (['base_role_name', 'self.state_machine.language', 'self.lineno', 'self.state.reporter'], {}), '(base_role_name, self.state_machine.language, self.lineno, self.\n state.reporter)\n', (19425, 19509), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((20176, 20213), 'docutils.parsers.rst.convert_directive_function', 'convert_directive_function', (['base_role'], {}), '(base_role)\n', (20202, 20213), False, 'from docutils.parsers.rst import Directive, convert_directive_function\n'), ((24454, 24470), 'docutils.nodes.Text', 'nodes.Text', (['text'], {}), '(text)\n', (24464, 24470), False, 'from docutils import io, nodes, statemachine, utils\n'), ((6840, 6885), 'docutils.parsers.rst.directives.body.NumberLines', 'NumberLines', (['[([], text)]', 'startline', 'endline'], {}), '([([], text)], startline, endline)\n', (6851, 6885), False, 'from docutils.parsers.rst.directives.body import CodeBlock, NumberLines\n'), ((7247, 7263), 'docutils.nodes.Text', 'nodes.Text', (['text'], {}), '(text)\n', (7257, 7263), False, 'from docutils import io, nodes, statemachine, utils\n'), ((11637, 11668), 'docutils.utils.relative_path', 'utils.relative_path', (['None', 'path'], {}), '(None, path)\n', (11656, 11668), False, 'from docutils import io, nodes, statemachine, utils\n'), ((16924, 16953), 'docutils.parsers.rst.directives.unicode_code', 'directives.unicode_code', (['code'], {}), '(code)\n', (16947, 16953), False, 'from docutils.parsers.rst import directives, 
roles, states\n'), ((20847, 20885), 'docutils.parsers.rst.directives.class_option', 'directives.class_option', (['new_role_name'], {}), '(new_role_name)\n', (20870, 20885), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((22113, 22166), 'docutils.nodes.literal_block', 'nodes.literal_block', (['self.block_text', 'self.block_text'], {}), '(self.block_text, self.block_text)\n', (22132, 22166), False, 'from docutils import io, nodes, statemachine, utils\n'), ((25041, 25072), 'docutils.nodes.literal_block', 'nodes.literal_block', (['text', 'text'], {}), '(text, text)\n', (25060, 25072), False, 'from docutils import io, nodes, statemachine, utils\n'), ((11715, 11789), 'docutils.io.FileInput', 'io.FileInput', ([], {'source_path': 'path', 'encoding': 'encoding', 'error_handler': 'e_handler'}), '(source_path=path, encoding=encoding, error_handler=e_handler)\n', (11727, 11789), False, 'from docutils import io, nodes, statemachine, utils\n'), ((13346, 13445), 'docutils.io.StringInput', 'io.StringInput', ([], {'source': 'raw_text', 'source_path': 'source', 'encoding': 'encoding', 'error_handler': 'e_handler'}), '(source=raw_text, source_path=source, encoding=encoding,\n error_handler=e_handler)\n', (13360, 13445), False, 'from docutils import io, nodes, statemachine, utils\n'), ((19725, 19778), 'docutils.nodes.literal_block', 'nodes.literal_block', (['self.block_text', 'self.block_text'], {}), '(self.block_text, self.block_text)\n', (19744, 19778), False, 'from docutils import io, nodes, statemachine, utils\n'), ((20628, 20681), 'docutils.nodes.literal_block', 'nodes.literal_block', (['self.block_text', 'self.block_text'], {}), '(self.block_text, self.block_text)\n', (20647, 20681), False, 'from docutils import io, nodes, statemachine, utils\n'), ((7008, 7051), 'docutils.nodes.inline', 'nodes.inline', (['value', 'value'], {'classes': 'classes'}), '(value, value, classes=classes)\n', (7020, 7051), False, 'from docutils import io, nodes, statemachine, 
utils\n'), ((7176, 7193), 'docutils.nodes.Text', 'nodes.Text', (['value'], {}), '(value)\n', (7186, 7193), False, 'from docutils import io, nodes, statemachine, utils\n'), ((21110, 21163), 'docutils.nodes.literal_block', 'nodes.literal_block', (['self.block_text', 'self.block_text'], {}), '(self.block_text, self.block_text)\n', (21129, 21163), False, 'from docutils import io, nodes, statemachine, utils\n'), ((3666, 3682), 'docutils.utils.error_reporting.SafeString', 'SafeString', (['path'], {}), '(path)\n', (3676, 3682), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((3830, 3848), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (3841, 3848), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((4397, 4415), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (4408, 4415), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((13081, 13096), 'urllib2.urlopen', 'urlopen', (['source'], {}), '(source)\n', (13088, 13096), False, 'from urllib2 import urlopen, URLError\n'), ((17093, 17111), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (17104, 17111), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((21089, 21107), 'docutils.utils.error_reporting.SafeString', 'SafeString', (['detail'], {}), '(detail)\n', (21099, 21107), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((12260, 12278), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (12271, 12278), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((12488, 12506), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (12499, 12506), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((13301, 13319), 
'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (13312, 13319), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((13741, 13759), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (13752, 13759), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import re
import django.contrib.auth.models
import django.utils.timezone
import model_utils.fields
import projects.utils
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('email', models.EmailField(unique=True, max_length=254, verbose_name='email address')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator(re.compile(b'^[\\w.@+-]+$'), 'Enter a valid username.', b'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('screen_name', models.CharField(max_length=20, null=True, verbose_name='\u663e\u793a\u540d\u79f0', blank=True)),
('description', models.CharField(max_length=50, verbose_name='\u63cf\u8ff0', blank=True)),
('subnet', models.CharField(default=projects.utils.get_subnet, max_length=32, verbose_name='\u7f51\u6bb5')),
('is_superuser', models.BooleanField(default=False)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"django.db.models.EmailField",
"re.compile",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((453, 546), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (469, 546), False, 'from django.db import migrations, models\n'), ((574, 631), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'verbose_name': '"""password"""'}), "(max_length=128, verbose_name='password')\n", (590, 631), False, 'from django.db import migrations, models\n'), ((665, 735), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'verbose_name': '"""last login"""', 'blank': '(True)'}), "(null=True, verbose_name='last login', blank=True)\n", (685, 735), False, 'from django.db import migrations, models\n'), ((1053, 1129), 'django.db.models.EmailField', 'models.EmailField', ([], {'unique': '(True)', 'max_length': '(254)', 'verbose_name': '"""email address"""'}), "(unique=True, max_length=254, verbose_name='email address')\n", (1070, 1129), False, 'from django.db import migrations, models\n'), ((1473, 1543), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'verbose_name': '"""first name"""', 'blank': '(True)'}), "(max_length=30, verbose_name='first name', blank=True)\n", (1489, 1543), False, 'from django.db import migrations, models\n'), ((1576, 1645), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'verbose_name': '"""last name"""', 'blank': '(True)'}), "(max_length=30, verbose_name='last name', blank=True)\n", (1592, 1645), False, 'from django.db import migrations, models\n'), ((1678, 1859), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts."""', 'verbose_name': '"""active"""'}), "(default=True, help_text=\n 'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n , verbose_name='active')\n", (1697, 1859), False, 'from django.db import migrations, models\n'), ((1884, 1972), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""date joined"""'}), "(default=django.utils.timezone.now, verbose_name=\n 'date joined')\n", (1904, 1972), False, 'from django.db import migrations, models\n'), ((2002, 2077), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'verbose_name': '"""显示名称"""', 'blank': '(True)'}), "(max_length=20, null=True, verbose_name='显示名称', blank=True)\n", (2018, 2077), False, 'from django.db import migrations, models\n'), ((2132, 2194), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""描述"""', 'blank': '(True)'}), "(max_length=50, verbose_name='描述', blank=True)\n", (2148, 2194), False, 'from django.db import migrations, models\n'), ((2234, 2323), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'projects.utils.get_subnet', 'max_length': '(32)', 'verbose_name': '"""网段"""'}), "(default=projects.utils.get_subnet, max_length=32,\n verbose_name='网段')\n", (2250, 2323), False, 'from django.db import migrations, models\n'), ((2365, 2399), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2384, 2399), False, 'from django.db import migrations, models\n'), ((1370, 1397), 're.compile', 're.compile', (["b'^[\\\\w.@+-]+$'"], {}), "(b'^[\\\\w.@+-]+$')\n", (1380, 1397), False, 'import re\n')] |
from pkgcheck.checks import dropped_keywords
from snakeoil.cli import arghparse
from .. import misc
class TestDroppedKeywords(misc.ReportTestCase):
    """Exercise DroppedKeywordsCheck against synthetic package histories."""

    check_kls = dropped_keywords.DroppedKeywordsCheck

    def mk_pkg(self, ver, keywords='', eclasses=(), **kwargs):
        """Build a fake dev-util/diffball ebuild at the given version."""
        pkg_data = dict(kwargs)
        pkg_data["KEYWORDS"] = keywords
        pkg_data["_eclasses_"] = eclasses
        return misc.FakePkg(f"dev-util/diffball-{ver}", data=pkg_data)

    def mk_check(self, arches=('x86', 'amd64'), verbosity=0):
        """Instantiate the check with a minimal options namespace."""
        ns = arghparse.Namespace(arches=arches, verbosity=verbosity)
        return self.check_kls(ns, arches_addon=None)

    def test_it(self):
        check = self.mk_check()
        # single version, shouldn't yield.
        self.assertNoReport(check, [self.mk_pkg('1')])
        # ebuilds without keywords are skipped
        self.assertNoReport(
            check, [self.mk_pkg("1", "x86 amd64"), self.mk_pkg("2")])
        # ensure it limits itself to just the arches we care about
        # check unstable at the same time;
        # finally, check '-' handling; if x86 -> -x86, that's valid.
        arch_pkgs = [
            self.mk_pkg("1", "x86 ~amd64 ppc"),
            self.mk_pkg("2", "~amd64 x86"),
            self.mk_pkg("3", "-amd64 x86"),
        ]
        self.assertNoReport(check, arch_pkgs)
        # check added keyword handling
        added_pkgs = [
            self.mk_pkg("1", "amd64"),
            self.mk_pkg("2", "x86"),
            self.mk_pkg("3", "~x86 ~amd64"),
        ]
        self.assertNoReport(check, added_pkgs)
        # check special keyword handling
        for special in ('-*', '*', '~*'):
            self.assertNoReport(
                check,
                [self.mk_pkg("1", "x86 ~amd64"), self.mk_pkg("2", special)])
        # ensure it doesn't flag live ebuilds
        self.assertNoReport(
            check,
            [self.mk_pkg("1", "x86 amd64"),
             self.mk_pkg("9999", "", PROPERTIES='live')])

    def test_verbose_mode(self):
        # verbose mode outputs a report per version with dropped keywords
        check = self.mk_check(verbosity=1)
        reports = self.assertReports(
            check,
            [self.mk_pkg("1", "amd64 x86"),
             self.mk_pkg("2", "amd64"),
             self.mk_pkg("3", "amd64")])
        assert len(reports) == 2
        assert {r.version for r in reports} == {"2", "3"}
        assert set().union(*(r.arches for r in reports)) == {"x86"}

    def test_regular_mode(self):
        # regular mode outputs the most recent pkg with dropped keywords
        check = self.mk_check()
        reports = self.assertReports(
            check,
            [self.mk_pkg("1", "x86 amd64"),
             self.mk_pkg("2", "amd64"),
             self.mk_pkg("3", "amd64")])
        assert len(reports) == 1
        assert reports[0].version == '3'
        assert set().union(*(r.arches for r in reports)) == {"x86"}
| [
"snakeoil.cli.arghparse.Namespace"
] | [((558, 613), 'snakeoil.cli.arghparse.Namespace', 'arghparse.Namespace', ([], {'arches': 'arches', 'verbosity': 'verbosity'}), '(arches=arches, verbosity=verbosity)\n', (577, 613), False, 'from snakeoil.cli import arghparse\n')] |
import gzip
import lzma
from pytest import fixture
import zlib
from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database
def test_construct_match_array():
    """construct_match_array yields the expected 16-byte bloom filter."""
    result = construct_match_array(16, ['hello', 'world'], sample_sizes=[4])
    assert isinstance(result, bytes)
    expected_hex = '00002000600000000000000000001000'
    assert result.hex() == expected_hex
def test_array_is_subset():
    """Bitwise subset relation between single-byte bloom arrays."""
    cases = [
        (b'\x00', b'\x00', True),
        (b'\x00', b'\x01', True),
        (b'\x01', b'\x00', False),
        (b'\x01', b'\x02', False),
        (b'\x01', b'\x03', True),
    ]
    for needle, haystack, expected in cases:
        assert array_is_subset(needle, haystack) is expected
def test_construct_file_arrays_simple(temp_dir):
    """Plain-text files are indexed into the expected bloom array."""
    path = temp_dir / 'one.txt'
    path.write_text('hello\nworld\n')
    with path.open(mode='rb') as fh:
        array, = construct_file_arrays(fh, array_bytesize=16, sample_sizes=[4])
    assert isinstance(array, bytes)
    assert array.hex() == '00002000600000000000000000001000'
def test_construct_file_arrays_gzip(temp_dir):
    """Gzip-compressed input is handled the same as plain text."""
    path = temp_dir / 'one.txt.gz'
    with gzip.open(path, mode='wb') as gz:
        gz.write(b'hello\nworld\n')
    with path.open(mode='rb') as fh:
        array, = construct_file_arrays(fh, array_bytesize=16, sample_sizes=[4])
    assert isinstance(array, bytes)
    assert array.hex() == '00002000600000000000000000001000'
def test_construct_file_arrays_xz(temp_dir):
    """XZ-compressed input is handled the same as plain text."""
    path = temp_dir / 'one.txt.xz'
    with lzma.open(path, mode='wb') as xz:
        xz.write(b'hello\nworld\n')
    with path.open(mode='rb') as fh:
        array, = construct_file_arrays(fh, array_bytesize=16, sample_sizes=[4])
    assert isinstance(array, bytes)
    assert array.hex() == '00002000600000000000000000001000'
@fixture
def db(temp_dir):
    # Fresh on-disk database per test, rooted in the pytest tmp dir.
    return open_database(temp_dir / 'db')
def test_index_files(temp_dir, db):
    """Indexing a plain file and a gzipped file stores one row for each."""
    p1 = temp_dir / 'one.txt'
    p1.write_bytes(b'Lorem ipsum dolor sit amet.\nThis is second line.\n')
    p2 = temp_dir / 'two.txt.gz'
    p2.write_bytes(gzip.compress(b'This is a compressed file.\n'))
    index_file(db, p1, array_bytesize=16)
    index_file(db, p2, array_bytesize=16)
    cur = db._connect().cursor()
    cur.execute('SELECT path, key, array FROM bloom_files_v3')
    # Sort by path so the row-to-file pairing below is deterministic.
    row1, row2 = sorted(cur.fetchall())
    assert row1[0] == str(p1)
    # Cache key combines file size + mtime with the hash algo and sample sizes.
    assert row1[1] == f"{p1.stat().st_size}:{p1.stat().st_mtime}:fnv1a_64:4,5,6"
    # Stored arrays are zlib-compressed; compare against known-good digests.
    assert zlib.decompress(row1[2]).hex() == '97e126c173ff9373a75d1967f97219ec'
    assert row2[0] == str(p2)
    assert row2[1] == f"{p2.stat().st_size}:{p2.stat().st_mtime}:fnv1a_64:4,5,6"
    assert zlib.decompress(row2[2]).hex() == '12e3c6f14a0792e8836c194a4e8f00e0'
def test_filter_files(temp_dir, db):
    """match_file keeps only paths whose bloom array may contain the query.

    Fix: the helper was an assigned lambda (PEP 8 E731) whose first parameter
    `db` shadowed the `db` fixture argument; it is now a named inner function
    with a non-shadowing parameter name. Behavior is unchanged.
    """
    p1 = temp_dir / 'one.txt'
    p1.write_bytes(b'Lorem ipsum dolor sit amet.\nThis is second line.\n')
    p2 = temp_dir / 'two.txt.gz'
    p2.write_bytes(gzip.compress(b'This is a compressed file.\n'))

    def filter_files(database, paths, query):
        # Keep paths whose indexed bloom array is compatible with the query.
        return [p for p in paths if match_file(database, query, p)]

    assert list(filter_files(db, [p1, p2], ['This'])) == [p1, p2]
    assert list(filter_files(db, [p1, p2], ['compressed'])) == [p2]
    assert list(filter_files(db, [p1, p2], ['nothing'])) == []
| [
"bloom.main.match_file",
"gzip.open",
"lzma.open",
"bloom.main.open_database",
"bloom.main.construct_match_array",
"gzip.compress",
"bloom.main.construct_file_arrays",
"zlib.decompress",
"bloom.main.index_file"
] | [((236, 299), 'bloom.main.construct_match_array', 'construct_match_array', (['(16)', "['hello', 'world']"], {'sample_sizes': '[4]'}), "(16, ['hello', 'world'], sample_sizes=[4])\n", (257, 299), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((1904, 1934), 'bloom.main.open_database', 'open_database', (["(temp_dir / 'db')"], {}), "(temp_dir / 'db')\n", (1917, 1934), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((2182, 2219), 'bloom.main.index_file', 'index_file', (['db', 'p1'], {'array_bytesize': '(16)'}), '(db, p1, array_bytesize=16)\n', (2192, 2219), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((2224, 2261), 'bloom.main.index_file', 'index_file', (['db', 'p2'], {'array_bytesize': '(16)'}), '(db, p2, array_bytesize=16)\n', (2234, 2261), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((979, 1040), 'bloom.main.construct_file_arrays', 'construct_file_arrays', (['f'], {'array_bytesize': '(16)', 'sample_sizes': '[4]'}), '(f, array_bytesize=16, sample_sizes=[4])\n', (1000, 1040), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((1228, 1251), 'gzip.open', 'gzip.open', (['p'], {'mode': '"""wb"""'}), "(p, mode='wb')\n", (1237, 1251), False, 'import gzip\n'), ((1343, 1404), 'bloom.main.construct_file_arrays', 'construct_file_arrays', (['f'], {'array_bytesize': '(16)', 'sample_sizes': '[4]'}), '(f, array_bytesize=16, sample_sizes=[4])\n', (1364, 1404), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((1590, 1613), 'lzma.open', 'lzma.open', 
(['p'], {'mode': '"""wb"""'}), "(p, mode='wb')\n", (1599, 1613), False, 'import lzma\n'), ((1705, 1766), 'bloom.main.construct_file_arrays', 'construct_file_arrays', (['f'], {'array_bytesize': '(16)', 'sample_sizes': '[4]'}), '(f, array_bytesize=16, sample_sizes=[4])\n', (1726, 1766), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n'), ((2130, 2176), 'gzip.compress', 'gzip.compress', (["b'This is a compressed file.\\n'"], {}), "(b'This is a compressed file.\\n')\n", (2143, 2176), False, 'import gzip\n'), ((2976, 3022), 'gzip.compress', 'gzip.compress', (["b'This is a compressed file.\\n'"], {}), "(b'This is a compressed file.\\n')\n", (2989, 3022), False, 'import gzip\n'), ((2520, 2544), 'zlib.decompress', 'zlib.decompress', (['row1[2]'], {}), '(row1[2])\n', (2535, 2544), False, 'import zlib\n'), ((2711, 2735), 'zlib.decompress', 'zlib.decompress', (['row2[2]'], {}), '(row2[2])\n', (2726, 2735), False, 'import zlib\n'), ((3085, 3105), 'bloom.main.match_file', 'match_file', (['db', 'q', 'p'], {}), '(db, q, p)\n', (3095, 3105), False, 'from bloom.main import construct_match_array, array_is_subset, construct_file_arrays, index_file, match_file, open_database\n')] |
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.uic import loadUi
class FormWindow(QtWidgets.QMainWindow):
    """Main window wrapping the Qt Designer form GUI/form.ui."""

    def __init__(self):
        super(FormWindow, self).__init__()
        # Load the Designer .ui file into this window and show it right away.
        self.ui = loadUi('GUI/form.ui', self)
        self.ui.show()
        # Cache the widgets this window interacts with.
        self.submitButton = self.ui.pushButton_ok
        self.lineName = self.ui.lineEdit_name
        self.lineID = self.ui.lineEdit_id
        self.labelNameError = self.ui.label_nameError
        self.labelIDError = self.ui.label_idError
        self.labelMessage = self.ui.label_message
        self.submitButton.clicked.connect(self.clickButton_ok)

    def clickButton_ok(self):
        """Read the form fields and forward them when validation passes."""
        entered_name = self.lineName.text()
        entered_id = self.lineID.text()
        if self.validate(entered_name, entered_id):
            # NOTE(review): `controlPanel` is neither defined nor imported in
            # this module -- presumably injected at runtime; confirm before
            # relying on this path.
            controlPanel.create_user(entered_name, entered_id)

    def validate(self, name, id):
        """Validation stub; currently accepts every name/id pair."""
        return True
"PyQt5.uic.loadUi"
] | [((201, 228), 'PyQt5.uic.loadUi', 'loadUi', (['"""GUI/form.ui"""', 'self'], {}), "('GUI/form.ui', self)\n", (207, 228), False, 'from PyQt5.uic import loadUi\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud PubSub sources and sinks.
Cloud Pub/Sub sources and sinks are currently supported only in streaming
pipelines, during remote execution.
This API is currently under development and is subject to change.
"""
# pytype: skip-file
from __future__ import absolute_import
import re
from builtins import object
from typing import Any
from typing import Optional
from future.utils import iteritems
from past.builtins import unicode
from apache_beam import coders
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import Map
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.utils.annotations import deprecated
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
__all__ = [
'PubsubMessage',
'ReadFromPubSub',
'ReadStringsFromPubSub',
'WriteStringsToPubSub',
'WriteToPubSub'
]
class PubsubMessage(object):
  """Represents a Cloud Pub/Sub message.

  Message payload includes the data and attributes fields. For the payload to be
  valid, at least one of its fields must be non-empty.

  This interface is experimental. No backwards compatibility guarantees.

  Attributes:
    data: (bytes) Message data. May be None.
    attributes: (dict) Key-value map of str to str, containing both user-defined
      and service generated attributes (such as id_label and
      timestamp_attribute). May be None.
  """
  def __init__(self, data, attributes):
    if data is None and not attributes:
      # Fix: interpolate the message eagerly. Unlike logging calls,
      # ValueError does not apply %-formatting to extra positional
      # arguments, so the previous form raised with an unformatted
      # (format_string, data, attributes) tuple as its message.
      raise ValueError(
          'Either data (%r) or attributes (%r) must be set.' %
          (data, attributes))
    self.data = data
    self.attributes = attributes

  def __hash__(self):
    # Attributes are folded in as a frozenset so equal dicts hash equally.
    return hash((self.data, frozenset(self.attributes.items())))

  def __eq__(self, other):
    return isinstance(other, PubsubMessage) and (
        self.data == other.data and self.attributes == other.attributes)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __repr__(self):
    return 'PubsubMessage(%s, %s)' % (self.data, self.attributes)

  @staticmethod
  def _from_proto_str(proto_msg):
    # type: (bytes) -> PubsubMessage

    """Construct from serialized form of ``PubsubMessage``.

    Args:
      proto_msg: String containing a serialized protobuf of type
        https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PubsubMessage

    Returns:
      A new PubsubMessage object.
    """
    msg = pubsub.types.pubsub_pb2.PubsubMessage()
    msg.ParseFromString(proto_msg)
    # Convert ScalarMapContainer to dict.
    attributes = dict((key, msg.attributes[key]) for key in msg.attributes)
    return PubsubMessage(msg.data, attributes)

  def _to_proto_str(self):
    """Get serialized form of ``PubsubMessage``.

    Returns:
      A str containing a serialized protobuf of type
      https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PubsubMessage
      containing the payload of this object.
    """
    msg = pubsub.types.pubsub_pb2.PubsubMessage()
    msg.data = self.data
    for key, value in iteritems(self.attributes):
      msg.attributes[key] = value
    return msg.SerializeToString()

  @staticmethod
  def _from_message(msg):
    # type: (Any) -> PubsubMessage

    """Construct from ``google.cloud.pubsub_v1.subscriber.message.Message``.

    https://googleapis.github.io/google-cloud-python/latest/pubsub/subscriber/api/message.html
    """
    # Convert ScalarMapContainer to dict.
    attributes = dict((key, msg.attributes[key]) for key in msg.attributes)
    return PubsubMessage(msg.data, attributes)
class ReadFromPubSub(PTransform):
  """A ``PTransform`` for reading from Cloud Pub/Sub."""
  # Implementation note: This ``PTransform`` is overridden by Directrunner.
  def __init__(self,
               topic=None,  # type: Optional[str]
               subscription=None,  # type: Optional[str]
               id_label=None,  # type: Optional[str]
               with_attributes=False,  # type: bool
               timestamp_attribute=None  # type: Optional[str]
              ):
    # type: (...) -> None

    """Initializes ``ReadFromPubSub``.

    Args:
      topic: Cloud Pub/Sub topic in the form
        "projects/<project>/topics/<topic>". If provided, subscription must be
        None.
      subscription: Existing Cloud Pub/Sub subscription to use in the
        form "projects/<project>/subscriptions/<subscription>". If not
        specified, a temporary subscription will be created from the specified
        topic. If provided, topic must be None.
      id_label: The attribute on incoming Pub/Sub messages to use as a unique
        record identifier. When specified, the value of this attribute (which
        can be any string that uniquely identifies the record) will be used for
        deduplication of messages. If not provided, we cannot guarantee
        that no duplicate data will be delivered on the Pub/Sub stream. In this
        case, deduplication of the stream will be strictly best effort.
      with_attributes:
        True - output elements will be :class:`~PubsubMessage` objects.
        False - output elements will be of type ``bytes`` (message
        data only).
      timestamp_attribute: Message value to use as element timestamp. If None,
        uses message publishing time as the timestamp.

        Timestamp values should be in one of two formats:

        - A numerical value representing the number of milliseconds since the
          Unix epoch.
        - A string in RFC 3339 format, UTC timezone. Example:
          ``2015-10-29T23:41:41.123Z``. The sub-second component of the
          timestamp is optional, and digits beyond the first three (i.e., time
          units smaller than milliseconds) may be ignored.
    """
    super(ReadFromPubSub, self).__init__()
    self.with_attributes = with_attributes
    # The native source always emits serialized messages; decoding into
    # PubsubMessage objects (if requested) happens in expand() below.
    self._source = _PubSubSource(
        topic=topic,
        subscription=subscription,
        id_label=id_label,
        with_attributes=self.with_attributes,
        timestamp_attribute=timestamp_attribute)

  def expand(self, pvalue):
    pcoll = pvalue.pipeline | Read(self._source)
    pcoll.element_type = bytes
    if self.with_attributes:
      # Deserialize each payload into a PubsubMessage with attributes.
      pcoll = pcoll | Map(PubsubMessage._from_proto_str)
      pcoll.element_type = PubsubMessage
    return pcoll

  def to_runner_api_parameter(self, context):
    # Required as this is identified by type in PTransformOverrides.
    # TODO(BEAM-3812): Use an actual URN here.
    return self.to_runner_api_pickled(context)
@deprecated(since='2.7.0', extra_message='Use ReadFromPubSub instead.')
def ReadStringsFromPubSub(topic=None, subscription=None, id_label=None):
  """Deprecated. Thin wrapper kept only for backwards compatibility."""
  return _ReadStringsFromPubSub(topic, subscription, id_label)
class _ReadStringsFromPubSub(PTransform):
  """Deprecated; retained only so ``ReadStringsFromPubSub`` keeps working."""
  def __init__(self, topic=None, subscription=None, id_label=None):
    super(_ReadStringsFromPubSub, self).__init__()
    self.topic = topic
    self.subscription = subscription
    self.id_label = id_label

  def expand(self, pvalue):
    # Read raw message payloads, then decode each one to text.
    raw = pvalue.pipeline | ReadFromPubSub(
        self.topic, self.subscription, self.id_label, with_attributes=False)
    decoded = raw | 'DecodeString' >> Map(lambda b: b.decode('utf-8'))
    decoded.element_type = unicode
    return decoded
@deprecated(since='2.7.0', extra_message='Use WriteToPubSub instead.')
def WriteStringsToPubSub(topic):
  """Deprecated. Thin wrapper kept only for backwards compatibility."""
  return _WriteStringsToPubSub(topic)
class _WriteStringsToPubSub(PTransform):
  """Deprecated; retained only so ``WriteStringsToPubSub`` keeps working."""
  def __init__(self, topic):
    """Create the underlying sink for the given topic.

    Args:
      topic: Cloud Pub/Sub topic in the form "/topics/<project>/<topic>".
    """
    super(_WriteStringsToPubSub, self).__init__()
    self._sink = _PubSubSink(
        topic, id_label=None, with_attributes=False, timestamp_attribute=None)

  def expand(self, pcoll):
    # Encode each string to UTF-8 bytes before handing off to the sink.
    encoded = pcoll | 'EncodeString' >> Map(lambda s: s.encode('utf-8'))
    encoded.element_type = bytes
    return encoded | Write(self._sink)
class WriteToPubSub(PTransform):
  """A ``PTransform`` for writing messages to Cloud Pub/Sub."""
  # Implementation note: This ``PTransform`` is overridden by Directrunner.
  def __init__(self,
               topic,  # type: str
               with_attributes=False,  # type: bool
               id_label=None,  # type: Optional[str]
               timestamp_attribute=None  # type: Optional[str]
              ):
    # type: (...) -> None

    """Initializes ``WriteToPubSub``.

    Args:
      topic: Cloud Pub/Sub topic in the form "/topics/<project>/<topic>".
      with_attributes:
        True - input elements will be :class:`~PubsubMessage` objects.
        False - input elements will be of type ``bytes`` (message
        data only).
      id_label: If set, will set an attribute for each Cloud Pub/Sub message
        with the given name and a unique value. This attribute can then be used
        in a ReadFromPubSub PTransform to deduplicate messages.
      timestamp_attribute: If set, will set an attribute for each Cloud Pub/Sub
        message with the given name and the message's publish time as the value.
    """
    super(WriteToPubSub, self).__init__()
    self.with_attributes = with_attributes
    self.id_label = id_label
    self.timestamp_attribute = timestamp_attribute
    self._sink = _PubSubSink(
        topic, id_label, with_attributes, timestamp_attribute)

  @staticmethod
  def to_proto_str(element):
    # type: (PubsubMessage) -> bytes

    # Reject anything that is not a PubsubMessage with a descriptive error.
    if not isinstance(element, PubsubMessage):
      raise TypeError(
          'Unexpected element. Type: %s (expected: PubsubMessage), '
          'value: %r' % (type(element), element))
    return element._to_proto_str()

  def expand(self, pcoll):
    if self.with_attributes:
      pcoll = pcoll | 'ToProtobuf' >> Map(self.to_proto_str)

    # Without attributes, message data is written as-is. With attributes,
    # message data + attributes are passed as a serialized protobuf string (see
    # ``PubsubMessage._to_proto_str`` for exact protobuf message type).
    pcoll.element_type = bytes
    return pcoll | Write(self._sink)

  def to_runner_api_parameter(self, context):
    # Required as this is identified by type in PTransformOverrides.
    # TODO(BEAM-3812): Use an actual URN here.
    return self.to_runner_api_pickled(context)
# Validation patterns for fully-qualified Pub/Sub resource names.
PROJECT_ID_REGEXP = '[a-z][-a-z0-9:.]{4,61}[a-z0-9]'
SUBSCRIPTION_REGEXP = 'projects/([^/]+)/subscriptions/(.+)'
TOPIC_REGEXP = 'projects/([^/]+)/topics/(.+)'


def parse_topic(full_topic):
  """Split "projects/<project>/topics/<topic>" into (project, topic_name)."""
  parsed = re.match(TOPIC_REGEXP, full_topic)
  if parsed is None:
    raise ValueError(
        'PubSub topic must be in the form "projects/<project>/topics'
        '/<topic>" (got %r).' % full_topic)
  project, topic_name = parsed.groups()
  if not re.match(PROJECT_ID_REGEXP, project):
    raise ValueError('Invalid PubSub project name: %r.' % project)
  return project, topic_name
def parse_subscription(full_subscription):
  """Split "projects/<p>/subscriptions/<s>" into (project, subscription)."""
  parsed = re.match(SUBSCRIPTION_REGEXP, full_subscription)
  if parsed is None:
    raise ValueError(
        'PubSub subscription must be in the form "projects/<project>'
        '/subscriptions/<subscription>" (got %r).' % full_subscription)
  project, subscription_name = parsed.groups()
  if not re.match(PROJECT_ID_REGEXP, project):
    raise ValueError('Invalid PubSub project name: %r.' % project)
  return project, subscription_name
class _PubSubSource(dataflow_io.NativeSource):
  """Source for a Cloud Pub/Sub topic or subscription.

  This ``NativeSource`` is overridden by a native Pubsub implementation.

  Attributes:
    with_attributes: If False, will fetch just message data. Otherwise,
      fetches ``PubsubMessage`` protobufs.
  """
  def __init__(self,
               topic=None,  # type: Optional[str]
               subscription=None,  # type: Optional[str]
               id_label=None,  # type: Optional[str]
               with_attributes=False,  # type: bool
               timestamp_attribute=None  # type: Optional[str]
              ):
    self.coder = coders.BytesCoder()
    self.full_topic = topic
    self.full_subscription = subscription
    self.topic_name = None
    self.subscription_name = None
    self.id_label = id_label
    self.with_attributes = with_attributes
    self.timestamp_attribute = timestamp_attribute

    # Perform some validation on the topic and subscription.
    # Exactly one of topic/subscription must be supplied; parsing below also
    # derives self.project from whichever one was given.
    if not (topic or subscription):
      raise ValueError('Either a topic or subscription must be provided.')
    if topic and subscription:
      raise ValueError('Only one of topic or subscription should be provided.')
    if topic:
      self.project, self.topic_name = parse_topic(topic)
    if subscription:
      self.project, self.subscription_name = parse_subscription(subscription)

  @property
  def format(self):
    """Source format name required for remote execution."""
    return 'pubsub'

  def display_data(self):
    # Optional entries are dropped when unset so the monitoring UI shows
    # only what is actually configured.
    return {
        'id_label': DisplayDataItem(self.id_label,
                                    label='ID Label Attribute').drop_if_none(),
        'topic': DisplayDataItem(self.full_topic,
                                 label='Pubsub Topic').drop_if_none(),
        'subscription': DisplayDataItem(
            self.full_subscription, label='Pubsub Subscription').drop_if_none(),
        'with_attributes': DisplayDataItem(
            self.with_attributes, label='With Attributes').drop_if_none(),
        'timestamp_attribute': DisplayDataItem(
            self.timestamp_attribute,
            label='Timestamp Attribute').drop_if_none(),
    }

  def reader(self):
    # Reading is implemented natively by the runner, not in Python.
    raise NotImplementedError

  def is_bounded(self):
    return False
class _PubSubSink(dataflow_io.NativeSink):
  """Sink for a Cloud Pub/Sub topic.

  This ``NativeSource`` is overridden by a native Pubsub implementation.
  """
  def __init__(self,
               topic,  # type: str
               id_label,  # type: Optional[str]
               with_attributes,  # type: bool
               timestamp_attribute  # type: Optional[str]
              ):
    self.coder = coders.BytesCoder()
    self.full_topic = topic
    self.id_label = id_label
    self.with_attributes = with_attributes
    self.timestamp_attribute = timestamp_attribute
    self.project, self.topic_name = parse_topic(topic)

  @property
  def format(self):
    """Sink format name required for remote execution."""
    return 'pubsub'

  def display_data(self):
    attrs_item = DisplayDataItem(self.with_attributes, label='With Attributes')
    return {
        'topic': DisplayDataItem(self.full_topic, label='Pubsub Topic'),
        'id_label': DisplayDataItem(self.id_label, label='ID Label Attribute'),
        'with_attributes': attrs_item.drop_if_none(),
        'timestamp_attribute': DisplayDataItem(
            self.timestamp_attribute, label='Timestamp Attribute'),
    }

  def writer(self):
    # Writing is implemented natively by the runner, not in Python.
    raise NotImplementedError
| [
"apache_beam.transforms.display.DisplayDataItem",
"apache_beam.io.iobase.Read",
"re.match",
"apache_beam.coders.BytesCoder",
"apache_beam.utils.annotations.deprecated",
"apache_beam.transforms.Map",
"apache_beam.io.iobase.Write",
"google.cloud.pubsub.types.pubsub_pb2.PubsubMessage",
"future.utils.it... | [((7605, 7675), 'apache_beam.utils.annotations.deprecated', 'deprecated', ([], {'since': '"""2.7.0"""', 'extra_message': '"""Use ReadFromPubSub instead."""'}), "(since='2.7.0', extra_message='Use ReadFromPubSub instead.')\n", (7615, 7675), False, 'from apache_beam.utils.annotations import deprecated\n'), ((8407, 8476), 'apache_beam.utils.annotations.deprecated', 'deprecated', ([], {'since': '"""2.7.0"""', 'extra_message': '"""Use WriteToPubSub instead."""'}), "(since='2.7.0', extra_message='Use WriteToPubSub instead.')\n", (8417, 8476), False, 'from apache_beam.utils.annotations import deprecated\n'), ((11684, 11718), 're.match', 're.match', (['TOPIC_REGEXP', 'full_topic'], {}), '(TOPIC_REGEXP, full_topic)\n', (11692, 11718), False, 'import re\n'), ((12124, 12172), 're.match', 're.match', (['SUBSCRIPTION_REGEXP', 'full_subscription'], {}), '(SUBSCRIPTION_REGEXP, full_subscription)\n', (12132, 12172), False, 'import re\n'), ((3431, 3470), 'google.cloud.pubsub.types.pubsub_pb2.PubsubMessage', 'pubsub.types.pubsub_pb2.PubsubMessage', ([], {}), '()\n', (3468, 3470), False, 'from google.cloud import pubsub\n'), ((4049, 4088), 'google.cloud.pubsub.types.pubsub_pb2.PubsubMessage', 'pubsub.types.pubsub_pb2.PubsubMessage', ([], {}), '()\n', (4086, 4088), False, 'from google.cloud import pubsub\n'), ((4136, 4162), 'future.utils.iteritems', 'iteritems', (['self.attributes'], {}), '(self.attributes)\n', (4145, 4162), False, 'from future.utils import iteritems\n'), ((11935, 11971), 're.match', 're.match', (['PROJECT_ID_REGEXP', 'project'], {}), '(PROJECT_ID_REGEXP, project)\n', (11943, 11971), False, 'import re\n'), ((12424, 12460), 're.match', 're.match', (['PROJECT_ID_REGEXP', 'project'], {}), '(PROJECT_ID_REGEXP, project)\n', (12432, 12460), False, 'import re\n'), ((13210, 13229), 'apache_beam.coders.BytesCoder', 'coders.BytesCoder', ([], {}), '()\n', (13227, 13229), False, 'from apache_beam import coders\n'), ((15232, 15251), 
'apache_beam.coders.BytesCoder', 'coders.BytesCoder', ([], {}), '()\n', (15249, 15251), False, 'from apache_beam import coders\n'), ((7198, 7216), 'apache_beam.io.iobase.Read', 'Read', (['self._source'], {}), '(self._source)\n', (7202, 7216), False, 'from apache_beam.io.iobase import Read\n'), ((9138, 9155), 'apache_beam.io.iobase.Write', 'Write', (['self._sink'], {}), '(self._sink)\n', (9143, 9155), False, 'from apache_beam.io.iobase import Write\n'), ((11254, 11271), 'apache_beam.io.iobase.Write', 'Write', (['self._sink'], {}), '(self._sink)\n', (11259, 11271), False, 'from apache_beam.io.iobase import Write\n'), ((15627, 15681), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.full_topic'], {'label': '"""Pubsub Topic"""'}), "(self.full_topic, label='Pubsub Topic')\n", (15642, 15681), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((15703, 15761), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.id_label'], {'label': '"""ID Label Attribute"""'}), "(self.id_label, label='ID Label Attribute')\n", (15718, 15761), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((15913, 15983), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.timestamp_attribute'], {'label': '"""Timestamp Attribute"""'}), "(self.timestamp_attribute, label='Timestamp Attribute')\n", (15928, 15983), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((7299, 7333), 'apache_beam.transforms.Map', 'Map', (['PubsubMessage._from_proto_str'], {}), '(PubsubMessage._from_proto_str)\n', (7302, 7333), False, 'from apache_beam.transforms import Map\n'), ((10954, 10976), 'apache_beam.transforms.Map', 'Map', (['self.to_proto_str'], {}), '(self.to_proto_str)\n', (10957, 10976), False, 'from apache_beam.transforms import Map\n'), ((14112, 14170), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.id_label'], {'label': '"""ID Label 
Attribute"""'}), "(self.id_label, label='ID Label Attribute')\n", (14127, 14170), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((14240, 14294), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.full_topic'], {'label': '"""Pubsub Topic"""'}), "(self.full_topic, label='Pubsub Topic')\n", (14255, 14294), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((14368, 14436), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.full_subscription'], {'label': '"""Pubsub Subscription"""'}), "(self.full_subscription, label='Pubsub Subscription')\n", (14383, 14436), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((14493, 14555), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.with_attributes'], {'label': '"""With Attributes"""'}), "(self.with_attributes, label='With Attributes')\n", (14508, 14555), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((14616, 14686), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.timestamp_attribute'], {'label': '"""Timestamp Attribute"""'}), "(self.timestamp_attribute, label='Timestamp Attribute')\n", (14631, 14686), False, 'from apache_beam.transforms.display import DisplayDataItem\n'), ((15790, 15852), 'apache_beam.transforms.display.DisplayDataItem', 'DisplayDataItem', (['self.with_attributes'], {'label': '"""With Attributes"""'}), "(self.with_attributes, label='With Attributes')\n", (15805, 15852), False, 'from apache_beam.transforms.display import DisplayDataItem\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Part of the Reusables package.
#
# Copyright (c) 2014-2020 - <NAME> - MIT License
from __future__ import absolute_import
import datetime
import re
from reusables.namespace import Namespace
__all__ = ["dt_exps", "datetime_regex", "now", "datetime_format", "datetime_from_iso", "dtf", "dtiso"]
# Mapping of human-friendly "{token}" placeholders to strftime directives,
# plus regular expressions for extracting date/time strings from free text.
dt_exps = {
    "datetime": {
        "format": {
            "%I": re.compile(r"{(?:12)?-?hours?}"),
            "%H": re.compile(r"{24-?hours?}"),
            "%S": re.compile(r"{seco?n?d?s?}"),
            "%M": re.compile(r"{minu?t?e?s?}"),
            "%f": re.compile(r"{micro-?(?:second)?s?}"),
            "%Z": re.compile(r"{(?:(tz|time-?zone))?}"),
            "%y": re.compile(r"{years?}"),
            "%Y": re.compile(r"{years?-?(?:(full|name|full-?name))?s?}"),
            "%m": re.compile(r"{months?}"),
            "%b": re.compile(r"{months?-?name}"),
            "%B": re.compile(r"{months?-?(?:(full|full-?name))?s?}"),
            "%d": re.compile(r"{days?}"),
            "%w": re.compile(r"{week-?days?}"),
            "%j": re.compile(r"{year-?days?}"),
            "%a": re.compile(r"{(?:week)?-?days?-?name}"),
            "%A": re.compile(r"{(?:week)?-?days?-?fullname}"),
            "%U": re.compile(r"{weeks?}"),
            "%W": re.compile(r"{mon(?:day)?-?weeks?}"),
            "%x": re.compile(r"{date}"),
            "%X": re.compile(r"{time}"),
            "%c": re.compile(r"{date-?time}"),
            "%z": re.compile(r"{(?:utc)?-?offset}"),
            "%p": re.compile(r"{periods?}"),
            "%Y-%m-%dT%H:%M:%S": re.compile(r"{iso-?(?:format)?}"),
        },
        # BUG FIX: the "date" pattern previously contained a literal "\n"
        # between the month and day groups (a mangled line continuation),
        # so one-line dates such as "2019-03-10" could never match. The
        # sibling "datetime" pattern below shows the intended separator.
        "date": re.compile(
            r"((?:[\d]{2}|[\d]{4})[\- _\\/]?[\d]{2}[\- _\\/]?[\d]{2})"),
        "time": re.compile(r"([\d]{2}:[\d]{2}(?:\.[\d]{6})?)"),
        "datetime": re.compile(
            r"((?:[\d]{2}|[\d]{4})[\- _\\/]?[\d]{2}"
            r"[\- _\\/]?[\d]{2}T[\d]{2}:[\d]{2}"
            r"(?:\.[\d]{6})?)"),
    }
}
datetime_regex = Namespace(**dt_exps)
def datetime_format(desired_format, datetime_instance=None, *args, **kwargs):
    """Render *desired_format*, translating friendly placeholder phrases
    (such as ``{24-hour}`` or ``{month-full}``) into strftime directives.

    .. code :: python

        reusables.datetime_format("Hey, it's {month-full} already!")
        "Hey, it's March already!"

    :param desired_format: template string containing datetime phrases
    :param datetime_instance: datetime to render; defaults to the current time
    :param args: positional values forwarded to ``str.format``
    :param kwargs: keyword values forwarded to ``str.format``
    :return: the fully rendered string
    """
    template = desired_format
    for directive, pattern in datetime_regex.datetime.format.items():
        template = pattern.sub(directive, template)
    moment = datetime_instance if datetime_instance else now()
    return moment.strftime(template.format(*args, **kwargs))
def datetime_from_iso(iso_string):
    """Create a DateTime object from an ISO string.

    .. code :: python

        reusables.datetime_from_iso('2019-03-10T12:56:55.031863')
        datetime.datetime(2019, 3, 10, 12, 56, 55, 31863)

    :param iso_string: string of an ISO datetime
    :return: DateTime object
    :raises TypeError: when the string is not in ISO format
    """
    try:
        matched = datetime_regex.datetime.datetime.match(iso_string)
        assert matched.groups()[0]
    except (ValueError, AssertionError, IndexError, AttributeError):
        raise TypeError("String is not in ISO format")
    # Try the microsecond form first, then fall back to whole seconds.
    try:
        return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        pass
    return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
def now(utc=False, tz=None):
    """Return the current moment as a datetime object (local by default).

    .. code:: python

        reusables.now()
        # DateTime(2016, 12, 8, 22, 5, 2, 517000)

    :param utc: bool, default False, UTC time not local
    :param tz: TimeZone as specified by the datetime module
    :return: datetime.datetime instance
    """
    if utc:
        return datetime.datetime.utcnow()
    return datetime.datetime.now(tz=tz)
# Short aliases kept for convenience / backwards compatibility.
dtf = datetime_format
dtiso = datetime_from_iso
| [
"datetime.datetime.utcnow",
"reusables.namespace.Namespace",
"datetime.datetime.strptime",
"re.compile",
"datetime.datetime.now"
] | [((1986, 2006), 'reusables.namespace.Namespace', 'Namespace', ([], {}), '(**dt_exps)\n', (1995, 2006), False, 'from reusables.namespace import Namespace\n'), ((1666, 1753), 're.compile', 're.compile', (['"""((?:[\\\\d]{2}|[\\\\d]{4})[\\\\- _\\\\\\\\/]?[\\\\d]{2}[\\\\- _\\\\\\\\/]?\\\\n[\\\\d]{2})"""'], {}), "(\n '((?:[\\\\d]{2}|[\\\\d]{4})[\\\\- _\\\\\\\\/]?[\\\\d]{2}[\\\\- _\\\\\\\\/]?\\\\n[\\\\d]{2})')\n", (1676, 1753), False, 'import re\n'), ((1760, 1809), 're.compile', 're.compile', (['"""([\\\\d]{2}:[\\\\d]{2}(?:\\\\.[\\\\d]{6})?)"""'], {}), "('([\\\\d]{2}:[\\\\d]{2}(?:\\\\.[\\\\d]{6})?)')\n", (1770, 1809), False, 'import re\n'), ((1828, 1951), 're.compile', 're.compile', (['"""((?:[\\\\d]{2}|[\\\\d]{4})[\\\\- _\\\\\\\\/]?[\\\\d]{2}[\\\\- _\\\\\\\\/]?[\\\\d]{2}T[\\\\d]{2}:[\\\\d]{2}(?:\\\\.[\\\\d]{6})?)"""'], {}), "(\n '((?:[\\\\d]{2}|[\\\\d]{4})[\\\\- _\\\\\\\\/]?[\\\\d]{2}[\\\\- _\\\\\\\\/]?[\\\\d]{2}T[\\\\d]{2}:[\\\\d]{2}(?:\\\\.[\\\\d]{6})?)'\n )\n", (1838, 1951), False, 'import re\n'), ((3450, 3512), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['iso_string', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(iso_string, '%Y-%m-%dT%H:%M:%S.%f')\n", (3476, 3512), False, 'import datetime\n'), ((4050, 4076), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4074, 4076), False, 'import datetime\n'), ((4089, 4117), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'tz'}), '(tz=tz)\n', (4110, 4117), False, 'import datetime\n'), ((413, 444), 're.compile', 're.compile', (['"""{(?:12)?-?hours?}"""'], {}), "('{(?:12)?-?hours?}')\n", (423, 444), False, 'import re\n'), ((465, 491), 're.compile', 're.compile', (['"""{24-?hours?}"""'], {}), "('{24-?hours?}')\n", (475, 491), False, 'import re\n'), ((512, 539), 're.compile', 're.compile', (['"""{seco?n?d?s?}"""'], {}), "('{seco?n?d?s?}')\n", (522, 539), False, 'import re\n'), ((560, 587), 're.compile', 're.compile', (['"""{minu?t?e?s?}"""'], {}), "('{minu?t?e?s?}')\n", (570, 
587), False, 'import re\n'), ((608, 644), 're.compile', 're.compile', (['"""{micro-?(?:second)?s?}"""'], {}), "('{micro-?(?:second)?s?}')\n", (618, 644), False, 'import re\n'), ((665, 701), 're.compile', 're.compile', (['"""{(?:(tz|time-?zone))?}"""'], {}), "('{(?:(tz|time-?zone))?}')\n", (675, 701), False, 'import re\n'), ((722, 744), 're.compile', 're.compile', (['"""{years?}"""'], {}), "('{years?}')\n", (732, 744), False, 'import re\n'), ((765, 818), 're.compile', 're.compile', (['"""{years?-?(?:(full|name|full-?name))?s?}"""'], {}), "('{years?-?(?:(full|name|full-?name))?s?}')\n", (775, 818), False, 'import re\n'), ((839, 862), 're.compile', 're.compile', (['"""{months?}"""'], {}), "('{months?}')\n", (849, 862), False, 'import re\n'), ((883, 912), 're.compile', 're.compile', (['"""{months?-?name}"""'], {}), "('{months?-?name}')\n", (893, 912), False, 'import re\n'), ((933, 982), 're.compile', 're.compile', (['"""{months?-?(?:(full|full-?name))?s?}"""'], {}), "('{months?-?(?:(full|full-?name))?s?}')\n", (943, 982), False, 'import re\n'), ((1003, 1024), 're.compile', 're.compile', (['"""{days?}"""'], {}), "('{days?}')\n", (1013, 1024), False, 'import re\n'), ((1045, 1072), 're.compile', 're.compile', (['"""{week-?days?}"""'], {}), "('{week-?days?}')\n", (1055, 1072), False, 'import re\n'), ((1093, 1120), 're.compile', 're.compile', (['"""{year-?days?}"""'], {}), "('{year-?days?}')\n", (1103, 1120), False, 'import re\n'), ((1141, 1179), 're.compile', 're.compile', (['"""{(?:week)?-?days?-?name}"""'], {}), "('{(?:week)?-?days?-?name}')\n", (1151, 1179), False, 'import re\n'), ((1200, 1242), 're.compile', 're.compile', (['"""{(?:week)?-?days?-?fullname}"""'], {}), "('{(?:week)?-?days?-?fullname}')\n", (1210, 1242), False, 'import re\n'), ((1263, 1285), 're.compile', 're.compile', (['"""{weeks?}"""'], {}), "('{weeks?}')\n", (1273, 1285), False, 'import re\n'), ((1306, 1341), 're.compile', 're.compile', (['"""{mon(?:day)?-?weeks?}"""'], {}), 
"('{mon(?:day)?-?weeks?}')\n", (1316, 1341), False, 'import re\n'), ((1362, 1382), 're.compile', 're.compile', (['"""{date}"""'], {}), "('{date}')\n", (1372, 1382), False, 'import re\n'), ((1403, 1423), 're.compile', 're.compile', (['"""{time}"""'], {}), "('{time}')\n", (1413, 1423), False, 'import re\n'), ((1444, 1470), 're.compile', 're.compile', (['"""{date-?time}"""'], {}), "('{date-?time}')\n", (1454, 1470), False, 'import re\n'), ((1491, 1523), 're.compile', 're.compile', (['"""{(?:utc)?-?offset}"""'], {}), "('{(?:utc)?-?offset}')\n", (1501, 1523), False, 'import re\n'), ((1544, 1568), 're.compile', 're.compile', (['"""{periods?}"""'], {}), "('{periods?}')\n", (1554, 1568), False, 'import re\n'), ((1604, 1636), 're.compile', 're.compile', (['"""{iso-?(?:format)?}"""'], {}), "('{iso-?(?:format)?}')\n", (1614, 1636), False, 'import re\n'), ((3551, 3610), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['iso_string', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(iso_string, '%Y-%m-%dT%H:%M:%S')\n", (3577, 3610), False, 'import datetime\n')] |
import datetime
import uuid
from http import HTTPStatus
import pytest
import pytz
from rest_framework.test import APIClient
from tests.entities import AuthorizedUser
from winter.argument_resolver import ArgumentNotSupported
from winter.http import ResponseHeader
def test_response_header_sets_header():
    """ResponseHeader.set must store the value under the lower-cased name."""
    headers = {}
    uid = uuid.uuid4()
    header = ResponseHeader[uuid.UUID](headers, 'My-Header')
    # Act
    header.set(uid)
    assert headers['my-header'] == uid
def test_str_response_header():
    """A str-typed response header is emitted verbatim."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    # Act
    response = api_client.get('/with-response-headers/str-header/', content_type='application/json')
    assert response.status_code == HTTPStatus.OK
    assert response.json() == 'OK'
    assert response['x-header'] == 'test header'
def test_int_response_header():
    """An int-typed response header is serialized to its decimal string."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    # Act
    response = api_client.get('/with-response-headers/int-header/', content_type='application/json')
    assert response.status_code == HTTPStatus.OK
    assert response.json() == 'OK'
    assert response['x-header'] == '123'
def test_datetime_isoformat_response_header():
    """A datetime header is serialized with ``isoformat``."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    moment = datetime.datetime.now()
    url = f'/with-response-headers/datetime-isoformat-header/?now={moment.timestamp()}'
    # Act
    response = api_client.get(url, content_type='application/json')
    assert response.status_code == HTTPStatus.OK
    assert response.json() == 'OK'
    assert response['x-header'] == moment.isoformat()
def test_last_modified_response_header():
    """The Last-Modified header follows the HTTP-date (GMT) format."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    moment = datetime.datetime.now()
    url = f'/with-response-headers/last-modified-header/?now={moment.timestamp()}'
    # Act
    response = api_client.get(url, content_type='application/json')
    assert response.status_code == HTTPStatus.OK
    assert response.json() == 'OK'
    assert response['last-modified'] == moment.astimezone(pytz.utc).strftime('%a, %d %b %Y %X GMT')
def test_uuid_response_header():
    """A UUID header round-trips as its canonical string form."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    uid = uuid.uuid4()
    # Act
    response = api_client.get(f'/with-response-headers/uuid-header/?uid={uid}', content_type='application/json')
    assert response.status_code == HTTPStatus.OK
    assert response.json() == 'OK'
    assert response['x-header'] == str(uid)
def test_two_response_headers():
    """Two header parameters on one endpoint both end up in the response."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    # Act
    response = api_client.get('/with-response-headers/two-headers/', content_type='application/json')
    assert response.status_code == HTTPStatus.OK
    assert response.json() == 'OK'
    assert response['x-header1'] == 'header1'
    assert response['x-header2'] == 'header2'
def test_header_without_annotation():
    """A header parameter lacking a type annotation must be rejected."""
    api_client = APIClient()
    api_client.force_authenticate(AuthorizedUser())
    with pytest.raises(ArgumentNotSupported):
        # Act
        api_client.get('/with-response-headers/header-without-annotation/', content_type='application/json')
| [
"rest_framework.test.APIClient",
"uuid.uuid4",
"datetime.datetime.now",
"tests.entities.AuthorizedUser",
"pytest.raises"
] | [((395, 407), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (405, 407), False, 'import uuid\n'), ((526, 537), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (535, 537), False, 'from rest_framework.test import APIClient\n'), ((549, 565), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (563, 565), False, 'from tests.entities import AuthorizedUser\n'), ((891, 902), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (900, 902), False, 'from rest_framework.test import APIClient\n'), ((914, 930), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (928, 930), False, 'from tests.entities import AuthorizedUser\n'), ((1263, 1274), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1272, 1274), False, 'from rest_framework.test import APIClient\n'), ((1286, 1302), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (1300, 1302), False, 'from tests.entities import AuthorizedUser\n'), ((1349, 1372), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1370, 1372), False, 'import datetime\n'), ((1735, 1746), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1744, 1746), False, 'from rest_framework.test import APIClient\n'), ((1758, 1774), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (1772, 1774), False, 'from tests.entities import AuthorizedUser\n'), ((1821, 1844), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1842, 1844), False, 'import datetime\n'), ((2239, 2250), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (2248, 2250), False, 'from rest_framework.test import APIClient\n'), ((2262, 2278), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (2276, 2278), False, 'from tests.entities import AuthorizedUser\n'), ((2325, 2337), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2335, 2337), False, 'import uuid\n'), ((2635, 2646), 'rest_framework.test.APIClient', 
'APIClient', ([], {}), '()\n', (2644, 2646), False, 'from rest_framework.test import APIClient\n'), ((2658, 2674), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (2672, 2674), False, 'from tests.entities import AuthorizedUser\n'), ((3050, 3061), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (3059, 3061), False, 'from rest_framework.test import APIClient\n'), ((3073, 3089), 'tests.entities.AuthorizedUser', 'AuthorizedUser', ([], {}), '()\n', (3087, 3089), False, 'from tests.entities import AuthorizedUser\n'), ((3136, 3171), 'pytest.raises', 'pytest.raises', (['ArgumentNotSupported'], {}), '(ArgumentNotSupported)\n', (3149, 3171), False, 'import pytest\n')] |
import csv

# COVID-19 confirmed-cases time series from the JHU CSSE repository.
archivo = '/Users/alfonso/devel/datoscovid-19/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'

with open(archivo) as csv_file:
    line_count = 0
    for indice, fila in enumerate(csv.reader(csv_file, delimiter=',')):
        if indice == 0:
            # First row carries the column headers.
            print(f'Column names are: {", ".join(fila)}')
        else:
            # Province/State, Country/Region, Lat
            print(f'\t{fila[0]} {fila[1]} {fila[2]}')
        line_count = indice + 1
    print(f'Processed {line_count} lines.')
| [
"csv.reader"
] | [((204, 239), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (214, 239), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
import logging
import utool as ut
import numpy as np
import vtool as vt
import pandas as pd
from wbia.algo.graph.nx_utils import ensure_multi_index
from wbia.algo.graph.state import POSTV, NEGTV, INCMP
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class Groundtruth(object):
    """Mixin exposing groundtruth lookups for an inference object (``infr``).

    When a controller is attached (``infr.ibs is not None``) the queries are
    delegated to the ``wbia_*`` helpers; otherwise answers are derived from
    graph/edge attributes stored on ``infr`` itself.
    """

    def is_comparable(infr, aid_pairs, allow_guess=True):
        """
        Guesses by default when real comparable information is not available.
        """
        if infr.ibs is not None:
            return infr.wbia_is_comparable(aid_pairs, allow_guess)
        is_comp = list(
            infr.gen_edge_values(
                'gt_comparable', edges=aid_pairs, default=True, on_missing='default'
            )
        )
        return np.array(is_comp)

    def is_photobomb(infr, aid_pairs):
        """Return a boolean array flagging photobomb pairs.

        Without a controller there is no photobomb info, so all False.
        """
        if infr.ibs is not None:
            return infr.wbia_is_photobomb(aid_pairs)
        return np.array([False] * len(aid_pairs))

    def is_same(infr, aid_pairs):
        """Return a boolean array: do both annots share the same original name label?"""
        if infr.ibs is not None:
            return infr.wbia_is_same(aid_pairs)
        node_dict = ut.nx_node_dict(infr.graph)
        nid1 = [node_dict[n1]['orig_name_label'] for n1, n2 in aid_pairs]
        nid2 = [node_dict[n2]['orig_name_label'] for n1, n2 in aid_pairs]
        return np.equal(nid1, nid2)

    def apply_edge_truth(infr, edges=None):
        """Compute and store the groundtruth 'truth' attribute for *edges*.

        :param edges: edge list; defaults to every edge in the graph
        """
        if edges is None:
            edges = list(infr.edges())
        edge_truth_df = infr.match_state_df(edges)
        # idxmax picks the single winning state (POSTV/NEGTV/INCMP) per edge
        edge_truth = edge_truth_df.idxmax(axis=1).to_dict()
        infr.set_edge_attrs('truth', edge_truth)
        infr.edge_truth.update(edge_truth)

    def match_state_df(infr, index):
        """ Returns groundtruth state based on wbia controller """
        index = ensure_multi_index(index, ('aid1', 'aid2'))
        aid_pairs = np.asarray(index.tolist())
        aid_pairs = vt.ensure_shape(aid_pairs, (None, 2))
        is_same = infr.is_same(aid_pairs)
        is_comp = infr.is_comparable(aid_pairs)
        match_state_df = pd.DataFrame.from_dict(
            dict(
                [
                    (NEGTV, ~is_same & is_comp),
                    (POSTV, is_same & is_comp),
                    (INCMP, ~is_comp),
                ]
            )
        )
        match_state_df.index = index
        return match_state_df

    def match_state_gt(infr, edge):
        """Return the single groundtruth state (POSTV/NEGTV/INCMP) for *edge*.

        Prefers cached truth, then the dummy verifier, then recomputation.
        """
        if edge in infr.edge_truth:
            truth = infr.edge_truth[edge]
        elif hasattr(infr, 'dummy_verif'):
            truth = infr.dummy_verif._get_truth(edge)
        else:
            aid_pairs = np.asarray([edge])
            is_same = infr.is_same(aid_pairs)[0]
            is_comp = infr.is_comparable(aid_pairs)[0]
            match_state = pd.Series(
                dict(
                    [
                        (NEGTV, ~is_same & is_comp),
                        (POSTV, is_same & is_comp),
                        (INCMP, ~is_comp),
                    ]
                )
            )
            truth = match_state.idxmax()
        return truth

    def edge_attr_df(infr, key, edges=None, default=ut.NoParam):
        """ constructs DataFrame using current predictions """
        edge_states = infr.gen_edge_attrs(key, edges=edges, default=default)
        edge_states = list(edge_states)
        if isinstance(edges, pd.MultiIndex):
            index = edges
        else:
            if edges is None:
                edges_ = ut.take_column(edge_states, 0)
            else:
                edges_ = ut.lmap(tuple, ut.aslist(edges))
            index = pd.MultiIndex.from_tuples(edges_, names=('aid1', 'aid2'))
        records = ut.itake_column(edge_states, 1)
        # BUGFIX: pd.Series.from_array was deprecated in pandas 0.23 and
        # removed in 1.0; the plain constructor accepts any iterable.
        edge_df = pd.Series(list(records))
        edge_df.name = key
        edge_df.index = index
        return edge_df
| [
"logging.getLogger",
"utool.inject2",
"utool.itake_column",
"wbia.algo.graph.nx_utils.ensure_multi_index",
"numpy.asarray",
"numpy.equal",
"numpy.array",
"utool.take_column",
"vtool.ensure_shape",
"utool.nx_node_dict",
"pandas.MultiIndex.from_tuples",
"utool.aslist",
"pandas.Series.from_arra... | [((249, 269), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (259, 269), True, 'import utool as ut\n'), ((279, 304), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (296, 304), False, 'import logging\n'), ((776, 793), 'numpy.array', 'np.array', (['is_comp'], {}), '(is_comp)\n', (784, 793), True, 'import numpy as np\n'), ((1106, 1133), 'utool.nx_node_dict', 'ut.nx_node_dict', (['infr.graph'], {}), '(infr.graph)\n', (1121, 1133), True, 'import utool as ut\n'), ((1297, 1317), 'numpy.equal', 'np.equal', (['nid1', 'nid2'], {}), '(nid1, nid2)\n', (1305, 1317), True, 'import numpy as np\n'), ((1752, 1795), 'wbia.algo.graph.nx_utils.ensure_multi_index', 'ensure_multi_index', (['index', "('aid1', 'aid2')"], {}), "(index, ('aid1', 'aid2'))\n", (1770, 1795), False, 'from wbia.algo.graph.nx_utils import ensure_multi_index\n'), ((1863, 1900), 'vtool.ensure_shape', 'vt.ensure_shape', (['aid_pairs', '(None, 2)'], {}), '(aid_pairs, (None, 2))\n', (1878, 1900), True, 'import vtool as vt\n'), ((3628, 3659), 'utool.itake_column', 'ut.itake_column', (['edge_states', '(1)'], {}), '(edge_states, 1)\n', (3643, 3659), True, 'import utool as ut\n'), ((3678, 3707), 'pandas.Series.from_array', 'pd.Series.from_array', (['records'], {}), '(records)\n', (3698, 3707), True, 'import pandas as pd\n'), ((3552, 3609), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['edges_'], {'names': "('aid1', 'aid2')"}), "(edges_, names=('aid1', 'aid2'))\n", (3577, 3609), True, 'import pandas as pd\n'), ((2571, 2589), 'numpy.asarray', 'np.asarray', (['[edge]'], {}), '([edge])\n', (2581, 2589), True, 'import numpy as np\n'), ((3425, 3455), 'utool.take_column', 'ut.take_column', (['edge_states', '(0)'], {}), '(edge_states, 0)\n', (3439, 3455), True, 'import utool as ut\n'), ((3514, 3530), 'utool.aslist', 'ut.aslist', (['edges'], {}), '(edges)\n', (3523, 3530), True, 'import utool as ut\n')] |
import os
import logging
from decimal import Decimal
import boto3
import botocore
logger = logging.getLogger('analysis')
def clean_detect_response(response):
    """Reduce a Rekognition ``detect_labels`` response to name/confidence pairs.

    :param response: raw Rekognition response dict (may lack 'Labels')
    :return: list of ``{'Name': ..., 'Confidence': Decimal}`` dicts
    """
    cleaned = []
    for label in response.get('Labels', []):
        # Decimal via str keeps the exact printed value (DynamoDB-friendly).
        confidence = Decimal(str(label['Confidence']))
        cleaned.append({'Name': label['Name'], 'Confidence': confidence})
    return cleaned
def analyze_image(bucket_name, image):
    """Run Rekognition label detection on an image stored in S3.

    :param bucket_name: S3 bucket holding the image
    :param image: dict with an ``S3Url`` key (``ImageUrl`` is used for logging)
    :return: cleaned label list, or ``[]`` when Rekognition rejects the image
    """
    # TODO handle more image types
    # TODO remove params from image urls
    logger.error(f"analyzing image: {image}")
    rekognition = boto3.client('rekognition')
    image_url = image['S3Url']
    s3_object = {
        'Bucket': bucket_name,
        'Name': image_url,
    }
    try:
        response = rekognition.detect_labels(
            Image={'S3Object': s3_object},
            MaxLabels=10,
        )
    # Rekognition raises e.g. InvalidImageFormatException for unsupported types
    except Exception as e:
        logger.error(f"Failed to analyze, Invalid image format: {image['ImageUrl']}")
        return []
    return clean_detect_response(response)
# if __name__ == "__main__":
# bucket_name = os.getenv('GBIMAGECLASSIFIER_BUCKET', "gbimageclassifier_noenv")
# image = {'ImageUrl': 'http://garybake.com/images/gameboy/gameboy.jpg', 'S3Url': 'fec9ae39.garybake.com/images_rl_mario-learning.jpg'}
# labels = analyze_image_mock(bucket_name, image)
# print(labels)
| [
"logging.getLogger",
"boto3.client"
] | [((93, 122), 'logging.getLogger', 'logging.getLogger', (['"""analysis"""'], {}), "('analysis')\n", (110, 122), False, 'import logging\n'), ((633, 660), 'boto3.client', 'boto3.client', (['"""rekognition"""'], {}), "('rekognition')\n", (645, 660), False, 'import boto3\n')] |
# -*- coding: utf-8 -*-
"""BehaveX - Agile test wrapper on top of Behave (BDD)."""
# pylint: disable=W0703
# __future__ has been added to maintain compatibility
from __future__ import absolute_import, print_function
import codecs
import json
import logging.config
import multiprocessing
import os
import os.path
import platform
import re
import signal
import sys
import time
import traceback
from operator import itemgetter
from tempfile import gettempdir
from behave import __main__ as behave_script
# noinspection PyUnresolvedReferences
import behavex.outputs.report_json
from behavex import conf_mgr
from behavex.arguments import BEHAVE_ARGS, BEHAVEX_ARGS, parse_arguments
from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env
from behavex.environment import extend_behave_hooks
from behavex.execution_singleton import ExecutionSingleton
from behavex.global_vars import global_vars
from behavex.outputs import report_xml
from behavex.outputs.report_utils import (
get_overall_status,
match_for_execution,
pretty_print_time,
text,
try_operate_descriptor,
)
from behavex.utils import (
IncludeNameMatch,
IncludePathsMatch,
MatchInclude,
check_environment_file,
cleanup_folders,
configure_logging,
copy_bootstrap_html_generator,
create_partial_function_append,
explore_features,
generate_reports,
get_json_results,
get_logging_level,
join_feature_reports,
join_scenario_reports,
len_scenarios,
print_env_variables,
print_parallel,
set_behave_tags,
set_env_variable,
set_environ_config,
set_system_paths,
)
# Process exit codes.
EXIT_OK = 0
EXIT_ERROR = 1
EXECUTION_BLOCKED_MSG = (
    'Some of the folders or files are being used by another '
    'program. Please, close them and try again...'
)

# Default execution code; worker processes overwrite it to report status.
os.environ.setdefault('EXECUTION_CODE', '1')
# Filter matchers; initialized in run() before any execution starts.
match_include = None
include_path_match = None
include_name_match = None
scenario_lines = {}
def main():
    """BehaveX starting point."""
    exit_code = run(sys.argv[1:])
    exit(exit_code)
def run(args):
    """Configure the environment from CLI *args* and launch the execution.

    :param args: command line arguments (without the program name)
    :return: process exit code (EXIT_OK / EXIT_ERROR)
    """
    global match_include
    global include_path_match
    global include_name_match
    args_parsed = parse_arguments(args)
    # FEATURES_PATH is expected to be set before run() is invoked
    if not os.path.isdir(os.environ.get('FEATURES_PATH')):
        print('\n"features" folder was not found in current path...')
        exit()
    set_environ_config(args_parsed)
    ConfigRun().set_args(args_parsed)
    _set_env_variables(args_parsed)
    execution_code = setup_running_failures(args_parsed)
    if execution_code == EXIT_ERROR:
        return EXIT_ERROR
    set_system_paths()
    cleanup_folders()
    copy_bootstrap_html_generator()
    configure_logging(args_parsed)
    check_environment_file()
    # Matchers are module-level so worker processes can reuse them.
    match_include = MatchInclude()
    include_path_match = IncludePathsMatch()
    include_name_match = IncludeNameMatch()
    return launch_behavex()
def setup_running_failures(args_parsed):
    """Prepare a re-run of previously failing scenarios when requested.

    Reads the failures report produced by a prior run and publishes its
    scenario paths through the INCLUDE_PATHS environment variable.

    :param args_parsed: parsed command line arguments
    :return: EXIT_OK, or EXIT_ERROR when there is nothing to re-run
    """
    if not args_parsed.run_failures:
        return EXIT_OK
    set_env_variable('RUN_FAILURES', args_parsed.run_failures)
    failures_path = os.path.join(
        get_env('OUTPUT'), global_vars.report_filenames['report_failures']
    )
    if not os.path.exists(failures_path):
        print('\nThere are no failing test scenarios to run.')
        return EXIT_ERROR
    with open(failures_path, 'r') as failures_file:
        content = failures_file.read()
    if not content:
        print('\nThere are no failing test scenarios to run.')
        return EXIT_ERROR
    set_env_variable('INCLUDE_PATHS', content.split())
    return EXIT_OK
def init_multiprocessing():
    """Pool worker initializer: let only the parent process handle Ctrl-C."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def launch_behavex():
    """Launch the BehaveX test execution in the specified parallel mode.

    :return: overall exit code (EXIT_OK / EXIT_ERROR, or 1 on Ctrl-C)
    """
    json_reports = None
    execution_codes = None
    time_init = time.time()
    features_path = os.environ.get('FEATURES_PATH')
    parallel_scheme = get_param('parallel_scheme')
    parallel_processes = get_param('parallel_processes')
    # Parallel mode only applies with more than one process and a real run.
    multiprocess = (
        True
        if get_param('parallel_processes') > 1 and not get_param('dry_run')
        else False
    )
    if not multiprocess:
        parallel_scheme = ''
    set_behave_tags()
    scenario = False
    notify_missing_features()
    features_list = explore_features(features_path)
    create_scenario_line_references(features_list)
    process_pool = multiprocessing.Pool(parallel_processes, init_multiprocessing)
    try:
        if parallel_processes == 1 or get_param('dry_run'):
            # when it is not multiprocess
            if get_param('dry_run'):
                print('Obtaining information about the reporting scope...')
            execution_codes, json_reports = execute_tests(
                [True], multiprocess=False, config=ConfigRun()
            )
        elif parallel_scheme == 'scenario':
            execution_codes, json_reports = launch_by_scenario(
                features_list, process_pool
            )
            scenario = True
        elif parallel_scheme == 'feature':
            execution_codes, json_reports = launch_by_feature(
                features_list, process_pool
            )
        wrap_up_process_pools(process_pool, json_reports, multiprocess, scenario)
        time_end = time.time()
        if get_param('dry_run'):
            msg = '\nDry run completed. Please, see the report in {0}' ' folder.\n\n'
            print(msg.format(get_env('OUTPUT')))
        if multiprocess:
            print_parallel(
                '\nTotal execution time: {}'.format(
                    pretty_print_time(time_end - time_init)
                ),
                no_chain=True,
            )
        remove_temporary_files(parallel_processes)
        results = get_json_results()
        failing_non_muted_tests = False
        if results:
            # Collect failing scenarios per feature for the failures report.
            failures = {}
            for feature in results['features']:
                if feature['status'] == 'failed':
                    filename = feature['filename']
                    failures[filename] = []
                else:
                    continue
                for scenario in feature['scenarios']:
                    if scenario['status'] == 'failed':
                        failures[filename].append(scenario['name'])
                        # @MUTE failures do not affect the exit code.
                        if 'MUTE' not in scenario['tags']:
                            failing_non_muted_tests = True
            if failures:
                failures_file_path = os.path.join(
                    get_env('OUTPUT'), global_vars.report_filenames['report_failures']
                )
                with open(failures_file_path, 'w') as failures_file:
                    parameters = create_test_list(failures)
                    failures_file.write(parameters)
        # Calculates final exit code. execution_codes is 1 only if an execution exception arises
        if isinstance(execution_codes, list):
            execution_exception = True if sum(execution_codes) > 0 else False
        else:
            execution_exception = True if execution_codes > 0 else False
        exit_code = (
            EXIT_ERROR if execution_exception or failing_non_muted_tests else EXIT_OK
        )
    except KeyboardInterrupt:
        print('Caught KeyboardInterrupt, terminating workers')
        process_pool.terminate()
        process_pool.join()
        exit_code = 1
    print('Exit code: {}'.format(exit_code))
    return exit_code
def notify_missing_features():
    """Warn about every configured include path that does not exist on disk."""
    for entry in get_env('include_paths', []):
        # Entries may carry a ':<line>' suffix; the path is the part before it.
        feature_path = entry.partition(':')[0]
        if os.path.exists(os.path.normpath(feature_path)):
            continue
        print_parallel('path.not_found', os.path.realpath(feature_path))
def create_test_list(test_list):
    """Serialize a {feature: [scenario names]} mapping into behave path args.

    :param test_list: dict mapping feature filenames to failing scenario names
    :return: space-separated '<feature>:<line>' arguments
    """
    sce_lines = get_env('scenario_lines')
    paths = [
        '{}:{}'.format(feature, sce_lines[feature][scenario_name])
        for feature, scenarios in test_list.items()
        for scenario_name in scenarios
    ]
    return ' '.join(paths)
def create_scenario_line_references(features):
    """Record the line number of every scenario (and outline example) per feature."""
    sce_lines = {}
    for feature in features:
        key = text(feature.filename)
        sce_lines[key] = {}
        feature_lines = sce_lines[key]
        for scenario in feature.scenarios:
            if scenario.keyword == u'Scenario':
                feature_lines[scenario.name] = scenario.line
            else:
                # Scenario outlines expand into several generated scenarios.
                for generated in scenario.scenarios:
                    feature_lines[generated.name] = generated.line
    set_env('scenario_lines', sce_lines)
def launch_by_feature(features, process_pool):
    """Run features in parallel, one worker per feature file.

    Features tagged @SERIAL run first, sequentially, in the current process.

    :param features: parsed behave feature models
    :param process_pool: multiprocessing.Pool used for parallel execution
    :return: tuple (execution_codes, json_reports) accumulated from all runs
    """
    json_reports = []
    execution_codes = []
    serial = [feature.filename for feature in features if 'SERIAL' in feature.tags]
    features_dict = {
        feature.filename: feature.name
        for feature in features
        if feature.filename not in serial
    }
    if serial:
        print_parallel('feature.serial_execution')
        execution_code, map_json = execute_tests(serial, config=ConfigRun())
        json_reports += [map_json]
        execution_codes.append(execution_code)
    print_parallel('feature.running_parallels')
    for filename, _scenario_name in features_dict.items():
        process_pool.apply_async(
            execute_tests,
            ([filename], None, True, ConfigRun()),
            # Append each worker's results into the shared lists as it finishes.
            callback=create_partial_function_append(execution_codes, json_reports),
        )
    return execution_codes, json_reports
def launch_by_scenario(features, process_pool):
    """Run scenarios in parallel, one worker per individual scenario.

    Scenarios tagged @SERIAL run first, sequentially, in the current
    process. Execution aborts when duplicated scenario entries are found.

    :param features: parsed behave feature models
    :param process_pool: multiprocessing.Pool used for parallel execution
    :return: tuple (execution_codes, json_reports) accumulated from all runs
    """
    serial_scenarios = []
    json_reports = []
    filenames = []
    execution_codes = []
    duplicated_scenarios = []
    for feature in features:
        for scenario in feature.scenarios:
            # noinspection PyCallingNonCallable
            if include_path_match(
                feature.filename, scenario.line
            ) and include_name_match(scenario.name):
                scenario.tags += feature.tags
                if 'SERIAL' in scenario.tags:
                    scenario_tuple = (feature.filename, scenario.name)
                    if scenario_tuple in serial_scenarios:
                        duplicated_scenarios.append(scenario.name)
                    serial_scenarios.append(scenario_tuple)
                else:
                    scenario_tuple = ([feature.filename], scenario.name)
                    if scenario_tuple in filenames:
                        duplicated_scenarios.append(scenario.name)
                    filenames.append(scenario_tuple)
    if duplicated_scenarios:
        print_parallel(
            'scenario.duplicated_scenarios', json.dumps(duplicated_scenarios, indent=4)
        )
        exit()
    if serial_scenarios:
        print_parallel('scenario.serial_execution')
        json_serial_reports = [
            execute_tests([feature], scenario, config=ConfigRun())
            for feature, scenario in serial_scenarios
        ]
        # execution_codes and json_reports are forced to be lists now.
        execution_codes += list(map(itemgetter(0), json_serial_reports))
        json_reports += list(map(itemgetter(1), json_serial_reports))
    print_parallel('scenario.running_parallels')
    for filename, scenario_name in filenames:
        process_pool.apply_async(
            execute_tests,
            (filename, scenario_name, True, ConfigRun()),
            callback=create_partial_function_append(execution_codes, json_reports),
        )
    return execution_codes, json_reports
def execute_tests(list_features, scenario=None, multiprocess=True, config=None):
    """Run behave over *list_features* (optionally a single *scenario*).

    :param list_features: feature paths to execute ([True] means everything)
    :param scenario: scenario name when using scenario-level parallelism
    :param multiprocess: True when called inside a worker process
    :param config: ConfigRun instance shared from the parent process
    :return: tuple (execution_codes, merged json report)
    """
    args = None
    json_reports = []
    paths = config.get_env('include_paths', [])
    execution_codes, generate_report = [], False
    if multiprocess:
        # Re-seed the configuration singleton inside the worker process.
        ExecutionSingleton._instances[ConfigRun] = config
        extend_behave_hooks()
    for feature in list_features:
        try:
            args = _set_behave_arguments(multiprocess, feature, scenario, paths, config)
        except Exception as exception:
            traceback.print_exc()
            print(exception)
        execution_codes, generate_report = _launch_behave(args)
        if generate_report:
            json_output = dump_json_results()
        else:
            json_output = {'environment': [], 'features': [], 'steps_definition': []}
        if scenario:
            json_output['features'] = filter_feature_executed(
                json_output, text(list_features[0]), scenario
            )
        try:
            processing_xml_feature(json_output, scenario)
        except Exception as ex:
            logging.exception(ex)
        json_reports.append(json_output)
    return execution_codes, join_feature_reports(json_reports)
def filter_feature_executed(json_output, filename, scenario_name):
    """Keep only the scenarios matching *scenario_name* inside *filename*.

    Mutates the matching feature entry in place and returns it as a
    single-element list; returns None when the feature is not present.
    """
    for feature in json_output.get('features', '')[:]:
        if feature.get('filename', '') != filename:
            continue
        feature['scenarios'] = [
            scenario
            for scenario in feature['scenarios']
            if scenario_name_matching(scenario_name, scenario['name'])
        ]
        return [feature]
def _launch_behave(args):
    """Invoke behave with *args* and report the outcome.

    :return: tuple (execution_code, generate_report)
    """
    # Save tags configuration to report only selected scenarios
    # Check for tags in config file
    execution_code, generate_report = 0, True
    try:
        behave_script.main(args)
    except KeyboardInterrupt:
        execution_code, generate_report = 1, False
    except Exception as ex:
        execution_code, generate_report = 1, True
        logging.exception('Unexpected error executing behave steps: ')
        logging.exception(ex)
        traceback.print_exc()
    return execution_code, generate_report
def wrap_up_process_pools(process_pool, json_reports, multi_process, scenario=False):
    """Join worker pools, merge the JSON reports and write the output files.

    :param process_pool: multiprocessing.Pool to close and join
    :param json_reports: per-run reports to merge
    :param multi_process: whether workers were used (reports need merging)
    :param scenario: True when parallelism was scenario-level
    """
    merged_json = None
    output = os.path.join(get_env('OUTPUT'))
    try:
        if multi_process:
            process_pool.close()
            process_pool.join()
            if scenario:
                json_reports = join_scenario_reports(json_reports)
            merged_json = join_feature_reports(json_reports)
        else:
            merged_json = json_reports
    except KeyboardInterrupt:
        process_pool.terminate()
        process_pool.join()
    # Overall pass/fail summary file.
    status_info = os.path.join(output, global_vars.report_filenames['report_overall'])
    with open(status_info, 'w') as file_info:
        over_status = {'status': get_overall_status(merged_json)}
        file_info.write(json.dumps(over_status))
    path_info = os.path.join(output, global_vars.report_filenames['report_json'])
    if get_env('include_paths'):
        filter_by_paths(merged_json)
    with open(path_info, 'w') as file_info:
        file_info.write(json.dumps(merged_json))
    if get_param('dry_run'):
        print('Generating outputs...')
    generate_reports(merged_json)
def filter_by_paths(merged_json_reports):
    """Drop scenarios that fall outside the configured include-path, name
    and tag filters, then drop features left without scenarios.

    Mutates *merged_json_reports* in place; does nothing when no scenario
    line information is available in the environment.
    """
    sce_lines = get_env('scenario_lines')
    if not sce_lines:
        return
    for feature in merged_json_reports['features']:
        kept = []
        for scenario in feature['scenarios']:
            # Line number where this scenario starts inside its feature file.
            line = sce_lines[feature['filename']][scenario['name']]
            path_ok = (
                IncludePathsMatch()(feature['filename'], line)
                and MatchInclude()(feature['filename'])
            )
            if (
                path_ok
                and match_for_execution(scenario['tags'])
                and IncludeNameMatch()(scenario['name'])
            ):
                kept.append(scenario)
        feature['scenarios'] = kept
    # Features whose scenarios were all filtered out disappear entirely.
    merged_json_reports['features'] = [
        feature
        for feature in merged_json_reports['features']
        if feature['scenarios']
    ]
def remove_temporary_files(parallel_processes):
    """Delete the per-worker temporary files ('resultN.tmp', 'stdoutN.txt').

    Cleanup is skipped when the consolidated JSON report already contains
    features, i.e. the run produced usable results.

    :param parallel_processes: number of worker processes whose temp files
        may exist (files are numbered 1..parallel_processes).
    """
    path_info = os.path.join(
        os.path.join(get_env('OUTPUT'), global_vars.report_filenames['report_json'])
    )
    if os.path.exists(path_info):
        with open(path_info, 'r') as json_file:
            results_json = json.load(json_file)
            # BUGFIX: the original tested `'features' and results_json['features']`
            # — the string literal is always truthy, and a missing key raised
            # KeyError. `.get` performs the intended presence+non-empty check.
            if results_json.get('features'):
                return
    for i in range(parallel_processes):
        result_temp = os.path.join(gettempdir(), 'result{}.tmp'.format(i + 1))
        if os.path.exists(result_temp):
            try:
                os.remove(result_temp)
            except Exception as remove_ex:
                # Best-effort cleanup: report and continue with other files.
                print(remove_ex)
        path_stdout = os.path.join(gettempdir(), 'stdout{}.txt'.format(i + 1))
        if os.path.exists(path_stdout):
            try:
                os.chmod(path_stdout, 511)  # nosec
                os.remove(path_stdout)
            except Exception as remove_ex:
                print(remove_ex)
# Module-level worker setup: reset logging to a single stdout handler and
# reclaim this process's stale stdout capture file, if any.
name = multiprocessing.current_process().name.split('-')[-1]
stdout_file = os.path.join(gettempdir(), 'std{}2.txt'.format(name))
logger = logging.getLogger()
logger.propagate = False
# BUGFIX: iterate over copies — removing handlers while iterating the live
# handler list skips every other entry (logging.root IS the root logger, so
# both loops walk the same mutable list).
for handler in logging.root.handlers[:]:
    logger.removeHandler(handler)
for handler in logger.handlers[:]:
    logger.removeHandler(handler)
# NOTE(review): `_handlers` is a private logging attribute with no public
# equivalent; kept as-is to preserve the original reset behavior.
logging._handlers = []
console_log = logging.StreamHandler(sys.stdout)
console_log.setLevel(get_logging_level())
logger.addHandler(console_log)
if os.path.exists(stdout_file):
    os.chmod(stdout_file, 511)  # nosec
    # Only delete the capture file once it is no longer writable (presumably
    # meaning no other process holds it — TODO confirm).
    if not os.access(stdout_file, os.W_OK):
        os.remove(stdout_file)
def processing_xml_feature(json_output, scenario):
    """Accumulate the just-executed *scenario* into the feature's temporary
    report file and, once every scenario of the feature has been reported,
    export the feature to JUnit XML and remove the temporary file.

    :param json_output: behave JSON report for this single execution.
    :param scenario: name of the scenario that was executed (may contain
        <outline> placeholders).
    """
    if json_output['features'] and 'scenarios' in json_output['features'][0]:
        reported_scenarios = json_output['features'][0]['scenarios']
        scenario_executed = []
        # Keep only the scenarios matching the executed one; '@' in the
        # reported name indicates a scenario-outline example row, matched
        # via the outline-aware comparison.
        for reported_scenario in reported_scenarios:
            reported_name = reported_scenario['name']
            if reported_name == scenario or ('@' in reported_name and scenario_name_matching(scenario, reported_name)):
                scenario_executed.append(reported_scenario)
        json_output['features'][0]['scenarios'] = scenario_executed
        feature_name = os.path.join(
            get_env('OUTPUT'), u'{}.tmp'.format(json_output['features'][0]['name'])
        )
        feature_old = json_output['features'][0]
        feature_old['scenarios'] = scenario_executed
        if os.path.exists(feature_name):
            # Another parallel worker may hold the file: retry up to 10
            # times, merging our scenarios into the stored feature.
            for _ in range(0, 10):
                try:
                    # NOTE(review): open() without a context manager leaks
                    # the descriptor until GC — candidate for cleanup.
                    feature_old = json.load(open(feature_name, 'r'))
                    with open(feature_name, 'w') as feature_file:
                        for scen in scenario_executed:
                            feature_old['scenarios'].append(scen)
                        json.dump(feature_old, feature_file)
                    break
                except Exception as ex:
                    logging.debug(ex)
                    logging.debug('Retrying reading from {}'.format(feature_name))
                    time.sleep(1)
        else:
            # First scenario of this feature: create the temp file.
            with codecs.open(feature_name, 'w', 'utf8') as feature_file:
                json.dump(feature_old, feature_file)
        # Total number of scenarios this feature file is expected to run.
        scenarios_total = len_scenarios(feature_old['filename'])
        if len(feature_old['scenarios']) == scenarios_total:
            try:
                report_xml.export_feature_to_xml(feature_old, False)
            except Exception as ex:
                traceback.print_exc()
                print(ex)
            finally:
                # Normalize the temp-file name before removal (strip the
                # last 4 chars and re-append '.tmp').
                path_tmp = u'{}.tmp'.format(feature_name[:-4])
                os.remove(path_tmp)
def _set_env_variables(args):
    """Publish the execution configuration as environment variables.

    Resolves the output folder to an absolute path, stores tag expressions,
    include filters and the remaining BehaveX parameters in the environment,
    then prints the resulting variables.

    :param args: parsed command-line namespace (reads ``.tags`` and ``.name``).
    """
    output_folder = os.path.normpath(get_env('output'))
    if os.path.isabs(output_folder):
        set_env_variable('OUTPUT', output_folder)
    else:
        set_env_variable('OUTPUT', os.path.abspath(output_folder))
    _store_tags_to_env_variable(args.tags)
    if get_param('include_paths'):
        set_env_variable('INCLUDE_PATHS', get_param('include_paths'))
    if get_param('include'):
        # On Windows the include pattern is JSON-encoded with backslashes.
        if platform.system() == 'Windows':
            set_env_variable(
                'INCLUDE', json.dumps(get_param('include').replace('/', '\\'))
            )
        else:
            set_env_variable('INCLUDE', get_param('include'))
    # NOTE(review): this second block immediately overwrites INCLUDE with the
    # json-decoded parameter, making the branch above look redundant —
    # confirm against upstream before simplifying.
    if get_param('include'):
        set_env_variable('INCLUDE', json.loads(get_param('include')))
    if get_param('name'):
        set_env_variable('NAME', args.name)
    # Remaining BehaveX-specific parameters (the first four are presumably
    # handled explicitly above — TODO confirm against BEHAVEX_ARGS order).
    for arg in BEHAVEX_ARGS[4:]:
        set_env_variable(arg.upper(), get_param(arg))
    set_env_variable('TEMP', os.path.join(get_env('output'), 'temp'))
    set_env_variable('LOGS', os.path.join(get_env('output'), 'outputs', 'logs'))
    if get_param('logging_level'):
        set_env_variable('logging_level', get_param('logging_level'))
    if platform.system() == 'Windows':
        set_env_variable('HOME', os.path.abspath('.\\'))
    set_env_variable('DRY_RUN', get_param('dry_run'))
    print_env_variables(
        [
            'HOME',
            'CONFIG',
            'OUTPUT',
            'TAGS',
            'PARALLEL_SCHEME',
            'PARALLEL_PROCESSES',
            'TEMP',
            'LOGS',
            'LOGGING_LEVEL',
        ]
    )
def _store_tags_to_env_variable(tags):
    """Combine the CLI *tags* with the configured ``tags_to_skip`` and
    publish the result through the TAGS environment variable (accumulated
    with ';' separators).

    :param tags: list of tag expressions from the command line (or None).
    """
    config = conf_mgr.get_config()
    tags_skip = config['test_run']['tags_to_skip']
    # Normalize a single configured tag into a list. (The original carried a
    # no-op `else: tags_skip = tags_skip` branch — removed.)
    if isinstance(tags_skip, str) and tags_skip:
        tags_skip = [tags_skip]
    tags = tags if tags is not None else []
    # Don't negate a skip-tag the user explicitly asked for.
    tags_skip = [tag for tag in tags_skip if tag not in tags]
    tags = tags + ['~@{0}'.format(tag) for tag in tags_skip] if tags else []
    if tags:
        for tag in tags:
            # NOTE(review): the check uses 'TAGS' but the read uses 'tags' —
            # presumably get_env is case-insensitive; confirm.
            if get_env('TAGS'):
                set_env_variable('TAGS', get_env('tags') + ';' + tag)
            else:
                set_env_variable('TAGS', tag)
    else:
        set_env_variable('TAGS', '')
def _set_behave_arguments(
    multiprocess, feature=None, scenario=None, paths=None, config=None
):
    """Build the argument list forwarded to behave for one execution.

    :param multiprocess: True when building arguments for a worker process
        (single feature/scenario, temp-file output); False for a serial run.
    :param feature: feature path to run (worker mode only).
    :param scenario: scenario name to run (worker mode only; may contain
        <outline> placeholders).
    :param paths: feature paths for a serial run.
    :param config: BehaveX configuration object (provides get_env and args).
    :return: list of behave command-line arguments.
    """
    arguments = []
    output_folder = config.get_env('OUTPUT')
    if multiprocess:
        # Worker process: run exactly one feature, write to a temp outfile.
        arguments.append(feature)
        arguments.append('--no-summary')
        if scenario:
            # Scenario outlines report names like 'name -- @1.2 ...'; build
            # a regex that tolerates that suffix and treats <placeholders>
            # as wildcards.
            outline_examples_in_name = re.findall('<\\S*>', scenario)
            scenario_outline_compatible = '{}(.?--.?@\\d*.\\d*\\s*)?$'.format(re.escape(scenario))
            for example_name in outline_examples_in_name:
                scenario_outline_compatible = scenario_outline_compatible.replace(example_name, "\\S*")
            arguments.append('--name')
            arguments.append(scenario_outline_compatible)
        name = multiprocessing.current_process().name.split('-')[-1]
        arguments.append('--outfile')
        arguments.append(os.path.join(gettempdir(), 'stdout{}.txt'.format(name)))
    else:
        set_paths_argument(arguments, paths)
        if get_param('dry_run'):
            arguments.append('--no-summary')
        else:
            arguments.append('--summary')
        arguments.append('--junit-directory')
        arguments.append(output_folder)
        arguments.append('--outfile')
        arguments.append(os.path.join(output_folder, 'behave', 'behave.log'))
    arguments.append('--no-skipped')
    arguments.append('--no-junit')
    run_wip_tests = False
    if get_env('tags'):
        tags = get_env('tags').split(';')
        for tag in tags:
            arguments.append('--tags')
            arguments.append(tag)
            if tag.upper() in ['WIP', '@WIP']:
                run_wip_tests = True
    # Unless the user explicitly selected WIP scenarios, exclude them.
    if not run_wip_tests:
        arguments.append('--tags')
        arguments.append('~@WIP')
    arguments.append('--tags')
    arguments.append('~@MANUAL')
    args_sys = config.args
    set_args_captures(arguments, args_sys)
    if args_sys.no_snippets:
        arguments.append('--no-snippets')
    for arg in BEHAVE_ARGS:
        value_arg = getattr(args_sys, arg) if hasattr(args_sys, arg) else False
        if arg == 'include':
            if multiprocess or not value_arg:
                continue
            else:
                # behave expects the include pattern relative to 'features'.
                features_path = os.path.abspath(os.environ['FEATURES_PATH'])
                value_arg = value_arg.replace(features_path, 'features').replace(
                    '\\', '\\\\'
                )
        if arg == 'define' and value_arg:
            for key_value in value_arg:
                arguments.append('--define')
                arguments.append(key_value)
        if value_arg and arg not in BEHAVEX_ARGS and arg != 'define':
            arguments.append('--{}'.format(arg.replace('_', '-')))
            if value_arg and not isinstance(value_arg, bool):
                arguments.append(value_arg)
        # BUGFIX: the original compared the *arguments list* to the string
        # 'logging_level' (always False), so logging_level never reached
        # set_env_variable. Compare the current argument name instead.
        if arg == 'logging_level':
            set_env_variable(arg, value_arg)
        else:
            os.environ[arg] = str(value_arg)
    return arguments
def set_args_captures(args, args_sys):
    """Append behave '--no-<capture>' flags for every capture option the
    user did not explicitly disable.

    :param args: argument list to extend in place.
    :param args_sys: parsed namespace exposing no_capture, no_capture_stderr
        and no_logcapture booleans.
    """
    for option in ('capture', 'capture_stderr', 'logcapture'):
        disabled = getattr(args_sys, 'no_{}'.format(option))
        if not disabled:
            args.append('--no-{}'.format(option.replace('_', '-')))
def set_paths_argument(args, paths):
    """Append the real (symlink-resolved) path of every entry in *paths*
    to the behave argument list. A falsy *paths* adds nothing.
    """
    if not paths:
        return
    args.extend(os.path.realpath(path) for path in paths)
def scenario_name_matching(abstract_scenario_name, scenario_name):
    """Return a ``re.Match`` when *scenario_name* corresponds to
    *abstract_scenario_name*, treating <placeholders> from scenario
    outlines as wildcards and tolerating an optional ' -- @row.col'
    outline suffix. Matching is anchored at the start only (prefix match),
    mirroring ``re.match`` semantics; returns ``None`` on no match.
    """
    pattern_text = '{}(.--.@\\d+.\\d+)?'.format(re.escape(abstract_scenario_name))
    # Replace each escaped outline placeholder with a non-space wildcard.
    for placeholder in re.findall('<\\S*>', abstract_scenario_name):
        pattern_text = pattern_text.replace(placeholder, "\\S*")
    return re.compile(pattern_text).match(scenario_name)
def dump_json_results():
    """Load and return the JSON results available to this process.

    The main process reads the consolidated report from the output folder;
    worker processes read their own 'resultN.tmp' file from the temp dir.
    Returns an empty report skeleton when no results file exists yet.
    """
    current_name = multiprocessing.current_process().name
    if current_name == 'MainProcess':
        path_info = os.path.join(
            os.path.abspath(get_env('OUTPUT')),
            global_vars.report_filenames['report_json'],
        )
    else:
        worker_suffix = current_name.split('-')[-1]
        path_info = os.path.join(gettempdir(), 'result{}.tmp'.format(worker_suffix))

    def _read_results():
        """Read and deserialize the results file."""
        with open(path_info, 'r') as info_file:
            return json.loads(info_file.read())

    json_output = {'environment': '', 'features': [], 'steps_definition': ''}
    if os.path.exists(path_info):
        json_output = try_operate_descriptor(path_info, _read_results, return_value=True)
    return json_output
# Script entry point: delegate to the BehaveX command-line main().
if __name__ == '__main__':
    main()
| [
"behavex.environment.extend_behave_hooks",
"re.escape",
"behavex.outputs.report_utils.text",
"behavex.utils.get_logging_level",
"re.compile",
"behavex.utils.set_behave_tags",
"behavex.utils.set_system_paths",
"behavex.utils.join_feature_reports",
"time.sleep",
"behavex.conf_mgr.get_config",
"beh... | [((1800, 1844), 'os.environ.setdefault', 'os.environ.setdefault', (['"""EXECUTION_CODE"""', '"""1"""'], {}), "('EXECUTION_CODE', '1')\n", (1821, 1844), False, 'import os\n'), ((2176, 2197), 'behavex.arguments.parse_arguments', 'parse_arguments', (['args'], {}), '(args)\n', (2191, 2197), False, 'from behavex.arguments import BEHAVE_ARGS, BEHAVEX_ARGS, parse_arguments\n'), ((2346, 2377), 'behavex.utils.set_environ_config', 'set_environ_config', (['args_parsed'], {}), '(args_parsed)\n', (2364, 2377), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2576, 2594), 'behavex.utils.set_system_paths', 'set_system_paths', ([], {}), '()\n', (2592, 2594), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2600, 2617), 'behavex.utils.cleanup_folders', 'cleanup_folders', ([], {}), '()\n', (2615, 2617), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, 
print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2622, 2653), 'behavex.utils.copy_bootstrap_html_generator', 'copy_bootstrap_html_generator', ([], {}), '()\n', (2651, 2653), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2658, 2688), 'behavex.utils.configure_logging', 'configure_logging', (['args_parsed'], {}), '(args_parsed)\n', (2675, 2688), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2693, 2717), 'behavex.utils.check_environment_file', 'check_environment_file', ([], {}), '()\n', (2715, 2717), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2738, 2752), 'behavex.utils.MatchInclude', 'MatchInclude', ([], {}), '()\n', (2750, 2752), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, 
MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2778, 2797), 'behavex.utils.IncludePathsMatch', 'IncludePathsMatch', ([], {}), '()\n', (2795, 2797), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((2823, 2841), 'behavex.utils.IncludeNameMatch', 'IncludeNameMatch', ([], {}), '()\n', (2839, 2841), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((3641, 3685), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_IGN'], {}), '(signal.SIGINT, signal.SIG_IGN)\n', (3654, 3685), False, 'import signal\n'), ((3853, 3864), 'time.time', 'time.time', ([], {}), '()\n', (3862, 3864), False, 'import time\n'), ((3885, 3916), 'os.environ.get', 'os.environ.get', (['"""FEATURES_PATH"""'], {}), "('FEATURES_PATH')\n", (3899, 3916), False, 'import os\n'), ((3939, 3967), 'behavex.conf_mgr.get_param', 'get_param', (['"""parallel_scheme"""'], {}), 
"('parallel_scheme')\n", (3948, 3967), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((3993, 4024), 'behavex.conf_mgr.get_param', 'get_param', (['"""parallel_processes"""'], {}), "('parallel_processes')\n", (4002, 4024), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((4218, 4235), 'behavex.utils.set_behave_tags', 'set_behave_tags', ([], {}), '()\n', (4233, 4235), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((4307, 4338), 'behavex.utils.explore_features', 'explore_features', (['features_path'], {}), '(features_path)\n', (4323, 4338), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((4409, 4471), 'multiprocessing.Pool', 'multiprocessing.Pool', (['parallel_processes', 'init_multiprocessing'], {}), '(parallel_processes, init_multiprocessing)\n', (4429, 4471), False, 'import multiprocessing\n'), ((7500, 7528), 'behavex.conf_mgr.get_env', 'get_env', (['"""include_paths"""', '[]'], {}), "('include_paths', [])\n", (7507, 7528), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((7812, 7837), 'behavex.conf_mgr.get_env', 'get_env', (['"""scenario_lines"""'], {}), 
"('scenario_lines')\n", (7819, 7837), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((8562, 8598), 'behavex.conf_mgr.set_env', 'set_env', (['"""scenario_lines"""', 'sce_lines'], {}), "('scenario_lines', sce_lines)\n", (8569, 8598), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((9151, 9194), 'behavex.utils.print_parallel', 'print_parallel', (['"""feature.running_parallels"""'], {}), "('feature.running_parallels')\n", (9165, 9194), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((11173, 11217), 'behavex.utils.print_parallel', 'print_parallel', (['"""scenario.running_parallels"""'], {}), "('scenario.running_parallels')\n", (11187, 11217), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((11820, 11841), 'behavex.environment.extend_behave_hooks', 'extend_behave_hooks', ([], {}), '()\n', (11839, 11841), False, 'from behavex.environment import extend_behave_hooks\n'), ((14349, 14417), 'os.path.join', 'os.path.join', (['output', "global_vars.report_filenames['report_overall']"], {}), "(output, global_vars.report_filenames['report_overall'])\n", (14361, 14417), False, 'import os\n'), ((14596, 14661), 
'os.path.join', 'os.path.join', (['output', "global_vars.report_filenames['report_json']"], {}), "(output, global_vars.report_filenames['report_json'])\n", (14608, 14661), False, 'import os\n'), ((14669, 14693), 'behavex.conf_mgr.get_env', 'get_env', (['"""include_paths"""'], {}), "('include_paths')\n", (14676, 14693), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((14832, 14852), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (14841, 14852), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((14897, 14926), 'behavex.utils.generate_reports', 'generate_reports', (['merged_json'], {}), '(merged_json)\n', (14913, 14926), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((14987, 15012), 'behavex.conf_mgr.get_env', 'get_env', (['"""scenario_lines"""'], {}), "('scenario_lines')\n", (14994, 15012), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((16116, 16141), 'os.path.exists', 'os.path.exists', (['path_info'], {}), '(path_info)\n', (16130, 16141), False, 'import os\n'), ((17435, 17462), 'os.path.exists', 'os.path.exists', (['stdout_file'], {}), '(stdout_file)\n', (17449, 17462), False, 'import os\n'), ((19847, 19875), 'os.path.isabs', 'os.path.isabs', (['output_folder'], {}), '(output_folder)\n', (19860, 19875), False, 'import os\n'), ((20055, 20081), 'behavex.conf_mgr.get_param', 'get_param', (['"""include_paths"""'], {}), "('include_paths')\n", (20064, 20081), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, 
set_env\n'), ((20160, 20180), 'behavex.conf_mgr.get_param', 'get_param', (['"""include"""'], {}), "('include')\n", (20169, 20180), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20431, 20451), 'behavex.conf_mgr.get_param', 'get_param', (['"""include"""'], {}), "('include')\n", (20440, 20451), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20530, 20547), 'behavex.conf_mgr.get_param', 'get_param', (['"""name"""'], {}), "('name')\n", (20539, 20547), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20839, 20865), 'behavex.conf_mgr.get_param', 'get_param', (['"""logging_level"""'], {}), "('logging_level')\n", (20848, 20865), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((21091, 21226), 'behavex.utils.print_env_variables', 'print_env_variables', (["['HOME', 'CONFIG', 'OUTPUT', 'TAGS', 'PARALLEL_SCHEME',\n 'PARALLEL_PROCESSES', 'TEMP', 'LOGS', 'LOGGING_LEVEL']"], {}), "(['HOME', 'CONFIG', 'OUTPUT', 'TAGS', 'PARALLEL_SCHEME',\n 'PARALLEL_PROCESSES', 'TEMP', 'LOGS', 'LOGGING_LEVEL'])\n", (21110, 21226), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((21410, 21431), 'behavex.conf_mgr.get_config', 'conf_mgr.get_config', ([], {}), '()\n', (21429, 21431), False, 'from behavex import conf_mgr\n'), ((23435, 23450), 'behavex.conf_mgr.get_env', 'get_env', (['"""tags"""'], {}), "('tags')\n", (23442, 23450), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((25497, 25541), 're.findall', 're.findall', 
(['"""<\\\\S*>"""', 'abstract_scenario_name'], {}), "('<\\\\S*>', abstract_scenario_name)\n", (25507, 25541), False, 'import re\n'), ((25800, 25839), 're.compile', 're.compile', (['scenario_outline_compatible'], {}), '(scenario_outline_compatible)\n', (25810, 25839), False, 'import re\n'), ((26667, 26692), 'os.path.exists', 'os.path.exists', (['path_info'], {}), '(path_info)\n', (26681, 26692), False, 'import os\n'), ((2955, 3013), 'behavex.utils.set_env_variable', 'set_env_variable', (['"""RUN_FAILURES"""', 'args_parsed.run_failures'], {}), "('RUN_FAILURES', args_parsed.run_failures)\n", (2971, 3013), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((5291, 5302), 'time.time', 'time.time', ([], {}), '()\n', (5300, 5302), False, 'import time\n'), ((5315, 5335), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (5324, 5335), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((5773, 5791), 'behavex.utils.get_json_results', 'get_json_results', ([], {}), '()\n', (5789, 5791), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((8944, 8986), 'behavex.utils.print_parallel', 'print_parallel', 
(['"""feature.serial_execution"""'], {}), "('feature.serial_execution')\n", (8958, 8986), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((10747, 10790), 'behavex.utils.print_parallel', 'print_parallel', (['"""scenario.serial_execution"""'], {}), "('scenario.serial_execution')\n", (10761, 10790), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((12700, 12734), 'behavex.utils.join_feature_reports', 'join_feature_reports', (['json_reports'], {}), '(json_reports)\n', (12720, 12734), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((13404, 13428), 'behave.__main__.main', 'behave_script.main', (['args'], {}), '(args)\n', (13422, 13428), True, 'from behave import __main__ as behave_script\n'), ((13915, 13932), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), 
"('OUTPUT')\n", (13922, 13932), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((16449, 16476), 'os.path.exists', 'os.path.exists', (['result_temp'], {}), '(result_temp)\n', (16463, 16476), False, 'import os\n'), ((16701, 16728), 'os.path.exists', 'os.path.exists', (['path_stdout'], {}), '(path_stdout)\n', (16715, 16728), False, 'import os\n'), ((17011, 17023), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (17021, 17023), False, 'from tempfile import gettempdir\n'), ((17372, 17391), 'behavex.utils.get_logging_level', 'get_logging_level', ([], {}), '()\n', (17389, 17391), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((17472, 17498), 'os.chmod', 'os.chmod', (['stdout_file', '(511)'], {}), '(stdout_file, 511)\n', (17480, 17498), False, 'import os\n'), ((18423, 18451), 'os.path.exists', 'os.path.exists', (['feature_name'], {}), '(feature_name)\n', (18437, 18451), False, 'import os\n'), ((19346, 19384), 'behavex.utils.len_scenarios', 'len_scenarios', (["feature_old['filename']"], {}), "(feature_old['filename'])\n", (19359, 19384), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((19821, 19838), 'behavex.conf_mgr.get_env', 
'get_env', (['"""output"""'], {}), "('output')\n", (19828, 19838), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((19885, 19926), 'behavex.utils.set_env_variable', 'set_env_variable', (['"""OUTPUT"""', 'output_folder'], {}), "('OUTPUT', output_folder)\n", (19901, 19926), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((20557, 20592), 'behavex.utils.set_env_variable', 'set_env_variable', (['"""NAME"""', 'args.name'], {}), "('NAME', args.name)\n", (20573, 20592), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((20944, 20961), 'platform.system', 'platform.system', ([], {}), '()\n', (20959, 20961), False, 'import platform\n'), ((21065, 21085), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (21074, 21085), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((22009, 22037), 'behavex.utils.set_env_variable', 'set_env_variable', (['"""TAGS"""', '""""""'], {}), "('TAGS', '')\n", (22025, 22037), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, 
create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((23005, 23025), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (23014, 23025), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((25605, 25638), 're.escape', 're.escape', (['abstract_scenario_name'], {}), '(abstract_scenario_name)\n', (25614, 25638), False, 'import re\n'), ((26716, 26780), 'behavex.outputs.report_utils.try_operate_descriptor', 'try_operate_descriptor', (['path_info', '_load_json'], {'return_value': '(True)'}), '(path_info, _load_json, return_value=True)\n', (26738, 26780), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((2223, 2254), 'os.environ.get', 'os.environ.get', (['"""FEATURES_PATH"""'], {}), "('FEATURES_PATH')\n", (2237, 2254), False, 'import os\n'), ((2382, 2393), 'behavex.conf_mgr.ConfigRun', 'ConfigRun', ([], {}), '()\n', (2391, 2393), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((3064, 3081), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), "('OUTPUT')\n", (3071, 3081), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((3157, 3186), 'os.path.exists', 'os.path.exists', (['failures_path'], {}), '(failures_path)\n', (3171, 3186), False, 'import os\n'), ((4519, 4539), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (4528, 4539), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((4598, 4618), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (4607, 4618), False, 'from behavex.conf_mgr import ConfigRun, get_env, 
get_param, set_env\n'), ((8154, 8176), 'behavex.outputs.report_utils.text', 'text', (['feature.filename'], {}), '(feature.filename)\n', (8158, 8176), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((8217, 8239), 'behavex.outputs.report_utils.text', 'text', (['feature.filename'], {}), '(feature.filename)\n', (8221, 8239), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((10646, 10688), 'json.dumps', 'json.dumps', (['duplicated_scenarios'], {'indent': '(4)'}), '(duplicated_scenarios, indent=4)\n', (10656, 10688), False, 'import json\n'), ((13713, 13734), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13732, 13734), False, 'import traceback\n'), ((14152, 14186), 'behavex.utils.join_feature_reports', 'join_feature_reports', (['json_reports'], {}), '(json_reports)\n', (14172, 14186), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((14498, 14529), 'behavex.outputs.report_utils.get_overall_status', 'get_overall_status', (['merged_json'], {}), '(merged_json)\n', (14516, 14529), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((14555, 14578), 'json.dumps', 'json.dumps', (['over_status'], {}), '(over_status)\n', (14565, 14578), False, 'import json\n'), ((14800, 14823), 'json.dumps', 'json.dumps', (['merged_json'], {}), '(merged_json)\n', (14810, 14823), False, 'import 
json\n'), ((16039, 16056), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), "('OUTPUT')\n", (16046, 16056), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((16218, 16238), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (16227, 16238), False, 'import json\n'), ((16394, 16406), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (16404, 16406), False, 'from tempfile import gettempdir\n'), ((16646, 16658), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (16656, 16658), False, 'from tempfile import gettempdir\n'), ((17523, 17554), 'os.access', 'os.access', (['stdout_file', 'os.W_OK'], {}), '(stdout_file, os.W_OK)\n', (17532, 17554), False, 'import os\n'), ((17568, 17590), 'os.remove', 'os.remove', (['stdout_file'], {}), '(stdout_file)\n', (17577, 17590), False, 'import os\n'), ((18228, 18245), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), "('OUTPUT')\n", (18235, 18245), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((19972, 20002), 'os.path.abspath', 'os.path.abspath', (['output_folder'], {}), '(output_folder)\n', (19987, 20002), False, 'import os\n'), ((20125, 20151), 'behavex.conf_mgr.get_param', 'get_param', (['"""include_paths"""'], {}), "('include_paths')\n", (20134, 20151), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20193, 20210), 'platform.system', 'platform.system', ([], {}), '()\n', (20208, 20210), False, 'import platform\n'), ((20664, 20678), 'behavex.conf_mgr.get_param', 'get_param', (['arg'], {}), '(arg)\n', (20673, 20678), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20723, 20740), 'behavex.conf_mgr.get_env', 'get_env', (['"""output"""'], {}), "('output')\n", (20730, 20740), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20793, 20810), 'behavex.conf_mgr.get_env', 'get_env', (['"""output"""'], {}), "('output')\n", 
(20800, 20810), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20909, 20935), 'behavex.conf_mgr.get_param', 'get_param', (['"""logging_level"""'], {}), "('logging_level')\n", (20918, 20935), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((21009, 21031), 'os.path.abspath', 'os.path.abspath', (['""".\\\\"""'], {}), "('.\\\\')\n", (21024, 21031), False, 'import os\n'), ((21840, 21855), 'behavex.conf_mgr.get_env', 'get_env', (['"""TAGS"""'], {}), "('TAGS')\n", (21847, 21855), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((22361, 22391), 're.findall', 're.findall', (['"""<\\\\S*>"""', 'scenario'], {}), "('<\\\\S*>', scenario)\n", (22371, 22391), False, 'import re\n'), ((23277, 23328), 'os.path.join', 'os.path.join', (['output_folder', '"""behave"""', '"""behave.log"""'], {}), "(output_folder, 'behave', 'behave.log')\n", (23289, 23328), False, 'import os\n'), ((25940, 25973), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (25971, 25973), False, 'import multiprocessing\n'), ((26266, 26278), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (26276, 26278), False, 'from tempfile import gettempdir\n'), ((26515, 26543), 'json.loads', 'json.loads', (['json_output_file'], {}), '(json_output_file)\n', (26525, 26543), False, 'import json\n'), ((4070, 4101), 'behavex.conf_mgr.get_param', 'get_param', (['"""parallel_processes"""'], {}), "('parallel_processes')\n", (4079, 4101), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((4114, 4134), 'behavex.conf_mgr.get_param', 'get_param', (['"""dry_run"""'], {}), "('dry_run')\n", (4123, 4134), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((7636, 7666), 'os.path.normpath', 'os.path.normpath', (['include_path'], {}), '(include_path)\n', (7652, 7666), False, 'import os\n'), ((7714, 7744), 'os.path.realpath', 
'os.path.realpath', (['include_path'], {}), '(include_path)\n', (7730, 7744), False, 'import os\n'), ((9051, 9062), 'behavex.conf_mgr.ConfigRun', 'ConfigRun', ([], {}), '()\n', (9060, 9062), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((9353, 9364), 'behavex.conf_mgr.ConfigRun', 'ConfigRun', ([], {}), '()\n', (9362, 9364), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((9388, 9449), 'behavex.utils.create_partial_function_append', 'create_partial_function_append', (['execution_codes', 'json_reports'], {}), '(execution_codes, json_reports)\n', (9418, 9449), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((11061, 11074), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (11071, 11074), False, 'from operator import itemgetter\n'), ((11131, 11144), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (11141, 11144), False, 'from operator import itemgetter\n'), ((11369, 11380), 'behavex.conf_mgr.ConfigRun', 'ConfigRun', ([], {}), '()\n', (11378, 11380), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((11404, 11465), 'behavex.utils.create_partial_function_append', 'create_partial_function_append', (['execution_codes', 'json_reports'], {}), '(execution_codes, json_reports)\n', (11434, 11465), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, 
get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((12029, 12050), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12048, 12050), False, 'import traceback\n'), ((12431, 12453), 'behavex.outputs.report_utils.text', 'text', (['list_features[0]'], {}), '(list_features[0])\n', (12435, 12453), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((14090, 14125), 'behavex.utils.join_scenario_reports', 'join_scenario_reports', (['json_reports'], {}), '(json_reports)\n', (14111, 14125), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((15458, 15495), 'behavex.outputs.report_utils.match_for_execution', 'match_for_execution', (["scenario['tags']"], {}), "(scenario['tags'])\n", (15477, 15495), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((16511, 16533), 'os.remove', 'os.remove', (['result_temp'], {}), '(result_temp)\n', (16520, 16533), False, 'import os\n'), ((16763, 16789), 'os.chmod', 'os.chmod', (['path_stdout', '(511)'], {}), '(path_stdout, 511)\n', (16771, 16789), False, 'import os\n'), ((16815, 16837), 'os.remove', 'os.remove', (['path_stdout'], {}), '(path_stdout)\n', (16824, 16837), False, 'import os\n'), ((19078, 19116), 'codecs.open', 'codecs.open', (['feature_name', '"""w"""', '"""utf8"""'], {}), "(feature_name, 'w', 
'utf8')\n", (19089, 19116), False, 'import codecs\n'), ((19209, 19245), 'json.dump', 'json.dump', (['feature_old', 'feature_file'], {}), '(feature_old, feature_file)\n', (19218, 19245), False, 'import json\n'), ((19479, 19531), 'behavex.outputs.report_xml.export_feature_to_xml', 'report_xml.export_feature_to_xml', (['feature_old', '(False)'], {}), '(feature_old, False)\n', (19511, 19531), False, 'from behavex.outputs import report_xml\n'), ((19732, 19751), 'os.remove', 'os.remove', (['path_tmp'], {}), '(path_tmp)\n', (19741, 19751), False, 'import os\n'), ((20402, 20422), 'behavex.conf_mgr.get_param', 'get_param', (['"""include"""'], {}), "('include')\n", (20411, 20422), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((20500, 20520), 'behavex.conf_mgr.get_param', 'get_param', (['"""include"""'], {}), "('include')\n", (20509, 20520), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((21961, 21990), 'behavex.utils.set_env_variable', 'set_env_variable', (['"""TAGS"""', 'tag'], {}), "('TAGS', tag)\n", (21977, 21990), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((22470, 22489), 're.escape', 're.escape', (['scenario'], {}), '(scenario)\n', (22479, 22489), False, 'import re\n'), ((22895, 22907), 'tempfile.gettempdir', 'gettempdir', ([], {}), '()\n', (22905, 22907), False, 'from tempfile import gettempdir\n'), ((23467, 23482), 'behavex.conf_mgr.get_env', 'get_env', (['"""tags"""'], {}), "('tags')\n", (23474, 23482), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((24234, 
24278), 'os.path.abspath', 'os.path.abspath', (["os.environ['FEATURES_PATH']"], {}), "(os.environ['FEATURES_PATH'])\n", (24249, 24278), False, 'import os\n'), ((25373, 25395), 'os.path.realpath', 'os.path.realpath', (['path'], {}), '(path)\n', (25389, 25395), False, 'import os\n'), ((26059, 26076), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), "('OUTPUT')\n", (26066, 26076), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((4806, 4817), 'behavex.conf_mgr.ConfigRun', 'ConfigRun', ([], {}), '()\n', (4815, 4817), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((5452, 5469), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), "('OUTPUT')\n", (5459, 5469), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((5598, 5637), 'behavex.outputs.report_utils.pretty_print_time', 'pretty_print_time', (['(time_end - time_init)'], {}), '(time_end - time_init)\n', (5615, 5637), False, 'from behavex.outputs.report_utils import get_overall_status, match_for_execution, pretty_print_time, text, try_operate_descriptor\n'), ((6513, 6530), 'behavex.conf_mgr.get_env', 'get_env', (['"""OUTPUT"""'], {}), "('OUTPUT')\n", (6520, 6530), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((10877, 10888), 'behavex.conf_mgr.ConfigRun', 'ConfigRun', ([], {}), '()\n', (10886, 10888), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((15516, 15534), 'behavex.utils.IncludeNameMatch', 'IncludeNameMatch', ([], {}), '()\n', (15532, 15534), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, 
set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((16926, 16959), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (16957, 16959), False, 'import multiprocessing\n'), ((19584, 19605), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (19603, 19605), False, 'import traceback\n'), ((24895, 24927), 'behavex.utils.set_env_variable', 'set_env_variable', (['arg', 'value_arg'], {}), '(arg, value_arg)\n', (24911, 24927), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((15313, 15332), 'behavex.utils.IncludePathsMatch', 'IncludePathsMatch', ([], {}), '()\n', (15330, 15332), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((15384, 15398), 'behavex.utils.MatchInclude', 'MatchInclude', ([], {}), '()\n', (15396, 15398), False, 'from behavex.utils import IncludeNameMatch, IncludePathsMatch, MatchInclude, check_environment_file, cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_partial_function_append, explore_features, generate_reports, get_json_results, get_logging_level, join_feature_reports, join_scenario_reports, len_scenarios, print_env_variables, print_parallel, 
set_behave_tags, set_env_variable, set_environ_config, set_system_paths\n'), ((18789, 18825), 'json.dump', 'json.dump', (['feature_old', 'feature_file'], {}), '(feature_old, feature_file)\n', (18798, 18825), False, 'import json\n'), ((19033, 19046), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (19043, 19046), False, 'import time\n'), ((22765, 22798), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (22796, 22798), False, 'import multiprocessing\n'), ((26179, 26212), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (26210, 26212), False, 'import multiprocessing\n'), ((20293, 20313), 'behavex.conf_mgr.get_param', 'get_param', (['"""include"""'], {}), "('include')\n", (20302, 20313), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n'), ((21898, 21913), 'behavex.conf_mgr.get_env', 'get_env', (['"""tags"""'], {}), "('tags')\n", (21905, 21913), False, 'from behavex.conf_mgr import ConfigRun, get_env, get_param, set_env\n')] |
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQM.SiPixelPhase1Common.HistogramManager_cfi import *
import DQM.SiPixelPhase1Common.TriggerEventFlag_cfi as trigger
# Counts of valid (matched) on-track pixel hits. dimensions = 0 means the
# quantity histogrammed is a per-event count, not a per-hit value.
SiPixelPhase1TrackEfficiencyValid = DefaultHistoTrack.clone(
  name = "valid",
  title = "Valid Hits",
  range_min = 0, range_max = 50, range_nbins = 50,
  xlabel = "valid hits",
  dimensions = 0,
  specs = VPSet(
    StandardSpecifications1D_Num,
    #StandardSpecification2DProfile_Num, #for this we have the on track clusters map (i.e the same thing)
    # Inclusive per-event hit counts, one histogram per barrel layer.
    Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
                   .reduce("COUNT")
                   .groupBy("PXBarrel/PXLayer")
                   .save(nbins=50, xmin=0, xmax=1500),
    # Same for the forward disks.
    Specification().groupBy("PXForward/PXDisk/Event")
                   .reduce("COUNT")
                   .groupBy("PXForward/PXDisk/")
                   .save(nbins=50, xmin=0, xmax=1500),
  )
)
# Counts of hits crossing inactive (dead/off) modules; narrower range than
# "valid" since these should be rare.
SiPixelPhase1TrackEfficiencyInactive = DefaultHistoTrack.clone(
  name = "inactive",
  title = "Inactive Hits",
  xlabel = "inactive hits",
  range_min = 0, range_max = 25, range_nbins = 25,
  dimensions = 0,
  specs = VPSet(
    StandardSpecification2DProfile_Num,
    # Inclusive per-event counts per barrel layer.
    Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
                   .reduce("COUNT")
                   .groupBy("PXBarrel/PXLayer")
                   .save(nbins=50, xmin=0, xmax=100),
    # Same for the forward disks.
    Specification().groupBy("PXForward/PXDisk/Event")
                   .reduce("COUNT")
                   .groupBy("PXForward/PXDisk/")
                   .save(nbins=50, xmin=0, xmax=100),
  )
)
# Counts of missing hits (expected from the trajectory but not found);
# together with "valid" these feed the hit-efficiency computation below.
SiPixelPhase1TrackEfficiencyMissing = DefaultHistoTrack.clone(
  name = "missing",
  title = "Missing Hits",
  range_min = 0, range_max = 25, range_nbins = 25,
  xlabel = "missing hits",
  dimensions = 0,
  specs = VPSet(
    StandardSpecifications1D_Num,
    StandardSpecification2DProfile_Num,
    # Inclusive per-event counts per barrel layer.
    Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
                   .reduce("COUNT")
                   .groupBy("PXBarrel/PXLayer")
                   .save(nbins=50, xmin=0, xmax=100),
    # Same for the forward disks.
    Specification().groupBy("PXForward/PXDisk/Event")
                   .reduce("COUNT")
                   .groupBy("PXForward/PXDisk/")
                   .save(nbins=50, xmin=0, xmax=100),
  )
)
# Hit efficiency = #valid / (#valid + #missing), booked as MEAN profiles at
# several granularities (per ladder/blade, per layer/disk, and vs lumisection).
SiPixelPhase1TrackEfficiencyEfficiency = SiPixelPhase1TrackEfficiencyValid.clone(
  name = "hitefficiency",
  title = "Hit Efficiency",
  xlabel = "#valid/(#valid+#missing)",
  dimensions = 1,
  specs = VPSet(
    StandardSpecification2DProfile,
    #profiles per layer and shell
    Specification(PerLadder).groupBy("PXBarrel/Shell/PXLayer/SignedLadder")
                            .reduce("MEAN")
                            .groupBy("PXBarrel/Shell/PXLayer", "EXTEND_X")
                            .save(),
    Specification(PerLadder).groupBy("PXForward/HalfCylinder/PXRing/PXDisk/SignedBlade")
                            .reduce("MEAN")
                            .groupBy("PXForward/HalfCylinder/PXRing/PXDisk", "EXTEND_X")
                            .save(),
    #per layer
    Specification().groupBy("PXBarrel/PXLayer")
                   .reduce("MEAN")
                   .groupBy("PXBarrel", "EXTEND_X")
                   .save(),
    Specification().groupBy("PXForward/PXDisk")
                   .reduce("MEAN")
                   .groupBy("PXForward", "EXTEND_X")
                   .save(),
    # Efficiency vs lumisection, extended into a 2D map (layer on one axis,
    # lumisection on the other).
    Specification(PerLayer2D)
       .groupBy("PXBarrel/PXLayer/Lumisection")
       .groupBy("PXBarrel/PXLayer", "EXTEND_X")
       .groupBy("PXBarrel", "EXTEND_Y")
       .reduce("MEAN")
       .save(),
    Specification(PerLayer2D)
       .groupBy("PXForward/PXDisk/Lumisection")
       .groupBy("PXForward/PXDisk", "EXTEND_X")
       .groupBy("PXForward", "EXTEND_Y")
       .reduce("MEAN")
       .save(),
  )
)
# Number of reconstructed primary vertices per event; also tracked as a
# MEAN profile versus lumisection.
SiPixelPhase1TrackEfficiencyVertices= DefaultHistoTrack.clone(
  name = "num_vertices",
  title = "PrimaryVertices",
  xlabel= "# Vertices",
  dimensions = 1,
  range_min = -0.5,
  range_max = 100.5,
  range_nbins =101,
  specs = VPSet(
    Specification().groupBy("")
                   .save(),
    Specification().groupBy("/Lumisection")
                   .reduce("MEAN")
                   .groupBy("","EXTEND_X")
                   .save()
  )
)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
# Run 3 expects higher pileup: widen the vertex-multiplicity histogram range.
run3_common.toModify(SiPixelPhase1TrackEfficiencyVertices, range_max = 150.5, range_nbins=151)
# Ordered list of all histogram configurations; shared by the analyzer and
# the harvester below (both must see the same list, in the same order).
SiPixelPhase1TrackEfficiencyConf = cms.VPSet(
  SiPixelPhase1TrackEfficiencyValid,
  SiPixelPhase1TrackEfficiencyMissing,
  SiPixelPhase1TrackEfficiencyInactive,
  SiPixelPhase1TrackEfficiencyEfficiency,
  SiPixelPhase1TrackEfficiencyVertices
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# The DQM analyzer module that fills the histograms defined above from
# clusters, tracks, refitted trajectories and primary vertices.
SiPixelPhase1TrackEfficiencyAnalyzer = DQMEDAnalyzer('SiPixelPhase1TrackEfficiency',
        clusters = cms.InputTag("siPixelClusters"),
        tracks = cms.InputTag("generalTracks"),
        trajectoryInput = cms.InputTag("refittedForPixelDQM"),
        primaryvertices = cms.InputTag("offlinePrimaryVertices"),
        tracker = cms.InputTag("MeasurementTrackerEvent"),
        histograms = SiPixelPhase1TrackEfficiencyConf,
        geometry = SiPixelPhase1Geometry,
        triggerflags = trigger.SiPixelPhase1Triggers,
        # If True, only events with an acceptable primary vertex are used.
        VertexCut = cms.untracked.bool(True)
)
# End-of-job harvesting step: post-processes the booked histograms (e.g.
# builds the profiles/extended maps requested in the specs above).
SiPixelPhase1TrackEfficiencyHarvester = DQMEDHarvester("SiPixelPhase1Harvester",
        histograms = SiPixelPhase1TrackEfficiencyConf,
        geometry = SiPixelPhase1Geometry
)
| [
"FWCore.ParameterSet.Config.InputTag",
"Configuration.Eras.Modifier_run3_common_cff.run3_common.toModify",
"DQMServices.Core.DQMEDHarvester.DQMEDHarvester",
"FWCore.ParameterSet.Config.VPSet",
"FWCore.ParameterSet.Config.untracked.bool"
] | [((4871, 4967), 'Configuration.Eras.Modifier_run3_common_cff.run3_common.toModify', 'run3_common.toModify', (['SiPixelPhase1TrackEfficiencyVertices'], {'range_max': '(150.5)', 'range_nbins': '(151)'}), '(SiPixelPhase1TrackEfficiencyVertices, range_max=150.5,\n range_nbins=151)\n', (4891, 4967), False, 'from Configuration.Eras.Modifier_run3_common_cff import run3_common\n'), ((5002, 5215), 'FWCore.ParameterSet.Config.VPSet', 'cms.VPSet', (['SiPixelPhase1TrackEfficiencyValid', 'SiPixelPhase1TrackEfficiencyMissing', 'SiPixelPhase1TrackEfficiencyInactive', 'SiPixelPhase1TrackEfficiencyEfficiency', 'SiPixelPhase1TrackEfficiencyVertices'], {}), '(SiPixelPhase1TrackEfficiencyValid,\n SiPixelPhase1TrackEfficiencyMissing,\n SiPixelPhase1TrackEfficiencyInactive,\n SiPixelPhase1TrackEfficiencyEfficiency,\n SiPixelPhase1TrackEfficiencyVertices)\n', (5011, 5215), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5884, 6006), 'DQMServices.Core.DQMEDHarvester.DQMEDHarvester', 'DQMEDHarvester', (['"""SiPixelPhase1Harvester"""'], {'histograms': 'SiPixelPhase1TrackEfficiencyConf', 'geometry': 'SiPixelPhase1Geometry'}), "('SiPixelPhase1Harvester', histograms=\n SiPixelPhase1TrackEfficiencyConf, geometry=SiPixelPhase1Geometry)\n", (5898, 6006), False, 'from DQMServices.Core.DQMEDHarvester import DQMEDHarvester\n'), ((5375, 5406), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""siPixelClusters"""'], {}), "('siPixelClusters')\n", (5387, 5406), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5425, 5454), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""generalTracks"""'], {}), "('generalTracks')\n", (5437, 5454), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5482, 5517), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""refittedForPixelDQM"""'], {}), "('refittedForPixelDQM')\n", (5494, 5517), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5546, 5584), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', 
(['"""offlinePrimaryVertices"""'], {}), "('offlinePrimaryVertices')\n", (5558, 5584), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5604, 5643), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""MeasurementTrackerEvent"""'], {}), "('MeasurementTrackerEvent')\n", (5616, 5643), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5816, 5840), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (5834, 5840), True, 'import FWCore.ParameterSet.Config as cms\n')] |
from utils._context.library_version import LibraryVersion, Version
from utils import context
# Stub out warmups so importing/using `context` performs no side effects
# (network calls, container setup) while these unit tests run.
context.execute_warmups = lambda *args, **kwargs: None
def test_version_comparizon():
    """Exercise the full rich-comparison surface of Version, including the
    reflected operators and mixed string/Version operands."""
    ver = Version("1.0", "some_component")

    # Equality and inequality against plain strings.
    assert ver == "1.0"
    assert ver != "1.1"

    # <= and its reflected form.
    assert ver <= "1.1"
    assert ver <= "1.0"
    assert "1.1" >= ver
    assert "1.0" >= ver

    # Strict less-than, both directions.
    assert ver < "1.1"
    assert "1.1" > ver

    # >= and its reflected form.
    assert ver >= "0.9"
    assert ver >= "1.0"
    assert "0.9" <= ver
    assert "1.0" <= ver

    # Strict greater-than, both directions.
    assert ver > "0.9"
    assert "0.9" < ver

    # A leading "v" in the version string must not break ordering.
    assert Version("1.31.1", "") < "v1.34.1"
    assert "1.31.1" < Version("v1.34.1", "")
    assert Version("1.31.1", "") < Version("v1.34.1", "")

    # Ruby "gem list"-style strings parse into comparable versions.
    beta = Version(" * ddtrace (1.0.0.beta1)", "ruby")
    beta_with_commit = Version(" * ddtrace (1.0.0.beta1 de82857)", "ruby")
    assert beta == Version("1.0.0.beta1", "ruby")
    assert beta  # a successfully parsed version is truthy
    assert beta < beta_with_commit
    assert beta_with_commit < Version("1.0.0", "ruby")
    assert Version("1.0.0beta1", "ruby") < Version("1.0.0beta1+8a50f1f", "ruby")

    # Python dev/rc pre-release ordering.
    assert Version("1.1.0rc2.dev15+gc41d325d", "python") >= "1.1.0rc2.dev"
    assert Version("1.1.0", "python") >= "1.1.0rc2.dev"
def test_version_serialization():
    """Check that raw version strings from various tracers normalize to a
    canonical form, for both equality comparisons and str()."""
    # Leading "v" prefix is dropped.
    assert Version("v1.3.1", "cpp") == "1.3.1"
    assert str(Version("v1.3.1", "cpp")) == "1.3.1"

    # PEP 440 dev/local segments survive a round-trip through str().
    dev_build = Version("0.53.0.dev70+g494e6dc0", "some comp")
    assert dev_build == "0.53.0.dev70+g494e6dc0"
    assert str(dev_build) == "0.53.0.dev70+g494e6dc0"

    # Ruby "gem list" output is unwrapped and normalized.
    appsec = Version(" * ddtrace (0.53.0.appsec.180045)", "ruby")
    assert appsec == Version("0.53.0appsec.180045", "ruby")
    assert appsec == "0.53.0appsec.180045"

    prerelease = Version(" * ddtrace (1.0.0.beta1)", "ruby")
    assert prerelease == Version("1.0.0beta1", "ruby")

    with_commit = Version(" * ddtrace (1.0.0.beta1 de82857)", "ruby")
    assert with_commit == Version("1.0.0beta1+de82857", "ruby")

    # libddwaf's bundler-style line.
    waf = Version("* libddwaf (1.0.14.1.0.beta1)", "libddwaf")
    assert waf == Version("1.0.14.1.0.beta1", "libddwaf")
    assert waf == "1.0.14.1.0.beta1"

    # Agent banner: only the version number itself is kept.
    agent = Version("Agent 7.33.0 - Commit: e6cfcb9 - Serialization version: v5.0.4 - Go version: go1.16.7", "agent")
    assert agent == "7.33.0"

    # PHP nightly suffix is dropped; nodejs "pre" tags are preserved.
    php = Version("1.0.0-nightly", "php")
    assert php == "1.0.0"
    node = Version("3.0.0pre0", "nodejs")
    assert node == "3.0.0pre0"
def test_library_version():
    """Exercise LibraryVersion: name-only matching, name@version ordering,
    and the fact that comparisons against a *different* library are False."""
    # Name-only instance: matches on the library name alone.
    bare = LibraryVersion("p")
    assert bare == "p"
    assert bare != "u"

    # Name + version instance: full rich-comparison surface.
    versioned = LibraryVersion("p", "1.0")
    assert versioned == "p@1.0"
    assert versioned == "p"
    assert versioned != "p@1.1"
    assert versioned != "u"
    assert versioned <= "p@1.1"
    assert versioned <= "p@1.0"
    assert "p@1.1" >= versioned
    assert "p@1.0" >= versioned
    assert versioned < "p@1.1"
    assert "p@1.1" > versioned
    assert versioned >= "p@0.9"
    assert versioned >= "p@1.0"
    assert "p@0.9" <= versioned
    assert "p@1.0" <= versioned
    assert versioned > "p@0.9"
    assert "p@0.9" < versioned

    # Ordering against another library's version is False in every direction.
    assert (versioned <= "u@1.0") is False
    assert (versioned >= "u@1.0") is False
    assert ("u@1.0" <= versioned) is False
    assert ("u@1.0" >= versioned) is False

    # Same rejection for a version-less instance.
    unversioned = LibraryVersion("p")
    assert ("u@1.0" == unversioned) is False
    assert ("u@1.0" <= unversioned) is False

    # Dev/build metadata is kept in the comparison string.
    python_dev = LibraryVersion("python", "0.53.0.dev70+g494e6dc0")
    assert python_dev == "python@0.53.0.dev70+g494e6dc0"

    # Java "~<hash>" suffixes compare as the bare version.
    java = LibraryVersion("java", "0.94.1~dde6877139")
    assert java == "java@0.94.1"
    assert java >= "java@0.94.1"
    assert java < "java@0.94.2"

    java_snapshot = LibraryVersion("java", "0.94.0-SNAPSHOT~57664cfbe5")
    assert java_snapshot == "java@0.94.0"
    assert java_snapshot >= "java@0.94.0"
    assert java_snapshot < "java@0.94.1"
| [
"utils._context.library_version.Version",
"utils._context.library_version.LibraryVersion"
] | [((191, 223), 'utils._context.library_version.Version', 'Version', (['"""1.0"""', '"""some_component"""'], {}), "('1.0', 'some_component')\n", (198, 223), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((784, 828), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1)', 'ruby')\n", (791, 828), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1390, 1436), 'utils._context.library_version.Version', 'Version', (['"""0.53.0.dev70+g494e6dc0"""', '"""some comp"""'], {}), "('0.53.0.dev70+g494e6dc0', 'some comp')\n", (1397, 1436), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1533, 1586), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (0.53.0.appsec.180045)"""', '"""ruby"""'], {}), "(' * ddtrace (0.53.0.appsec.180045)', 'ruby')\n", (1540, 1586), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1689, 1733), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1)', 'ruby')\n", (1696, 1733), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1789, 1841), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1 de82857)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1 de82857)', 'ruby')\n", (1796, 1841), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1905, 1957), 'utils._context.library_version.Version', 'Version', (['"""* libddwaf (1.0.14.1.0.beta1)"""', '"""libddwaf"""'], {}), "('* libddwaf (1.0.14.1.0.beta1)', 'libddwaf')\n", (1912, 1957), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((2058, 2173), 'utils._context.library_version.Version', 'Version', (['"""Agent 7.33.0 - Commit: e6cfcb9 - Serialization version: v5.0.4 - Go 
version: go1.16.7"""', '"""agent"""'], {}), "(\n 'Agent 7.33.0 - Commit: e6cfcb9 - Serialization version: v5.0.4 - Go version: go1.16.7'\n , 'agent')\n", (2065, 2173), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((2198, 2229), 'utils._context.library_version.Version', 'Version', (['"""1.0.0-nightly"""', '"""php"""'], {}), "('1.0.0-nightly', 'php')\n", (2205, 2229), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((2263, 2293), 'utils._context.library_version.Version', 'Version', (['"""3.0.0pre0"""', '"""nodejs"""'], {}), "('3.0.0pre0', 'nodejs')\n", (2270, 2293), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((2361, 2380), 'utils._context.library_version.LibraryVersion', 'LibraryVersion', (['"""p"""'], {}), "('p')\n", (2375, 2380), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((2430, 2456), 'utils._context.library_version.LibraryVersion', 'LibraryVersion', (['"""p"""', '"""1.0"""'], {}), "('p', '1.0')\n", (2444, 2456), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((2985, 3004), 'utils._context.library_version.LibraryVersion', 'LibraryVersion', (['"""p"""'], {}), "('p')\n", (2999, 3004), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((3085, 3135), 'utils._context.library_version.LibraryVersion', 'LibraryVersion', (['"""python"""', '"""0.53.0.dev70+g494e6dc0"""'], {}), "('python', '0.53.0.dev70+g494e6dc0')\n", (3099, 3135), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((3193, 3236), 'utils._context.library_version.LibraryVersion', 'LibraryVersion', (['"""java"""', '"""0.94.1~dde6877139"""'], {}), "('java', '0.94.1~dde6877139')\n", (3207, 3236), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((3335, 3387), 'utils._context.library_version.LibraryVersion', 'LibraryVersion', (['"""java"""', 
'"""0.94.0-SNAPSHOT~57664cfbe5"""'], {}), "('java', '0.94.0-SNAPSHOT~57664cfbe5')\n", (3349, 3387), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((545, 566), 'utils._context.library_version.Version', 'Version', (['"""1.31.1"""', '""""""'], {}), "('1.31.1', '')\n", (552, 566), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((601, 623), 'utils._context.library_version.Version', 'Version', (['"""v1.34.1"""', '""""""'], {}), "('v1.34.1', '')\n", (608, 623), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((635, 656), 'utils._context.library_version.Version', 'Version', (['"""1.31.1"""', '""""""'], {}), "('1.31.1', '')\n", (642, 656), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((659, 681), 'utils._context.library_version.Version', 'Version', (['"""v1.34.1"""', '""""""'], {}), "('v1.34.1', '')\n", (666, 681), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((694, 738), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1)', 'ruby')\n", (701, 738), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((742, 772), 'utils._context.library_version.Version', 'Version', (['"""1.0.0.beta1"""', '"""ruby"""'], {}), "('1.0.0.beta1', 'ruby')\n", (749, 772), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((840, 884), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1)', 'ruby')\n", (847, 884), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((887, 939), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1 de82857)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1 de82857)', 'ruby')\n", (894, 939), False, 'from 
utils._context.library_version import LibraryVersion, Version\n'), ((951, 1003), 'utils._context.library_version.Version', 'Version', (['""" * ddtrace (1.0.0.beta1 de82857)"""', '"""ruby"""'], {}), "(' * ddtrace (1.0.0.beta1 de82857)', 'ruby')\n", (958, 1003), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1006, 1030), 'utils._context.library_version.Version', 'Version', (['"""1.0.0"""', '"""ruby"""'], {}), "('1.0.0', 'ruby')\n", (1013, 1030), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1043, 1072), 'utils._context.library_version.Version', 'Version', (['"""1.0.0beta1"""', '"""ruby"""'], {}), "('1.0.0beta1', 'ruby')\n", (1050, 1072), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1075, 1112), 'utils._context.library_version.Version', 'Version', (['"""1.0.0beta1+8a50f1f"""', '"""ruby"""'], {}), "('1.0.0beta1+8a50f1f', 'ruby')\n", (1082, 1112), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1125, 1170), 'utils._context.library_version.Version', 'Version', (['"""1.1.0rc2.dev15+gc41d325d"""', '"""python"""'], {}), "('1.1.0rc2.dev15+gc41d325d', 'python')\n", (1132, 1170), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1200, 1226), 'utils._context.library_version.Version', 'Version', (['"""1.1.0"""', '"""python"""'], {}), "('1.1.0', 'python')\n", (1207, 1226), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1293, 1317), 'utils._context.library_version.Version', 'Version', (['"""v1.3.1"""', '"""cpp"""'], {}), "('v1.3.1', 'cpp')\n", (1300, 1317), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1603, 1641), 'utils._context.library_version.Version', 'Version', (['"""0.53.0appsec.180045"""', '"""ruby"""'], {}), "('0.53.0appsec.180045', 'ruby')\n", (1610, 1641), False, 'from utils._context.library_version import LibraryVersion, 
Version\n'), ((1750, 1779), 'utils._context.library_version.Version', 'Version', (['"""1.0.0beta1"""', '"""ruby"""'], {}), "('1.0.0beta1', 'ruby')\n", (1757, 1779), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1858, 1895), 'utils._context.library_version.Version', 'Version', (['"""1.0.0beta1+de82857"""', '"""ruby"""'], {}), "('1.0.0beta1+de82857', 'ruby')\n", (1865, 1895), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1974, 2013), 'utils._context.library_version.Version', 'Version', (['"""1.0.14.1.0.beta1"""', '"""libddwaf"""'], {}), "('1.0.14.1.0.beta1', 'libddwaf')\n", (1981, 2013), False, 'from utils._context.library_version import LibraryVersion, Version\n'), ((1344, 1368), 'utils._context.library_version.Version', 'Version', (['"""v1.3.1"""', '"""cpp"""'], {}), "('v1.3.1', 'cpp')\n", (1351, 1368), False, 'from utils._context.library_version import LibraryVersion, Version\n')] |
"""
AssignResourcesCommand class for SubarrayNodeLow.
"""
# Standard python imports
import json
# Additional import
from ska.base.commands import ResultCode
from ska.base import SKASubarray
from . import const
from tmc.common.tango_server_helper import TangoServerHelper
class AssignResources(SKASubarray.AssignResourcesCommand):
"""
A class for SubarrayNodelow's AssignResources() command.
Assigns the resources to the subarray. It accepts station ids, channels, station beam ids and channels
in JSON string format.
"""
def do(self, argin):
"""
Method to invoke AssignResources command.
:param argin: DevString in JSON form containing following fields:
interface: Schema to allocate assign resources.
mccs:
subarray_beam_ids: list of integers
station_ids: list of integers
channel_blocks: list of integers
Example:
{"interface":"https://schema.skao.int/ska-low-tmc-assignedresources/2.0","mccs":{"subarray_beam_ids":[1],"station_ids":[[1,2]],"channel_blocks":[3]}}
return:
A tuple containing ResultCode and string.
"""
device_data = self.target
this_server = TangoServerHelper.get_instance()
device_data.is_end_command = False
device_data.is_release_resources = False
device_data.is_abort_command_executed = False
device_data.is_obsreset_command_executed = False
device_data.is_restart_command_executed = False
# TODO: For now storing resources as station ids
input_str = json.loads(argin)
device_data.resource_list = input_str["mccs"]["station_ids"]
log_msg = f"{const.STR_ASSIGN_RES_EXEC}STARTED"
self.logger.debug(log_msg)
this_server.write_attr("activityMessage", log_msg, False)
return (ResultCode.STARTED, log_msg)
| [
"tmc.common.tango_server_helper.TangoServerHelper.get_instance",
"json.loads"
] | [((1254, 1286), 'tmc.common.tango_server_helper.TangoServerHelper.get_instance', 'TangoServerHelper.get_instance', ([], {}), '()\n', (1284, 1286), False, 'from tmc.common.tango_server_helper import TangoServerHelper\n'), ((1623, 1640), 'json.loads', 'json.loads', (['argin'], {}), '(argin)\n', (1633, 1640), False, 'import json\n')] |
import json
import sys
import traceback
import yaml
import urllib3
from requests.exceptions import ConnectionError, SSLError
from .client import CLI
from awxkit.utils import to_str
from awxkit.exceptions import Unauthorized, Common
from awxkit.cli.utils import cprint
# you'll only see these warnings if you've explicitly *disabled* SSL
# verification, so they're a little annoying, redundant
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def run(stdout=sys.stdout, stderr=sys.stderr, argv=[]):
cli = CLI(stdout=stdout, stderr=stderr)
try:
cli.parse_args(argv or sys.argv)
cli.connect()
cli.parse_resource()
except KeyboardInterrupt:
sys.exit(1)
except ConnectionError as e:
cli.parser.print_help()
msg = (
'\nThere was a network error of some kind trying to reach '
'{}.\nYou might need to specify (or double-check) '
'--conf.host'.format(cli.get_config('host'))
)
if isinstance(e, SSLError):
msg = (
'\nCould not establish a secure connection. '
'\nPlease add your server to your certificate authority.'
'\nYou can also run this command by specifying '
'-k or --conf.insecure'
)
cprint(msg + '\n', 'red', file=stderr)
cprint(e, 'red', file=stderr)
sys.exit(1)
except Unauthorized as e:
cli.parser.print_help()
msg = '\nValid credentials were not provided.\n$ awx login --help'
cprint(msg + '\n', 'red', file=stderr)
if cli.verbose:
cprint(e.__class__, 'red', file=stderr)
sys.exit(1)
except Common as e:
if cli.verbose:
print(traceback.format_exc(), sys.stderr)
if cli.get_config('format') == 'json':
json.dump(e.msg, sys.stdout)
print('')
elif cli.get_config('format') == 'yaml':
sys.stdout.write(to_str(
yaml.safe_dump(
e.msg,
default_flow_style=False,
encoding='utf-8',
allow_unicode=True
)
))
elif cli.get_config('format') == 'human':
sys.stdout.write(e.__class__.__name__)
print('')
sys.exit(1)
except Exception as e:
if cli.verbose:
e = traceback.format_exc()
cprint(e, 'red', file=stderr)
sys.exit(1)
| [
"traceback.format_exc",
"yaml.safe_dump",
"awxkit.cli.utils.cprint",
"urllib3.disable_warnings",
"sys.exit",
"json.dump",
"sys.stdout.write"
] | [((397, 464), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (421, 464), False, 'import urllib3\n'), ((706, 717), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (714, 717), False, 'import sys\n'), ((1322, 1360), 'awxkit.cli.utils.cprint', 'cprint', (["(msg + '\\n')", '"""red"""'], {'file': 'stderr'}), "(msg + '\\n', 'red', file=stderr)\n", (1328, 1360), False, 'from awxkit.cli.utils import cprint\n'), ((1369, 1398), 'awxkit.cli.utils.cprint', 'cprint', (['e', '"""red"""'], {'file': 'stderr'}), "(e, 'red', file=stderr)\n", (1375, 1398), False, 'from awxkit.cli.utils import cprint\n'), ((1407, 1418), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1415, 1418), False, 'import sys\n'), ((1564, 1602), 'awxkit.cli.utils.cprint', 'cprint', (["(msg + '\\n')", '"""red"""'], {'file': 'stderr'}), "(msg + '\\n', 'red', file=stderr)\n", (1570, 1602), False, 'from awxkit.cli.utils import cprint\n'), ((1687, 1698), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1695, 1698), False, 'import sys\n'), ((2343, 2354), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2351, 2354), False, 'import sys\n'), ((2453, 2482), 'awxkit.cli.utils.cprint', 'cprint', (['e', '"""red"""'], {'file': 'stderr'}), "(e, 'red', file=stderr)\n", (2459, 2482), False, 'from awxkit.cli.utils import cprint\n'), ((2491, 2502), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2499, 2502), False, 'import sys\n'), ((1639, 1678), 'awxkit.cli.utils.cprint', 'cprint', (['e.__class__', '"""red"""'], {'file': 'stderr'}), "(e.__class__, 'red', file=stderr)\n", (1645, 1678), False, 'from awxkit.cli.utils import cprint\n'), ((1860, 1888), 'json.dump', 'json.dump', (['e.msg', 'sys.stdout'], {}), '(e.msg, sys.stdout)\n', (1869, 1888), False, 'import json\n'), ((2422, 2444), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2442, 2444), False, 'import traceback\n'), ((1765, 1787), 
'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1785, 1787), False, 'import traceback\n'), ((2274, 2312), 'sys.stdout.write', 'sys.stdout.write', (['e.__class__.__name__'], {}), '(e.__class__.__name__)\n', (2290, 2312), False, 'import sys\n'), ((2013, 2102), 'yaml.safe_dump', 'yaml.safe_dump', (['e.msg'], {'default_flow_style': '(False)', 'encoding': '"""utf-8"""', 'allow_unicode': '(True)'}), "(e.msg, default_flow_style=False, encoding='utf-8',\n allow_unicode=True)\n", (2027, 2102), False, 'import yaml\n')] |
from django import test
from django.http import HttpResponse
from django.test import override_settings
from django.utils import translation
from mock import mock
from model_mommy import mommy
from devilry.devilry_account import middleware
@override_settings(
LANGUAGE_CODE='en',
LANGUAGES=[
('en', 'English'),
('nb', 'Norwegian Bokmal'),
]
)
class TestAccountMiddleware(test.TestCase):
def tearDown(self):
translation.deactivate_all()
def __make_mock_request(self, user=None, is_authenticated=False, languagecode='en'):
mockrequest = mock.MagicMock()
mockrequest.session = self.client.session
mockrequest.session['SELECTED_LANGUAGE_CODE'] = languagecode
mockrequest.user = user or mock.MagicMock()
mockrequest.user.is_authenticated.return_value = is_authenticated
return mockrequest
def test_process_request_unauthenticated_user(self):
local_middleware = middleware.LocalMiddleware()
mockrequest = self.__make_mock_request(languagecode='nb')
local_middleware.process_request(request=mockrequest)
self.assertEqual('nb', translation.get_language())
self.assertEqual('nb', mockrequest.LANGUAGE_CODE)
def test_process_request_authenticated_user(self):
local_middleware = middleware.LocalMiddleware()
user = mommy.make('devilry_account.User', languagecode='nb')
mockrequest = self.__make_mock_request(user=user, is_authenticated=True)
local_middleware.process_request(request=mockrequest)
self.assertEqual('nb', translation.get_language())
self.assertEqual('nb', mockrequest.LANGUAGE_CODE)
def test_process_response(self):
local_middleware = middleware.LocalMiddleware()
translation.activate('nb')
mockrequest = self.__make_mock_request(languagecode='nb')
response = local_middleware.process_response(request=mockrequest, response=HttpResponse())
self.assertEqual('nb', response['Content-Language'])
| [
"model_mommy.mommy.make",
"django.http.HttpResponse",
"django.utils.translation.activate",
"mock.mock.MagicMock",
"django.test.override_settings",
"django.utils.translation.deactivate_all",
"django.utils.translation.get_language",
"devilry.devilry_account.middleware.LocalMiddleware"
] | [((243, 343), 'django.test.override_settings', 'override_settings', ([], {'LANGUAGE_CODE': '"""en"""', 'LANGUAGES': "[('en', 'English'), ('nb', 'Norwegian Bokmal')]"}), "(LANGUAGE_CODE='en', LANGUAGES=[('en', 'English'), ('nb',\n 'Norwegian Bokmal')])\n", (260, 343), False, 'from django.test import override_settings\n'), ((449, 477), 'django.utils.translation.deactivate_all', 'translation.deactivate_all', ([], {}), '()\n', (475, 477), False, 'from django.utils import translation\n'), ((590, 606), 'mock.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (604, 606), False, 'from mock import mock\n'), ((964, 992), 'devilry.devilry_account.middleware.LocalMiddleware', 'middleware.LocalMiddleware', ([], {}), '()\n', (990, 992), False, 'from devilry.devilry_account import middleware\n'), ((1321, 1349), 'devilry.devilry_account.middleware.LocalMiddleware', 'middleware.LocalMiddleware', ([], {}), '()\n', (1347, 1349), False, 'from devilry.devilry_account import middleware\n'), ((1365, 1418), 'model_mommy.mommy.make', 'mommy.make', (['"""devilry_account.User"""'], {'languagecode': '"""nb"""'}), "('devilry_account.User', languagecode='nb')\n", (1375, 1418), False, 'from model_mommy import mommy\n'), ((1744, 1772), 'devilry.devilry_account.middleware.LocalMiddleware', 'middleware.LocalMiddleware', ([], {}), '()\n', (1770, 1772), False, 'from devilry.devilry_account import middleware\n'), ((1781, 1807), 'django.utils.translation.activate', 'translation.activate', (['"""nb"""'], {}), "('nb')\n", (1801, 1807), False, 'from django.utils import translation\n'), ((761, 777), 'mock.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (775, 777), False, 'from mock import mock\n'), ((1152, 1178), 'django.utils.translation.get_language', 'translation.get_language', ([], {}), '()\n', (1176, 1178), False, 'from django.utils import translation\n'), ((1593, 1619), 'django.utils.translation.get_language', 'translation.get_language', ([], {}), '()\n', (1617, 1619), False, 'from 
django.utils import translation\n'), ((1957, 1971), 'django.http.HttpResponse', 'HttpResponse', ([], {}), '()\n', (1969, 1971), False, 'from django.http import HttpResponse\n')] |
"""
Django settings for thesaurus project.
Generated by 'django-admin startproject' using Django 3.0.5.
"""
import os
import re
from logging.config import dictConfig
from django.core.validators import MinValueValidator
from django.db import DEFAULT_DB_ALIAS
from django.urls import reverse_lazy
from django.utils.log import DEFAULT_LOGGING
from django.utils.translation import gettext_lazy as _
from .config import AutoConfig
config = AutoConfig(search_path='/run/secrets/') # .env file is injected by docker secrets
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = config("SECRET_KEY")
DEBUG = config("DEBUG", cast=bool, default=False)
ENVIRONMENT_NAME = config("ENVIRONMENT_LABEL", cast=str, default="Unknown environment")
ENVIRONMENT_COLOR = config("ENVIRONMENT_COLOR", cast=str, default="#777")
VERSION = config('THESAURUS_VERSION', default='unknown')
ALLOWED_HOSTS = config("ALLOWED_HOSTS").split(" ")
INSTALLED_APPS = [
'django_admin_env_notice',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.postgres',
'constance',
'constance.backends.database',
'apps.audit',
'apps.accounts',
'apps.api',
'apps.attachment',
'apps.emails',
'apps.frontend',
'apps.thesis',
'apps.review',
'apps.utils',
'debug_toolbar',
'django_better_admin_arrayfield',
'django_bleach',
'django_extensions',
'django_filters',
'django_python3_ldap',
'loginas',
'mailqueue',
'rest_framework',
'webpack_loader',
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'apps.utils.middleware.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'apps.audit.middleware.AuditMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTH_USER_MODEL = 'accounts.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django_admin_env_notice.context_processors.from_settings',
],
},
},
]
WSGI_APPLICATION = 'thesaurus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
DEFAULT_DB_ALIAS: {
"ENGINE": config("SQL_ENGINE", default="django.db.backends.sqlite3"),
"NAME": config("SQL_DATABASE", default=os.path.join(BASE_DIR, "db.sqlite3")),
"USER": config("SQL_USER", default="user"),
"PASSWORD": config("SQL_PASSWORD", default="password"),
"HOST": config("SQL_HOST", default="localhost"),
"PORT": config("SQL_PORT", default="5432"),
"ATOMIC_REQUESTS": True,
'OPTIONS': {
'options': '-c search_path=public,audit'
},
}
}
AUDIT_REWRITE_PKS_TO_LABELS_FOR_MODELS = (
'attachment.TypeAttachment',
'thesis.Category',
)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
AUTHENTICATION_BACKENDS = [
'django_python3_ldap.auth.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGES = (
('cs', _('Czech')),
('en', _('English')),
)
LANGUAGE_CODE = 'en'
TIME_ZONE = config('TZ')
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': False,
'BUNDLE_DIR_NAME': './', # must end with slash
'STATS_FILE': config('BUILD_DIR', default='') + 'webpack-stats.json',
}
}
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'apps.api.utils.filters.UnAccentSearchFilter',
'apps.api.utils.filters.RelatedOrderingFilter',
),
'DEFAULT_PERMISSION_CLASSES': (
'apps.api.permissions.RestrictedViewModelPermissions',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'apps.api.authentication.SessionAuthentication',
),
'EXCEPTION_HANDLER': 'apps.api.utils.exceptions.exception_handler',
'PAGE_SIZE': 20,
}
CAN_LOGIN_AS = lambda request, target_user: request.user.is_superuser and not target_user.is_superuser
if DEBUG:
# for django-debug-toolbar
# remote_addr does not matter in debug mode in image
INTERNAL_IPS = type(str('ContainsEverything'), (), {'__contains__': lambda *a: True})()
###### LDAP
# https://github.com/etianen/django-python3-ldap
LDAP_AUTH_URL = f"ldap://{config('LDAP_HOST', cast=str)}:{config('LDAP_PORT', cast=str)}"
LDAP_AUTH_USE_TLS = False
LDAP_AUTH_CONNECTION_USERNAME = config('LDAP_USERNAME', cast=str)
LDAP_AUTH_CONNECTION_PASSWORD = config('LDAP_PASSWORD', cast=str)
LDAP_AUTH_CONNECT_TIMEOUT = None
LDAP_AUTH_RECEIVE_TIMEOUT = None
LDAP_AUTH_SEARCH_BASE = config('LDAP_SEARCH_BASE', cast=str)
LDAP_AUTH_ACTIVE_DIRECTORY_DOMAIN = config('LDAP_ACTIVE_DIRECTORY_DOMAIN', cast=str)
LDAP_AUTH_OBJECT_CLASS = "organizationalPerson"
LDAP_AUTH_USER_FIELDS = dict(
username="sAMAccountName",
first_name="givenName",
last_name="sn",
email="userPrincipalName",
)
LDAP_AUTH_USER_LOOKUP_FIELDS = ("username",)
LDAP_AUTH_CLEAN_USER_DATA = "django_python3_ldap.utils.clean_user_data"
LDAP_AUTH_SYNC_USER_RELATIONS = "apps.accounts.ldap.sync_user_relations"
LDAP_AUTH_FORMAT_SEARCH_FILTERS = "django_python3_ldap.utils.format_search_filters"
LDAP_AUTH_FORMAT_USERNAME = "django_python3_ldap.utils.format_username_active_directory_principal"
# custom app config
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_CONFIG = {
'MAX_OPEN_RESERVATIONS_COUNT': (
6,
_('Maximal count of opened reservations linked to one user (inclusive).'),
'non_negative_small_integer',
),
}
CONSTANCE_ADDITIONAL_FIELDS = {
'non_negative_small_integer': ['django.forms.fields.IntegerField', {
'validators': [
MinValueValidator(limit_value=0)
]
}],
}
# emailing
EMAIL_BACKEND = config('EMAIL_BACKEND')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT', cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
MAILQUEUE_QUEUE_UP: bool = config('EMAIL_USE_QUEUE', default=not DEBUG, cast=bool)
MAILQUEUE_STORAGE = True
DEFAULT_FROM_EMAIL: str = config('MAIL_FROM_ADDRESS', default='noreply@thesaurus')
MAIL_SUBJECT_TITLE: str = config('MAIL_SUBJECT_TITLE', default='Thesaurus')
EMAIL_LANGUAGE = 'cs'
# urls definitions
STATIC_URL = '/static/'
STATIC_ROOT = '/usr/src/static'
# public URL for building absolute urls
PUBLIC_HOST: str = config('PUBLIC_HOST', cast=str)
MEDIA_ROOT = '/usr/src/media'
MEDIA_URL = '/media/'
ROOT_URLCONF = 'thesaurus.urls'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
if not DEBUG:
SECURE_HSTS_SECONDS = 15768000
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_REFERRER_POLICY = 'strict-origin'
LOGIN_REDIRECT_URL = reverse_lazy('app')
LOGOUT_REDIRECT_URL = LOGINAS_LOGOUT_REDIRECT_URL = reverse_lazy('login')
LOGIN_URL = reverse_lazy('login')
APPEND_SLASH = False
API_URL_PATTERN = re.compile(r'^/api/.*')
LOCALE_MIDDLEWARE_IGNORE_URLS = (
API_URL_PATTERN,
)
# logging & sentry
SENTRY_DSN = config('SENTRY_DSN', default='')
if SENTRY_DSN:
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration(transaction_style='function_name')],
send_default_pii=True,
)
LOGGING_CONFIG = None
LOGLEVEL = config('LOGLEVEL', default='info').upper()
dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
},
'django.server': DEFAULT_LOGGING['formatters']['django.server'],
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
},
'django.server': DEFAULT_LOGGING['handlers']['django.server'],
},
'loggers': {
'': {
'level': 'WARNING',
'handlers': ['console'],
},
'apps': {
'level': LOGLEVEL,
'handlers': ['console'],
'propagate': False,
},
'django.server': DEFAULT_LOGGING['loggers']['django.server'],
},
})
| [
"re.compile",
"sentry_sdk.integrations.django.DjangoIntegration",
"logging.config.dictConfig",
"django.utils.translation.gettext_lazy",
"os.path.join",
"django.urls.reverse_lazy",
"os.path.abspath",
"django.core.validators.MinValueValidator"
] | [((8661, 8680), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""app"""'], {}), "('app')\n", (8673, 8680), False, 'from django.urls import reverse_lazy\n'), ((8734, 8755), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""login"""'], {}), "('login')\n", (8746, 8755), False, 'from django.urls import reverse_lazy\n'), ((8769, 8790), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""login"""'], {}), "('login')\n", (8781, 8790), False, 'from django.urls import reverse_lazy\n'), ((8832, 8854), 're.compile', 're.compile', (['"""^/api/.*"""'], {}), "('^/api/.*')\n", (8842, 8854), False, 'import re\n'), ((9322, 9922), 'logging.config.dictConfig', 'dictConfig', (["{'version': 1, 'disable_existing_loggers': False, 'formatters': {'default':\n {'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'},\n 'django.server': DEFAULT_LOGGING['formatters']['django.server']},\n 'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter':\n 'default'}, 'django.server': DEFAULT_LOGGING['handlers'][\n 'django.server']}, 'loggers': {'': {'level': 'WARNING', 'handlers': [\n 'console']}, 'apps': {'level': LOGLEVEL, 'handlers': ['console'],\n 'propagate': False}, 'django.server': DEFAULT_LOGGING['loggers'][\n 'django.server']}}"], {}), "({'version': 1, 'disable_existing_loggers': False, 'formatters':\n {'default': {'format':\n '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}, 'django.server':\n DEFAULT_LOGGING['formatters']['django.server']}, 'handlers': {'console':\n {'class': 'logging.StreamHandler', 'formatter': 'default'},\n 'django.server': DEFAULT_LOGGING['handlers']['django.server']},\n 'loggers': {'': {'level': 'WARNING', 'handlers': ['console']}, 'apps':\n {'level': LOGLEVEL, 'handlers': ['console'], 'propagate': False},\n 'django.server': DEFAULT_LOGGING['loggers']['django.server']}})\n", (9332, 9922), False, 'from logging.config import dictConfig\n'), ((4593, 4625), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""locale"""'], {}), "(BASE_DIR, 
'locale')\n", (4605, 4625), False, 'import os\n'), ((4740, 4772), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (4752, 4772), False, 'import os\n'), ((566, 591), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import os\n'), ((4438, 4448), 'django.utils.translation.gettext_lazy', '_', (['"""Czech"""'], {}), "('Czech')\n", (4439, 4448), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4462, 4474), 'django.utils.translation.gettext_lazy', '_', (['"""English"""'], {}), "('English')\n", (4463, 4474), True, 'from django.utils.translation import gettext_lazy as _\n'), ((7220, 7293), 'django.utils.translation.gettext_lazy', '_', (['"""Maximal count of opened reservations linked to one user (inclusive)."""'], {}), "('Maximal count of opened reservations linked to one user (inclusive).')\n", (7221, 7293), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2416, 2451), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (2428, 2451), False, 'import os\n'), ((3252, 3288), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""db.sqlite3"""'], {}), "(BASE_DIR, 'db.sqlite3')\n", (3264, 3288), False, 'import os\n'), ((7483, 7515), 'django.core.validators.MinValueValidator', 'MinValueValidator', ([], {'limit_value': '(0)'}), '(limit_value=0)\n', (7500, 7515), False, 'from django.core.validators import MinValueValidator\n'), ((9150, 9202), 'sentry_sdk.integrations.django.DjangoIntegration', 'DjangoIntegration', ([], {'transaction_style': '"""function_name"""'}), "(transaction_style='function_name')\n", (9167, 9202), False, 'from sentry_sdk.integrations.django import DjangoIntegration\n')] |
from collections import OrderedDict
import numpy as np
from pypospack.qoi import Qoi
class ThermalExpansion(Qoi):
"""
Args:
temperature_min (float,int): beginning of the temperature range in Kelvin
temperature_max (float,int): end of the temperature range in Kelvin
temperature_step (float,int): increments of the temperature range in Kelvin
time_total (int): total simulation time in fs
time_step (int): simulation time step in fs
"""
def __init__(self,qoi_name,structures,
temperature_min=0,
temperature_max=2700,
temperature_step=100,
time_total=10,
time_step=0.001,
supercell=[5,5,5]):
_qoi_name = qoi_name
_qoi_type = 'lmps_thermal_expansion'
_structures = OrderedDict()
_structures['ideal'] = structures['ideal']
Qoi.__init__(self,
qoi_name=_qoi_name,
qoi_type=_qoi_type,
structures=_structures)
self.temperature_min = temperature_min
self.temperature_max = temperature_max
self.temperature_step = temperature_step
self.time_total=time_total
self.time_step=time_step
self.supercell = supercell
def determine_tasks(self):
T = self.temperature_min
while T <= self.temperature_max:
if T == 0:
_ideal_structure_name = self.structures['ideal']
_ideal_task_type = 'lmps_min_all'
_ideal_task_name = '{}.{}'.format(
_ideal_structure_name,
_ideal_task_type
)
_bulk_structure_name = None
self.add_task(
task_type=_ideal_task_type,
task_name=_ideal_task_name,
task_structure=_ideal_structure_name,
bulk_structure_name=_bulk_structure_name,
)
else:
_ideal_structure_name = self.structures['ideal']
_ideal_task_type = 'lmps_npt'.format(T)
_ideal_task_name = '{}.{}_{}'.format(
_ideal_structure_name,
_ideal_task_type,
T
)
_bulk_structure_name = None
self.add_task(
task_type=_ideal_task_type,
task_name=_ideal_task_name,
task_structure=_ideal_structure_name,
bulk_structure_name=_bulk_structure_name,
task_options={
'temperature':T,
'time_total':self.time_total,
'time_step':self.time_step,
'supercell':self.supercell}
)
T = T + self.temperature_step
def calculate_thermal_expansion_coefficient(self,temperatures,lattice_constants):
assert isinstance(temperatures,list)
assert isinstance(lattice_constants,list)
T = list(temperatures)
a0 = list(lattice_constants)
a0_at_0K = a0[0]
for i,v in enumerate(a0):
a0[i] = v/a0_at_0K-1
T = np.array(temperatures)
a0 = np.array(a0)
print(T)
print(a0)
T = T[:,np.newaxis] # T needs to be a column vector
# model is y = a*x
alpha_L,_,_,_ = np.linalg.lstsq(T,a0)
print('alpha_L:{}'.format(alpha_L[0]))
return alpha_L[0]
def calculate_qois(self,task_results):
_prefix = '{}.{}'.format(self.structures['ideal'],self.qoi_type)
s = self.structures['ideal']
T = self.temperature_min
lattice_constants = OrderedDict()
while T <= self.temperature_max:
if T == 0:
lattice_constants[T] = np.sqrt(
task_results["{}.lmps_min_all.a11".format(s)]**2 \
+ task_results['{}.lmps_min_all.a12'.format(s)]**2 \
+ task_results["{}.lmps_min_all.a13".format(s)]**2
)
else:
try:
lattice_constants[T] = task_results["{}.lmps_npt_{}.a1".format(s,T)]
except KeyError as e:
for k,v in task_results.items():
print(k,v)
raise
T = T + self.temperature_step
self.qois = OrderedDict()
# add lattice constants at different temperatures
for k,v in lattice_constants.items():
self.qois['{}.a0_{}'.format(_prefix,T)] = v
_temperatures = [k for k,v in lattice_constants.items()]
_lattice_constants = [v for k,v in lattice_constants.items()]
# add thermal expansion coefficient
self.qois['{}.thermal_expansion_coefficient'.format(_prefix)] = \
self.calculate_thermal_expansion_coefficient(
temperatures=_temperatures,
lattice_constants=_lattice_constants
)
| [
"numpy.array",
"collections.OrderedDict",
"pypospack.qoi.Qoi.__init__",
"numpy.linalg.lstsq"
] | [((819, 832), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (830, 832), False, 'from collections import OrderedDict\n'), ((893, 980), 'pypospack.qoi.Qoi.__init__', 'Qoi.__init__', (['self'], {'qoi_name': '_qoi_name', 'qoi_type': '_qoi_type', 'structures': '_structures'}), '(self, qoi_name=_qoi_name, qoi_type=_qoi_type, structures=\n _structures)\n', (905, 980), False, 'from pypospack.qoi import Qoi\n'), ((3368, 3390), 'numpy.array', 'np.array', (['temperatures'], {}), '(temperatures)\n', (3376, 3390), True, 'import numpy as np\n'), ((3404, 3416), 'numpy.array', 'np.array', (['a0'], {}), '(a0)\n', (3412, 3416), True, 'import numpy as np\n'), ((3576, 3598), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['T', 'a0'], {}), '(T, a0)\n', (3591, 3598), True, 'import numpy as np\n'), ((3900, 3913), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3911, 3913), False, 'from collections import OrderedDict\n'), ((4604, 4617), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4615, 4617), False, 'from collections import OrderedDict\n')] |
import sys
import math
import random
from collections import namedtuple
import time
from pyrf.util import (compute_usable_bins, adjust_usable_fstart_fstop,
trim_to_usable_fstart_fstop, find_saturation)
import numpy as np
from twisted.internet import defer
from pyrf.numpy_util import compute_fft
import struct
# Upper bound on samples-per-packet (SPP) supported by the hardware.
MAXIMUM_SPP = 32768
class correction_vector_acquire(object):
    """Asynchronously download one correction vector from the device.

    Queries the vector size, then pulls the vector down in
    ``transfer_size``-byte chunks via the deferred-returning
    ``dut.correction_data`` call.  When the whole vector has arrived the
    deferred returned by :meth:`get_vector` fires with this object;
    on any failure it fires with ``None``.
    """
    # The accumulated vector. Chunks are joined with b"".join() below, so
    # this must be bytes (it was "" before, which raises TypeError on py3).
    data_buffer = b""
    # Which vector to fetch: "SIGNAL" or "NOISE".
    v_type = "SIGNAL"
    # Device under test; must provide correction_size()/correction_data().
    dut = None
    complete_buffer = False
    # Deferred fired when the transfer completes (or fails).
    d = None
    # Bytes received so far / total bytes expected.
    offset = 0
    size = 0
    # Chunk size per correction_data request.
    transfer_size = 16 * 1024

    def get_vector_loop(self, data):
        """Accumulate one chunk; request the next or fire the deferred when done."""
        self.data_buffer = b"".join([self.data_buffer, data])
        self.offset += len(data)
        if self.offset >= self.size:
            # we have gotten all our data, return this object
            if self.d is not None:
                self.d.callback(self)
        else:
            # more data, grab another set of data
            data1 = self.dut.correction_data(self.v_type, self.offset,
                                             self.transfer_size)
            # and add this function as the callback to continue the loop
            data1.addCallback(self.get_vector_loop)

    def get_vector_data(self, size):
        """Size-query callback: validate the size and start the chunked transfer."""
        if size is None:
            # size query failed; resolve our deferred with None
            if self.d is not None:
                self.d.callback(None)
            return
        self.size = int(size)
        if self.size == 0:
            # nothing to transfer
            if self.d is not None:
                self.d.callback(None)
            return
        if self.size < self.transfer_size:
            self.transfer_size = self.size
        # Grab our first set of data (deferred)
        data = self.dut.correction_data(self.v_type, self.offset,
                                        self.transfer_size)
        # add the self.get_vector_loop call back
        data.addCallback(self.get_vector_loop)

    def error_b(self, failure):
        """Errback: resolve the pending deferred with None."""
        if self.d is not None:
            self.d.callback(None)
        return None

    def get_vector(self, v_type=None):
        """Begin fetching vector *v_type*; return a deferred for the result."""
        self.v_type = v_type
        self.offset = 0
        # BUGFIX: reset the accumulator to bytes (was ""), since chunks are
        # joined with b"".join() in get_vector_loop.
        self.data_buffer = b""
        # Create a deferred the caller can wait on
        d = defer.Deferred()
        self.d = d
        # get our size (deferred) and chain the transfer off it
        size = self.dut.correction_size(self.v_type)
        size.addCallback(self.get_vector_data)
        size.addErrback(self.error_b)
        # return our deferred
        return d
class correction_vector(object):
    """Parsed correction-vector table downloaded from the device.

    Holds a frequency index (frequency in kHz -> vector id) and a set of
    gain-correction vectors stored in micro-dB, and produces an
    interpolated correction (in dB) for a given frequency and length.
    """
    correction_vectors = None
    frequency_index = None
    digest = None

    def __init__(self):
        self.frequency_index = []
        # Vector samples are big-endian 32-bit signed integers.
        self.dy = np.dtype(np.int32).newbyteorder('>')
        self.correction_vectors = {}

    def _binary_search(self, freq):
        """Return the leftmost index whose frequency (kHz*1e3) is >= freq (Hz)."""
        low, high = 0, len(self.frequency_index)
        while low < high:
            middle = (low + high) // 2
            if self.frequency_index[middle][0] * 1e3 < freq:
                low = middle + 1
            else:
                high = middle
        return low

    def _interp(self, in_array, number_of_points):
        """Resample *in_array* (length ``vector_size``) to *number_of_points*."""
        src_index = np.arange(0.0, self.vector_size, 1.0)
        dst_index = np.linspace(0.0, self.vector_size - 1, number_of_points)
        return np.interp(dst_index, src_index, in_array)

    def get_correction_vector(self, freq, number_of_points):
        """Return the correction (dB) for *freq*, resampled to *number_of_points*."""
        # Clamp so a frequency beyond the table maps to the last entry.
        position = min(self._binary_search(freq),
                       len(self.frequency_index) - 1)
        vector_id = self.frequency_index[position][1]
        # Stored values are micro-dB; convert to dB before resampling.
        db_vector = self.correction_vectors[vector_id] / 1000000.0
        return self._interp(db_vector, number_of_points)

    def buffer_to_vector(self, buffer_in):
        """Parse the binary correction blob into the index and vector tables.

        Raises ValueError on a missing or truncated buffer.
        """
        if buffer_in is None:
            raise ValueError
        if len(buffer_in) < 8 + 40:
            raise ValueError
        # 8-byte header: version, #frequencies, #vectors, samples/vector.
        version, freq_num, vector_num, self.vector_size = struct.unpack_from(
            "!HHHH", buffer_in, 0)
        # Skip the header plus 40 reserved (currently unused) bytes.
        cursor = 8 + 40
        # Frequency index: (freq_kHz, vector id) pairs, 6 bytes each.
        table_bytes = 6 * freq_num
        if len(buffer_in) - cursor < table_bytes:
            raise ValueError
        for i in range(freq_num):
            freq, index = struct.unpack_from("!LH", buffer_in, cursor + 6 * i)
            self.frequency_index.append([freq, index])
        cursor += table_bytes
        # Correction vectors: 2-byte id then vector_size int32 micro-dB values.
        for _ in range(vector_num):
            index = struct.unpack_from(">H", buffer_in, cursor)[0]
            cursor += 2
            micro_db = np.frombuffer(buffer_in, dtype=self.dy,
                                     count=self.vector_size, offset=cursor)
            self.correction_vectors[index] = micro_db
            cursor += 4 * self.vector_size
class SweepDeviceError(Exception):
    """Raised by the sweep device to report that an error has occurred."""
class SweepSettings(object):
    """Container for the parameters describing one planned sweep."""

    def __init__(self):
        # Frequency band of the result we will eventually return (Hz).
        self.bandstart = 0.0
        self.bandstop = 0.0
        # Sweep entry tuning range and per-step frequency increment (Hz).
        self.fstart = 0.0
        self.fstop = 0.0
        self.fstep = 0.0
        # RFE mode used by the sweep entry.
        self.rfe_mode = None
        # Whether a DD-mode entry is required, and whether any non-DD
        # entry is needed beyond it.
        self.dd_mode = False
        self.beyond_dd = True
        # Entry attenuation setting.
        self.attenuation = 0
        # Entry packets-per-block and samples-per-packet.
        self.ppb = 1
        self.spp = 0.0
        # Sweep capture iterations.
        self.iterations = 0
        # Expected number of spectral points in the final result.
        self.spectral_points = 0
        # Whether an extra sweep entry is required at the top of the band,
        # and at which frequency.
        self.make_end_entry = False
        self.end_entry_freq = 0.0
        # Number of steps in this sweep.
        self.step_count = 0
        # Actual RBW (Hz) of what we're capturing.
        self.rbw = 0

    def __str__(self):
        values = (self.bandstart, self.bandstop, self.fstart, self.fstop,
                  self.fstep, self.step_count, self.rfe_mode, self.dd_mode,
                  self.beyond_dd, self.attenuation, self.ppb, self.spp,
                  self.iterations, self.spectral_points,
                  self.make_end_entry, self.end_entry_freq, self.rbw)
        return ("SweepSettings[ bandstart = %d, bandstop = %d, fstart = %d, "
                "fstop = %d, fstep = %d, step_count = %d, rfe_mode = %s, "
                "dd_mode = %s, beyond_dd = %s, attenuation = %s, ppb = %d, "
                "spp = %d, iterations = %d, spectral_points = %d, "
                "make_end_entry = %s, end_entry_freq = %d, rbw = %f ]"
                % values)
class SweepPlanner(object):
    """
    An object that plans a sweep based on given parameters.

    :param dev_prop: the sweep device properties
    :type dev_prop: dict
    """
    def __init__(self, dev_prop):
        self.dev_properties = dev_prop
        self._prev_settings = SweepSettings()

    def plan_sweep(self, fstart, fstop, rbw, mode, dev_settings=None):
        """
        Plan the sweep given the inputs.

        :param fstart: requested sweep start frequency (Hz)
        :param fstop: requested sweep stop frequency (Hz)
        :param rbw: requested resolution bandwidth (Hz)
        :param mode: RFE mode, e.g. 'ZIF', 'SH', or 'SHN'
        :param dev_settings: optional device settings dict (may contain
            'attenuator'); ``None`` is treated the same as empty
        :returns: a populated :class:`SweepSettings`
        """
        # initialize the sweep settings variable
        sweep_settings = SweepSettings()
        # assign the sweep mode and start/stop
        sweep_settings.rfe_mode = mode
        sweep_settings.bandstart = fstart
        sweep_settings.bandstop = fstop
        # BUGFIX: the default used to be a mutable {} and the membership
        # test crashed with TypeError when a caller explicitly passed None
        # (as SweepDevice.capture_power_spectrum does by default).
        if dev_settings and 'attenuator' in dev_settings:
            sweep_settings.attenuation = dev_settings['attenuator']
        # grab the usable bw of the current mode
        usable_bw = self.dev_properties.USABLE_BW[mode]
        # calculate the required SPP to get the RBW desired
        sweep_settings.spp = self.dev_properties.FULL_BW[mode] / rbw
        # find closest multiple of 32 because hardware
        sweep_settings.spp = int(32 * round(float(sweep_settings.spp) / 32))
        # double the points for SH/SHN mode
        if mode in ['SH', 'SHN']:
            sweep_settings.spp = sweep_settings.spp * 2
        # if we're using zif mode, but we have a DD entry, we have half the SPP avaible, since DD is I-only and ZIF is IQ
        if (mode == 'ZIF') and sweep_settings.dd_mode:
            maxspp = self.dev_properties.MAX_SPP / 2
        else:
            maxspp = self.dev_properties.MAX_SPP
        # adjust SPP if it's too big
        sweep_settings.spp = min(maxspp, sweep_settings.spp)
        # figure out our actual RBW (account for real vs complex data)
        sweep_settings.rbw = self.dev_properties.FULL_BW[mode] / sweep_settings.spp
        if not (mode == 'ZIF'):
            sweep_settings.rbw = sweep_settings.rbw * 2
        # make sure our result is atleast 1 RBW big
        if (sweep_settings.bandstop - sweep_settings.bandstart) < sweep_settings.rbw:
            fstop = sweep_settings.bandstart + sweep_settings.rbw
            sweep_settings.bandstop = fstop
        # change fstart and stop by a bit to account for floating point errors
        # TODO: make this take into account tuning resolution
        fstart -= sweep_settings.rbw * 4
        fstop += sweep_settings.rbw * 4
        # calculate fstart frequency
        if fstart < self.dev_properties.MIN_TUNABLE[mode]:
            sweep_settings.dd_mode = True
            sweep_settings.fstart = self.dev_properties.MIN_TUNABLE[mode] + (usable_bw / 2)
            sweep_settings.step_count += 1
        # make sure we don't accidentally make an fstart that's beyond our tuning range
        elif (fstart + (usable_bw / 2)) > self.dev_properties.MAX_TUNABLE[mode]:
            sweep_settings.dd_mode = False
            sweep_settings.fstart = self.dev_properties.MAX_TUNABLE[mode] - (usable_bw / 2)
        else:
            sweep_settings.dd_mode = False
            sweep_settings.fstart = fstart + (usable_bw / 2)
        # check if non-dd mode is required
        if fstop <= self.dev_properties.MIN_TUNABLE[mode]:
            sweep_settings.beyond_dd = False
        else:
            sweep_settings.beyond_dd = True
            sweep_settings.step_count += 1
        # assign the sweep entry's step frequency reducing by a couple rbw to account for floating point errors
        # TODO: make this take into account tuning resolution
        sweep_settings.fstep = usable_bw - (sweep_settings.rbw * 4)
        # calculate the fstop of the sweep entry from fstart and how many usable_bw's we need
        fspan = fstop - sweep_settings.fstart - sweep_settings.rbw
        required_steps = round(fspan / sweep_settings.fstep)
        sweep_settings.fstop = sweep_settings.fstart + (required_steps * sweep_settings.fstep)
        sweep_settings.step_count += required_steps
        # make sure fstop is lower than max tunable
        # - it can sometimes be higher if an fstart is chosen, such that our
        #   fstep causes our fstop to go beyond fmax to cover all the band required
        sweep_settings.make_end_entry = False
        sweep_settings.end_entry_freq = 0
        if sweep_settings.fstop > self.dev_properties.MAX_TUNABLE[mode]:
            # go back one step
            sweep_settings.fstop -= sweep_settings.fstep
            # add an entry for fmax
            sweep_settings.make_end_entry = True
            sweep_settings.end_entry_freq = self.dev_properties.MAX_TUNABLE[mode] - (usable_bw / 2)
        # calculate the expected number of spectral bins required for the SweepEntry
        sweep_settings.spectral_points = int(round((sweep_settings.bandstop - sweep_settings.bandstart) / sweep_settings.rbw))
        # return the sweep_settings
        return sweep_settings
class SweepDevice(object):
    """
    Virtual device that generates power spectrum from a given frequency range
    by sweeping the frequencies with a real device and piecing together the
    FFT results.

    :param real_device: the RF device that will be used for capturing data,
        typically a :class:`pyrf.devices.thinkrf.WSA` instance.
    :param async_callback: a callback to use for async operation (not used if
        *real_device* is using a blocking :class:`PlainSocketConnector`)
    """
    # keep track of the mode
    rfe_mode = None
    # keep track of the fstart/fstop and rbw
    fstart = None
    fstop = None
    rbw = None
    # keep track of non-standard device settings
    device_settings = None
    # keep track of whether DD mode is needed
    dd_mode = False
    # keep track of the sweep settings
    _sweep_settings = None
    # keep track of the packet count
    packet_count = 0
    # determine if a new entry is required
    _new_entry = True
    # array to place spectral data
    spectral_data = []
    # number of captures performed
    capture_count = 0
    # spectrum ("SIGNAL") and noise-floor ("NOISE") correction tables
    sp_corr_obj = None
    nf_corr_obj = None
    # whether spectral flattening is applied in _vrt_receive
    _flattening_enabled = True

    def __init__(self, real_device, async_callback=None):
        # init log string
        self.logstr = ''
        self.logtype = 'NONE'
        # initialize the real device
        self.real_device = real_device
        # request read permission from device
        self.real_device.request_read_perm()
        # keep track of the device properties
        self.dev_properties = self.real_device.properties
        # initialize the geolocation callback
        self._geo_callback_func = None
        self._geo_callback_data = None
        # initialize the sweep planner
        self._sweep_planner = SweepPlanner(self.dev_properties)
        # make sure user passes async callback if the device has async connector
        if real_device.async_connector():
            if not async_callback:
                raise SweepDeviceError(
                    "async_callback required for async operation")
            # disable receiving data until we are expecting it
            real_device.set_async_callback(None)

            # Function to be called when async data is done capturing;
            # parses the downloaded blob into the matching correction table.
            def _save_correction_vector(data_buffer):
                if data_buffer is None:
                    return None
                try:
                    if data_buffer.v_type == "SIGNAL":
                        self.sp_corr_obj = correction_vector()
                        self.sp_corr_obj.buffer_to_vector(data_buffer.data_buffer)
                    elif data_buffer.v_type == "NOISE":
                        self.nf_corr_obj = correction_vector()
                        self.nf_corr_obj.buffer_to_vector(data_buffer.data_buffer)
                except AttributeError:
                    # parsing failed; fall back to "no correction available"
                    if data_buffer.v_type == "SIGNAL":
                        self.sp_corr_obj = None
                    elif data_buffer.v_type == "NOISE":
                        self.nf_corr_obj = None

            # function to catch the errback of the async code. Used to handle
            # the case when we can get the correction vectors.
            def _catch_timeout(failure):
                failure.trap(IOError)
                return None

            vector_obj = correction_vector_acquire()
            vector_obj.dut = real_device
            vector_obj1 = correction_vector_acquire()
            vector_obj1.dut = real_device
            d1 = vector_obj.get_vector("NOISE")
            d1.addCallback(_save_correction_vector)
            d1.addErrback(_catch_timeout)
            d2 = vector_obj1.get_vector("SIGNAL")
            d2.addCallback(_save_correction_vector)
            d2.addErrback(_catch_timeout)
        else:
            # make sure user doesnt pass async callback if the connector uses blocking sockets
            if async_callback:
                raise SweepDeviceError(
                    "async_callback not applicable for sync operation")

            # Synchronously download the named correction vector in chunks;
            # returns the raw bytes, None if empty, raises ValueError on error.
            def _get_correction(dut, v_type=None):
                if v_type.upper() == "SIGNAL" or v_type.upper() == "NOISE":
                    v_type = v_type.upper()
                else:
                    raise ValueError
                max_buf_size = 16*1024
                offset = 0
                # NOTE(review): bin_data starts as str but is joined with
                # b"".join() below -- this likely should be b""; confirm.
                bin_data = ""
                try:
                    signal_size = dut.correction_size(v_type)
                except (IOError, OSError): # this will handle socket.error's
                    raise ValueError
                # We have nothing to transfer
                if signal_size == 0:
                    return None
                # check to see if tere is more data than can be transfer in one
                # go
                if signal_size > max_buf_size:
                    # if so transfer our max buffer size
                    transfer_size = max_buf_size
                else:
                    # if not grab only what we need
                    transfer_size = signal_size
                # While we still have data remaining
                while offset < signal_size:
                    # get the data
                    data_buffer = dut.correction_data(v_type, offset,
                                                      transfer_size)
                    # figure out how many bytes were transfered
                    transfered = len(data_buffer)
                    # append the data to the buffer of what we have allready
                    # got
                    bin_data = b"".join([bin_data, data_buffer])
                    # increase the offset
                    offset = offset + transfered
                return bin_data

            self.sp_corr_obj = correction_vector()
            try:
                self.sp_corr_obj.buffer_to_vector(_get_correction(self.real_device, "SIGNAL"))
            except ValueError:
                self.sp_corr_obj = None
            self.nf_corr_obj = correction_vector()
            try:
                self.nf_corr_obj.buffer_to_vector(_get_correction(self.real_device, "NOISE"))
            except ValueError:
                self.nf_corr_obj = None
        self.async_callback = async_callback
        self.continuous = False
        # init the sweep id
        self._next_sweep_id = 0
        # init last finished (technically, it hasn't finished, but for our purposes, it has)
        self._last_finished = True

    # Private function
    def log(self, firstmsg, *msgs):
        """Internal debug log: accumulate into logstr, print, or drop, per logtype."""
        if self.logtype == 'LOG':
            self.logstr += firstmsg.__str__()
            for msg in msgs:
                self.logstr += ", "
                self.logstr += msg.__str__()
            self.logstr += "\n"
        elif self.logtype == 'PRINT':
            sys.stdout.write(firstmsg.__str__())
            for msg in msgs:
                sys.stdout.write(", ")
                sys.stdout.write(msg.__str__())
            sys.stdout.write("\n")

    def enable_flattening(self, enable=None):
        """
        Query or set spectral flattening.

        :param enable: enable or disable spectral flattening
        :type enable: bool or None
        :returns: the current setting if *enable* is None, otherwise nothing
        """
        if enable is None:
            return self._flattening_enabled
        else:
            self._flattening_enabled = enable

    def set_geolocation_callback(self, func, data = None):
        """
        set a callback that will get called whenever the geolocation information
        of the device is updated.

        The callback function should accept two parameters. The first parameter
        will be the callback data that was passed in this function
        set_geolocation_callback(func, data, geolocation_dictionary).

        The geolocation_dictionary will have the following properties:
        - oui
        - seconds
        - altitude
        - longitude
        - speedoverground
        - secondsfractional
        - track
        - latitude
        - magneticvariation
        - heading

        See the programmer's guide for usage on each of these properties.

        :param func: the function to be called
        :param data: the data to be passed to the function
        :returns: None
        """
        self._geo_callback_func = func
        self._geo_callback_data = data

    def capture_power_spectrum(self,
                               fstart,
                               fstop,
                               rbw,
                               device_settings=None,
                               mode='SH',
                               continuous=False):
        """
        Initiate a data capture from the *real_device* by setting up a sweep list
        and starting a single sweep, and then return power spectral density data
        along with the **actual** sweep start and stop frequencies set (which
        might not be exactly the same as the requested *fstart* and *fstop*).

        .. note:: This function does not pipeline, and if the last sweep isn't received before starting a new one, it will generate a failure.

        :param int fstart: sweep starting frequency in Hz
        :param int fstop: sweep ending frequency in Hz
        :param float rbw: the resolution bandwidth (RBW) in Hz of the data to be captured (output RBW may be smaller than requested)
        :param device_settings: attenuation and other device settings
        :type device_settings: dict
        :param str mode: sweep mode, 'ZIF', 'SH', or 'SHN'
        :param bool continuous: set sweep to be continuously or not (once only)
        :returns: fstart, fstop, power_data
        """
        self.log("- capture_power_spectrum", fstart, fstop, rbw, device_settings, mode, continuous)
        if continuous and not self.async_callback:
            raise SweepDeviceError(
                "continuous mode only applies to async operation")
        # see if the last sweep has finished
        if not self._last_finished:
            raise SweepDeviceError(
                "previous sweep must have finished before starting a new one")
        self._last_finished = False
        # increment the sweep id
        if self._next_sweep_id < 0x00000000ffffffff:
            self._next_sweep_id += 1
        else:
            self._next_sweep_id = 0
        # keep track if this is a continuous sweep
        self.continuous = continuous
        # plan the sweep
        self._sweep_planner = SweepPlanner(self.dev_properties)
        # NOTE(review): device_settings may be None here; verify plan_sweep
        # accepts None for its dev_settings parameter.
        self._sweep_settings = self._sweep_planner.plan_sweep(fstart, fstop, rbw, mode, device_settings)
        self.log("self._sweep_settings = %s" % self._sweep_settings)
        # remember our last sweep for optimization purposes
        self._last_sweep = (fstart, fstop, rbw, mode, device_settings, continuous)
        # configure the device with the sweep_settings
        self.real_device.sweep_clear()
        self.real_device.sweep_add(self._sweep_settings)
        # configure the iteration
        self.real_device.sweep_iterations(1)
        # capture the sweep data
        return self._perform_full_sweep()

    def _perform_full_sweep(self):
        """Run one full sweep; async returns immediately, sync blocks for the result."""
        # perform the sweep using async socket
        if self.async_callback:
            # set the async callback
            self.real_device.set_async_callback(self._vrt_receive)
            # start the sweep sequence
            self._start_sweep()
            return
        # perform sweep using blocking sockets
        self._start_sweep()
        result = None
        while result is None:
            result = self._vrt_receive(self.real_device.read())
        return result

    def _start_sweep(self):
        """Reset per-sweep state (context, result array, counter) and start the sweep."""
        self._vrt_context = {}
        # initialize the array we'll use to hold results
        self.spectral_data = np.zeros(self._sweep_settings.spectral_points)
        # keep track of packets recieved
        self.packet_count = 0
        self.real_device.sweep_start(self._next_sweep_id)

    def _vrt_receive(self, packet):
        """Handle one VRT packet.

        Context packets update ``_vrt_context`` (and fire the geolocation
        callback).  Data packets are FFT'd, optionally flattened with the
        correction vectors, and copied into ``spectral_data``; when the last
        step of the sweep arrives, the result is emitted via
        :meth:`_emit_data`.
        """
        # context packet just update our context dictionary
        if packet.is_context_packet():
            # look for any geolocation info
            geo = { }
            for field in [ 'latitude', 'longitude', 'altitude', 'speedoverground', 'heading', 'track', 'magneticvariation' ]:
                if field in packet.fields:
                    geo[field] = packet.fields[field]
            if geo and self._geo_callback_func:
                # execute callback
                func = self._geo_callback_func
                func(self._geo_callback_data, geo)
            self._vrt_context.update(packet.fields)
            self.log(packet)
            return
        # check to see if we recieved our sweep ID
        if not ('sweepid' in self._vrt_context):
            return
        # make sure we are receiving packets for the right sweep
        if not (self._vrt_context['sweepid'] == self._next_sweep_id):
            raise SweepDeviceError("data packets received before start of sweep received! cur = %d, next = %d" % (self._vrt_context['sweepid'], self._next_sweep_id))
        # increment the packet count
        self.packet_count += 1
        self.log("#%d of %d - %s" % (self.packet_count, self._sweep_settings.step_count, packet))
        # retrieve the frequency and usable BW of the packet
        packet_freq = self._vrt_context['rffreq']
        usable_bw = self.dev_properties.USABLE_BW[self._sweep_settings.rfe_mode]
        # compute the fft
        pow_data = compute_fft(self.real_device, packet, self._vrt_context)
        # calc rbw for this packet
        rbw = float(self.dev_properties.FULL_BW[self._sweep_settings.rfe_mode]) / len(pow_data)
        self.log("rbw = %f, %f" % (rbw, self._sweep_settings.rbw))
        if self._flattening_enabled:
            # Check if we are above 50 MHz and in SH mode
            if packet_freq >= 50e6 and self._sweep_settings.rfe_mode == "SH":
                number_of_points = len(pow_data)
                # check if we have correction vectors (Noise)
                if self.nf_corr_obj is not None:
                    # if so grab them
                    nf_cal = \
                        self.nf_corr_obj.get_correction_vector(packet_freq,
                                                               number_of_points)
                else:
                    # if no set it to 0
                    nf_cal = np.zeros(number_of_points)
                # check if we have corrrection vectors (Spectrum)
                if self.sp_corr_obj is not None:
                    # if so grab them
                    sp_cal = \
                        self.sp_corr_obj.get_correction_vector(packet_freq,
                                                               number_of_points)
                else:
                    # if not set it to 0
                    sp_cal = np.zeros(number_of_points)
                # if the data is spectraly inverted, invert the vectors
                if packet.spec_inv:
                    nf_cal = np.flipud(nf_cal)
                    sp_cal = np.flipud(sp_cal)
                # calculate the correction threshold
                correction_thresh = (-135.0 + ((10.0 * packet_freq / 1e6)
                                     / 27000.0) + 10.0
                                     * np.log10(rbw)
                                     + self._sweep_settings.attenuation)
                # creat the spectrum. per bin, if the ampltitude is above
                # correction threshold do pow_data - sp_cal else do pow_data -
                # nf_cal
                pow_data = np.where(pow_data < correction_thresh,
                                    pow_data - nf_cal, pow_data - sp_cal)
        # check if DD mode was used in this sweep
        if self.packet_count == 1 and self._sweep_settings.dd_mode:
            # copy the data into the result array
            self._copy_data(0, self.dev_properties.FULL_BW['DD'], pow_data, self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data);
            if self._sweep_settings.beyond_dd:
                return
            else:
                return self._emit_data()
        # determine the usable bins in this config
        self.log("===> compute_usable_bins()", self._sweep_settings.rfe_mode, self._sweep_settings.spp, 1, 0)
        usable_bins = compute_usable_bins(self.dev_properties,
                                          self._sweep_settings.rfe_mode,
                                          self._sweep_settings.spp,
                                          1,
                                          0)
        self.log("<--- usable_bins", usable_bins)
        # adjust the usable range based on spectral inversion
        self.log("===> adjust_usable_fstart_fstop()", "self.dev_properties", self._sweep_settings.rfe_mode, len(pow_data) * 2, 1, packet_freq, packet.spec_inv, usable_bins)
        usable_bins, packet_start, packet_stop = adjust_usable_fstart_fstop(self.dev_properties,
                                          self._sweep_settings.rfe_mode,
                                          len(pow_data) * 2,
                                          1,
                                          packet_freq,
                                          packet.spec_inv,
                                          usable_bins)
        self.log("<--- adjust_usable_fstart_fstop", packet_start, packet_stop, usable_bins)
        #
        # WARNING: the start and stop returned from this function are HIGHLY sketchy
        #
        # calculate packet frequency range
        #packet_start = packet_freq - (self.dev_properties.FULL_BW[self._sweep_settings.rfe_mode] / 2)
        #packet_stop = packet_freq + (self.dev_properties.FULL_BW[self._sweep_settings.rfe_mode] / 2)
        #print "packet start/stop", packet_start, packet_stop
        #trim the FFT data, note decimation is 1, fshift is 0
        self.log("===> trim_to_usable_fstart_fstop()", "pow_data", usable_bins, packet_start, packet_stop)
        trimmed_spectrum, edge_data, usable_start, usable_stop = trim_to_usable_fstart_fstop(pow_data,
                                          usable_bins,
                                          packet_start,
                                          packet_stop)
        self.log("<--- trim_to_usable_fstart_fstop", usable_start, usable_stop, "trimmed_spectrum", edge_data)
        # copy the data
        self._copy_data(usable_start, usable_stop, trimmed_spectrum, self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data);
        # if there's no more packets, emit result
        if self.packet_count == self._sweep_settings.step_count:
            return self._emit_data()
        # all done
        return

    def _emit_data(self):
        """Deliver the finished sweep: via async callback, or as a return tuple."""
        # note that we finished this sweep
        self._last_finished = True
        # if async callback is available, emit the data
        if self.async_callback:
            self.async_callback(self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data)
            return
        # return the values if using blocking sockets
        else:
            return (self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data)

    def _copy_data(self, src_fstart, src_fstop, src_psd, dst_fstart, dst_fstop, dst_psd):
        """Copy *src_psd* (spanning src_fstart..src_fstop Hz) into the matching
        bins of *dst_psd* (spanning dst_fstart..dst_fstop Hz), clipping the
        source at both band edges. Mutates *dst_psd* in place."""
        self.log("_copy_data(%d, %d, src_psd, %d, %d, dst_psd)" % (src_fstart, src_fstop, dst_fstart, dst_fstop))
        # calc src len and dst len
        srclen = len(src_psd)
        dstlen = len(dst_psd)
        self.log("len -- src = %d, dst = %d" % (srclen, dstlen))
        # calc src and dest rbw
        srcrbw = float(src_fstop - src_fstart) / srclen
        dstrbw = float(dst_fstop - dst_fstart) / dstlen
        self.log("rbw = %f, %f, %f" % (srcrbw, dstrbw, self._sweep_settings.rbw))
        # check if packet start is before sweep start. shouldn't happen, but check anyway
        self.log("boundary(start) = %f / %f" % (src_fstart, dst_fstart))
        if src_fstart < dst_fstart:
            self.log("foo")
            src_start_bin = int(float(dst_fstart - src_fstart) / srcrbw)
        else:
            self.log("bar")
            src_start_bin = 0
        # check if packet stop is after sweep stop. this means we don't need the whole packet
        self.log("boundary(stop) = %f / %f" % (src_fstop, dst_fstop))
        if src_fstop > dst_fstop:
            self.log("foo")
            src_stop_bin = srclen - int(float(src_fstop - dst_fstop) / srcrbw)
        else:
            self.log("bar")
            src_stop_bin = srclen
        # how many values are we copying?
        tocopy = src_stop_bin - src_start_bin
        # calculate dest start index
        if src_fstart < dst_fstart:
            dst_start_bin = 0
        else:
            dst_start_bin = int(round(float(src_fstart - dst_fstart) / dstrbw))
        # calculate dest stop index
        dst_stop_bin = dst_start_bin + tocopy
        if dst_stop_bin > dstlen:
            dst_stop_bin = dstlen
        # adjust tocopy
        tocopy = dst_stop_bin - dst_start_bin
        # adjust src stop bin because we adjusted tocopy
        src_stop_bin = src_start_bin + tocopy
        # copy the data, if there's data that needs copying
        if ((dst_stop_bin - dst_start_bin) > 0) and ((src_stop_bin - src_start_bin) > 0):
            self.log("dst_psd[%d:%d] = src_psd[%d:%d]" % (dst_start_bin, dst_stop_bin, src_start_bin, src_stop_bin))
            dst_psd[dst_start_bin:dst_stop_bin] = src_psd[src_start_bin:src_stop_bin]
| [
"numpy.log10",
"numpy.arange",
"pyrf.util.compute_usable_bins",
"numpy.where",
"numpy.flipud",
"pyrf.util.trim_to_usable_fstart_fstop",
"pyrf.numpy_util.compute_fft",
"numpy.linspace",
"struct.unpack",
"numpy.zeros",
"numpy.interp",
"numpy.frombuffer",
"numpy.dtype",
"twisted.internet.defe... | [((2239, 2255), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (2253, 2255), False, 'from twisted.internet import defer\n'), ((2682, 2700), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (2690, 2700), True, 'import numpy as np\n'), ((3283, 3320), 'numpy.arange', 'np.arange', (['(0.0)', 'self.vector_size', '(1.0)'], {}), '(0.0, self.vector_size, 1.0)\n', (3292, 3320), True, 'import numpy as np\n'), ((3357, 3413), 'numpy.linspace', 'np.linspace', (['(0.0)', '(self.vector_size - 1)', 'number_of_points'], {}), '(0.0, self.vector_size - 1, number_of_points)\n', (3368, 3413), True, 'import numpy as np\n'), ((3484, 3509), 'numpy.interp', 'np.interp', (['z', 'x', 'in_array'], {}), '(z, x, in_array)\n', (3493, 3509), True, 'import numpy as np\n'), ((4480, 4516), 'struct.unpack', 'struct.unpack', (['"""!HHHH"""', 'input_buffer'], {}), "('!HHHH', input_buffer)\n", (4493, 4516), False, 'import struct\n'), ((24410, 24456), 'numpy.zeros', 'np.zeros', (['self._sweep_settings.spectral_points'], {}), '(self._sweep_settings.spectral_points)\n', (24418, 24456), True, 'import numpy as np\n'), ((26125, 26181), 'pyrf.numpy_util.compute_fft', 'compute_fft', (['self.real_device', 'packet', 'self._vrt_context'], {}), '(self.real_device, packet, self._vrt_context)\n', (26136, 26181), False, 'from pyrf.numpy_util import compute_fft\n'), ((29019, 29126), 'pyrf.util.compute_usable_bins', 'compute_usable_bins', (['self.dev_properties', 'self._sweep_settings.rfe_mode', 'self._sweep_settings.spp', '(1)', '(0)'], {}), '(self.dev_properties, self._sweep_settings.rfe_mode,\n self._sweep_settings.spp, 1, 0)\n', (29038, 29126), False, 'from pyrf.util import compute_usable_bins, adjust_usable_fstart_fstop, trim_to_usable_fstart_fstop, find_saturation\n'), ((30885, 30962), 'pyrf.util.trim_to_usable_fstart_fstop', 'trim_to_usable_fstart_fstop', (['pow_data', 'usable_bins', 'packet_start', 'packet_stop'], {}), '(pow_data, usable_bins, 
packet_start, packet_stop)\n', (30912, 30962), False, 'from pyrf.util import compute_usable_bins, adjust_usable_fstart_fstop, trim_to_usable_fstart_fstop, find_saturation\n'), ((4951, 5002), 'struct.unpack', 'struct.unpack', (['"""!LH"""', 'input_buffer[i * 6:i * 6 + 6]'], {}), "('!LH', input_buffer[i * 6:i * 6 + 6])\n", (4964, 5002), False, 'import struct\n'), ((5484, 5550), 'numpy.frombuffer', 'np.frombuffer', (['input_buffer'], {'dtype': 'self.dy', 'count': 'self.vector_size'}), '(input_buffer, dtype=self.dy, count=self.vector_size)\n', (5497, 5550), True, 'import numpy as np\n'), ((5257, 5290), 'struct.unpack', 'struct.unpack', (['""">H"""', 'input_buffer'], {}), "('>H', input_buffer)\n", (5270, 5290), False, 'import struct\n'), ((19649, 19671), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (19665, 19671), False, 'import sys\n'), ((28262, 28338), 'numpy.where', 'np.where', (['(pow_data < correction_thresh)', '(pow_data - nf_cal)', '(pow_data - sp_cal)'], {}), '(pow_data < correction_thresh, pow_data - nf_cal, pow_data - sp_cal)\n', (28270, 28338), True, 'import numpy as np\n'), ((19566, 19588), 'sys.stdout.write', 'sys.stdout.write', (['""", """'], {}), "(', ')\n", (19582, 19588), False, 'import sys\n'), ((27039, 27065), 'numpy.zeros', 'np.zeros', (['number_of_points'], {}), '(number_of_points)\n', (27047, 27065), True, 'import numpy as np\n'), ((27508, 27534), 'numpy.zeros', 'np.zeros', (['number_of_points'], {}), '(number_of_points)\n', (27516, 27534), True, 'import numpy as np\n'), ((27673, 27690), 'numpy.flipud', 'np.flipud', (['nf_cal'], {}), '(nf_cal)\n', (27682, 27690), True, 'import numpy as np\n'), ((27720, 27737), 'numpy.flipud', 'np.flipud', (['sp_cal'], {}), '(sp_cal)\n', (27729, 27737), True, 'import numpy as np\n'), ((27970, 27983), 'numpy.log10', 'np.log10', (['rbw'], {}), '(rbw)\n', (27978, 27983), True, 'import numpy as np\n')] |
import csv
import datetime
import dask.bag as db
from argparse import ArgumentParser
# Command-line interface: where to read the per-identity CSV files from and
# where to write the combined dataset CSV. Parsed at import time into `p`,
# which main() reads below.
parser = ArgumentParser()
parser.add_argument('-t', "--tmp", type=str, default="./tmp", help="Path to dir containing the identitiy csv files")
parser.add_argument('-c', "--csv", type=str, default="./preprocess/dataset.csv", help="Path to save the combined dataset csv file")
p = parser.parse_args()
def main():
    """Combine the CSV files under ``p.tmp`` into one CSV written to ``p.csv``.

    Reads every ``*.csv`` in the tmp directory with dask, keeps a single copy
    of the header, filters out the repeated per-file header rows, and writes
    the combined rows.
    """
    bag = db.read_text(p.tmp + '/*.csv', blocksize=100000)
    # Compute once (the original computed the bag twice); each record is a
    # list of comma-separated fields.
    records = bag.str.strip().str.split(',').compute()
    # The first line of the first file is the header row.
    header = records[0]
    combined_bag = db.from_sequence(records, npartitions=1)
    # ^ Join the bag into one partition, so the CSV file is not separated.
    # Drop every repeated header line (rows whose first field matches the
    # header's first field).
    filtered_bag = combined_bag.filter(lambda r: not r[0].startswith(header[0]))
    # BUGFIX: csv.writer needs a text-mode file in Python 3 (was 'wb');
    # newline='' prevents blank lines on Windows. `with` guarantees close.
    with open(p.csv, 'w', newline='') as outfile:
        bagwriter = csv.writer(outfile)
        bagwriter.writerow(header)
        for line in filtered_bag.compute():
            bagwriter.writerow(line)


if __name__ == '__main__':
    main()
"datetime.datetime.now",
"csv.writer",
"dask.bag.read_text",
"argparse.ArgumentParser"
] | [((95, 111), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (109, 111), False, 'from argparse import ArgumentParser\n'), ((407, 455), 'dask.bag.read_text', 'db.read_text', (["(p.tmp + '/*.csv')"], {'blocksize': '(100000)'}), "(p.tmp + '/*.csv', blocksize=100000)\n", (419, 455), True, 'import dask.bag as db\n'), ((942, 965), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (963, 965), False, 'import datetime\n'), ((1016, 1035), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (1026, 1035), False, 'import csv\n')] |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import logging as log
from .item import Node, NodeType
from .xbar import Xbar
def elaborate(xbar: Xbar) -> bool:
    """Read every node and edge of the crossbar and construct the internal
    FIFOs and sockets, then apply the pipeline settings.

    Returns False when the crossbar is too small to be processed.
    """
    node_count = len(xbar.nodes)
    edge_count = len(xbar.edges)
    # Sanity check: a crossbar needs at least two nodes joined by an edge.
    if node_count < 2 or edge_count == 0:
        log.error(
            "# of Nodes is less than 2 or no Edge exists. Cannot proceed.")
        return False
    # Expand each host-side path into async FIFOs / sockets as required.
    for host in xbar.hosts:
        process_node(host, xbar)
    log.info("Node Processed: " + repr(xbar))
    # Propagate the user's pipeline options onto the generated sockets.
    process_pipeline(xbar)
    # Build address map
    # Each socket_1n should have address map
    return True
def process_node(node, xbar):  # node: Node -> xbar: Xbar -> Xbar
    """process each node based on algorithm

    1. If a node has different clock from main clock and not ASYNC_FIFO:
       a. (New Node) Create ASYNC_FIFO node.
       b. Revise every edges from the node to have start node as ASYNC_FIFO
          node. (New Edge) create a edge from the node to ASYNC_FIFO node.
          - Repeat the algorithm with ASYNC_FIFO node.
       c. Revise every edges to the node to have end node as ASYNC_FIFO
          node. (New Edge) create a edge from ASYNC_FIFO node to the node.
       d. If it is not DEVICE, HOST node, raise Error. If it is DEVICE, end
          (next item).
    2. If a node has multiple edges having it as a end node and not SOCKET_M1:
       a. (New node) Create SOCKET_M1 node.
       b. Revise every edges to the node to have SOCKET_M1 node as end node.
       c. (New Edge) create a edge from SOCKET_M1 to the node.
       d. Repeat the algorithm with the node.
    3. If a node has multiple edges having it as a start node and not SOCKET_1N:
       a. (New node) Create SOCKET_1N node.
       b. Revise every edges from the node to have SOCKET_1N node as start node.
       c. (New Edge) Create a edge from the node to SOCKET_1N node.
       d. (for loop) Repeat the algorithm with SOCKET_1N's other side node.
    """
    # If a node has different clock from main clock and not ASYNC_FIFO:
    if node.node_type != NodeType.ASYNC_FIFO and node.clocks[0] != xbar.clock:
        # (New Node) Create ASYNC_FIFO node
        new_node = Node(name="asf_" + str(len(xbar.nodes)),
                        node_type=NodeType.ASYNC_FIFO,
                        clock=xbar.clock,
                        reset=xbar.reset)
        # if node is HOST, host clock synchronizes into xbar domain
        # if node is DEVICE, xbar synchronizes into device clock domain
        if node.node_type == NodeType.HOST:
            # FIFO clock list is [host clock, xbar clock]
            new_node.clocks.insert(0, node.clocks[0])
            new_node.resets.insert(0, node.resets[0])
        else:
            # FIFO clock list is [xbar clock, device clock]
            new_node.clocks.append(node.clocks[0])
            new_node.resets.append(node.resets[0])
        xbar.insert_node(new_node, node)
        # Recurse: the new FIFO node may still need a socket on the xbar side.
        process_node(new_node, xbar)
    # If a node has multiple edges having it as a end node and not SOCKET_M1:
    elif node.node_type != NodeType.SOCKET_M1 and len(node.us) > 1:
        # (New node) Create SOCKET_M1 node
        new_node = Node(name="sm1_" + str(len(xbar.nodes)),
                        node_type=NodeType.SOCKET_M1,
                        clock=xbar.clock,
                        reset=xbar.reset)
        # By default, assume connecting to SOCKET_1N upstream and bypass all FIFOs
        # If upstream requires pipelining, it will be added through process pipeline
        # hpass is a per-upstream-port bitmask: all ones -> bypass every port.
        new_node.hdepth = 0
        new_node.hpass = 2**len(node.us) - 1
        new_node.ddepth = 0
        new_node.dpass = 1
        xbar.insert_node(new_node, node)
        process_node(new_node, xbar)
    # If a node has multiple edges having it as a start node and not SOCKET_1N:
    elif node.node_type != NodeType.SOCKET_1N and len(node.ds) > 1:
        # (New node) Create SOCKET_1N node
        new_node = Node(name="s1n_" + str(len(xbar.nodes)),
                        node_type=NodeType.SOCKET_1N,
                        clock=xbar.clock,
                        reset=xbar.reset)
        # By default, assume connecting to SOCKET_M1 downstream and bypass all FIFOs
        # If upstream requires pipelining, it will be added through process pipeline
        # dpass is a per-downstream-port bitmask: all ones -> bypass every port.
        new_node.hdepth = 0
        new_node.hpass = 1
        new_node.ddepth = 0
        new_node.dpass = 2**len(node.ds) - 1
        xbar.insert_node(new_node, node)
        # (for loop) Repeat the algorithm with SOCKET_1N's other side node
        for edge in new_node.ds:
            process_node(edge.ds, xbar)
    return xbar
def process_pipeline(xbar):
    """Check if HOST, DEVICE has settings different from default, then propagate it to end

    For each host/device the three mutually exclusive modes are encoded as:
    full_fifo (pipeline, no bypass), fifo_passthru (pipeline with bypass),
    and full_passthru (no pipeline). Socket hpass/dpass are per-port bitmasks
    and hdepth/ddepth pack a 4-bit depth per port.
    """
    for host in xbar.hosts:
        # go downstream and change the HReqPass/Depth at the first instance.
        # If it is async, skip.
        # If Socket 1N,
        #    if pipeline True and bypass false, set hpass to 0
        #    if pipeline is False, set depth to 0
        # If Socket M1, find position of the host and follow procedure above
        # If it is device, it means host and device are directly connected. Ignore now.
        log.info("Processing pipeline for host {}".format(host.name))
        # FIFO present with no passthrough option
        # FIFO present with passthrough option
        # FIFO not present and full passthrough
        full_fifo = False
        fifo_passthru = False
        full_passthru = True
        if host.pipeline is True and host.pipeline_byp is False:
            full_fifo = True
        elif host.pipeline is True and host.pipeline_byp is True:
            fifo_passthru = True
        elif host.pipeline is False:
            full_passthru = True
        # First node downstream of the host.
        dnode = host.ds[0].ds
        if dnode.node_type == NodeType.ASYNC_FIFO:
            continue
        if dnode.node_type == NodeType.SOCKET_1N:
            # Single host port: hpass/hdepth are plain scalars here.
            if full_fifo:
                dnode.hpass = 0
                dnode.hdepth = 2
            elif fifo_passthru:
                dnode.hpass = 0
                dnode.hdepth = 2
            elif full_passthru:
                dnode.hpass = 1
                dnode.hdepth = 0
            log.info(
                "Finished processing socket1n {}, pass={}, depth={}".format(
                    dnode.name, dnode.hpass, dnode.hdepth))
        elif dnode.node_type == NodeType.SOCKET_M1:
            # Locate which upstream port of the M1 socket this host drives.
            idx = dnode.us.index(host.ds[0])
            if full_fifo:
                log.info("fifo present no bypass")
                # Clear the bypass bit and set a 4-bit depth of 2 for this port.
                dnode.hpass = dnode.hpass & ~(1 << idx)
                dnode.hdepth = dnode.hdepth | (2 << idx * 4)
            elif fifo_passthru:
                log.info("fifo present with bypass")
                dnode.hpass = dnode.hpass | (1 << idx)
                dnode.hdepth = dnode.hdepth | (2 << idx * 4)
            elif full_passthru:
                log.info("fifo not present")
                # Set bypass and zero out this port's 4-bit depth field.
                dnode.hpass = dnode.hpass | (1 << idx)
                dnode.hdepth = dnode.hdepth & ~(0xF << idx * 4)
            log.info(
                "Finished processing socketm1 {}, pass={}, depth={}".format(
                    dnode.name, dnode.hpass, dnode.hdepth))
    for device in xbar.devices:
        # go upstream and set DReq/RspPass at the first instance.
        # If it is async, skip
        # If Socket M1
        #    If pipeline True and bypass False, set dpass to 0
        #    If pipeline False, set depth to 0
        # If Socket 1N, find position of the device and follow procedure above
        # If it is host, ignore
        log.info("Processing pipeline for device {}".format(device.name))
        # FIFO present with no passthrough option
        # FIFO present with passthrough option
        # FIFO not present and full passthrough
        full_fifo = False
        fifo_passthru = False
        full_passthru = True
        if device.pipeline is True and device.pipeline_byp is False:
            full_fifo = True
        elif device.pipeline is True and device.pipeline_byp is True:
            fifo_passthru = True
        elif device.pipeline is False:
            full_passthru = True
        # First node upstream of the device.
        unode = device.us[0].us
        if unode.node_type == NodeType.ASYNC_FIFO:
            continue
        if unode.node_type == NodeType.SOCKET_1N:
            # Locate which downstream port of the 1N socket feeds this device.
            idx = unode.ds.index(device.us[0])
            if full_fifo:
                unode.dpass = unode.dpass & ~(1 << idx)
                unode.ddepth = unode.ddepth | (2 << idx * 4)
            elif fifo_passthru:
                unode.dpass = unode.dpass | (1 << idx)
                unode.ddepth = unode.ddepth | (2 << idx * 4)
            elif full_passthru:
                unode.dpass = unode.dpass | (1 << idx)
                unode.ddepth = unode.ddepth & ~(0xF << idx * 4)
            log.info("Finished processing socket1n {}, pass={:x}, depth={:x}".
                     format(unode.name, unode.dpass, unode.ddepth))
        elif unode.node_type == NodeType.SOCKET_M1:
            # Single device port: dpass/ddepth are plain scalars here.
            if full_fifo:
                log.info("Fifo present with no passthrough")
                unode.dpass = 0
                unode.ddepth = 2
            elif fifo_passthru:
                log.info("Fifo present with passthrough")
                unode.dpass = 0
                unode.ddepth = 2
            elif full_passthru:
                log.info("No Fifo")
                unode.dpass = 1
                unode.ddepth = 0
            log.info("Finished processing socketm1 {}, pass={:x}, depth={:x}".
                     format(unode.name, unode.dpass, unode.ddepth))
    return xbar
| [
"logging.error",
"logging.info"
] | [((442, 515), 'logging.error', 'log.error', (['"""# of Nodes is less than 2 or no Edge exists. Cannot proceed."""'], {}), "('# of Nodes is less than 2 or no Edge exists. Cannot proceed.')\n", (451, 515), True, 'import logging as log\n'), ((6560, 6594), 'logging.info', 'log.info', (['"""fifo present no bypass"""'], {}), "('fifo present no bypass')\n", (6568, 6594), True, 'import logging as log\n'), ((9115, 9159), 'logging.info', 'log.info', (['"""Fifo present with no passthrough"""'], {}), "('Fifo present with no passthrough')\n", (9123, 9159), True, 'import logging as log\n'), ((6760, 6796), 'logging.info', 'log.info', (['"""fifo present with bypass"""'], {}), "('fifo present with bypass')\n", (6768, 6796), True, 'import logging as log\n'), ((9273, 9314), 'logging.info', 'log.info', (['"""Fifo present with passthrough"""'], {}), "('Fifo present with passthrough')\n", (9281, 9314), True, 'import logging as log\n'), ((6961, 6989), 'logging.info', 'log.info', (['"""fifo not present"""'], {}), "('fifo not present')\n", (6969, 6989), True, 'import logging as log\n'), ((9428, 9447), 'logging.info', 'log.info', (['"""No Fifo"""'], {}), "('No Fifo')\n", (9436, 9447), True, 'import logging as log\n')] |
from flask import Flask, request
from flask_restful import Resource, Api
from joblib import load
import cv2
import time
import sys
sys.path.append("..")
from common import utils
app = Flask(__name__)
api = Api(app)
class predict(Resource):
    """REST resource running a sliding-window object detector over posted images.

    NOTE(review): all state below is class-level and mutated in place, so a
    single shared detector state is assumed; concurrent requests would race
    on pre_pos/captured/search ranges - confirm the server runs single-threaded.
    """
    # Search window (in downscaled pixels) around the last detection.
    w_search_range = 32
    h_search_range = 16
    # Input frames are downscaled by this factor before feature extraction.
    scaling_factor = 2
    stride = 2
    # ROI size in downscaled pixels (320x480 at original resolution).
    roi_h_len = int( 320 / scaling_factor )
    roi_w_len = int( 480 / scaling_factor)
    # Pre-trained classifier; label 0 is treated as a positive detection below.
    clf = load('od.joblib')
    # Last detected top-left position (downscaled coordinates).
    pre_pos = (128, 96)
    captured = False
    def dynamic_gen_roi_pos(self, image, stride):
        # Shrink the search area after a hit, grow it after a miss,
        # clamped to a minimum window so the search never collapses.
        if predict.captured == True:
            predict.w_search_range -= predict.stride
            predict.h_search_range -= int(predict.stride / 2)
        else:
            predict.w_search_range += predict.stride
            predict.h_search_range += int(predict.stride / 2)
        if predict.w_search_range < 2 * predict.stride:
            predict.w_search_range = 2 * predict.stride
        if predict.h_search_range < predict.stride:
            predict.h_search_range = predict.stride
        return utils.gen_roi_pos(predict.pre_pos, predict.w_search_range, predict.h_search_range, stride)
    def get_rect(self, image_stream):
        """Decode a base64 image, run HOG+classifier over candidate ROIs and
        return the first positive detection as a rectangle in original-image
        coordinates (empty "objects" list when nothing is found)."""
        result = {"objects":[]}
        print("=======================================")
        print("start read {0}".format(time.time()))
        img_data = utils.readb64(image_stream)
        img_h = int(img_data.shape[0] / self.scaling_factor)
        img_w = int(img_data.shape[1] / self.scaling_factor)
        print("start resize {0}".format(time.time()))
        resized_img = cv2.resize(img_data, (img_w, img_h), interpolation=cv2.INTER_AREA)
        print("gen roi {0}".format(time.time()))
        locations = self.dynamic_gen_roi_pos(resized_img, self.stride)
        print("location len {0}".format(len(locations)))
        hog_features = utils.get_hog(resized_img, locations=locations,
                                     winSize=(self.roi_w_len, self.roi_h_len))
        # One flat feature vector comes back; split it evenly per location.
        hog_feature_list = list(utils.chunks(hog_features, int(len(hog_features) / len(locations))))
        predict_results = predict.clf.predict(hog_feature_list)
        predict.captured = False
        for i in range(0,len(predict_results)):
            if predict_results[i] == 0:
                # Detection: convert the ROI corners back to original scale.
                u_h = locations[i][1]
                d_h = u_h + self.roi_h_len
                l_w = locations[i][0]
                r_w = l_w + self.roi_w_len
                result["objects"].append({
                    "positions":
                        {"cross": [[l_w * self.scaling_factor, u_h * self.scaling_factor]
                            , [r_w * self.scaling_factor, u_h * self.scaling_factor]
                            , [r_w * self.scaling_factor, d_h * self.scaling_factor]
                            , [l_w * self.scaling_factor, d_h * self.scaling_factor]]},
                    "attributes":
                        {"status": "Normal"}
                    }
                )
                # Remember where we found it and stop at the first hit.
                predict.pre_pos = (l_w, u_h)
                predict.captured = True
                break
        return result
    def post(self):
        """Handle POST: run get_rect on every image in request.json["params"]."""
        result = {"results": []}
        try:
            data = request.json
            for pic in data["params"]:
                result["results"].append(self.get_rect(pic["data"]))
        except Exception as e:
            # Best-effort: log and return whatever was processed so far.
            print(e)
        return result
# Mount the detector resource at the service root and start the dev server.
api.add_resource(predict,"/")
if __name__ == '__main__':
    app.run(host='localhost', port=8888)
"common.utils.gen_roi_pos",
"flask_restful.Api",
"flask.Flask",
"time.time",
"common.utils.readb64",
"common.utils.get_hog",
"joblib.load",
"cv2.resize",
"sys.path.append"
] | [((132, 153), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (147, 153), False, 'import sys\n'), ((186, 201), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (191, 201), False, 'from flask import Flask, request\n'), ((208, 216), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (211, 216), False, 'from flask_restful import Resource, Api\n'), ((426, 443), 'joblib.load', 'load', (['"""od.joblib"""'], {}), "('od.joblib')\n", (430, 443), False, 'from joblib import load\n'), ((1055, 1150), 'common.utils.gen_roi_pos', 'utils.gen_roi_pos', (['predict.pre_pos', 'predict.w_search_range', 'predict.h_search_range', 'stride'], {}), '(predict.pre_pos, predict.w_search_range, predict.\n h_search_range, stride)\n', (1072, 1150), False, 'from common import utils\n'), ((1348, 1375), 'common.utils.readb64', 'utils.readb64', (['image_stream'], {}), '(image_stream)\n', (1361, 1375), False, 'from common import utils\n'), ((1574, 1640), 'cv2.resize', 'cv2.resize', (['img_data', '(img_w, img_h)'], {'interpolation': 'cv2.INTER_AREA'}), '(img_data, (img_w, img_h), interpolation=cv2.INTER_AREA)\n', (1584, 1640), False, 'import cv2\n'), ((1842, 1935), 'common.utils.get_hog', 'utils.get_hog', (['resized_img'], {'locations': 'locations', 'winSize': '(self.roi_w_len, self.roi_h_len)'}), '(resized_img, locations=locations, winSize=(self.roi_w_len,\n self.roi_h_len))\n', (1855, 1935), False, 'from common import utils\n'), ((1315, 1326), 'time.time', 'time.time', ([], {}), '()\n', (1324, 1326), False, 'import time\n'), ((1538, 1549), 'time.time', 'time.time', ([], {}), '()\n', (1547, 1549), False, 'import time\n'), ((1676, 1687), 'time.time', 'time.time', ([], {}), '()\n', (1685, 1687), False, 'import time\n')] |
#!/usr/bin/env python
# coding: utf-8
# NOTE: exported from a Jupyter notebook (nbconvert). The "In[...]" markers
# are the original cell numbers; the bare expressions below only produced
# visible output in the notebook and have no effect when run as a script.
# In[5]:
1 + 1 * 2
# In[4]:
20 // 3 + 20 // 7 ** 2
# In[2]:
import random
4 + random.randint(10, 100)
# In[5]:
# (duplicate import kept from the original notebook cell)
import random
4 + random.randint(10, 100)
# In[ ]:
| [
"random.randint"
] | [((128, 151), 'random.randint', 'random.randint', (['(10)', '(100)'], {}), '(10, 100)\n', (142, 151), False, 'import random\n'), ((184, 207), 'random.randint', 'random.randint', (['(10)', '(100)'], {}), '(10, 100)\n', (198, 207), False, 'import random\n')] |
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir)))
import base_test
class EcmasScriptTest(base_test.WebDriverBaseTest):
    """Checks that execute_script evaluates ECMAScript in the page context."""

    def test_that_ecmascript_returns_document_title(self):
        """document.title read via execute_script must match the page's title."""
        self.driver.get(
            self.webserver.where_is("ecmascript/res/ecmascript_test.html"))
        result = self.driver.execute_script("return document.title;")
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual.
        self.assertEqual("ecmascript test", result)
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"os.path.dirname"
] | [((620, 635), 'unittest.main', 'unittest.main', ([], {}), '()\n', (633, 635), False, 'import unittest\n'), ((174, 199), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (189, 199), False, 'import os\n')] |
"""
View feed directly from camera
"""
import logging
from functools import partial
import click
from rakali import VideoPlayer
from rakali.annotate import add_frame_labels, colors
from rakali.video import VideoStream
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def decorate_frame(frame, source):
    """Stamp the video source description onto the frame and return it."""
    label_text = f"{source}"
    return add_frame_labels(
        frame=frame,
        labels=[label_text],
        color=colors.get("BHP"),
    )
@click.command(context_settings=dict(max_content_width=120))
@click.version_option()
@click.option(
    "-s",
    "--source",
    help="Video source, can be local USB cam (0|1|2..) or IP cam rtsp URL or file",
    default="http://axis-lab/axis-cgi/mjpg/video.cgi?&camera=2",
    show_default=True,
)
def cli(source):
    """Open the given video source and play it, labelling each frame."""
    labeller = partial(decorate_frame, source=source)
    video_stream = VideoStream(src=source)
    video_player = VideoPlayer(stream=video_stream, frame_callback=labeller)
    with video_player:
        video_player.autoplay()
| [
"logging.basicConfig",
"logging.getLogger",
"rakali.annotate.colors.get",
"rakali.VideoPlayer",
"click.option",
"functools.partial",
"click.version_option",
"rakali.video.VideoStream"
] | [((221, 261), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (240, 261), False, 'import logging\n'), ((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((534, 556), 'click.version_option', 'click.version_option', ([], {}), '()\n', (554, 556), False, 'import click\n'), ((558, 761), 'click.option', 'click.option', (['"""-s"""', '"""--source"""'], {'help': '"""Video source, can be local USB cam (0|1|2..) or IP cam rtsp URL or file"""', 'default': '"""http://axis-lab/axis-cgi/mjpg/video.cgi?&camera=2"""', 'show_default': '(True)'}), "('-s', '--source', help=\n 'Video source, can be local USB cam (0|1|2..) or IP cam rtsp URL or file',\n default='http://axis-lab/axis-cgi/mjpg/video.cgi?&camera=2',\n show_default=True)\n", (570, 761), False, 'import click\n'), ((805, 843), 'functools.partial', 'partial', (['decorate_frame'], {'source': 'source'}), '(decorate_frame, source=source)\n', (812, 843), False, 'from functools import partial\n'), ((857, 880), 'rakali.video.VideoStream', 'VideoStream', ([], {'src': 'source'}), '(src=source)\n', (868, 880), False, 'from rakali.video import VideoStream\n'), ((894, 946), 'rakali.VideoPlayer', 'VideoPlayer', ([], {'stream': 'stream', 'frame_callback': '_decorate'}), '(stream=stream, frame_callback=_decorate)\n', (905, 946), False, 'from rakali import VideoPlayer\n'), ((430, 447), 'rakali.annotate.colors.get', 'colors.get', (['"""BHP"""'], {}), "('BHP')\n", (440, 447), False, 'from rakali.annotate import add_frame_labels, colors\n')] |
import sys
from itertools import product
import pytest
from pyformlang.regular_expression import PythonRegex
if not sys.platform.startswith("linux"):
pytest.skip("skipping ubuntu-only tests", allow_module_level=True)
else:
from project import (
generate_two_cycles_graph,
rpq,
FABooleanMatricesDok,
FABooleanMatricesCB,
)
@pytest.fixture(params=[FABooleanMatricesDok, FABooleanMatricesCB])
def fabm(request):
    # Parametrized over both boolean-matrix backends so each test runs twice.
    return request.param
@pytest.fixture
def graph():
    # Two cycles of sizes 3 and 2 joined at a node; edges labelled "x" and "y".
    return generate_two_cycles_graph(3, 2, ("x", "y"))
@pytest.fixture
def all_nodes_rpq():
    # Expected RPQ answer when every node is both a start and a final node:
    # the full 4x4 grid of pairs plus the pairs along the second cycle.
    pairs = {(i, j) for i in range(4) for j in range(4)}
    return pairs | {(0, 4), (4, 5), (5, 0)}
def test_all_nodes_s_and_f(graph, fabm, all_nodes_rpq):
    # All nodes are start and final
    # With no start/final restriction, "x*|y" must reach the full expected set.
    actual_rpq = rpq(graph, PythonRegex("x*|y"), fabm=fabm)
    assert actual_rpq == all_nodes_rpq
# Each case: a regex pattern, start/final node sets, and the expected
# (start, final) reachable pairs for the two-cycles graph fixture.
@pytest.mark.parametrize(
    "pattern,start_nodes,final_nodes,expected_rpq",
    [
        ("x*|y", {0}, {1, 2, 3, 4}, {(0, 1), (0, 2), (0, 3), (0, 4)}),
        ("x*|y", {4}, {4, 5}, {(4, 5)}),
        ("xx", {0, 1, 2, 3}, {0, 1, 2, 3}, {(0, 2), (1, 3), (2, 0), (3, 1)}),
        ("y", {0}, {0, 1, 2, 3}, set()),
        ("y*", {0}, {5, 4}, {(0, 5), (0, 4)}),
    ],
)
def test_querying(graph, fabm, pattern, start_nodes, final_nodes, expected_rpq):
    # Regular path query restricted to the given start/final node sets.
    regex = PythonRegex(pattern)
    actual_rpq = rpq(graph, regex, start_nodes, final_nodes, fabm)
    assert actual_rpq == expected_rpq
| [
"project.rpq",
"project.generate_two_cycles_graph",
"pyformlang.regular_expression.PythonRegex",
"sys.platform.startswith",
"pytest.fixture",
"pytest.skip"
] | [((371, 437), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[FABooleanMatricesDok, FABooleanMatricesCB]'}), '(params=[FABooleanMatricesDok, FABooleanMatricesCB])\n', (385, 437), False, 'import pytest\n'), ((118, 150), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (141, 150), False, 'import sys\n'), ((156, 222), 'pytest.skip', 'pytest.skip', (['"""skipping ubuntu-only tests"""'], {'allow_module_level': '(True)'}), "('skipping ubuntu-only tests', allow_module_level=True)\n", (167, 222), False, 'import pytest\n'), ((524, 567), 'project.generate_two_cycles_graph', 'generate_two_cycles_graph', (['(3)', '(2)', "('x', 'y')"], {}), "(3, 2, ('x', 'y'))\n", (549, 567), False, 'from project import generate_two_cycles_graph, rpq, FABooleanMatricesDok, FABooleanMatricesCB\n'), ((1357, 1377), 'pyformlang.regular_expression.PythonRegex', 'PythonRegex', (['pattern'], {}), '(pattern)\n', (1368, 1377), False, 'from pyformlang.regular_expression import PythonRegex\n'), ((1395, 1444), 'project.rpq', 'rpq', (['graph', 'regex', 'start_nodes', 'final_nodes', 'fabm'], {}), '(graph, regex, start_nodes, final_nodes, fabm)\n', (1398, 1444), False, 'from project import generate_two_cycles_graph, rpq, FABooleanMatricesDok, FABooleanMatricesCB\n'), ((819, 838), 'pyformlang.regular_expression.PythonRegex', 'PythonRegex', (['"""x*|y"""'], {}), "('x*|y')\n", (830, 838), False, 'from pyformlang.regular_expression import PythonRegex\n')] |
# Copyright (c) 2021 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
from typing import Any, Dict, List, Tuple, Union
from uuid import UUID
from ..input_file import InputFile
from ..params import Params
from ..validators import InputValidator
from .constants import default_ui_json, required_parameters, validations
class MVIParams(Params):
_default_ui_json = default_ui_json
    def __init__(self, **kwargs):
        """Declare every MVI inversion parameter with a None placeholder,
        then delegate to Params.__init__ to apply **kwargs.

        NOTE(review): assignments go through the property setters defined
        below, which validate non-None values - confirm that is intended
        for the initial None assignments as well.
        """
        self.validations: Dict[str, Any] = validations
        self.validator: InputValidator = InputValidator(
            required_parameters, validations
        )
        self.associations: Dict[Union[str, UUID], Union[str, UUID]] = None
        self.forward_only: bool = None
        # Inducing magnetic field (amplitude / inclination / declination).
        self.inducing_field_strength: float = None
        self.inducing_field_inclination: float = None
        self.inducing_field_declination: float = None
        # Survey geometry and observed data.
        self.topography_object: UUID = None
        self.topography = None
        self.data_object = None
        self.tmi_channel = None
        self.tmi_uncertainty = None
        # Starting model (value or referenced objects/data).
        self.starting_model_object = None
        self.starting_inclination_object = None
        self.starting_declination_object = None
        self.starting_model = None
        self.starting_inclination = None
        self.starting_declination = None
        # Receiver layout / offsets.
        self.tile_spatial = None
        self.receivers_radar_drape = None
        self.receivers_offset_x = None
        self.receivers_offset_y = None
        self.receivers_offset_z = None
        self.gps_receivers_offset = None
        # Data pre-processing options.
        self.ignore_values = None
        self.resolution = None
        self.detrend_data = None
        self.detrend_order = None
        self.detrend_type = None
        self.max_chunk_size = None
        self.chunk_by_rows = None
        self.output_tile_files = None
        # Mesh construction parameters.
        self.mesh = None
        self.mesh_from_params = None
        self.core_cell_size_x = None
        self.core_cell_size_y = None
        self.core_cell_size_z = None
        self.octree_levels_topo = None
        self.octree_levels_obs = None
        self.octree_levels_padding = None
        self.depth_core = None
        self.max_distance = None
        self.padding_distance_x = None
        self.padding_distance_y = None
        self.padding_distance_z = None
        # Data window.
        self.window_center_x = None
        self.window_center_y = None
        self.window_width = None
        self.window_height = None
        # Optimization / regularization settings.
        self.inversion_style = None
        self.chi_factor = None
        self.max_iterations = None
        self.max_cg_iterations = None
        self.max_global_iterations = None
        self.initial_beta = None
        self.initial_beta_ratio = None
        self.tol_cg = None
        self.alpha_s = None
        self.alpha_x = None
        self.alpha_y = None
        self.alpha_z = None
        self.smallness_norm = None
        self.x_norm = None
        self.y_norm = None
        self.z_norm = None
        # Reference model.
        self.reference_model_object = None
        self.reference_inclination_object = None
        self.reference_declination_object = None
        self.reference_model = None
        self.reference_inclination = None
        self.reference_declination = None
        self.gradient_type = None
        self.lower_bound = None
        self.upper_bound = None
        # Compute resources and output.
        self.parallelized = None
        self.n_cpu = None
        self.max_ram = None
        self.inversion_type = None
        self.out_group = None
        self.no_data_value = None
        self._input_file = InputFile()
        super().__init__(**kwargs)
    def _set_defaults(self) -> None:
        """ Wraps Params._set_defaults """
        # Delegates to the base class using this class's ui.json template.
        return super()._set_defaults(self.default_ui_json)
    def default(self, param) -> Any:
        """ Wraps Params.default. """
        # Look up the default value of `param` in the ui.json template.
        return super().default(self.default_ui_json, param)
def components(self) -> List[str]:
""" Retrieve component names used to index channel, uncertainty data. """
return [k.split("_")[0] for k in self.active() if "channel" in k]
def uncertainty(self, component: str) -> float:
""" Returns uncertainty for chosen data component. """
return self.__getattribute__("_".join([component, "uncertainty"]))
def channel(self, component: str) -> UUID:
""" Returns channel uuid for chosen data component. """
return self.__getattribute__("_".join([component, "channel"]))
def window(self) -> Dict[str, float]:
""" Returns window dictionary """
win = {
"center_x": self.window_center_x,
"center_y": self.window_center_y,
"width": self.window_width,
"height": self.window_height,
"center": [self.window_center_x, self.window_center_y],
"size": [self.window_width, self.window_height],
}
return win if any([v is not None for v in win.values()]) else None
def offset(self) -> Tuple[List[float], UUID]:
""" Returns offset components as list and drape data. """
offsets = [
self.receivers_offset_x,
self.receivers_offset_y,
self.receivers_offset_z,
]
is_offset = any([(k != 0) for k in offsets])
offsets = offsets if is_offset else None
return offsets, self.receivers_radar_drape
def inducing_field_aid(self) -> List[float]:
""" Returns inducing field components as a list. """
return [
self.inducing_field_strength,
self.inducing_field_inclination,
self.inducing_field_declination,
]
def model_norms(self) -> List[float]:
""" Returns model norm components as a list. """
return [
self.smallness_norm,
self.x_norm,
self.y_norm,
self.z_norm,
]
    # Inversion type selector; validated against the allowed values on set.
    @property
    def inversion_type(self):
        return self._inversion_type
    @inversion_type.setter
    def inversion_type(self, val):
        if val is None:
            self._inversion_type = val
            return
        p = "inversion_type"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._inversion_type = val
    # Forward-only flag: skip the inversion and only simulate data.
    @property
    def forward_only(self):
        return self._forward_only
    @forward_only.setter
    def forward_only(self, val):
        if val is None:
            self._forward_only = val
            return
        p = "forward_only"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._forward_only = val
@property
def inducing_field_strength(self):
return self._inducing_field_strength
@inducing_field_strength.setter
def inducing_field_strength(self, val):
if val is None:
self._inducing_field_strength = val
return
p = "inducing_field_strength"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
if val <= 0:
raise ValueError("inducing_field_strength must be greater than 0.")
self._inducing_field_strength = UUID(val) if isinstance(val, str) else val
@property
def inducing_field_inclination(self):
return self._inducing_field_inclination
@inducing_field_inclination.setter
def inducing_field_inclination(self, val):
if val is None:
self._inducing_field_inclination = val
return
p = "inducing_field_inclination"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._inducing_field_inclination = UUID(val) if isinstance(val, str) else val
@property
def inducing_field_declination(self):
return self._inducing_field_declination
@inducing_field_declination.setter
def inducing_field_declination(self, val):
if val is None:
self._inducing_field_declination = val
return
p = "inducing_field_declination"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._inducing_field_declination = UUID(val) if isinstance(val, str) else val
    # Each property below validates non-None values and promotes UUID strings
    # to uuid.UUID instances (these parameters reference workspace entities).
    @property
    def topography_object(self):
        return self._topography_object
    @topography_object.setter
    def topography_object(self, val):
        if val is None:
            self._topography_object = val
            return
        p = "topography_object"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._topography_object = UUID(val) if isinstance(val, str) else val
    # Topography data: may be a referenced data uuid or a constant value.
    @property
    def topography(self):
        return self._topography
    @topography.setter
    def topography(self, val):
        if val is None:
            self._topography = val
            return
        p = "topography"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._topography = UUID(val) if isinstance(val, str) else val
    # Survey object holding the observed data channels.
    @property
    def data_object(self):
        return self._data_object
    @data_object.setter
    def data_object(self, val):
        if val is None:
            self._data_object = val
            return
        p = "data_object"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._data_object = UUID(val) if isinstance(val, str) else val
    # Total magnetic intensity data channel reference.
    @property
    def tmi_channel(self):
        return self._tmi_channel
    @tmi_channel.setter
    def tmi_channel(self, val):
        if val is None:
            self._tmi_channel = val
            return
        p = "tmi_channel"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._tmi_channel = UUID(val) if isinstance(val, str) else val
    # TMI uncertainty: presumably a constant float or a data uuid string
    # (the str -> UUID branch suggests data-driven uncertainties) - confirm.
    @property
    def tmi_uncertainty(self):
        return self._tmi_uncertainty
    @tmi_uncertainty.setter
    def tmi_uncertainty(self, val):
        if val is None:
            self._tmi_uncertainty = val
            return
        p = "tmi_uncertainty"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._tmi_uncertainty = UUID(val) if isinstance(val, str) else val
    # Starting-model parameters: *_object entries reference workspace objects
    # (str -> UUID), the value entries may be a uuid reference or a constant.
    @property
    def starting_model_object(self):
        return self._starting_model_object
    @starting_model_object.setter
    def starting_model_object(self, val):
        if val is None:
            self._starting_model_object = val
            return
        p = "starting_model_object"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._starting_model_object = UUID(val) if isinstance(val, str) else val
    @property
    def starting_inclination_object(self):
        return self._starting_inclination_object
    @starting_inclination_object.setter
    def starting_inclination_object(self, val):
        if val is None:
            self._starting_inclination_object = val
            return
        p = "starting_inclination_object"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._starting_inclination_object = UUID(val) if isinstance(val, str) else val
    @property
    def starting_declination_object(self):
        return self._starting_declination_object
    @starting_declination_object.setter
    def starting_declination_object(self, val):
        if val is None:
            self._starting_declination_object = val
            return
        p = "starting_declination_object"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._starting_declination_object = UUID(val) if isinstance(val, str) else val
    @property
    def starting_model(self):
        return self._starting_model
    @starting_model.setter
    def starting_model(self, val):
        if val is None:
            self._starting_model = val
            return
        p = "starting_model"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._starting_model = UUID(val) if isinstance(val, str) else val
    @property
    def starting_inclination(self):
        return self._starting_inclination
    @starting_inclination.setter
    def starting_inclination(self, val):
        if val is None:
            self._starting_inclination = val
            return
        p = "starting_inclination"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._starting_inclination = UUID(val) if isinstance(val, str) else val
    @property
    def starting_declination(self):
        return self._starting_declination
    @starting_declination.setter
    def starting_declination(self, val):
        if val is None:
            self._starting_declination = val
            return
        p = "starting_declination"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._starting_declination = UUID(val) if isinstance(val, str) else val
    # Spatial tiling parameter; may reference workspace data by uuid string.
    @property
    def tile_spatial(self):
        return self._tile_spatial
    @tile_spatial.setter
    def tile_spatial(self, val):
        if val is None:
            self._tile_spatial = val
            return
        p = "tile_spatial"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._tile_spatial = UUID(val) if isinstance(val, str) else val
    # Radar drape data reference used to drape receivers over topography.
    @property
    def receivers_radar_drape(self):
        return self._receivers_radar_drape
    @receivers_radar_drape.setter
    def receivers_radar_drape(self, val):
        if val is None:
            self._receivers_radar_drape = val
            return
        p = "receivers_radar_drape"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._receivers_radar_drape = UUID(val) if isinstance(val, str) else val
    # Constant receiver offsets along each axis (numeric; stored as given).
    @property
    def receivers_offset_x(self):
        return self._receivers_offset_x
    @receivers_offset_x.setter
    def receivers_offset_x(self, val):
        if val is None:
            self._receivers_offset_x = val
            return
        p = "receivers_offset_x"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._receivers_offset_x = val
    @property
    def receivers_offset_y(self):
        return self._receivers_offset_y
    @receivers_offset_y.setter
    def receivers_offset_y(self, val):
        if val is None:
            self._receivers_offset_y = val
            return
        p = "receivers_offset_y"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._receivers_offset_y = val
    @property
    def receivers_offset_z(self):
        return self._receivers_offset_z
    @receivers_offset_z.setter
    def receivers_offset_z(self, val):
        if val is None:
            self._receivers_offset_z = val
            return
        p = "receivers_offset_z"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._receivers_offset_z = val
    # GPS receiver offset; the str -> UUID branch suggests it may reference
    # workspace data - confirm against the callers.
    @property
    def gps_receivers_offset(self):
        return self._gps_receivers_offset
    @gps_receivers_offset.setter
    def gps_receivers_offset(self, val):
        if val is None:
            self._gps_receivers_offset = val
            return
        p = "gps_receivers_offset"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._gps_receivers_offset = UUID(val) if isinstance(val, str) else val
    # Data values to exclude from the inversion.
    @property
    def ignore_values(self):
        return self._ignore_values
    @ignore_values.setter
    def ignore_values(self, val):
        if val is None:
            self._ignore_values = val
            return
        p = "ignore_values"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._ignore_values = val
    # Spatial down-sampling resolution for the observed data.
    @property
    def resolution(self):
        return self._resolution
    @resolution.setter
    def resolution(self, val):
        if val is None:
            self._resolution = val
            return
        p = "resolution"
        self.validator.validate(
            p, val, self.validations[p], self.workspace, self.associations
        )
        self._resolution = val
@property
def detrend_data(self):
return self._detrend_data
@detrend_data.setter
def detrend_data(self, val):
if val is None:
self._detrend_data = val
return
p = "detrend_data"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._detrend_data = val
@property
def detrend_order(self):
return self._detrend_order
@detrend_order.setter
def detrend_order(self, val):
if val is None:
self._detrend_order = val
return
p = "detrend_order"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._detrend_order = val
@property
def detrend_type(self):
return self._detrend_type
@detrend_type.setter
def detrend_type(self, val):
if val is None:
self._detrend_type = val
return
p = "detrend_type"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._detrend_type = val
@property
def max_chunk_size(self):
return self._max_chunk_size
@max_chunk_size.setter
def max_chunk_size(self, val):
if val is None:
self._max_chunk_size = val
return
p = "max_chunk_size"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._max_chunk_size = val
@property
def chunk_by_rows(self):
return self._chunk_by_rows
@chunk_by_rows.setter
def chunk_by_rows(self, val):
if val is None:
self._chunk_by_rows = val
return
p = "chunk_by_rows"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._chunk_by_rows = val
@property
def output_tile_files(self):
return self._output_tile_files
@output_tile_files.setter
def output_tile_files(self, val):
if val is None:
self._output_tile_files = val
return
p = "output_tile_files"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._output_tile_files = val
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, val):
if val is None:
self._mesh = val
return
p = "mesh"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._mesh = UUID(val) if isinstance(val, str) else val
@property
def mesh_from_params(self):
return self._mesh_from_params
@mesh_from_params.setter
def mesh_from_params(self, val):
if val is None:
self._mesh_from_params = val
return
p = "mesh_from_params"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._mesh_from_params = val
@property
def core_cell_size_x(self):
return self._core_cell_size_x
@core_cell_size_x.setter
def core_cell_size_x(self, val):
if val is None:
self._core_cell_size_x = val
return
p = "core_cell_size_x"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._core_cell_size_x = val
@property
def core_cell_size_y(self):
return self._core_cell_size_y
@core_cell_size_y.setter
def core_cell_size_y(self, val):
if val is None:
self._core_cell_size_y = val
return
p = "core_cell_size_y"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._core_cell_size_y = val
@property
def core_cell_size_z(self):
return self._core_cell_size_z
@core_cell_size_z.setter
def core_cell_size_z(self, val):
if val is None:
self._core_cell_size_z = val
return
p = "core_cell_size_z"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._core_cell_size_z = val
@property
def octree_levels_topo(self):
return self._octree_levels_topo
@octree_levels_topo.setter
def octree_levels_topo(self, val):
if val is None:
self._octree_levels_topo = val
return
p = "octree_levels_topo"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._octree_levels_topo = val
@property
def octree_levels_obs(self):
return self._octree_levels_obs
@octree_levels_obs.setter
def octree_levels_obs(self, val):
if val is None:
self._octree_levels_obs = val
return
p = "octree_levels_obs"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._octree_levels_obs = val
@property
def octree_levels_padding(self):
return self._octree_levels_padding
@octree_levels_padding.setter
def octree_levels_padding(self, val):
if val is None:
self._octree_levels_padding = val
return
p = "octree_levels_padding"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._octree_levels_padding = val
@property
def depth_core(self):
return self._depth_core
@depth_core.setter
def depth_core(self, val):
if val is None:
self._depth_core = val
return
p = "depth_core"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._depth_core = val
@property
def max_distance(self):
return self._max_distance
@max_distance.setter
def max_distance(self, val):
if val is None:
self._max_distance = val
return
p = "max_distance"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._max_distance = val
@property
def padding_distance_x(self):
return self._padding_distance_x
@padding_distance_x.setter
def padding_distance_x(self, val):
if val is None:
self._padding_distance_x = val
return
p = "padding_distance_x"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._padding_distance_x = val
@property
def padding_distance_y(self):
return self._padding_distance_y
@padding_distance_y.setter
def padding_distance_y(self, val):
if val is None:
self._padding_distance_y = val
return
p = "padding_distance_y"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._padding_distance_y = val
@property
def padding_distance_z(self):
return self._padding_distance_z
@padding_distance_z.setter
def padding_distance_z(self, val):
if val is None:
self._padding_distance_z = val
return
p = "padding_distance_z"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._padding_distance_z = val
@property
def window_center_x(self):
return self._window_center_x
@window_center_x.setter
def window_center_x(self, val):
if val is None:
self._window_center_x = val
return
p = "window_center_x"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._window_center_x = val
@property
def window_center_y(self):
return self._window_center_y
@window_center_y.setter
def window_center_y(self, val):
if val is None:
self._window_center_y = val
return
p = "window_center_y"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._window_center_y = val
@property
def window_width(self):
return self._window_width
@window_width.setter
def window_width(self, val):
if val is None:
self._window_width = val
return
p = "window_width"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._window_width = val
@property
def window_height(self):
return self._window_height
@window_height.setter
def window_height(self, val):
if val is None:
self._window_height = val
return
p = "window_height"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._window_height = val
@property
def inversion_style(self):
return self._inversion_style
@inversion_style.setter
def inversion_style(self, val):
if val is None:
self._inversion_style = val
return
p = "inversion_style"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._inversion_style = val
@property
def chi_factor(self):
return self._chi_factor
@chi_factor.setter
def chi_factor(self, val):
if val is None:
self._chi_factor = val
return
p = "chi_factor"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._chi_factor = val
@property
def max_iterations(self):
return self._max_iterations
@max_iterations.setter
def max_iterations(self, val):
if val is None:
self._max_iterations = val
return
p = "max_iterations"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._max_iterations = val
@property
def max_cg_iterations(self):
return self._max_cg_iterations
@max_cg_iterations.setter
def max_cg_iterations(self, val):
if val is None:
self._max_cg_iterations = val
return
p = "max_cg_iterations"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._max_cg_iterations = val
@property
def max_global_iterations(self):
return self._max_global_iterations
@max_global_iterations.setter
def max_global_iterations(self, val):
if val is None:
self._max_global_iterations = val
return
p = "max_global_iterations"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._max_global_iterations = val
@property
def initial_beta(self):
return self._initial_beta
@initial_beta.setter
def initial_beta(self, val):
if val is None:
self._initial_beta = val
return
p = "initial_beta"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._initial_beta = val
@property
def initial_beta_ratio(self):
return self._initial_beta_ratio
@initial_beta_ratio.setter
def initial_beta_ratio(self, val):
if val is None:
self._initial_beta_ratio = val
return
p = "initial_beta_ratio"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._initial_beta_ratio = val
@property
def tol_cg(self):
return self._tol_cg
@tol_cg.setter
def tol_cg(self, val):
if val is None:
self._tol_cg = val
return
p = "tol_cg"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._tol_cg = val
@property
def alpha_s(self):
return self._alpha_s
@alpha_s.setter
def alpha_s(self, val):
if val is None:
self._alpha_s = val
return
p = "alpha_s"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._alpha_s = val
@property
def alpha_x(self):
return self._alpha_x
@alpha_x.setter
def alpha_x(self, val):
if val is None:
self._alpha_x = val
return
p = "alpha_x"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._alpha_x = val
@property
def alpha_y(self):
return self._alpha_y
@alpha_y.setter
def alpha_y(self, val):
if val is None:
self._alpha_y = val
return
p = "alpha_y"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._alpha_y = val
@property
def alpha_z(self):
return self._alpha_z
@alpha_z.setter
def alpha_z(self, val):
if val is None:
self._alpha_z = val
return
p = "alpha_z"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._alpha_z = val
@property
def smallness_norm(self):
return self._smallness_norm
@smallness_norm.setter
def smallness_norm(self, val):
if val is None:
self._smallness_norm = val
return
p = "smallness_norm"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._smallness_norm = val
@property
def x_norm(self):
return self._x_norm
@x_norm.setter
def x_norm(self, val):
if val is None:
self._x_norm = val
return
p = "x_norm"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._x_norm = val
@property
def y_norm(self):
return self._y_norm
@y_norm.setter
def y_norm(self, val):
if val is None:
self._y_norm = val
return
p = "y_norm"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._y_norm = val
@property
def z_norm(self):
return self._z_norm
@z_norm.setter
def z_norm(self, val):
if val is None:
self._z_norm = val
return
p = "z_norm"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._z_norm = val
@property
def reference_model_object(self):
return self._reference_model_object
@reference_model_object.setter
def reference_model_object(self, val):
if val is None:
self._reference_model_object = val
return
p = "reference_model_object"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._reference_model_object = UUID(val) if isinstance(val, str) else val
@property
def reference_inclination_object(self):
return self._reference_inclination_object
@reference_inclination_object.setter
def reference_inclination_object(self, val):
if val is None:
self._reference_inclination_object = val
return
p = "reference_inclination_object"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._reference_inclination_object = UUID(val) if isinstance(val, str) else val
@property
def reference_declination_object(self):
return self._reference_declination_object
@reference_declination_object.setter
def reference_declination_object(self, val):
if val is None:
self._reference_declination_object = val
return
p = "reference_declination_object"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._reference_declination_object = UUID(val) if isinstance(val, str) else val
@property
def reference_model(self):
return self._reference_model
@reference_model.setter
def reference_model(self, val):
if val is None:
self._reference_model = val
return
p = "reference_model"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._reference_model = UUID(val) if isinstance(val, str) else val
@property
def reference_inclination(self):
return self._reference_inclination
@reference_inclination.setter
def reference_inclination(self, val):
if val is None:
self._reference_inclination = val
return
p = "reference_inclination"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._reference_inclination = UUID(val) if isinstance(val, str) else val
@property
def reference_declination(self):
return self._reference_declination
@reference_declination.setter
def reference_declination(self, val):
if val is None:
self._reference_declination = val
return
p = "reference_declination"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._reference_declination = UUID(val) if isinstance(val, str) else val
@property
def gradient_type(self):
return self._gradient_type
@gradient_type.setter
def gradient_type(self, val):
if val is None:
self._gradient_type = val
return
p = "gradient_type"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._gradient_type = val
@property
def lower_bound(self):
return self._lower_bound
@lower_bound.setter
def lower_bound(self, val):
if val is None:
self._lower_bound = val
return
p = "lower_bound"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._lower_bound = UUID(val) if isinstance(val, str) else val
@property
def upper_bound(self):
return self._upper_bound
@upper_bound.setter
def upper_bound(self, val):
if val is None:
self._upper_bound = val
return
p = "upper_bound"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._upper_bound = UUID(val) if isinstance(val, str) else val
@property
def parallelized(self):
return self._parallelized
@parallelized.setter
def parallelized(self, val):
if val is None:
self._parallelized = val
return
p = "parallelized"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._parallelized = val
@property
def n_cpu(self):
return self._n_cpu
@n_cpu.setter
def n_cpu(self, val):
if val is None:
self._n_cpu = val
return
p = "n_cpu"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._n_cpu = val
@property
def max_ram(self):
return self._max_ram
@max_ram.setter
def max_ram(self, val):
if val is None:
self._max_ram = val
return
p = "max_ram"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._max_ram = val
@property
def out_group(self):
return self._out_group
@out_group.setter
def out_group(self, val):
if val is None:
self._out_group = val
return
p = "out_group"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._out_group = val
@property
def no_data_value(self):
return self._no_data_value
@no_data_value.setter
def no_data_value(self, val):
if val is None:
self._no_data_value = val
return
p = "no_data_value"
self.validator.validate(
p, val, self.validations[p], self.workspace, self.associations
)
self._no_data_value = val
    def _init_params(self, inputfile: InputFile) -> None:
        """ Wraps Params._init_params. """
        # Delegate to the base class, supplying the module-level
        # required_parameters and validations tables for this driver.
        super()._init_params(inputfile, required_parameters, validations)
| [
"uuid.UUID"
] | [((7308, 7317), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (7312, 7317), False, 'from uuid import UUID\n'), ((7839, 7848), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (7843, 7848), False, 'from uuid import UUID\n'), ((8370, 8379), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (8374, 8379), False, 'from uuid import UUID\n'), ((8838, 8847), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (8842, 8847), False, 'from uuid import UUID\n'), ((9257, 9266), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (9261, 9266), False, 'from uuid import UUID\n'), ((9683, 9692), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (9687, 9692), False, 'from uuid import UUID\n'), ((10109, 10118), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (10113, 10118), False, 'from uuid import UUID\n'), ((10563, 10572), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (10567, 10572), False, 'from uuid import UUID\n'), ((11059, 11068), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (11063, 11068), False, 'from uuid import UUID\n'), ((11597, 11606), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (11601, 11606), False, 'from uuid import UUID\n'), ((12135, 12144), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (12139, 12144), False, 'from uuid import UUID\n'), ((12582, 12591), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (12586, 12591), False, 'from uuid import UUID\n'), ((13071, 13080), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (13075, 13080), False, 'from uuid import UUID\n'), ((13560, 13569), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (13564, 13569), False, 'from uuid import UUID\n'), ((13993, 14002), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (13997, 14002), False, 'from uuid import UUID\n'), ((14489, 14498), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (14493, 14498), False, 'from uuid import UUID\n'), ((16286, 16295), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (16290, 16295), False, 'from uuid import UUID\n'), ((19871, 19880), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', 
(19875, 19880), False, 'from uuid import UUID\n'), ((33262, 33271), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (33266, 33271), False, 'from uuid import UUID\n'), ((33807, 33816), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (33811, 33816), False, 'from uuid import UUID\n'), ((34352, 34361), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (34356, 34361), False, 'from uuid import UUID\n'), ((34806, 34815), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (34810, 34815), False, 'from uuid import UUID\n'), ((35302, 35311), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (35306, 35311), False, 'from uuid import UUID\n'), ((35798, 35807), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (35802, 35807), False, 'from uuid import UUID\n'), ((36625, 36634), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (36629, 36634), False, 'from uuid import UUID\n'), ((37051, 37060), 'uuid.UUID', 'UUID', (['val'], {}), '(val)\n', (37055, 37060), False, 'from uuid import UUID\n')] |
from pfdo_mgz2img.pfdo_mgz2img import Pfdo_mgz2img
def main():
    """Entry point: instantiate the pfdo_mgz2img ChRIS app and launch it."""
    app = Pfdo_mgz2img()
    app.launch()


if __name__ == "__main__":
    main()
| [
"pfdo_mgz2img.pfdo_mgz2img.Pfdo_mgz2img"
] | [((81, 95), 'pfdo_mgz2img.pfdo_mgz2img.Pfdo_mgz2img', 'Pfdo_mgz2img', ([], {}), '()\n', (93, 95), False, 'from pfdo_mgz2img.pfdo_mgz2img import Pfdo_mgz2img\n')] |
from abc import abstractmethod
from sqlalchemy.orm import Session, aliased, selectinload
from sqlalchemy.orm.exc import NoResultFound
from domainmodel.movie import Movie
from domainmodel.director import Director
from domainmodel.actor import Actor
from domainmodel.genre import Genre
from domainmodel.orm import movie_actor, movie_genre
from domainmodel.review import Review
from domainmodel.user import User
class Repository:
    """Abstract data-access interface over movies, actors, directors,
    genres, users and reviews.  Concrete subclasses provide in-memory or
    database-backed storage."""
    def __init__(self):
        # Concrete subclasses populate these collections.
        self.movies = None
        self.actors = None
        self.directors = None
        self.genres = None
        self.users = None
    @abstractmethod
    def view_movies(self, start, number, director: str = "", actors=None, genres=None):
        """Return a page of movies matching the optional director/actor/genre
        filters; implementations return (page, has_more)."""
        # NOTE(review): normalising the defaults here has no effect — an
        # abstract method's body is not executed by overriding methods.
        if genres is None:
            genres = []
        if actors is None:
            actors = []
    @abstractmethod
    def add_user(self, username, password):
        """Register a new user; return True on success, False if it exists."""
        pass
    @abstractmethod
    def login(self, username, password):
        """Return the user's id when credentials match, otherwise None."""
        pass
    @abstractmethod
    def get_user(self, index):
        """Return the user with the given id, or None."""
        pass
    @abstractmethod
    def get_movie(self, index):
        """Return the movie with the given id, or None."""
        pass
    @abstractmethod
    def get_reviews(self, movie_index) -> [(User, Review)]:
        """Return (user, reviews) pairs for the movie, or None if unknown."""
        pass
    @abstractmethod
    def add_review(self, user_id, review):
        """Attach *review* to the given user; return True on success."""
        pass
def filter_results(director, actors, genres):
    """Build a predicate over movie objects for the given filter criteria.

    Empty or blank criteria mean "no filter".  The original tested
    ``actors is []`` / ``genres is []``, which is always False (identity
    against a fresh list); the behaviour only survived because ``all()``
    over an empty iterable is True.  The explicit emptiness checks below
    say what was meant.
    """
    def predicate(m):
        if director and m.director != Director(director):
            return False
        if actors and not all(Actor(a) in m.actors for a in actors):
            return False
        if genres and not all(Genre(g) in m.genres for g in genres):
            return False
        return True
    return predicate
class MemoryRepository(Repository):
    """Repository keeping all data in plain Python lists."""
    def __init__(self, m, a, d, g):
        self.movies = m
        # Assign stable, position-based ids so movies can be fetched by index.
        for i, movie in enumerate(m):
            movie.id = i
        self.actors = a
        self.directors = d
        self.genres = g
        self.users = []
    def view_movies(self, start, number, director=None, actors=None, genres=None):
        """Return (movies[start:start+number], has_more) after filtering."""
        # Normalise missing filters to their neutral values in one place
        # (replaces the previous ad-hoc "JANKY TODO" handling).
        predicate = filter_results(director or "", actors or [], genres or [])
        results = [m for m in self.movies if predicate(m)]
        return results[start:start + number], start + number < len(results)
    def add_user(self, username, password):
        """Register a user; return False when an equal user already exists."""
        user = User(username, password)
        if user in self.users:
            return False
        self.users.append(user)
        return True
    def get_user(self, index):
        # Bounds-checked: negative indices no longer wrap around the list.
        return self.users[index] if self.has_user(index) else None
    def get_movie(self, index):
        # Bounds-checked: negative indices no longer wrap around the list.
        if 0 <= index < len(self.movies):
            return self.movies[index]
        return None
    def get_reviews(self, movie_index):
        """Return (user, [reviews]) pairs for the movie, or None if unknown."""
        if not 0 <= movie_index < len(self.movies):
            return None
        reviews = []
        for user in self.users:
            user_reviews = [r for r in user.reviews if r.movie_id == movie_index]
            if user_reviews:
                reviews.append((user, user_reviews))
        return reviews
    def add_review(self, user_index, review):
        """Attach *review* to the user at *user_index*; True on success."""
        if not self.has_user(user_index):
            return False
        self.users[user_index].add_review(review)
        return True
    def has_user(self, user_id):
        return 0 <= user_id < len(self.users)
    def login(self, username, password):
        """Return the index of the matching user, or None (explicitly,
        rather than falling off the end as before)."""
        for i, user in enumerate(self.users):
            if user.username == username:
                return i if user.verify_password(password) else None
        return None
class DatabaseRepository(Repository):
    """Repository backed by a SQLAlchemy session factory."""
    def __init__(self, session_factory):
        self.session_factory = session_factory
    @property
    def directors(self):
        return self.session_factory().query(Director).all()
    @property
    def actors(self):
        return self.session_factory().query(Actor).all()
    @property
    def genres(self):
        return self.session_factory().query(Genre).all()
    def view_movies(self, start, number, director=None, actors=None, genres=None):
        """Return (movies_page, has_more) applying optional filters."""
        # Guard against None filters: the previous code iterated them
        # directly and would raise TypeError when they were omitted.
        actors = actors or []
        genres = genres or []
        session = self.session_factory()
        query = session.query(Movie)
        if director:
            query = query.join(Director).filter(Director.full_name == director)
        # Each actor filter needs its own alias of Actor and of the
        # movie_actor association table so the joins don't collide.
        for actor_name in actors:
            a = aliased(Actor)
            ma = aliased(movie_actor)
            query = query.join(ma, Movie.id == ma.c.movie_id).join(a, ma.c.actor_id == a.id)
            query = query.filter(a.full_name == actor_name)
        for genre_name in genres:
            g = aliased(Genre)
            mg = aliased(movie_genre)
            query = query.join(mg, Movie.id == mg.c.movie_id).join(g, mg.c.genre_id == g.id)
            query = query.filter(g.name == genre_name)
        count = query.count()
        # Eager-load the relationships the listing view uses, then paginate.
        query = query.options(selectinload(Movie.actors), selectinload(Movie.director))
        results = query.limit(number).offset(start).all()
        return results, start + number < count
    def add_user(self, username, password):
        """Insert a new user; return False when the username is taken."""
        session = self.session_factory()
        user = User(username, password)
        # The username is normalised by the User constructor, so compare
        # against user.username rather than the raw argument.
        exists = session.query(User).filter(User.username == user.username).count() > 0
        if exists:
            return False
        session.add(user)
        session.commit()
        return True
    def get_user(self, index):
        session = self.session_factory()
        try:
            return session.query(User).filter(User.id == index).one()
        except NoResultFound:
            return None
    def get_movie(self, index):
        session = self.session_factory()
        try:
            # selectinload forces an eager load of the relationships the
            # detail view needs, avoiding lazy loads after session close.
            return session.query(Movie) \
                .filter(Movie.id == index) \
                .options(selectinload(Movie.actors),
                         selectinload(Movie.genres),
                         selectinload(Movie.director)) \
                .one()
        except NoResultFound:
            return None
    def get_reviews(self, movie_index):
        """Return a list of (user, [reviews]) pairs for the given movie."""
        import itertools
        session = self.session_factory()
        reviews = session.query(Review) \
            .join(Movie) \
            .filter(Movie.id == movie_index) \
            .order_by(Review.user_id) \
            .all()
        user_ids = [review.user_id for review in reviews]
        # Both queries are ordered by user id, so zip pairs each user with
        # the group of reviews they wrote for this movie.
        users = session.query(User) \
            .filter(User.id.in_(user_ids)) \
            .order_by(User.id) \
            .all()
        grouped = itertools.groupby(reviews, lambda r: r.user_id)
        return [(user, list(group)) for user, (_, group) in zip(users, grouped)]
    def add_review(self, user_id, review):
        """Attach *review* to the given user id; True on success."""
        session = self.session_factory()
        if not self.has_user(user_id):
            return False
        review.user_id = user_id
        session.add(review)
        session.commit()
        return True
    def has_user(self, user_id):
        session = self.session_factory()
        return session.query(User).filter(User.id == user_id).count() > 0
    def login(self, username, password):
        """Return the user's id when credentials match, otherwise None."""
        session = self.session_factory()
        try:
            user = session.query(User).filter(User.username == username).one()
            if user.verify_password(password):
                return user.id
            return None
        except NoResultFound:
            return None
| [
"itertools.groupby",
"sqlalchemy.orm.selectinload",
"domainmodel.actor.Actor",
"domainmodel.user.User.id.in_",
"sqlalchemy.orm.aliased",
"domainmodel.user.User",
"domainmodel.director.Director",
"domainmodel.genre.Genre"
] | [((2313, 2337), 'domainmodel.user.User', 'User', (['username', 'password'], {}), '(username, password)\n', (2317, 2337), False, 'from domainmodel.user import User\n'), ((5353, 5377), 'domainmodel.user.User', 'User', (['username', 'password'], {}), '(username, password)\n', (5357, 5377), False, 'from domainmodel.user import User\n'), ((4554, 4568), 'sqlalchemy.orm.aliased', 'aliased', (['Actor'], {}), '(Actor)\n', (4561, 4568), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((4585, 4605), 'sqlalchemy.orm.aliased', 'aliased', (['movie_actor'], {}), '(movie_actor)\n', (4592, 4605), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((4807, 4821), 'sqlalchemy.orm.aliased', 'aliased', (['Genre'], {}), '(Genre)\n', (4814, 4821), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((4839, 4859), 'sqlalchemy.orm.aliased', 'aliased', (['movie_genre'], {}), '(movie_genre)\n', (4846, 4859), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((5088, 5114), 'sqlalchemy.orm.selectinload', 'selectinload', (['Movie.actors'], {}), '(Movie.actors)\n', (5100, 5114), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((5116, 5144), 'sqlalchemy.orm.selectinload', 'selectinload', (['Movie.director'], {}), '(Movie.director)\n', (5128, 5144), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((7156, 7203), 'itertools.groupby', 'itertools.groupby', (['reviews', '(lambda x: x.user_id)'], {}), '(reviews, lambda x: x.user_id)\n', (7173, 7203), False, 'import itertools\n'), ((1375, 1393), 'domainmodel.director.Director', 'Director', (['director'], {}), '(director)\n', (1383, 1393), False, 'from domainmodel.director import Director\n'), ((6281, 6307), 'sqlalchemy.orm.selectinload', 'selectinload', (['Movie.actors'], {}), '(Movie.actors)\n', (6293, 6307), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((6334, 6360), 'sqlalchemy.orm.selectinload', 
'selectinload', (['Movie.genres'], {}), '(Movie.genres)\n', (6346, 6360), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((6387, 6415), 'sqlalchemy.orm.selectinload', 'selectinload', (['Movie.director'], {}), '(Movie.director)\n', (6399, 6415), False, 'from sqlalchemy.orm import Session, aliased, selectinload\n'), ((1437, 1445), 'domainmodel.actor.Actor', 'Actor', (['a'], {}), '(a)\n', (1442, 1445), False, 'from domainmodel.actor import Actor\n'), ((1518, 1526), 'domainmodel.genre.Genre', 'Genre', (['g'], {}), '(g)\n', (1523, 1526), False, 'from domainmodel.genre import Genre\n'), ((6977, 6998), 'domainmodel.user.User.id.in_', 'User.id.in_', (['user_ids'], {}), '(user_ids)\n', (6988, 6998), False, 'from domainmodel.user import User\n')] |
import socket
from flask import Flask, Response
from PIL import Image, ImageDraw
import threading
from collections import deque
import struct
import io
# Listen on all interfaces for the camera client's socket connection.
HOST = '0.0.0.0'
PORT = 54321
# Raw frame size: 640x480 pixels, 3 bytes (RGB) each.
image_bytes_length = 640*480*3
# Five 8-byte big-endian doubles: x1, y1, x2, y2, score (see to_jpeg).
bbox_bytes_length = 5*8
# The socket client sends one bounding box and score.
data_bytes_length = image_bytes_length + bbox_bytes_length
app = Flask(__name__)
# The buffer that holds the most recent JPEG frame.
stream_buffer = deque(maxlen=1)
# global flag
should_capture = False
# The buffer that holds the most recent captured JPEG frame, which is either the first frame after the should_capture flag is set, or the latest frame showing a missing tooth.
capture_buffer = deque(maxlen=1)
def to_jpeg(image_bytes, bbox_bytes):
    """Render one 640x480 RGB frame to JPEG bytes.

    Always draws the outline of the (224, 224) crop that was sent to the
    model; additionally draws the detected bounding box when its score
    exceeds 0.5.

    Returns (jpeg_bytes, bbox_drawn) where bbox_drawn indicates whether a
    missing-tooth bounding box was drawn.
    """
    # bbox_bytes packs five big-endian doubles: x1, y1, x2, y2, score.
    # (Previously unpacked one '!d' at a time in a loop.)
    x1, y1, x2, y2, score = struct.unpack('!5d', bbox_bytes)
    image = Image.frombytes('RGB', (640, 480), image_bytes, 'raw', 'RGB')
    draw = ImageDraw.Draw(image)

    def draw_box(bx1, by1, bx2, by2):
        # The (258, 148) offsets place the (224, 224) model crop back at
        # its position inside the full (640, 480) frame, inverting the
        # cropping in recognize.py:image_bytes_to_image.
        bx1 += 258
        bx2 += 258
        by1 += 148
        by2 += 148
        draw.line(xy=[(bx1, by1), (bx2, by1), (bx2, by2), (bx1, by2), (bx1, by1)],
                  fill=128, width=5)

    # Outline the region of the image that was fed to the model.
    draw_box(0.0, 0.0, 224.0, 224.0)

    bbox_drawn = False
    if score > 0.5:
        bbox_drawn = True
        # Detection coordinates are normalized to the (224, 224) crop;
        # scale to pixels before drawing.
        draw_box(x1 * 224, y1 * 224, x2 * 224, y2 * 224)

    # Encode the annotated frame as a single JPEG for the MJPEG stream.
    bytes_buffer = io.BytesIO()
    image.save(bytes_buffer, format='JPEG')
    return bytes_buffer.getvalue(), bbox_drawn
def server_worker(host, port, stream_buffer, capture_buffer):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((host, port))
s.listen()
print('Waiting for connection.')
conn, addr = s.accept()
with conn:
print('Client: {}'.format(addr))
while True:
try:
# image bytes and bounding box score bytes
data = conn.recv(data_bytes_length)
if data and len(data) == data_bytes_length:
image_bytes = data[:image_bytes_length]
bbox_bytes = data[image_bytes_length:]
frame, bbox_drawn = to_jpeg(image_bytes, bbox_bytes)
stream_buffer.append(frame)
# update the frame in capture_buffer if:
# (a) should_capture is True and capture_buffer is empty; or
# (b) should_capture is True and bbox_drawn is True
should_update = should_capture and (bbox_drawn or not capture_buffer)
if should_update:
capture_buffer.append(frame)
except Exception as e:
print(repr(e))
break
def make_generator(buffer_):
    """Yield the newest frame in *buffer_* forever, each wrapped as a
    multipart/x-mixed-replace part suitable for MJPEG streaming."""
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        # Busy-wait until at least one frame is available.
        if not buffer_:
            continue
        # Peek instead of pop: the producer may not refresh the buffer
        # before the next iteration, so the last frame is re-sent.
        yield part_header + buffer_[-1] + b'\r\n'
@app.route('/video')
def video():
    """Serve the live feed of every received frame as an MJPEG stream."""
    return Response(make_generator(stream_buffer),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/capture')
def capture():
    """Serve the most recently captured still as an MJPEG stream."""
    return Response(make_generator(capture_buffer),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/start_capture')
def start_capture():
    """Enable capture mode, starting from an empty capture buffer.

    While capturing, the server only stores a frame when the buffer is
    empty or when missing teeth are detected, so the buffer must be
    cleared here to guarantee a fresh still is grabbed each time even
    when no teeth are detected.
    """
    global should_capture
    capture_buffer.clear()
    should_capture = True
    return 'OK', 200
@app.route('/stop_capture')
def stop_capture():
    """Disable capture mode; the last captured still remains served at /capture."""
    global should_capture
    should_capture = False
    return 'OK', 200
@app.route('/')
def index():
    """Health-check endpoint."""
    return 'OK', 200
if __name__ == '__main__':
    # Receive frames from the socket client in a background thread while
    # Flask serves the HTTP routes in the main thread.
    thread = threading.Thread(target=server_worker, args=(HOST, PORT, stream_buffer, capture_buffer))
    thread.start()
    app.run(host='0.0.0.0', debug=False)
    thread.join()
| [
"collections.deque",
"socket.socket",
"flask.Flask",
"io.BytesIO",
"PIL.ImageDraw.Draw",
"struct.unpack",
"flask.Response",
"PIL.Image.frombytes",
"threading.Thread"
] | [((360, 375), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (365, 375), False, 'from flask import Flask, Response\n'), ((445, 460), 'collections.deque', 'deque', ([], {'maxlen': '(1)'}), '(maxlen=1)\n', (450, 460), False, 'from collections import deque\n'), ((693, 708), 'collections.deque', 'deque', ([], {'maxlen': '(1)'}), '(maxlen=1)\n', (698, 708), False, 'from collections import deque\n'), ((1226, 1238), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1236, 1238), False, 'import io\n'), ((1252, 1313), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', '(640, 480)', 'image_bytes', '"""raw"""', '"""RGB"""'], {}), "('RGB', (640, 480), image_bytes, 'raw', 'RGB')\n", (1267, 1313), False, 'from PIL import Image, ImageDraw\n'), ((1630, 1651), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (1644, 1651), False, 'from PIL import Image, ImageDraw\n'), ((4312, 4385), 'flask.Response', 'Response', (['generator'], {'mimetype': '"""multipart/x-mixed-replace; boundary=frame"""'}), "(generator, mimetype='multipart/x-mixed-replace; boundary=frame')\n", (4320, 4385), False, 'from flask import Flask, Response\n'), ((4504, 4577), 'flask.Response', 'Response', (['generator'], {'mimetype': '"""multipart/x-mixed-replace; boundary=frame"""'}), "(generator, mimetype='multipart/x-mixed-replace; boundary=frame')\n", (4512, 4577), False, 'from flask import Flask, Response\n'), ((5264, 5356), 'threading.Thread', 'threading.Thread', ([], {'target': 'server_worker', 'args': '(HOST, PORT, stream_buffer, capture_buffer)'}), '(target=server_worker, args=(HOST, PORT, stream_buffer,\n capture_buffer))\n', (5280, 5356), False, 'import threading\n'), ((1051, 1083), 'struct.unpack', 'struct.unpack', (['"""!d"""', 'float_bytes'], {}), "('!d', float_bytes)\n", (1064, 1083), False, 'import struct\n'), ((2297, 2318), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (2311, 2318), False, 'from PIL import Image, ImageDraw\n'), ((2654, 2703), 
'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2667, 2703), False, 'import socket\n')] |
###########################################################################
# PROJECT: IPC (Interprocess Communication) Package
#
# (c) Copyright 2011 <NAME>. All rights reserved.
#
# FILE: module2.py
#
# ABSTRACT: Test program for Python version of IPC.
# Publishes: MSG2
# Subscribes to: MSG1, QUERY1
# Responds with: RESPONSE1
# Behavior: Listens for MSG1 and prints out message data.
# When QUERY1 is received, publishes MSG1 and
# responds to the query with RESPONSE1.
# Exits when 'q' is typed at terminal.
# Should be run in conjunction with module1
#
# $Revision: 2.2 $
# $Date: 2013/07/24 20:01:01 $
# $Author: reids $
#
# Copyright (c) 2008, Carnegie Mellon University
# This software is distributed under the terms of the
# Simplified BSD License (see ipc/LICENSE.TXT)
#
# REVISION HISTORY
#
# $Log: module2.py,v $
# Revision 2.2 2013/07/24 20:01:01 reids
# Updating lisp, java, python test programs to adhere to updated API
#
# Revision 2.1 2011/08/16 16:00:09 reids
# Adding Python test programs
#
################################################################/
import sys
from primFmttrs import *
import IPC
from module import *
def msg1Handler (msgRef, callData, clientData) :
    """Subscription handler for MSG1: log the instance name and payload."""
    instance_name = IPC.IPC_msgInstanceName(msgRef)
    print("msg1Handler: Receiving %s (%d) [%s] " % (instance_name, callData, clientData))
def queryHandler (msgRef, t1, clientData) :
    """Subscription handler for QUERY1.

    Prints the received T1 data, publishes MSG2 to all subscribers, and
    responds to the querier with a RESPONSE1 (T2) message.
    """
    # BUG FIX: the format string was passed to print() as a plain argument
    # (followed by the tuple), so "%s" was never interpolated; apply the
    # % operator instead.
    print("queryHandler: Receiving %s [%s]" % \
          (IPC.IPC_msgInstanceName(msgRef), clientData))
    IPC.IPC_printData(IPC.IPC_msgInstanceFormatter(msgRef), sys.stdout, t1)
    # Publish this message -- all subscribers get it
    str1 = "Hello, world"
    print('\n  IPC.IPC_publishData(%s, "%s")' % (MSG2, str1))
    IPC.IPC_publishData(MSG2, str1)
    # Build the RESPONSE1 payload, echoing the queried T1 back.
    t2 = T2()
    t2.str1 = str1
    # Variable length array of one element (the received t1).
    t2.t1 = [t1]
    t2.count = 1
    t2.status = ReceiveVal
    # Respond with this message -- only the query handler gets it
    print("\n  IPC.IPC_respondData(%s, %s, %s)" % (msgRef, RESPONSE1, t2))
    IPC.IPC_respondData(msgRef, RESPONSE1, t2)
# Set by stdinHnd when the user asks to quit; polled by main's event loop.
done = False
def stdinHnd (fd, clientData) :
    """Fd handler for terminal input: 'q'/'Q' disconnects from IPC and
    flags the main loop to exit; anything else is echoed back."""
    global done
    # Renamed from 'input', which shadowed the builtin of the same name.
    line = sys.stdin.readline()
    # startswith() on a tuple also handles the empty string returned at
    # EOF (the original 'input[0]' raised IndexError there).
    if line.startswith(('q', 'Q')):
        IPC.IPC_disconnect()
        done = True
    else :
        print("stdinHnd [%s]: Received %s" % (clientData, line))
def main () :
    """Connect to the IPC central server, register this module's messages
    and handlers, then dispatch events until stdinHnd sets 'done'."""
    global done
    done = False
    # Connect to the central server
    print("\nIPC.IPC_connect(%s)" % MODULE2_NAME)
    IPC.IPC_connect(MODULE2_NAME)
    # Define the messages that this module publishes
    print("\nIPC.IPC_defineMsg(%s, IPC_VARIABLE_LENGTH, %s)" % \
          (MSG2, MSG2_FORMAT))
    IPC.IPC_defineMsg(MSG2, IPC.IPC_VARIABLE_LENGTH, MSG2_FORMAT)
    print("\nIPC.IPC_defineMsg(%s, IPC_VARIABLE_LENGTH, %s)" % \
          (RESPONSE1, RESPONSE1_FORMAT))
    IPC.IPC_defineMsg(RESPONSE1, IPC.IPC_VARIABLE_LENGTH, RESPONSE1_FORMAT)
    # Marshal RESPONSE1 payloads to/from the T2 class.
    IPC.IPC_msgClass(RESPONSE1, T2)
    # Subscribe to the messages that this module listens to
    print("\nIPC.IPC_subscribeData(%s,%s, %s)" % \
          (MSG1, msg1Handler.__name__, MODULE2_NAME))
    IPC.IPC_subscribeData(MSG1, msg1Handler, MODULE2_NAME)
    print("\nIPC.IPC_subscribeData(%s, %s, %s, %s)" % \
          (QUERY1 , queryHandler.__name__, MODULE2_NAME, T1.__name__))
    IPC.IPC_subscribeData(QUERY1, queryHandler, MODULE2_NAME)
    # Subscribe a handler for tty input.  Typing "q" will quit the program.
    print("\nIPC_subscribeFD(%d, stdinHnd, %s)" % \
          (sys.stdin.fileno(), MODULE2_NAME))
    IPC.IPC_subscribeFD(sys.stdin.fileno(), stdinHnd, MODULE2_NAME)
    print("\nType 'q' to quit")
    # Dispatch IPC events in 250 ms slices until stdinHnd flips 'done'.
    while (not done) : IPC.IPC_listen(250)
    IPC.IPC_disconnect()
if __name__ == "__main__": main()
| [
"IPC.IPC_defineMsg",
"IPC.IPC_connect",
"sys.stdin.fileno",
"IPC.IPC_msgInstanceName",
"IPC.IPC_listen",
"IPC.IPC_msgInstanceFormatter",
"IPC.IPC_subscribeData",
"IPC.IPC_publishData",
"sys.stdin.readline",
"IPC.IPC_disconnect",
"IPC.IPC_msgClass",
"IPC.IPC_respondData"
] | [((1790, 1821), 'IPC.IPC_publishData', 'IPC.IPC_publishData', (['MSG2', 'str1'], {}), '(MSG2, str1)\n', (1809, 1821), False, 'import IPC\n'), ((2109, 2151), 'IPC.IPC_respondData', 'IPC.IPC_respondData', (['msgRef', 'RESPONSE1', 't2'], {}), '(msgRef, RESPONSE1, t2)\n', (2128, 2151), False, 'import IPC\n'), ((2223, 2243), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2241, 2243), False, 'import sys\n'), ((2529, 2558), 'IPC.IPC_connect', 'IPC.IPC_connect', (['MODULE2_NAME'], {}), '(MODULE2_NAME)\n', (2544, 2558), False, 'import IPC\n'), ((2705, 2766), 'IPC.IPC_defineMsg', 'IPC.IPC_defineMsg', (['MSG2', 'IPC.IPC_VARIABLE_LENGTH', 'MSG2_FORMAT'], {}), '(MSG2, IPC.IPC_VARIABLE_LENGTH, MSG2_FORMAT)\n', (2722, 2766), False, 'import IPC\n'), ((2872, 2943), 'IPC.IPC_defineMsg', 'IPC.IPC_defineMsg', (['RESPONSE1', 'IPC.IPC_VARIABLE_LENGTH', 'RESPONSE1_FORMAT'], {}), '(RESPONSE1, IPC.IPC_VARIABLE_LENGTH, RESPONSE1_FORMAT)\n', (2889, 2943), False, 'import IPC\n'), ((2946, 2977), 'IPC.IPC_msgClass', 'IPC.IPC_msgClass', (['RESPONSE1', 'T2'], {}), '(RESPONSE1, T2)\n', (2962, 2977), False, 'import IPC\n'), ((3140, 3194), 'IPC.IPC_subscribeData', 'IPC.IPC_subscribeData', (['MSG1', 'msg1Handler', 'MODULE2_NAME'], {}), '(MSG1, msg1Handler, MODULE2_NAME)\n', (3161, 3194), False, 'import IPC\n'), ((3321, 3378), 'IPC.IPC_subscribeData', 'IPC.IPC_subscribeData', (['QUERY1', 'queryHandler', 'MODULE2_NAME'], {}), '(QUERY1, queryHandler, MODULE2_NAME)\n', (3342, 3378), False, 'import IPC\n'), ((3688, 3708), 'IPC.IPC_disconnect', 'IPC.IPC_disconnect', ([], {}), '()\n', (3706, 3708), False, 'import IPC\n'), ((1598, 1634), 'IPC.IPC_msgInstanceFormatter', 'IPC.IPC_msgInstanceFormatter', (['msgRef'], {}), '(msgRef)\n', (1626, 1634), False, 'import IPC\n'), ((2293, 2313), 'IPC.IPC_disconnect', 'IPC.IPC_disconnect', ([], {}), '()\n', (2311, 2313), False, 'import IPC\n'), ((3569, 3587), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (3585, 3587), False, 'import 
sys\n'), ((3665, 3684), 'IPC.IPC_listen', 'IPC.IPC_listen', (['(250)'], {}), '(250)\n', (3679, 3684), False, 'import IPC\n'), ((1532, 1563), 'IPC.IPC_msgInstanceName', 'IPC.IPC_msgInstanceName', (['msgRef'], {}), '(msgRef)\n', (1555, 1563), False, 'import IPC\n'), ((1383, 1414), 'IPC.IPC_msgInstanceName', 'IPC.IPC_msgInstanceName', (['msgRef'], {}), '(msgRef)\n', (1406, 1414), False, 'import IPC\n'), ((3512, 3530), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (3528, 3530), False, 'import sys\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import supported_api_version
from azure.cli.core.commands import create_command, command_table
from ._validators import validate_client_parameters
def cli_storage_data_plane_command(name, operation, client_factory, transform=None, table_transformer=None,
                                   exception_handler=None, resource_type=None, max_api=None, min_api=None):
    """ Registers an Azure CLI Storage Data Plane command. These commands always include the
    four parameters which can be used to obtain a storage client: account-name, account-key,
    connection-string, and sas-token. """
    # Silently skip registration when the command targets a resource type
    # outside the [min_api, max_api] range supported by the active profile.
    if resource_type and (max_api or min_api):
        if not supported_api_version(resource_type, min_api=min_api, max_api=max_api):
            return
    command = create_command(__name__, name, operation, transform, table_transformer,
                             client_factory, exception_handler=exception_handler)
    # add parameters required to create a storage client
    group_name = 'Storage Account'
    command.add_argument('account_name', '--account-name', required=False, default=None,
                         arg_group=group_name,
                         help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used '
                              'in conjunction with either storage account key or a SAS token. If neither are present, '
                              'the command will try to query the storage account key using the authenticated Azure '
                              'account. If a large number of storage commands are executed the API quota may be hit')
    command.add_argument('account_key', '--account-key', required=False, default=None,
                         arg_group=group_name,
                         help='Storage account key. Must be used in conjunction with storage '
                              'account name. Environment variable: '
                              'AZURE_STORAGE_KEY')
    # NOTE(review): validate_client_parameters is attached to this single
    # argument so it runs once per invocation -- presumably it resolves the
    # whole credential group; confirm against _validators.
    command.add_argument('connection_string', '--connection-string', required=False, default=None,
                         validator=validate_client_parameters, arg_group=group_name,
                         help='Storage account connection string. Environment variable: '
                              'AZURE_STORAGE_CONNECTION_STRING')
    command.add_argument('sas_token', '--sas-token', required=False, default=None,
                         arg_group=group_name,
                         help='A Shared Access Signature (SAS). Must be used in conjunction with '
                              'storage account name. Environment variable: '
                              'AZURE_STORAGE_SAS_TOKEN')
    # Expose the finished command through the global command table.
    command_table[command.name] = command
| [
"azure.cli.core.commands.create_command",
"azure.cli.core.profiles.supported_api_version"
] | [((1137, 1265), 'azure.cli.core.commands.create_command', 'create_command', (['__name__', 'name', 'operation', 'transform', 'table_transformer', 'client_factory'], {'exception_handler': 'exception_handler'}), '(__name__, name, operation, transform, table_transformer,\n client_factory, exception_handler=exception_handler)\n', (1151, 1265), False, 'from azure.cli.core.commands import create_command, command_table\n'), ((1031, 1101), 'azure.cli.core.profiles.supported_api_version', 'supported_api_version', (['resource_type'], {'min_api': 'min_api', 'max_api': 'max_api'}), '(resource_type, min_api=min_api, max_api=max_api)\n', (1052, 1101), False, 'from azure.cli.core.profiles import supported_api_version\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import random
import sys
from geopandas import GeoDataFrame
from io import StringIO
from smoomapy import (
quick_stewart, quick_idw, SmoothIdw, SmoothStewart,
head_tail_breaks, maximal_breaks, get_opt_nb_class)
from smoomapy.helpers_classif import _chain
class TestSmoothIdw(unittest.TestCase):
    """Tests for the inverse-distance-weighting (IDW) smoothing API."""
    def setUp(self):
        pass
    def test_one_shot_idw(self):
        # quick_idw one-shot helper: bytes output and GeoDataFrame output.
        # Exports correctly to `bytes`:
        res = quick_idw(
            "misc/nuts3_data.geojson", "pop2008",
            power=1, resolution=80000, nb_class=8,
            disc_func='jenks', mask="misc/nuts3_data.geojson")
        self.assertIsInstance(res, bytes)
        # Exports correctly to `GeoDataFrame`
        # and respects the choosen number of class:
        res = quick_idw(
            "misc/nuts3_data.geojson", "pop2008",
            power=1, nb_pts=8000,
            nb_class=8, disc_func="jenks",
            mask="misc/nuts3_data.geojson",
            output="GeoDataFrame")
        self.assertIsInstance(res, GeoDataFrame)
        self.assertEqual(len(res), 8)
    def test_object_idw(self):
        # Test the OO approach for building smoothed map with stewart potentials
        idw = SmoothIdw("misc/nuts3_data.geojson", "pop2008",
                        power=2,
                        resolution=90000,
                        mask="misc/nuts3_data.geojson")
        # Test using percentiles :
        result = idw.render(nb_class=10,
                            disc_func="percentiles",
                            output="geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 10)
        # Test using somes already choosed break values :
        my_breaks = [0, 250000, 375000, 500000, 870000, 1850000, 4250000]
        result = idw.render(
            nb_class=48,  # bogus values as `nb_class` and
            disc_func="foobar",  # ... disc_func should be overrided
            user_defined_breaks=my_breaks,  # ... by the `user_defined_breaks` params
            output="geodataframe")  # ... and this is what we are testing here
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), len(my_breaks) - 1)
        # Assert these break values were actually used :
        for wanted_break, obtained_break in zip(my_breaks[1:-1], result["max"][:-1]):
            self.assertAlmostEqual(wanted_break, obtained_break)
        # Test again using another discretization method : "head tail breaks"
        # (should define automatically the number of class)
        result = idw.render(nb_class=None,
                            disc_func="head_tail",
                            output="geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        # Test that the object has a nice representation :
        a = str(idw)
        b = repr(idw)
        self.assertEqual(a, b)
        self.assertIn("SmoothIdw - variable :", a)
        self.assertIn("{} features".format(len(idw.gdf)), a)
        # Printing .properties should echo the same header (py3 only).
        if sys.version_info >= (3, 0):
            sys.stdout = StringIO()
            idw.properties
            printed = sys.stdout.getvalue()
            sys.stdout = sys.__stdout__
            self.assertIn("SmoothIdw - variable :", printed)
    # def test_object_idw_two_var(self):
    #     # Test the OO approach with two variables :
    #     idw = SmoothIdw("misc/nuts3_data.geojson", "gdppps2008",
    #                     power=0.7, resolution=80000,
    #                     variable_name2="pop2008",
    #                     mask="misc/nuts3_data.geojson")
    #     result = idw.render(8, "equal_interval", output="Geodataframe")
    #     self.assertIsInstance(result, GeoDataFrame)
    #     self.assertEqual(len(result), 8)
    def test_distance_not_geo(self):
        # Euclidean (non-geodesic) distances via distGeo=False.
        # First whith one variable :
        idw = SmoothIdw("misc/nuts3_data.geojson",
                        "gdppps2008",
                        nb_pts=7200,
                        power=3,
                        mask="misc/nuts3_data.geojson",
                        distGeo=False)
        result = idw.render(8, "jenks", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 8)
        # # Then with two variables and a custom projection to use :
        # idw = SmoothIdw("misc/nuts3_data.geojson",
        #                 "gdppps2008",
        #                 power=1.5,
        #                 variable_name2="pop2008",
        #                 mask="misc/nuts3_data.geojson",
        #                 distGeo=False,
        #                 projDistance={"init": "epsg:3035"})
        # result = idw.render(8, "equal_interval", output="Geodataframe")
        # self.assertIsInstance(result, GeoDataFrame)
        # self.assertEqual(len(result), 8)
        # self.assertEqual(result.crs, {'init': 'epsg:3035'})
    def test_from_gdf_with_new_mask(self):
        # Build from an in-memory GeoDataFrame; swap masks between renders.
        gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
        idw = SmoothIdw(gdf, "gdppps2008", power=1, nb_pts=2800, mask=None)
        result = idw.render(6, "percentiles", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 6)
        # Finally, use a mask (from a file) :
        result = idw.render(5, "percentiles",
                            output="Geodataframe",
                            new_mask="misc/nuts3_data.geojson")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(idw.use_mask, True)
        self.assertEqual(len(result), 5)
        # Or from a GeoDataFrame :
        result = idw.render(6, "percentiles",
                            output="Geodataframe",
                            new_mask=gdf)
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(idw.use_mask, True)
        self.assertEqual(len(result), 6)
        # # Nope, no mask :
        # result = idw.render(5, "percentiles",
        #                     output="Geodataframe",
        #                     new_mask=None)
        # self.assertIsInstance(result, GeoDataFrame)
        # self.assertEqual(idw.use_mask, False)
        # self.assertEqual(len(result), 5)
        # Test that it skips the mask parameter if the layer provided as a mask
        # is not a Polygon/MultiPolygon layer :
        gdf_mask = gdf[1:50].copy()
        gdf_mask.geometry = gdf_mask.geometry.centroid
        result = idw.render(5, "percentiles",
                            output="Geodataframe",
                            new_mask=gdf_mask)
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(idw.use_mask, False)
        self.assertEqual(len(result), 5)
    def test_input_with_missing_values(self):
        # NaN values and empty-string values in the variable column
        # should be tolerated.
        gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
        gdf.loc[12:18, "gdppps2008"] = np.NaN
        idw = SmoothIdw(gdf, "gdppps2008", power=1, nb_pts=2600, mask=gdf)
        result = idw.render(9, "jenks", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 9)
        gdf2 = GeoDataFrame.from_file('misc/nuts3_data.geojson').to_crs({"init": "epsg:3035"})
        gdf2.loc[:, 'gdppps2008'] = gdf2['gdppps2008'].astype(object)
        gdf2.loc[15:20, 'gdppps2008'] = ""
        gdf2.loc[75:78, 'gdppps2008'] = ""
        idw = SmoothIdw(gdf2, 'gdppps2008', power=1, nb_pts=1200, mask=gdf2)
        result = idw.render(9, 'jenks', output="GeoDataFrame")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 9)
    def test_wrong_dtype_missing_values(self):
        # A string-typed column with NaNs mixed in should still render.
        gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
        gdf.loc[12:18, "gdppps2008"] = np.NaN
        gdf.loc[25:35, "pop2008"] = np.NaN
        gdf.loc[0:len(gdf)-1, "pop2008"] = gdf["pop2008"].astype(str)
        idw = SmoothIdw(gdf, "gdppps2008", power=1, nb_pts=2600,
                        mask="misc/nuts3_data.geojson")
        result = idw.render(9, "jenks", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 9)
        # idw = SmoothIdw(gdf, "gdppps2008", variable_name2="pop2008",
        #                 power=1, nb_pts=1200, mask="misc/nuts3_data.geojson")
        # result = idw.render(9, "equal_interval", output="Geodataframe")
        # self.assertIsInstance(result, GeoDataFrame)
        # self.assertEqual(len(result), 9)
    def test_from_point_layer_and_maximal_breaks(self):
        # Point inputs and the maximal_breaks discretisation method.
        gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson").to_crs({"init": "epsg:4326"})
        # Convert the input layer to a point layer :
        gdf.geometry = gdf.geometry.centroid
        idw = SmoothIdw(gdf, "gdppps2008", power=1, nb_pts=7600,
                        mask="misc/nuts3_data.geojson")
        # Use equal interval :
        result = idw.render(3, "equal_interval", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 3)
        # Use maximal breaks discretisation method:
        result = idw.render(9, "maximal_breaks", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
    def test_from_polygon_layer_no_crs(self):
        # Polygon (not multipolygon) input with an empty CRS.
        gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
        gdf.crs = ''
        # Convert the input layer to a polygon layer (instead of multipolygon):
        gdf.geometry = gdf.geometry.union(gdf.geometry)
        idw = SmoothIdw(gdf, "gdppps2008", power=1, nb_pts=2600,
                        mask="misc/nuts3_data.geojson")
        # Use equal interval :
        result = idw.render(8, "jenks", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 8)
    def test_errors(self):
        idw = SmoothIdw("misc/nuts3_data.geojson", "gdppps2008",
                        power=2, nb_pts=1000)
        # Test with a wrong discretization function name :
        with self.assertRaises(ValueError):
            idw.render(9, "foo", output="Geodataframe")
        # Test with a sizelimit and a high number of points
        # (the nuts3 layer contains 1448 features)
        with self.assertRaises(ValueError):
            idw = SmoothIdw("misc/nuts3_data.geojson", "gdppps2008",
                            power=2, nb_pts=100000, sizelimit=10000000)
class TestSmoothStewart(unittest.TestCase):
    def setUp(self):
        pass
    def test_one_shot_stewart(self):
        # quick_stewart one-shot helper: output types, defaulting, and
        # user-defined break handling (ordering, duplicates, open ends).
        # Exports correctly to `bytes`:
        res = quick_stewart(
            "misc/nuts3_data.geojson", "pop2008",
            span=65000, beta=2, resolution=80000, nb_class=8,
            mask="misc/nuts3_data.geojson")
        self.assertIsInstance(res, bytes)
        # Exports correctly to `GeoDataFrame`
        # and respects the choosen number of class:
        res = quick_stewart(
            "misc/nuts3_data.geojson", "pop2008",
            span=65000, beta=2, nb_pts=8000, nb_class=8,
            mask="misc/nuts3_data.geojson", output="GeoDataFrame")
        self.assertIsInstance(res, GeoDataFrame)
        self.assertEqual(len(res), 8)
        # Test that it works without specifying without `nb_pts`,
        # `nb_class` and `resolution`:
        res = quick_stewart(
            "misc/nuts3_data.geojson", "pop2008",
            span=65000,
            beta=2,
            mask="misc/nuts3_data.geojson",
            output="GeoDataFrame")
        self.assertIsInstance(res, GeoDataFrame)
        # Test with user defined breaks values :
        my_breaks = [0, 197000, 1295000, 2093000, 3091000,
                     5888000, 10186000, 13500000]
        res = quick_stewart(
            "misc/nuts3_data.geojson",
            "pop2008",
            span=65000,
            beta=2,
            resolution=80000,
            user_defined_breaks=my_breaks,
            mask="misc/nuts3_data.geojson",
            output="GeoDataFrame")
        self.assertIsInstance(res, GeoDataFrame)
        self.assertEqual(len(res), 7)
        # Assert these break values were actually used :
        for wanted_break, obtained_break in zip(my_breaks[1:-1], res["max"][:-1]):
            self.assertAlmostEqual(wanted_break, obtained_break)
        # Test with user defined breaks values
        # (the maximum value is volontarily low, and the minimum volontarily high,
        # two new class will be created,
        # respectively between the minimum and the first break value
        # and between the last break value and the maximum)
        my_breaks = [1295000, 2093000, 3091000, 5888000, 10186000]
        nb_interval = len(my_breaks) - 1
        res2 = quick_stewart(
            "misc/nuts3_data.geojson",
            "pop2008",
            span=65000,
            beta=2,
            resolution=80000,
            user_defined_breaks=my_breaks,
            mask="misc/nuts3_data.geojson",
            output="GeoDataFrame")
        self.assertIsInstance(res2, GeoDataFrame)
        # We can test that there is no hole by comparing the area of theses polygons
        # and the area of the previously computed resultat :
        self.assertAlmostEqual(res2.area.sum(), res.area.sum(), 2)
        # And by the fact that there is two extra class compared to our break values :
        self.assertEqual(len(res2), nb_interval + 2)
        # Test with break values non-unique (likely due to the discretization choosed):
        # + Not correctly ordered values
        # They should be reorderer and duplicates should be removed ...
        my_breaks = [0, 0, 197000, 1295000, 3091000, 2093000,
                     5888000, 10186000, 13500000]
        res3 = quick_stewart(
            "misc/nuts3_data.geojson",
            "pop2008",
            span=65000,
            beta=2,
            resolution=80000,
            user_defined_breaks=my_breaks,
            mask="misc/nuts3_data.geojson",
            output="GeoDataFrame")
        self.assertIsInstance(res3, GeoDataFrame)
        # ... so we should have the same class number than `res` :
        self.assertEqual(len(res3), len(res))
    def test_object_stewart(self):
        # Test the OO approach for building smoothed map with stewart potentials
        StePot = SmoothStewart("misc/nuts3_data.geojson", "pop2008",
                               span=65000, beta=2, resolution=90000,
                               mask="misc/nuts3_data.geojson")
        # Test using percentiles :
        result = StePot.render(nb_class=10,
                               disc_func="percentiles",
                               output="geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 10)
        # Test using somes already choosed break values :
        my_breaks = [0, 197000, 1295000, 2093000, 3091000,
                     5888000, 10186000, 12000000]
        result = StePot.render(
            nb_class=48,  # bogus values as `nb_class` and
            disc_func="foobar",  # ... disc_func should be overrided
            user_defined_breaks=my_breaks,  # ... by the `user_defined_breaks` params
            output="geodataframe")  # ... and this is what we are testing here
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 7)
        # Assert these break values were actually used :
        for wanted_break, obtained_break in zip(my_breaks[1:-1], result["max"][:-1]):
            self.assertAlmostEqual(wanted_break, obtained_break)
        # Test again using another discretization method : "head tail breaks"
        # (should define automatically the number of class)
        result = StePot.render(nb_class=None,
                               disc_func="head_tail",
                               output="geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        # Test that the object has a nice representation :
        a = str(StePot)
        b = repr(StePot)
        self.assertEqual(a, b)
        self.assertIn("SmoothStewart - variable :", a)
        self.assertIn("{} features".format(len(StePot.gdf)), a)
    def test_object_stewart_two_var(self):
        # Test the OO approach with two variables :
        StePot = SmoothStewart("misc/nuts3_data.geojson", "gdppps2008",
                               span=65000, beta=2, resolution=80000,
                               variable_name2="pop2008",
                               mask="misc/nuts3_data.geojson")
        result = StePot.render(8, "equal_interval", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 8)
    def test_distance_not_geo(self):
        # Euclidean distances (distGeo=False), then a custom projection.
        # First whith one variable :
        StePot = SmoothStewart("misc/nuts3_data.geojson",
                               "gdppps2008",
                               resolution=100000,
                               span=65000, beta=3,
                               mask="misc/nuts3_data.geojson",
                               distGeo=False)
        result = StePot.render(8, "equal_interval", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 8)
        # Then with two variables and a custom projection to use :
        StePot = SmoothStewart("misc/nuts3_data.geojson",
                               "gdppps2008",
                               span=65000, beta=2,
                               resolution=80000,
                               variable_name2="pop2008",
                               mask="misc/nuts3_data.geojson",
                               distGeo=False,
                               projDistance={"init": "epsg:3035"})
        result = StePot.render(8, "equal_interval", output="Geodataframe")
        self.assertIsInstance(result, GeoDataFrame)
        self.assertEqual(len(result), 8)
        self.assertEqual(result.crs, {'init': 'epsg:3035'})
def test_from_gdf_with_new_mask(self):
gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
# Let's use pareto function for this one :
StePot = SmoothStewart(gdf, "gdppps2008", typefct="pareto",
span=65000, beta=2.33, resolution=80000,
mask=None)
result = StePot.render(6, output="Geodataframe")
self.assertIsInstance(result, GeoDataFrame)
self.assertEqual(len(result), 6)
# Finally, use a mask (from a file) :
result = StePot.render(5, output="Geodataframe",
new_mask="misc/nuts3_data.geojson")
self.assertIsInstance(result, GeoDataFrame)
self.assertEqual(StePot.use_mask, True)
self.assertEqual(len(result), 5)
# Or from a GeoDataFrame :
result = StePot.render(6, output="Geodataframe",
new_mask=gdf)
self.assertIsInstance(result, GeoDataFrame)
self.assertEqual(StePot.use_mask, True)
self.assertEqual(len(result), 6)
# # Nope, no mask :
# result = StePot.render(5, output="Geodataframe",
# new_mask=None)
# self.assertIsInstance(result, GeoDataFrame)
# self.assertEqual(StePot.use_mask, False)
# self.assertEqual(len(result), 5)
# Test that it skips the mask parameter if the layer provided as a mask
# is not a Polygon/MultiPolygon layer :
gdf_mask = gdf[1:50].copy()
gdf_mask.geometry = gdf_mask.geometry.centroid
result = StePot.render(5, output="Geodataframe",
new_mask=gdf_mask)
self.assertIsInstance(result, GeoDataFrame)
self.assertEqual(StePot.use_mask, False)
self.assertEqual(len(result), 5)
def test_input_with_missing_values(self):
gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
gdf.loc[12:18, "gdppps2008"] = np.NaN
StePot = SmoothStewart(gdf, "gdppps2008",
span=65000, beta=2, resolution=100000,
mask=gdf)
result = StePot.render(9, "equal_interval", output="Geodataframe")
self.assertIsInstance(result, GeoDataFrame)
self.assertEqual(len(result), 9)
gdf2 = GeoDataFrame.from_file('misc/nuts3_data.geojson').to_crs({"init": "epsg:3035"})
gdf2.loc[:, 'gdppps2008'] = gdf2['gdppps2008'].astype(object)
gdf2.loc[15:20, 'gdppps2008'] = ""
gdf2.loc[75:78, 'gdppps2008'] = ""
StePot = SmoothStewart(gdf2, 'gdppps2008', span=65000, beta=2,
resolution=80000, mask=gdf2)
result = StePot.render(9, 'equal_interval', output="GeoDataFrame")
self.assertIsInstance(result, GeoDataFrame)
self.assertEqual(len(result), 9)
def test_wrong_dtype_missing_values(self):
    """Missing values combined with a second variable stored with a
    wrong (string) dtype must still be rendered correctly."""
    gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
    # np.nan instead of the np.NaN alias (removed in NumPy 2.0):
    gdf.loc[12:18, "gdppps2008"] = np.nan
    gdf.loc[25:35, "pop2008"] = np.nan
    # Deliberately corrupt the dtype of the second variable:
    gdf.loc[0:len(gdf)-1, "pop2008"] = gdf["pop2008"].astype(str)
    StePot = SmoothStewart(gdf, "gdppps2008",
                           span=65000, beta=2, resolution=100000,
                           mask="misc/nuts3_data.geojson")
    result = StePot.render(9, "equal_interval", output="Geodataframe")
    self.assertIsInstance(result, GeoDataFrame)
    self.assertEqual(len(result), 9)
    # Same with the string-typed column used as the second variable:
    StePot = SmoothStewart(gdf, "gdppps2008", variable_name2="pop2008",
                           span=65000, beta=2, resolution=100000,
                           mask="misc/nuts3_data.geojson")
    result = StePot.render(9, "equal_interval", output="Geodataframe")
    self.assertIsInstance(result, GeoDataFrame)
    self.assertEqual(len(result), 9)
def test_from_point_layer_and_maximal_breaks(self):
    """Stewart potentials computed from a point layer, discretised with
    both the 'equal_interval' and 'maximal_breaks' methods."""
    gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson").to_crs({"init": "epsg:4326"})
    # Replace each polygon by its centroid to obtain a point layer:
    gdf.geometry = gdf.geometry.centroid
    smoother = SmoothStewart(gdf, "gdppps2008",
                             span=65000, beta=2, resolution=80000,
                             mask="misc/nuts3_data.geojson")
    # Equal-interval discretisation:
    rendered = smoother.render(9, "equal_interval", output="Geodataframe")
    self.assertIsInstance(rendered, GeoDataFrame)
    self.assertEqual(len(rendered), 9)
    # Maximal-breaks discretisation:
    rendered = smoother.render(9, "maximal_breaks", output="Geodataframe")
    self.assertIsInstance(rendered, GeoDataFrame)
def test_from_polygon_layer_no_crs(self):
    """A (single-part) polygon layer without any CRS defined must still
    be usable as input."""
    gdf = GeoDataFrame.from_file("misc/nuts3_data.geojson")
    gdf.crs = ''
    # Turn each multipolygon into a plain polygon geometry:
    gdf.geometry = gdf.geometry.union(gdf.geometry)
    smoother = SmoothStewart(gdf, "gdppps2008",
                             span=65000, beta=2, resolution=100000,
                             mask="misc/nuts3_data.geojson")
    # Equal-interval discretisation:
    rendered = smoother.render(8, "equal_interval", output="Geodataframe")
    self.assertIsInstance(rendered, GeoDataFrame)
    self.assertEqual(len(rendered), 8)
def test_errors(self):
    """Invalid parameters must raise ValueError."""
    # Wrong interaction function name (the constructor raises, so
    # binding its result to a name would be dead code):
    with self.assertRaises(ValueError):
        SmoothStewart("misc/nuts3_data.geojson", "gdppps2008",
                      span=65000, beta=2,
                      typefct="abcdefg")
    StePot = SmoothStewart("misc/nuts3_data.geojson", "gdppps2008",
                           span=65000, beta=2, resolution=90000)
    # Wrong discretization function name:
    with self.assertRaises(ValueError):
        StePot.render(9, "foo", output="Geodataframe")
    # Size limit exceeded by a high number of points
    # (the nuts3 layer contains 1448 features):
    with self.assertRaises(ValueError):
        SmoothStewart(
            "misc/nuts3_data.geojson", "gdppps2008",
            span=65000, beta=2, typefct='pareto',
            nb_pts=100000, sizelimit=10000000)
class TestHelpers(unittest.TestCase):
    """Unit tests for the discretisation/classification helper functions."""

    def setUp(self):
        # 1200 pseudo-random values in [0, 1000):
        self.li = [random.random() * 1000 for i in range(1200)]

    def test_head_tail_breaks(self):
        breaks = head_tail_breaks(self.li)
        self.assertIsInstance(breaks, list)
        breaks2 = head_tail_breaks(self.li, direction="head")
        # Check breaks2/breaks3 (the originals re-checked `breaks`):
        self.assertIsInstance(breaks2, list)
        # assertEqual, not assertAlmostEqual: the latter cannot subtract
        # lists and would raise TypeError instead of a clean failure.
        self.assertEqual(breaks2, sorted(breaks2))
        self.assertEqual(breaks, breaks2)
        breaks3 = head_tail_breaks(self.li, direction="tail")
        self.assertIsInstance(breaks3, list)
        self.assertEqual(breaks3, sorted(breaks3))
        with self.assertRaises(ValueError):
            head_tail_breaks(self.li, direction="nope")

    def test_maximal_breaks(self):
        breaks = maximal_breaks(self.li)
        self.assertIsInstance(breaks, list)
        breaks = maximal_breaks(self.li, k=6)
        self.assertIsInstance(breaks, list)
        # k classes yield k + 1 break values:
        self.assertEqual(len(breaks), 7)

    def test_get_opt_nb_class(self):
        # The rule of thumb gives 11 classes for 1200 values:
        nb_class = get_opt_nb_class(len(self.li))
        self.assertEqual(nb_class, 11)

    def test_chain_list(self):
        # _chain flattens its arguments into a single iterator:
        _list = list(_chain([789, 45], [78, 96], [7878, 789, 36]))
        self.assertEqual(_list, [789, 45, 78, 96, 7878, 789, 36])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"geopandas.GeoDataFrame.from_file",
"smoomapy.helpers_classif._chain",
"random.random",
"smoomapy.quick_stewart",
"smoomapy.SmoothIdw",
"smoomapy.maximal_breaks",
"smoomapy.SmoothStewart",
"unittest.main",
"sys.stdout.getvalue",
"io.StringIO",
"smoomapy.quick_idw",
"smoomapy.head_tail_breaks"
... | [((25614, 25629), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25627, 25629), False, 'import unittest\n'), ((508, 649), 'smoomapy.quick_idw', 'quick_idw', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'power': '(1)', 'resolution': '(80000)', 'nb_class': '(8)', 'disc_func': '"""jenks"""', 'mask': '"""misc/nuts3_data.geojson"""'}), "('misc/nuts3_data.geojson', 'pop2008', power=1, resolution=80000,\n nb_class=8, disc_func='jenks', mask='misc/nuts3_data.geojson')\n", (517, 649), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((838, 1002), 'smoomapy.quick_idw', 'quick_idw', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'power': '(1)', 'nb_pts': '(8000)', 'nb_class': '(8)', 'disc_func': '"""jenks"""', 'mask': '"""misc/nuts3_data.geojson"""', 'output': '"""GeoDataFrame"""'}), "('misc/nuts3_data.geojson', 'pop2008', power=1, nb_pts=8000,\n nb_class=8, disc_func='jenks', mask='misc/nuts3_data.geojson', output=\n 'GeoDataFrame')\n", (847, 1002), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((1270, 1380), 'smoomapy.SmoothIdw', 'SmoothIdw', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'power': '(2)', 'resolution': '(90000)', 'mask': '"""misc/nuts3_data.geojson"""'}), "('misc/nuts3_data.geojson', 'pop2008', power=2, resolution=90000,\n mask='misc/nuts3_data.geojson')\n", (1279, 1380), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((3920, 4043), 'smoomapy.SmoothIdw', 'SmoothIdw', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'nb_pts': '(7200)', 'power': '(3)', 'mask': '"""misc/nuts3_data.geojson"""', 'distGeo': '(False)'}), "('misc/nuts3_data.geojson', 'gdppps2008', nb_pts=7200, power=3,\n mask='misc/nuts3_data.geojson', distGeo=False)\n", (3929, 4043), False, 
'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((5008, 5057), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (5030, 5057), False, 'from geopandas import GeoDataFrame\n'), ((5073, 5134), 'smoomapy.SmoothIdw', 'SmoothIdw', (['gdf', '"""gdppps2008"""'], {'power': '(1)', 'nb_pts': '(2800)', 'mask': 'None'}), "(gdf, 'gdppps2008', power=1, nb_pts=2800, mask=None)\n", (5082, 5134), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((6833, 6882), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (6855, 6882), False, 'from geopandas import GeoDataFrame\n'), ((6943, 7003), 'smoomapy.SmoothIdw', 'SmoothIdw', (['gdf', '"""gdppps2008"""'], {'power': '(1)', 'nb_pts': '(2600)', 'mask': 'gdf'}), "(gdf, 'gdppps2008', power=1, nb_pts=2600, mask=gdf)\n", (6952, 7003), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((7426, 7488), 'smoomapy.SmoothIdw', 'SmoothIdw', (['gdf2', '"""gdppps2008"""'], {'power': '(1)', 'nb_pts': '(1200)', 'mask': 'gdf2'}), "(gdf2, 'gdppps2008', power=1, nb_pts=1200, mask=gdf2)\n", (7435, 7488), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((7707, 7756), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (7729, 7756), False, 'from geopandas import GeoDataFrame\n'), ((7930, 8017), 'smoomapy.SmoothIdw', 'SmoothIdw', (['gdf', '"""gdppps2008"""'], {'power': '(1)', 'nb_pts': '(2600)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', power=1, 
nb_pts=2600, mask=\n 'misc/nuts3_data.geojson')\n", (7939, 8017), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((8775, 8862), 'smoomapy.SmoothIdw', 'SmoothIdw', (['gdf', '"""gdppps2008"""'], {'power': '(1)', 'nb_pts': '(7600)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', power=1, nb_pts=7600, mask=\n 'misc/nuts3_data.geojson')\n", (8784, 8862), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((9317, 9366), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (9339, 9366), False, 'from geopandas import GeoDataFrame\n'), ((9539, 9626), 'smoomapy.SmoothIdw', 'SmoothIdw', (['gdf', '"""gdppps2008"""'], {'power': '(1)', 'nb_pts': '(2600)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', power=1, nb_pts=2600, mask=\n 'misc/nuts3_data.geojson')\n", (9548, 9626), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((9876, 9948), 'smoomapy.SmoothIdw', 'SmoothIdw', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'power': '(2)', 'nb_pts': '(1000)'}), "('misc/nuts3_data.geojson', 'gdppps2008', power=2, nb_pts=1000)\n", (9885, 9948), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((10597, 10734), 'smoomapy.quick_stewart', 'quick_stewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'nb_class': '(8)', 'mask': '"""misc/nuts3_data.geojson"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n resolution=80000, nb_class=8, mask='misc/nuts3_data.geojson')\n", (10610, 10734), False, 'from smoomapy import quick_stewart, 
quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((10923, 11083), 'smoomapy.quick_stewart', 'quick_stewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'nb_pts': '(8000)', 'nb_class': '(8)', 'mask': '"""misc/nuts3_data.geojson"""', 'output': '"""GeoDataFrame"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n nb_pts=8000, nb_class=8, mask='misc/nuts3_data.geojson', output=\n 'GeoDataFrame')\n", (10936, 11083), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((11319, 11449), 'smoomapy.quick_stewart', 'quick_stewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'mask': '"""misc/nuts3_data.geojson"""', 'output': '"""GeoDataFrame"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n mask='misc/nuts3_data.geojson', output='GeoDataFrame')\n", (11332, 11449), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((11729, 11913), 'smoomapy.quick_stewart', 'quick_stewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'user_defined_breaks': 'my_breaks', 'mask': '"""misc/nuts3_data.geojson"""', 'output': '"""GeoDataFrame"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n resolution=80000, user_defined_breaks=my_breaks, mask=\n 'misc/nuts3_data.geojson', output='GeoDataFrame')\n", (11742, 11913), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((12724, 12908), 'smoomapy.quick_stewart', 'quick_stewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'user_defined_breaks': 'my_breaks', 'mask': 
'"""misc/nuts3_data.geojson"""', 'output': '"""GeoDataFrame"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n resolution=80000, user_defined_breaks=my_breaks, mask=\n 'misc/nuts3_data.geojson', output='GeoDataFrame')\n", (12737, 12908), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((13730, 13914), 'smoomapy.quick_stewart', 'quick_stewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'user_defined_breaks': 'my_breaks', 'mask': '"""misc/nuts3_data.geojson"""', 'output': '"""GeoDataFrame"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n resolution=80000, user_defined_breaks=my_breaks, mask=\n 'misc/nuts3_data.geojson', output='GeoDataFrame')\n", (13743, 13914), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((14301, 14426), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""pop2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(90000)', 'mask': '"""misc/nuts3_data.geojson"""'}), "('misc/nuts3_data.geojson', 'pop2008', span=65000, beta=2,\n resolution=90000, mask='misc/nuts3_data.geojson')\n", (14314, 14426), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((16288, 16442), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'variable_name2': '"""pop2008"""', 'mask': '"""misc/nuts3_data.geojson"""'}), "('misc/nuts3_data.geojson', 'gdppps2008', span=65000, beta=2,\n resolution=80000, variable_name2='pop2008', mask='misc/nuts3_data.geojson')\n", (16301, 16442), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, 
head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((16792, 16936), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'resolution': '(100000)', 'span': '(65000)', 'beta': '(3)', 'mask': '"""misc/nuts3_data.geojson"""', 'distGeo': '(False)'}), "('misc/nuts3_data.geojson', 'gdppps2008', resolution=100000,\n span=65000, beta=3, mask='misc/nuts3_data.geojson', distGeo=False)\n", (16805, 16936), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((17341, 17555), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'variable_name2': '"""pop2008"""', 'mask': '"""misc/nuts3_data.geojson"""', 'distGeo': '(False)', 'projDistance': "{'init': 'epsg:3035'}"}), "('misc/nuts3_data.geojson', 'gdppps2008', span=65000, beta=2,\n resolution=80000, variable_name2='pop2008', mask=\n 'misc/nuts3_data.geojson', distGeo=False, projDistance={'init':\n 'epsg:3035'})\n", (17354, 17555), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((18046, 18095), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (18068, 18095), False, 'from geopandas import GeoDataFrame\n'), ((18165, 18271), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf', '"""gdppps2008"""'], {'typefct': '"""pareto"""', 'span': '(65000)', 'beta': '(2.33)', 'resolution': '(80000)', 'mask': 'None'}), "(gdf, 'gdppps2008', typefct='pareto', span=65000, beta=2.33,\n resolution=80000, mask=None)\n", (18178, 18271), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((19879, 19928), 'geopandas.GeoDataFrame.from_file', 
'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (19901, 19928), False, 'from geopandas import GeoDataFrame\n'), ((19992, 20077), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(100000)', 'mask': 'gdf'}), "(gdf, 'gdppps2008', span=65000, beta=2, resolution=100000,\n mask=gdf)\n", (20005, 20077), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((20573, 20659), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf2', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'mask': 'gdf2'}), "(gdf2, 'gdppps2008', span=65000, beta=2, resolution=80000,\n mask=gdf2)\n", (20586, 20659), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((20917, 20966), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (20939, 20966), False, 'from geopandas import GeoDataFrame\n'), ((21143, 21250), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(100000)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', span=65000, beta=2, resolution=100000,\n mask='misc/nuts3_data.geojson')\n", (21156, 21250), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((21495, 21629), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf', '"""gdppps2008"""'], {'variable_name2': '"""pop2008"""', 'span': '(65000)', 'beta': '(2)', 'resolution': '(100000)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', variable_name2='pop2008', span=65000, beta\n =2, resolution=100000, mask='misc/nuts3_data.geojson')\n", (21508, 21629), False, 
'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((22122, 22229), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(80000)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', span=65000, beta=2, resolution=80000, mask\n ='misc/nuts3_data.geojson')\n", (22135, 22229), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((22728, 22777), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (22750, 22777), False, 'from geopandas import GeoDataFrame\n'), ((22953, 23060), 'smoomapy.SmoothStewart', 'SmoothStewart', (['gdf', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(100000)', 'mask': '"""misc/nuts3_data.geojson"""'}), "(gdf, 'gdppps2008', span=65000, beta=2, resolution=100000,\n mask='misc/nuts3_data.geojson')\n", (22966, 23060), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((23650, 23746), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'resolution': '(90000)'}), "('misc/nuts3_data.geojson', 'gdppps2008', span=65000, beta=2,\n resolution=90000)\n", (23663, 23746), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((24482, 24507), 'smoomapy.head_tail_breaks', 'head_tail_breaks', (['self.li'], {}), '(self.li)\n', (24498, 24507), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((24571, 24614), 'smoomapy.head_tail_breaks', 'head_tail_breaks', (['self.li'], 
{'direction': '"""head"""'}), "(self.li, direction='head')\n", (24587, 24614), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((24783, 24826), 'smoomapy.head_tail_breaks', 'head_tail_breaks', (['self.li'], {'direction': '"""tail"""'}), "(self.li, direction='tail')\n", (24799, 24826), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((25082, 25105), 'smoomapy.maximal_breaks', 'maximal_breaks', (['self.li'], {}), '(self.li)\n', (25096, 25105), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((25168, 25196), 'smoomapy.maximal_breaks', 'maximal_breaks', (['self.li'], {'k': '(6)'}), '(self.li, k=6)\n', (25182, 25196), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((3159, 3169), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3167, 3169), False, 'from io import StringIO\n'), ((3219, 3240), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (3238, 3240), False, 'import sys\n'), ((10307, 10405), 'smoomapy.SmoothIdw', 'SmoothIdw', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'power': '(2)', 'nb_pts': '(100000)', 'sizelimit': '(10000000)'}), "('misc/nuts3_data.geojson', 'gdppps2008', power=2, nb_pts=100000,\n sizelimit=10000000)\n", (10316, 10405), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((23468, 23565), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'typefct': '"""abcdefg"""'}), "('misc/nuts3_data.geojson', 'gdppps2008', span=65000, beta=2,\n typefct='abcdefg')\n", (23481, 23565), False, 'from smoomapy import 
quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((24114, 24245), 'smoomapy.SmoothStewart', 'SmoothStewart', (['"""misc/nuts3_data.geojson"""', '"""gdppps2008"""'], {'span': '(65000)', 'beta': '(2)', 'typefct': '"""pareto"""', 'nb_pts': '(100000)', 'sizelimit': '(10000000)'}), "('misc/nuts3_data.geojson', 'gdppps2008', span=65000, beta=2,\n typefct='pareto', nb_pts=100000, sizelimit=10000000)\n", (24127, 24245), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((24985, 25028), 'smoomapy.head_tail_breaks', 'head_tail_breaks', (['self.li'], {'direction': '"""nope"""'}), "(self.li, direction='nope')\n", (25001, 25028), False, 'from smoomapy import quick_stewart, quick_idw, SmoothIdw, SmoothStewart, head_tail_breaks, maximal_breaks, get_opt_nb_class\n'), ((7176, 7225), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (7198, 7225), False, 'from geopandas import GeoDataFrame\n'), ((8582, 8631), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (8604, 8631), False, 'from geopandas import GeoDataFrame\n'), ((20320, 20369), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (20342, 20369), False, 'from geopandas import GeoDataFrame\n'), ((21926, 21975), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['"""misc/nuts3_data.geojson"""'], {}), "('misc/nuts3_data.geojson')\n", (21948, 21975), False, 'from geopandas import GeoDataFrame\n'), ((24382, 24397), 'random.random', 'random.random', ([], {}), '()\n', (24395, 24397), False, 'import random\n'), ((25469, 25513), 'smoomapy.helpers_classif._chain', '_chain', (['[789, 45]', '[78, 96]', '[7878, 789, 36]'], {}), 
'([789, 45], [78, 96], [7878, 789, 36])\n', (25475, 25513), False, 'from smoomapy.helpers_classif import _chain\n')] |
# Import libraries and set random seeds for reproducibility
random_seed = 1237
import random
random.seed( random_seed )
import numpy as np
np.random.seed( random_seed )
import tensorflow as tf
# NOTE(review): tf.set_random_seed is the TensorFlow 1.x API (renamed to
# tf.random.set_seed in TF 2.x) — confirm the pinned TensorFlow version.
tf.set_random_seed( random_seed )
# Import model and instance loader
import model
from instance_loader import InstanceLoader
import os, sys, itertools, util
from scipy import stats
# Names of the per-centrality metrics computed by get_metrics(), in the
# exact order they are returned and logged: mean absolute error, mean
# relative error, Kendall tau-b (+ p-value), Pearson (+ p-value).
METRICS_LIST = [ "ABS", "REL", "KENDALLTAUB", "KENDALLTAUB_P", "PEARSON", "PEARSON_P" ]
def get_metrics_from_batch(predictions_list, labels_list):
    """
    Average each metric in METRICS_LIST over a batch of instances.

    Args:
        predictions_list: one sequence of predictions per instance.
        labels_list: one sequence of ground-truth labels per instance.

    Returns:
        A 6-tuple (ABS, REL, KENDALLTAUB, KENDALLTAUB_P, PEARSON,
        PEARSON_P), each averaged over the batch.
    """
    # Accumulate the per-instance metric tuples element-wise.
    totals = [0] * len(METRICS_LIST)
    for predictions, labels in zip(predictions_list, labels_list):
        instance_metrics = get_metrics(predictions, labels)
        totals = [acc + val for acc, val in zip(totals, instance_metrics)]
    # Normalise by the number of instances in the batch.
    batch_count = len(labels_list)
    return tuple(total / batch_count for total in totals)
#end get_metrics_batch
def get_metrics(predictions, labels):
    """
    Compute the evaluation metrics for one centrality on one instance.

    Args:
        predictions: sequence of predicted centrality values.
        labels: sequence of ground-truth centrality values (same length).

    Returns:
        Tuple (ABS, REL, KENDALLTAUB, KENDALLTAUB_P, PEARSON, PEARSON_P):
        mean absolute error, mean relative error (over nonzero labels
        only), Kendall tau-b correlation with its p-value, and Pearson
        correlation with its p-value.
    """
    abs_errors = [abs(p - l) for p, l in zip(predictions, labels)]
    # Guard against empty input so we never divide by zero.
    ABS = sum(abs_errors) / len(abs_errors) if abs_errors else 0.0
    # Relative error is undefined for zero labels, so those are skipped;
    # if every label is zero the relative error is reported as 0.0
    # (previously this raised ZeroDivisionError).
    rel_errors = [abs(p - l) / l for p, l in zip(predictions, labels) if l != 0]
    REL = sum(rel_errors) / len(rel_errors) if rel_errors else 0.0
    KENDALLTAUB, KENDALLTAUB_P = stats.kendalltau(predictions, labels)
    PEARSON, PEARSON_P = stats.pearsonr(predictions, labels)
    return ABS, REL, KENDALLTAUB, KENDALLTAUB_P, PEARSON, PEARSON_P
#end
def build_metrics_dict(centralities, header=False, header_prepend=""):
    """
    Build the dictionary used either to accumulate metric values or, when
    ``header`` is True, to hold the column names for the log files.

    Args:
        centralities: iterable of centrality names being predicted.
        header: when True, values are the (prefixed) key names instead of 0.
        header_prepend: string prepended to every key when ``header`` is True.

    Returns:
        Dict mapping every logged quantity (aggregate metrics,
        per-centrality metrics, loss, per-centrality costs) to its
        initial value.
    """
    def entry(key):
        # Header mode stores the column title; value mode starts at zero.
        return header_prepend + key if header else 0

    metrics_dict = {}
    for metric in METRICS_LIST:
        metrics_dict[metric] = entry(metric)
        for centrality in centralities:
            centrality_metric = "{c}_{m}".format(c=centrality, m=metric)
            metrics_dict[centrality_metric] = entry(centrality_metric)
    metrics_dict["loss"] = entry("loss")
    for centrality in centralities:
        centrality_cost = "{c}_cost".format(c=centrality)
        metrics_dict[centrality_cost] = entry(centrality_cost)
    return metrics_dict
#end build_metrics_dict
def log_metrics_dict(metrics_dict, centralities, log_file):
    """
    Append every value of ``metrics_dict`` to ``log_file`` as tab-prefixed
    fields, in the canonical column order: loss, per-centrality costs,
    aggregate metrics, then per-centrality metrics.

    No newline is written and the stream is not flushed — the caller is
    expected to have written a leading value and to terminate the line.
    """
    # Build the full column order once, then emit each field the same way.
    ordered_keys = ["loss"]
    ordered_keys += ["{c}_cost".format(c=centrality) for centrality in centralities]
    ordered_keys += list(METRICS_LIST)
    ordered_keys += [
        "{c}_{m}".format(c=centrality, m=metric)
        for centrality in centralities
        for metric in METRICS_LIST
    ]
    for key in ordered_keys:
        print(
            "\t{val}".format(val=metrics_dict[key]),
            end="",
            file=log_file
        )
#end log_metrics_dict
def train(
    session,
    model_dict,
    time_steps,
    centralities,
    epochs_to_run,
    train_instance_loader,
    batch_size,
    test_batch_size,
    batches_per_epoch,
    test_instance_loader,
    epoch_logging_file,
    batch_logging_file,
    model_checkpoint_filename,
    log_to_stdout = False
):
    """
    Run the full training procedure: write the header rows to both log
    files, run ``epochs_to_run`` epochs, and checkpoint the model's
    weights after every epoch.
    """
    # Header rows for the epoch-level and batch-level log files.
    train_header = build_metrics_dict(centralities, header=True, header_prepend="train_")
    test_header = build_metrics_dict(centralities, header=True, header_prepend="test_")
    log_epoch("epoch_id", train_header, test_header, centralities, epoch_logging_file)
    log_batch("epoch_id", "batch_id", build_metrics_dict(centralities, header=True), centralities, batch_logging_file)
    print("Starting training for {} epochs".format(epochs_to_run))
    last_epoch = epochs_to_run - 1
    for epoch_id in range(epochs_to_run):
        if log_to_stdout:
            # Echo the column headers to stdout before this epoch's metrics.
            print("Epoch\t{}".format(epoch_id), end="", file=sys.stdout)
            log_metrics_dict(build_metrics_dict(centralities, header=True), centralities, sys.stdout)
            print("", flush=True, file=sys.stdout)
        #end if
        # The very last epoch is evaluated with a test batch size of 1.
        effective_test_batch_size = 1 if epoch_id == last_epoch else test_batch_size
        run_epoch(
            epoch_id,
            session,
            model_dict,
            time_steps,
            centralities,
            train_instance_loader,
            batch_size,
            effective_test_batch_size,
            batches_per_epoch,
            test_instance_loader,
            epoch_logging_file,
            batch_logging_file,
            log_to_stdout = log_to_stdout
        )
        # Checkpoint after every epoch so progress survives interruptions.
        print("SAVING MODEL WEIGHTS TO {}".format(model_checkpoint_filename))
        util.save_weights(session, model_checkpoint_filename)
    #end for
#end train
def log_epoch(
    epoch_id,
    epoch_train_metrics_dict,
    epoch_test_metrics_dict,
    centralities,
    epoch_logging_file
):
    """
    Write one complete row for an epoch: the epoch id, the training
    metrics, then the test metrics, followed by a flushed newline.
    """
    print(epoch_id, end="", file=epoch_logging_file)
    # Training columns first, test columns second; the newline is only
    # emitted (and the stream flushed) once the whole row is written.
    for metrics in (epoch_train_metrics_dict, epoch_test_metrics_dict):
        log_metrics_dict(metrics, centralities, epoch_logging_file)
    print("", flush=True, file=epoch_logging_file)
#end log_epoch
def run_epoch(
    epoch_id,
    session,
    model_dict,
    time_steps,
    centralities,
    train_instance_loader,
    batch_size,
    test_batch_size,
    batches_per_epoch,
    test_instance_loader,
    epoch_logging_file,
    batch_logging_file,
    log_to_stdout = False
):
    """
    Run and log one training epoch followed by a full pass over the
    test set, writing the averaged metrics of both to the epoch log.
    """
    # --- Training pass -------------------------------------------------
    epoch_train_metrics_dict = build_metrics_dict(centralities)
    train_instance_loader.reset()
    for batch_id, batch in itertools.islice(
        enumerate(train_instance_loader.get_batches(batch_size)),
        batches_per_epoch
    ):
        # Run and log every training batch, accumulating the metrics.
        batch_metrics_dict = run_batch(
            epoch_id,
            batch_id,
            session,
            model_dict,
            time_steps,
            centralities,
            batch,
            batch_logging_file,
            train = True,
            log_to_stdout = log_to_stdout
        )
        for metric in epoch_train_metrics_dict:
            epoch_train_metrics_dict[metric] += batch_metrics_dict[metric]
        #end for
    #end for
    # Normalize the metrics by the number of batches.
    for metric in epoch_train_metrics_dict:
        epoch_train_metrics_dict[metric] /= batches_per_epoch
    #end for
    # --- Test pass -----------------------------------------------------
    epoch_test_metrics_dict = build_metrics_dict(centralities)
    test_instance_loader.reset()
    number_of_test_batches = 0
    # Open os.devnull ONCE for the whole pass (the original reopened it
    # for every batch) so run_batch's per-batch logging is discarded
    # instead of polluting the batch log during testing.
    with open(os.devnull, 'w') as nullfile:
        for batch in test_instance_loader.get_batches(test_batch_size):
            # Run every test batch without training, accumulating metrics.
            batch_metrics_dict = run_batch(
                epoch_id,
                "test",
                session,
                model_dict,
                time_steps,
                centralities,
                batch,
                nullfile,
                train = False,
                log_to_stdout = log_to_stdout
            )
            for metric in epoch_test_metrics_dict:
                epoch_test_metrics_dict[metric] += batch_metrics_dict[metric]
            #end for
            number_of_test_batches += 1
        #end for
    #end with
    # Normalize the metrics by the number of test batches.
    for metric in epoch_test_metrics_dict:
        epoch_test_metrics_dict[metric] /= number_of_test_batches
    #end for
    log_epoch(epoch_id, epoch_train_metrics_dict, epoch_test_metrics_dict, centralities, epoch_logging_file)
    if log_to_stdout:
        print("EPOCH\t", end="")
        log_epoch("summary", epoch_train_metrics_dict, epoch_test_metrics_dict, centralities, sys.stdout)
    #end if
#end run_epoch
def log_batch(
    epoch_id,
    batch_id,
    batch_metrics_dict,
    centralities,
    batch_logging_file
):
    """
    Append one complete row for a batch (epoch id, batch id, then every
    metric value) to the batch log file and flush it.
    """
    row_prefix = "{eid}\t{bid}".format(eid=epoch_id, bid=batch_id)
    print(row_prefix, end="", file=batch_logging_file)
    log_metrics_dict(batch_metrics_dict, centralities, batch_logging_file)
    # Terminate the row and flush so progress is visible immediately.
    print("", flush=True, file=batch_logging_file)
#end
def run_batch(
    epoch_id,
    batch_id,
    session,
    model_dict,
    time_steps,
    centralities,
    batch,
    batch_logging_file,
    train = False,
    log_to_stdout = False
):
    """
    Runs and logs a batch.

    Feeds one batch through the TF graph in ``model_dict`` via ``session``,
    optionally running the train step, then computes per-centrality metrics
    and writes them through ``log_batch``.  Returns the metrics dict.

    epoch_id / batch_id   -- identifiers written into the log line.
    session               -- live TF session.
    model_dict            -- graph handles: "gnn", "nodes_n", "train_step",
                             "loss", "{c}_labels", "{c}_predictions", "{c}_cost".
    time_steps            -- message-passing iterations fed to the GNN.
    centralities          -- centrality names present in this batch.
    batch                 -- dict with "matrix", "problem_n" and one sparse
                             label entry per centrality.
    batch_logging_file    -- open file-like object the batch line is logged to.
    train                 -- when True, also run the optimizer step.
    log_to_stdout         -- when True, mirror the log line to sys.stdout.
    """
    # Build metrics dictionary for logging
    batch_metrics_dict = build_metrics_dict( centralities )
    # Transform sparse batch labels to dense
    labels = {
        centrality: util.flatten( batch["{c}".format(c=centrality)] )
        for centrality in centralities
    }
    # Build the feed_dict
    feed_dict = {
        model_dict["{c}_labels".format(c=centrality)]: labels[centrality]
        for centrality in centralities
    }
    feed_dict[ model_dict["gnn"].matrix_placeholders["M"] ] = util.sparse_to_dense( batch["matrix"] )
    feed_dict[ model_dict["gnn"].time_steps ] = time_steps
    feed_dict[ model_dict["nodes_n"] ] = batch["problem_n"]
    # Train if required (the returned value of the train step is not used)
    if train:
        returned_values = session.run(
            model_dict["train_step"],
            feed_dict = feed_dict
        )
    #end if
    # Get logits for batch (one prediction tensor per centrality, same order)
    returned_predictions = session.run(
        [
            model_dict["{c}_predictions".format( c = centrality ) ]
            for centrality in centralities
        ],
        feed_dict = feed_dict
    )
    # Get losses for batch: overall loss first, then one cost per centrality
    returned_losses = session.run(
        [
            model_dict["loss"]
        ] + [
            model_dict["{c}_cost".format( c = centrality ) ]
            for centrality in centralities
        ],
        feed_dict = feed_dict
    )
    # Update the overall loss
    batch_metrics_dict["loss"] = returned_losses[0]
    # Update each centrality's value
    for centrality, predictions, cost in zip( centralities, returned_predictions, returned_losses[1:] ):
        # Metrics are computed per-problem, so split the flat batch back
        # into individual problems using their node counts.
        metric_values = get_metrics_from_batch(
            model.separate_batch(
                predictions,
                batch["problem_n"]
            ),
            model.separate_batch(
                labels[centrality],
                batch["problem_n"]
            )
        )
        # Update loss for the centrality
        batch_metrics_dict["{c}_cost".format(c=centrality)] = cost
        # Update every other metric for the centrality
        for metric, value in zip( METRICS_LIST, metric_values ):
            batch_metrics_dict["{c}_{m}".format(c=centrality,m=metric)] = value
        #end for
    #end for
    # For every metric, compute the average over the centralities
    for metric in METRICS_LIST:
        for centrality in centralities:
            batch_metrics_dict[metric] += batch_metrics_dict["{c}_{m}".format(c=centrality,m=metric)]
        #end for
        batch_metrics_dict[metric] /= len( centralities )
    #end for
    # Log the batch
    log_batch( epoch_id, batch_id, batch_metrics_dict, centralities, batch_logging_file )
    if log_to_stdout:
        log_batch( "batch", batch_id, batch_metrics_dict, centralities, sys.stdout )
    #end if
    return batch_metrics_dict
#end run_batch
if __name__ == "__main__":
    # Hyperparameters (all fixed for this experiment).
    embedding_size = 32
    epochs_to_run = 32
    batches_per_epoch = 32
    batch_size = 32
    test_batch_size = 32
    time_steps = 32
    # The centralities to learn are passed on the command line; sorted so
    # that the log-file name below is order-independent.
    centralities = sorted( sys.argv[1:] )
    if len( centralities ) <= 0:
        raise ValueError( "No centrality passed" )
    #end if
    for centrality in centralities:
        if centrality not in [ "betweenness","closeness","degree","eigenvector" ]:
            raise ValueError( "Centrality {c} not one of the accepted ones.".format( c=centrality ) )
        #end if
    #end for
    # Base name shared by log files and the model checkpoint.
    fname = "centrality-" + "-".join( centralities )
    # Build model
    print( "Building model ..." )
    GNN = model.model_builder( embedding_size, centralities )
    # Load instances with a predefined seed and separate random generator for reproducibility
    training_instance_loader_random_generator = random.Random( random_seed )
    test_instance_loader_random_generator = random.Random( random_seed )
    training_instance_loader = InstanceLoader("./instances", rng = training_instance_loader_random_generator )
    test_instance_loader = InstanceLoader("./test-instances", rng = test_instance_loader_random_generator )
    epoch_logging_file = open( "{fname}.epoch.log".format( fname = fname ), "w" )
    batch_logging_file = open( "{fname}.batch.log".format( fname = fname ), "w" )
    model_checkpoint_filename = fname
    # Disallow GPU use
    # NOTE(review): despite the comment above, only the per-process GPU
    # memory fraction is limited here; the device_count override is
    # commented out, so the GPU is not actually disabled.
    config = tf.ConfigProto(
        # device_count = {"GPU": 0 },
        # inter_op_parallelism_threads = 1,
        # intra_op_parallelism_threads = 1,
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1/5.2)
    )
    with tf.Session(config=config) as sess:
        # Initialize global variables
        print( "Initializing global variables ... " )
        sess.run( tf.global_variables_initializer() )
        train(
            sess,
            GNN,
            time_steps,
            centralities,
            epochs_to_run,
            training_instance_loader,
            batch_size,
            test_batch_size,
            batches_per_epoch,
            test_instance_loader,
            epoch_logging_file,
            batch_logging_file,
            model_checkpoint_filename,
            log_to_stdout = True
        )
    #end Session
| [
"instance_loader.InstanceLoader",
"util.sparse_to_dense",
"random.Random",
"model.separate_batch",
"tensorflow.Session",
"random.seed",
"util.save_weights",
"tensorflow.global_variables_initializer",
"model.model_builder",
"numpy.random.seed",
"scipy.stats.pearsonr",
"tensorflow.set_random_see... | [((93, 117), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (104, 117), False, 'import random\n'), ((139, 166), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (153, 166), True, 'import numpy as np\n'), ((193, 224), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['random_seed'], {}), '(random_seed)\n', (211, 224), True, 'import tensorflow as tf\n'), ((1769, 1806), 'scipy.stats.kendalltau', 'stats.kendalltau', (['predictions', 'labels'], {}), '(predictions, labels)\n', (1785, 1806), False, 'from scipy import stats\n'), ((1829, 1864), 'scipy.stats.pearsonr', 'stats.pearsonr', (['predictions', 'labels'], {}), '(predictions, labels)\n', (1843, 1864), False, 'from scipy import stats\n'), ((9790, 9827), 'util.sparse_to_dense', 'util.sparse_to_dense', (["batch['matrix']"], {}), "(batch['matrix'])\n", (9810, 9827), False, 'import os, sys, itertools, util\n'), ((12476, 12525), 'model.model_builder', 'model.model_builder', (['embedding_size', 'centralities'], {}), '(embedding_size, centralities)\n', (12495, 12525), False, 'import model\n'), ((12666, 12692), 'random.Random', 'random.Random', (['random_seed'], {}), '(random_seed)\n', (12679, 12692), False, 'import random\n'), ((12737, 12763), 'random.Random', 'random.Random', (['random_seed'], {}), '(random_seed)\n', (12750, 12763), False, 'import random\n'), ((12796, 12872), 'instance_loader.InstanceLoader', 'InstanceLoader', (['"""./instances"""'], {'rng': 'training_instance_loader_random_generator'}), "('./instances', rng=training_instance_loader_random_generator)\n", (12810, 12872), False, 'from instance_loader import InstanceLoader\n'), ((12901, 12978), 'instance_loader.InstanceLoader', 'InstanceLoader', (['"""./test-instances"""'], {'rng': 'test_instance_loader_random_generator'}), "('./test-instances', rng=test_instance_loader_random_generator)\n", (12915, 12978), False, 'from instance_loader import InstanceLoader\n'), 
((5512, 5565), 'util.save_weights', 'util.save_weights', (['session', 'model_checkpoint_filename'], {}), '(session, model_checkpoint_filename)\n', (5529, 5565), False, 'import os, sys, itertools, util\n'), ((13422, 13447), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (13432, 13447), True, 'import tensorflow as tf\n'), ((10789, 10842), 'model.separate_batch', 'model.separate_batch', (['predictions', "batch['problem_n']"], {}), "(predictions, batch['problem_n'])\n", (10809, 10842), False, 'import model\n'), ((10874, 10934), 'model.separate_batch', 'model.separate_batch', (['labels[centrality]', "batch['problem_n']"], {}), "(labels[centrality], batch['problem_n'])\n", (10894, 10934), False, 'import model\n'), ((13358, 13412), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(1 / 5.2)'}), '(per_process_gpu_memory_fraction=1 / 5.2)\n', (13371, 13412), True, 'import tensorflow as tf\n'), ((13555, 13588), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13586, 13588), True, 'import tensorflow as tf\n')] |
from datetime import datetime
from typing import List
from flask_restful import fields
from src.models.community import CommunityModel
from src.models.user import UserModel
class KmPerUserModel:
    """Kilometre totals attributed to a single user."""

    user: UserModel
    km: float = 0
    km_accounted_for_passengers: float = 0

    @staticmethod
    def get_marshaller():
        """Flask-RESTful field mapping used to serialize instances."""
        marshaller = {'user': fields.Nested(UserModel.get_marshaller())}
        marshaller['km'] = fields.Float
        marshaller['km_accounted_for_passengers'] = fields.Float
        return marshaller
class CostsPerUserModel:
    """Cost total attributed to a single user."""

    user: UserModel
    costs: float = 0

    @staticmethod
    def get_marshaller():
        """Flask-RESTful field mapping used to serialize instances."""
        marshaller = {'user': fields.Nested(UserModel.get_marshaller())}
        marshaller['costs'] = fields.Float
        return marshaller
class CommunityStatisticModel:
    """Aggregated per-user km/cost statistics for one community over a time window."""
    community: CommunityModel
    statistic_start: datetime
    statistic_end: datetime
    # NOTE(review): these class-level `= []` defaults are mutable objects
    # shared by all instances; __init__ below shadows them with fresh,
    # per-instance lists, so instance state is safe as long as __init__ runs.
    km_per_user: List[KmPerUserModel] = []
    costs_per_user: List[CostsPerUserModel] = []

    def __init__(self):
        # Fresh lists per instance (see NOTE above).
        self.km_per_user = []
        self.costs_per_user = []

    @staticmethod
    def get_marshaller():
        # Flask-RESTful field mapping used to serialize instances.
        return {
            'community': fields.Nested(CommunityModel.get_marshaller()),
            'statistic_start': fields.DateTime,
            'statistic_end': fields.DateTime,
            'km_per_user': fields.Nested(KmPerUserModel.get_marshaller()),
            'costs_per_user': fields.Nested(CostsPerUserModel.get_marshaller())
        }
| [
"src.models.user.UserModel.get_marshaller",
"src.models.community.CommunityModel.get_marshaller"
] | [((376, 402), 'src.models.user.UserModel.get_marshaller', 'UserModel.get_marshaller', ([], {}), '()\n', (400, 402), False, 'from src.models.user import UserModel\n'), ((667, 693), 'src.models.user.UserModel.get_marshaller', 'UserModel.get_marshaller', ([], {}), '()\n', (691, 693), False, 'from src.models.user import UserModel\n'), ((1143, 1174), 'src.models.community.CommunityModel.get_marshaller', 'CommunityModel.get_marshaller', ([], {}), '()\n', (1172, 1174), False, 'from src.models.community import CommunityModel\n')] |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: _ModuleToggle.py
import dsz
import dsz.lp
import dsz.user
import dsz.script
import dsz.data
import sys
import xml.dom.minidom
import re
import os
# Tasking parameter keys (looked up in the dict returned by mcl.tasking.GetParameters()).
Action = 'action'
SystemName = 'systemName'
Implementation = 'impl'
Silent = 'silent'
Load = 'load'
Free = 'free'
# Action codes carried in params[Action].
# NOTE(review): `List` and `Set` shadow the Python builtins of the same
# name at module scope.
List = 1
Set = 2
Register = 3
# When False every _Print* helper is silenced (driven by the 'silent' parameter).
bShowOutput = True
# Environment-variable name patterns: _PROV_* variables advertise provided
# systems and their implementations, _SUB_* record the selected substitution.
matchSystemName = re.compile('_PROV_(MCL_[^_]+)')
matchTargetName = re.compile('_PROV_(.+_TARGET)')
matchSystemImplementation = re.compile('_PROV_(MCL_[^_]*)_(.*)')
matchTargetImplementation = re.compile('_PROV_(.+_TARGET)_(.*)')
matchSystemSubstitution = re.compile('_SUB_(MCL_.+)')
matchTargetSubstitution = re.compile('_SUB_(.+_TARGET)')
# Whether to load / free implementations when switching; overwritten from
# tasking parameters in TaskingMain.
bLoad = True
bFree = True
class ModuleInfo:
    """Tracks the implementations known for one provided system."""

    def __init__(self, name):
        self.Name = name
        # MCL-provided systems start from the 'FAIL' sentinel implementation;
        # every other system starts from 'DEFAULT'.
        baseline = 'FAIL' if name.startswith('MCL') else 'DEFAULT'
        self.Implementations = [baseline]
        self.Selected = baseline
def TaskingMain(namespace):
    """Entry point when run as an MCL task: dispatch on the 'action' parameter.

    Reads parameters from mcl.tasking, configures the module-level bLoad /
    bFree / bShowOutput flags, runs the requested action (List / Set /
    Register) and reports task status back to the framework.  The
    ``namespace`` argument is accepted but never used.  Always returns True.
    """
    global bLoad
    global bShowOutput
    global bFree
    import mcl.target
    import mcl.tasking
    dsz.control.echo.Off()
    params = mcl.tasking.GetParameters()
    bLoad = params[Load]
    bFree = params[Free]
    if params[Silent]:
        bShowOutput = False
    if params[Action] == List:
        retVal = _DoList(params[SystemName])
    elif params[Action] == Set:
        retVal = _DoSet(params[SystemName], params[Implementation])
    elif params[Action] == Register:
        retVal = _DoRegister(params[SystemName])
    else:
        dsz.ui.Echo('Unknown action')
        retVal = False
    # Report success/failure of the action to the tasking framework.
    if retVal:
        mcl.tasking.TaskSetStatus(mcl.target.CALL_SUCCEEDED)
    else:
        mcl.tasking.TaskSetStatus(mcl.target.CALL_FAILED)
    return True
def _DoList(systemDesired, bDisplay=True):
    """Enumerate provided systems and their implementations from the LP environment.

    systemDesired -- name of a single system to report on, or None/'' for all.
    bDisplay      -- when True, echo the listing and task status.

    Returns the ModuleInfo for the requested system, a dict of
    {NAME: ModuleInfo} when no specific system was requested, None when the
    requested system is unknown, or False when lpgetenv fails.  Results are
    also recorded through dsz.script.data for script consumers.

    Fix: the display/store loops used a loop variable named ``sys``, which
    shadowed the imported ``sys`` module inside this function; renamed.
    """
    if bDisplay:
        _PrintTask('Retrieving List')
    lpEnv = dsz.data.CreateCommand('lpgetenv', 'lpgetenv')
    if lpEnv == None:
        if bDisplay:
            _PrintFailure('lpgetenv failed')
        return False
    else:
        if bDisplay:
            _PrintSuccess()
        listOfSystems = dict()

        def matchName(systemList, option, value):
            # _PROV_<NAME> with no implementation suffix: register the system.
            for pattern in [matchSystemName, matchTargetName]:
                match = pattern.match(option)
                if match == None:
                    continue
                if match.group(1).upper() not in systemList:
                    systemList[match.group(1).upper()] = ModuleInfo(match.group(1))
            return

        def matchImpl(systemList, option, value):
            # _PROV_<NAME>_<IMPL>: record an available implementation.
            for pattern in [matchSystemImplementation, matchTargetImplementation]:
                match = pattern.match(option)
                if match == None:
                    continue
                if match.group(1).upper() not in systemList:
                    systemList[match.group(1).upper()] = ModuleInfo(match.group(1))
                systemList[match.group(1).upper()].Implementations.append(match.group(2))
            return

        def matchSub(systemList, option, value):
            # _SUB_<NAME> = <NAME>_<IMPL>: record the selected implementation.
            for pattern in [matchSystemSubstitution, matchTargetSubstitution]:
                match = pattern.match(option)
                if match == None:
                    continue
                if match.group(1).upper() not in systemList:
                    systemList[match.group(1).upper()] = ModuleInfo(match.group(1))
                for pattern2 in ['MCL_[^_]+_(.+)', '%s_(.+)' % match.group(1).upper()]:
                    valueMatch = re.match(pattern2, value.upper())
                    if valueMatch:
                        if valueMatch.group(1) == 'DEFAULT':
                            return
                        systemList[match.group(1).upper()].Selected = valueMatch.group(1)
            return

        for envItem in lpEnv.EnvItem:
            matchName(listOfSystems, envItem.option, envItem.Value)
            matchImpl(listOfSystems, envItem.option, envItem.Value)
            matchSub(listOfSystems, envItem.option, envItem.Value)
        dsz.script.data.Start('ModuleToggle')
        try:
            if systemDesired:
                if systemDesired.upper() in listOfSystems.keys():
                    if bDisplay:
                        _DisplaySystemOptions(systemDesired, listOfSystems[systemDesired.upper()])
                        _PrintSuccess()
                    _StoreSystem(systemDesired, listOfSystems[systemDesired.upper()])
                    return listOfSystems[systemDesired.upper()]
                else:
                    if bDisplay:
                        _PrintFailure('System %s not found' % systemDesired)
                    return
            else:
                if bDisplay:
                    for systemKey in sorted(listOfSystems.keys()):
                        _DisplaySystemOptions(systemKey, listOfSystems[systemKey])
                    _PrintSuccess()
                for systemKey in listOfSystems.keys():
                    _StoreSystem(systemKey, listOfSystems[systemKey])
                return listOfSystems
        finally:
            dsz.script.data.Store()
    return
def _StoreSystem(system, data):
    """Record one system's name, selection and implementations into script data.

    system -- system name string; data -- the system's ModuleInfo.
    """
    dsz.script.data.Start('System')
    dsz.script.data.Add('Name', system, dsz.TYPE_STRING)
    if data.Selected:
        dsz.script.data.Add('Selected', data.Selected, dsz.TYPE_STRING)
    for item in data.Implementations:
        dsz.script.data.Add('Implementation', item, dsz.TYPE_STRING)
    dsz.script.data.End()
def _DisplaySystemOptions(name, options):
    """Echo the implementation list for one system, marking the current selection.

    The echo color distinguishes: selected FAIL (warning), unselected FAIL
    (error), any other selected implementation (good), everything else
    (default).
    """
    dsz.ui.Echo('Implementations for %s (%s)' % (name, options.Selected))
    dsz.ui.Echo('----------------------------')
    for impl in options.Implementations:
        isSelected = (impl == options.Selected)
        suffix = '(selected)' if isSelected else ''
        if impl == 'FAIL':
            echoType = dsz.WARNING if isSelected else dsz.ERROR
        elif isSelected:
            echoType = dsz.GOOD
        else:
            echoType = dsz.DEFAULT
        dsz.ui.Echo('%s %s' % (impl, suffix), echoType)
    dsz.ui.Echo('----------------------------')
    dsz.ui.Echo('')
def _DoSet(system, impl):
    """Select implementation *impl* for *system* and reload it.

    Validates the system/implementation pair against the environment,
    frees the currently loaded plugin (when bFree), loads the new
    implementation (when bLoad) and records the choice in the _SUB_*
    environment variable.  Returns True on success.
    """
    if bFree:
        plugins = dsz.data.CreateCommand('plugins', 'plugins')
    else:
        plugins = None
    _PrintTask('Preparing to set %s to %s' % (system, impl))
    if system == None or impl == None:
        _PrintFailure('Invalid parameters')
        return False
    else:
        provVar = '_PROV_%s' % system.upper()
        subVar = '_SUB_%s' % system.upper()
        # 'FAIL' maps back to the bare system name; everything else gets
        # the NAME_IMPL suffix form.
        if impl.upper() == 'FAIL':
            subVal = '%s' % system.upper()
        else:
            subVal = '%s_%s' % (system.upper(), impl.upper())
        if not dsz.env.Check(provVar):
            _PrintFailure('System (%s) is not found' % system)
            return False
        if impl.upper() == 'DEFAULT':
            # DEFAULT means "no substitution": drop the override entirely.
            dsz.env.Delete(subVar)
            _PrintSuccess()
            if _DoFree(plugins, system):
                return _DoLoad(system, impl)
            else:
                return True
        # NOTE(review): this check is a no-op (decompilation artifact) --
        # presumably it was meant to short-circuit when the substitution is
        # already set to the requested value.
        if dsz.env.Check(subVar) and dsz.env.Get(subVar) == subVal:
            pass
        options = _DoList(system, False)
        if options == None:
            _PrintFailure('System (%s) implementations cannot be enumerated' % system)
            return False
        if impl.upper() not in options.Implementations:
            _PrintFailure('%s not a valid implementation: %s' % (impl, options.Implementations))
            return False
        _PrintSuccess()
        rtn = True
        if _DoFree(plugins, system):
            if not _DoLoad(system, impl):
                rtn = False
        else:
            rtn = False
        # The environment variable is written even when free/load failed,
        # so the selection persists for the next attempt.
        _PrintTask('Setting environment variable')
        if not dsz.env.Set(subVar, subVal):
            _PrintFailure()
            return False
        if rtn:
            _PrintSuccess()
        else:
            _PrintFailure()
        return rtn
def _DoRegister(system):
    """Register each known technique's implementation id in a _PROV_* env var.

    system -- restrict to techniques whose name contains this string;
              falsy registers every technique.
    Returns True only when every registration succeeded.
    """
    if system:
        _PrintTask('Registering implementations of %s' % system)
    else:
        _PrintTask('Registering all implementations')
    retVal = True
    try:
        LegalTechniques = _GetAvailableModuleTechniquesByName(system)
        for tech in LegalTechniques:
            try:
                dsz.env.Set('_PROV_%s' % tech['name'].upper(), '%d' % tech['implementation'])
            except Exception:
                # Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                retVal = False
    finally:
        _PrintOutcome(retVal)
    return retVal
def _DoFree(plugins, system):
    """Free the plugin implementing *system* and every plugin depending on it.

    plugins -- result of the 'plugins' command (may be None when freeing is
               disabled); system -- the system name, must not be None.
    Returns True on success or when freeing is globally disabled (bFree).

    Fix: the original re-checked ``if not bFree`` a second time after the
    initial ``if bFree == False`` guard; that branch was unreachable dead
    code and has been removed.
    """
    if not bFree:
        # Freeing disabled by tasking parameters; nothing to do.
        return True
    if system is None:
        _PrintTask('Freeing items')
        _PrintFailure('System must be specified')
        return False
    localSystem = '%s' % system
    # Dependants must be freed first, then the provider itself.
    if not __FreeDependantPlugins(plugins, localSystem):
        return False
    return __Free(plugins, localSystem)
def _DoLoad(system, name):
    """Load the technique implementing *name* for *system* by id.

    Returns True on success or when loading is globally disabled (bLoad).
    """
    if not bLoad:
        return True
    else:
        if system == None:
            _PrintTask('Loading implementation')
            _PrintFailure('System must be specified')
            return False
        if name == None:
            _PrintTask('Loading implementation')
            _PrintFailure('Implementation must be specified')
            return False
        potentialItems = _GetAvailableModuleTechniquesByName(system)
        for item in potentialItems:
            # Precedence note: `and` binds tighter than `or`, so this reads
            # as: name is a suffix of the technique name, OR (name is
            # 'fail'/'default' AND the technique name equals the system name).
            if item['name'].lower().endswith(name.lower()) or name.lower() == 'fail' and item['name'].lower() == system.lower() or name.lower() == 'default' and item['name'].lower() == system.lower():
                _PrintTask('Loading %s (%d)' % (item['name'], item['id']))
                if dsz.cmd.Run('load -id %d' % item['id']):
                    _PrintSuccess()
                    return True
                else:
                    _PrintFailure()
                    return False
        _PrintTask('Loading %s' % system)
        _PrintFailure('Cannot find implementation id')
        return False
def __GetAppropriatePluginSet(plugins):
    """Return the plugin list for the side (local or remote) the script runs on."""
    pluginGroup = plugins.Local if dsz.script.IsLocal() else plugins.Remote
    return pluginGroup.Plugin
def __Free(plugins, name):
    """Force-free every loaded plugin whose name contains *name* (case-insensitive).

    Returns False as soon as a free command fails; True otherwise.
    """
    if name.strip() == '':
        return True
    needle = name.upper()
    for plugin in __GetAppropriatePluginSet(plugins):
        if needle not in plugin.Name.upper():
            continue
        _PrintTask('Free %s (%d)' % (plugin.Name, plugin.Id))
        if dsz.cmd.Run('free -id %d -force -depends 0' % plugin.Id):
            _PrintSuccess()
        else:
            _PrintFailure()
            return False
    return True
def __FreeDependantPlugins(plugins, key, cntDown=10):
    """Recursively free every plugin that acquired an API provided by *key*.

    cntDown bounds the recursion depth; the walk fails once it reaches 0.
    Returns False on any failed free or on excessive recursion.
    """
    if key.strip() == '':
        return True
    if cntDown == 0:
        _PrintTask('Free Dependant Plugins')
        _PrintFailure('Excessive recursion')
        return False
    wanted = key.upper()
    for plugin in __GetAppropriatePluginSet(plugins):
        for acquiredApi in plugin.AcquiredApis:
            if acquiredApi.ProvidedBy.upper() != wanted:
                continue
            # Free this plugin's own dependants first, then the plugin itself.
            if not __FreeDependantPlugins(plugins, plugin.Name, cntDown - 1):
                return False
            if not __Free(plugins, plugin.Name):
                return False
    return True
def _FreeItem(plugins, system):
    """Force-free every plugin on the current side whose name contains *system*.

    Unlike __Free, the substring match here is case-sensitive.  Returns
    True only when every issued free command succeeded.

    Fix: the free command read ``plugin.id``; every other routine in this
    file accesses the capitalized ``plugin.Id`` attribute, so the lowercase
    access would have raised AttributeError -- corrected to match.
    """
    _PrintTask('Freeing %s' % system)
    retVal = True
    try:
        pluginSet = plugins.Remote
        if dsz.script.IsLocal():
            pluginSet = plugins.Local
        for plugin in pluginSet.Plugin:
            if system in plugin.Name:
                if not dsz.cmd.Run('free -id %d -force' % plugin.Id):
                    retVal = False
    finally:
        _PrintOutcome(retVal)
    return retVal
def _GetAvailableModuleTechniquesByName(Module):
    """Parse every technique XML file and collect implementations matching the target.

    Module -- restrict to techniques whose name contains this string
              (case-insensitive); None keeps all.
    Returns a list of dicts with 'name', 'filename', 'implementation', 'id'.
    """
    retVal = list()
    AllModules = _GetTechniqueFiles()
    for file in AllModules:
        try:
            xmlFile = xml.dom.minidom.parse(file)
            name = xmlFile.getElementsByTagName('Technique')[0].getAttribute('name')
            id = int(xmlFile.getElementsByTagName('Technique')[0].getAttribute('id'), 10)
            if Module != None and Module.lower() not in name.lower():
                continue
            if _DoesModuleMatch(xmlFile):
                for implementation in xmlFile.getElementsByTagName('Implementation'):
                    value = int(implementation.getAttribute('value'), 10)
                    retVal.append({'name': name,'filename': file,'implementation': value,'id': id})
        except Exception as E:
            # Deliberate best-effort: a malformed or unreadable technique
            # file is skipped rather than aborting the whole enumeration.
            pass
    return retVal
def _GetTechniqueFiles():
    """Collect every technique definition file for the current OS family.

    Scans Modules/Techniques/<os>/ under each resource directory listed in
    the _RES_DIRS environment variable, skipping subdirectories.
    """
    resDir = dsz.env.Get('_LPDIR_RESOURCES')
    resDirs = dsz.env.Get('_RES_DIRS')
    osStr = dsz.env.Get('_OS')
    if osStr == 'winnt':
        osStr = 'windows'
    found = []
    for res in resDirs.split(';'):
        techDir = os.path.normpath('%s/%s/Modules/Techniques/%s/' % (resDir, res, osStr))
        if not os.path.exists(techDir):
            continue
        for entry in os.listdir(techDir):
            fullPath = '%s/%s' % (techDir, entry)
            if not os.path.isdir(fullPath):
                found.append(fullPath)
    return found
def _DoesModuleMatch(xmlFile):
    """Return True when the technique XML lists an Architecture/Platform/Version
    combination that matches the live target environment.
    """
    for arch in xmlFile.getElementsByTagName('Architecture'):
        if not _DoesArchMatch(arch.getAttribute('type')):
            continue
        for platform in arch.childNodes:
            # Skip text/whitespace nodes between elements.
            if not platform.nodeType == xml.dom.Node.ELEMENT_NODE:
                continue
            if not _DoesPlatformMatch(platform.getAttribute('family')):
                continue
            for version in platform.childNodes:
                if not version.nodeType == xml.dom.Node.ELEMENT_NODE:
                    continue
                if _DoesVersionMatch(version.getAttribute('major'), version.getAttribute('minor'), version.getAttribute('other')):
                    return True
    return False
def _DoesArchMatch(moduleArch):
    """Return True when *moduleArch* equals the architecture this tool was compiled for.

    Fix: the original also fetched '_ARCH' into a local that was never
    used; the dead environment read has been removed.
    """
    compArch = dsz.env.Get('_COMPILED_ARCH')
    return compArch == moduleArch
def _DoesPlatformMatch(modulePlatform):
    """Return True when the module targets the platform family we are running on."""
    if dsz.env.Get('_OS') != 'winnt':
        # Only the Windows NT family is matched by this loader.
        return False
    return modulePlatform in ('windows_nt', 'winnt')
def _DoesVersionMatch(major, minor, other):
    """Return True when all three OS-version components match the live target."""
    components = (
        (major, '_OS_VERSION_MAJOR'),
        (minor, '_OS_VERSION_MINOR'),
        (other, '_OS_VERSION_OTHER'),
    )
    # all() short-circuits in the same major -> minor -> other order as the
    # original chained `and` expression.
    return all(_DoesVersionElementMatch(fileVer, dsz.env.Get(envName))
               for fileVer, envName in components)
def _DoesVersionElementMatch(fileVer, envVer):
if fileVer == '*':
return True
bGreater = False
if '+' in fileVer:
bGreater = True
fileVer = fileVer[:fileVer.index('+')]
fileVer = int(fileVer)
envVer = int(envVer)
if fileVer == envVer:
return True
if bGreater and fileVer < envVer:
return True
return False
def _PrintTask(task):
    """Echo a task description unless output is globally suppressed."""
    if not bShowOutput:
        return
    dsz.ui.Echo(task)
def _PrintOutcome(bState, msg=None):
    """Report PASSED or FAILED according to *bState*, with optional detail."""
    reporter = _PrintSuccess if bState else _PrintFailure
    reporter(msg)
def _PrintSuccess(msg=None):
    """Emit a 'PASSED' status line, optionally with a detail message."""
    __PrintImpl('PASSED', msg, dsz.GOOD)
def _PrintFailure(msg=None):
    """Emit a 'FAILED' status line, optionally with a detail message."""
    __PrintImpl('FAILED', msg, dsz.ERROR)
def __PrintImpl(msg, detail, type):
    """Echo a status line, optionally with a parenthesized detail string."""
    if not bShowOutput:
        return
    if detail != None:
        text = ' %s (%s)' % (msg, detail)
    else:
        text = ' %s' % msg
    dsz.ui.Echo(text, type)
    return
if __name__ == '__main__':
    import sys
    # argv[1] is passed through as TaskingMain's (unused) `namespace` argument;
    # exit non-zero unless the task reports success.
    if TaskingMain(sys.argv[1]) != True:
        sys.exit(-1)
"re.compile",
"sys.exit",
"dsz.control.echo.Off",
"os.path.exists",
"os.listdir",
"dsz.ui.Echo",
"dsz.script.data.Store",
"dsz.env.Get",
"os.path.normpath",
"os.path.isdir",
"dsz.script.data.Start",
"dsz.cmd.Run",
"dsz.data.CreateCommand",
"dsz.script.data.End",
"dsz.env.Delete",
"dsz.... | [((498, 529), 're.compile', 're.compile', (['"""_PROV_(MCL_[^_]+)"""'], {}), "('_PROV_(MCL_[^_]+)')\n", (508, 529), False, 'import re\n'), ((548, 579), 're.compile', 're.compile', (['"""_PROV_(.+_TARGET)"""'], {}), "('_PROV_(.+_TARGET)')\n", (558, 579), False, 'import re\n'), ((608, 644), 're.compile', 're.compile', (['"""_PROV_(MCL_[^_]*)_(.*)"""'], {}), "('_PROV_(MCL_[^_]*)_(.*)')\n", (618, 644), False, 'import re\n'), ((673, 709), 're.compile', 're.compile', (['"""_PROV_(.+_TARGET)_(.*)"""'], {}), "('_PROV_(.+_TARGET)_(.*)')\n", (683, 709), False, 'import re\n'), ((736, 763), 're.compile', 're.compile', (['"""_SUB_(MCL_.+)"""'], {}), "('_SUB_(MCL_.+)')\n", (746, 763), False, 'import re\n'), ((790, 820), 're.compile', 're.compile', (['"""_SUB_(.+_TARGET)"""'], {}), "('_SUB_(.+_TARGET)')\n", (800, 820), False, 'import re\n'), ((1299, 1321), 'dsz.control.echo.Off', 'dsz.control.echo.Off', ([], {}), '()\n', (1319, 1321), False, 'import dsz\n'), ((2069, 2115), 'dsz.data.CreateCommand', 'dsz.data.CreateCommand', (['"""lpgetenv"""', '"""lpgetenv"""'], {}), "('lpgetenv', 'lpgetenv')\n", (2091, 2115), False, 'import dsz\n'), ((5328, 5359), 'dsz.script.data.Start', 'dsz.script.data.Start', (['"""System"""'], {}), "('System')\n", (5349, 5359), False, 'import dsz\n'), ((5364, 5416), 'dsz.script.data.Add', 'dsz.script.data.Add', (['"""Name"""', 'system', 'dsz.TYPE_STRING'], {}), "('Name', system, dsz.TYPE_STRING)\n", (5383, 5416), False, 'import dsz\n'), ((5623, 5644), 'dsz.script.data.End', 'dsz.script.data.End', ([], {}), '()\n', (5642, 5644), False, 'import dsz\n'), ((5693, 5762), 'dsz.ui.Echo', 'dsz.ui.Echo', (["('Implementations for %s (%s)' % (name, options.Selected))"], {}), "('Implementations for %s (%s)' % (name, options.Selected))\n", (5704, 5762), False, 'import dsz\n'), ((5767, 5810), 'dsz.ui.Echo', 'dsz.ui.Echo', (['"""----------------------------"""'], {}), "('----------------------------')\n", (5778, 5810), False, 'import dsz\n'), ((6278, 6321), 
'dsz.ui.Echo', 'dsz.ui.Echo', (['"""----------------------------"""'], {}), "('----------------------------')\n", (6289, 6321), False, 'import dsz\n'), ((6326, 6341), 'dsz.ui.Echo', 'dsz.ui.Echo', (['""""""'], {}), "('')\n", (6337, 6341), False, 'import dsz\n'), ((10325, 10345), 'dsz.script.IsLocal', 'dsz.script.IsLocal', ([], {}), '()\n', (10343, 10345), False, 'import dsz\n'), ((12851, 12882), 'dsz.env.Get', 'dsz.env.Get', (['"""_LPDIR_RESOURCES"""'], {}), "('_LPDIR_RESOURCES')\n", (12862, 12882), False, 'import dsz\n'), ((12897, 12921), 'dsz.env.Get', 'dsz.env.Get', (['"""_RES_DIRS"""'], {}), "('_RES_DIRS')\n", (12908, 12921), False, 'import dsz\n'), ((12934, 12952), 'dsz.env.Get', 'dsz.env.Get', (['"""_OS"""'], {}), "('_OS')\n", (12945, 12952), False, 'import dsz\n'), ((14165, 14185), 'dsz.env.Get', 'dsz.env.Get', (['"""_ARCH"""'], {}), "('_ARCH')\n", (14176, 14185), False, 'import dsz\n'), ((14201, 14230), 'dsz.env.Get', 'dsz.env.Get', (['"""_COMPILED_ARCH"""'], {}), "('_COMPILED_ARCH')\n", (14212, 14230), False, 'import dsz\n'), ((14319, 14337), 'dsz.env.Get', 'dsz.env.Get', (['"""_OS"""'], {}), "('_OS')\n", (14330, 14337), False, 'import dsz\n'), ((4231, 4268), 'dsz.script.data.Start', 'dsz.script.data.Start', (['"""ModuleToggle"""'], {}), "('ModuleToggle')\n", (4252, 4268), False, 'import dsz\n'), ((5447, 5510), 'dsz.script.data.Add', 'dsz.script.data.Add', (['"""Selected"""', 'data.Selected', 'dsz.TYPE_STRING'], {}), "('Selected', data.Selected, dsz.TYPE_STRING)\n", (5466, 5510), False, 'import dsz\n'), ((5557, 5617), 'dsz.script.data.Add', 'dsz.script.data.Add', (['"""Implementation"""', 'item', 'dsz.TYPE_STRING'], {}), "('Implementation', item, dsz.TYPE_STRING)\n", (5576, 5617), False, 'import dsz\n'), ((6229, 6272), 'dsz.ui.Echo', 'dsz.ui.Echo', (["('%s %s' % (impl, suffix))", 'type'], {}), "('%s %s' % (impl, suffix), type)\n", (6240, 6272), False, 'import dsz\n'), ((6402, 6446), 'dsz.data.CreateCommand', 'dsz.data.CreateCommand', (['"""plugins"""', 
'"""plugins"""'], {}), "('plugins', 'plugins')\n", (6424, 6446), False, 'import dsz\n'), ((11644, 11664), 'dsz.script.IsLocal', 'dsz.script.IsLocal', ([], {}), '()\n', (11662, 11664), False, 'import dsz\n'), ((13054, 13125), 'os.path.normpath', 'os.path.normpath', (["('%s/%s/Modules/Techniques/%s/' % (resDir, res, osStr))"], {}), "('%s/%s/Modules/Techniques/%s/' % (resDir, res, osStr))\n", (13070, 13125), False, 'import os\n'), ((13204, 13220), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (13214, 13220), False, 'import os\n'), ((15150, 15167), 'dsz.ui.Echo', 'dsz.ui.Echo', (['task'], {}), '(task)\n', (15161, 15167), False, 'import dsz\n'), ((15774, 15786), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (15782, 15786), False, 'import sys\n'), ((5250, 5273), 'dsz.script.data.Store', 'dsz.script.data.Store', ([], {}), '()\n', (5271, 5273), False, 'import dsz\n'), ((6914, 6936), 'dsz.env.Check', 'dsz.env.Check', (['provVar'], {}), '(provVar)\n', (6927, 6936), False, 'import dsz\n'), ((7076, 7098), 'dsz.env.Delete', 'dsz.env.Delete', (['subVar'], {}), '(subVar)\n', (7090, 7098), False, 'import dsz\n'), ((7271, 7292), 'dsz.env.Check', 'dsz.env.Check', (['subVar'], {}), '(subVar)\n', (7284, 7292), False, 'import dsz\n'), ((7958, 7985), 'dsz.env.Set', 'dsz.env.Set', (['subVar', 'subVal'], {}), '(subVar, subVal)\n', (7969, 7985), False, 'import dsz\n'), ((10737, 10753), 'dsz.cmd.Run', 'dsz.cmd.Run', (['cmd'], {}), '(cmd)\n', (10748, 10753), False, 'import dsz\n'), ((13141, 13161), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (13155, 13161), False, 'import os\n'), ((13283, 13306), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (13296, 13306), False, 'import os\n'), ((14544, 14576), 'dsz.env.Get', 'dsz.env.Get', (['"""_OS_VERSION_MAJOR"""'], {}), "('_OS_VERSION_MAJOR')\n", (14555, 14576), False, 'import dsz\n'), ((14614, 14646), 'dsz.env.Get', 'dsz.env.Get', (['"""_OS_VERSION_MINOR"""'], {}), "('_OS_VERSION_MINOR')\n", 
(14625, 14646), False, 'import dsz\n'), ((14684, 14716), 'dsz.env.Get', 'dsz.env.Get', (['"""_OS_VERSION_OTHER"""'], {}), "('_OS_VERSION_OTHER')\n", (14695, 14716), False, 'import dsz\n'), ((15557, 15605), 'dsz.ui.Echo', 'dsz.ui.Echo', (["(' %s (%s)' % (msg, detail))", 'type'], {}), "(' %s (%s)' % (msg, detail), type)\n", (15568, 15605), False, 'import dsz\n'), ((15632, 15665), 'dsz.ui.Echo', 'dsz.ui.Echo', (["(' %s' % msg)", 'type'], {}), "(' %s' % msg, type)\n", (15643, 15665), False, 'import dsz\n'), ((1744, 1773), 'dsz.ui.Echo', 'dsz.ui.Echo', (['"""Unknown action"""'], {}), "('Unknown action')\n", (1755, 1773), False, 'import dsz\n'), ((7297, 7316), 'dsz.env.Get', 'dsz.env.Get', (['subVar'], {}), '(subVar)\n', (7308, 7316), False, 'import dsz\n'), ((9957, 9996), 'dsz.cmd.Run', 'dsz.cmd.Run', (["('load -id %d' % item['id'])"], {}), "('load -id %d' % item['id'])\n", (9968, 9996), False, 'import dsz\n'), ((11805, 11850), 'dsz.cmd.Run', 'dsz.cmd.Run', (["('free -id %d -force' % plugin.id)"], {}), "('free -id %d -force' % plugin.id)\n", (11816, 11850), False, 'import dsz\n')] |
#!/usr/bin/env python
import rospy
import math
import numpy as np
from sensor_msgs.msg import LaserScan
#######################################
# Laser Scan:
# Header: Seq, Stamp, frame_id
# Angle_min, Angle_max, Angle_Increment, Time_Increment
# Scan time, range_min, range_max, ranges, intensities
#######################################
class Noise_class:
    """ROS node helper: republishes /base_scan with additive Gaussian range noise."""

    def __init__(self):
        #rospy.on_shutdown(self.save_csv)
        self.laser_sub = rospy.Subscriber('/base_scan', LaserScan, self.laser_callback)
        self.scan_pub = rospy.Publisher('/gaus_err_laser_scan', LaserScan, queue_size= 1)

    def laser_callback(self, msg):
        """Copy *msg*, add N(0, sigma) noise to every range and republish it."""
        filtered_values = LaserScan()
        # Copy scan geometry / metadata through unchanged.
        filtered_values.header = msg.header
        filtered_values.angle_increment = msg.angle_increment
        filtered_values.time_increment = msg.time_increment
        filtered_values.scan_time = msg.scan_time
        filtered_values.range_min = msg.range_min
        filtered_values.range_max = msg.range_max
        filtered_values.intensities = msg.intensities
        filtered_values.angle_min = msg.angle_min
        filtered_values.angle_max = msg.angle_max
        # NOTE(review): the parameter is named "variance" but is passed as
        # numpy's `scale`, which is the standard deviation -- confirm intent.
        laser_noise_variance = rospy.get_param('laser_noise_variance')
        if laser_noise_variance <= 0:
            # Fall back to a sane default when the param is unset/invalid.
            laser_noise_variance = 0.1
        distance = np.array(msg.ranges)
        # Vectorized: one Gaussian sample added per beam (replaces the
        # original element-by-element Python loop over np.zeros()).
        noise_values_ranges = np.random.normal(loc = 0, scale=laser_noise_variance, size=len(distance))
        filtered_values.ranges = distance + noise_values_ranges
        self.scan_pub.publish(filtered_values)
if __name__ == '__main__':
    # Register this process as the 'noiser' node and spin until shutdown;
    # all work happens in Noise_class's subscription callback.
    rospy.init_node('noiser', anonymous=True)
    noisy = Noise_class()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
| [
"sensor_msgs.msg.LaserScan",
"rospy.Subscriber",
"rospy.init_node",
"rospy.get_param",
"numpy.array",
"rospy.spin",
"rospy.Publisher"
] | [((1698, 1739), 'rospy.init_node', 'rospy.init_node', (['"""noiser"""'], {'anonymous': '(True)'}), "('noiser', anonymous=True)\n", (1713, 1739), False, 'import rospy\n'), ((441, 503), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_scan"""', 'LaserScan', 'self.laser_callback'], {}), "('/base_scan', LaserScan, self.laser_callback)\n", (457, 503), False, 'import rospy\n'), ((522, 586), 'rospy.Publisher', 'rospy.Publisher', (['"""/gaus_err_laser_scan"""', 'LaserScan'], {'queue_size': '(1)'}), "('/gaus_err_laser_scan', LaserScan, queue_size=1)\n", (537, 586), False, 'import rospy\n'), ((642, 653), 'sensor_msgs.msg.LaserScan', 'LaserScan', ([], {}), '()\n', (651, 653), False, 'from sensor_msgs.msg import LaserScan\n'), ((667, 687), 'numpy.array', 'np.array', (['msg.ranges'], {}), '(msg.ranges)\n', (675, 687), True, 'import numpy as np\n'), ((1142, 1181), 'rospy.get_param', 'rospy.get_param', (['"""laser_noise_variance"""'], {}), "('laser_noise_variance')\n", (1157, 1181), False, 'import rospy\n'), ((1771, 1783), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1781, 1783), False, 'import rospy\n')] |
def find_paths(out_dir, files_ID, time_instances, month_id):
    """Build path/link/O-D-pair incidence data for each time instance.

    For every instance in ``time_instances['id']`` this function:
      1. parses the uni-class traffic-assignment network file into a
         directed link list,
      2. parses the trips file to obtain the number of zones,
      3. labels links (both "a,b" and "a->b" spellings) and O-D pairs,
         dumping the label dictionaries as JSON / zdump files,
      4. reads per-link free-flow times and capacities,
      5. enumerates shortest simple paths per O-D pair with networkx and
         writes path-link and O-D-pair-route incidence matrices.

    All outputs are written under ``out_dir``.  Relies on the module-level
    ``zdump``/``zload`` helpers (defined elsewhere in this module) for
    compressed-pickle I/O.
    """
    import json
    import networkx as nx
    for instance in time_instances['id']:
        # ---- network file: collect the directed links --------------------
        with open(out_dir + 'data_traffic_assignment_uni-class/'+ files_ID + '_net_' + month_id + '_full_' + instance + '.txt') as MA_journal_flow:
            MA_journal_flow_lines = MA_journal_flow.readlines()
        MA_journal_links = []
        i = -9  # the network file has a 9-line header before the link rows
        for line in MA_journal_flow_lines:
            i += 1
            if i > 0:
                MA_journal_links.append(line.split(' ')[1:3])
        numLinks = i
        # Link labels in two spellings: "a,b" (JS style) and "a->b".
        link_list_js = [str(int(MA_journal_links[i][0])) + ',' + str(int(MA_journal_links[i][1])) for \
                        i in range(len(MA_journal_links))]
        link_list_pk = [str(int(MA_journal_links[i][0])) + '->' + str(int(MA_journal_links[i][1])) for \
                        i in range(len(MA_journal_links))]
        zdump(link_list_js, out_dir + 'link_list_js' + files_ID + '.pkz')
        zdump(link_list_pk, out_dir + 'link_list_pk' + files_ID + '.pkz')
        numNodes = max([int(MA_journal_links[i][1]) for i in range(numLinks)])
        # Successor (out-neighbor) lists keyed by origin node.
        node_neighbors_dict = {}
        for node_ in range(numNodes):
            node = node_  # +1
            node_neighbors_dict[node] = []
            for link in MA_journal_links:
                if node == int(link[0]):
                    node_neighbors_dict[node].append(int(link[1]))
        # ---- trips file: number of zones ---------------------------------
        with open(out_dir + 'data_traffic_assignment_uni-class/'+ files_ID + '_trips_' + month_id + '_full_' + instance + '.txt') as MA_journal_trips:
            MA_journal_trips_lines = MA_journal_trips.readlines()
        numZones = int(MA_journal_trips_lines[0].split(' ')[3])
        numLinks = len(link_list_js)
        # FIX: ``od_pairs`` was referenced below but its construction had
        # been commented out, raising NameError.  Enumerate every ordered
        # O-D pair between the numbered zones (1..numZones).
        od_pairs = []
        for origin in range(numZones + 1)[1:]:
            for destination in range(numZones + 1)[1:]:
                if origin != destination:
                    od_pairs.append([origin, destination])
        # ---- O-D pair labels (pair -> label and label -> pair) -----------
        OD_pair_label_dict = {}
        OD_pair_label_dict_ = {}
        label = 1
        for od in od_pairs:
            i = od[0]
            j = od[1]
            key = (i, j)
            OD_pair_label_dict[str(key)] = label
            OD_pair_label_dict_[str(label)] = key
            label += 1
        with open(out_dir + 'od_pair_label_dict.json', 'w') as json_file:
            json.dump(OD_pair_label_dict, json_file)
        with open(out_dir + 'od_pair_label_dict__.json', 'w') as json_file:
            json.dump(OD_pair_label_dict_, json_file)
        OD_pair_label_dict_refined = {}
        OD_pair_label_dict_refined_ = {}
        label = 1
        for od in od_pairs:
            i = od[0]
            j = od[1]
            key = (i, j)
            OD_pair_label_dict_refined[str(key)] = label
            OD_pair_label_dict_refined_[str(label)] = key
            label += 1
        with open(out_dir + 'od_pair_label_dict_MA_refined.json', 'w') as json_file:
            json.dump(OD_pair_label_dict_refined, json_file)
        with open(out_dir + 'od_pair_label_dict__refined.json', 'w') as json_file:
            json.dump(OD_pair_label_dict_refined_, json_file)
        # ---- link labels, "a,b" spelling ---------------------------------
        link_label_dict = {}
        link_label_dict_ = {}
        link_list = zload(out_dir + 'link_list_js' + files_ID + '.pkz')
        for i in range(numLinks):
            link_label_dict[str(i)] = link_list[i]
        for i in range(numLinks):
            link_label_dict_[link_list[i]] = i
        with open(out_dir + 'link_label_dict.json', 'w') as json_file:
            json.dump(link_label_dict, json_file)
        with open(out_dir + 'link_label_dict_.json', 'w') as json_file:
            json.dump(link_label_dict_, json_file)
        # ---- link labels, "a->b" spelling --------------------------------
        link_label_dict = {}
        link_label_dict_ = {}
        link_list = zload(out_dir + 'link_list_pk' + files_ID + '.pkz')
        for i in range(numLinks):
            link_label_dict[str(i)] = link_list[i]
        for i in range(numLinks):
            link_label_dict_[link_list[i]] = i
        zdump(link_label_dict, out_dir + 'link_label_dict.pkz')
        zdump(link_label_dict_, out_dir + 'link_label_dict_.pkz')
        # ---- per-link free-flow time and capacity ------------------------
        link_length_dict_MA_journal = {}  # stores free-flow time, actually
        link_capac_dict_MA_journal = {}
        length_list = []
        capac_list = []
        with open(out_dir + 'data_traffic_assignment_uni-class/'+ files_ID + '_net_' + month_id + '_full_' + instance + '.txt', 'r') as f:
            read_data = f.readlines()
            flag = 0
            for row in read_data:
                # Rows containing ';' delimit the data section; skip the
                # first such row, then parse time (col 5) and capacity (col 3).
                if ';' in row:
                    flag += 1
                if flag > 1:
                    length_list.append(float(row.split(' ')[5]))
                    capac_list.append(float(row.split(' ')[3]))
        for idx in range(len(length_list)):
            key = str(idx)
            link_length_dict_MA_journal[key] = length_list[idx]
            link_capac_dict_MA_journal[key] = capac_list[idx]
        with open(out_dir + 'link_length_dict.json', 'w') as json_file:
            json.dump(link_length_dict_MA_journal, json_file)
        with open(out_dir + 'link_capac_dict.json', 'w') as json_file:
            json.dump(link_capac_dict_MA_journal, json_file)

        # Compute the length of a route given as "n1->n2->...->nk".
        def routeLength(route):
            link_list = []
            node_list = []
            for i in route.split('->'):
                node_list.append(int(i))
            for i in range(len(node_list))[:-1]:
                link_list.append('%d->%d' % (node_list[i], node_list[i+1]))
            length_of_route = sum([link_length_dict_MA_journal[str(link_label_dict_[link])] for link in link_list])
            return length_of_route

        # ---- shortest simple paths and incidence matrices ----------------
        MA_journal = nx.DiGraph()
        MA_journal.add_nodes_from(range(numNodes+1)[1:])
        MA_journal_weighted_edges = [(int(link_list_js[i].split(',')[0]), int(link_list_js[i].split(',')[1]), \
                                      length_list[i]) for i in range(len(link_list_js))]
        MA_journal.add_weighted_edges_from(MA_journal_weighted_edges)
        path = nx.all_pairs_dijkstra_path(MA_journal)
        path = list(path)
        route_path_mat = {}
        od_route_dict = {}
        route_ = 0
        with open(out_dir + 'path-link_incidence_' + instance + files_ID + '.txt', 'w') as the_file:
            for od in od_pairs:
                origi = od[0]
                desti = od[1]
                paths = list(nx.shortest_simple_paths(MA_journal, origi, desti))
                the_file.write('O-D pair (%s, %s):\n' % (origi, desti))
                for path in paths:
                    # Render the node list as "n1->n2->...->nk".
                    route = str(path).replace("[", "").replace(", ", "->").replace("]", "")
                    the_file.write(route)
                    the_file.write('\n')
                    route_ = route_ + 1
                    key = "(" + str(origi) + ", " + str(desti) + ")"
                    od_route_dict[str(OD_pair_label_dict_refined[key]) + "-" + str(route_)] = 1
                    for link_i in range(len(path)-1):
                        link = str(path[link_i]) + "->" + str(path[link_i+1])
                        link_id = link_label_dict_[link]
                        route_path_mat[str(link_id) + "-" + str(route_)] = 1
        with open(out_dir + 'path-link_incidence_' + instance + files_ID + '.txt', 'r') as the_file:
            # Count the enumerated routes: one line containing '->' per route.
            i = 0
            for row in the_file:
                if '->' in row:
                    i = i + 1
        with open(out_dir + 'numRoutes_' + instance + files_ID + '.json', 'w') as json_file:
            json.dump(i, json_file)
        with open(out_dir + "link_route_incidence_" + instance + files_ID + ".json", 'w') as json_file:
            json.dump(route_path_mat, json_file)
        with open(out_dir + "od_pair_route_incidence_" + instance + files_ID + ".json", 'w') as json_file:
            json.dump(od_route_dict, json_file)
find_paths(out_dir, files_ID, time_instances, month_id) | [
"networkx.shortest_simple_paths",
"networkx.DiGraph",
"networkx.all_pairs_dijkstra_path",
"json.dump"
] | [((6182, 6194), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (6192, 6194), True, 'import networkx as nx\n'), ((6539, 6577), 'networkx.all_pairs_dijkstra_path', 'nx.all_pairs_dijkstra_path', (['MA_journal'], {}), '(MA_journal)\n', (6565, 6577), True, 'import networkx as nx\n'), ((2483, 2523), 'json.dump', 'json.dump', (['OD_pair_label_dict', 'json_file'], {}), '(OD_pair_label_dict, json_file)\n', (2492, 2523), False, 'import json\n'), ((2626, 2667), 'json.dump', 'json.dump', (['OD_pair_label_dict_', 'json_file'], {}), '(OD_pair_label_dict_, json_file)\n', (2635, 2667), False, 'import json\n'), ((3120, 3168), 'json.dump', 'json.dump', (['OD_pair_label_dict_refined', 'json_file'], {}), '(OD_pair_label_dict_refined, json_file)\n', (3129, 3168), False, 'import json\n'), ((3278, 3327), 'json.dump', 'json.dump', (['OD_pair_label_dict_refined_', 'json_file'], {}), '(OD_pair_label_dict_refined_, json_file)\n', (3287, 3327), False, 'import json\n'), ((3830, 3867), 'json.dump', 'json.dump', (['link_label_dict', 'json_file'], {}), '(link_label_dict, json_file)\n', (3839, 3867), False, 'import json\n'), ((3966, 4004), 'json.dump', 'json.dump', (['link_label_dict_', 'json_file'], {}), '(link_label_dict_, json_file)\n', (3975, 4004), False, 'import json\n'), ((5480, 5529), 'json.dump', 'json.dump', (['link_length_dict_MA_journal', 'json_file'], {}), '(link_length_dict_MA_journal, json_file)\n', (5489, 5529), False, 'import json\n'), ((5627, 5675), 'json.dump', 'json.dump', (['link_capac_dict_MA_journal', 'json_file'], {}), '(link_capac_dict_MA_journal, json_file)\n', (5636, 5675), False, 'import json\n'), ((8624, 8647), 'json.dump', 'json.dump', (['i', 'json_file'], {}), '(i, json_file)\n', (8633, 8647), False, 'import json\n'), ((8777, 8813), 'json.dump', 'json.dump', (['route_path_mat', 'json_file'], {}), '(route_path_mat, json_file)\n', (8786, 8813), False, 'import json\n'), ((8946, 8981), 'json.dump', 'json.dump', (['od_route_dict', 'json_file'], {}), 
'(od_route_dict, json_file)\n', (8955, 8981), False, 'import json\n'), ((7014, 7064), 'networkx.shortest_simple_paths', 'nx.shortest_simple_paths', (['MA_journal', 'origi', 'desti'], {}), '(MA_journal, origi, desti)\n', (7038, 7064), True, 'import networkx as nx\n')] |
#!/usr/bin/python
import signal
import sys
import dweepy
import time
import pyupm_grove as grove
import pyupm_ttp223 as ttp223
import pyupm_i2clcd as lcd
def interruptHandler(signal, frame):
    """SIGINT handler: terminate the process with exit status 0."""
    sys.exit(0)
if __name__ == '__main__':
    # Entry point: count button presses (up) and touch-sensor presses
    # (down), show the running count on the LCD, and publish it to dweet.io.
    signal.signal(signal.SIGINT, interruptHandler)
    touch = ttp223.TTP223(6)          # touch sensor on digital pin 6
    myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)  # I2C LCD (bus 0, LCD/RGB addresses)
    button = grove.GroveButton(8)      # push button on digital pin 8
    count = 0
    myLcd.setColor(0,0,255)            # blue backlight
    # Read the input and print, waiting 1/2 second between readings
    while 1:
        if button.value():
            count=count+1
        if touch.isPressed():
            count=count-1
        myLcd.setCursor(0,0)
        myLcd.write('%6d'% count)
        time.sleep(0.5)
        dic={}
        dic["cont"]=count
        dweepy.dweet_for("Aracelis",dic)
        # NOTE(review): "time. sleep" is legal Python (whitespace is allowed
        # around the attribute dot) but unconventional.
        time. sleep(1)
    # NOTE(review): the loop above never exits, so these dels are unreachable.
    del button
    del touch
| [
"pyupm_ttp223.TTP223",
"signal.signal",
"dweepy.dweet_for",
"time.sleep",
"pyupm_grove.GroveButton",
"pyupm_i2clcd.Jhd1313m1",
"sys.exit"
] | [((197, 208), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (205, 208), False, 'import sys\n'), ((239, 285), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'interruptHandler'], {}), '(signal.SIGINT, interruptHandler)\n', (252, 285), False, 'import signal\n'), ((298, 314), 'pyupm_ttp223.TTP223', 'ttp223.TTP223', (['(6)'], {}), '(6)\n', (311, 314), True, 'import pyupm_ttp223 as ttp223\n'), ((326, 350), 'pyupm_i2clcd.Jhd1313m1', 'lcd.Jhd1313m1', (['(0)', '(62)', '(98)'], {}), '(0, 62, 98)\n', (339, 350), True, 'import pyupm_i2clcd as lcd\n'), ((367, 387), 'pyupm_grove.GroveButton', 'grove.GroveButton', (['(8)'], {}), '(8)\n', (384, 387), True, 'import pyupm_grove as grove\n'), ((674, 689), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (684, 689), False, 'import time\n'), ((733, 766), 'dweepy.dweet_for', 'dweepy.dweet_for', (['"""Aracelis"""', 'dic'], {}), "('Aracelis', dic)\n", (749, 766), False, 'import dweepy\n'), ((772, 785), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (782, 785), False, 'import time\n')] |
from flask_wtf import FlaskForm
from flask_login import current_user
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: username and password plus a 'remember me' checkbox."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember= BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """Account sign-up form with DB-backed uniqueness checks."""
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    # NOTE(review): '<PASSWORD>' looks like a redaction artifact; the label
    # was presumably something like 'Repeat Password' -- confirm.
    password2 = PasswordField(
        '<PASSWORD>', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_username(self, username):
        # WTForms calls validate_<fieldname> hooks automatically on validate().
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')

    def validate_email(self, email):
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
class EditProfileForm(FlaskForm):
    """Profile-editing form: username, short bio, and an avatar upload."""
    username = StringField('Username', validators=[DataRequired()])
    about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
    picture = FileField('Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Update')

    def validate_username(self, username):
        # Only check uniqueness when the name actually changed.
        if username.data != current_user.username:
            user = User.query.filter_by(username=username.data).first()
            if user:
                raise ValidationError('That username is taken. Please choose a different one.')

    def validate_email(self, email):
        # NOTE(review): this form declares no 'email' field, so WTForms will
        # never invoke this hook -- confirm whether an email field is missing.
        if email.data != current_user.email:
            user = User.query.filter_by(email=email.data).first()
            if user:
                raise ValidationError('That email is taken. Please choose a different one.')
class PostForm(FlaskForm):
    """Form for creating a post: a title, a body, and a submit button."""
    title = StringField('Title', validators=[DataRequired()])
    # Fix: the body field was labelled 'Title' (copy-paste error).
    content = TextAreaField('Content', validators=[DataRequired()])
    submit = SubmitField('Post')
"wtforms.validators.Email",
"wtforms.validators.ValidationError",
"flask_wtf.file.FileAllowed",
"wtforms.BooleanField",
"wtforms.SubmitField",
"wtforms.validators.EqualTo",
"wtforms.validators.Length",
"app.models.User.query.filter_by",
"wtforms.validators.DataRequired"
] | [((504, 531), 'wtforms.BooleanField', 'BooleanField', (['"""Remember Me"""'], {}), "('Remember Me')\n", (516, 531), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((543, 565), 'wtforms.SubmitField', 'SubmitField', (['"""Sign In"""'], {}), "('Sign In')\n", (554, 565), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((914, 937), 'wtforms.SubmitField', 'SubmitField', (['"""Register"""'], {}), "('Register')\n", (925, 937), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((1591, 1612), 'wtforms.SubmitField', 'SubmitField', (['"""Update"""'], {}), "('Update')\n", (1602, 1612), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((2259, 2278), 'wtforms.SubmitField', 'SubmitField', (['"""Post"""'], {}), "('Post')\n", (2270, 2278), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((1078, 1129), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Please use a different username."""'], {}), "('Please use a different username.')\n", (1093, 1129), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((1261, 1317), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Please use a different email address."""'], {}), "('Please use a different email address.')\n", (1276, 1317), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((407, 421), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (419, 421), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((475, 489), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (487, 489), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), 
((653, 667), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (665, 667), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((713, 727), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (725, 727), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((729, 736), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (734, 736), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((790, 804), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (802, 804), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((865, 879), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (877, 879), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((881, 900), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (888, 900), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((991, 1035), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (1011, 1035), False, 'from app.models import User\n'), ((1180, 1218), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (1200, 1218), False, 'from app.models import User\n'), ((1404, 1418), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1416, 1418), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((1472, 1494), 'wtforms.validators.Length', 'Length', ([], {'min': '(0)', 'max': '(140)'}), '(min=0, max=140)\n', (1478, 1494), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((1550, 1577), 'flask_wtf.file.FileAllowed', 
'FileAllowed', (["['jpg', 'png']"], {}), "(['jpg', 'png'])\n", (1561, 1577), False, 'from flask_wtf.file import FileField, FileAllowed\n'), ((1790, 1863), 'wtforms.validators.ValidationError', 'ValidationError', (['"""That username is taken. Please choose a different one."""'], {}), "('That username is taken. Please choose a different one.')\n", (1805, 1863), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((2023, 2093), 'wtforms.validators.ValidationError', 'ValidationError', (['"""That email is taken. Please choose a different one."""'], {}), "('That email is taken. Please choose a different one.')\n", (2038, 2093), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((2167, 2181), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2179, 2181), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((2231, 2245), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2243, 2245), False, 'from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length\n'), ((1713, 1757), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (1733, 1757), False, 'from app.models import User\n'), ((1952, 1990), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (1972, 1990), False, 'from app.models import User\n')] |
# epydoc -- Utility functions
#
# Copyright (C) 2005 <NAME>
# Author: <NAME> <<EMAIL>>
# URL: <http://epydoc.sf.net>
#
# $Id: util.py 1671 2008-01-29 02:55:49Z edloper $
"""
Miscellaneous utility functions that are used by multiple modules.
@group Python source types: is_module_file, is_package_dir, is_pyname,
py_src_filename
@group Text processing: wordwrap, decode_with_backslashreplace,
plaintext_to_html
"""
__docformat__ = 'epytext en'
import os, os.path, re
######################################################################
## Python Source Types
######################################################################
# Extensions that identify Python modules on disk: plain source files
# vs. byte-compiled / native extension modules.
PY_SRC_EXTENSIONS = ['.py', '.pyw']
PY_BIN_EXTENSIONS = ['.pyc', '.so', '.pyd']
def is_module_file(path):
    """Return a true value if `path` names an existing Python module file.

    A module file is a regular file whose basename is a valid Python
    identifier and whose extension is a known source or binary extension.
    """
    # Non-strings can never name a module file.
    if not isinstance(path, basestring):
        return False
    directory, filename = os.path.split(path)
    basename, extension = os.path.splitext(filename)
    known_extensions = PY_SRC_EXTENSIONS + PY_BIN_EXTENSIONS
    return (os.path.isfile(path) and
            re.match('[a-zA-Z_]\w*$', basename) and
            extension in known_extensions)
def is_src_filename(filename):
    """Return True if `filename` names an existing Python source file."""
    if not isinstance(filename, basestring):
        return False
    if not os.path.exists(filename):
        return False
    extension = os.path.splitext(filename)[1]
    return extension in PY_SRC_EXTENSIONS
def is_package_dir(dirname):
    """
    Return true if the given directory is a valid package directory
    (i.e., it names a directory that contains a valid __init__ file,
    and its name is a valid identifier).
    """
    # Only string paths can name package directories.
    if not isinstance(dirname, basestring):
        return False
    if not os.path.isdir(dirname):
        return False
    dirname = os.path.abspath(dirname)
    # Handle trailing separators: "foo/" splits to ("foo", "").
    parent, leaf = os.path.split(dirname)
    if leaf == '':
        parent, leaf = os.path.split(parent)
    # (An identifier check on the directory name was removed because of
    # sourceforge bug #1787028 -- too strict in some cases, e.g. eggs.)
    for entry in os.listdir(dirname):
        candidate = os.path.join(dirname, entry)
        if entry.startswith('__init__.') and is_module_file(candidate):
            return True
    return False
def is_pyname(name):
    """Return a match object if `name` is a dotted Python name (``a.b.c``)."""
    dotted_name = r"\w+(\.\w+)*$"
    return re.match(dotted_name, name)
def py_src_filename(filename):
    """Return the Python source file corresponding to `filename`.

    If `filename` already has a source extension it is returned as-is;
    otherwise each source extension is tried against the base name.

    @raise ValueError: if no corresponding source file exists.
    """
    basefile, extension = os.path.splitext(filename)
    if extension in PY_SRC_EXTENSIONS:
        return filename
    for ext in PY_SRC_EXTENSIONS:
        candidate = '%s%s' % (basefile, ext)
        if os.path.isfile(candidate):
            return candidate
    raise ValueError('Could not find a corresponding '
                     'Python source file for %r.' % filename)
def munge_script_name(filename):
    """Return a documentation name for a script: 'script-' plus its
    basename with every non-word character replaced by '_'."""
    basename = os.path.split(filename)[1]
    return 'script-' + re.sub(r'\W', '_', basename)
######################################################################
## Text Processing
######################################################################
def decode_with_backslashreplace(s):
    r"""
    Convert the given 8-bit string into unicode, treating any
    character c such that ord(c)<128 as an ascii character, and
    converting any c such that ord(c)>128 into a backslashed escape
    sequence.

        >>> decode_with_backslashreplace('abc\xff\xe8')
        u'abc\\xff\\xe8'
    """
    # s.encode('string-escape') would also backslash some ascii chars
    # (e.g. \ and '), so round-trip explicitly instead:
    # latin1 -> ascii with backslashreplace -> unicode.
    assert isinstance(s, str)
    escaped = s.decode('latin1').encode('ascii', 'backslashreplace')
    return escaped.decode('ascii')
def wordwrap(str, indent=0, right=75, startindex=0, splitchars=''):
    """
    Word-wrap the given string. I.e., add newlines to the string such
    that any lines that are longer than C{right} are broken into
    shorter lines (at the first whitespace sequence that occurs before
    index C{right}). If the given string contains newlines, they will
    I{not} be removed. Any lines that begin with whitespace will not
    be wordwrapped.

    @param indent: If specified, then indent each line by this number
        of spaces.
    @type indent: C{int}
    @param right: The right margin for word wrapping. Lines that are
        longer than C{right} will be broken at the first whitespace
        sequence before the right margin.
    @type right: C{int}
    @param startindex: If specified, then assume that the first line
        is already preceeded by C{startindex} characters.
    @type startindex: C{int}
    @param splitchars: A list of non-whitespace characters which can
        be used to split a line. (E.g., use '/\\' to allow path names
        to be split over multiple lines.)
    @rtype: C{str}
    """
    # Split the text into chunks, keeping the separators (spaces,
    # newlines, and optionally runs ending in a splitchar) as chunks.
    if splitchars:
        chunks = re.split(r'( +|\n|[^ \n%s]*[%s])' %
                 (re.escape(splitchars), re.escape(splitchars)),
                 str.expandtabs())
    else:
        chunks = re.split(r'( +|\n)', str.expandtabs())
    # Seed the output with padding so the first line starts at `indent`
    # even though `startindex` characters already precede it.
    result = [' '*(indent-startindex)]
    charindex = max(indent, startindex)
    for chunknum, chunk in enumerate(chunks):
        # Break before a chunk that would overflow the right margin, and
        # at every explicit newline chunk.
        if (charindex+len(chunk) > right and charindex > 0) or chunk == '\n':
            result.append('\n' + ' '*indent)
            charindex = indent
            # Drop leading whitespace/newline chunks at a fresh line start.
            if chunk[:1] not in ('\n', ' '):
                result.append(chunk)
                charindex += len(chunk)
        else:
            result.append(chunk)
            charindex += len(chunk)
    # Strip trailing whitespace and guarantee exactly one final newline.
    return ''.join(result).rstrip()+'\n'
def plaintext_to_html(s):
    """
    @return: An HTML string that encodes the given plaintext string.
    In particular, special characters (such as C{'<'} and C{'&'})
    are escaped.
    @rtype: C{string}
    """
    # '&' must be escaped first, or the other entities would be mangled.
    replacements = [('&', '&amp;'), ('"', '&quot;'),
                    ('<', '&lt;'), ('>', '&gt;')]
    for plain, entity in replacements:
        s = s.replace(plain, entity)
    return s
def plaintext_to_latex(str, nbsp=0, breakany=0):
    """
    @return: A LaTeX string that encodes the given plaintext string.
    In particular, special characters (such as C{'$'} and C{'_'})
    are escaped, and tabs are expanded.
    @rtype: C{string}
    @param breakany: Insert hyphenation marks, so that LaTeX can
        break the resulting string at any point. This is useful for
        small boxes (e.g., the type box in the variable list table).
    @param nbsp: Replace every space with a non-breaking space
        (C{'~'}).
    """
    # Mark every character with \1; these become hyphenation points below.
    if breakany:
        str = re.sub('(.)', '\\1\1', str)
    # Stash backslashes as \0 so later escaping doesn't touch them.
    str = str.replace('\\', '\0')
    str = str.expandtabs()
    # Backslash-escape the characters LaTeX treats specially.
    str = re.sub(r'([#$&%_\${}])', r'\\\1', str)
    # Characters that need a named LaTeX command (insertion order matters:
    # the '~' entry must run before any nbsp substitution below).
    named = [('|', '{\\textbar}'),
             ('<', '{\\textless}'),
             ('>', '{\\textgreater}'),
             ('^', '{\\textasciicircum}'),
             ('~', '{\\textasciitilde}'),
             ('\0', r'{\textbackslash}')]
    for char, command in named:
        str = str.replace(char, command)
    # Replace spaces with non-breaking spaces.
    if nbsp:
        str = str.replace(' ', '~')
    # Convert the \1 markers into hyphenation points.
    if breakany:
        str = str.replace('\1', r'\-')
    return str
class RunSubprocessError(OSError):
    """Raised when a subprocess fails; carries its captured output."""

    def __init__(self, cmd, out, err):
        message = '%s failed' % cmd[0]
        OSError.__init__(self, message)
        self.out = out  # captured stdout of the child
        self.err = err  # captured stderr of the child
def run_subprocess(cmd, data=None):
    """
    Execute the command C{cmd} in a subprocess.

    @param cmd: The command to execute, specified as a list
        of string.
    @param data: A string containing data to send to the
        subprocess.
    @return: A tuple C{(out, err)}.
    @raise OSError: If there is any problem executing the
        command, or if its exitval is not 0.
    """
    # A bare command string is split on whitespace into argv form.
    if isinstance(cmd, basestring):
        cmd = cmd.split()

    # Under Python 2.4+, use subprocess
    try:
        from subprocess import Popen, PIPE
        pipe = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = pipe.communicate(data)
        if hasattr(pipe, 'returncode'):
            if pipe.returncode == 0:
                return out, err
            else:
                raise RunSubprocessError(cmd, out, err)
        else:
            # Assume that there was an error iff anything was written
            # to the child's stderr.
            if err == '':
                return out, err
            else:
                raise RunSubprocessError(cmd, out, err)
    except ImportError:
        pass

    # Under Python 2.3 or earlier, on unix, use popen2.Popen3 so we
    # can access the return value.
    import popen2
    if hasattr(popen2, 'Popen3'):
        pipe = popen2.Popen3(' '.join(cmd), True)
        to_child = pipe.tochild
        from_child = pipe.fromchild
        child_err = pipe.childerr
        if data:
            to_child.write(data)
        to_child.close()
        out = err = ''
        # Drain both streams while the child runs, then once more after
        # it exits, to avoid losing buffered output.
        while pipe.poll() is None:
            out += from_child.read()
            err += child_err.read()
        out += from_child.read()
        err += child_err.read()
        if pipe.wait() == 0:
            return out, err
        else:
            raise RunSubprocessError(cmd, out, err)

    # Under Python 2.3 or earlier, on non-unix, use os.popen3
    else:
        to_child, from_child, child_err = os.popen3(' '.join(cmd), 'b')
        if data:
            try:
                to_child.write(data)
            # Guard for a broken pipe error
            except IOError as e:
                raise OSError(e)
        to_child.close()
        out = from_child.read()
        err = child_err.read()
        # Assume that there was an error iff anything was written
        # to the child's stderr.
        if err == '':
            return out, err
        else:
            raise RunSubprocessError(cmd, out, err)
| [
"os.path.exists",
"re.escape",
"os.listdir",
"subprocess.Popen",
"os.path.splitext",
"re.match",
"os.path.join",
"os.path.split",
"os.path.isfile",
"os.path.isdir",
"os.path.abspath",
"re.sub"
] | [((869, 888), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (882, 888), False, 'import os, os.path, re\n'), ((917, 943), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (933, 943), False, 'import os, os.path, re\n'), ((1695, 1719), 'os.path.abspath', 'os.path.abspath', (['dirname'], {}), '(dirname)\n', (1710, 1719), False, 'import os, os.path, re\n'), ((1851, 1873), 'os.path.split', 'os.path.split', (['dirname'], {}), '(dirname)\n', (1864, 1873), False, 'import os, os.path, re\n'), ((2135, 2154), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (2145, 2154), False, 'import os, os.path, re\n'), ((2361, 2394), 're.match', 're.match', (['"""\\\\w+(\\\\.\\\\w+)*$"""', 'name'], {}), "('\\\\w+(\\\\.\\\\w+)*$', name)\n", (2369, 2394), False, 'import os, os.path, re\n'), ((2451, 2477), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2467, 2477), False, 'import os, os.path, re\n'), ((2924, 2948), 're.sub', 're.sub', (['"""\\\\W"""', '"""_"""', 'name'], {}), "('\\\\W', '_', name)\n", (2930, 2948), False, 'import os, os.path, re\n'), ((6838, 6878), 're.sub', 're.sub', (['"""([#$&%_\\\\${}])"""', '"""\\\\\\\\\\\\1"""', 'str'], {}), "('([#$&%_\\\\${}])', '\\\\\\\\\\\\1', str)\n", (6844, 6878), False, 'import os, os.path, re\n'), ((956, 976), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (970, 976), False, 'import os, os.path, re\n'), ((993, 1029), 're.match', 're.match', (['"""[a-zA-Z_]\\\\w*$"""', 'basename'], {}), "('[a-zA-Z_]\\\\w*$', basename)\n", (1001, 1029), False, 'import os, os.path, re\n'), ((1196, 1220), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1210, 1220), False, 'import os, os.path, re\n'), ((1636, 1658), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (1649, 1658), False, 'import os, os.path, re\n'), ((1908, 1929), 'os.path.split', 'os.path.split', (['parent'], {}), '(parent)\n', (1921, 1929), False, 
'import os, os.path, re\n'), ((2175, 2202), 'os.path.join', 'os.path.join', (['dirname', 'name'], {}), '(dirname, name)\n', (2187, 2202), False, 'import os, os.path, re\n'), ((2886, 2909), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (2899, 2909), False, 'import os, os.path, re\n'), ((6622, 6651), 're.sub', 're.sub', (['"""(.)"""', '"""\\\\1\x01"""', 'str'], {}), "('(.)', '\\\\1\\x01', str)\n", (6628, 6651), False, 'import os, os.path, re\n'), ((8134, 8182), 'subprocess.Popen', 'Popen', (['cmd'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n', (8139, 8182), False, 'from subprocess import Popen, PIPE\n'), ((1246, 1272), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1262, 1272), False, 'import os, os.path, re\n'), ((2604, 2644), 'os.path.isfile', 'os.path.isfile', (["('%s%s' % (basefile, ext))"], {}), "('%s%s' % (basefile, ext))\n", (2618, 2644), False, 'import os, os.path, re\n'), ((4991, 5012), 're.escape', 're.escape', (['splitchars'], {}), '(splitchars)\n', (5000, 5012), False, 'import os, os.path, re\n'), ((5014, 5035), 're.escape', 're.escape', (['splitchars'], {}), '(splitchars)\n', (5023, 5035), False, 'import os, os.path, re\n')] |
import pytest
import simdjson
def test_load(parser):
    """Loading from disk: invalid JSON raises, valid JSON is queryable."""
    with pytest.raises(ValueError):
        parser.load('jsonexamples/invalid.json')
    document = parser.load("jsonexamples/small/demo.json")
    document.at_pointer('/Image/Width')
def test_parse(parser):
    """Parsing an in-memory bytes fragment succeeds."""
    fragment = b'{"hello": "world"}'
    parser.parse(fragment)
def test_unicode_decode_error(parser):
    """Invalid UTF-8 input must surface as UnicodeDecodeError."""
    bad_utf8 = 'jsonexamples/test_parsing/n_array_invalid_utf8.json'
    with pytest.raises(UnicodeDecodeError):
        parser.load(bad_utf8)
def test_implementation():
    """Setting the implementation: rejects unknown names, accepts 'fallback'."""
    parser = simdjson.Parser()
    # simdjson itself does not validate implementation names, but
    # pysimdjson does; without that safety check an invalid name
    # would cause a segfault.
    with pytest.raises(ValueError):
        parser.implementation = 'rubbish'
    # The generic implementation is available on every platform.
    parser.implementation = 'fallback'
    parser.parse('{"hello": "world"}')
| [
"simdjson.Parser",
"pytest.raises"
] | [((691, 708), 'simdjson.Parser', 'simdjson.Parser', ([], {}), '()\n', (706, 708), False, 'import simdjson\n'), ((105, 130), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (118, 130), False, 'import pytest\n'), ((491, 524), 'pytest.raises', 'pytest.raises', (['UnicodeDecodeError'], {}), '(UnicodeDecodeError)\n', (504, 524), False, 'import pytest\n'), ((890, 915), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (903, 915), False, 'import pytest\n')] |
import os
import time
from unittest import TestCase, skipIf
from fuocore import MpvPlayer
from fuocore.core.player import Playlist
MP3_URL = os.path.join(os.path.dirname(__file__),
'../data/fixtures/ybwm-ts.mp3')
class FakeSongModel: # pylint: disable=all
pass
class TestPlayer(TestCase):
def setUp(self):
self.player = MpvPlayer()
self.player.initialize()
def tearDown(self):
self.player.shutdown()
@skipIf(os.environ.get('TEST_ENV', 'travis'), '')
def test_play(self):
self.player.play(MP3_URL)
self.player.stop()
@skipIf(os.environ.get('TEST_ENV', 'travis'), '')
def test_duration(self):
# This may failed?
self.player.play(MP3_URL)
time.sleep(0.1)
self.assertIsNotNone(self.player.duration)
@skipIf(os.environ.get('TEST_ENV', 'travis'), '')
def test_seek(self):
self.player.play(MP3_URL)
time.sleep(0.1)
self.player.position = 100
class TestPlaylist(TestCase):
def setUp(self):
self.s1 = FakeSongModel()
self.s2 = FakeSongModel()
self.playlist = Playlist()
self.playlist.add(self.s1)
self.playlist.add(self.s2)
def tearDown(self):
self.playlist.clear()
def test_add(self):
self.playlist.add(self.s1)
self.assertEqual(len(self.playlist), 2)
def test_remove_current_song(self):
s3 = FakeSongModel()
self.playlist.add(s3)
self.playlist.current_song = self.s2
self.playlist.remove(self.s2)
self.assertEqual(self.playlist.current_song, s3)
self.assertEqual(len(self.playlist), 2)
def test_remove(self):
self.playlist.remove(self.s1)
self.assertEqual(len(self.playlist), 1)
| [
"fuocore.MpvPlayer",
"os.environ.get",
"time.sleep",
"os.path.dirname",
"fuocore.core.player.Playlist"
] | [((157, 182), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (172, 182), False, 'import os\n'), ((367, 378), 'fuocore.MpvPlayer', 'MpvPlayer', ([], {}), '()\n', (376, 378), False, 'from fuocore import MpvPlayer\n'), ((481, 517), 'os.environ.get', 'os.environ.get', (['"""TEST_ENV"""', '"""travis"""'], {}), "('TEST_ENV', 'travis')\n", (495, 517), False, 'import os\n'), ((762, 777), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (772, 777), False, 'import time\n'), ((622, 658), 'os.environ.get', 'os.environ.get', (['"""TEST_ENV"""', '"""travis"""'], {}), "('TEST_ENV', 'travis')\n", (636, 658), False, 'import os\n'), ((951, 966), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (961, 966), False, 'import time\n'), ((842, 878), 'os.environ.get', 'os.environ.get', (['"""TEST_ENV"""', '"""travis"""'], {}), "('TEST_ENV', 'travis')\n", (856, 878), False, 'import os\n'), ((1147, 1157), 'fuocore.core.player.Playlist', 'Playlist', ([], {}), '()\n', (1155, 1157), False, 'from fuocore.core.player import Playlist\n')] |
#!/usr/bin/env python3
import sys
import numpy as np
from example import AmiciExample
class ExampleCalvetti(AmiciExample):
def __init__(self):
AmiciExample.__init__( self )
self.numX = 6
self.numP = 0
self.numK = 6
self.modelOptions['theta'] = []
self.modelOptions['kappa'] = [0.29, 0.74, 0.44, 0.08, 0.27, 0.18]
self.modelOptions['ts'] = np.linspace(0, 20, 201)
self.modelOptions['pscale'] = 0
self.solverOptions['atol'] = 1e-6
self.solverOptions['rtol'] = 1e-4
self.solverOptions['sens_ind'] = []
self.solverOptions['sensi'] = 0
self.solverOptions['sensi_meth'] = 1
def writeNoSensi(filename):
ex = ExampleCalvetti()
ex.writeToFile(filename, '/model_calvetti/nosensi/')
def main():
if len(sys.argv) < 2:
print("Error: Must provide output file as first and only argument.")
sys.exit(1)
filename = sys.argv[1]
writeNoSensi(filename)
if __name__ == "__main__":
main()
| [
"numpy.linspace",
"example.AmiciExample.__init__",
"sys.exit"
] | [((158, 185), 'example.AmiciExample.__init__', 'AmiciExample.__init__', (['self'], {}), '(self)\n', (179, 185), False, 'from example import AmiciExample\n'), ((404, 427), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(201)'], {}), '(0, 20, 201)\n', (415, 427), True, 'import numpy as np\n'), ((922, 933), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (930, 933), False, 'import sys\n')] |
# coding: utf-8
"""A Jupyter-aware wrapper for the yarn package manager"""
import os
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from jupyterlab_server.process import subprocess, which
HERE = os.path.dirname(os.path.abspath(__file__))
YARN_PATH = os.path.join(HERE, "staging", "yarn.js")
def execvp(cmd, argv):
"""Execvp, except on Windows where it uses Popen.
The first argument, by convention, should point to the filename
associated with the file being executed.
Python provides execvp on Windows, but its behavior is problematic
(Python bug#9148).
"""
cmd = which(cmd)
if os.name == "nt":
import signal
import sys
p = subprocess.Popen([cmd] + argv[1:])
# Don't raise KeyboardInterrupt in the parent process.
# Set this after spawning, to avoid subprocess inheriting handler.
signal.signal(signal.SIGINT, signal.SIG_IGN)
p.wait()
sys.exit(p.returncode)
else:
os.execvp(cmd, argv)
def main(argv=None):
"""Run node and return the result."""
# Make sure node is available.
argv = argv or sys.argv[1:]
execvp("node", ["node", YARN_PATH] + argv)
| [
"os.execvp",
"signal.signal",
"os.path.join",
"jupyterlab_server.process.which",
"jupyterlab_server.process.subprocess.Popen",
"sys.exit",
"os.path.abspath"
] | [((319, 359), 'os.path.join', 'os.path.join', (['HERE', '"""staging"""', '"""yarn.js"""'], {}), "(HERE, 'staging', 'yarn.js')\n", (331, 359), False, 'import os\n'), ((280, 305), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (295, 305), False, 'import os\n'), ((666, 676), 'jupyterlab_server.process.which', 'which', (['cmd'], {}), '(cmd)\n', (671, 676), False, 'from jupyterlab_server.process import subprocess, which\n'), ((755, 789), 'jupyterlab_server.process.subprocess.Popen', 'subprocess.Popen', (['([cmd] + argv[1:])'], {}), '([cmd] + argv[1:])\n', (771, 789), False, 'from jupyterlab_server.process import subprocess, which\n'), ((936, 980), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_IGN'], {}), '(signal.SIGINT, signal.SIG_IGN)\n', (949, 980), False, 'import signal\n'), ((1006, 1028), 'sys.exit', 'sys.exit', (['p.returncode'], {}), '(p.returncode)\n', (1014, 1028), False, 'import sys\n'), ((1047, 1067), 'os.execvp', 'os.execvp', (['cmd', 'argv'], {}), '(cmd, argv)\n', (1056, 1067), False, 'import os\n')] |
#!/usr/bin/env python
# encoding: utf-8
# PYTHON_ARGCOMPLETE_OK
# from __future__ imports must occur at the beginning of the file
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import sys
import os
# https://packaging.python.org/single_source_version/
__title__ = 'bypy'
__version__ = '1.6.7'
__author__ = '<NAME>'
__license__ = 'MIT'
__desc__ = 'Python client for Baidu Yun (Personal Cloud Storage) 百度云/百度网盘 Python 客户端'
__url__ = 'https://github.com/houtianze/bypy'
### return (error) codes
# they are put at the top because:
# 1. they have zero dependencies
# 2. can be referred in any abort later, e.g. return error on import faliures
ENoError = 0 # plain old OK, fine, no error.
EIncorrectPythonVersion = 1
#EApiNotConfigured = 10 # Deprecated: ApiKey, SecretKey and AppPcsPath not properly configured
EArgument = 10 # invalid program command argument
EAbort = 20 # aborted
EException = 30 # unhandled exception occured
EParameter = 40 # invalid parameter passed to ByPy
EInvalidJson = 50
EHashMismatch = 60 # MD5 hashes of the local file and remote file don't match each other
EFileWrite = 70
EFileTooBig = 80 # file too big to upload
EFailToCreateLocalDir = 90
EFailToCreateLocalFile = 100
EFailToDeleteDir = 110
EFailToDeleteFile = 120
EFileNotFound = 130
EMaxRetry = 140
ERequestFailed = 150 # request failed
ECacheNotLoaded = 160
EMigrationFailed = 170
EDownloadCerts = 180
EUserRejected = 190 # user's decision
EUpdateNeeded = 200
ESkipped = 210
EFatal = -1 # No way to continue
# internal errors
IEMD5NotFound = 31079 # File md5 not found, you should use upload API to upload the whole file.
IESuperfileCreationFailed = 31081 # superfile create failed (HTTP 404)
# Undocumented, see #308 , https://paste.ubuntu.com/23672323/
IEBlockMissInSuperFile2 = 31363 # block miss in superfile2 (HTTP 403)
IETaskNotFound = 36016 # Task was not found
IEFileAlreadyExists = 31061 # {"error_code":31061,"error_msg":"file already exists","request_id":2939656146461714799}
# TODO: Should have use an enum or some sort of data structure for this,
# but now changing this is too time consuming and error-prone
ErrorExplanations = {
ENoError: "Everything went fine.",
EIncorrectPythonVersion: "Incorrect Python version",
EArgument: "Invalid program argument passed in",
EAbort: "Abort due to unrecovrable errors",
EException: "Unhandled exception occurred",
EParameter: "Some or all the parameters passed to the function are invalid",
EInvalidJson: "Invalid JSON received",
EHashMismatch: "MD5 hashes of the local file and remote file don't match each other",
EFileWrite: "Error writing file",
EFileTooBig: "File too big to upload",
EFailToCreateLocalDir: "Unable to create some directory(ies)",
EFailToCreateLocalFile: "Unable to create some local file(s)",
EFailToDeleteDir:" Unable to delete some directory(ies)",
EFailToDeleteFile: "Unable to delete some file(s)",
EFileNotFound: "File not found",
EMaxRetry: "Maximum retries reached",
ERequestFailed: "Request failed",
ECacheNotLoaded: "Failed to load file caches",
EMigrationFailed: "Failed to migrate from the old cache format",
EDownloadCerts: "Failed to download certificats", # no long in use
EUserRejected: "User chose to not to proceed",
EUpdateNeeded: "Need to update bypy",
ESkipped: "Some files/directores are skipped",
EFatal: "Fatal error, unable to continue",
IEMD5NotFound: "File md5 not found, you should use upload API to upload the whole file.",
IESuperfileCreationFailed: "superfile create failed (HTTP 404)",
# Undocumented, see #308 , https://paste.ubuntu.com/23672323/
IEBlockMissInSuperFile2: "Block miss in superfile2 (HTTP 403)",
IETaskNotFound: "Task was not found",
IEFileAlreadyExists: "File already exists"
}
DownloaderAria2 = 'aria2'
Downloaders = [DownloaderAria2]
DownloaderDefaultArgs = {
DownloaderAria2 : "-c -k10M -x4 -s4 --file-allocation=none"
}
DownloaderArgsEnvKey = 'DOWNLOADER_ARGUMENTS'
DownloaderArgsIsFilePrefix = '@'
PipBinaryName = 'pip' + str(sys.version_info[0])
PipInstallCommand = PipBinaryName + ' install requests'
PipUpgradeCommand = PipBinaryName + ' install -U requests'
#### Definitions that are real world constants
OneK = 1024
OneM = OneK * OneK
OneG = OneM * OneK
OneT = OneG * OneK
OneP = OneT * OneK
OneE = OneP * OneK
OneZ = OneE * OneK
OneY = OneZ * OneK
SIPrefixNames = [ '', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y' ]
SIPrefixTimes = {
'K' : OneK,
'M' : OneM,
'G' : OneG,
'T' : OneT,
'E' : OneE,
'Z' : OneZ,
'Y' : OneY }
# before this, you don't know me, i don't know you - Eason
TenYearInSeconds = 60 * 60 * 24 * 366 * 10
# For Python 3 only, threading.TIMEOUT_MAX is 9223372036854.0 on all *nix systems,
# but it's a little over 49 days for Windows, if we give a value larger than that,
# Python 3 on Windows will throw towel, so we cringe.
FortyNineDaysInSeconds = 60 * 60 * 24 * 49
#### Baidu PCS constants
# ==== NOTE ====
# I use server auth, because it's the only possible method to protect the SecretKey.
# If you want to perform local authorization using 'Device' method instead, you just need:
# - Paste your own ApiKey and SecretKey. (An non-NONE or non-empty SecretKey means using local auth
# - Change the AppPcsPath to your own App's directory at Baidu PCS
# Then you are good to go
ApiKey = '<KEY>' # replace with your own ApiKey if you use your own appid
SecretKey = '' # replace with your own SecretKey if you use your own appid
# NOTE: no trailing '/'
AppPcsPath = '/apps/bypy' # change this to the App's directory you specified when creating the app
AppPcsPathLen = len(AppPcsPath)
## Baidu PCS URLs etc.
OpenApiUrl = "https://openapi.baidu.com"
OpenApiVersion = "2.0"
OAuthUrl = OpenApiUrl + "/oauth/" + OpenApiVersion
ServerAuthUrl = OAuthUrl + "/authorize"
DeviceAuthUrl = OAuthUrl + "/device/code"
TokenUrl = OAuthUrl + "/token"
PcsDomain = 'pcs.baidu.com'
RestApiPath = '/rest/2.0/pcs/'
PcsUrl = 'https://' + PcsDomain + RestApiPath
CPcsUrl = 'https://c.pcs.baidu.com/rest/2.0/pcs/'
DPcsUrl = 'https://d.pcs.baidu.com/rest/2.0/pcs/'
## Baidu PCS constants
MinRapidUploadFileSize = 256 * OneK
MaxSliceSize = 2 * OneG
MaxSlicePieces = 1024
MaxListEntries = 1000 # https://github.com/houtianze/bypy/issues/285
### Auth servers
GaeUrl = 'https://bypyoauth.appspot.com'
#OpenShiftUrl = 'https://bypy-tianze.rhcloud.com'
OpenShiftUrl = 'https://bypyoauth-route-bypy.a3c1.starter-us-west-1.openshiftapps.com'
HerokuUrl = 'https://bypyoauth.herokuapp.com'
Heroku1Url = 'https://bypyoauth1.herokuapp.com'
GaeRedirectUrl = GaeUrl + '/auth'
GaeRefreshUrl = GaeUrl + '/refresh'
OpenShiftRedirectUrl = OpenShiftUrl + '/auth'
OpenShiftRefreshUrl = OpenShiftUrl + '/refresh'
HerokuRedirectUrl = HerokuUrl + '/auth'
HerokuRefreshUrl = HerokuUrl + '/refresh'
Heroku1RedirectUrl = Heroku1Url + '/auth'
Heroku1RefreshUrl = Heroku1Url + '/refresh'
AuthServerList = [
# url, retry?, message
(OpenShiftRedirectUrl, False, "Authorizing/refreshing with the OpenShift server ..."),
(HerokuRedirectUrl, False, "OpenShift server failed, authorizing/refreshing with the Heroku server ..."),
(Heroku1RedirectUrl, False, "Heroku server failed, authorizing/refreshing with the Heroku1 server ..."),
(GaeRedirectUrl, False, "Heroku1 server failed. Last resort: authorizing/refreshing with the GAE server ..."),
]
RefreshServerList = [
# url, retry?, message
(OpenShiftRefreshUrl, False, "Authorizing/refreshing with the OpenShift server ..."),
(HerokuRefreshUrl, False, "OpenShift server failed, authorizing/refreshing with the Heroku server ..."),
(Heroku1RefreshUrl, False, "Heroku server failed, authorizing/refreshing with the Heroku1 server ..."),
(GaeRefreshUrl, False, "Heroku1 server failed. Last resort: authorizing/refreshing with the GAE server ..."),
]
### public static properties
HelpMarker = "Usage:"
### ByPy config constants
## directories, for setting, cache, etc
HomeDir = os.getenv('BYPY_HOME','')
if HomeDir == "" :
HomeDir = os.path.expanduser('~')
# os.path.join() may not handle unicode well
ConfigDir = HomeDir + os.sep + '.bypy'
TokenFileName = 'bypy.json'
TokenFilePath = ConfigDir + os.sep + TokenFileName
SettingFileName = 'bypy.setting.json'
SettingFilePath = ConfigDir + os.sep + SettingFileName
HashCacheFileName = 'bypy.hashcache.json'
HashCachePath = ConfigDir + os.sep + HashCacheFileName
PickleFileName = 'bypy.pickle'
PicklePath = ConfigDir + os.sep + PickleFileName
# ProgressPath saves the MD5s of uploaded slices, for upload resuming
# format:
# {
# abspath: [slice_size, [slice1md5, slice2md5, ...]],
# }
#
ProgressFileName = 'bypy.parts.json'
ProgressPath = ConfigDir + os.sep + ProgressFileName
ByPyCertsFileName = 'bypy.cacerts.pem'
OldByPyCertsPath = ConfigDir + os.sep + ByPyCertsFileName
# Old setting locations, should be moved to ~/.bypy to be clean
OldTokenFilePath = HomeDir + os.sep + '.bypy.json'
OldPicklePath = HomeDir + os.sep + '.bypy.pickle'
RemoteTempDir = AppPcsPath + '/.bypytemp'
SettingKey_OverwriteRemoteTempDir = 'overwriteRemoteTempDir'
SettingKey_LastUpdateCheckTime = 'lastUpdateCheck'
## default config values
PrintFlushPeriodInSec = 5.0
# TODO: Does the following User-Agent emulation help?
UserAgent = None # According to xslidian, User-Agent affects download.
#UserAgent = 'Mozilla/5.0'
#UserAgent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)"
#UserAgent = 'netdisk;5.2.7.2;PC;PC-Windows;6.2.9200;WindowsBaiduYunGuanJia'
DefaultSliceInMB = 20
DefaultSliceSize = 20 * OneM
DefaultDlChunkSize = 20 * OneM
RetryDelayInSec = 10
CacheSavePeriodInSec = 10 * 60.0
DefaultTimeOutInSeconds=300
# share retries
ShareRapidUploadRetries = 3
DefaultResumeDlRevertCount = 1
DefaultProcessCount = 1
## program switches
CleanOptionShort = '-c'
CleanOptionLong = '--clean'
DisableSslCheckOption = '--disable-ssl-check'
CaCertsOption = '--cacerts'
MultiprocessOption = '--processes'
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
| [
"os.path.expanduser",
"os.getenv"
] | [((7922, 7948), 'os.getenv', 'os.getenv', (['"""BYPY_HOME"""', '""""""'], {}), "('BYPY_HOME', '')\n", (7931, 7948), False, 'import os\n'), ((7979, 8002), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (7997, 8002), False, 'import os\n')] |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import pkgutil
import sys
from subprocess import check_call
import kaldi
parser = argparse.ArgumentParser(
description="Generates autosummary documentation for pykaldi.")
# parser.add_argument('--force', '-f', action='store_true',
# help='Overwrite files. Default: False.')
parser.add_argument('--out_dir', '-o', default='api',
help='Output directory. Default: api' )
parser.add_argument('--include_private', action='store_true',
help='Include private modules. Default: False.')
args = parser.parse_args()
if os.path.exists(args.out_dir):
print("Output directory: {} already exists.".format(args.out_dir),
file=sys.stderr)
sys.exit(1)
os.mkdir(args.out_dir)
##################################################
# Generate autosummary lists and api
##################################################
with open("api.rst", "w") as api, \
open("packages.rst", "w") as packages, \
open("modules.rst", "w") as modules:
print(".. toctree::\n :caption: API Guide\n :hidden:\n", file=api)
# print(" {}/kaldi".format(args.out_dir), file=api)
print(".. autosummary::\n :toctree: {}\n".format(args.out_dir),
file=packages)
print(".. autosummary::\n :toctree: {}\n".format(args.out_dir),
file=modules)
for _, modname, ispkg in pkgutil.walk_packages(path=kaldi.__path__,
prefix=kaldi.__name__+'.',
onerror=lambda x: None):
if modname.split(".")[-1][0] == "_" and not args.include_private:
continue
if modname == "kaldi.itf":
continue
if ispkg:
print(" {}/{}".format(args.out_dir, modname), file=api)
print(" {}".format(modname), file=packages)
else:
if len(modname.split(".")) == 2:
print(" {}/{}".format(args.out_dir, modname), file=api)
print(" {}".format(modname), file=modules)
##################################################
# Call autogen
##################################################
check_call(['sphinx-autogen', '-i', '-o', args.out_dir, 'packages.rst'])
check_call(['sphinx-autogen', '-i', '-o', args.out_dir, 'modules.rst'])
check_call(['rm' , '-f', 'packages.rst', 'modules.rst'])
##################################################
# Include submodules in package documentation
##################################################
for importer, modname, ispkg in pkgutil.walk_packages(path=kaldi.__path__,
prefix=kaldi.__name__+'.',
onerror=lambda x: None):
if modname.split(".")[-1][0] == "_" and not args.include_private:
continue
if modname == "kaldi.itf":
continue
if not ispkg and len(modname.split(".")) > 2:
mod_file = "{}.rst".format(modname)
mod_path = os.path.join(args.out_dir, mod_file)
pkg_file = "{}.rst".format(".".join(modname.split(".")[:-1]))
pkg_path = os.path.join(args.out_dir, pkg_file)
# Edit submodule headers
check_call(['sed', '-i', 's/=/-/g', mod_path])
# Include submodule in pkg.rst
with open(pkg_path, "a") as pkg:
# pkg.write("""\n.. include:: {}\n\n""".format(mod_file))
pkg.write("\n")
pkg.write(open(mod_path).read())
# Remove mod.rst
check_call(['rm', '-f', mod_path])
##################################################
# Add autosummary nosignatures option
##################################################
for importer, modname, ispkg in pkgutil.walk_packages(path=kaldi.__path__,
prefix=kaldi.__name__+'.',
onerror=lambda x: None):
if modname.split(".")[-1][0] == "_" and not args.include_private:
continue
if modname == "kaldi.itf":
continue
if ispkg:
pkg_file = "{}.rst".format(modname)
pkg_path = os.path.join(args.out_dir, pkg_file)
check_call(['sed', '-i',
's/autosummary::/autosummary::\\n :nosignatures:/g',
pkg_path])
| [
"os.path.exists",
"argparse.ArgumentParser",
"pkgutil.walk_packages",
"subprocess.check_call",
"os.path.join",
"os.mkdir",
"sys.exit"
] | [((174, 266), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generates autosummary documentation for pykaldi."""'}), "(description=\n 'Generates autosummary documentation for pykaldi.')\n", (197, 266), False, 'import argparse\n'), ((666, 694), 'os.path.exists', 'os.path.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (680, 694), False, 'import os\n'), ((811, 833), 'os.mkdir', 'os.mkdir', (['args.out_dir'], {}), '(args.out_dir)\n', (819, 833), False, 'import os\n'), ((2256, 2328), 'subprocess.check_call', 'check_call', (["['sphinx-autogen', '-i', '-o', args.out_dir, 'packages.rst']"], {}), "(['sphinx-autogen', '-i', '-o', args.out_dir, 'packages.rst'])\n", (2266, 2328), False, 'from subprocess import check_call\n'), ((2329, 2400), 'subprocess.check_call', 'check_call', (["['sphinx-autogen', '-i', '-o', args.out_dir, 'modules.rst']"], {}), "(['sphinx-autogen', '-i', '-o', args.out_dir, 'modules.rst'])\n", (2339, 2400), False, 'from subprocess import check_call\n'), ((2401, 2456), 'subprocess.check_call', 'check_call', (["['rm', '-f', 'packages.rst', 'modules.rst']"], {}), "(['rm', '-f', 'packages.rst', 'modules.rst'])\n", (2411, 2456), False, 'from subprocess import check_call\n'), ((2640, 2739), 'pkgutil.walk_packages', 'pkgutil.walk_packages', ([], {'path': 'kaldi.__path__', 'prefix': "(kaldi.__name__ + '.')", 'onerror': '(lambda x: None)'}), "(path=kaldi.__path__, prefix=kaldi.__name__ + '.',\n onerror=lambda x: None)\n", (2661, 2739), False, 'import pkgutil\n'), ((3811, 3910), 'pkgutil.walk_packages', 'pkgutil.walk_packages', ([], {'path': 'kaldi.__path__', 'prefix': "(kaldi.__name__ + '.')", 'onerror': '(lambda x: None)'}), "(path=kaldi.__path__, prefix=kaldi.__name__ + '.',\n onerror=lambda x: None)\n", (3832, 3910), False, 'import pkgutil\n'), ((798, 809), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (806, 809), False, 'import sys\n'), ((1453, 1552), 'pkgutil.walk_packages', 'pkgutil.walk_packages', ([], {'path': 
'kaldi.__path__', 'prefix': "(kaldi.__name__ + '.')", 'onerror': '(lambda x: None)'}), "(path=kaldi.__path__, prefix=kaldi.__name__ + '.',\n onerror=lambda x: None)\n", (1474, 1552), False, 'import pkgutil\n'), ((3091, 3127), 'os.path.join', 'os.path.join', (['args.out_dir', 'mod_file'], {}), '(args.out_dir, mod_file)\n', (3103, 3127), False, 'import os\n'), ((3218, 3254), 'os.path.join', 'os.path.join', (['args.out_dir', 'pkg_file'], {}), '(args.out_dir, pkg_file)\n', (3230, 3254), False, 'import os\n'), ((3297, 3343), 'subprocess.check_call', 'check_call', (["['sed', '-i', 's/=/-/g', mod_path]"], {}), "(['sed', '-i', 's/=/-/g', mod_path])\n", (3307, 3343), False, 'from subprocess import check_call\n'), ((3602, 3636), 'subprocess.check_call', 'check_call', (["['rm', '-f', mod_path]"], {}), "(['rm', '-f', mod_path])\n", (3612, 3636), False, 'from subprocess import check_call\n'), ((4226, 4262), 'os.path.join', 'os.path.join', (['args.out_dir', 'pkg_file'], {}), '(args.out_dir, pkg_file)\n', (4238, 4262), False, 'import os\n'), ((4272, 4369), 'subprocess.check_call', 'check_call', (["['sed', '-i', 's/autosummary::/autosummary::\\\\n :nosignatures:/g',\n pkg_path]"], {}), "(['sed', '-i',\n 's/autosummary::/autosummary::\\\\n :nosignatures:/g', pkg_path])\n", (4282, 4369), False, 'from subprocess import check_call\n')] |
"""
Bootstrap the Galaxy framework.
This should not be called directly! Use the run.sh script in Galaxy's
top level directly.
"""
import os
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'lib')))
from galaxy.util.pastescript import serve
from check_python import check_python # noqa: I100
# ensure supported version
try:
check_python()
except:
sys.exit(1)
if 'LOG_TEMPFILES' in os.environ:
from log_tempfile import TempFile
_log_tempfile = TempFile()
serve.run()
| [
"check_python.check_python",
"os.path.dirname",
"galaxy.util.pastescript.serve.run",
"sys.exit",
"log_tempfile.TempFile"
] | [((526, 537), 'galaxy.util.pastescript.serve.run', 'serve.run', ([], {}), '()\n', (535, 537), False, 'from galaxy.util.pastescript import serve\n'), ((382, 396), 'check_python.check_python', 'check_python', ([], {}), '()\n', (394, 396), False, 'from check_python import check_python\n'), ((514, 524), 'log_tempfile.TempFile', 'TempFile', ([], {}), '()\n', (522, 524), False, 'from log_tempfile import TempFile\n'), ((409, 420), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (417, 420), False, 'import sys\n'), ((202, 227), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (217, 227), False, 'import os\n')] |
from runner.action.feature.feature import Feature
from runner.action.set.discrete import Discrete
class FeatureDiscrete(Feature):
"""Discrete feature
Alias for Feature with set.Discrete first sub action
Args:
low (float): low boundary
high (float): high boundary
num (int): number of steps
route (str): route to value (see Action)
"""
def __init__(self, low=0.0, high=1.0, num=None, route='.~~', **kwargs):
d = Discrete(low=low, high=high, num=num, route=route)
kwargs.setdefault('sub_actions', []).insert(0, d)
super().__init__(**kwargs)
| [
"runner.action.set.discrete.Discrete"
] | [((476, 526), 'runner.action.set.discrete.Discrete', 'Discrete', ([], {'low': 'low', 'high': 'high', 'num': 'num', 'route': 'route'}), '(low=low, high=high, num=num, route=route)\n', (484, 526), False, 'from runner.action.set.discrete import Discrete\n')] |
import os
import csv
import threading
from .classes import Node, Link, Network, Column, ColumnVec, VDFPeriod, \
AgentType, DemandPeriod, Demand, Assignment, UI
from .colgen import update_links_using_columns
from .consts import SMALL_DIVISOR
__all__ = [
'read_network',
'load_columns',
'output_columns',
'output_link_performance',
'download_sample_data_sets',
'output_agent_paths'
]
# for precheck on connectivity of each OD pair
# 0: isolated, has neither outgoing links nor incoming links
# 1: has at least one outgoing link
# 2: has at least one incoming link
# 3: has both outgoing and incoming links
_zone_degrees = {}
def _update_orig_zone(oz_id):
if oz_id not in _zone_degrees:
_zone_degrees[oz_id] = 1
elif _zone_degrees[oz_id] == 2:
_zone_degrees[oz_id] = 3
def _update_dest_zone(dz_id):
if dz_id not in _zone_degrees:
_zone_degrees[dz_id] = 2
elif _zone_degrees[dz_id] == 1:
_zone_degrees[dz_id] = 3
def _are_od_connected(oz_id, dz_id):
connected = True
# at least one node in O must have outgoing links
if oz_id not in _zone_degrees or _zone_degrees[oz_id] == 2:
connected = False
print(f'WARNING! {oz_id} has no outgoing links to route volume '
f'between OD: {oz_id} --> {dz_id}')
# at least one node in D must have incoming links
if dz_id not in _zone_degrees or _zone_degrees[dz_id] == 1:
if connected:
connected = False
print(f'WARNING! {dz_id} has no incoming links to route volume '
f'between OD: {oz_id} --> {dz_id}')
return connected
def _convert_str_to_int(str):
"""
TypeError will take care the case that str is None
ValueError will take care the case that str is empty
"""
if not str:
return None
try:
return int(str)
except ValueError:
return int(float(str))
except TypeError:
return None
def _convert_str_to_float(str):
"""
TypeError will take care the case that str is None
ValueError will take care the case that str is empty
"""
if not str:
return None
try:
return float(str)
except (TypeError, ValueError):
return None
def _download_url(url, filename, loc_dir):
try:
import requests
except ImportError:
print('please print requests to preceed downloading!!')
try:
r = requests.get(url)
r.raise_for_status()
with open(loc_dir+filename, 'wb') as f:
f.write(r.content)
except requests.HTTPError:
print('file not existing: '+url)
except requests.ConnectionError:
raise Exception('check your connectcion!!!')
except Exception as e:
raise e
def download_sample_data_sets():
url = 'https://raw.githubusercontent.com/jdlph/Path4GMNS/master/data/'
data_sets = [
"ASU",
"Braess_Paradox",
"Chicago_Sketch",
"Lima_Network",
"Sioux_Falls",
"Two_Corridor"
]
files = [
"node.csv",
"link.csv",
"demand.csv",
"settings.csv",
"settings.yml"
]
print('downloading starts')
# data folder under cdw
loc_data_dir = 'data'
if not os.path.isdir(loc_data_dir):
os.mkdir(loc_data_dir)
for ds in data_sets:
web_dir = url + ds + '/'
loc_sub_dir = os.path.join(loc_data_dir, ds) + '/'
if not os.path.isdir(loc_sub_dir):
os.mkdir(loc_sub_dir)
# multi-threading
threads = []
for x in files:
t = threading.Thread(
target=_download_url,
args=(web_dir+x, x, loc_sub_dir)
)
t.start()
threads.append(t)
for t in threads:
t.join()
print('downloading completes')
print('check '+os.path.join(os.getcwd(), loc_data_dir)+' for downloaded data sets')
def read_nodes(input_dir,
nodes,
id_to_no_dict,
no_to_id_dict,
zone_to_node_dict):
""" step 1: read input_node """
with open(input_dir+'/node.csv', 'r') as fp:
print('read node.csv')
reader = csv.DictReader(fp)
node_seq_no = 0
for line in reader:
# set up node_id, which should be an integer
node_id = _convert_str_to_int(line['node_id'])
if node_id is None:
continue
# set up zone_id, which should be an integer
zone_id = _convert_str_to_int(line['zone_id'])
if zone_id is None:
zone_id = -1
# treat them as string
coord_x = line['x_coord']
coord_y = line['y_coord']
# construct node object
node = Node(node_seq_no, node_id, zone_id, coord_x, coord_y)
nodes.append(node)
# set up mapping between node_seq_no and node_id
id_to_no_dict[node_id] = node_seq_no
no_to_id_dict[node_seq_no] = node_id
# associate node_id with corresponding zone
if zone_id not in zone_to_node_dict.keys():
zone_to_node_dict[zone_id] = []
zone_to_node_dict[zone_id].append(node_id)
node_seq_no += 1
print(f'the number of nodes is {node_seq_no}')
zone_size = len(zone_to_node_dict)
# do not count virtual zone with id as -1
if -1 in zone_to_node_dict.keys():
zone_size -= 1
print(f'the number of zones is {zone_size}')
def read_links(input_dir,
               links,
               nodes,
               id_to_no_dict,
               link_id_dict,
               agent_type_size,
               demand_period_size,
               load_demand):
    """ step 2: read input_link

    Parses link.csv, builds one Link per valid row, wires each link into its
    end nodes, and (when load_demand is truthy) updates origin/destination
    zone degrees. Rows missing valid node ids or length are skipped; all
    other attributes fall back to defaults when absent or malformed.
    """
    with open(input_dir+'/link.csv', 'r') as fp:
        print('read link.csv')
        reader = csv.DictReader(fp)
        link_seq_no = 0
        for line in reader:
            # it can be an empty string
            link_id = line['link_id']
            # check the validity
            from_node_id = _convert_str_to_int(line['from_node_id'])
            if from_node_id is None:
                continue
            to_node_id =_convert_str_to_int(line['to_node_id'])
            if to_node_id is None:
                continue
            length = _convert_str_to_float(line['length'])
            if length is None:
                continue
            # pass validity check
            try:
                from_node_no = id_to_no_dict[from_node_id]
                to_node_no = id_to_no_dict[to_node_id]
            except KeyError:
                print(f'EXCEPTION: Node ID {from_node_id} '
                      f'or/and Node ID {to_node_id} NOT IN THE NETWORK!!')
                continue
            # for the following attributes,
            # if they are not None, convert them to the corresponding types
            # if they are None's, set them using the default values
            lanes = _convert_str_to_int(line['lanes'])
            if lanes is None:
                lanes = 1
            link_type = _convert_str_to_int(line['link_type'])
            if link_type is None:
                link_type = 1
            free_speed = _convert_str_to_int(line['free_speed'])
            if free_speed is None:
                free_speed = 60
            # issue: int??
            capacity = _convert_str_to_int(line['capacity'])
            if capacity is None:
                capacity = 49500
            # if link.csv does not have no column 'allowed_uses',
            # set allowed_uses to 'all'
            # developer's note:
            # we may need to change this implementation as we cannot deal with
            # cases a link which is not open to any modes
            try:
                allowed_uses = line['allowed_uses']
                if not allowed_uses:
                    allowed_uses = 'all'
            except KeyError:
                allowed_uses = 'all'
            # if link.csv does not have no column 'geometry',
            # set geometry to ''
            try:
                geometry = line['geometry']
            except KeyError:
                geometry = ''
            link_id_dict[link_id] = link_seq_no
            # construct link object
            link = Link(link_id,
                        link_seq_no,
                        from_node_no,
                        to_node_no,
                        from_node_id,
                        to_node_id,
                        length,
                        lanes,
                        link_type,
                        free_speed,
                        capacity,
                        allowed_uses,
                        geometry,
                        agent_type_size,
                        demand_period_size)
            # VDF Attributes (one VDF_* column group per demand period,
            # suffixed 1..demand_period_size)
            for i in range(demand_period_size):
                dp_id_str = str(i+1)
                header_vdf_alpha = 'VDF_alpha' + dp_id_str
                header_vdf_beta = 'VDF_beta' + dp_id_str
                header_vdf_mu = 'VDF_mu' + dp_id_str
                header_vdf_fftt = 'VDF_fftt' + dp_id_str
                header_vdf_cap = 'VDF_cap' + dp_id_str
                header_vdf_phf = 'VDF_phf' + dp_id_str
                # case i: link.csv does not VDF attributes at all
                # case ii: link.csv only has partial VDF attributes
                # under case i, we will set up only one VDFPeriod object using
                # default values
                # under case ii, we will set up some VDFPeriod objects up to
                # the number of complete set of VDF_alpha, VDF_beta, and VDF_mu
                try:
                    VDF_alpha = line[header_vdf_alpha]
                    if VDF_alpha:
                        VDF_alpha = float(VDF_alpha)
                except (KeyError, TypeError):
                    if i == 0:
                        # default value will be applied in the constructor
                        VDF_alpha = 0.15
                    else:
                        # a later period is missing its column group: stop
                        break
                try:
                    VDF_beta = line[header_vdf_beta]
                    if VDF_beta:
                        VDF_beta = float(VDF_beta)
                except (KeyError, TypeError):
                    if i == 0:
                        # default value will be applied in the constructor
                        VDF_beta = 4
                    else:
                        break
                try:
                    VDF_mu = line[header_vdf_mu]
                    if VDF_mu:
                        VDF_mu = float(VDF_mu)
                except (KeyError, TypeError):
                    if i == 0:
                        # default value will be applied in the constructor
                        VDF_mu = 1000
                    else:
                        break
                try:
                    VDF_fftt = line[header_vdf_fftt]
                    if VDF_fftt:
                        VDF_fftt = float(VDF_fftt)
                except (KeyError, TypeError):
                    # set it up using length and free_speed from link
                    # (SMALL_DIVISOR guards against division by zero)
                    VDF_fftt = length / max(SMALL_DIVISOR, free_speed) * 60
                try:
                    VDF_cap = line[header_vdf_cap]
                    if VDF_cap:
                        VDF_cap = float(VDF_cap)
                except (KeyError, TypeError):
                    # set it up using capacity from link
                    VDF_cap = capacity
                # not a mandatory column
                try:
                    VDF_phf = line[header_vdf_phf]
                    if VDF_phf:
                        VDF_phf = float(VDF_phf)
                except (KeyError, TypeError):
                    # default value will be applied in the constructor
                    VDF_phf = -1
                # construct VDFPeriod object
                vdf = VDFPeriod(i, VDF_alpha, VDF_beta, VDF_mu,
                                VDF_fftt, VDF_cap, VDF_phf)
                link.vdfperiods.append(vdf)
            # set up outgoing links and incoming links
            from_node = nodes[from_node_no]
            to_node = nodes[to_node_no]
            from_node.add_outgoing_link(link)
            to_node.add_incoming_link(link)
            links.append(link)
            # set up zone degrees
            if load_demand:
                oz_id = from_node.get_zone_id()
                dz_id = to_node.get_zone_id()
                _update_orig_zone(oz_id)
                _update_dest_zone(dz_id)
            link_seq_no += 1
        print(f'the number of links is {link_seq_no}')
def read_demand(input_dir,
                file,
                agent_type_id,
                demand_period_id,
                zone_to_node_dict,
                column_pool):
    """ step 3:read input_agent

    Loads OD volumes from the given demand file into column_pool, keyed by
    (agent_type, demand_period, origin_zone, destination_zone). Raises when
    no valid OD volume is found at all.
    """
    with open(input_dir+'/'+file, 'r') as fp:
        print('read '+file)
        at = agent_type_id
        dp = demand_period_id
        reader = csv.DictReader(fp)
        total_agents = 0
        for rec in reader:
            # discard records whose origin zone id is not an integer
            orig = _convert_str_to_int(rec['o_zone_id'])
            if orig is None:
                continue
            # discard records whose destination zone id is not an integer
            dest = _convert_str_to_int(rec['d_zone_id'])
            if dest is None:
                continue
            # both zones must exist in node.csv
            if orig not in zone_to_node_dict or dest not in zone_to_node_dict:
                continue
            vol = _convert_str_to_float(rec['volume'])
            if vol is None or vol == 0:
                continue
            # precheck on connectivity of each OD pair
            if not _are_od_connected(orig, dest):
                continue
            # accumulate volume for this OD's ColumnVec
            key = (at, dp, orig, dest)
            if key not in column_pool:
                column_pool[key] = ColumnVec()
            column_pool[key].od_vol += vol
            total_agents += int(vol + 1)
        print(f'the number of agents is {total_agents}')
        if total_agents == 0:
            raise Exception('NO VALID OD VOLUME!! DOUBLE CHECK YOUR demand.csv')
def _auto_setup(assignment):
    """ automatically set up one demand period and one agent type
    The two objects will be set up using the default constructors using the
    default values. See class DemandPeriod and class AgentType for details
    """
    assignment.update_agent_types(AgentType())
    assignment.update_demand_periods(DemandPeriod())
    assignment.update_demands(Demand())
def read_settings(input_dir, assignment):
    """Load agent types, demand periods, and demand files from settings.yml.

    Falls back to a single default agent type and demand period when pyyaml
    is not installed or settings.yml is missing. The previous trailing
    ``except Exception as e: raise e`` was a no-op re-raise and is removed;
    unexpected exceptions simply propagate with their original traceback.
    """
    try:
        import yaml as ym
        with open(input_dir+'/settings.yml') as file:
            settings = ym.full_load(file)
            # agent types
            agents = settings['agents']
            for i, a in enumerate(agents):
                agent_type = a['type']
                agent_name = a['name']
                agent_vot = a['vot']
                agent_flow_type = a['flow_type']
                agent_pce = a['pce']
                agent_ffs = a['free_speed']
                at = AgentType(i,
                               agent_type,
                               agent_name,
                               agent_vot,
                               agent_flow_type,
                               agent_pce,
                               agent_ffs)
                assignment.update_agent_types(at)
            # demand periods
            demand_periods = settings['demand_periods']
            for i, d in enumerate(demand_periods):
                period = d['period']
                time_period = d['time_period']
                dp = DemandPeriod(i, period, time_period)
                assignment.update_demand_periods(dp)
            # demand files
            demands = settings['demand_files']
            for i, d in enumerate(demands):
                demand_file = d['file_name']
                # demand_format_type = d['format_type']
                demand_period = d['period']
                demand_type = d['agent_type']
                demand = Demand(i, demand_period, demand_type, demand_file)
                assignment.update_demands(demand)
    except ImportError:
        # just in case user does not have pyyaml installed
        print('Please install pyyaml next time!')
        print('Engine will set up one demand period and one agent type using '
              'default values for you, which might NOT reflect your case!\n')
        _auto_setup(assignment)
    except FileNotFoundError:
        # just in case user does not provide settings.yml
        print('Please provide settings.yml next time!')
        print('Engine will set up one demand period and one agent type using '
              'default values for you, which might NOT reflect your case!\n')
        _auto_setup(assignment)
def read_network(load_demand=True, input_dir='.'):
    """Build the assignment and network from csv/yml inputs under input_dir.

    load_demand: whether to load OD demand files after nodes and links.
        The default used to be the string 'true' (which made even 'false'
        truthy); it is now a real boolean. Any truthy/falsy value callers
        already pass keeps working.

    Returns a UI wrapper around the configured assignment.
    """
    assignm = Assignment()
    network = Network()
    read_settings(input_dir, assignm)
    read_nodes(input_dir,
               network.node_list,
               network.node_id_to_no_dict,
               network.node_no_to_id_dict,
               network.zone_to_nodes_dict)
    read_links(input_dir,
               network.link_list,
               network.node_list,
               network.node_id_to_no_dict,
               network.link_id_dict,
               assignm.get_agent_type_count(),
               assignm.get_demand_period_count(),
               load_demand)
    if load_demand:
        # one demand file per (agent type, demand period) combination
        for d in assignm.get_demands():
            at = assignm.get_agent_type_id(d.get_agent_type_str())
            dp = assignm.get_demand_period_id(d.get_period())
            read_demand(input_dir,
                        d.get_file_name(),
                        at,
                        dp,
                        network.zone_to_nodes_dict,
                        assignm.column_pool)
    network.update(assignm.get_agent_type_count(),
                   assignm.get_demand_period_count())
    assignm.network = network
    assignm.setup_spnetwork()
    ui = UI(assignm)
    return ui
def load_columns(ui, input_dir='.'):
    """ developer note: do we use agent.csv to set up network?

    Rebuilds column (path) objects from a previously written agent.csv and
    re-applies their volumes, then refreshes link volumes.

    Bug fix: when 'distance' was 0 or missing, the recomputed sum of link
    lengths was discarded instead of being assigned to ``dist``; it is now
    assigned before col.set_distance(dist).
    """
    with open(input_dir+'/agent.csv', 'r') as f:
        print('read agent.csv')
        A = ui._base_assignment
        reader = csv.DictReader(f)
        # just in case agent_id was not outputed
        last_agent_id = 0
        for line in reader:
            # critical info
            oz_id = _convert_str_to_int(line['o_zone_id'])
            if oz_id is None:
                continue
            dz_id = _convert_str_to_int(line['d_zone_id'])
            if dz_id is None:
                continue
            node_seq = line['node_sequence']
            if node_seq is None:
                continue
            link_seq = line['link_sequence']
            if link_seq is None:
                continue
            # non-critical info
            agent_id = _convert_str_to_int(line['agent_id'])
            if agent_id is None:
                agent_id = last_agent_id + 1
            last_agent_id = agent_id
            # it could be empty
            # path_id = line['path_id']
            at = line['agent_type']
            if not at:
                continue
            else:
                at = A.get_agent_type_id(at)
            dp = line['demand_period']
            if not dp:
                continue
            else:
                dp = A.get_demand_period_id(dp)
            vol = _convert_str_to_float(line['volume'])
            if vol is None:
                continue
            toll = _convert_str_to_float(line['toll'])
            if toll is None:
                toll = 0
            tt = _convert_str_to_float(line['travel_time'])
            if tt is None:
                tt = 0
            dist = _convert_str_to_float(line['distance'])
            if dist is None:
                dist = 0
            # it could be empty
            geo = line['geometry']
            if (at, dp, oz_id, dz_id) not in A.get_column_pool().keys():
                continue
            cv = A.get_column_vec(at, dp, oz_id, dz_id)
            node_path = None
            try:
                # if x is only needed for columns generated from DTALite,
                # which have the trailing ';' and leads to '' after split
                node_path = [int(x) for x in node_seq.split(';') if x]
            except ValueError:
                raise Exception(
                    f'INVALID NODE PATH found for agent id: {agent_id}'
                )
            # node_sum identifies a path inside the ColumnVec
            node_sum = sum(node_path)
            if node_sum not in cv.path_node_seq_map.keys():
                path_seq_no = cv.get_column_num()
                col = Column(path_seq_no)
                try:
                    col.nodes = [A.get_node_no(x) for x in node_path]
                except IndexError:
                    raise Exception(
                        'Invalid node found on column!!'
                        'Did you use agent.csv from a different network?'
                    )
                try:
                    # if x is only needed for columns generated from DTALite,
                    # which have the trailing ';' and leads to '' after split
                    col.links = [
                        A.get_link_seq_no(x) for x in link_seq.split(';') if x
                    ]
                except IndexError:
                    raise Exception(
                        'INVALID link found on column!!'
                        'Did you use agent.csv from a different network?'
                    )
                except ValueError:
                    raise Exception(
                        f'INVALID LINK PATH found for agent id: {agent_id}'
                    )
                # the following four are non-critical info
                col.set_toll(toll)
                col.set_travel_time(tt)
                col.set_geometry(geo)
                if dist == 0:
                    # BUG FIX: assign the recomputed distance (previously the
                    # sum was computed and thrown away)
                    dist = sum(A.get_link(x).get_length() for x in col.links)
                col.set_distance(dist)
                cv.add_new_column(node_sum, col)
            cv.get_column(node_sum).increase_volume(vol)
    update_links_using_columns(ui)
def output_columns(ui, output_geometry=True, output_dir='.'):
    """Write every positive-volume column (path) in the column pool to
    output_dir/agent.csv, one row per column, optionally with a WKT
    LINESTRING geometry built from node coordinates."""
    with open(output_dir+'/agent.csv', 'w', newline='') as fp:
        base = ui._base_assignment
        nodes = base.get_nodes()
        links = base.get_links()
        column_pool = base.get_column_pool()
        writer = csv.writer(fp)
        line = ['agent_id',
                'o_zone_id',
                'd_zone_id',
                'path_id',
                'agent_type',
                'demand_period',
                'volume',
                'toll',
                'travel_time',
                'distance',
                'node_sequence',
                'link_sequence',
                'geometry']
        writer.writerow(line)
        path_sep = ';'
        # i doubles as a running agent_id across all columns
        i = 0
        for k, cv in column_pool.items():
            if cv.get_od_volume() <= 0:
                continue
            # k = (at_id, dp_id, oz_id, dz_id)
            at_id = k[0]
            dp_id = k[1]
            oz_id = k[2]
            dz_id = k[3]
            at_str = base.get_agent_type_str(at_id)
            dp_str = base.get_demand_period_str(dp_id)
            for col in cv.get_columns().values():
                i += 1
                # node/link sequences are stored reversed internally, so
                # reverse them back for output
                node_seq = path_sep.join(
                    str(nodes[x].get_node_id()) for x in reversed(col.nodes)
                )
                link_seq = path_sep.join(
                    str(links[x].get_link_id()) for x in reversed(col.links)
                )
                geometry = ''
                if output_geometry:
                    geometry = ', '.join(
                        nodes[x].get_coordinate() for x in reversed(col.nodes)
                    )
                    geometry = 'LINESTRING (' + geometry + ')'
                line = [i,
                        oz_id,
                        dz_id,
                        col.get_seq_no(),
                        at_str,
                        dp_str,
                        col.get_volume(),
                        col.get_toll(),
                        col.get_travel_time(),
                        col.get_distance(),
                        node_seq,
                        link_seq,
                        geometry]
                writer.writerow(line)
    if output_dir == '.':
        print('\ncheck agent.csv in '
              +os.getcwd()+' for path finding results')
    else:
        print('\ncheck agent.csv in '
              +os.path.join(os.getcwd(), output_dir)
              +' for path finding results')
def output_link_performance(ui, output_dir='.'):
    """Write per-link, per-demand-period flow/travel-time/speed/VOC rows to
    output_dir/link_performance.csv (queue, density, notes left blank)."""
    with open(output_dir+'/link_performance.csv', 'w', newline='') as fp:
        base = ui._base_assignment
        links = base.get_links()
        writer = csv.writer(fp)
        line = ['link_id',
                'from_node_id',
                'to_node_id',
                'time_period',
                'volume',
                'travel_time',
                'speed',
                'VOC',
                'queue',
                'density',
                'geometry',
                'notes']
        writer.writerow(line)
        for link in links:
            for dp in base.get_demand_periods():
                avg_travel_time = link.get_period_avg_travel_time(dp.get_id())
                # travel time is in minutes; SMALL_DIVISOR guards div-by-zero
                speed = link.get_length() / (max(SMALL_DIVISOR, avg_travel_time) / 60)
                line = [link.get_link_id(),
                        link.get_from_node_id(),
                        link.get_to_node_id(),
                        dp.get_period(),
                        link.get_period_flow_vol(dp.get_id()),
                        avg_travel_time,
                        speed,
                        link.get_period_voc(dp.get_id()),
                        '',
                        '',
                        link.get_geometry(),
                        '']
                writer.writerow(line)
    if output_dir == '.':
        print('\ncheck link_performance.csv in '
              +os.getcwd()+' for link performance')
    else:
        print('\ncheck link_performance.csv in '
              +os.path.join(os.getcwd(), output_dir)
              +' for link performance')
def output_agent_paths(ui, output_geometry=True, output_dir='.'):
with open(output_dir+'/agent_paths.csv', 'w', newline='') as f:
writer = csv.writer(f)
line = ['agent_id',
'o_zone_id',
'd_zone_id',
'path_id',
'agent_type',
'demand_period',
'volume',
'toll',
'travel_time',
'distance',
'node_sequence',
'link_sequence',
'geometry']
writer.writerow(line)
base = ui._base_assignment
nodes = base.get_nodes()
agents = base.get_agents()
agents.sort(key=lambda agent: agent.get_orig_node_id())
pre_dest_node_id = -1
for a in agents:
if not a.get_node_path():
continue
if a.get_dest_node_id() == pre_dest_node_id:
continue
pre_dest_node_id = a.get_dest_node_id()
agent_id = a.get_id()
geometry = ''
if output_geometry:
geometry = ', '.join(
nodes[x].get_coordinate() for x in reversed(a.get_node_path())
)
geometry = 'LINESTRING (' + geometry + ')'
line = [agent_id,
a.get_orig_zone_id(),
a.get_dest_zone_id(),
0,
'N/A',
'N/A',
'N/A',
'N/A',
'N/A',
a.get_path_cost(),
base.get_agent_node_path(agent_id, True),
base.get_agent_link_path(agent_id, True),
geometry]
writer.writerow(line)
if output_dir == '.':
print('\ncheck agent_paths.csv in '
+os.getcwd()+' for unique agent paths')
else:
print('\ncheck agent_paths.csv in '
+os.path.join(os.getcwd(), output_dir)
+' for unique agent paths') | [
"yaml.full_load",
"csv.DictReader",
"csv.writer",
"os.path.join",
"requests.get",
"os.getcwd",
"os.path.isdir",
"os.mkdir",
"threading.Thread"
] | [((2460, 2477), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2472, 2477), False, 'import requests\n'), ((3292, 3319), 'os.path.isdir', 'os.path.isdir', (['loc_data_dir'], {}), '(loc_data_dir)\n', (3305, 3319), False, 'import os\n'), ((3329, 3351), 'os.mkdir', 'os.mkdir', (['loc_data_dir'], {}), '(loc_data_dir)\n', (3337, 3351), False, 'import os\n'), ((4259, 4277), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (4273, 4277), False, 'import csv\n'), ((5976, 5994), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (5990, 5994), False, 'import csv\n'), ((13164, 13182), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (13178, 13182), False, 'import csv\n'), ((18887, 18904), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (18901, 18904), False, 'import csv\n'), ((23120, 23134), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (23130, 23134), False, 'import csv\n'), ((25585, 25599), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (25595, 25599), False, 'import csv\n'), ((27210, 27223), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (27220, 27223), False, 'import csv\n'), ((3433, 3463), 'os.path.join', 'os.path.join', (['loc_data_dir', 'ds'], {}), '(loc_data_dir, ds)\n', (3445, 3463), False, 'import os\n'), ((3486, 3512), 'os.path.isdir', 'os.path.isdir', (['loc_sub_dir'], {}), '(loc_sub_dir)\n', (3499, 3512), False, 'import os\n'), ((3526, 3547), 'os.mkdir', 'os.mkdir', (['loc_sub_dir'], {}), '(loc_sub_dir)\n', (3534, 3547), False, 'import os\n'), ((3636, 3710), 'threading.Thread', 'threading.Thread', ([], {'target': '_download_url', 'args': '(web_dir + x, x, loc_sub_dir)'}), '(target=_download_url, args=(web_dir + x, x, loc_sub_dir))\n', (3652, 3710), False, 'import threading\n'), ((15200, 15218), 'yaml.full_load', 'ym.full_load', (['file'], {}), '(file)\n', (15212, 15218), True, 'import yaml as ym\n'), ((3923, 3934), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3932, 3934), False, 'import os\n'), 
((25170, 25181), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (25179, 25181), False, 'import os\n'), ((26851, 26862), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26860, 26862), False, 'import os\n'), ((28948, 28959), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (28957, 28959), False, 'import os\n'), ((25299, 25310), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (25308, 25310), False, 'import os\n'), ((26987, 26998), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26996, 26998), False, 'import os\n'), ((29081, 29092), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (29090, 29092), False, 'import os\n')] |
#!/usr/bin/env python
from __future__ import with_statement
from geode import Prop,PropManager,cache
from geode.value import Worker
import sys
def worker_test_factory(props):
    """Create a cached value computing x*y from the given property set.

    Reads existing prop 'x' and registers a new prop 'y' (default 5).
    """
    xp = props.get('x')
    yp = props.add('y',5)
    return cache(lambda: xp()*yp())
def remote(conn):
    """Worker-side test routine: asserts input x==7, then publishes output
    props 'n' (counts 0..9) and 'done' (flipped to True at the end)."""
    inputs = conn.inputs
    x = inputs.get('x')
    assert x()==7
    n = Prop('n',-1)
    done = Prop('done',False)
    conn.add_output('n',n)
    conn.add_output('done',done)
    # NOTE(review): xrange => Python 2 code; keep consistent with the file
    for i in xrange(10):
        n.set(i)
    done.set(True)
def test_worker():
    """End-to-end Worker test: cached-value creation, dependency
    invalidation on prop change, and remote function execution."""
    command_file = __file__
    if command_file.endswith('.pyc'):
        command_file=command_file[:-3]+'py'
    # exercise both the in-process worker (command=None) and a subprocess
    # worker relaunching this file with --worker
    for command in None,[command_file,'--worker']:
        props = PropManager()
        x = props.add('x',3)
        props.add('y',5)
        with Worker.Worker(debug=0,command=command) as worker:
            worker.add_props(props)
            xy = worker.create('xy',worker_test_factory)
            # value is unknown until explicitly pulled and processed
            assert xy() is None
            worker.pull('xy')
            worker.process(timeout=None,count=1)
            assert xy()==3*5
            x.set(7)
            worker.process(timeout=None,count=1)
            # changing a dependency invalidates the cached value until pulled
            assert xy()==None
            worker.pull('xy')
            worker.process(timeout=None,count=1)
            assert xy()==7*5
            # Test remote function execution
            worker.run(remote)
            n = worker.wait_for_output('n')
            done = worker.wait_for_output('done')
            seen = []
            while not done():
                worker.process(timeout=None,count=1)
                seen.append(n())
            # NOTE(review): range(10)+[9] is Python 2 list concatenation;
            # the final 9 repeats because 'done' arrives after the last n
            assert seen==range(10)+[9]
if __name__=='__main__':
    # spawned as "<script> --worker <arg>": run the standalone worker loop;
    # otherwise run the test directly
    argv = sys.argv
    if len(argv)==3 and argv[1]=='--worker':
        Worker.worker_standalone_main(argv[2])
    else:
        test_worker()
| [
"geode.value.Worker.Worker",
"geode.value.Worker.worker_standalone_main",
"geode.Prop",
"geode.PropManager"
] | [((338, 351), 'geode.Prop', 'Prop', (['"""n"""', '(-1)'], {}), "('n', -1)\n", (342, 351), False, 'from geode import Prop, PropManager, cache\n'), ((360, 379), 'geode.Prop', 'Prop', (['"""done"""', '(False)'], {}), "('done', False)\n", (364, 379), False, 'from geode import Prop, PropManager, cache\n'), ((671, 684), 'geode.PropManager', 'PropManager', ([], {}), '()\n', (682, 684), False, 'from geode import Prop, PropManager, cache\n'), ((1530, 1572), 'geode.value.Worker.worker_standalone_main', 'Worker.worker_standalone_main', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (1559, 1572), False, 'from geode.value import Worker\n'), ((740, 779), 'geode.value.Worker.Worker', 'Worker.Worker', ([], {'debug': '(0)', 'command': 'command'}), '(debug=0, command=command)\n', (753, 779), False, 'from geode.value import Worker\n')] |
"""Tools file for Supervisor."""
import asyncio
from contextvars import ContextVar
from ipaddress import IPv4Address
import logging
from pathlib import Path
import re
import socket
from typing import Any
from .job_monitor import JobMonitor
# Module-level logger for this package.
_LOGGER: logging.Logger = logging.getLogger(__name__)

# Matches ANSI escape/color sequences so they can be stripped from output.
RE_STRING: re.Pattern = re.compile(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))")

# Per-context JobMonitor for the currently running locked task.
# BUG FIX: 'Optional' was referenced in the evaluated annotation without
# being imported (NameError at import time); a string annotation keeps the
# type information without requiring the import.
job_monitor: "ContextVar[JobMonitor | None]" = ContextVar("job_monitor", default=None)
def convert_to_ascii(raw: bytes) -> str:
    """Decode bytes to text and strip ANSI escape/color sequences."""
    decoded = raw.decode()
    return RE_STRING.sub("", decoded)
def process_lock(method):
    """Wrap function with only run once.

    Decorator for async API methods: if the API object's lock is already
    held, the call is rejected (returns False) rather than queued.
    """
    async def wrap_api(api, *args, **kwargs):
        """Return api wrapper."""
        # assumes api.lock is an asyncio-compatible lock — consistent with
        # the 'async with' below
        if api.lock.locked():
            _LOGGER.error(
                "Can't execute %s while a task is in progress", method.__name__
            )
            # concurrent invocations are refused, not serialized
            return False
        async with api.lock:
            # publish a JobMonitor for the duration of the locked call
            job_monitor.set(JobMonitor(api))
            return await method(api, *args, **kwargs)
    return wrap_api
def check_port(address: IPv4Address, port: int) -> bool:
    """Check if port is mapped.

    Attempts a TCP connect with a 0.5s timeout and returns True when the
    connection succeeds. The socket is now always closed in a finally
    block (previously it leaked when connect_ex raised OSError).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(0.5)
    try:
        # connect_ex returns 0 on a successful TCP connect
        result = sock.connect_ex((str(address), port))
        if result == 0:
            return True
    except OSError:
        pass
    finally:
        sock.close()
    return False
def check_exception_chain(err: Exception, object_type: Any) -> bool:
    """Check if exception chain include sub exception.

    Walks the __context__ links iteratively; in practice mostly only the
    latest few entries matter.
    """
    current = err
    while current:
        if issubclass(type(current), object_type):
            return True
        current = current.__context__
    return False
def get_message_from_exception_chain(err: Exception) -> str:
    """Get the first message from the exception chain.

    Returns the first non-empty str() along the __context__ links, or ""
    when every exception in the chain has an empty message.
    """
    current = err
    while current:
        message = str(current)
        if message:
            return message
        current = current.__context__
    return ""
async def remove_folder(folder: Path, content_only: bool = False) -> None:
    """Remove folder and reset privileged.

    Is needed to avoid issue with:
    - CAP_DAC_OVERRIDE
    - CAP_DAC_READ_SEARCH
    """
    # The brace glob "{,.[!.],..?}*" matches regular and hidden entries but
    # not "." or "..", so with content_only only the folder's contents go.
    del_folder = f"{folder}" + "/{,.[!.],..?}*" if content_only else f"{folder}"
    try:
        # bash (not sh) is required: brace expansion is not POSIX
        proc = await asyncio.create_subprocess_exec(
            "bash", "-c", f"rm -rf {del_folder}", stdout=asyncio.subprocess.DEVNULL
        )
        _, error_msg = await proc.communicate()
    except OSError as err:
        error_msg = str(err)
    else:
        if proc.returncode == 0:
            return
    _LOGGER.error("Can't remove folder %s: %s", folder, error_msg)
| [
"logging.getLogger",
"socket.socket",
"re.compile",
"contextvars.ContextVar",
"asyncio.create_subprocess_exec"
] | [((268, 295), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (285, 295), False, 'import logging\n'), ((321, 377), 're.compile', 're.compile', (['"""\\\\x1b(\\\\[.*?[@-~]|\\\\].*?(\\\\x07|\\\\x1b\\\\\\\\))"""'], {}), "('\\\\x1b(\\\\[.*?[@-~]|\\\\].*?(\\\\x07|\\\\x1b\\\\\\\\))')\n", (331, 377), False, 'import re\n'), ((421, 460), 'contextvars.ContextVar', 'ContextVar', (['"""job_monitor"""'], {'default': 'None'}), "('job_monitor', default=None)\n", (431, 460), False, 'from contextvars import ContextVar\n'), ((1185, 1234), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1198, 1234), False, 'import socket\n'), ((2478, 2586), 'asyncio.create_subprocess_exec', 'asyncio.create_subprocess_exec', (['"""bash"""', '"""-c"""', 'f"""rm -rf {del_folder}"""'], {'stdout': 'asyncio.subprocess.DEVNULL'}), "('bash', '-c', f'rm -rf {del_folder}', stdout\n =asyncio.subprocess.DEVNULL)\n", (2508, 2586), False, 'import asyncio\n')] |
from unittest import TestCase
from convertfracts import convertFracts
class TestConvertFracts(TestCase):
    """Unit tests for convertFracts (conversion to a common denominator)."""

    def test_convertFracts_01(self):
        # simple case: common denominator of 2, 3, 4 is 12
        self.assertEqual(convertFracts([[1, 2], [1, 3], [1, 4]]),
                         [[6, 12], [4, 12], [3, 12]])

    def test_convertFracts_02(self):
        # empty input yields empty output
        self.assertEqual(convertFracts([]), [])

    def test_convertFracts_03(self):
        # large denominators still get one exact common denominator
        self.assertEqual(convertFracts([[27115, 5262],
                                        [87546, 11111111],
                                        [43216, 255689]]),
                         [[77033412951888085, 14949283383840498],
                          [117787497858828, 14949283383840498],
                          [2526695441399712, 14949283383840498]])
| [
"convertfracts.convertFracts"
] | [((175, 214), 'convertfracts.convertFracts', 'convertFracts', (['[[1, 2], [1, 3], [1, 4]]'], {}), '([[1, 2], [1, 3], [1, 4]])\n', (188, 214), False, 'from convertfracts import convertFracts\n'), ((337, 354), 'convertfracts.convertFracts', 'convertFracts', (['[]'], {}), '([])\n', (350, 354), False, 'from convertfracts import convertFracts\n'), ((423, 489), 'convertfracts.convertFracts', 'convertFracts', (['[[27115, 5262], [87546, 11111111], [43216, 255689]]'], {}), '([[27115, 5262], [87546, 11111111], [43216, 255689]])\n', (436, 489), False, 'from convertfracts import convertFracts\n')] |
from PIL import Image
from tflite_runtime.interpreter import Interpreter
from tflite_runtime.interpreter import load_delegate
from video import create_capture
import numpy as np
import cv2 as cv
import io
import picamera
import simpleaudio as sa
# tf model upload
def load_labels(path):
with open(path, 'r') as f:
return {i: line.strip() for i, line in enumerate(f.readlines())}
def set_input_tensor(interpreter, image):
tensor_index = interpreter.get_input_details()[0]['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
# check whether user wears helmet
def classify_image(interpreter, image, top_k=1):
set_input_tensor(interpreter, image)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = np.squeeze(interpreter.get_tensor(output_details['index']))
# If the model is quantized (uint8 data), then dequantize the results
if output_details['dtype'] == np.uint8:
scale, zero_point = output_details['quantization']
output = scale * (output - zero_point)
ordered = np.argpartition(-output, top_k)
# if 0.90 above then regard user is wearing a helmet
if (top_k==1) and (output[1] > 0.9):
res = 1
else:
res = 0
return res
# for detect human face
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
return rects
def main():
import sys, getopt
checknum = 0
while True:
try:
# face recognizing code
print('face camera ')
args, video_src = getopt.getopt(sys.argv[1:2], '', ['cascade=', 'nested-cascade='])
try:
video_src = video_src[0]
except:
video_src = 0
args = dict(args)
cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml")
cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
while True:
ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.equalizeHist(gray)
rects = detect(gray, cascade)
vis = img.copy()
if len(rects):
if not nested.empty():
print('into nested') # 사람이 들어왔을 때
for x1, y1, x2, y2 in rects:
roi = gray[y1:y2, x1:x2]
vis_roi = vis[y1:y2, x1:x2]
print('findrects')
subrects = detect(roi.copy(), nested)
if subrects!='[]':
faceok = 'faceok.wav'
fa = sa.WaveObject.from_wave_file(faceok)
face = fa.play()
face.wait_done()
print('detect!!')
break
cam.release() # face recognition camera off
print("helmet camera")
# helmet detectecting code
filename = 'helmet.wav'
wave_obj = sa.WaveObject.from_wave_file(filename)
helmetok = 'helmetok.wav'
wave = sa.WaveObject.from_wave_file(helmetok)
labels = "labels.txt"
model = "model_edgetpu.tflite"
interpreter = Interpreter(model, experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
# helmet detect camera on
with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
camera.start_preview()
try:
stream = io.BytesIO()
for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):
stream.seek(0)
image = Image.open(stream).convert('RGB').resize((width, height),Image.ANTIALIAS)
results = classify_image(interpreter, image)
print("result:")
print(results)
stream.seek(0)
stream.truncate()
# 헬멧 착용여부 판단
if results==0:
play_obj = wave_obj.play()
play_obj.wait_done()
checknum += 1
if checknum==3:
checknum = 0
break;
else:
helm = wave.play()
helm.wait_done()
print('GoodBoy')
break
finally:
camera.stop_preview()
except KeyboardInterrupt:
break
if __name__ == '__main__':
main()
cv.destroyAllWindows()
| [
"simpleaudio.WaveObject.from_wave_file",
"getopt.getopt",
"PIL.Image.open",
"numpy.argpartition",
"cv2.samples.findFile",
"io.BytesIO",
"picamera.PiCamera",
"cv2.equalizeHist",
"tflite_runtime.interpreter.load_delegate",
"cv2.destroyAllWindows",
"cv2.cvtColor"
] | [((1193, 1224), 'numpy.argpartition', 'np.argpartition', (['(-output)', 'top_k'], {}), '(-output, top_k)\n', (1208, 1224), True, 'import numpy as np\n'), ((5829, 5851), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (5849, 5851), True, 'import cv2 as cv\n'), ((1887, 1952), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:2]', '""""""', "['cascade=', 'nested-cascade=']"], {}), "(sys.argv[1:2], '', ['cascade=', 'nested-cascade='])\n", (1900, 1952), False, 'import sys, getopt\n'), ((3844, 3882), 'simpleaudio.WaveObject.from_wave_file', 'sa.WaveObject.from_wave_file', (['filename'], {}), '(filename)\n', (3872, 3882), True, 'import simpleaudio as sa\n'), ((3942, 3980), 'simpleaudio.WaveObject.from_wave_file', 'sa.WaveObject.from_wave_file', (['helmetok'], {}), '(helmetok)\n', (3970, 3980), True, 'import simpleaudio as sa\n'), ((2355, 2386), 'cv2.samples.findFile', 'cv.samples.findFile', (['cascade_fn'], {}), '(cascade_fn)\n', (2374, 2386), True, 'import cv2 as cv\n'), ((2431, 2461), 'cv2.samples.findFile', 'cv.samples.findFile', (['nested_fn'], {}), '(nested_fn)\n', (2450, 2461), True, 'import cv2 as cv\n'), ((2704, 2739), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (2715, 2739), True, 'import cv2 as cv\n'), ((2764, 2785), 'cv2.equalizeHist', 'cv.equalizeHist', (['gray'], {}), '(gray)\n', (2779, 2785), True, 'import cv2 as cv\n'), ((4355, 4409), 'picamera.PiCamera', 'picamera.PiCamera', ([], {'resolution': '(640, 480)', 'framerate': '(30)'}), '(resolution=(640, 480), framerate=30)\n', (4372, 4409), False, 'import picamera\n'), ((4513, 4525), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4523, 4525), False, 'import io\n'), ((2549, 2593), 'cv2.samples.findFile', 'cv.samples.findFile', (['"""samples/data/lena.jpg"""'], {}), "('samples/data/lena.jpg')\n", (2568, 2593), True, 'import cv2 as cv\n'), ((4134, 4168), 'tflite_runtime.interpreter.load_delegate', 'load_delegate', 
(['"""libedgetpu.so.1.0"""'], {}), "('libedgetpu.so.1.0')\n", (4147, 4168), False, 'from tflite_runtime.interpreter import load_delegate\n'), ((3419, 3455), 'simpleaudio.WaveObject.from_wave_file', 'sa.WaveObject.from_wave_file', (['faceok'], {}), '(faceok)\n', (3447, 3455), True, 'import simpleaudio as sa\n'), ((4700, 4718), 'PIL.Image.open', 'Image.open', (['stream'], {}), '(stream)\n', (4710, 4718), False, 'from PIL import Image\n')] |
"""
tellotracker:
Allows manual operation of the drone and demo tracking mode.
Requires mplayer to record/save video.
Controls:
- tab to lift off
- WASD to move the drone
- space/shift to ascend/escent slowly
- Q/E to yaw slowly
- arrow keys to ascend, descend, or yaw quickly
- backspace to land, or P to palm-land
- enter to take a picture
- R to start recording video, R again to stop recording
(video and photos will be saved to a timestamped file in ~/Pictures/)
- Z to toggle camera zoom state
(zoomed-in widescreen or high FOV 4:3)
- T to toggle tracking
@author <NAME>, <NAME> and <NAME>
@copyright 2018 see license file for details
"""
import time
import datetime
import os
import tellopy
import numpy
import av
import cv2
from pynput import keyboard
from tracker import Tracker
#posenet
import os
import numpy as np
import sys
from tensorflow.lite.python.interpreter import Interpreter
from PIL import Image
import math
import threading
import traceback
# Latest decoded video frame, shared between recv_thread and main().
# None until the receiver thread has produced its first frame.
frame = None
# Polled by the receiver thread's loop; set to False to ask it to stop.
run_recv_thread = True
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    The naive form ``1 / (1 + math.exp(-x))`` raises OverflowError for
    x below roughly -709 (math.exp overflows a C double); splitting on the
    sign keeps the exponent non-positive so exp() can only underflow to 0.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # For negative x use the algebraically equivalent e^x / (1 + e^x).
    z = math.exp(x)
    return z / (1 + z)
def argmax2d(inp_3d):
    """
    Get the (row, col) argmax position of each of the 17 part heatmaps.

    Returns a (17, 2) float array of positions and a (17, 1) float array
    with the corresponding maximum heatmap values.
    """
    num_parts = 17
    # Flatten the spatial dims so one argmax per channel finds the peak.
    flat = inp_3d.reshape(-1, inp_3d.shape[2])[:, :num_parts]
    best_flat_idx = flat.argmax(axis=0)
    rows, cols = np.unravel_index(best_flat_idx, inp_3d.shape[:2])
    heatmap_positions = np.stack((rows, cols), axis=1).astype(np.float64)
    heatmap_conf = flat.max(axis=0).reshape(num_parts, 1).astype(np.float64)
    return heatmap_positions, heatmap_conf
def get_offsetVector(heatmapPositions=None, offsets=None):
    """
    Look up, for each of the 17 keypoints, its pair of offset values at the
    keypoint's heatmap position. Offset channels 0..16 hold the first
    component, channels 17..33 the second. Returns a (17, 2) float array.
    """
    positions = np.asarray(heatmapPositions)
    rows = positions[:, 0].astype(int)
    cols = positions[:, 1].astype(int)
    channels = np.arange(17)
    result = np.zeros(shape=(17, 2))
    result[:, 0] = offsets[rows, cols, channels]
    result[:, 1] = offsets[rows, cols, channels + 17]
    return result
# ---- PoseNet / TFLite model configuration (module-level; read by TelloCV.process_frame) ----
MODEL_NAME = "pose_TFLite_model"
GRAPH_NAME = 'detect.tflite'
LABELMAP_NAME = 'labelmap.txt'
# Display resolution used to rescale keypoints back onto the video frame.
resW, resH = '952x720'.split('x')
imW, imH = int(resW), int(resH)
use_TPU = False
# Minimum per-keypoint confidence required before a keypoint is drawn/used.
min_thresh = 0.7
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
    labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
    del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
    # NOTE(review): load_delegate is not imported in this script's visible
    # import block — this branch would raise NameError; confirm before
    # enabling use_TPU.
    interpreter = Interpreter(model_path=PATH_TO_CKPT,
                              experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    print(PATH_TO_CKPT)
else:
    interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
# Normalisation constants for floating-point models (centre and scale).
input_mean = width/2
input_std = width/2
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
#posenet
def main():
    """ Create a tello controller and show the video feed."""
    tellotrack = TelloCV()
    # for packet in tellotrack.container.demux((tellotrack.vid_stream,)):
    #     for frame in packet.decode():
    #         start = time.time()
    #         image = tellotrack.process_frame(frame)
    #         print("image_time",time.time()-start)
    #         cv2.imshow('tello', image)
    #         _ = cv2.waitKey(1) & 0xFF
    #posenet
    try:
        # Decode frames on a background thread; display runs on this thread.
        threading.Thread(target=tellotrack.recv_thread).start()
        while True:
            if frame is None:
                time.sleep(0.01)
            else:
                # print("frame FOUNDD")
                image = tellotrack.process_frame(frame)
                cv2.imshow('Original', image)
                # cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                cv2.waitKey(1)
                # long delay
                # time.sleep(0.5)
                image = None
    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        # NOTE(review): without a `global` statement this creates a local
        # variable and does NOT clear the module-level run_recv_thread flag,
        # so the receiver thread keeps running — TODO confirm intent.
        run_recv_thread = False
        cv2.destroyAllWindows()
#posenet
class TelloCV(object):
    """
    TelloTracker builds keyboard controls on top of TelloPy as well
    as generating images from the video stream and enabling opencv support
    """

    def __init__(self):
        self.prev_flight_data = None
        self.record = False
        self.tracking = False
        self.keydown = False
        self.date_fmt = '%Y-%m-%d_%H%M%S'
        self.speed = 30
        self.drone = tellopy.Tello()
        self.init_drone() #posenet
        self.init_controls()
        # container for processing the packets into frames
        self.container = av.open(self.drone.get_video_stream())
        self.vid_stream = self.container.streams.video[0]
        self.out_file = None
        self.out_stream = None
        self.out_name = None
        self.start_time = time.time()
        # tracking a color
        green_lower = (30, 50, 50)
        green_upper = (80, 255, 255)
        #red_lower = (0, 50, 50)
        # red_upper = (20, 255, 255)
        # blue_lower = (110, 50, 50)
        # upper_blue = (130, 255, 255)
        self.track_cmd = ""
        # self.tracker = Tracker(self.vid_stream.height,
        #                        self.vid_stream.width,
        #                        green_lower, green_upper) #posenet
        # Fixed 720x960 to match the posenet display resolution.
        self.tracker = Tracker(720,
                               960,
                               green_lower, green_upper) #posenet

    #posenet
    def recv_thread(self):
        """Background thread: decode video packets into the global `frame`."""
        global frame
        global run_recv_thread
        print('start recv_thread()')
        # drone = tellopy.Tello()
        try:
            # self.drone.connect()
            # self.drone.wait_for_connection(60.0)
            # #posenet
            # self.drone.start_video()
            # self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,
            #                      self.flight_data_handler)
            # self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,
            #                      self.handle_flight_received)
            #posenet
            # container = av.open(self.drone.get_video_stream())
            frame_count = 0
            while run_recv_thread:
                for f in self.container.decode(video=0):
                    frame_count = frame_count + 1
                    # skip first 300 frames
                    if frame_count < 300:
                        continue
                    frame = f
                    time.sleep(0.01)
        except Exception as ex:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            print(ex)
        finally:
            self.drone.quit()
    #posenet

    def init_drone(self):
        """Connect, enable streaming and subscribe to events"""
        # self.drone.log.set_level(2)
        self.drone.connect()
        self.drone.wait_for_connection(60.0) #posenet
        self.drone.start_video()
        self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,
                             self.flight_data_handler)
        self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,
                             self.handle_flight_received)

    def on_press(self, keyname):
        """handler for keyboard listener"""
        if self.keydown:
            return
        try:
            self.keydown = True
            keyname = str(keyname).strip('\'')
            print('+' + keyname)
            if keyname == 'Key.esc':
                self.drone.quit()
                exit(0)
            if keyname in self.controls:
                key_handler = self.controls[keyname]
                # String entries name a drone method; callables are invoked directly.
                if isinstance(key_handler, str):
                    getattr(self.drone, key_handler)(self.speed)
                else:
                    key_handler(self.speed)
        except AttributeError:
            print('special key {0} pressed'.format(keyname))

    def on_release(self, keyname):
        """Reset on key up from keyboard listener"""
        self.keydown = False
        keyname = str(keyname).strip('\'')
        print('-' + keyname)
        if keyname in self.controls:
            key_handler = self.controls[keyname]
            # Speed 0 stops the movement started by the matching key press.
            if isinstance(key_handler, str):
                getattr(self.drone, key_handler)(0)
            else:
                key_handler(0)

    def init_controls(self):
        """Define keys and add listener"""
        self.controls = {
            'w': lambda speed: self.drone.forward(speed),#'forward',
            's': 'backward',
            'a': 'left',
            'd': 'right',
            'Key.space': 'up',
            'Key.shift': 'down',
            'Key.shift_r': 'down',
            'q': 'counter_clockwise',
            'e': 'clockwise',
            'i': lambda speed: self.drone.flip_forward(),
            'k': lambda speed: self.drone.flip_back(),
            'j': lambda speed: self.drone.flip_left(),
            'l': lambda speed: self.drone.flip_right(),
            # arrow keys for fast turns and altitude adjustments
            'Key.left': lambda speed: self.drone.counter_clockwise(speed),
            'Key.right': lambda speed: self.drone.clockwise(speed),
            'Key.up': lambda speed: self.drone.up(speed),
            'Key.down': lambda speed: self.drone.down(speed),
            'Key.tab': lambda speed: self.drone.takeoff(),
            'Key.backspace': lambda speed: self.drone.land(),
            'p': lambda speed: self.palm_land(speed),
            't': lambda speed: self.toggle_tracking(speed),
            'r': lambda speed: self.toggle_recording(speed),
            'z': lambda speed: self.toggle_zoom(speed),
            'Key.enter': lambda speed: self.take_picture(speed)
        }
        self.key_listener = keyboard.Listener(on_press=self.on_press,
                                              on_release=self.on_release)
        self.key_listener.start()
        # self.key_listener.join()

    def process_frame(self, frame):
        """convert frame to cv2 image and show"""
        # Start timer (for calculating frame rate)
        t1 = cv2.getTickCount()
        image = cv2.cvtColor(numpy.array(
            frame.to_image()), cv2.COLOR_RGB2BGR)
        image = self.write_hud(image)
        if self.record:
            self.record_vid(frame)
        # xoff, yoff = self.tracker.track(image)
        xoff, yoff = 0,0
        xLeftWrist, yLeftWrist =0,0
        xNose, yNose =0,0
        # print("CV xoff{}, yoff {}".format(xoff, yoff))
        #posenet
        # Run pose estimation: resize to the model input, infer, decode keypoints.
        frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height))
        input_data = np.expand_dims(frame_resized, axis=0)
        # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
        if floating_model:
            input_data = (np.float32(input_data) - input_mean) / input_std
        # Perform the actual detection by running the model with the image as input
        interpreter.set_tensor(input_details[0]['index'],input_data)
        interpreter.invoke()
        heatmapscores = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
        offsets = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
        # define vectorized sigmoid
        sigmoid_v = np.vectorize(sigmoid)
        # 1 sigmoid
        sigmoheatmapscores = sigmoid_v(heatmapscores)
        # 2 argmax2d
        heatmapPositions,heatmapConfidence = argmax2d(sigmoheatmapscores)
        # 3 offsetVectors
        offsetVectors = get_offsetVector(heatmapPositions,offsets)
        # 4 keypointPositions
        outputStride = 32
        keypointPositions = heatmapPositions * outputStride + offsetVectors
        # 5 draw keypoints
        for idx,el in enumerate(heatmapConfidence):
            if heatmapConfidence[idx][0] >= min_thresh:
                # Rescale model-space keypoints onto the display frame.
                x = round((keypointPositions[idx][1]/width)*imW)
                y = round((keypointPositions[idx][0]/height)*imH)
                if 'right' in labels[idx]:
                    cv2.circle(image,(int(x),int(y)), 5, (0,255,0), -1)
                elif 'left' in labels[idx]:
                    cv2.circle(image,(int(x),int(y)), 5, (0,0,255), -1)
                elif 'nose' in labels[idx]:
                    # The nose drives the tracking offsets from frame centre.
                    xNose, yNose = int(x),int(y)
                    xoff, yoff = (x-int(960/2)),(int(720/2)-y)
                    # print("NOSE xoff{}, yoff {}".format(xoff, yoff))
                    cv2.circle(image,(int(x),int(y)), 5, (255,0,0), -1)
                if 'leftWri' in labels[idx]:
                    xLeftWrist, yLeftWrist = int(x),int(y)
        #posenet
        def draw_arrows(frame):
            """Show the direction vector output in the cv2 window"""
            #cv2.putText(frame,"Color:", (0, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
            cv2.arrowedLine(frame, (int(960/2), int(720/2)),
                            (int(960/2 + xoff), int(720/2 - yoff)),
                            (0, 0, 255), 1)
            return frame
        # image = self.tracker.draw_arrows(image)
        image = draw_arrows(image)
        # Calculate framerate
        t2 = cv2.getTickCount()
        time1 = (t2-t1)/freq
        frame_rate_calc= 1/time1
        # Draw framerate in corner of frame
        cv2.putText(image,
                    'FPS: {0:.2f}'.format(frame_rate_calc),
                    (imW-200,30),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    (255,255,0),
                    1,
                    cv2.LINE_AA)
        distance = 150
        cmd = ""
        # print(yoff)
        # print("WRIST {}>>>> NOSE {}???? ".format(yLeftWrist,yNose),yLeftWrist>yNose)
        # Dead-band controller: only move when the nose offset exceeds
        # `distance` pixels (and is inside the frame).
        if self.tracking:
            # if yLeftWrist>yNose:
            #     print("RECORDING",yLeftWrist)
            #     cmd = "r"
            #     lambda speed: self.toggle_recording(speed)
            if xoff < -distance and xoff>-960/2:
                cmd = "counter_clockwise"
            elif xoff > distance and xoff<960/2:
                cmd = "clockwise"
            elif yoff < -distance and yoff>-720/2:
                cmd = "down"
            elif yoff > distance and yoff<720/2:
                print("UPPPPPPPPPPPPPPP",yoff)
                cmd = "up"
        else:
            # NOTE(review): `is not ""` compares identity, not equality;
            # should be `!= ""` — left unchanged here.
            if self.track_cmd is not "":
                getattr(self.drone, self.track_cmd)(0)
                self.track_cmd = ""
        if cmd is not self.track_cmd:
            if cmd is not "":
                print("track command:", cmd)
                getattr(self.drone, cmd)(self.speed)
            self.track_cmd = cmd
        return image

    def write_hud(self, frame):
        """Draw drone info, tracking and record on frame"""
        stats = self.prev_flight_data.split('|')
        stats.append("Tracking:" + str(self.tracking))
        if self.drone.zoom:
            stats.append("VID")
        else:
            stats.append("PIC")
        if self.record:
            diff = int(time.time() - self.start_time)
            mins, secs = divmod(diff, 60)
            stats.append("REC {:02d}:{:02d}".format(mins, secs))
        for idx, stat in enumerate(stats):
            text = stat.lstrip()
            cv2.putText(frame, text, (0, 30 + (idx * 30)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 0, 0), lineType=30)
        return frame

    def toggle_recording(self, speed):
        """Handle recording keypress, creates output stream and file"""
        if speed == 0:
            return
        self.record = not self.record
        if self.record:
            datename = [os.getenv('HOME'), datetime.datetime.now().strftime(self.date_fmt)]
            self.out_name = '{}/Pictures/tello-{}.mp4'.format(*datename)
            print("Outputting video to:", self.out_name)
            self.out_file = av.open(self.out_name, 'w')
            self.start_time = time.time()
            self.out_stream = self.out_file.add_stream(
                'mpeg4', self.vid_stream.rate)
            self.out_stream.pix_fmt = 'yuv420p'
            self.out_stream.width = self.vid_stream.width
            self.out_stream.height = self.vid_stream.height
        if not self.record:
            print("Video saved to ", self.out_name)
            self.out_file.close()
            self.out_stream = None

    def record_vid(self, frame):
        """
        convert frames to packets and write to file
        """
        new_frame = av.VideoFrame(
            width=frame.width, height=frame.height, format=frame.format.name)
        for i in range(len(frame.planes)):
            new_frame.planes[i].update(frame.planes[i])
        pkt = None
        try:
            pkt = self.out_stream.encode(new_frame)
        except IOError as err:
            print("encoding failed: {0}".format(err))
        if pkt is not None:
            try:
                self.out_file.mux(pkt)
            except IOError:
                print('mux failed: ' + str(pkt))

    def take_picture(self, speed):
        """Tell drone to take picture, image sent to file handler"""
        if speed == 0:
            return
        self.drone.take_picture()

    def palm_land(self, speed):
        """Tell drone to land"""
        if speed == 0:
            return
        self.drone.palm_land()

    def toggle_tracking(self, speed):
        """ Handle tracking keypress"""
        if speed == 0:  # handle key up event
            return
        self.tracking = not self.tracking
        print("tracking:", self.tracking)
        return

    def toggle_zoom(self, speed):
        """
        In "video" mode the self.drone sends 1280x720 frames.
        In "photo" mode it sends 2592x1936 (952x720) frames.
        The video will always be centered in the window.
        In photo mode, if we keep the window at 1280x720 that gives us ~160px on
        each side for status information, which is ample.
        Video mode is harder because then we need to abandon the 16:9 display size
        if we want to put the HUD next to the video.
        """
        if speed == 0:
            return
        self.drone.set_video_mode(not self.drone.zoom)

    def flight_data_handler(self, event, sender, data):
        """Listener to flight data from the drone."""
        text = str(data)
        if self.prev_flight_data != text:
            self.prev_flight_data = text

    def handle_flight_received(self, event, sender, data):
        """Create a file in ~/Pictures/ to receive image from the drone"""
        path = '%s/Pictures/tello-%s.jpeg' % (
            os.getenv('HOME'),
            datetime.datetime.now().strftime(self.date_fmt))
        with open(path, 'wb') as out_file:
            out_file.write(data)
        print('Saved photo to %s' % path)
if __name__ == '__main__':
main() | [
"time.sleep",
"cv2.imshow",
"sys.exc_info",
"av.open",
"cv2.destroyAllWindows",
"math.exp",
"traceback.print_exception",
"tracker.Tracker",
"cv2.waitKey",
"cv2.getTickFrequency",
"numpy.float32",
"cv2.putText",
"cv2.cvtColor",
"threading.Thread",
"cv2.resize",
"time.time",
"numpy.vec... | [((2062, 2073), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2071, 2073), False, 'import os\n'), ((2173, 2219), 'os.path.join', 'os.path.join', (['CWD_PATH', 'MODEL_NAME', 'GRAPH_NAME'], {}), '(CWD_PATH, MODEL_NAME, GRAPH_NAME)\n', (2185, 2219), False, 'import os\n'), ((2261, 2310), 'os.path.join', 'os.path.join', (['CWD_PATH', 'MODEL_NAME', 'LABELMAP_NAME'], {}), '(CWD_PATH, MODEL_NAME, LABELMAP_NAME)\n', (2273, 2310), False, 'import os\n'), ((3385, 3407), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (3405, 3407), False, 'import cv2\n'), ((1183, 1206), 'numpy.zeros', 'np.zeros', ([], {'shape': '(17, 2)'}), '(shape=(17, 2))\n', (1191, 1206), True, 'import numpy as np\n'), ((1224, 1247), 'numpy.zeros', 'np.zeros', ([], {'shape': '(17, 1)'}), '(shape=(17, 1))\n', (1232, 1247), True, 'import numpy as np\n'), ((1578, 1601), 'numpy.zeros', 'np.zeros', ([], {'shape': '(17, 2)'}), '(shape=(17, 2))\n', (1586, 1601), True, 'import numpy as np\n'), ((2956, 2992), 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_path': 'PATH_TO_CKPT'}), '(model_path=PATH_TO_CKPT)\n', (2967, 2992), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), ((4607, 4630), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4628, 4630), False, 'import cv2\n'), ((5063, 5078), 'tellopy.Tello', 'tellopy.Tello', ([], {}), '()\n', (5076, 5078), False, 'import tellopy\n'), ((5440, 5451), 'time.time', 'time.time', ([], {}), '()\n', (5449, 5451), False, 'import time\n'), ((5938, 5981), 'tracker.Tracker', 'Tracker', (['(720)', '(960)', 'green_lower', 'green_upper'], {}), '(720, 960, green_lower, green_upper)\n', (5945, 5981), False, 'from tracker import Tracker\n'), ((10356, 10425), 'pynput.keyboard.Listener', 'keyboard.Listener', ([], {'on_press': 'self.on_press', 'on_release': 'self.on_release'}), '(on_press=self.on_press, on_release=self.on_release)\n', (10373, 10425), False, 'from pynput import 
keyboard\n'), ((10701, 10719), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (10717, 10719), False, 'import cv2\n'), ((11141, 11179), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (11153, 11179), False, 'import cv2\n'), ((11204, 11242), 'cv2.resize', 'cv2.resize', (['frame_rgb', '(width, height)'], {}), '(frame_rgb, (width, height))\n', (11214, 11242), False, 'import cv2\n'), ((11264, 11301), 'numpy.expand_dims', 'np.expand_dims', (['frame_resized'], {'axis': '(0)'}), '(frame_resized, axis=0)\n', (11278, 11301), True, 'import numpy as np\n'), ((11993, 12014), 'numpy.vectorize', 'np.vectorize', (['sigmoid'], {}), '(sigmoid)\n', (12005, 12014), True, 'import numpy as np\n'), ((13843, 13861), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (13859, 13861), False, 'import cv2\n'), ((17168, 17247), 'av.VideoFrame', 'av.VideoFrame', ([], {'width': 'frame.width', 'height': 'frame.height', 'format': 'frame.format.name'}), '(width=frame.width, height=frame.height, format=frame.format.name)\n', (17181, 17247), False, 'import av\n'), ((1043, 1055), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (1051, 1055), False, 'import math\n'), ((4451, 4465), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4463, 4465), False, 'import sys\n'), ((4474, 4535), 'traceback.print_exception', 'traceback.print_exception', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (4499, 4535), False, 'import traceback\n'), ((15909, 16014), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(0, 30 + idx * 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1.0)', '(255, 0, 0)'], {'lineType': '(30)'}), '(frame, text, (0, 30 + idx * 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0,\n (255, 0, 0), lineType=30)\n', (15920, 16014), False, 'import cv2\n'), ((16549, 16576), 'av.open', 'av.open', (['self.out_name', '"""w"""'], {}), "(self.out_name, 'w')\n", (16556, 16576), False, 'import av\n'), ((16607, 16618), 
'time.time', 'time.time', ([], {}), '()\n', (16616, 16618), False, 'import time\n'), ((3888, 3935), 'threading.Thread', 'threading.Thread', ([], {'target': 'tellotrack.recv_thread'}), '(target=tellotrack.recv_thread)\n', (3904, 3935), False, 'import threading\n'), ((4011, 4027), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (4021, 4027), False, 'import time\n'), ((4158, 4187), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (4168, 4187), False, 'import cv2\n'), ((4270, 4284), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4281, 4284), False, 'import cv2\n'), ((7056, 7072), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (7066, 7072), False, 'import time\n'), ((7154, 7168), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7166, 7168), False, 'import sys\n'), ((7181, 7242), 'traceback.print_exception', 'traceback.print_exception', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (7206, 7242), False, 'import traceback\n'), ((16323, 16340), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (16332, 16340), False, 'import os\n'), ((19277, 19294), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (19286, 19294), False, 'import os\n'), ((11447, 11469), 'numpy.float32', 'np.float32', (['input_data'], {}), '(input_data)\n', (11457, 11469), True, 'import numpy as np\n'), ((15682, 15693), 'time.time', 'time.time', ([], {}), '()\n', (15691, 15693), False, 'import time\n'), ((16342, 16365), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16363, 16365), False, 'import datetime\n'), ((19308, 19331), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19329, 19331), False, 'import datetime\n')] |
# import the pygame module
import pygame
import math
import serial
# import pygame.locals for easier
# access to key coordinates
from pygame.locals import *
# Simulated radar samples: one distance reading per angular step across 180 degrees.
data = [100,100,100,110,120,130,100,90,80,70,100,100,200,200,300,300,340,400,100,90,80,70,230,400,300,200]
# initialize pygame
pygame.init()
# Define the dimensions of screen object
screen = pygame.display.set_mode((800, 800))
def setCircle(colorr, colorg, colorb, x, y, radius, border_width):
    """Draw a circle on the global screen surface.

    The first three arguments form the RGB colour; border_width of 0
    fills the circle, otherwise it draws an outline of that thickness.
    """
    rgb = (colorr, colorg, colorb)
    center = (x, y)
    pygame.draw.circle(screen, rgb, center, radius, border_width)
bg = pygame.image.load("images/blue.png")
# Variable to keep our game loop running
gameOn = True
screen.blit(bg, (0, 0))
# Plot each sample: index i maps to an angle (180/30 degrees per step),
# distance is the radius from the screen centre at (400, 400).
for i, distance in enumerate(data):
    x = 400 - math.cos(math.radians(180/30*i)) * distance
    y = 400 - math.sin(math.radians (180/30*i)) * distance
    setCircle(255, 0, 40, x, y, 2, 0)
# mark middle
setCircle(0, 255, 49, 400, 400, 5 ,0 )
# mark 4 meter
setCircle(0, 255, 49, 400, 400, 400 ,2 )
# Our game loop
while gameOn:
    # for loop through the event queue
    for event in pygame.event.get():
        # Check for KEYDOWN event
        if event.type == KEYDOWN:
            # If the Backspace key has been pressed set
            # running to false to exit the main loop
            if event.key == K_BACKSPACE:
                gameOn = False
        # Check for QUIT event
        elif event.type == QUIT:
            gameOn = False
    # Update the display using flip
    pygame.display.flip()
| [
"pygame.draw.circle",
"pygame.init",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"math.radians",
"pygame.image.load"
] | [((287, 300), 'pygame.init', 'pygame.init', ([], {}), '()\n', (298, 300), False, 'import pygame\n'), ((352, 387), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 800)'], {}), '((800, 800))\n', (375, 387), False, 'import pygame\n'), ((548, 584), 'pygame.image.load', 'pygame.image.load', (['"""images/blue.png"""'], {}), "('images/blue.png')\n", (565, 584), False, 'import pygame\n'), ((459, 545), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', '(colorr, colorg, colorb)', '(x, y)', 'radius', 'border_width'], {}), '(screen, (colorr, colorg, colorb), (x, y), radius,\n border_width)\n', (477, 545), False, 'import pygame\n'), ((1059, 1077), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1075, 1077), False, 'import pygame\n'), ((1513, 1534), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1532, 1534), False, 'import pygame\n'), ((724, 750), 'math.radians', 'math.radians', (['(180 / 30 * i)'], {}), '(180 / 30 * i)\n', (736, 750), False, 'import math\n'), ((782, 808), 'math.radians', 'math.radians', (['(180 / 30 * i)'], {}), '(180 / 30 * i)\n', (794, 808), False, 'import math\n')] |
from builtins import range
from datetime import timedelta
import datetime
import math
from operator import itemgetter
import re
import calendar
import pytz
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext
# strftime/strptime patterns used throughout this module.
DATE_FORMAT = '%Y-%m-%d'
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
TIME_FORMAT = '%H:%M'
# Facebook Graph API timestamp format (always explicit +0000 / UTC).
FB_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S+0000'
def get_list_of_monday_thru_friday_dates(starting_date=None, include_weekend=False):
    """
    pass a week number and get a list of dates starting on monday of that week.
    if today is a saturday or sunday, return next week instead
    """
    # isocalendar() -> (year, week number, weekday 1..7 where 1 is Monday).
    current_week_number, current_weekday = datetime.datetime.now().isocalendar()[1:3]
    if starting_date is None:
        force = False
        starting_date = datetime.datetime.now()
    else:
        # An explicit starting_date disables the skip-to-next-week magic below.
        force = True
    selected_week_number = starting_date.isocalendar()[1]
    # Monday of the current and of the selected week.
    current_monday = (datetime.datetime.now()-datetime.timedelta(days=current_weekday-1))
    selected_monday = (starting_date-datetime.timedelta(days=starting_date.isocalendar()[2]-1))
    if not force and current_weekday>5 and to_date_object(current_monday)==to_date_object(selected_monday):
        # if today is already weekend, shift to next week
        # xxx too automagical perhaps...
        selected_monday += datetime.timedelta(days=7)
        selected_week_number+=1
    # 5 dates Monday..Friday, or 7 if the weekend is requested.
    date_list = [to_date_object(selected_monday+datetime.timedelta(days=d)) for d in range(0,include_weekend and 7 or 5 )]
    return list(date_list)
def range_days(start_date, duration_days, epoch = False):
    """Return {'time_start', 'time_stop'} spanning *duration_days* whole days.

    The range starts at midnight of start_date's day; with epoch=True the
    two bounds are returned as epoch seconds instead of datetimes.
    """
    begin = timezone.datetime(start_date.year, start_date.month, start_date.day)
    end = begin + timedelta(hours=24*duration_days)
    if epoch:
        return {'time_start': to_epoch(begin), 'time_stop': to_epoch(end)}
    return {'time_start': begin, 'time_stop': end}
def total_seconds(time_delta):
    """Return the length of *time_delta* in seconds as a float.

    Kept for backwards compatibility with pre-Python-2.7 callers; now simply
    delegates to the standard-library :meth:`datetime.timedelta.total_seconds`
    instead of re-deriving days*86400 + seconds + microseconds by hand.
    """
    return time_delta.total_seconds()
def range_one_day(day = None, epoch = False):
    """Return the single-day time range for *day* (default: now)."""
    target = timezone.now() if day is None else day
    return range_days(target, duration_days=1, epoch=epoch)
def extract_time_range(parameters):
    """Build a {'time_start', 'time_stop'} dict from a request-parameter mapping.

    A missing time_start defaults to the Unix epoch; a missing time_stop
    defaults to the current time.
    NOTE(review): remote_date_str_to_date is not defined or imported in the
    visible part of this module — presumably defined elsewhere in the file;
    confirm.
    """
    # time for
    time_start = remote_date_str_to_date(parameters.get('time_start','1970-01-01T00:00:00'))
    time_stop = timezone.now()
    if 'time_stop' in parameters:
        time_stop = remote_date_str_to_date(parameters.get('time_stop'))
    return {'time_start':time_start, 'time_stop':time_stop}
def utc_to_tz(dt_object, timezone):
    """Interpret a naive datetime as UTC and convert it into *timezone*."""
    utc = pytz.timezone("UTC")
    aware = utc.localize(dt_object)
    return aware.astimezone(timezone)
def tz_fix_from_account_to_utc(dt_object, timezone):
    """Localize a naive datetime in the given (pytz) timezone and convert to UTC.

    Datetimes at or before the Unix epoch are returned unchanged.
    NOTE(review): the ``timezone`` parameter shadows the django.utils.timezone
    module imported at the top of this file, inside this function only.
    """
    if to_epoch(dt_object) > 0:
        localized_dt = timezone.localize(dt_object)
        return localized_dt.astimezone(pytz.timezone("UTC"))
    else:
        return dt_object
def tz_fix_utc_epoch(utc_epoch, timezone):
    """Take a UTC epoch seconds value, and convert it into a date time string in the provided timezone"""
    utc_moment = pytz.utc.localize(from_epoch(utc_epoch))
    local_moment = timezone.normalize(utc_moment)
    return local_moment.strftime(DATETIME_FORMAT)
def fb_request_timestamp_to_date(dt_object):
    """Convert a datetime object into a facebook stats request date object.

    Datetimes in 1970 (the epoch sentinel) yield None.

    :param datetime.datetime dt_object: datetime object
    :return: dict with 'month', 'day' and 'year' keys, or None
    """
    if dt_object.year == 1970:
        return None
    return {
        'year': dt_object.year,
        'month': dt_object.month,
        'day': dt_object.day,
    }
def get_month_list(start_date, end_date):
    """
    Return sorted, unique (year, month) tuples covering every date in the range.

    :param datetime.date start_date:
    :param datetime.date end_date:
    :return: sorted list of (year, month) tuples.
    """
    # daterange() is end-exclusive, so widen an empty range to one day.
    if start_date == end_date:
        end_date += datetime.timedelta(days=1)
    seen = {(day.year, day.month) for day in daterange(start_date, end_date)}
    return sorted(seen)
def date_range(start_date, end_date, epoch=True):
    """Alias for :func:`daterange`, but with *epoch* defaulting to True."""
    return daterange(start_date, end_date, epoch=epoch)
def daterange(start_date, end_date, epoch=False, skip_func=None):
    """Yield the dates between `start_date` and `end_date`.

    Forward (start_date <= end_date): yields start_date, start_date+1, ...
    up to but NOT including end_date (end-exclusive).
    Backward (end_date < start_date): yields start_date-1, start_date-2, ...
    down to and including end_date (start-exclusive).
    In both directions the most-future date of the pair is never yielded.
    (The previous docstring claimed end_date was included, which contradicted
    the implementation; the behaviour itself is unchanged.)

    :param datetime.date start_date: starting from...
    :param datetime.date end_date: up to (see direction rules above)
    :param bool epoch: yield unix timestamps at midnight instead of dates
    :param func skip_func: optional predicate; dates for which it returns
        True are skipped.
    :return: generator of dates (or epoch ints when epoch=True)
    """
    if end_date < start_date:
        step = -1
        # Backward: offsets 1..days so start_date itself is excluded.
        offsets = range(1, (start_date - end_date).days + 1)
    else:
        step = 1
        # Forward: offsets 0..days-1 so end_date is excluded.
        offsets = range((end_date - start_date).days)
    for n in offsets:
        current = start_date + timedelta(days=n * step)
        if skip_func and skip_func(current):
            continue
        if epoch:
            current = to_epoch(get_midnight(current))
        yield current
def get_time_ranges(start_date, end_date, epoch=True):
    """
    Return one single-day {'time_start', 'time_stop'} range per day between
    the two dates (inclusive of both endpoints), stepping towards end_date.

    :param datetime.date start_date:
    :param datetime.date end_date:
    :param bool epoch: dates become epoch seconds if true, otherwise date objects
    :return: list of range dicts
    """
    if end_date < start_date:
        direction = -1
        day_count = (start_date - end_date).days
    else:
        direction = 1
        day_count = (end_date - start_date).days
    return [
        range_days(start_date + timedelta(days=offset * direction), 0, epoch=epoch)
        for offset in range(day_count + 1)
    ]
def to_epoch(dt, return_none=False):
    """Return the number of seconds since the Unix epoch, in UTC.

    Accepts a datetime.datetime (naive values are treated as UTC; aware
    values are converted to UTC), a datetime.date (midnight UTC), an int
    (returned unchanged), or a 'YYYY-MM-DD HH:MM' string.

    Bug fix: datetime.datetime is a subclass of datetime.date, so the old
    code's date check matched datetimes first and used timetuple(), which
    ignores tzinfo and returned the wall-clock time for aware datetimes.
    The datetime branch is now checked first and uses utctimetuple().

    :param dt: datetime/date/int/str value to convert
    :param return_none: if `dt` is None, return None when true, else 0
    :return: epoch seconds as an `int` (or None)
    """
    if dt is None:
        return None if return_none else 0
    if isinstance(dt, datetime.datetime):
        # Checked before datetime.date (its superclass); utctimetuple()
        # normalises aware datetimes to UTC and equals timetuple() for naive.
        return int(calendar.timegm(dt.utctimetuple()))
    if isinstance(dt, datetime.date):
        return int(calendar.timegm(dt.timetuple()))
    if isinstance(dt, int):
        return dt
    try:
        parsed = datetime.datetime.strptime(dt, DATETIME_FORMAT)
    except (TypeError, AttributeError, ValueError):
        return 0
    return int(calendar.timegm(parsed.utctimetuple()))
def parse_fb_timestamp(timestamp):
    """Parse a Facebook-style '+0000' timestamp string into a datetime.

    Raises ValueError when the string does not match the expected format.
    """
    if not re.search(r'^\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\d\+0000$', timestamp):
        raise ValueError("Invalid facebook timestamp [%s]" % timestamp)
    return timezone.datetime.strptime(timestamp, FB_DATETIME_FORMAT)
def from_epoch(epo):
    """
    Because facebook insists on sending dates and times in a different format in the graph API, we are forced to
    parse this as well

    :param epo: epoch seconds to turn into a UTC-aware `datetime.datetime`
        (None is treated as 0, i.e. the epoch start)
    :return: Datetime object
    """
    seconds = 0 if epo is None else epo
    naive = datetime.datetime.utcfromtimestamp(int(seconds))
    return pytz.UTC.localize(naive)
def chunks(l, chunk_size):
    """Yield successive slices of *l*, each at most *chunk_size* items long."""
    for start in range(0, len(l), chunk_size):
        yield l[start:start + chunk_size]
def get_date_ranges_excluding_gaps(dates, max_days_per_range=30):
    """
    Group a sorted list of dates into contiguous [start, stop) ranges.

    A new range starts whenever there is a gap of more than one day, or when
    the current range would exceed `max_days_per_range` days. The stop bound
    is one day past the last date in the range (end-exclusive).
    TODO: make inclusive
    """
    if not dates:
        return []
    one_day = datetime.timedelta(days=1)
    previous = dates[0]
    active = [previous, previous]
    ranges = [active]
    for current in dates:
        gap_days = (current - previous).days
        span_days = (current - active[0]).days
        if gap_days > 1 or span_days >= max_days_per_range:
            # Gap or size limit hit: open a fresh range at this date.
            active = [current, current + one_day]
            ranges.append(active)
        else:
            # Still contiguous: push the exclusive stop bound forward.
            active[1] = current + one_day
        previous = current
    return ranges
def date_range_chunks(start_date, end_date, chunk_size, last_date_excluded=True):
    """
    Split the (end-exclusive) date range into consecutive chunks, each at
    most `chunk_size` days long, returned as [start, stop] pairs.
    """
    all_dates = list(daterange(start_date, end_date))
    result = []
    for piece in chunks(all_dates, chunk_size):
        stop = piece[-1]
        if last_date_excluded:
            # Make the chunk's stop bound exclusive of its last date.
            stop = stop + datetime.timedelta(days=1)
        result.append([piece[0], stop])
    return result
def get_gap_ranges_from_dates(dates, start_date, end_date, max_days_per_range=30):
    """
    Return the gaps in `dates` between `start_date` and `end_date` as a
    list of ``[start, end]`` ranges (inclusive start, exclusive end),
    each at most `max_days_per_range` days long.
    """
    one_day = timedelta(days=1)
    # Anchor one day before the start so a missing first day is detected.
    anchor = to_date_object(start_date) - one_day
    stop = to_date_object(end_date)
    known = sorted(set([anchor, stop] + list(dates)))
    # Discard anything before the anchor.
    known = known[known.index(anchor):]
    gaps = []
    prev = known[0]
    for current in known[1:]:
        if (current - prev).days > 1:
            gaps += date_range_chunks(prev + one_day,
                                      current,
                                      chunk_size=max_days_per_range,
                                      last_date_excluded=True)
        prev = current
    return gaps
def to_date_object(date_or_datetime_object):
    """
    Normalize a date or datetime to a plain :class:`datetime.date`.

    :raises TypeError: if the argument is neither a date nor a datetime.
    """
    # datetime must be tested first: datetime.datetime subclasses datetime.date.
    if isinstance(date_or_datetime_object, datetime.datetime):
        return date_or_datetime_object.date()
    if isinstance(date_or_datetime_object, datetime.date):
        return date_or_datetime_object
    raise TypeError("Object passed is not a date or datetime.")
def get_midnight(dt_obj, add_days=0):
    """Return midnight of `dt_obj`'s day (shifted by `add_days`) in the current timezone."""
    naive = timezone.datetime(dt_obj.year, dt_obj.month, dt_obj.day) + timedelta(days=add_days)
    return timezone.get_current_timezone().localize(naive)
def get_working_hours_in_month(year, month, until_date=None, work_hours_per_day=8):
    """
    Count working hours (Mon-Fri only) in the given month.

    :param until_date: optional date; days on or after it are not counted.
    :param work_hours_per_day: hours credited per working day.
    :return: total working hours as an int.
    """
    total = 0
    for monthday, weekday in calendar.Calendar(0).itermonthdays2(year=year,
                                                                 month=month):
        # monthday 0 marks padding days of adjacent months; weekday 5/6 are Sat/Sun.
        if monthday == 0 or weekday >= 5:
            continue
        if until_date and datetime.date(year, month, monthday) >= until_date:
            break
        total += work_hours_per_day
    return total
def convert_to_pst(date):
    """Convert an aware datetime to US/Pacific, normalizing across DST edges."""
    pacific = pytz.timezone("US/Pacific")
    return pacific.normalize(date.astimezone(pacific))
def midnight_pst(days=0):
    """
    Midnight in US/Pacific, `days` days from now.

    Facebook likes midnights in PST, so we oblige.
    """
    import datetime
    pacific = pytz.timezone("US/Pacific")
    now_pacific = convert_to_pst(pytz.utc.localize(datetime.datetime.utcnow()))
    target = now_pacific + timedelta(days=days)
    return pacific.localize(datetime.datetime(target.year, target.month, target.day))
def midnight(days=0):
    """Naive UTC midnight, `days` days from now."""
    target = timezone.datetime.utcnow() + timedelta(days=days)
    return timezone.datetime(target.year, target.month, target.day)
def remote_date_str_to_date(strdatetime):
    """
    Parse a remote datetime string, trying DATETIME_FORMAT first and then
    bare ISO-8601; falls back to epoch (1970-01-01) when neither matches.
    """
    for fmt in (DATETIME_FORMAT, '%Y-%m-%dT%H:%M:%S'):
        try:
            return timezone.datetime.strptime(strdatetime, fmt)
        except (TypeError, ValueError):
            continue
    return timezone.datetime(1970, 1, 1)
def remote_stop_datetime_str_to_time(strdatetime):
    """
    Parse a range-end time: midnight is mapped to 23:59, every other
    value is pulled back by one minute so the stop bound is inclusive.
    """
    parsed = remote_datetime_str_to_time(strdatetime)
    if parsed == datetime.time(hour=0, minute=0):
        return datetime.time(hour=23, minute=59)
    anchored = timezone.datetime(year=1970, month=1, day=1,
                                 hour=parsed.hour, minute=parsed.minute)
    return (anchored - timedelta(minutes=1)).time()
def remote_datetime_str_to_time(strdatetime):
    """Parse `strdatetime` (DATETIME_FORMAT extended with seconds) and return its time part."""
    parsed = timezone.datetime.strptime(strdatetime, DATETIME_FORMAT + ':%S')
    return parsed.time()
def round_off(date_obj, round_to=15):
    """
    Round the given datetime to the nearest multiple of `round_to` minutes.

    :param date_obj: A datetime object.
    :param round_to: Nearest number of minutes to round to. Default is 15.
    :return: The resulting datetime object.
    """
    # Add exactly half the interval (round_to * 30 seconds) before flooring.
    # The previous int(round(round_to / 2)) added a full extra half-minute
    # for odd intervals, so e.g. 10:07 wrongly rounded up to 10:15 instead
    # of down to the nearer 10:00.
    date_obj += timedelta(seconds=round_to * 30)
    date_obj -= timedelta(minutes=date_obj.minute % round_to,
                          seconds=date_obj.second,
                          microseconds=date_obj.microsecond)
    return date_obj
def pretty_duration(sec):
    """
    Break a duration in seconds into days/hours/minutes/seconds.

    :param sec: duration in seconds; may be negative.
    :return: dict with integer components plus an ``is_negative`` flag.
    """
    negative = sec < 0
    remaining = math.fabs(sec)
    mins, secs = divmod(remaining, 60)
    hrs, mins = divmod(mins, 60)
    dys, hrs = divmod(hrs, 24)
    dys, hrs, mins, secs = (int(part) for part in (dys, hrs, mins, secs))
    return {
        'is_negative': negative,
        'days': dys,
        'hours': hrs,
        'minutes': mins,
        'seconds': secs,
    }
| [
"datetime.datetime",
"pytz.timezone",
"django.utils.timezone.datetime.utcnow",
"calendar.Calendar",
"datetime.time",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"django.utils.timezone.get_current_timezone",
"django.utils.timezone.datetime",
"django.utils.timezone.localize",
"django... | [((1637, 1705), 'django.utils.timezone.datetime', 'timezone.datetime', (['start_date.year', 'start_date.month', 'start_date.day'], {}), '(start_date.year, start_date.month, start_date.day)\n', (1654, 1705), False, 'from django.utils import timezone\n'), ((2383, 2397), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2395, 2397), False, 'from django.utils import timezone\n'), ((6093, 6111), 'builtins.range', 'range', (['(0)', '(days + 1)'], {}), '(0, days + 1)\n', (6098, 6111), False, 'from builtins import range\n'), ((7194, 7282), 're.search', 're.search', (['"""^\\\\d\\\\d\\\\d\\\\d\\\\-\\\\d\\\\d\\\\-\\\\d\\\\dT\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d\\\\+0000$"""', 'timestamp'], {}), "('^\\\\d\\\\d\\\\d\\\\d\\\\-\\\\d\\\\d\\\\-\\\\d\\\\dT\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d\\\\+0000$',\n timestamp)\n", (7203, 7282), False, 'import re\n'), ((9995, 10012), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (10004, 10012), False, 'from datetime import timedelta\n'), ((11264, 11295), 'django.utils.timezone.get_current_timezone', 'timezone.get_current_timezone', ([], {}), '()\n', (11293, 11295), False, 'from django.utils import timezone\n'), ((11909, 11936), 'pytz.timezone', 'pytz.timezone', (['"""US/Pacific"""'], {}), "('US/Pacific')\n", (11922, 11936), False, 'import pytz\n'), ((12108, 12135), 'pytz.timezone', 'pytz.timezone', (['"""US/Pacific"""'], {}), "('US/Pacific')\n", (12121, 12135), False, 'import pytz\n'), ((12414, 12455), 'django.utils.timezone.datetime', 'timezone.datetime', (['n.year', 'n.month', 'n.day'], {}), '(n.year, n.month, n.day)\n', (12431, 12455), False, 'from django.utils import timezone\n'), ((13641, 13750), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(date_obj.minute % round_to)', 'seconds': 'date_obj.second', 'microseconds': 'date_obj.microsecond'}), '(minutes=date_obj.minute % round_to, seconds=date_obj.second,\n microseconds=date_obj.microsecond)\n', (13650, 13750), False, 'from datetime import 
timedelta\n'), ((13886, 13900), 'math.fabs', 'math.fabs', (['sec'], {}), '(sec)\n', (13895, 13900), False, 'import math\n'), ((816, 839), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (837, 839), False, 'import datetime\n'), ((952, 975), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (973, 975), False, 'import datetime\n'), ((976, 1020), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(current_weekday - 1)'}), '(days=current_weekday - 1)\n', (994, 1020), False, 'import datetime\n'), ((1351, 1377), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (1369, 1377), False, 'import datetime\n'), ((1722, 1757), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24 * duration_days)'}), '(hours=24 * duration_days)\n', (1731, 1757), False, 'from datetime import timedelta\n'), ((2149, 2163), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2161, 2163), False, 'from django.utils import timezone\n'), ((2818, 2846), 'django.utils.timezone.localize', 'timezone.localize', (['dt_object'], {}), '(dt_object)\n', (2835, 2846), False, 'from django.utils import timezone\n'), ((3997, 4023), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4015, 4023), False, 'import datetime\n'), ((7279, 7336), 'django.utils.timezone.datetime.strptime', 'timezone.datetime.strptime', (['timestamp', 'FB_DATETIME_FORMAT'], {}), '(timestamp, FB_DATETIME_FORMAT)\n', (7305, 7336), False, 'from django.utils import timezone\n'), ((11311, 11367), 'django.utils.timezone.datetime', 'timezone.datetime', (['dt_obj.year', 'dt_obj.month', 'dt_obj.day'], {}), '(dt_obj.year, dt_obj.month, dt_obj.day)\n', (11328, 11367), False, 'from django.utils import timezone\n'), ((11368, 11392), 'datetime.timedelta', 'timedelta', ([], {'days': 'add_days'}), '(days=add_days)\n', (11377, 11392), False, 'from datetime import timedelta\n'), ((11700, 11751), 'datetime.date', 'datetime.date', ([], {'year': 
'year', 'month': 'month', 'day': 'monthday'}), '(year=year, month=month, day=monthday)\n', (11713, 11751), False, 'import datetime\n'), ((12238, 12258), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (12247, 12258), False, 'from datetime import timedelta\n'), ((12283, 12324), 'datetime.datetime', 'datetime.datetime', (['n.year', 'n.month', 'n.day'], {}), '(n.year, n.month, n.day)\n', (12300, 12324), False, 'import datetime\n'), ((12355, 12381), 'django.utils.timezone.datetime.utcnow', 'timezone.datetime.utcnow', ([], {}), '()\n', (12379, 12381), False, 'from django.utils import timezone\n'), ((12382, 12402), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (12391, 12402), False, 'from datetime import timedelta\n'), ((12521, 12577), 'django.utils.timezone.datetime.strptime', 'timezone.datetime.strptime', (['strdatetime', 'DATETIME_FORMAT'], {}), '(strdatetime, DATETIME_FORMAT)\n', (12547, 12577), False, 'from django.utils import timezone\n'), ((12915, 12946), 'datetime.time', 'datetime.time', ([], {'hour': '(0)', 'minute': '(0)'}), '(hour=0, minute=0)\n', (12928, 12946), False, 'import datetime\n'), ((12963, 12996), 'datetime.time', 'datetime.time', ([], {'hour': '(23)', 'minute': '(59)'}), '(hour=23, minute=59)\n', (12976, 12996), False, 'import datetime\n'), ((1495, 1531), 'builtins.range', 'range', (['(0)', '(include_weekend and 7 or 5)'], {}), '(0, include_weekend and 7 or 5)\n', (1500, 1531), False, 'from builtins import range\n'), ((2622, 2642), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (2635, 2642), False, 'import pytz\n'), ((2886, 2906), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (2899, 2906), False, 'import pytz\n'), ((5157, 5175), 'builtins.range', 'range', (['(1)', '(days + 1)'], {}), '(1, days + 1)\n', (5162, 5175), False, 'from builtins import range\n'), ((5266, 5280), 'builtins.range', 'range', (['(0)', 'days'], {}), '(0, days)\n', (5271, 5280), 
False, 'from builtins import range\n'), ((5323, 5353), 'datetime.timedelta', 'timedelta', ([], {'days': '(n * multiplier)'}), '(days=n * multiplier)\n', (5332, 5353), False, 'from datetime import timedelta\n'), ((9466, 9492), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (9484, 9492), False, 'import datetime\n'), ((11554, 11574), 'calendar.Calendar', 'calendar.Calendar', (['(0)'], {}), '(0)\n', (11571, 11574), False, 'import calendar\n'), ((12188, 12214), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12212, 12214), False, 'import datetime\n'), ((13212, 13276), 'django.utils.timezone.datetime.strptime', 'timezone.datetime.strptime', (['strdatetime', "(DATETIME_FORMAT + ':%S')"], {}), "(strdatetime, DATETIME_FORMAT + ':%S')\n", (13238, 13276), False, 'from django.utils import timezone\n'), ((697, 720), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (718, 720), False, 'import datetime\n'), ((1458, 1484), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd'}), '(days=d)\n', (1476, 1484), False, 'import datetime\n'), ((8945, 8971), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8963, 8971), False, 'import datetime\n'), ((12646, 12706), 'django.utils.timezone.datetime.strptime', 'timezone.datetime.strptime', (['strdatetime', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(strdatetime, '%Y-%m-%dT%H:%M:%S')\n", (12672, 12706), False, 'from django.utils import timezone\n'), ((6167, 6204), 'datetime.timedelta', 'timedelta', ([], {'days': '(add_days * multiplier)'}), '(days=add_days * multiplier)\n', (6176, 6204), False, 'from datetime import timedelta\n'), ((6974, 7021), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dt', 'DATETIME_FORMAT'], {}), '(dt, DATETIME_FORMAT)\n', (7000, 7021), False, 'import datetime\n'), ((8824, 8850), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8842, 8850), False, 'import 
datetime\n'), ((12766, 12795), 'django.utils.timezone.datetime', 'timezone.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (12783, 12795), False, 'from django.utils import timezone\n'), ((13023, 13108), 'django.utils.timezone.datetime', 'timezone.datetime', ([], {'year': '(1970)', 'month': '(1)', 'day': '(1)', 'hour': 'time.hour', 'minute': 'time.minute'}), '(year=1970, month=1, day=1, hour=time.hour, minute=time.minute\n )\n', (13040, 13108), False, 'from django.utils import timezone\n'), ((13106, 13126), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (13115, 13126), False, 'from datetime import timedelta\n')] |
import os
import isort
# Run isort over every .py file in the current directory and in ./webapp.
for path in (os.getcwd(), os.path.join(os.getcwd(), 'webapp')):
    with os.scandir(path) as entries:
        for entry in entries:
            if entry.is_file() and entry.name.endswith('.py'):
                print(entry.name)
                isort.file(os.path.join(path, entry.name))
| [
"os.scandir",
"os.path.join",
"os.getcwd"
] | [((34, 45), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (43, 45), False, 'import os\n'), ((60, 71), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (69, 71), False, 'import os\n'), ((113, 129), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (123, 129), False, 'import os\n'), ((288, 318), 'os.path.join', 'os.path.join', (['path', 'entry.name'], {}), '(path, entry.name)\n', (300, 318), False, 'import os\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import PassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestFcFusePass(PassAutoScanTest):
    """
    Auto-scan test for fc_fuse_pass, which fuses the subgraph below into a
    single `fc` op (optionally absorbing a trailing relu):

        x_var     y_var(persistable)
           \\        /
              mul        bias_var(persistable)
               |        /
          mul_out_var  /
                \\     /
           elementwise_add
    """

    def sample_predictor_configs(self, program_config):
        """Yield (config, expected fused ops, (atol, rtol)) per backend."""
        # cpu
        config = self.create_inference_config(use_gpu=False)
        yield config, ["fc"], (1e-5, 1e-5)

        # for gpu
        config = self.create_inference_config(use_gpu=True)
        yield config, ["fc"], (1e-5, 1e-5)

        # trt static_shape
        config = self.create_trt_inference_config()
        config.enable_tensorrt_engine(
            max_batch_size=8,
            workspace_size=102400,
            min_subgraph_size=0,
            precision_mode=paddle_infer.PrecisionType.Float32,
            use_static=False,
            use_calib_mode=False)
        yield config, ['fc'], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        # Here we put some skip rules to avoid known bugs

        def teller1(program_config, predictor_config):
            # shape of bias should be [1, mul_y_shape[-1]] or [mul_y_shape[-1]]
            y_shape = list(program_config.weights["mul_y"].shape)
            bias_shape = list(program_config.weights["bias"].shape)
            if predictor_config.tensorrt_engine_enabled():
                # TensorRT cann't handle all the situation of elementwise_add
                # disable it until this problem fixed
                predictor_config.exp_disable_tensorrt_ops(["elementwise_add"])
            if bias_shape != [y_shape[-1]] and bias_shape != [1, y_shape[-1]]:
                return True
            return False

        def teller2(program_config, predictor_config):
            # TODO fuse has bug while axis != -1
            axis = program_config.ops[1].attrs["axis"]
            if axis != -1 and axis != program_config.ops[0].attrs[
                    "x_num_col_dims"]:
                return True
            return False

        self.add_ignore_check_case(
            teller1,
            IgnoreReasons.PASS_ACCURACY_ERROR,
            "The pass output has diff while shape of bias is not [out_size] or [1, out_size].",
        )
        self.add_ignore_check_case(
            teller2,
            IgnoreReasons.PASS_ACCURACY_ERROR,
            "The pass output has diff while axis of elementwise_add is not -1.",
        )

    def is_program_valid(self, prog_config):
        """Reject generated programs the fused op cannot represent."""
        add_x_rank = prog_config.ops[0].attrs["x_num_col_dims"] + 1
        add_y_rank = len(prog_config.weights["bias"].shape)
        axis = prog_config.ops[1].attrs["axis"]
        if add_x_rank == add_y_rank:
            # BUG FIX: the original condition `axis != -1 or axis != 0` is a
            # tautology (true for every axis), so ALL programs with equal
            # ranks were rejected.  Only axis values other than -1 and 0 are
            # actually invalid when the ranks match.
            if axis != -1 and axis != 0:
                return False
        return True

    def sample_program_config(self, draw):
        """Draw a random (mul -> elementwise_add [-> relu]) program."""
        # 1. Generate shape of input:X of mul
        x_shape = draw(
            st.lists(st.integers(min_value=1, max_value=4),
                     min_size=2,
                     max_size=4))
        # 2. Generate attr:x_num_col_dims/y_num_col_dims of mul
        x_num_col_dims = draw(
            st.integers(min_value=1, max_value=len(x_shape) - 1))
        y_num_col_dims = 1
        # 3. Generate legal shape of input:Y of mul
        y_shape = draw(
            st.lists(st.integers(min_value=1, max_value=8),
                     min_size=2,
                     max_size=2))
        y_shape[0] = int(np.prod(x_shape[x_num_col_dims:]))
        # 4. Generate legal attr:axis of elementwise_add
        mul_out_shape = x_shape[:x_num_col_dims] + y_shape[1:]
        axis = draw(st.integers(min_value=-1, max_value=x_num_col_dims))
        # 5. Generate legal shape of input:Y of elementwise_add
        if axis >= 0:
            max_bias_rank = x_num_col_dims + 1 - axis
            bias_rank = draw(st.integers(min_value=1, max_value=max_bias_rank))
            bias_shape = mul_out_shape[axis:axis + bias_rank]
        else:
            max_bias_rank = 1
            bias_rank = draw(
                st.integers(min_value=1, max_value=len(mul_out_shape)))
            bias_shape = mul_out_shape[-1 * bias_rank:]
        # 6. Random choose if use broadcast for elementwise_add, e.g [3, 4] -> [1, 4]
        if draw(st.booleans()):
            broadcast_dims = draw(st.integers(min_value=1, max_value=bias_rank))
            for i in range(0, broadcast_dims):
                bias_shape[i] = 1
        # 7. Random choose if add a relu operator
        has_relu = draw(st.booleans())

        # Now we have all the decided parameters to compose a program
        # shape of inputs/weights tensors: x_shape, y_shape, bias_shape...
        # parameters of operators: x_num_col_dims, y_num_col_dims, axis...
        # a random boolean value(has_relu) to decide if program include a relu op
        # Still has some risks that the program is invalid or cause bug while running
        # Use function `is_program_valid` to filter the invalid programs before running
        # Use function `add_skip_pass_case` to ignore the programs even if they cause bug while runing
        mul_op = OpConfig(
            "mul",
            inputs={
                "X": ["mul_x"],
                "Y": ["mul_y"]
            },
            outputs={"Out": ["mul_out"]},
            x_num_col_dims=x_num_col_dims,
            y_num_col_dims=y_num_col_dims,
        )
        add_op = OpConfig(
            "elementwise_add",
            inputs={
                "X": ["mul_out"],
                "Y": ["bias"]
            },
            outputs={"Out": ["add_out"]},
            axis=axis,
        )
        ops = [mul_op, add_op]
        if has_relu:
            relu_op = OpConfig("relu",
                              inputs={"X": ["add_out"]},
                              outputs={"Out": ["relu_out"]})
            ops.append(relu_op)
        program_config = ProgramConfig(
            ops=ops,
            weights={
                "mul_y": TensorConfig(shape=y_shape),
                "bias": TensorConfig(shape=bias_shape),
            },
            inputs={
                "mul_x": TensorConfig(shape=x_shape),
            },
            outputs=ops[-1].outputs["Out"],
        )
        return program_config

    def test(self):
        """Run the auto-scan harness against fc_fuse_pass."""
        self.run_and_statis(quant=False,
                            max_examples=500,
                            passes=["fc_fuse_pass"])
# Allow running this pass test directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.prod",
"hypothesis.strategies.integers",
"program_config.TensorConfig",
"hypothesis.strategies.booleans",
"unittest.main",
"program_config.OpConfig"
] | [((7681, 7696), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7694, 7696), False, 'import unittest\n'), ((6363, 6516), 'program_config.OpConfig', 'OpConfig', (['"""mul"""'], {'inputs': "{'X': ['mul_x'], 'Y': ['mul_y']}", 'outputs': "{'Out': ['mul_out']}", 'x_num_col_dims': 'x_num_col_dims', 'y_num_col_dims': 'y_num_col_dims'}), "('mul', inputs={'X': ['mul_x'], 'Y': ['mul_y']}, outputs={'Out': [\n 'mul_out']}, x_num_col_dims=x_num_col_dims, y_num_col_dims=y_num_col_dims)\n", (6371, 6516), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((6646, 6760), 'program_config.OpConfig', 'OpConfig', (['"""elementwise_add"""'], {'inputs': "{'X': ['mul_out'], 'Y': ['bias']}", 'outputs': "{'Out': ['add_out']}", 'axis': 'axis'}), "('elementwise_add', inputs={'X': ['mul_out'], 'Y': ['bias']},\n outputs={'Out': ['add_out']}, axis=axis)\n", (6654, 6760), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((4643, 4676), 'numpy.prod', 'np.prod', (['x_shape[x_num_col_dims:]'], {}), '(x_shape[x_num_col_dims:])\n', (4650, 4676), True, 'import numpy as np\n'), ((4818, 4869), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(-1)', 'max_value': 'x_num_col_dims'}), '(min_value=-1, max_value=x_num_col_dims)\n', (4829, 4869), True, 'import hypothesis.strategies as st\n'), ((5457, 5470), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (5468, 5470), True, 'import hypothesis.strategies as st\n'), ((5709, 5722), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (5720, 5722), True, 'import hypothesis.strategies as st\n'), ((6936, 7010), 'program_config.OpConfig', 'OpConfig', (['"""relu"""'], {'inputs': "{'X': ['add_out']}", 'outputs': "{'Out': ['relu_out']}"}), "('relu', inputs={'X': ['add_out']}, outputs={'Out': ['relu_out']})\n", (6944, 7010), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((4121, 4158), 'hypothesis.strategies.integers', 
'st.integers', ([], {'min_value': '(1)', 'max_value': '(4)'}), '(min_value=1, max_value=4)\n', (4132, 4158), True, 'import hypothesis.strategies as st\n'), ((4512, 4549), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(8)'}), '(min_value=1, max_value=8)\n', (4523, 4549), True, 'import hypothesis.strategies as st\n'), ((5040, 5089), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': 'max_bias_rank'}), '(min_value=1, max_value=max_bias_rank)\n', (5051, 5089), True, 'import hypothesis.strategies as st\n'), ((5507, 5552), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': 'bias_rank'}), '(min_value=1, max_value=bias_rank)\n', (5518, 5552), True, 'import hypothesis.strategies as st\n'), ((7213, 7240), 'program_config.TensorConfig', 'TensorConfig', ([], {'shape': 'y_shape'}), '(shape=y_shape)\n', (7225, 7240), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((7266, 7296), 'program_config.TensorConfig', 'TensorConfig', ([], {'shape': 'bias_shape'}), '(shape=bias_shape)\n', (7278, 7296), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((7359, 7386), 'program_config.TensorConfig', 'TensorConfig', ([], {'shape': 'x_shape'}), '(shape=x_shape)\n', (7371, 7386), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n')] |
import triple_walk_native
def walk_triples(triples_indexed, relation_tail_index, target_nodes, walk_length, padding_idx, seed, restart=True):
    """Thin wrapper over the native triple-walk kernel.

    Note the reordering: the native call expects `restart` before `seed`.
    """
    return triple_walk_native.walk_triples(
        triples_indexed, relation_tail_index, target_nodes,
        walk_length, padding_idx, restart, seed)
def to_windows_cbow(walks, window_size, num_nodes, seed):
    """Build CBOW training windows from random walks via the native extension."""
    native_fn = triple_walk_native.to_windows_cbow
    return native_fn(walks, window_size, num_nodes, seed)
def to_windows_triples_sg(walks, window_size, num_nodes, padding_idx, triples, seed):
    """Build skip-gram triple windows; delegates to native `to_windows_triples`."""
    return triple_walk_native.to_windows_triples(
        walks, window_size, num_nodes, padding_idx, triples, seed)
def to_windows_triples_cbow(walks, window_size, num_nodes, padding_idx, triples, seed):
    """Build CBOW triple windows; delegates to native `to_windows_triples_cbow`."""
    return triple_walk_native.to_windows_triples_cbow(
        walks, window_size, num_nodes, padding_idx, triples, seed)
| [
"triple_walk_native.walk_triples",
"triple_walk_native.to_windows_triples",
"triple_walk_native.to_windows_triples_cbow",
"triple_walk_native.to_windows_cbow"
] | [((150, 278), 'triple_walk_native.walk_triples', 'triple_walk_native.walk_triples', (['triples_indexed', 'relation_tail_index', 'target_nodes', 'walk_length', 'padding_idx', 'restart', 'seed'], {}), '(triples_indexed, relation_tail_index,\n target_nodes, walk_length, padding_idx, restart, seed)\n', (181, 278), False, 'import triple_walk_native\n'), ((665, 736), 'triple_walk_native.to_windows_cbow', 'triple_walk_native.to_windows_cbow', (['walks', 'window_size', 'num_nodes', 'seed'], {}), '(walks, window_size, num_nodes, seed)\n', (699, 736), False, 'import triple_walk_native\n'), ((831, 931), 'triple_walk_native.to_windows_triples', 'triple_walk_native.to_windows_triples', (['walks', 'window_size', 'num_nodes', 'padding_idx', 'triples', 'seed'], {}), '(walks, window_size, num_nodes,\n padding_idx, triples, seed)\n', (868, 931), False, 'import triple_walk_native\n'), ((1021, 1126), 'triple_walk_native.to_windows_triples_cbow', 'triple_walk_native.to_windows_triples_cbow', (['walks', 'window_size', 'num_nodes', 'padding_idx', 'triples', 'seed'], {}), '(walks, window_size, num_nodes,\n padding_idx, triples, seed)\n', (1063, 1126), False, 'import triple_walk_native\n')] |
from django.shortcuts import render
from .models import Question
from .forms import SignUpForm
# Create your views here.
def home(request):
    '''
    Render the index page with every Question, newest first.
    '''
    context = {
        # '-create_date' orders by creation date, descending.
        'question_list': Question.objects.order_by('-create_date'),
    }
    return render(request, 'home/index.html', context)
def signup(request):
    """
    Handle user signup.

    GET renders an empty form; a valid POST saves the user with a properly
    hashed password and shows the completion page.  An invalid POST falls
    through and re-renders the bound form so field errors are displayed.
    """
    if request.method == 'POST':
        signup_form = SignUpForm(request.POST)
        if signup_form.is_valid():
            user_instance = signup_form.save(commit=False)
            # BUG FIX: `.get['password']` subscripted the bound dict method
            # and raised TypeError on every signup; index cleaned_data
            # directly (the key is guaranteed present after is_valid()).
            user_instance.set_password(signup_form.cleaned_data['password'])
            user_instance.save()
            return render(request, 'accounts/signup_complete.html',
                          {'username': user_instance.username})
    else:
        signup_form = SignUpForm()
    return render(request, 'accounts/signup.html', {'form': signup_form})
"django.shortcuts.render"
] | [((318, 361), 'django.shortcuts.render', 'render', (['request', '"""home/index.html"""', 'context'], {}), "(request, 'home/index.html', context)\n", (324, 361), False, 'from django.shortcuts import render\n'), ((844, 906), 'django.shortcuts.render', 'render', (['request', '"""accounts/signup.html"""', "{'form': signup_form}"], {}), "(request, 'accounts/signup.html', {'form': signup_form})\n", (850, 906), False, 'from django.shortcuts import render\n'), ((693, 784), 'django.shortcuts.render', 'render', (['request', '"""accounts/signup_complete.html"""', "{'username': user_instance.username}"], {}), "(request, 'accounts/signup_complete.html', {'username': user_instance\n .username})\n", (699, 784), False, 'from django.shortcuts import render\n')] |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago Taxi example using TFX DSL on Kubeflow with Google Cloud services."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from typing import Dict, List, Optional, Text
from absl import app
from absl import flags
import tensorflow_model_analysis as tfma
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.extensions.google_cloud_ai_platform.pusher import executor as ai_platform_pusher_executor
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
FLAGS = flags.FLAGS
flags.DEFINE_bool('distributed_training', False,
                  'If True, enable distributed training.')
# Pipeline name doubles as the Kubeflow run name and the GCS sub-directory.
_pipeline_name = 'chicago_taxi_pipeline_kubeflow_gcp'
# Directory and data locations (uses Google Cloud Storage).
# NOTE(review): 'my-bucket' and 'my-gcp-project' below are placeholders —
# replace them with real values before running this pipeline.
_input_bucket = 'gs://my-bucket'
_output_bucket = 'gs://my-bucket'
_tfx_root = os.path.join(_output_bucket, 'tfx')
_pipeline_root = os.path.join(_tfx_root, _pipeline_name)
# Google Cloud Platform project id to use when deploying this pipeline.
_project_id = 'my-gcp-project'
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
# Copy this from the current directory to a GCS bucket and update the location
# below.
_module_file = os.path.join(_input_bucket, 'taxi_utils.py')
# Region to use for Dataflow jobs and AI Platform jobs.
# Dataflow: https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
# AI Platform: https://cloud.google.com/ml-engine/docs/tensorflow/regions
_gcp_region = 'us-central1'
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
_ai_platform_training_args = {
    'project': _project_id,
    'region': _gcp_region,
    # Starting from TFX 0.14, training on AI Platform uses custom containers:
    # https://cloud.google.com/ml-engine/docs/containers-overview
    # You can specify a custom container here. If not specified, TFX will use a
    # a public container image matching the installed version of TFX.
    # 'masterConfig': { 'imageUri': 'gcr.io/my-project/my-container' },
    # Note that if you do specify a custom container, ensure the entrypoint
    # calls into TFX's run_executor script (tfx/scripts/run_executor.py)
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
_ai_platform_serving_args = {
    'model_name': 'chicago_taxi',
    'project_id': _project_id,
    # The region to use when serving the model. See available regions here:
    # https://cloud.google.com/ml-engine/docs/regions
    # Note that serving currently only supports a single region:
    # https://cloud.google.com/ml-engine/reference/rest/v1/projects.models#Model
    'regions': [_gcp_region],
}
def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    module_file: Text,
    ai_platform_training_args: Dict[Text, Text],
    ai_platform_serving_args: Dict[Text, Text],
    beam_pipeline_args: Optional[List[Text]] = None) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX and Kubeflow Pipelines.

  Args:
    pipeline_name: name of the TFX pipeline being created.
    pipeline_root: root directory of the pipeline. Should be a valid GCS path.
    module_file: uri of the module files used in Trainer and Transform
      components.
    ai_platform_training_args: Args of CAIP training job. Please refer to
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
      for detailed description.
    ai_platform_serving_args: Args of CAIP model deployment. Please refer to
      https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
      for detailed description.
    beam_pipeline_args: Optional list of beam pipeline options. Please refer to
      https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options.
      When this argument is not provided, the default is to use GCP
      DataflowRunner with 50GB disk size as specified in this function. If an
      empty list is passed in, default specified by Beam will be used, which can
      be found at
      https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-dataflow-pipeline-options

  Returns:
    A TFX pipeline object.
  """
  # The rate at which to sample rows from the Taxi dataset using BigQuery.
  # The full taxi dataset is > 200M record. In the interest of resource
  # savings and time, we've set the default for this example to be much smaller.
  # Feel free to crank it up and process the full dataset!
  # By default it generates a 0.1% random sample.
  query_sample_rate = data_types.RuntimeParameter(
      name='query_sample_rate', ptype=float, default=0.001)

  # This is the upper bound of FARM_FINGERPRINT in Bigquery (ie the max value of
  # signed int64).
  max_int64 = '0x7FFFFFFFFFFFFFFF'

  # The query that extracts the examples from BigQuery. The Chicago Taxi dataset
  # used for this example is a public dataset available on Google AI Platform.
  # https://console.cloud.google.com/marketplace/details/city-of-chicago-public-data/chicago-taxi-trips
  query = """
          SELECT
            pickup_community_area,
            fare,
            EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month,
            EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour,
            EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day,
            UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp,
            pickup_latitude,
            pickup_longitude,
            dropoff_latitude,
            dropoff_longitude,
            trip_miles,
            pickup_census_tract,
            dropoff_census_tract,
            payment_type,
            company,
            trip_seconds,
            dropoff_community_area,
            tips
          FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
          WHERE (ABS(FARM_FINGERPRINT(unique_key)) / {max_int64})
            < {query_sample_rate}""".format(
                max_int64=max_int64, query_sample_rate=str(query_sample_rate))

  # Beam args to run data processing on DataflowRunner.
  #
  # TODO(b/151114974): Remove `disk_size_gb` flag after default is increased.
  # TODO(b/151116587): Remove `shuffle_mode` flag after default is changed.
  # TODO(b/156874687): Remove `machine_type` after IP addresses are no longer a
  # scaling bottleneck.
  if beam_pipeline_args is None:
    beam_pipeline_args = [
        '--runner=DataflowRunner',
        '--project=' + _project_id,
        '--temp_location=' + os.path.join(_output_bucket, 'tmp'),
        '--region=' + _gcp_region,
        # Temporary overrides of defaults.
        '--disk_size_gb=50',
        '--experiments=shuffle_mode=auto',
        '--machine_type=n1-standard-8',
    ]

  # Number of training steps (batches) to run in the Trainer. Note: despite
  # the name suggesting otherwise elsewhere, these are steps, not epochs.
  train_steps = data_types.RuntimeParameter(
      name='train_steps',
      default=10000,
      ptype=int,
  )
  # Number of evaluation steps (batches).
  eval_steps = data_types.RuntimeParameter(
      name='eval_steps',
      default=5000,
      ptype=int,
  )

  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = big_query_example_gen_component.BigQueryExampleGen(query=query)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])

  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])

  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)

  # Update ai_platform_training_args if distributed training was enabled.
  # Number of worker machines used in distributed training.
  worker_count = data_types.RuntimeParameter(
      name='worker_count',
      default=2,
      ptype=int,
  )
  # Type of worker machines used in distributed training.
  worker_type = data_types.RuntimeParameter(
      name='worker_type',
      default='standard',
      ptype=str,
  )
  # Deep-copied so the caller's dict is not mutated when the distributed
  # training keys are merged in below.
  local_training_args = copy.deepcopy(ai_platform_training_args)

  if FLAGS.distributed_training:
    local_training_args.update({
        # You can specify the machine types, the number of replicas for workers
        # and parameter servers.
        # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#ScaleTier
        'scaleTier': 'CUSTOM',
        'masterType': 'large_model',
        'workerType': worker_type,
        'parameterServerType': 'standard',
        'workerCount': worker_count,
        'parameterServerCount': 1
    })

  # Uses user-provided Python function that implements a model using TF-Learn
  # to train a model on Google Cloud AI Platform.
  trainer = Trainer(
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_trainer_executor.Executor),
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args={'num_steps': train_steps},
      eval_args={'num_steps': eval_steps},
      custom_config={
          ai_platform_trainer_executor.TRAINING_ARGS_KEY:
              local_training_args
      })

  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))

  # Uses TFMA to compute a evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])

  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      # Change threshold will be ignored if there is no baseline (first run).
      eval_config=eval_config)

  # Checks whether the model passed the validation steps and pushes the model
  # to Google Cloud AI Platform if check passed.
  # TODO(b/162451308): Add pusher back to components list once AIP Prediction
  # Service supports TF>=2.3.
  _ = Pusher(
      custom_executor_spec=executor_spec.ExecutorClassSpec(
          ai_platform_pusher_executor.Executor),
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      custom_config={
          ai_platform_pusher_executor.SERVING_ARGS_KEY: ai_platform_serving_args
      })

  # Note: the pusher above is intentionally left out of the components list
  # (see the TODO); it is still constructed so it can be re-added easily.
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, model_resolver, evaluator
      ],
      beam_pipeline_args=beam_pipeline_args,
  )
def main(unused_argv):
  """Builds the taxi pipeline and submits it through the Kubeflow DAG runner."""
  # Metadata config. The defaults work with a standard Kubeflow install of
  # KF Pipelines; the lightweight deployment option may require overrides.
  kf_metadata = kubeflow_dag_runner.get_default_kubeflow_metadata_config()

  # The tfx cli tool exports KUBEFLOW_TFX_IMAGE so that a custom docker image
  # can be injected; when unset, the runner picks its own default image.
  runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
      kubeflow_metadata_config=kf_metadata,
      tfx_image=os.environ.get('KUBEFLOW_TFX_IMAGE', None))

  taxi_pipeline = create_pipeline(
      pipeline_name=_pipeline_name,
      pipeline_root=_pipeline_root,
      module_file=_module_file,
      ai_platform_training_args=_ai_platform_training_args,
      ai_platform_serving_args=_ai_platform_serving_args,
  )
  kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(taxi_pipeline)
if __name__ == '__main__':
  # absl.app parses the defined flags (e.g. --distributed_training) and then
  # dispatches to main().
  app.run(main)
| [
"tfx.orchestration.pipeline.Pipeline",
"tfx.components.Transform",
"tfx.orchestration.data_types.RuntimeParameter",
"tensorflow_model_analysis.GenericValueThreshold",
"copy.deepcopy",
"tfx.components.ExampleValidator",
"tfx.orchestration.kubeflow.kubeflow_dag_runner.KubeflowDagRunnerConfig",
"tfx.comp... | [((1987, 2080), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""distributed_training"""', '(False)', '"""If True, enable distributed training."""'], {}), "('distributed_training', False,\n 'If True, enable distributed training.')\n", (2004, 2080), False, 'from absl import flags\n'), ((2290, 2325), 'os.path.join', 'os.path.join', (['_output_bucket', '"""tfx"""'], {}), "(_output_bucket, 'tfx')\n", (2302, 2325), False, 'import os\n'), ((2343, 2382), 'os.path.join', 'os.path.join', (['_tfx_root', '_pipeline_name'], {}), '(_tfx_root, _pipeline_name)\n', (2355, 2382), False, 'import os\n'), ((2749, 2793), 'os.path.join', 'os.path.join', (['_input_bucket', '"""taxi_utils.py"""'], {}), "(_input_bucket, 'taxi_utils.py')\n", (2761, 2793), False, 'import os\n'), ((6461, 6547), 'tfx.orchestration.data_types.RuntimeParameter', 'data_types.RuntimeParameter', ([], {'name': '"""query_sample_rate"""', 'ptype': 'float', 'default': '(0.001)'}), "(name='query_sample_rate', ptype=float, default=\n 0.001)\n", (6488, 6547), False, 'from tfx.orchestration import data_types\n'), ((8707, 8780), 'tfx.orchestration.data_types.RuntimeParameter', 'data_types.RuntimeParameter', ([], {'name': '"""train_steps"""', 'default': '(10000)', 'ptype': 'int'}), "(name='train_steps', default=10000, ptype=int)\n", (8734, 8780), False, 'from tfx.orchestration import data_types\n'), ((8856, 8927), 'tfx.orchestration.data_types.RuntimeParameter', 'data_types.RuntimeParameter', ([], {'name': '"""eval_steps"""', 'default': '(5000)', 'ptype': 'int'}), "(name='eval_steps', default=5000, ptype=int)\n", (8883, 8927), False, 'from tfx.orchestration import data_types\n'), ((9045, 9108), 'tfx.extensions.google_cloud_big_query.example_gen.component.BigQueryExampleGen', 'big_query_example_gen_component.BigQueryExampleGen', ([], {'query': 'query'}), '(query=query)\n', (9095, 9108), True, 'from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component\n'), 
((9205, 9260), 'tfx.components.StatisticsGen', 'StatisticsGen', ([], {'examples': "example_gen.outputs['examples']"}), "(examples=example_gen.outputs['examples'])\n", (9218, 9260), False, 'from tfx.components import StatisticsGen\n'), ((9325, 9414), 'tfx.components.SchemaGen', 'SchemaGen', ([], {'statistics': "statistics_gen.outputs['statistics']", 'infer_feature_shape': '(False)'}), "(statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=False)\n", (9334, 9414), False, 'from tfx.components import SchemaGen\n'), ((9515, 9622), 'tfx.components.ExampleValidator', 'ExampleValidator', ([], {'statistics': "statistics_gen.outputs['statistics']", 'schema': "schema_gen.outputs['schema']"}), "(statistics=statistics_gen.outputs['statistics'], schema=\n schema_gen.outputs['schema'])\n", (9531, 9622), False, 'from tfx.components import ExampleValidator\n'), ((9724, 9842), 'tfx.components.Transform', 'Transform', ([], {'examples': "example_gen.outputs['examples']", 'schema': "schema_gen.outputs['schema']", 'module_file': 'module_file'}), "(examples=example_gen.outputs['examples'], schema=schema_gen.\n outputs['schema'], module_file=module_file)\n", (9733, 9842), False, 'from tfx.components import Transform\n'), ((10009, 10079), 'tfx.orchestration.data_types.RuntimeParameter', 'data_types.RuntimeParameter', ([], {'name': '"""worker_count"""', 'default': '(2)', 'ptype': 'int'}), "(name='worker_count', default=2, ptype=int)\n", (10036, 10079), False, 'from tfx.orchestration import data_types\n'), ((10178, 10256), 'tfx.orchestration.data_types.RuntimeParameter', 'data_types.RuntimeParameter', ([], {'name': '"""worker_type"""', 'default': '"""standard"""', 'ptype': 'str'}), "(name='worker_type', default='standard', ptype=str)\n", (10205, 10256), False, 'from tfx.orchestration import data_types\n'), ((10305, 10345), 'copy.deepcopy', 'copy.deepcopy', (['ai_platform_training_args'], {}), '(ai_platform_training_args)\n', (10318, 10345), False, 'import copy\n'), ((12724, 
12890), 'tfx.components.Evaluator', 'Evaluator', ([], {'examples': "example_gen.outputs['examples']", 'model': "trainer.outputs['model']", 'baseline_model': "model_resolver.outputs['model']", 'eval_config': 'eval_config'}), "(examples=example_gen.outputs['examples'], model=trainer.outputs[\n 'model'], baseline_model=model_resolver.outputs['model'], eval_config=\n eval_config)\n", (12733, 12890), False, 'from tfx.components import Evaluator\n'), ((13556, 13803), 'tfx.orchestration.pipeline.Pipeline', 'pipeline.Pipeline', ([], {'pipeline_name': 'pipeline_name', 'pipeline_root': 'pipeline_root', 'components': '[example_gen, statistics_gen, schema_gen, example_validator, transform,\n trainer, model_resolver, evaluator]', 'beam_pipeline_args': 'beam_pipeline_args'}), '(pipeline_name=pipeline_name, pipeline_root=pipeline_root,\n components=[example_gen, statistics_gen, schema_gen, example_validator,\n transform, trainer, model_resolver, evaluator], beam_pipeline_args=\n beam_pipeline_args)\n', (13573, 13803), False, 'from tfx.orchestration import pipeline\n'), ((14107, 14165), 'tfx.orchestration.kubeflow.kubeflow_dag_runner.get_default_kubeflow_metadata_config', 'kubeflow_dag_runner.get_default_kubeflow_metadata_config', ([], {}), '()\n', (14163, 14165), False, 'from tfx.orchestration.kubeflow import kubeflow_dag_runner\n'), ((14400, 14442), 'os.environ.get', 'os.environ.get', (['"""KUBEFLOW_TFX_IMAGE"""', 'None'], {}), "('KUBEFLOW_TFX_IMAGE', None)\n", (14414, 14442), False, 'import os\n'), ((14462, 14573), 'tfx.orchestration.kubeflow.kubeflow_dag_runner.KubeflowDagRunnerConfig', 'kubeflow_dag_runner.KubeflowDagRunnerConfig', ([], {'kubeflow_metadata_config': 'metadata_config', 'tfx_image': 'tfx_image'}), '(kubeflow_metadata_config=\n metadata_config, tfx_image=tfx_image)\n', (14505, 14573), False, 'from tfx.orchestration.kubeflow import kubeflow_dag_runner\n'), ((14999, 15012), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (15006, 15012), False, 'from absl 
import app\n'), ((11014, 11084), 'tfx.dsl.components.base.executor_spec.ExecutorClassSpec', 'executor_spec.ExecutorClassSpec', (['ai_platform_trainer_executor.Executor'], {}), '(ai_platform_trainer_executor.Executor)\n', (11045, 11084), False, 'from tfx.dsl.components.base import executor_spec\n'), ((11745, 11764), 'tfx.types.Channel', 'Channel', ([], {'type': 'Model'}), '(type=Model)\n', (11752, 11764), False, 'from tfx.types import Channel\n'), ((11787, 11814), 'tfx.types.Channel', 'Channel', ([], {'type': 'ModelBlessing'}), '(type=ModelBlessing)\n', (11794, 11814), False, 'from tfx.types import Channel\n'), ((13262, 13331), 'tfx.dsl.components.base.executor_spec.ExecutorClassSpec', 'executor_spec.ExecutorClassSpec', (['ai_platform_pusher_executor.Executor'], {}), '(ai_platform_pusher_executor.Executor)\n', (13293, 13331), False, 'from tfx.dsl.components.base import executor_spec\n'), ((14629, 14688), 'tfx.orchestration.kubeflow.kubeflow_dag_runner.KubeflowDagRunner', 'kubeflow_dag_runner.KubeflowDagRunner', ([], {'config': 'runner_config'}), '(config=runner_config)\n', (14666, 14688), False, 'from tfx.orchestration.kubeflow import kubeflow_dag_runner\n'), ((8422, 8457), 'os.path.join', 'os.path.join', (['_output_bucket', '"""tmp"""'], {}), "(_output_bucket, 'tmp')\n", (8434, 8457), False, 'import os\n'), ((12025, 12062), 'tensorflow_model_analysis.ModelSpec', 'tfma.ModelSpec', ([], {'signature_name': '"""eval"""'}), "(signature_name='eval')\n", (12039, 12062), True, 'import tensorflow_model_analysis as tfma\n'), ((12097, 12115), 'tensorflow_model_analysis.SlicingSpec', 'tfma.SlicingSpec', ([], {}), '()\n', (12113, 12115), True, 'import tensorflow_model_analysis as tfma\n'), ((12127, 12177), 'tensorflow_model_analysis.SlicingSpec', 'tfma.SlicingSpec', ([], {'feature_keys': "['trip_start_hour']"}), "(feature_keys=['trip_start_hour'])\n", (12143, 12177), True, 'import tensorflow_model_analysis as tfma\n'), ((12387, 12441), 
'tensorflow_model_analysis.GenericValueThreshold', 'tfma.GenericValueThreshold', ([], {'lower_bound': "{'value': 0.6}"}), "(lower_bound={'value': 0.6})\n", (12413, 12441), True, 'import tensorflow_model_analysis as tfma\n'), ((12517, 12625), 'tensorflow_model_analysis.GenericChangeThreshold', 'tfma.GenericChangeThreshold', ([], {'direction': 'tfma.MetricDirection.HIGHER_IS_BETTER', 'absolute': "{'value': -1e-10}"}), "(direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10})\n", (12544, 12625), True, 'import tensorflow_model_analysis as tfma\n')] |
#===============================================================================
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#
# Intel(R) Integrated Performance Primitives Cryptography (Intel(R) IPP Cryptography)
#
import re
import sys
import os
import hashlib
import argparse
# Command-line interface; all four options are mandatory.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--header', action='store', required=True, help='Intel IPP Cryptography dispatcher will be generated for functions in Header')
parser.add_argument('-o', '--out-directory', action='store', required=True, help='Output folder for generated files')
parser.add_argument('-l', '--cpu-list', action='store', required=True, help='Actual CPU list: semicolon separated string')
parser.add_argument('-c', '--compiler', action='store', required=True, help='Compiler')
args = parser.parse_args()

Header = args.header
OutDir = args.out_directory
cpulist = args.cpu_list.split(';')
compiler = args.compiler

headerID = False  ## Header ID define to avoid multiple include like: #if !defined( __IPPCP_H__ )

from gen_disp_common import readNextFunction

# Read the whole header once; the context manager guarantees the handle is
# closed even if readlines() raises.
with open(Header, 'r') as HDR:
    h = HDR.readlines()

## keep filename only
(incdir, Header) = os.path.split(Header)

## original header name to declare external functions as internal for dispatcher
OrgH = Header

# Scanner state shared by the per-compiler loops below.
isFunctionFound = True
curLine = 0
FunName = ""
FunArg = ""
# Emit one dispatcher stub file per IPPAPI function found in the header.
# GNU/Clang builds get a NASM (.asm) jump stub; every other compiler gets a C
# file with inline asm. Both stubs jump through a per-CPU address table
# indexed by ippcpJumpIndexForMergedLibs; the first call lands on a lazy-init
# thunk that runs ippcpInit to select the CPU-specific implementation.
if(compiler == "GNU" or compiler == "Clang"):
    while (isFunctionFound == True):
        # Advance to the next function declaration in the header text.
        result = readNextFunction(h, curLine, headerID)
        curLine = result['curLine']
        FunName = result['FunName']
        FunArg = result['FunArg']
        isFunctionFound = result['success']
        if (isFunctionFound == True):
            ##################################################
            ## create dispatcher files ASM
            ##################################################
            # The filename embeds a short SHA-512 digest of the function name
            # to keep it unique without becoming overly long.
            ASMDISP= open( os.sep.join([OutDir, "jmp_" + FunName+"_" + hashlib.sha512(FunName.encode('utf-8')).hexdigest()[:8] +".asm"]), 'w' )
            for cpu in cpulist:
                ASMDISP.write("extern "+cpu+"_"+FunName+":function\n")
            ASMDISP.write("extern ippcpJumpIndexForMergedLibs\n")
            ASMDISP.write("extern ippcpInit:function\n\n")
            # Address table: slot 0 is the init thunk, then one entry per CPU.
            ASMDISP.write("""
segment .data
align 4
dd .Lin_{FunName}
.Larraddr_{FunName}:
""".format(FunName=FunName))
            for cpu in cpulist:
                ASMDISP.write(" dd "+cpu+"_"+FunName+"\n")
            # Code section: PIC-style GOT addressing; endbr32 bytes keep the
            # stub compatible with CET indirect-branch tracking.
            ASMDISP.write("""
segment .text
extern _GLOBAL_OFFSET_TABLE_
global {FunName}:function ({FunName}.LEnd{FunName} - {FunName})
.Lin_{FunName}:
    {endbr32}
    push ebx
    mov ebx, eax
    call ippcpInit wrt ..plt
    pop ebx
align 16
{FunName}:
    {endbr32}
    call .L1
.L1:
    pop eax
    add eax, _GLOBAL_OFFSET_TABLE_ + $$ - .L1 wrt ..gotpc
    mov edx, [eax + ippcpJumpIndexForMergedLibs wrt ..got]
    mov edx, [edx]
    jmp dword [eax + edx*4 + .Larraddr_{FunName} wrt ..gotoff]
.LEnd{FunName}:
""".format(FunName=FunName, endbr32='db 0xf3, 0x0f, 0x1e, 0xfb'))
            ASMDISP.close()
else:
    while (isFunctionFound == True):
        # Advance to the next function declaration in the header text.
        result = readNextFunction(h, curLine, headerID)
        curLine = result['curLine']
        FunName = result['FunName']
        FunArg = result['FunArg']
        isFunctionFound = result['success']
        if (isFunctionFound == True):
            ##################################################
            ## create dispatcher files: C file with inline asm
            ##################################################
            DISP= open( os.sep.join([OutDir, "jmp_"+FunName+"_" + hashlib.sha512(FunName.encode('utf-8')).hexdigest()[:8] + ".c"]), 'w' )
            DISP.write("""#include "ippcpdefs.h"\n\n""")
            DISP.write("typedef void (*IPP_PROC)(void);\n\n")
            DISP.write("extern int ippcpJumpIndexForMergedLibs;\n")
            DISP.write("extern IPP_STDCALL ippcpInit();\n\n")
            DISP.write("extern IppStatus IPP_STDCALL in_"+FunName+FunArg+";\n")
            for cpu in cpulist:
                DISP.write("extern IppStatus IPP_STDCALL "+cpu+"_"+FunName+FunArg+";\n")
            # Per-CPU address table emitted as inline-asm .data directives;
            # slot 0 is the lazy-init thunk in_<name>.
            DISP.write("""
__asm( " .data");
__asm( " .align 4");
__asm( "arraddr:");
__asm( " .long in_{FunName}");""".format(FunName=FunName))
            # `size` tracks the table size in bytes (4 bytes per entry).
            size = 4
            for cpu in cpulist:
                size = size + 4
                DISP.write("""\n__asm( " .long {cpu}_{FunName}");""".format(FunName=FunName, cpu=cpu))
            DISP.write("""
__asm( " .type arraddr,@object");
__asm( " .size arraddr,{size}");
__asm( " .data");\n""".format(size=size))
            # The naked stub: jump through the table, or fall into in_<name>,
            # which runs ippcpInit once and retries from .L0.
            DISP.write("""
#undef IPPAPI
#define IPPAPI(type,name,arg) __declspec(naked) void IPP_STDCALL name arg
__declspec(naked) IPP_PROC {FunName}{FunArg}
{{
__asm( ".L0: call .L1");
__asm( ".L1: pop %eax");
__asm( "add $_GLOBAL_OFFSET_TABLE_ - .L1, %eax" );
__asm( "movd ippcpJumpIndexForMergedLibs@GOT(%eax), %xmm0" );
__asm( "movd %xmm0, %edx" );
__asm( "mov (%edx), %edx" );
__asm( "jmp *(arraddr@GOTOFF+4)(%eax,%edx,4)" );
__asm( ".global in_{FunName}" );
__asm( "in_{FunName}:" );
{endbr32}
__asm( "push %ebx" );
__asm( "mov %eax, %ebx" );
__asm( "call ippcpInit@PLT" );
__asm( "pop %ebx" );
__asm( "jmp .L0" );
}};
""".format(FunName=FunName, FunArg=FunArg, endbr32='__asm( ".byte 0xf3, 0x0f, 0x1e, 0xfb" );'))
            DISP.close()
| [
"gen_disp_common.readNextFunction",
"argparse.ArgumentParser",
"os.path.split"
] | [((912, 937), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (935, 937), False, 'import argparse\n'), ((1803, 1824), 'os.path.split', 'os.path.split', (['Header'], {}), '(Header)\n', (1816, 1824), False, 'import os\n'), ((2089, 2127), 'gen_disp_common.readNextFunction', 'readNextFunction', (['h', 'curLine', 'headerID'], {}), '(h, curLine, headerID)\n', (2105, 2127), False, 'from gen_disp_common import readNextFunction\n'), ((3904, 3942), 'gen_disp_common.readNextFunction', 'readNextFunction', (['h', 'curLine', 'headerID'], {}), '(h, curLine, headerID)\n', (3920, 3942), False, 'from gen_disp_common import readNextFunction\n')] |
from kickoff.kickoff_training import KickOff, KickOff1v1, KickOffOrange
from math import pi
# Blue-team solo kickoff drills. Only the left spawn is active; the commented
# entries are the other standard spawn positions and can be toggled on.
kickoff_exercises = [
    # KickOff('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
    # KickOff('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
    # KickOff('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
    KickOff('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
    # KickOff('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(.25 * pi)),
]
# Orange-team variant of the same drills; only the right spawn is active.
kickoff_orange_exercises = [
    # KickOffOrange('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
    # KickOffOrange('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
    # KickOffOrange('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
    # KickOffOrange('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
    KickOffOrange('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(.25 * pi)),
]
# 1v1 variant (with an opponent); only the right spawn is active.
kickoff_1v1_exercises = [
    # KickOff1v1('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
    # KickOff1v1('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
    # KickOff1v1('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
    # KickOff1v1('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
    KickOff1v1('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(.25 * pi)),
]
| [
"kickoff.kickoff_training.KickOffOrange",
"kickoff.kickoff_training.KickOff1v1",
"kickoff.kickoff_training.KickOff"
] | [((393, 472), 'kickoff.kickoff_training.KickOff', 'KickOff', (['"""Left Kickoff"""'], {'car_start_x': '(2048)', 'car_start_y': '(-2560)', 'car_yaw': '(0.75 * pi)'}), "('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=0.75 * pi)\n", (400, 472), False, 'from kickoff.kickoff_training import KickOff, KickOff1v1, KickOffOrange\n'), ((987, 1078), 'kickoff.kickoff_training.KickOffOrange', 'KickOffOrange', (['"""Right Kickoff"""'], {'car_start_x': '(-2048)', 'car_start_y': '(-2560)', 'car_yaw': '(0.25 * pi)'}), "('Right Kickoff', car_start_x=-2048, car_start_y=-2560,\n car_yaw=0.25 * pi)\n", (1000, 1078), False, 'from kickoff.kickoff_training import KickOff, KickOff1v1, KickOffOrange\n'), ((1484, 1573), 'kickoff.kickoff_training.KickOff1v1', 'KickOff1v1', (['"""Right Kickoff"""'], {'car_start_x': '(-2048)', 'car_start_y': '(-2560)', 'car_yaw': '(0.25 * pi)'}), "('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=\n 0.25 * pi)\n", (1494, 1573), False, 'from kickoff.kickoff_training import KickOff, KickOff1v1, KickOffOrange\n')] |
import re
def parseDeviceId(id):
    """Extract the USB vendor and product IDs from a device instance ID.

    Device IDs look like ``USB\\VID_046D&PID_C52B\\...`` (setupapi style) or
    use ``#`` as the separator (device interface paths); matching is
    case-insensitive.

    Returns:
        A two-element list ``[vid, pid]`` of ints parsed as hexadecimal, or
        ``[None, None]`` when the string contains no vid/pid pair.
    """
    # Raw string keeps the pattern readable: r'\\' is the same single literal
    # backslash the original '\\\\' escapes matched.
    match = re.search(r'(#|\\)vid_([a-f0-9]{4})&pid_([a-f0-9]{4})(&|#|\\)', id, re.IGNORECASE)
    return [int(match.group(i), 16) if match else None for i in [2, 3]]
| [
"re.search"
] | [((43, 133), 're.search', 're.search', (['"""(#|\\\\\\\\)vid_([a-f0-9]{4})&pid_([a-f0-9]{4})(&|#|\\\\\\\\)"""', 'id', 're.IGNORECASE'], {}), "('(#|\\\\\\\\)vid_([a-f0-9]{4})&pid_([a-f0-9]{4})(&|#|\\\\\\\\)', id, re.\n IGNORECASE)\n", (52, 133), False, 'import re\n')] |
import cv2
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import numpy as np
import imutils
import threading
def main():
    """Run breathing-motion detection on the globally selected video.

    Combines frame differencing against a reference frame with MOG2
    background subtraction, keeps the largest connected moving component,
    and overlays a pulsing circle plus a "Breathing" / "Not Breathing"
    label on each frame. Press Esc to stop early.
    """
    cap = cv2.VideoCapture(vid_path)
    status1, previous_frame = cap.read()
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Reference frame for the differencing step.
    # NOTE(review): copy_frame is never refreshed inside the loop, so every
    # frame is diffed against the very first one; the dead
    # `previous_frame = current_frame` update below suggests a running
    # reference may have been intended -- confirm before changing.
    copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    hsv = np.zeros_like(previous_frame)
    hsv[..., 1] = 255
    t = 20                  # frames since significant motion was last seen
    red = 30                # animated circle radius
    check_red = 1           # 1 -> radius growing, 0 -> shrinking
    # NOTE(review): `start` is never set to 1 anywhere, so the
    # "Not Breathing" branch below can never trigger -- confirm intent.
    start = 0
    radiuce_up_limit = 60
    radiuce_low_limit = 30
    # Last known motion centre. Initialising to None (instead of leaving it
    # undefined) prevents a NameError when the stale-position branch runs
    # before any motion has ever been located.
    cX1 = None
    cY1 = None
    i = 0
    while (i < total_frames - 1):
        ret, frame = cap.read()
        if not ret:
            # CAP_PROP_FRAME_COUNT is only an estimate; stop on a failed read
            # instead of crashing on frame.copy() below.
            break
        i = i + 1
        frame1 = frame.copy()
        current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        current_frame = cv2.GaussianBlur(current_frame, (var_blur, var_blur), 0)
        # Frame differencing against the reference frame.
        frame_diff = cv2.absdiff(current_frame, copy_frame)
        ret, binary_image1 = cv2.threshold(frame_diff, 3, 255, cv2.THRESH_BINARY)
        # Background subtraction.
        binary_image3 = fgbg.apply(current_frame)
        # Only pixels flagged by both detectors count as motion.
        final_binary = cv2.bitwise_and(binary_image3, binary_image1)
        lab_val = 255
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(final_binary, connectivity=8,
                                             ltype=cv2.CV_32S)
        # Pulse the circle radius between the low and high limits.
        if check_red == 1:
            red = red + 10
            if red > radiuce_up_limit:
                check_red = 0
        else:
            red = red - 10
            if red == radiuce_low_limit:
                check_red = 1
        if lab_stats[1:, 4].size > 2:
            # Labels of the two largest foreground components, offset by 1 to
            # skip the background label 0. (Renamed from `re` to avoid
            # shadowing the stdlib regex module name.)
            comp_order = lab_stats[1:, 4].argsort()[-2:][::-1] + 1
            largest_mask = np.zeros(final_binary.shape, dtype=np.uint8)
            largest_mask[img_labeled == comp_order[0]] = lab_val
            cnts1 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # findContours return shape differs between OpenCV 2.x and 3.x.
            cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]
            X1 = cnts1[0][0]
            cX1 = X1[0][0]
            cY1 = X1[0][1]
            cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
            cv2.putText(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
            cv2.imshow('Frame', frame)
        else:
            t = t + 1
            if t > 40:
                if lab_stats[1:, 4].size > 0 and start == 1:
                    t = 0
                    cv2.putText(frame, 'Not Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
                    cv2.imshow('Frame', frame)
                else:
                    # Re-draw the last known position; skip the circle until
                    # motion has been seen at least once (fixes a NameError
                    # in the original when cX1/cY1 were still unset here).
                    if cX1 is not None:
                        cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
                    cv2.putText(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                    cv2.imshow('Frame', frame)
        previous_frame = current_frame
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # Esc quits early
            break
    cap.release()
    cv2.destroyAllWindows()
# Hide the root Tk window so only the file-open dialog is visible.
Tk().withdraw()
vid_path = askopenfilename(filetypes =(("Video File", "*.mp4"),("Video File","*.avi"),("Video File", "*.flv"),("All Files","*.*")),
                title = "Choose a video.")
no_of_threads = 1  # number of worker threads to spawn
var_blur = 3  # Gaussian blur kernel size read by main()
# NOTE(review): `thred` is re-bound inside the loop below, so this initial
# list value is never used.
thred = []
jobs = []
# Spawn the worker thread(s) running main(), then wait for them to finish.
# NOTE(review): cv2.imshow from a non-main thread is platform-dependent --
# confirm this works on the target OS.
for i in range(0, no_of_threads):
    thred = threading.Thread(target=main)
    jobs.append(thred)
for j in jobs:
    j.start()
for j in jobs:
    j.join()
#
#
#
| [
"cv2.createBackgroundSubtractorMOG2",
"imutils.is_cv2",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.threshold",
"cv2.waitKey",
"tkinter.filedialog.askopenfilename",
"cv2.putText",
"cv2.circle",
"cv2.cvtColor",
"cv2.GaussianBlur",
"cv2.bitwise_and",
"numpy.zeros",
"cv2.connectedComponentsWi... | [((3251, 3404), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {'filetypes': "(('Video File', '*.mp4'), ('Video File', '*.avi'), ('Video File', '*.flv'),\n ('All Files', '*.*'))", 'title': '"""Choose a video."""'}), "(filetypes=(('Video File', '*.mp4'), ('Video File', '*.avi'),\n ('Video File', '*.flv'), ('All Files', '*.*')), title='Choose a video.')\n", (3266, 3404), False, 'from tkinter.filedialog import askopenfilename\n'), ((174, 200), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vid_path'], {}), '(vid_path)\n', (190, 200), False, 'import cv2\n'), ((322, 370), 'cv2.cvtColor', 'cv2.cvtColor', (['previous_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(previous_frame, cv2.COLOR_BGR2GRAY)\n', (334, 370), False, 'import cv2\n'), ((382, 418), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (416, 418), False, 'import cv2\n'), ((429, 458), 'numpy.zeros_like', 'np.zeros_like', (['previous_frame'], {}), '(previous_frame)\n', (442, 458), True, 'import numpy as np\n'), ((3195, 3218), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3216, 3218), False, 'import cv2\n'), ((3523, 3552), 'threading.Thread', 'threading.Thread', ([], {'target': 'main'}), '(target=main)\n', (3539, 3552), False, 'import threading\n'), ((754, 794), 'cv2.cvtColor', 'cv2.cvtColor', (['frame1', 'cv2.COLOR_BGR2GRAY'], {}), '(frame1, cv2.COLOR_BGR2GRAY)\n', (766, 794), False, 'import cv2\n'), ((819, 875), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['current_frame', '(var_blur, var_blur)', '(0)'], {}), '(current_frame, (var_blur, var_blur), 0)\n', (835, 875), False, 'import cv2\n'), ((932, 970), 'cv2.absdiff', 'cv2.absdiff', (['current_frame', 'copy_frame'], {}), '(current_frame, copy_frame)\n', (943, 970), False, 'import cv2\n'), ((1008, 1060), 'cv2.threshold', 'cv2.threshold', (['frame_diff', '(3)', '(255)', 'cv2.THRESH_BINARY'], {}), '(frame_diff, 3, 255, cv2.THRESH_BINARY)\n', (1021, 1060), False, 
'import cv2\n'), ((1211, 1256), 'cv2.bitwise_and', 'cv2.bitwise_and', (['binary_image3', 'binary_image1'], {}), '(binary_image3, binary_image1)\n', (1226, 1256), False, 'import cv2\n'), ((1351, 1436), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['final_binary'], {'connectivity': '(8)', 'ltype': 'cv2.CV_32S'}), '(final_binary, connectivity=8, ltype=cv2.CV_32S\n )\n', (1383, 1436), False, 'import cv2\n'), ((3224, 3228), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (3226, 3228), False, 'from tkinter import Tk\n'), ((1910, 1954), 'numpy.zeros', 'np.zeros', (['final_binary.shape'], {'dtype': 'np.uint8'}), '(final_binary.shape, dtype=np.uint8)\n', (1918, 1954), True, 'import numpy as np\n'), ((2308, 2360), 'cv2.circle', 'cv2.circle', (['frame', '(cX1, cY1)', 'red', '(0, 255, 255)', '(3)'], {}), '(frame, (cX1, cY1), red, (0, 255, 255), 3)\n', (2318, 2360), False, 'import cv2\n'), ((2373, 2479), 'cv2.putText', 'cv2.putText', (['frame', '"""Breathing"""', '(10, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, \n 255, 255), 1, cv2.LINE_AA)\n", (2384, 2479), False, 'import cv2\n'), ((2477, 2503), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2487, 2503), False, 'import cv2\n'), ((3113, 3127), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3124, 3127), False, 'import cv2\n'), ((2146, 2162), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (2160, 2162), False, 'import imutils\n'), ((2685, 2792), 'cv2.putText', 'cv2.putText', (['frame', '"""Not Breathing"""', '(10, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'Not Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 0, 255), 1, cv2.LINE_AA)\n", (2696, 2792), False, 'import cv2\n'), ((2795, 2821), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2805, 2821), False, 'import 
cv2\n'), ((2855, 2907), 'cv2.circle', 'cv2.circle', (['frame', '(cX1, cY1)', 'red', '(0, 255, 255)', '(3)'], {}), '(frame, (cX1, cY1), red, (0, 255, 255), 3)\n', (2865, 2907), False, 'import cv2\n'), ((2924, 3030), 'cv2.putText', 'cv2.putText', (['frame', '"""Breathing"""', '(10, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, \n 255, 255), 1, cv2.LINE_AA)\n", (2935, 3030), False, 'import cv2\n'), ((3032, 3058), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (3042, 3058), False, 'import cv2\n')] |
from abc import (
ABC,
abstractmethod,
)
from argparse import (
ArgumentError,
ArgumentParser,
Namespace,
_ArgumentGroup,
_SubParsersAction,
)
import asyncio
import logging
from typing import (
Any,
cast,
Iterable,
Tuple,
Type,
)
from async_service import background_asyncio_service
from lahja import EndpointAPI
from eth.abc import AtomicDatabaseAPI
from eth_utils import (
to_tuple,
ValidationError,
)
from p2p.asyncio_utils import create_task, wait_first
from trinity.boot_info import BootInfo
from trinity.config import (
Eth1AppConfig,
)
from trinity.components.builtin.metrics.component import metrics_service_from_args
from trinity.components.builtin.metrics.service.asyncio import AsyncioMetricsService
from trinity.components.builtin.metrics.service.noop import NOOP_METRICS_SERVICE
from trinity.constants import (
NETWORKING_EVENTBUS_ENDPOINT,
SYNC_FULL,
SYNC_LIGHT,
SYNC_BEAM,
)
from trinity.chains.base import AsyncChainAPI
from trinity.components.builtin.syncer.cli import NormalizeCheckpointURI
from trinity.db.eth1.chain import AsyncChainDB
from trinity.db.eth1.header import AsyncHeaderDB
from trinity.extensibility.asyncio import (
AsyncioIsolatedComponent
)
from trinity.nodes.base import (
Node,
)
from trinity.protocol.common.peer import (
BasePeer,
BasePeerPool,
)
from trinity.protocol.eth.peer import (
ETHPeerPool,
)
from trinity.protocol.les.peer import (
LESPeerPool,
)
from trinity.sync.full.service import (
FullChainSyncer,
)
from trinity.sync.beam.service import (
BeamSyncService,
)
from trinity.sync.header.chain import (
HeaderChainSyncer,
)
from trinity.sync.light.chain import (
LightChainSyncer,
)
def add_shared_argument(arg_group: _ArgumentGroup, arg_name: str, **kwargs: Any) -> None:
    """Register *arg_name* on *arg_group*, tolerating duplicate registration.

    Several sync strategies share the same CLI flag but only one of them can
    actually add it; the first registration wins and later identical ones are
    silently ignored.
    """
    try:
        arg_group.add_argument(arg_name, **kwargs)
    except ArgumentError as err:
        if f"conflicting option string: {arg_name}" not in str(err):
            # A different argparse failure than the duplicate-flag case:
            # surface it to the caller.
            raise
        # Duplicate flag from another strategy: intentionally ignored.
def add_sync_from_checkpoint_arg(arg_group: _ArgumentGroup) -> None:
    """Register the shared ``--sync-from-checkpoint`` flag on *arg_group*."""
    help_text = (
        "Start syncing from a trusted checkpoint specified using URI syntax:"
        "By specific block, eth://block/byhash/<hash>?score=<score>"
        "Let etherscan pick a block near the tip, eth://block/byetherscan/latest"
    )
    add_shared_argument(
        arg_group,
        '--sync-from-checkpoint',
        action=NormalizeCheckpointURI,
        default=None,
        help=help_text,
    )
def add_disable_backfill_arg(arg_group: _ArgumentGroup) -> None:
    """Register the shared ``--disable-backfill`` flag on *arg_group*."""
    options = {
        'action': "store_true",
        'default': False,
        'help': "Disable backfilling of historical headers and blocks",
    }
    add_shared_argument(arg_group, '--disable-backfill', **options)
class BaseSyncStrategy(ABC):
    """Interface for the pluggable chain sync strategies (one per --sync-mode)."""
    @classmethod
    def configure_parser(cls, arg_group: _ArgumentGroup) -> None:
        """
        Configure the argument parser for the specific sync strategy.
        """
        # Default implementation: this strategy adds no CLI flags of its own.
        pass
    @classmethod
    @abstractmethod
    def get_sync_mode(cls) -> str:
        """Return the ``--sync-mode`` CLI value that selects this strategy."""
        ...
    @abstractmethod
    async def sync(self,
                   args: Namespace,
                   logger: logging.Logger,
                   chain: AsyncChainAPI,
                   base_db: AtomicDatabaseAPI,
                   peer_pool: BasePeerPool,
                   event_bus: EndpointAPI) -> None:
        """Drive the sync process until it finishes or is cancelled."""
        ...
class NoopSyncStrategy(BaseSyncStrategy):
    """Strategy for ``--sync-mode=none``: performs no syncing at all."""
    @classmethod
    def get_sync_mode(cls) -> str:
        return 'none'
    async def sync(self,
                   args: Namespace,
                   logger: logging.Logger,
                   chain: AsyncChainAPI,
                   base_db: AtomicDatabaseAPI,
                   peer_pool: BasePeerPool,
                   event_bus: EndpointAPI) -> None:
        """Log once, then block forever (a never-resolved Future keeps the
        coroutine alive until the surrounding service is cancelled)."""
        mode = self.get_sync_mode()
        logger.info("Node running without sync (--sync-mode=%s)", mode)
        await asyncio.Future()
class FullSyncStrategy(BaseSyncStrategy):
    """Strategy selected by ``SYNC_FULL``; drives ``FullChainSyncer``."""
    @classmethod
    def get_sync_mode(cls) -> str:
        return SYNC_FULL
    async def sync(self,
                   args: Namespace,
                   logger: logging.Logger,
                   chain: AsyncChainAPI,
                   base_db: AtomicDatabaseAPI,
                   peer_pool: BasePeerPool,
                   event_bus: EndpointAPI) -> None:
        """Run the full-chain syncer until it returns."""
        chain_db = AsyncChainDB(base_db)
        eth_peers = cast(ETHPeerPool, peer_pool)
        service = FullChainSyncer(chain, chain_db, base_db, eth_peers)
        await service.run()
class BeamSyncStrategy(BaseSyncStrategy):
    """Strategy selected by ``SYNC_BEAM``; drives ``BeamSyncService``."""
    @classmethod
    def get_sync_mode(cls) -> str:
        return SYNC_BEAM
    @classmethod
    def configure_parser(cls, arg_group: _ArgumentGroup) -> None:
        """Register beam-sync specific flags plus the shared ones."""
        arg_group.add_argument(
            '--force-beam-block-number',
            type=int,
            default=None,
            help="Force beam sync to activate on a specific block number (for testing)",
        )
        # Shared with other strategies; duplicates are tolerated.
        add_disable_backfill_arg(arg_group)
        add_sync_from_checkpoint_arg(arg_group)
    async def sync(self,
                   args: Namespace,
                   logger: logging.Logger,
                   chain: AsyncChainAPI,
                   base_db: AtomicDatabaseAPI,
                   peer_pool: BasePeerPool,
                   event_bus: EndpointAPI) -> None:
        """Run the beam sync service until it finishes."""
        chain_db = AsyncChainDB(base_db)
        eth_peers = cast(ETHPeerPool, peer_pool)
        service = BeamSyncService(
            chain,
            chain_db,
            base_db,
            eth_peers,
            event_bus,
            args.sync_from_checkpoint,
            args.force_beam_block_number,
            not args.disable_backfill,
        )
        async with background_asyncio_service(service) as manager:
            await manager.wait_finished()
class HeaderSyncStrategy(BaseSyncStrategy):
    """Strategy for ``--sync-mode=header``; drives ``HeaderChainSyncer``."""
    @classmethod
    def get_sync_mode(cls) -> str:
        return 'header'
    @classmethod
    def configure_parser(cls, arg_group: _ArgumentGroup) -> None:
        """Register the CLI flags this strategy shares with others."""
        add_sync_from_checkpoint_arg(arg_group)
        add_disable_backfill_arg(arg_group)
    async def sync(self,
                   args: Namespace,
                   logger: logging.Logger,
                   chain: AsyncChainAPI,
                   base_db: AtomicDatabaseAPI,
                   peer_pool: BasePeerPool,
                   event_bus: EndpointAPI) -> None:
        """Run the header-only syncer until it finishes."""
        eth_peers = cast(ETHPeerPool, peer_pool)
        service = HeaderChainSyncer(
            chain,
            AsyncChainDB(base_db),
            eth_peers,
            checkpoint=args.sync_from_checkpoint,
            enable_backfill=not args.disable_backfill,
        )
        async with background_asyncio_service(service) as manager:
            await manager.wait_finished()
class LightSyncStrategy(BaseSyncStrategy):
    """Strategy selected by ``SYNC_LIGHT``; drives ``LightChainSyncer``."""
    @classmethod
    def get_sync_mode(cls) -> str:
        return SYNC_LIGHT
    async def sync(self,
                   args: Namespace,
                   logger: logging.Logger,
                   chain: AsyncChainAPI,
                   base_db: AtomicDatabaseAPI,
                   peer_pool: BasePeerPool,
                   event_bus: EndpointAPI) -> None:
        """Run the light-client syncer until it finishes."""
        header_db = AsyncHeaderDB(base_db)
        les_peers = cast(LESPeerPool, peer_pool)
        service = LightChainSyncer(chain, header_db, les_peers)
        async with background_asyncio_service(service) as manager:
            await manager.wait_finished()
class SyncerComponent(AsyncioIsolatedComponent):
    """Component that runs the networking ``Node`` plus the selected sync strategy."""
    # Strategy used when --sync-mode is not given; must also appear in
    # ``strategies`` (checked in configure_parser).
    default_strategy = BeamSyncStrategy()
    strategies: Tuple[BaseSyncStrategy, ...] = (
        HeaderSyncStrategy(),
        FullSyncStrategy(),
        default_strategy,
        LightSyncStrategy(),
        NoopSyncStrategy(),
    )
    name = "Sync / PeerPool"
    endpoint_name = NETWORKING_EVENTBUS_ENDPOINT
    @property
    def is_enabled(self) -> bool:
        # Always on: this component hosts the peer pool, not just syncing.
        return True
    @classmethod
    def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
        """Register the --sync-mode choice and each strategy's own flags."""
        if type(cls.default_strategy) not in cls.extract_strategy_types():
            raise ValidationError(f"Default strategy {cls.default_strategy} not in strategies")
        syncing_parser = arg_parser.add_argument_group('sync mode')
        mode_parser = syncing_parser.add_mutually_exclusive_group()
        mode_parser.add_argument(
            '--sync-mode',
            choices=cls.extract_modes(),
            default=cls.default_strategy.get_sync_mode(),
        )
        for sync_strategy in cls.strategies:
            sync_strategy.configure_parser(syncing_parser)
    @classmethod
    def validate_cli(cls, boot_info: BootInfo) -> None:
        """Fail fast on CLI/config errors before the component process launches."""
        # this will trigger a ValidationError if the specified strategy isn't known.
        cls.get_active_strategy(boot_info)
        # This will trigger a ValidationError if the loaded EIP1085 file
        # has errors such as an unsupported mining method
        boot_info.trinity_config.get_app_config(Eth1AppConfig).get_chain_config()
    @classmethod
    @to_tuple
    def extract_modes(cls) -> Iterable[str]:
        """Yield the --sync-mode value of every registered strategy."""
        for strategy in cls.strategies:
            yield strategy.get_sync_mode()
    @classmethod
    @to_tuple
    def extract_strategy_types(cls) -> Iterable[Type[BaseSyncStrategy]]:
        """Yield the class of every registered strategy."""
        for strategy in cls.strategies:
            yield type(strategy)
    @classmethod
    def get_active_strategy(cls, boot_info: BootInfo) -> BaseSyncStrategy:
        """Return the single strategy whose mode matches --sync-mode (case-insensitive)."""
        # NOTE(review): annotated as BaseSyncStrategy but initialised to None;
        # Optional[BaseSyncStrategy] would describe this more accurately.
        active_strategy: BaseSyncStrategy = None
        for strategy in cls.strategies:
            if strategy.get_sync_mode().lower() == boot_info.args.sync_mode.lower():
                if active_strategy is not None:
                    raise ValidationError(
                        f"Ambiguous sync strategy. Both {active_strategy} and {strategy} apply"
                    )
                active_strategy = strategy
        if active_strategy is None:
            if boot_info.args.sync_mode is not None:
                raise ValidationError(
                    f"No matching sync mode for: --sync-mode={boot_info.args.sync_mode}"
                )
            # No --sync-mode given at all: fall back to the default.
            return cls.default_strategy
        else:
            return active_strategy
    async def do_run(self, event_bus: EndpointAPI) -> None:
        """Build the node, start it, and run the chosen sync strategy alongside it."""
        boot_info = self._boot_info
        if boot_info.args.enable_metrics:
            metrics_service = metrics_service_from_args(boot_info.args, AsyncioMetricsService)
        else:
            # Use a NoopMetricsService so that no code branches need to be taken if metrics
            # are disabled
            metrics_service = NOOP_METRICS_SERVICE
        trinity_config = boot_info.trinity_config
        NodeClass = trinity_config.get_app_config(Eth1AppConfig).node_class
        node = NodeClass(event_bus, metrics_service, trinity_config)
        strategy = self.get_active_strategy(boot_info)
        async with background_asyncio_service(node) as node_manager:
            sync_task = create_task(
                self.launch_sync(node, strategy, boot_info, event_bus), self.name)
            # The Node service is our responsibility, so we must exit if either that or the syncer
            # returns.
            node_manager_task = create_task(
                node_manager.wait_finished(), f'{NodeClass.__name__} wait_finished() task')
            tasks = [sync_task, node_manager_task]
            try:
                await wait_first(tasks, max_wait_after_cancellation=2)
            except asyncio.TimeoutError:
                self.logger.warning(
                    "Timed out waiting for tasks to terminate after cancellation: %s",
                    tasks
                )
    async def launch_sync(self,
                          node: Node[BasePeer],
                          strategy: BaseSyncStrategy,
                          boot_info: BootInfo,
                          event_bus: EndpointAPI) -> None:
        """Wait for the node to come up, then hand control to ``strategy.sync``."""
        await node.get_manager().wait_started()
        await strategy.sync(
            boot_info.args,
            self.logger,
            node.get_chain(),
            node.base_db,
            node.get_peer_pool(),
            event_bus,
        )
if __name__ == "__main__":
    # SyncerComponent depends on a separate component to get peer candidates, so when running it
    # you must pass the path to the discovery component's IPC file, like:
    # $ python .../syncer/component.py --trinity-root-dir /tmp/syncer \
    #       --connect-to-endpoints /tmp/syncer/mainnet/ipcs-eth1/discovery.ipc
    # Local import: only needed when this module is executed as a script.
    from trinity.extensibility.component import run_asyncio_eth1_component
    run_asyncio_eth1_component(SyncerComponent)
| [
"p2p.asyncio_utils.wait_first",
"eth_utils.ValidationError",
"trinity.db.eth1.header.AsyncHeaderDB",
"trinity.components.builtin.metrics.component.metrics_service_from_args",
"trinity.db.eth1.chain.AsyncChainDB",
"trinity.extensibility.component.run_asyncio_eth1_component",
"typing.cast",
"asyncio.Fut... | [((12835, 12878), 'trinity.extensibility.component.run_asyncio_eth1_component', 'run_asyncio_eth1_component', (['SyncerComponent'], {}), '(SyncerComponent)\n', (12861, 12878), False, 'from trinity.extensibility.component import run_asyncio_eth1_component\n'), ((4260, 4276), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (4274, 4276), False, 'import asyncio\n'), ((4754, 4775), 'trinity.db.eth1.chain.AsyncChainDB', 'AsyncChainDB', (['base_db'], {}), '(base_db)\n', (4766, 4775), False, 'from trinity.db.eth1.chain import AsyncChainDB\n'), ((4810, 4838), 'typing.cast', 'cast', (['ETHPeerPool', 'peer_pool'], {}), '(ETHPeerPool, peer_pool)\n', (4814, 4838), False, 'from typing import Any, cast, Iterable, Tuple, Type\n'), ((5751, 5772), 'trinity.db.eth1.chain.AsyncChainDB', 'AsyncChainDB', (['base_db'], {}), '(base_db)\n', (5763, 5772), False, 'from trinity.db.eth1.chain import AsyncChainDB\n'), ((5807, 5835), 'typing.cast', 'cast', (['ETHPeerPool', 'peer_pool'], {}), '(ETHPeerPool, peer_pool)\n', (5811, 5835), False, 'from typing import Any, cast, Iterable, Tuple, Type\n'), ((6009, 6043), 'async_service.background_asyncio_service', 'background_asyncio_service', (['syncer'], {}), '(syncer)\n', (6035, 6043), False, 'from async_service import background_asyncio_service\n'), ((6754, 6775), 'trinity.db.eth1.chain.AsyncChainDB', 'AsyncChainDB', (['base_db'], {}), '(base_db)\n', (6766, 6775), False, 'from trinity.db.eth1.chain import AsyncChainDB\n'), ((6789, 6817), 'typing.cast', 'cast', (['ETHPeerPool', 'peer_pool'], {}), '(ETHPeerPool, peer_pool)\n', (6793, 6817), False, 'from typing import Any, cast, Iterable, Tuple, Type\n'), ((6954, 6988), 'async_service.background_asyncio_service', 'background_asyncio_service', (['syncer'], {}), '(syncer)\n', (6980, 6988), False, 'from async_service import background_asyncio_service\n'), ((7523, 7545), 'trinity.db.eth1.header.AsyncHeaderDB', 'AsyncHeaderDB', (['base_db'], {}), '(base_db)\n', (7536, 7545), False, 
'from trinity.db.eth1.header import AsyncHeaderDB\n'), ((7559, 7587), 'typing.cast', 'cast', (['LESPeerPool', 'peer_pool'], {}), '(LESPeerPool, peer_pool)\n', (7563, 7587), False, 'from typing import Any, cast, Iterable, Tuple, Type\n'), ((7619, 7653), 'async_service.background_asyncio_service', 'background_asyncio_service', (['syncer'], {}), '(syncer)\n', (7645, 7653), False, 'from async_service import background_asyncio_service\n'), ((8354, 8431), 'eth_utils.ValidationError', 'ValidationError', (['f"""Default strategy {cls.default_strategy} not in strategies"""'], {}), "(f'Default strategy {cls.default_strategy} not in strategies')\n", (8369, 8431), False, 'from eth_utils import to_tuple, ValidationError\n'), ((10613, 10677), 'trinity.components.builtin.metrics.component.metrics_service_from_args', 'metrics_service_from_args', (['boot_info.args', 'AsyncioMetricsService'], {}), '(boot_info.args, AsyncioMetricsService)\n', (10638, 10677), False, 'from trinity.components.builtin.metrics.component import metrics_service_from_args\n'), ((11133, 11165), 'async_service.background_asyncio_service', 'background_asyncio_service', (['node'], {}), '(node)\n', (11159, 11165), False, 'from async_service import background_asyncio_service\n'), ((10230, 10320), 'eth_utils.ValidationError', 'ValidationError', (['f"""No matching sync mode for: --sync-mode={boot_info.args.sync_mode}"""'], {}), "(\n f'No matching sync mode for: --sync-mode={boot_info.args.sync_mode}')\n", (10245, 10320), False, 'from eth_utils import to_tuple, ValidationError\n'), ((9940, 10033), 'eth_utils.ValidationError', 'ValidationError', (['f"""Ambiguous sync strategy. Both {active_strategy} and {strategy} apply"""'], {}), "(\n f'Ambiguous sync strategy. 
Both {active_strategy} and {strategy} apply')\n", (9955, 10033), False, 'from eth_utils import to_tuple, ValidationError\n'), ((11652, 11700), 'p2p.asyncio_utils.wait_first', 'wait_first', (['tasks'], {'max_wait_after_cancellation': '(2)'}), '(tasks, max_wait_after_cancellation=2)\n', (11662, 11700), False, 'from p2p.asyncio_utils import create_task, wait_first\n')] |
import numpy as np
import scipy.stats as stats
import scipy.special as spec
import util
class HMCParams:
    """Hyperparameter bundle consumed by the hmc() sampler in this module."""
    def __init__(self, tau, tau_g, L, eta, mass, r_clip, grad_clip):
        # Noise scale parameters for the acceptance test and the gradients.
        self.tau, self.tau_g = tau, tau_g
        # Leapfrog trajectory settings: steps, step size, (diagonal) mass.
        self.L, self.eta, self.mass = L, eta, mass
        # Clipping bounds for likelihood ratios and per-example gradients.
        self.r_clip, self.grad_clip = r_clip, grad_clip
class GradClipCounter:
    """Mutable tally of clipped gradient terms vs. total gradient evaluations."""
    def __init__(self):
        # Both counters start at zero and are incremented by the sampler.
        self.clipped_grad, self.grad_accesses = 0, 0
def zcdp_iters(epsilon, delta, params, n, compute_less_grad=False):
    """Number of HMC iterations affordable under zCDP accounting.

    Converts the (epsilon, delta) budget into a zCDP budget ``rho`` and
    divides by the per-iteration cost (one noisy likelihood ratio plus the
    gradient evaluations of a trajectory).
    """
    log_delta = np.log(delta)
    rho = (np.sqrt(epsilon - log_delta) - np.sqrt(-log_delta))**2
    rho_l = 1 / (2 * params.tau**2 * n)
    rho_g = 1 / (2 * params.tau_g**2 * n)
    if compute_less_grad:
        # The accepted state's gradient is reused: L evaluations per
        # iteration plus a single up-front one.
        budget_ratio = (rho - rho_g) / (rho_l + params.L * rho_g)
    else:
        budget_ratio = rho / (rho_l + (params.L + 1) * rho_g)
    return int(budget_ratio)
def adp_delta(k, epsilon, params, n, compute_less_grad=False):
    """ADP delta implied by ``k`` iterations at the configured noise levels.

    Uses the closed-form delta(epsilon) expression of the Gaussian
    mechanism with total privacy parameter ``mu``.
    """
    if compute_less_grad:
        # L gradient evaluations per iteration plus one up-front.
        grad_evals = k * params.L + 1
    else:
        grad_evals = k * (params.L + 1)
    mu = k / (2 * params.tau**2 * n) + grad_evals / (2 * params.tau_g**2 * n)
    denom = 2 * np.sqrt(mu)
    upper = spec.erfc((epsilon - mu) / denom)
    lower = np.exp(epsilon) * spec.erfc((epsilon + mu) / denom)
    return (0.5 * (upper - lower)).sum()
def adp_iters(epsilon, delta, params, n, compute_less_grad=False):
    """Largest iteration count whose ADP delta stays within ``delta``.

    Starts from the (conservative) zCDP estimate, grows an upper bracket
    exponentially, then bisects between the brackets.
    """
    lo = zcdp_iters(epsilon, delta, params, n, compute_less_grad)
    hi = max(lo, 1)
    # Double the upper bracket until its delta crosses the target.
    while adp_delta(hi, epsilon, params, n, compute_less_grad) < delta:
        hi *= 2
    # Bisect (over fractional iteration counts) until the integer
    # endpoints are adjacent.
    while int(hi) - int(lo) > 1:
        mid = (lo + hi) / 2
        if adp_delta(mid, epsilon, params, n, compute_less_grad) > delta:
            hi = mid
        else:
            lo = mid
    # Prefer the larger endpoint when it still fits the budget.
    if adp_delta(int(hi), epsilon, params, n, compute_less_grad) < delta:
        return int(hi)
    return int(lo)
def hmc(problem, theta0, epsilon, delta, params, verbose=True, use_adp=True, compute_less_grad=False):
    """Differentially private Hamiltonian Monte Carlo sampler.

    The (epsilon, delta) budget is converted into a fixed iteration count
    (ADP accounting by default, plain zCDP otherwise).  Privacy comes from
    clipping and Gaussian-perturbing both the per-example log-likelihood
    ratios in the acceptance test and the per-example gradients used by the
    leapfrog integrator.

    :param problem: model object providing ``data``, ``temp_scale``,
        ``log_likelihood_no_sum``, ``log_likelihood_grad_clipped``,
        ``log_prior`` and ``log_prior_grad``.
    :param theta0: initial parameter vector (1-d array).
    :param epsilon: ADP epsilon budget.
    :param delta: ADP delta budget.
    :param params: ``HMCParams`` with the sampler settings.
    :param verbose: print progress every 100 iterations.
    :param use_adp: use the tighter ADP accountant instead of zCDP.
    :param compute_less_grad: reuse the accepted state's gradient so each
        iteration costs L instead of L + 1 gradient evaluations.
    :return: ``util.MCMCResult`` with the chain, leapfrog positions and
        clipping statistics.
    """
    data = problem.data
    n, _ = data.shape
    dim = theta0.size
    temp_scale = problem.temp_scale
    tau = params.tau
    tau_g = params.tau_g
    L = params.L
    eta = params.eta
    mass = params.mass
    r_clip = params.r_clip
    grad_clip = params.grad_clip
    # How many iterations the privacy budget affords.
    if not use_adp:
        iters = zcdp_iters(epsilon, delta, params, n, compute_less_grad)
    else:
        iters = adp_iters(epsilon, delta, params, n, compute_less_grad)
    if verbose:
        print("Iterations: {}".format(iters))
    sigma = tau * np.sqrt(n)
    chain = np.zeros((iters + 1, dim))
    chain[0, :] = theta0
    leapfrog_chain = np.zeros((iters * L, dim))
    clipped_r = np.zeros(iters)
    clipped_grad_counter = GradClipCounter()
    accepts = 0
    grad_noise_sigma = 2 * tau_g * np.sqrt(n) * grad_clip

    def grad_fun(theta):
        # Noisy, clipped gradient of the (tempered) log posterior; also
        # tallies clipping statistics for the returned result.
        ll_grad, clips = problem.log_likelihood_grad_clipped(grad_clip, theta, data)
        clipped_grad_counter.clipped_grad += clips
        clipped_grad_counter.grad_accesses += 1
        pri_grad = problem.log_prior_grad(theta)
        return temp_scale * (ll_grad + stats.norm.rvs(size=dim, scale=grad_noise_sigma)) + pri_grad

    if compute_less_grad:
        grad = grad_fun(theta0)
    llc = problem.log_likelihood_no_sum(theta0, data)
    for i in range(iters):
        current = chain[i, :]
        # TODO: this assumes a diagonal mass matrix M
        p = stats.norm.rvs(size=dim) * np.sqrt(mass)
        p_orig = p.copy()
        prop = current.copy()
        if compute_less_grad:
            grad_new = grad.copy()
        else:
            grad_new = grad_fun(current)
        # Leapfrog integration with noisy gradients.
        for j in range(L):
            p += 0.5 * eta * (grad_new)# - 0.5 * grad_noise_sigma**2 * p / mass)
            prop += eta * p / mass
            leapfrog_chain[i * L + j] = prop
            grad_new = grad_fun(prop)
            p += 0.5 * eta * (grad_new)# - 0.5 * grad_noise_sigma**2 * p / mass)
        llp = problem.log_likelihood_no_sum(prop, data)
        r = llp - llc
        # Clip the per-example ratios proportionally to the proposal
        # distance, then perturb the sum for privacy.
        d = np.sqrt(np.sum((current - prop)**2))
        clip = d * r_clip
        clipped_r[i] = np.sum(np.abs(r) > clip)
        r = np.clip(r, -clip, clip)
        lpp = problem.log_prior(prop)
        lpc = problem.log_prior(current)
        s = stats.norm.rvs(size=1, scale=sigma * d * 2 * r_clip)
        dp = 0.5 * np.sum(p_orig**2 / mass) - 0.5 * np.sum(p**2 / mass)
        dH = dp + temp_scale * (np.sum(r) + s) + lpp - lpc
        u = np.log(np.random.rand())
        # Penalty-style test: subtract half the variance of the injected
        # noise term (temp_scale * s) from the log acceptance threshold.
        if u < dH - 0.5 * (temp_scale * sigma * d * 2 * r_clip)**2:
            chain[i + 1, :] = prop
            if compute_less_grad:
                grad = grad_new
            # Bug fix: refresh the cached log-likelihood on *every*
            # acceptance.  Previously this only happened when
            # compute_less_grad was set, leaving llc stale (and the ratio
            # r = llp - llc wrong) for all later iterations otherwise.
            llc = llp
            accepts += 1
        else:
            chain[i + 1, :] = current
        if verbose and (i + 1) % 100 == 0:
            print("Iteration: {}".format(i + 1))
    if verbose:
        print("Gradient evals: {}".format(clipped_grad_counter.grad_accesses))
    return util.MCMCResult(
        problem, chain, leapfrog_chain, iters, accepts, np.sum(clipped_r) / n / iters,
        np.sum(clipped_grad_counter.clipped_grad) / n / clipped_grad_counter.grad_accesses
    )
| [
"numpy.clip",
"numpy.abs",
"numpy.sqrt",
"numpy.random.rand",
"numpy.log",
"scipy.stats.norm.rvs",
"numpy.exp",
"numpy.sum",
"numpy.zeros"
] | [((2760, 2786), 'numpy.zeros', 'np.zeros', (['(iters + 1, dim)'], {}), '((iters + 1, dim))\n', (2768, 2786), True, 'import numpy as np\n'), ((2833, 2859), 'numpy.zeros', 'np.zeros', (['(iters * L, dim)'], {}), '((iters * L, dim))\n', (2841, 2859), True, 'import numpy as np\n'), ((2876, 2891), 'numpy.zeros', 'np.zeros', (['iters'], {}), '(iters)\n', (2884, 2891), True, 'import numpy as np\n'), ((1281, 1296), 'numpy.exp', 'np.exp', (['epsilon'], {}), '(epsilon)\n', (1287, 1296), True, 'import numpy as np\n'), ((2736, 2746), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2743, 2746), True, 'import numpy as np\n'), ((4332, 4355), 'numpy.clip', 'np.clip', (['r', '(-clip)', 'clip'], {}), '(r, -clip, clip)\n', (4339, 4355), True, 'import numpy as np\n'), ((4449, 4501), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'size': '(1)', 'scale': '(sigma * d * 2 * r_clip)'}), '(size=1, scale=sigma * d * 2 * r_clip)\n', (4463, 4501), True, 'import scipy.stats as stats\n'), ((2989, 2999), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2996, 2999), True, 'import numpy as np\n'), ((3593, 3617), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'size': 'dim'}), '(size=dim)\n', (3607, 3617), True, 'import scipy.stats as stats\n'), ((3620, 3633), 'numpy.sqrt', 'np.sqrt', (['mass'], {}), '(mass)\n', (3627, 3633), True, 'import numpy as np\n'), ((4217, 4246), 'numpy.sum', 'np.sum', (['((current - prop) ** 2)'], {}), '((current - prop) ** 2)\n', (4223, 4246), True, 'import numpy as np\n'), ((4652, 4668), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4666, 4668), True, 'import numpy as np\n'), ((1255, 1266), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (1262, 1266), True, 'import numpy as np\n'), ((4302, 4311), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (4308, 4311), True, 'import numpy as np\n'), ((4521, 4547), 'numpy.sum', 'np.sum', (['(p_orig ** 2 / mass)'], {}), '(p_orig ** 2 / mass)\n', (4527, 4547), True, 'import numpy as np\n'), ((4554, 4575), 'numpy.sum', 
'np.sum', (['(p ** 2 / mass)'], {}), '(p ** 2 / mass)\n', (4560, 4575), True, 'import numpy as np\n'), ((5212, 5229), 'numpy.sum', 'np.sum', (['clipped_r'], {}), '(clipped_r)\n', (5218, 5229), True, 'import numpy as np\n'), ((5251, 5292), 'numpy.sum', 'np.sum', (['clipped_grad_counter.clipped_grad'], {}), '(clipped_grad_counter.clipped_grad)\n', (5257, 5292), True, 'import numpy as np\n'), ((568, 581), 'numpy.log', 'np.log', (['delta'], {}), '(delta)\n', (574, 581), True, 'import numpy as np\n'), ((594, 607), 'numpy.log', 'np.log', (['delta'], {}), '(delta)\n', (600, 607), True, 'import numpy as np\n'), ((1331, 1342), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (1338, 1342), True, 'import numpy as np\n'), ((3311, 3359), 'scipy.stats.norm.rvs', 'stats.norm.rvs', ([], {'size': 'dim', 'scale': 'grad_noise_sigma'}), '(size=dim, scale=grad_noise_sigma)\n', (3325, 3359), True, 'import scipy.stats as stats\n'), ((4606, 4615), 'numpy.sum', 'np.sum', (['r'], {}), '(r)\n', (4612, 4615), True, 'import numpy as np\n')] |