content
stringlengths
0
1.55M
<import_stmt>h5py<import_stmt>numpy<as>np<line_sep>file=h5py.File('/data2/wt/openimages/vc_feature/1coco_train_all_bu_2.hdf5' 'r')<for_stmt>keys file<block_start>feature=file[keys]['feature'][:]<line_sep>np.save('/data2/wt/openimages/vc_feature/coco_vc_all_bu/'+keys+'.npy' feature)<block_end>
#MenuTitle: New Tab with Overkerned Pairs # -*- coding: utf-8 -*- <import_from_future_stmt> division print_function unicode_literals<line_sep>__doc__=""" Asks a threshold percentage, and opens a new tab with all kern pairs going beyond the width threshold. """<import_stmt>vanilla<class_stmt>FindOverkerns(object)<block_start><def_stmt>__init__ self# Window 'self.w': <block_start>windowWidth=350<line_sep>windowHeight=135<line_sep>windowWidthResize=100# user can resize width by this value windowHeightResize=0# user can resize height by this value self.w=vanilla.FloatingWindow((windowWidth windowHeight) # default window size "Find Negative Overkerns in This Master" # window title minSize=(windowWidth windowHeight) # minimum size (for resizing) maxSize=(windowWidth+windowWidthResize windowHeight+windowHeightResize) # maximum size (for resizing) autosaveName="com.mekkablue.FindOverkerns.mainwindow"# stores last window position and size )<line_sep># UI elements: self.w.text_1=vanilla.TextBox((15-1 12+2 220 14) "Open tab with kerns beyond threshold:" sizeStyle='small')<line_sep>self.w.threshold=vanilla.EditText((225 12-1 -15 20) "40" sizeStyle='small')<line_sep>self.w.text_2=vanilla.TextBox((15-1 12+25 -15 14) "(Maximum percentage of letter widths that may be kerned.)" sizeStyle='small')<line_sep>self.w.limitToExportingGlyphs=vanilla.CheckBox((15 12+50 150 20) "Limit to exporting glyphs" value=<true> callback=self.SavePreferences sizeStyle='small')<line_sep># Run Button: self.w.runButton=vanilla.Button((-100-15 -20-15 -15 -15) "Open Tab" sizeStyle='regular' callback=self.FindOverkernsMain)<line_sep>self.w.setDefaultButton(self.w.runButton)<line_sep># Load Settings: <if_stmt><not>self.LoadPreferences()<block_start>print("Note: 'Find Overkerns' could not load preferences. 
Will resort to defaults")<block_end># Open window and focus on it: self.w.open()<line_sep>self.w.makeKey()<block_end><def_stmt>SavePreferences self sender<block_start><try_stmt><block_start>Glyphs.defaults["com.mekkablue.FindOverkerns.threshold"]=self.w.threshold.get()<line_sep>Glyphs.defaults["com.mekkablue.FindOverkerns.limitToExportingGlyphs"]=self.w.limitToExportingGlyphs.get()<block_end><except_stmt><block_start><return><false><block_end><return><true><block_end><def_stmt>LoadPreferences self<block_start><try_stmt><block_start>Glyphs.registerDefault("com.mekkablue.FindOverkerns.threshold" "40")<line_sep>Glyphs.registerDefault("com.mekkablue.FindOverkerns.limitToExportingGlyphs" <true>)<line_sep>self.w.threshold.set(Glyphs.defaults["com.mekkablue.FindOverkerns.threshold"])<line_sep>self.w.limitToExportingGlyphs.set(Glyphs.defaults["com.mekkablue.FindOverkerns.limitToExportingGlyphs"])<block_end><except_stmt><block_start><return><false><block_end><return><true><block_end><def_stmt>FindOverkernsMain self sender<block_start><try_stmt># brings macro window to front and clears its log: <block_start>Glyphs.clearLog()<line_sep># retrieve user entry: thresholdFactor=<none><try_stmt><block_start>thresholdFactor=float(Glyphs.defaults["com.mekkablue.FindOverkerns.threshold"])/100.0<block_end><except_stmt><block_start>Message(title="Value Error" message="The threshold value you entered is invalid" OKButton="Oops")<block_end>limitToExportingGlyphs=bool(Glyphs.defaults["com.mekkablue.FindOverkerns.limitToExportingGlyphs"])<line_sep># continuer if user entry is valid: <if_stmt><not>thresholdFactor<is><none><block_start>thisFont=Glyphs.font# frontmost font thisMaster=thisFont.selectedFontMaster# current master masterKerning=thisFont.kerning[thisMaster.id]# kerning dictionary tabText=""# the text appearing in the new tab # collect minimum widths for every kerning group: 
leftGroupMinimumWidths={}<line_sep>leftGroupNarrowestGlyphs={}<line_sep>rightGroupMinimumWidths={}<line_sep>rightGroupNarrowestGlyphs={}<if_stmt>limitToExportingGlyphs<block_start>theseGlyphs=[g<for>g thisFont.glyphs<if>g.export]<block_end><else_stmt><block_start>theseGlyphs=thisFont.glyphs<block_end><for_stmt>thisGlyph theseGlyphs<block_start>thisLayer=thisGlyph.layers[thisMaster.id]<line_sep># left side of the glyph (= right side of kern pair) <if_stmt>thisGlyph.leftKerningGroup<block_start><if_stmt>thisGlyph.leftKerningGroup<in>leftGroupMinimumWidths<block_start><if_stmt>thisLayer.width<l>leftGroupMinimumWidths[thisGlyph.leftKerningGroup]<block_start>leftGroupMinimumWidths[thisGlyph.leftKerningGroup]=thisLayer.width<line_sep>leftGroupNarrowestGlyphs[thisGlyph.leftKerningGroup]=thisGlyph.name<block_end><block_end><else_stmt><block_start>leftGroupMinimumWidths[thisGlyph.leftKerningGroup]=thisLayer.width<line_sep>leftGroupNarrowestGlyphs[thisGlyph.leftKerningGroup]=thisGlyph.name<block_end><block_end># right side of the glyph (= left side of kern pair) <if_stmt>thisGlyph.rightKerningGroup<block_start><if_stmt>thisGlyph.rightKerningGroup<in>rightGroupMinimumWidths<block_start><if_stmt>thisLayer.width<l>rightGroupMinimumWidths[thisGlyph.rightKerningGroup]<block_start>rightGroupMinimumWidths[thisGlyph.rightKerningGroup]=thisLayer.width<line_sep>rightGroupNarrowestGlyphs[thisGlyph.rightKerningGroup]=thisGlyph.name<block_end><block_end><else_stmt><block_start>rightGroupMinimumWidths[thisGlyph.rightKerningGroup]=thisLayer.width<line_sep>rightGroupNarrowestGlyphs[thisGlyph.rightKerningGroup]=thisGlyph.name<block_end><block_end><block_end># go through kern values and collect them in tabText: <for_stmt>leftKey masterKerning.keys()<block_start><for_stmt>rightKey masterKerning[leftKey].keys()<block_start>kernValue=masterKerning[leftKey][rightKey]<if_stmt>kernValue<l>0<block_start>leftWidth=<none><line_sep>rightWidth=<none><try_stmt># collect widths for comparison 
<block_start><if_stmt>leftKey[0]<eq>"@"# leftKey is a group name like "@MMK_L_y" <block_start>groupName=leftKey[7:]<line_sep>leftWidth=rightGroupMinimumWidths[groupName]<line_sep>leftGlyphName=rightGroupNarrowestGlyphs[groupName]<block_end><else_stmt># leftKey is a glyph ID like "<KEY>" <block_start>leftGlyph=thisFont.glyphForId_(leftKey)<line_sep># exclude if non-exporting and user limited to exporting glyphs: <if_stmt>limitToExportingGlyphs<and><not>leftGlyph.export<block_start>kernValue=0.0<block_end>leftWidth=leftGlyph.layers[thisMaster.id].width<line_sep>leftGlyphName=leftGlyph.name<block_end><if_stmt>rightKey[0]<eq>"@"# rightKey is a group name like "@MMK_R_y" <block_start>groupName=rightKey[7:]<line_sep>rightWidth=leftGroupMinimumWidths[groupName]<line_sep>rightGlyphName=leftGroupNarrowestGlyphs[groupName]<block_end><else_stmt># rightKey is a glyph ID like "<KEY>" <block_start>rightGlyph=thisFont.glyphForId_(rightKey)<line_sep># exclude if non-exporting and user limited to exporting glyphs: <if_stmt>limitToExportingGlyphs<and><not>rightGlyph.export<block_start>kernValue=0.0<block_end>rightWidth=rightGlyph.layers[thisMaster.id].width<line_sep>rightGlyphName=rightGlyph.name<block_end># compare widths and collect overkern if it is one: # (kernValue of excluded glyphs will be 0.0 and not trigger the if clause) <if_stmt>abs(kernValue)<g>thresholdFactor<times>leftWidth<or>abs(kernValue)<g>thresholdFactor<times>rightWidth<block_start>tabText<augadd>"/%s/%s\n"%(leftGlyphName rightGlyphName)<block_end><block_end><except_stmt>Exception<as>e# probably a kerning group name found in the kerning data, but no glyph assigned to it: # brings macro window to front and reports warning: <block_start>Glyphs.showMacroWindow()<import_stmt>traceback<line_sep>errormsg=traceback.format_exc().lower()<for_stmt>side ("left" "right")<block_start><if_stmt><not>side<in>errormsg<block_start>print("⚠️ Warning: The %s group '%s' found in your kerning data does not appear in any glyph. 
Clean up your kerning, and run the script again."%(side groupName ))<block_end><block_end><block_end><block_end><block_end><block_end><if_stmt>tabText# opens new Edit tab: <block_start>thisFont.newTab(tabText[:-1])<block_end><else_stmt><block_start>Message(title="No Overkerns Found" message="Could not find any kern pairs beyond the threshold in this master." OKButton="Phew!")<block_end><block_end><if_stmt><not>self.SavePreferences(self)<block_start>print("Note: 'Find Overkerns' could not write preferences.")<block_end># self.w.close() # delete if you want window to stay open <block_end><except_stmt>Exception<as>e# brings macro window to front and reports error: <block_start>Glyphs.showMacroWindow()<line_sep>print("Find Overkerns Error: %s"%e)<import_stmt>traceback<line_sep>print(traceback.format_exc())<block_end><block_end><block_end>FindOverkerns()<line_sep>
# vim:fileencoding=utf-8:noet _log=[]<line_sep>vars={}<line_sep>vvars={'version':703}<line_sep>_tabpage=0<line_sep>_mode='n'<line_sep>_buf_purge_events=set()<line_sep>options={'paste':0 'ambiwidth':'single' 'columns':80 'encoding':'utf-8' }<line_sep>_last_bufnr=0<line_sep>_highlights={}<import_from_stmt>collections defaultdict<as>_defaultdict<line_sep>_environ=_defaultdict(<lambda>:'')<del_stmt>_defaultdict<line_sep>_thread_id=<none><def_stmt>_set_thread_id <block_start><global>_thread_id<import_from_stmt>threading current_thread<line_sep>_thread_id=current_thread().ident<block_end># Assuming import is done from the main thread _set_thread_id()<def_stmt>_print_log <block_start><for_stmt>item _log<block_start>print(item)<block_end>_log[:]=()<block_end><def_stmt>_vim func<block_start><import_from_stmt>functools wraps<import_from_stmt>threading current_thread<line_sep>@wraps(func)<def_stmt>f *args **kwargs<block_start><global>_thread_id<if_stmt>_thread_id<ne>current_thread().ident<block_start><raise>RuntimeError('Accessing vim from separate threads is not allowed')<block_end>_log.append((func.__name__ args))<line_sep><return>func(*args **kwargs)<block_end><return>f<block_end><def_stmt>_unicode func<block_start><import_from_stmt>functools wraps<import_stmt>sys<if_stmt>sys.version_info<l>(3 )<block_start><return>func<block_end>@wraps(func)<def_stmt>f *args **kwargs<block_start><import_from_stmt>powerline.lib.unicode u<line_sep>ret=func(*args **kwargs)<if_stmt>isinstance(ret bytes)<block_start>ret=u(ret)<block_end><return>ret<block_end><return>f<block_end><class_stmt>_Buffers(object)<block_start>@_vim<def_stmt>__init__ self<block_start>self.d={}<block_end>@_vim<def_stmt>__len__ self<block_start><return>len(self.d)<block_end>@_vim<def_stmt>__getitem__ self item<block_start><return>self.d[item]<block_end>@_vim<def_stmt>__setitem__ self item value<block_start>self.d[item]=value<block_end>@_vim<def_stmt>__iter__ 
self<block_start><return>iter(self.d.values())<block_end>@_vim<def_stmt>__contains__ self item<block_start><return>item<in>self.d<block_end>@_vim<def_stmt>_keys self<block_start><return>self.d.keys()<block_end>@_vim<def_stmt>_pop self *args **kwargs<block_start><return>self.d.pop(*args **kwargs)<block_end><block_end>buffers=_Buffers()<class_stmt>_ObjList(object)<block_start>@_vim<def_stmt>__init__ self objtype<block_start>self.l=[]<line_sep>self.objtype=objtype<block_end>@_vim<def_stmt>__getitem__ self item<block_start><return>self.l[item-int(item<g>0)]<block_end>@_vim<def_stmt>__len__ self<block_start><return>len(self.l)<block_end>@_vim<def_stmt>__iter__ self<block_start><return>iter(self.l)<block_end>@_vim<def_stmt>_pop self idx<block_start>obj=self.l.pop(idx-1)<for_stmt>moved_obj self.l[idx-1:]<block_start>moved_obj.number<augsub>1<block_end><return>obj<block_end>@_vim<def_stmt>_append self *args **kwargs<block_start><return>self.l.append(*args **kwargs)<block_end>@_vim<def_stmt>_new self *args **kwargs<block_start>number=len(self)+1<line_sep>new_obj=self.objtype(number *args **kwargs)<line_sep>self._append(new_obj)<line_sep><return>new_obj<block_end><block_end><def_stmt>_construct_result r<block_start><import_stmt>sys<if_stmt>sys.version_info<l>(3 )<block_start><return>r<block_end><else_stmt><block_start><if_stmt>isinstance(r str)<block_start><return>r.encode('utf-8')<block_end><elif_stmt>isinstance(r list)<block_start><return>[_construct_result(i)<for>i r]<block_end><elif_stmt>isinstance(r dict)<block_start><return>dict(((_construct_result(k) _construct_result(v))<for>k,v r.items()))<block_end><return>r<block_end><block_end><def_stmt>_str_func func<block_start><import_from_stmt>functools wraps<line_sep>@wraps(func)<def_stmt>f *args **kwargs<block_start><return>_construct_result(func(*args **kwargs))<block_end><return>f<block_end><def_stmt>_log_print <block_start><import_stmt>sys<for_stmt>entry 
_log<block_start>sys.stdout.write(repr(entry)+'\n')<block_end><block_end>_current_group=<none><line_sep>_on_wipeout=[]<line_sep>@_vim<def_stmt>command cmd<block_start><global>_current_group<line_sep>cmd=cmd.lstrip()<if_stmt>cmd.startswith('let g:')<block_start><import_stmt>re<line_sep>varname,value=re.compile(r'^let g:(\w+)\s*=\s*(.*)').match(cmd).groups()<line_sep>vars[varname]=value<block_end><elif_stmt>cmd.startswith('hi ')<block_start>sp=cmd.split()<line_sep>_highlights[sp[1]]=sp[2:]<block_end><elif_stmt>cmd.startswith('augroup')<block_start>augroup=cmd.partition(' ')[2]<if_stmt>augroup.upper()<eq>'END'<block_start>_current_group=<none><block_end><else_stmt><block_start>_current_group=augroup<block_end><block_end><elif_stmt>cmd.startswith('autocmd')<block_start>rest=cmd.partition(' ')[2]<line_sep>auevent,rest=rest.partition(' ')[::2]<line_sep>pattern,aucmd=rest.partition(' ')[::2]<if_stmt>auevent<ne>'BufWipeout'<or>pattern<ne>'*'<block_start><raise>NotImplementedError<block_end><import_stmt>sys<if_stmt>sys.version_info<l>(3 )<block_start><if_stmt><not>aucmd.startswith(':python ')<block_start><raise>NotImplementedError<block_end><block_end><else_stmt><block_start><if_stmt><not>aucmd.startswith(':python3 ')<block_start><raise>NotImplementedError<block_end><block_end>_on_wipeout.append(aucmd.partition(' ')[2])<block_end><elif_stmt>cmd.startswith('set ')<block_start><if_stmt>cmd.startswith('set statusline=')<block_start>options['statusline']=cmd[len('set statusline='):]<block_end><elif_stmt>cmd.startswith('set tabline=')<block_start>options['tabline']=cmd[len('set tabline='):]<block_end><else_stmt><block_start><raise>NotImplementedError(cmd)<block_end><block_end><else_stmt><block_start><raise>NotImplementedError(cmd)<block_end><block_end>@_vim@_unicode<def_stmt>eval 
expr<block_start><if_stmt>expr.startswith('g:')<block_start><return>vars[expr[2:]]<block_end><elif_stmt>expr.startswith('v:')<block_start><return>vvars[expr[2:]]<block_end><elif_stmt>expr.startswith('&')<block_start><return>options[expr[1:]]<block_end><elif_stmt>expr.startswith('$')<block_start><return>_environ[expr[1:]]<block_end><elif_stmt>expr.startswith('PowerlineRegisterCachePurgerEvent')<block_start>_buf_purge_events.add(expr[expr.find('"')+1:expr.rfind('"')-1])<line_sep><return>'0'<block_end><elif_stmt>expr.startswith('exists(')<block_start><return>'0'<block_end><elif_stmt>expr.startswith('getwinvar(')<block_start><import_stmt>re<line_sep>match=re.match(r'^getwinvar\((\d+), "(\w+)"\)$' expr)<if_stmt><not>match<block_start><raise>NotImplementedError(expr)<block_end>winnr=int(match.group(1))<line_sep>varname=match.group(2)<line_sep><return>_emul_getwinvar(winnr varname)<block_end><elif_stmt>expr.startswith('has_key(')<block_start><import_stmt>re<line_sep>match=re.match(r'^has_key\(getwinvar\((\d+), ""\), "(\w+)"\)$' expr)<if_stmt>match<block_start>winnr=int(match.group(1))<line_sep>varname=match.group(2)<line_sep><return>0+(varname<in>current.tabpage.windows[winnr].vars)<block_end><else_stmt><block_start>match=re.match(r'^has_key\(gettabwinvar\((\d+), (\d+), ""\), "(\w+)"\)$' expr)<if_stmt><not>match<block_start><raise>NotImplementedError(expr)<block_end>tabnr=int(match.group(1))<line_sep>winnr=int(match.group(2))<line_sep>varname=match.group(3)<line_sep><return>0+(varname<in>tabpages[tabnr].windows[winnr].vars)<block_end><block_end><elif_stmt>expr<eq>'getbufvar("%", "NERDTreeRoot").path.str()'<block_start><import_stmt>os<assert_stmt>os.path.basename(current.buffer.name).startswith('NERD_tree_')<line_sep><return>'/usr/include'<block_end><elif_stmt>expr.startswith('getbufvar(')<block_start><import_stmt>re<line_sep>match=re.match(r'^getbufvar\((\d+), ["\'](.+)["\']\)$' 
expr)<if_stmt><not>match<block_start><raise>NotImplementedError(expr)<block_end>bufnr=int(match.group(1))<line_sep>varname=match.group(2)<line_sep><return>_emul_getbufvar(bufnr varname)<block_end><elif_stmt>expr<eq>'tabpagenr()'<block_start><return>current.tabpage.number<block_end><elif_stmt>expr<eq>'tabpagenr("$")'<block_start><return>len(tabpages)<block_end><elif_stmt>expr.startswith('tabpagewinnr(')<block_start>tabnr=int(expr[len('tabpagewinnr('):-1])<line_sep><return>tabpages[tabnr].window.number<block_end><elif_stmt>expr.startswith('tabpagebuflist(')<block_start><import_stmt>re<line_sep>match=re.match(r'tabpagebuflist\((\d+)\)\[(\d+)\]' expr)<line_sep>tabnr=int(match.group(1))<line_sep>winnr=int(match.group(2))+1<line_sep><return>tabpages[tabnr].windows[winnr].buffer.number<block_end><elif_stmt>expr.startswith('gettabwinvar(')<block_start><import_stmt>re<line_sep>match=re.match(r'gettabwinvar\((\d+), (\d+), "(\w+)"\)' expr)<line_sep>tabnr=int(match.group(1))<line_sep>winnr=int(match.group(2))<line_sep>varname=match.group(3)<line_sep><return>tabpages[tabnr].windows[winnr].vars[varname]<block_end><elif_stmt>expr.startswith('type(function(')<block_start><import_stmt>re<line_sep>match=re.match(r'^type\(function\("([^"]+)"\)\) == 2$' expr)<if_stmt><not>match<block_start><raise>NotImplementedError(expr)<block_end><return>0<block_end><raise>NotImplementedError(expr)<block_end>@_vim<def_stmt>bindeval expr<block_start><if_stmt>expr<eq>'g:'<block_start><return>vars<block_end><elif_stmt>expr<eq>'{}'<block_start><return>{}<block_end><elif_stmt>expr<eq>'[]'<block_start><return>[]<block_end><import_stmt>re<line_sep>match=re.compile(r'^function\("([^"\\]+)"\)$').match(expr)<if_stmt>match<block_start><return>globals()['_emul_'+match.group(1)]<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end>@_vim@_str_func<def_stmt>_emul_mode 
*args<block_start><if_stmt>args<and>args[0]<block_start><return>_mode<block_end><else_stmt><block_start><return>_mode[0]<block_end><block_end>@_vim@_str_func<def_stmt>_emul_getbufvar bufnr varname<block_start><import_stmt>re<if_stmt>varname[0]<eq>'&'<block_start><if_stmt>bufnr<eq>'%'<block_start>bufnr=current.buffer.number<block_end><if_stmt>bufnr<not><in>buffers<block_start><return>''<block_end><try_stmt><block_start><return>buffers[bufnr].options[varname[1:]]<block_end><except_stmt>KeyError<block_start><try_stmt><block_start><return>options[varname[1:]]<block_end><except_stmt>KeyError<block_start><return>''<block_end><block_end><block_end><elif_stmt>re.match('^[a-zA-Z_]+$' varname)<block_start><if_stmt>bufnr<eq>'%'<block_start>bufnr=current.buffer.number<block_end><if_stmt>bufnr<not><in>buffers<block_start><return>''<block_end><return>buffers[bufnr].vars[varname]<block_end><raise>NotImplementedError<block_end>@_vim@_str_func<def_stmt>_emul_getwinvar winnr varname<block_start><return>current.tabpage.windows[winnr].vars.get(varname '')<block_end>@_vim<def_stmt>_emul_setwinvar winnr varname value<block_start>current.tabpage.windows[winnr].vars[varname]=value<block_end>@_vim<def_stmt>_emul_virtcol expr<block_start><if_stmt>expr<eq>'.'<block_start><return>current.window.cursor[1]+1<block_end><if_stmt>isinstance(expr list)<and>len(expr)<eq>3<block_start><return>expr[-2]+expr[-1]<block_end><raise>NotImplementedError<block_end>_v_pos=<none><line_sep>@_vim<def_stmt>_emul_getpos expr<block_start><if_stmt>expr<eq>'.'<block_start><return>[0 current.window.cursor[0]+1 current.window.cursor[1]+1 0]<block_end><if_stmt>expr<eq>'v'<block_start><return>_v_pos<or>[0 current.window.cursor[0]+1 current.window.cursor[1]+1 0]<block_end><raise>NotImplementedError<block_end>@_vim@_str_func<def_stmt>_emul_fnamemodify path modstring<block_start><import_stmt>os<line_sep>_modifiers={'~':<lambda>path:path.replace(os.environ['HOME'].encode('utf-8') 
b'~')<if>path.startswith(os.environ['HOME'].encode('utf-8'))<else>path '.':<lambda>path:(<lambda>tpath:path<if>tpath[:3]<eq>b'..'+os.sep.encode()<else>tpath)(os.path.relpath(path)) 't':<lambda>path:os.path.basename(path) 'h':<lambda>path:os.path.dirname(path) }<for_stmt>mods modstring.split(':')[1:]<block_start>path=_modifiers[mods](path)<block_end><return>path<block_end>@_vim@_str_func<def_stmt>_emul_expand expr<block_start><global>_abuf<if_stmt>expr<eq>'<abuf>'<block_start><return>_abuf<or>current.buffer.number<block_end><raise>NotImplementedError<block_end>@_vim<def_stmt>_emul_bufnr expr<block_start><if_stmt>expr<eq>'$'<block_start><return>_last_bufnr<block_end><raise>NotImplementedError<block_end>@_vim<def_stmt>_emul_exists ident<block_start><if_stmt>ident.startswith('g:')<block_start><return>ident[2:]<in>vars<block_end><elif_stmt>ident.startswith(':')<block_start><return>0<block_end><raise>NotImplementedError<block_end>@_vim<def_stmt>_emul_line2byte line<block_start>buflines=current.buffer._buf_lines<if_stmt>line<eq>len(buflines)+1<block_start><return>sum((len(s)<for>s buflines))+1<block_end><raise>NotImplementedError<block_end>@_vim<def_stmt>_emul_line expr<block_start>cursorline=current.window.cursor[0]+1<line_sep>numlines=len(current.buffer._buf_lines)<if_stmt>expr<eq>'w0'<block_start><return>max(cursorline-5 1)<block_end><if_stmt>expr<eq>'w$'<block_start><return>min(cursorline+5 numlines)<block_end><raise>NotImplementedError<block_end>@_vim@_str_func<def_stmt>_emul_strtrans s# FIXME Do more replaces <block_start><return>s.replace(b'\xFF' b'<ff>')<block_end>@_vim@_str_func<def_stmt>_emul_bufname bufnr<block_start><try_stmt><block_start><return>buffers[bufnr]._name<or>b''<block_end><except_stmt>KeyError<block_start><return>b''<block_end><block_end>_window_id=0<class_stmt>_Window(object)<block_start><def_stmt>__init__ self number buffer=<none> cursor=(1 0) 
width=80<block_start><global>_window_id<line_sep>self.cursor=cursor<line_sep>self.width=width<line_sep>self.number=number<if_stmt>buffer<block_start><if_stmt>type(buffer)<is>_Buffer<block_start>self.buffer=buffer<block_end><else_stmt><block_start>self.buffer=_Buffer(**buffer)<block_end><block_end><else_stmt><block_start>self.buffer=_Buffer()<block_end>_window_id<augadd>1<line_sep>self._window_id=_window_id<line_sep>self.options={}<line_sep>self.vars={'powerline_window_id':self._window_id }<block_end><def_stmt>__repr__ self<block_start><return>'<window '+str(self.number-1)+'>'<block_end><block_end><class_stmt>_Tabpage(object)<block_start><def_stmt>__init__ self number<block_start>self.windows=_ObjList(_Window)<line_sep>self.number=number<block_end><def_stmt>_new_window self **kwargs<block_start>self.window=self.windows._new(**kwargs)<line_sep><return>self.window<block_end><def_stmt>_close_window self winnr open_window=<true><block_start>curwinnr=self.window.number<line_sep>win=self.windows._pop(winnr)<if_stmt>self.windows<and>winnr<eq>curwinnr<block_start>self.window=self.windows[-1]<block_end><elif_stmt>open_window<block_start>current.tabpage._new_window()<block_end><return>win<block_end><def_stmt>_close self<block_start><global>_tabpage<while_stmt>self.windows<block_start>self._close_window(1 <false>)<block_end>tabpages._pop(self.number)<line_sep>_tabpage=len(tabpages)<block_end><block_end>tabpages=_ObjList(_Tabpage)<line_sep>_abuf=<none><class_stmt>_Buffer(object)<block_start><def_stmt>__init__ self name=<none><block_start><global>_last_bufnr<line_sep>_last_bufnr<augadd>1<line_sep>bufnr=_last_bufnr<line_sep>self.number=bufnr<line_sep># FIXME Use unicode() for python-3 self.name=name<line_sep>self.vars={'changedtick':1}<line_sep>self.options={'modified':0 'readonly':0 'fileformat':'unix' 'filetype':'' 'buftype':'' 'fileencoding':'utf-8' 'textwidth':80 
}<line_sep>self._buf_lines=['']<line_sep>self._undostate=[self._buf_lines[:]]<line_sep>self._undo_written=len(self._undostate)<line_sep>buffers[bufnr]=self<block_end>@property<def_stmt>name self<block_start><import_stmt>sys<if_stmt>sys.version_info<l>(3 )<block_start><return>self._name<block_end><else_stmt><block_start><return>str(self._name 'utf-8')<if>self._name<else><none><block_end><block_end>@name.setter<def_stmt>name self name<block_start><if_stmt>name<is><none><block_start>self._name=<none><block_end><else_stmt><block_start><import_stmt>os<if_stmt>type(name)<is><not>bytes<block_start>name=name.encode('utf-8')<block_end><if_stmt>b':/'<in>name<block_start>self._name=name<block_end><else_stmt><block_start>self._name=os.path.abspath(name)<block_end><block_end><block_end><def_stmt>__getitem__ self line<block_start><return>self._buf_lines[line]<block_end><def_stmt>__setitem__ self line value<block_start>self.options['modified']=1<line_sep>self.vars['changedtick']<augadd>1<line_sep>self._buf_lines[line]=value<import_from_stmt>copy copy<line_sep>self._undostate.append(copy(self._buf_lines))<block_end><def_stmt>__setslice__ self *args<block_start>self.options['modified']=1<line_sep>self.vars['changedtick']<augadd>1<line_sep>self._buf_lines.__setslice__(*args)<import_from_stmt>copy copy<line_sep>self._undostate.append(copy(self._buf_lines))<block_end><def_stmt>__getslice__ self *args<block_start><return>self._buf_lines.__getslice__(*args)<block_end><def_stmt>__len__ self<block_start><return>len(self._buf_lines)<block_end><def_stmt>__repr__ self<block_start><return>'<buffer '+str(self.name)+'>'<block_end><def_stmt>__del__ self<block_start><global>_abuf<line_sep>bufnr=self.number<try_stmt><block_start><import_stmt>__main__<block_end><except_stmt>ImportError<block_start><pass><block_end><except_stmt>RuntimeError# Module may have already been garbage-collected 
<block_start><pass><block_end><else_stmt><block_start><if_stmt>_on_wipeout<block_start>_abuf=bufnr<try_stmt><block_start><for_stmt>event _on_wipeout<block_start>exec(event __main__.__dict__)<block_end><block_end><finally_stmt><block_start>_abuf=<none><block_end><block_end><block_end><block_end><block_end><class_stmt>_Current(object)<block_start>@property<def_stmt>buffer self<block_start><return>self.window.buffer<block_end>@property<def_stmt>window self<block_start><return>self.tabpage.window<block_end>@property<def_stmt>tabpage self<block_start><return>tabpages[_tabpage-1]<block_end><block_end>current=_Current()<line_sep>_dict=<none><line_sep>@_vim<def_stmt>_init <block_start><global>_dict<if_stmt>_dict<block_start><return>_dict<block_end>_dict={}<for_stmt>varname,value globals().items()<block_start><if_stmt>varname[0]<ne>'_'<block_start>_dict[varname]=value<block_end><block_end>_tabnew()<line_sep><return>_dict<block_end>@_vim<def_stmt>_get_segment_info <block_start>mode_translations={chr(ord('V')-0x40):'^V' chr(ord('S')-0x40):'^S' }<line_sep>mode=_mode<line_sep>mode=mode_translations.get(mode mode)<line_sep>window=current.window<line_sep>buffer=current.buffer<line_sep>tabpage=current.tabpage<line_sep><return>{'window':window 'winnr':window.number 'buffer':buffer 'bufnr':buffer.number 'tabpage':tabpage 'tabnr':tabpage.number 'window_id':window._window_id 'mode':mode 'encoding':options['encoding'] }<block_end>@_vim<def_stmt>_launch_event event<block_start><pass><block_end>@_vim<def_stmt>_start_mode mode<block_start><global>_mode<if_stmt>mode<eq>'i'<block_start>_launch_event('InsertEnter')<block_end><elif_stmt>_mode<eq>'i'<block_start>_launch_event('InsertLeave')<block_end>_mode=mode<block_end>@_vim<def_stmt>_undo 
<block_start><if_stmt>len(current.buffer._undostate)<eq>1<block_start><return><block_end>buffer=current.buffer<line_sep>buffer._undostate.pop(-1)<line_sep>buffer._buf_lines=buffer._undostate[-1]<if_stmt>buffer._undo_written<eq>len(buffer._undostate)<block_start>buffer.options['modified']=0<block_end><block_end>@_vim<def_stmt>_edit name=<none><block_start><if_stmt>current.buffer.name<is><none><block_start>buffer=current.buffer<line_sep>buffer.name=name<block_end><else_stmt><block_start>buffer=_Buffer(name)<line_sep>current.window.buffer=buffer<block_end><block_end>@_vim<def_stmt>_tabnew name=<none><block_start><global>windows<line_sep><global>_tabpage<line_sep>tabpage=tabpages._new()<line_sep>windows=tabpage.windows<line_sep>_tabpage=len(tabpages)<line_sep>_new(name)<line_sep><return>tabpage<block_end>@_vim<def_stmt>_new name=<none><block_start>current.tabpage._new_window(buffer={'name':name})<block_end>@_vim<def_stmt>_split <block_start>current.tabpage._new_window(buffer=current.buffer)<block_end>@_vim<def_stmt>_close winnr wipe=<true><block_start>win=current.tabpage._close_window(winnr)<if_stmt>wipe<block_start><for_stmt>w current.tabpage.windows<block_start><if_stmt>w.buffer.number<eq>win.buffer.number<block_start><break><block_end><block_end><else_stmt><block_start>_bw(win.buffer.number)<block_end><block_end><block_end>@_vim<def_stmt>_bw bufnr=<none><block_start>bufnr=bufnr<or>current.buffer.number<line_sep>winnr=1<for_stmt>win current.tabpage.windows<block_start><if_stmt>win.buffer.number<eq>bufnr<block_start>_close(winnr wipe=<false>)<block_end>winnr<augadd>1<block_end>buffers._pop(bufnr)<if_stmt><not>buffers<block_start>_Buffer()<block_end>_b(max(buffers._keys()))<block_end>@_vim<def_stmt>_b bufnr<block_start>current.window.buffer=buffers[bufnr]<block_end>@_vim<def_stmt>_set_cursor line col<block_start>current.window.cursor=(line 
col)<if_stmt>_mode<eq>'n'<block_start>_launch_event('CursorMoved')<block_end><elif_stmt>_mode<eq>'i'<block_start>_launch_event('CursorMovedI')<block_end><block_end>@_vim<def_stmt>_get_buffer <block_start><return>current.buffer<block_end>@_vim<def_stmt>_set_bufoption option value bufnr=<none><block_start>buffers[bufnr<or>current.buffer.number].options[option]=value<if_stmt>option<eq>'filetype'<block_start>_launch_event('FileType')<block_end><block_end><class_stmt>_WithNewBuffer(object)<block_start><def_stmt>__init__ self func *args **kwargs<block_start>self.call=<lambda>:func(*args **kwargs)<block_end><def_stmt>__enter__ self<block_start>self.call()<line_sep>self.bufnr=current.buffer.number<line_sep><return>_get_segment_info()<block_end><def_stmt>__exit__ self *args<block_start>_bw(self.bufnr)<block_end><block_end>@_vim<def_stmt>_set_dict d new setfunc=<none><block_start><if_stmt><not>setfunc<block_start><def_stmt>setfunc k v<block_start>d[k]=v<block_end><block_end>old={}<line_sep>na=[]<for_stmt>k,v new.items()<block_start><try_stmt><block_start>old[k]=d[k]<block_end><except_stmt>KeyError<block_start>na.append(k)<block_end>setfunc(k v)<block_end><return>old na<block_end><class_stmt>_WithBufOption(object)<block_start><def_stmt>__init__ self **new<block_start>self.new=new<block_end><def_stmt>__enter__ self<block_start>self.buffer=current.buffer<line_sep>self.old=_set_dict(self.buffer.options self.new _set_bufoption)[0]<block_end><def_stmt>__exit__ self *args<block_start>self.buffer.options.update(self.old)<block_end><block_end><class_stmt>_WithMode(object)<block_start><def_stmt>__init__ self new<block_start>self.new=new<block_end><def_stmt>__enter__ self<block_start>self.old=_mode<line_sep>_start_mode(self.new)<line_sep><return>_get_segment_info()<block_end><def_stmt>__exit__ self *args<block_start>_start_mode(self.old)<block_end><block_end><class_stmt>_WithDict(object)<block_start><def_stmt>__init__ self d 
**new<block_start>self.new=new<line_sep>self.d=d<block_end><def_stmt>__enter__ self<block_start>self.old,self.na=_set_dict(self.d self.new)<block_end><def_stmt>__exit__ self *args<block_start>self.d.update(self.old)<for_stmt>k self.na<block_start>self.d.pop(k)<block_end><block_end><block_end><class_stmt>_WithSplit(object)<block_start><def_stmt>__enter__ self<block_start>_split()<block_end><def_stmt>__exit__ self *args<block_start>_close(2 wipe=<false>)<block_end><block_end><class_stmt>_WithBufName(object)<block_start><def_stmt>__init__ self new<block_start>self.new=new<block_end><def_stmt>__enter__ self<block_start><import_stmt>os<line_sep>buffer=current.buffer<line_sep>self.buffer=buffer<line_sep>self.old=buffer.name<line_sep>buffer.name=self.new<block_end><def_stmt>__exit__ self *args<block_start>self.buffer.name=self.old<block_end><block_end><class_stmt>_WithNewTabPage(object)<block_start><def_stmt>__init__ self *args **kwargs<block_start>self.args=args<line_sep>self.kwargs=kwargs<block_end><def_stmt>__enter__ self<block_start>self.tab=_tabnew(*self.args **self.kwargs)<block_end><def_stmt>__exit__ self *args<block_start>self.tab._close()<block_end><block_end><class_stmt>_WithGlobal(object)<block_start><def_stmt>__init__ self **kwargs<block_start>self.kwargs=kwargs<block_end><def_stmt>__enter__ self<block_start>self.empty=object()<line_sep>self.old=dict(((key globals().get(key self.empty))<for>key self.kwargs))<line_sep>globals().update(self.kwargs)<block_end><def_stmt>__exit__ self *args<block_start><for_stmt>k,v self.old.items()<block_start><if_stmt>v<is>self.empty<block_start>globals().pop(k <none>)<block_end><else_stmt><block_start>globals()[k]=v<block_end><block_end><block_end><block_end>@_vim<def_stmt>_with key *args **kwargs<block_start><if_stmt>key<eq>'buffer'<block_start><return>_WithNewBuffer(_edit *args **kwargs)<block_end><elif_stmt>key<eq>'bufname'<block_start><return>_WithBufName(*args 
**kwargs)<block_end><elif_stmt>key<eq>'mode'<block_start><return>_WithMode(*args **kwargs)<block_end><elif_stmt>key<eq>'bufoptions'<block_start><return>_WithBufOption(**kwargs)<block_end><elif_stmt>key<eq>'options'<block_start><return>_WithDict(options **kwargs)<block_end><elif_stmt>key<eq>'globals'<block_start><return>_WithDict(vars **kwargs)<block_end><elif_stmt>key<eq>'wvars'<block_start><return>_WithDict(current.window.vars **kwargs)<block_end><elif_stmt>key<eq>'environ'<block_start><return>_WithDict(_environ **kwargs)<block_end><elif_stmt>key<eq>'split'<block_start><return>_WithSplit()<block_end><elif_stmt>key<eq>'tabpage'<block_start><return>_WithNewTabPage(*args **kwargs)<block_end><elif_stmt>key<eq>'vpos'<block_start><return>_WithGlobal(_v_pos=[0 kwargs['line'] kwargs['col'] kwargs['off']])<block_end><block_end><class_stmt>error(Exception)<block_start><pass><block_end>
<import_from_stmt>pathlib Path<import_stmt>csv<import_from_stmt>ruamel.yaml YAML<def_stmt>get_msg_numbers <block_start>all_datum=[]<with_stmt>open(Path(__file__).resolve().parent/"msg_numbers.csv" 'r')<as>f<block_start>reader=csv.reader(f delimiter=',')<line_sep>next(reader)# skip header <for_stmt>row reader<block_start><if_stmt>len(row)<eq>0<block_start><break><block_end>litvin_number,bns_number,og_number,uni_number=row<line_sep>all_datum.append((int(litvin_number) bns_number og_number int(uni_number) ))<block_end><block_end><assert_stmt>len(all_datum)<eq>1651<line_sep><return>all_datum<block_end><def_stmt>get_spg_table <block_start>all_datum={}<with_stmt>open(Path("../spg.csv") 'r')<as>f<block_start>reader=csv.reader(f delimiter=',')<for_stmt>row reader<block_start>hall_number,choice,number,hall_symbol=int(row[0]) row[2] int(row[4]) row[6]<line_sep>all_datum[hall_number]={'choice':choice 'number':number 'hall_symbol':hall_symbol }<block_end><block_end><assert_stmt>len(all_datum)<eq>530<line_sep><return>all_datum<block_end><def_stmt>get_msg_table # Load MSG for ITA standard settings <block_start><with_stmt>open(Path("./magnetic_hall_symbols.yaml") 'r')<as>f<block_start>all_datum=dict(YAML().load(f))<block_end><return>all_datum<block_end>
<import_stmt>gdb<import_stmt>re<import_from_stmt>.utils cast relpath<def_stmt>cstr val<block_start><try_stmt><block_start><return>val.string()<block_end><except_stmt>gdb.MemoryError<block_start><return>'[bad-ptr 0x%x]'%val.address<block_end><block_end><def_stmt>enum v<block_start><return>v.type.target().fields()[int(v)].name<block_end><class_stmt>ProgramCounter()<block_start><def_stmt>__init__ self pc<block_start>self.pc=cast(pc 'unsigned long')<block_end><def_stmt>__str__ self<block_start><if_stmt>self.pc<eq>0<block_start><return>'null'<block_end>line=gdb.execute('info line *0x%x'%self.pc to_string=<true>)<line_sep>m=re.match(r'Line (\d+) of "(.*)"' line)<if_stmt>m<block_start>lnum,path=m.groups()<line_sep><return>'%s:%s'%(relpath(path) lnum)<block_end><else_stmt><block_start><return>'0x%x'%self.pc<block_end><block_end><block_end><class_stmt>GdbStructBase()<block_start><def_stmt>__init__ self obj<block_start>self._obj=obj<block_end><def_stmt>to_string self<block_start><return>str(self)<block_end><def_stmt>dump self<block_start>res=['%s = %s'%(field getattr(self field))<for>field self._obj.type]<line_sep><return>'\n'.join(res)<block_end><def_stmt>display_hint self<block_start><return>'map'<block_end><block_end><class_stmt>GdbStructMeta(type)<block_start><def_stmt>__new__ cls name bases dct<block_start>t=gdb.lookup_type(dct['__ctype__'])<line_sep># for each field of ctype make property getter of the same name <for_stmt>f t.fields()<block_start><def_stmt>mkgetter fname caster<block_start><if_stmt>caster<is><none><block_start><return><lambda>x:x._obj[fname]<block_end># use cast function if available <def_stmt>_caster x<block_start>val=x._obj[fname]<try_stmt><block_start><return>caster(val)<block_end><except_stmt>gdb.MemoryError<block_start><return>'[bad-ptr: 0x%x]'%val.address<block_end><block_end><return>_caster<block_end>caster=<none><if_stmt>'__cast__'<in>dct<block_start>caster=dct['__cast__'].get(f.name <none>)<block_end>dct[f.name]=property(mkgetter(f.name 
caster))<block_end># classes created with GdbStructMeta will inherit from GdbStructBase <return>super().__new__(cls name (GdbStructBase )+bases dct)<block_end><block_end><class_stmt>BinTime(metaclass=GdbStructMeta)<block_start>__ctype__='struct bintime'<line_sep>__cast__={'sec':int 'frac':int}<def_stmt>as_float self<block_start><return>float(self.sec)+float(self.frac)/2<power>64<block_end><def_stmt>__str__ self<block_start><return>'bintime{%.6f}'%self.as_float()<block_end><block_end><class_stmt>List()<block_start><def_stmt>__init__ self lst field<block_start>self.lst=lst<line_sep>self.field=field<block_end><def_stmt>__iter__ self<block_start>item=self.lst['lh_first']<while_stmt>item<ne>0<block_start>item=item.dereference()<line_sep><yield>item<line_sep>item=item[self.field]['le_next']<block_end><block_end><block_end><class_stmt>TailQueue()<block_start><def_stmt>__init__ self tq field<block_start>self.tq=tq<line_sep>self.field=field<block_end><def_stmt>__iter__ self<block_start>item=self.tq['tqh_first']<while_stmt>item<ne>0<block_start>item=item.dereference()<line_sep><yield>item<line_sep>item=item[self.field]['tqe_next']<block_end><block_end><block_end><class_stmt>LinkerSet()<block_start><def_stmt>__init__ self name typ<block_start>self.start=gdb.parse_and_eval('(%s **)&__start_set_%s'%(typ name))<line_sep>self.stop=gdb.parse_and_eval('(%s **)&__stop_set_%s'%(typ name))<block_end><def_stmt>__iter__ self<block_start>item=self.start<while_stmt>item<l>self.stop<block_start><yield>item.dereference().dereference()<line_sep>item=item+1<block_end><block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('queue' '0005_queue_node_required') ]<line_sep>operations=[migrations.RemoveField(model_name='queuedepthbyjobtype' name='job_type' ) migrations.DeleteModel(name='QueueDepthByJobType' ) migrations.DeleteModel(name='QueueDepthByPriority' ) ]<block_end>
# ---------------------------------------------------------------------------- # Copyright 2015 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- <import_stmt>h5py<import_stmt>numpy<as>np<def_stmt>convert_rgb_to_bokehrgba img_data downsample=1<block_start>""" Convert RGB image to two-dimensional array of RGBA values (encoded as 32-bit integers) (required by Bokeh). The functionality is currently not available in Bokeh. An issue was raised here: https://github.com/bokeh/bokeh/issues/1699 and this function is a modified version of the suggested solution. 
Arguments: img_data: img (ndarray, shape: [N, M, 3], dtype: uint8): image data dh: height of image dw: width of image Returns: img (ndarray): 2D image array of RGBA values """<if_stmt>img_data.dtype<ne>np.uint8<block_start><raise>NotImplementedError<block_end><if_stmt>img_data.ndim<ne>3<block_start><raise>NotImplementedError<block_end># downsample for render performance, v-flip since plot origin is bottom left # img_data = np.transpose(img_data, (1,2,0)) img_data=img_data[::-downsample ::downsample :]<line_sep>img_h,img_w,C=img_data.shape<line_sep># add an alpha channel to the image and recast from pixels of u8u8u8u8 to u32 #bokeh_img = np.dstack([img_data, 255 * np.ones((img_h, img_w), np.uint8)]) #final_image = bokeh_img.reshape(img_h, img_w * (C+1)).view(np.uint32) # put last 3 frames into separate color channels and add alpha channel bokeh_img=np.dstack([img_data[: : 1] img_data[: : 2] img_data[: : 3] 255<times>np.ones((img_h img_w) np.uint8)])<line_sep>final_image=bokeh_img.reshape(img_h img_w<times>4).view(np.uint32)<line_sep><return>final_image<block_end><def_stmt>h5_deconv_data f<block_start>""" Read deconv visualization data from hdf5 file. Returns: list of lists. 
Each inner list represents one layer, and consists of tuples (fm, deconv_data) """<line_sep>ret=list()<if_stmt>'deconv'<not><in>f.keys()<block_start><return><none><block_end>act_data=f['deconv/max_act']<line_sep>img_data=f['deconv/img']<for_stmt>layer act_data.keys()<block_start>layer_data=list()<for_stmt>fm range(act_data[layer]['vis'].shape[0])# to avoid storing entire dataset, imgs are cached as needed, have to look up <block_start>batch_ind,img_ind=act_data[layer]['batch_img'][fm]<line_sep>img_store=img_data['batch_{}'.format(batch_ind)]<line_sep>img_cache_ofs=img_store.attrs[str(img_ind)]<line_sep># have to convert from rgb to rgba and cast as uint32 dtype for bokeh plot_img=convert_rgb_to_bokehrgba(img_store['HWC_uint8'][: : : img_cache_ofs])<line_sep>plot_deconv=convert_rgb_to_bokehrgba(act_data[layer]['vis'][fm])<line_sep>layer_data.append((fm plot_deconv plot_img))<block_end>ret.append((layer layer_data))<block_end><return>ret<block_end>
<import_stmt>can<line_sep>bus1=can.interface.Bus('can0' bustype='virtual')<line_sep>bus2=can.interface.Bus('can0' bustype='virtual')<line_sep>msg1=can.Message(arbitration_id=0xabcde data=[1 2 3])<line_sep>bus1.send(msg1)<line_sep>msg2=bus2.recv()<line_sep>print(hex(msg1.arbitration_id))<line_sep>print(hex(msg2.arbitration_id))<assert_stmt>msg1.arbitration_id<eq>msg2.arbitration_id<line_sep>
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Function used in Worker tests of handling HTTP functions."""<import_from_stmt>flask render_template<def_stmt>function request<block_start>"""Test HTTP function whose behavior depends on the given mode. The function returns a success, a failure, or throws an exception, depending on the given mode. Args: request: The HTTP request which triggered this function. Must contain name of the requested mode in the 'mode' field in JSON document in request body. Returns: Value and status code defined for the given mode. Raises: Exception: Thrown when requested in the incoming mode specification. """<if_stmt>request.args<and>"message"<in>request.args<block_start>message=request.args.get("message")<block_end><elif_stmt>request.get_json()<and>"message"<in>request.get_json()<block_start>message=request.get_json()["message"]<block_end><else_stmt><block_start>message="Hello World!"<block_end><return>render_template("hello.html" name=message)<block_end>
<import_from_stmt>colorama Fore<import_stmt>os<line_sep>all_paths=[]<line_sep>dir_name=input('Enter the name of directory you want to clear: ')<line_sep>extension=set()<def_stmt>source_path dir_name<block_start><for_stmt>root os.walk("/home")<block_start><if_stmt>dir_name<eq>root[0].split('/')[-1]<block_start>all_paths.append(root[0])<block_end><block_end><for_stmt>i range(len(all_paths))<block_start>print()<line_sep>print("{}. {}".format(i+1 all_paths[i]))<block_end><if_stmt>len(all_paths)<eq>0<block_start>print(Fore.LIGHTRED_EX+'No directory found')<line_sep>exit()<block_end>choice=int(input('\nEnter the option number: '))<if_stmt>choice<l>1<or>choice<g>len(all_paths)<block_start>print(Fore.LIGHTRED_EX+'Wrong choice entered')<line_sep>exit()<block_end><else_stmt><block_start>path=all_paths[choice-1]<block_end><return>path<block_end><def_stmt>print_before path<block_start>print("Cleaning {} located at {}\n".format(path.split('/')[-1] path))<line_sep>print(Fore.LIGHTBLUE_EX+"Before cleaning\n"+Fore.RESET)<for_stmt>files os.listdir(path)<block_start>print(files end='\t')<block_end>print()<block_end><def_stmt>destination_path path<block_start>os.chdir(path)<for_stmt>f os.listdir()<block_start>name=(os.path.splitext(f))[0]<line_sep>ext=(os.path.splitext(f))[1]<line_sep>extension.add(ext[1:])<block_end>new_dir="New"+path.split('/')[-1]<line_sep>new_dir_path=os.path.join(path new_dir)<if_stmt><not>os.path.exists(new_dir_path)<block_start>os.mkdir(new_dir_path)<block_end><return>new_dir_path new_dir<block_end><def_stmt>organise new_dir_path new_dir path<block_start><for_stmt>ext extension<block_start>folder=os.path.join(new_dir_path ext)<if_stmt><not>os.path.exists(folder)<block_start>os.mkdir(folder)<block_end><if_stmt>ext<ne>''<block_start><for_stmt>f os.listdir()<block_start><if_stmt>os.path.splitext(f)[1].strip('.')<eq>ext<block_start>os.rename(f os.path.join(folder f))<block_end><block_end><block_end><else_stmt><block_start><for_stmt>f 
os.listdir()<block_start><if_stmt>f<ne>new_dir<and>os.path.splitext(f)[1].strip('.')<eq>ext<block_start>print(f)<line_sep>inner_folder=os.path.join(new_dir_path f)<if_stmt>os.path.exists(inner_folder)<block_start>os.chdir(os.path.join(path f))<for_stmt>file os.listdir()<block_start>new_path=os.path.join(inner_folder file)<line_sep>os.rename(file new_path)<block_end>os.rmdir(os.path.join(path f))<block_end><else_stmt><block_start>os.rename(f inner_folder)<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>print_after path<block_start>print(Fore.LIGHTBLUE_EX+"\nAfter cleaning\n"+Fore.RESET)<for_stmt>files os.listdir(path)<block_start>print(files end='\t')<block_end>print(Fore.LIGHTMAGENTA_EX+"\n\nCLEANED\n"+Fore.RESET)<block_end><def_stmt>file_manage <block_start>path=source_path(dir_name)<line_sep>print_before(path)<line_sep>new_dir_path,new_dir=destination_path(path)<line_sep>organise(new_dir_path new_dir path)<line_sep>print_after(path)<block_end>file_manage()<line_sep>
<import_stmt>spacy<line_sep>nlp=spacy.load("ja_core_news_md")<line_sep>doc1=nlp("暖かい夏の日です")<line_sep>doc2=nlp("外は晴れています")<line_sep># doc1とdoc2の類似度を取得 similarity=doc1.similarity(doc2)<line_sep>print(similarity)<line_sep>
""" Tests for inequality.py """<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_allclose assert_raises<import_from_stmt>scipy.stats linregress<import_from_stmt>quantecon lorenz_curve gini_coefficient shorrocks_index rank_size<def_stmt>test_lorenz_curve <block_start>""" Tests `lorenz` function, which calculates the lorenz curve An income distribution where everyone has almost the same wealth should be similar to a straight line An income distribution where one person has almost the wealth should be flat and then shoot straight up when it approaches one """<line_sep>n=3000<line_sep># Almost Equal distribution y=np.repeat(1 n)+np.random.normal(scale=0.0001 size=n)<line_sep>cum_people,cum_income=lorenz_curve(y)<line_sep>assert_allclose(cum_people cum_income rtol=1e-03)<line_sep># Very uneven distribution y=np.repeat(0.001 n)<line_sep>y[4]=100000<line_sep>pop_cum,income_cum=lorenz_curve(y)<line_sep>expected_income_cum=np.repeat(0. n+1)<line_sep>expected_income_cum[-1]=1.<line_sep>assert_allclose(expected_income_cum income_cum atol=1e-4)<block_end><def_stmt>test_gini_coeff <block_start>""" Tests how the function `gini_coefficient` calculates the Gini coefficient with the Pareto and the Weibull distribution. 
Analytically, we know that Pareto with parameter `a` has G = 1 / (2*a - 1) Likewise, for the Weibull distribution with parameter `a` we know that G = 1 - 2**(-1/a) """<line_sep>n=10000<line_sep># Tests Pareto: G = 1 / (2*a - 1) a=np.random.randint(2 15)<line_sep>expected=1/(2<times>a-1)<line_sep>y=(np.random.pareto(a size=n)+1)<times>2<line_sep>coeff=gini_coefficient(y)<line_sep>assert_allclose(expected coeff rtol=1e-01)<line_sep># Tests Weibull: G = 1 - 2**(-1/a) a=np.random.randint(2 15)<line_sep>expected=1-2<power>(-1/a)<line_sep>y=np.random.weibull(a size=n)<line_sep>coeff=gini_coefficient(y)<line_sep>assert_allclose(expected coeff rtol=1e-01)<block_end><def_stmt>test_shorrocks_index <block_start>""" Test Shorrocks mobility index function against the example used in 'Wealth distribution and social mobility in the US: A quantitative approach' (Benhabib, <NAME>, 2017).'' https://www.econ.nyu.edu/user/bisina/RevisionAugust.pdf """<line_sep># Construct the mobility matrix from Benhabib et al. P=[[0.222 0.222 0.215 0.187 0.081 0.038 0.029 0.006] [0.221 0.220 0.215 0.188 0.082 0.039 0.029 0.006] [0.207 0.209 0.210 0.194 0.090 0.046 0.036 0.008] [0.198 0.201 0.207 0.198 0.095 0.052 0.040 0.009] [0.175 0.178 0.197 0.207 0.110 0.067 0.054 0.012] [0.182 0.184 0.200 0.205 0.106 0.062 0.050 0.011] [0.123 0.125 0.166 0.216 0.141 0.114 0.094 0.021] [0.084 0.084 0.142 0.228 0.170 0.143 0.121 0.028]]<line_sep>expected=0.98# result from paper index=shorrocks_index(P)<line_sep>assert_allclose(expected index rtol=1e-2)<block_end><def_stmt>test_rank_size <block_start>""" Tests `rank_size` function, which generates rank-size data for a Pareto distribution. The rank-size plot for a sample drawn from a Pareto distribution should be a straight line. The length of the `rank_data` array should be within (c x 100)% of the size of the distribution. 
"""<line_sep>np.random.seed(15)<line_sep>sample_size=10000<line_sep>c=0.74<line_sep># Tests Pareto; r_squared ~ 1 pareto_draw=np.exp(np.random.exponential(scale=1.0 size=sample_size))<line_sep>rank_data,size_data=rank_size(pareto_draw c=c)<assert_stmt>len(rank_data)<eq>len(size_data)<line_sep>assert_allclose(c<times>sample_size len(rank_data) rtol=1e-3)<line_sep>_,_,r_value,_,_=linregress(np.log(rank_data) np.log(size_data))<line_sep>r_sqval=r_value<power>2<line_sep>assert_allclose(r_sqval 1 rtol=1e-3)<line_sep># Tests Exponential; r_squared < 1 z=np.random.randn(sample_size)<line_sep>exp_draw=np.exp(z)<line_sep>rank_data_exp,size_data_exp=rank_size(exp_draw c=c)<line_sep>_,_,r_value_exp,_,_=linregress(np.log(rank_data_exp) np.log(size_data_exp))<line_sep>r_sqval_exp=r_value_exp<power>2<line_sep>assert_raises(AssertionError assert_allclose r_sqval_exp 1 rtol=1e-3)<block_end>
type(Key.F4 KeyModifier.CTRL)<line_sep>sleep(1)<line_sep>exit(0)<line_sep>
<import_from_stmt>collections Counter<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>typing List Set<import_stmt>re<import_stmt>string<import_from_stmt>tqdm tqdm<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<import_from_stmt>.util read_gqa<line_sep># -------------------------------------------------------------------------- # Constants # -------------------------------------------------------------------------- UNK="<unk>"<line_sep>SOS="<sos>"<line_sep>EOS="<eos>"<line_sep>SPACE="<space>"<line_sep>CHARS=["<"+i+">"<for>i string.ascii_lowercase]+["<"+i+">"<for>i string.ascii_uppercase]<line_sep>SPECIAL_TOKENS=[UNK SOS EOS SPACE]#+ CHARS UNK_ID=SPECIAL_TOKENS.index(UNK)<line_sep>SOS_ID=SPECIAL_TOKENS.index(SOS)<line_sep>EOS_ID=SPECIAL_TOKENS.index(EOS)<line_sep># -------------------------------------------------------------------------- # Pretokenize # -------------------------------------------------------------------------- ENGLISH_PUNCTUATION='!"#$%&()*+,-./:;=?@[\\]^_`{|}~'<line_sep># -------------------------------------------------------------------------- <def_stmt>pretokenize_general text<block_start>text=text.replace("\n" "")<line_sep>text=re.sub(r'\s*$' '' text)<line_sep>text=text.replace(" " f" {SPACE} ")<line_sep><return>text<block_end><def_stmt>detokenize_general text<block_start>text=text.replace(f" {SPACE} " " ")<line_sep><return>text<block_end><def_stmt>pretokenize_json value<block_start><if_stmt>isinstance(value str)<or>isinstance(value bool)<or>isinstance(value int)<block_start><return>str(value)<block_end><raise>ValueError("Unsupported json value type")<block_end><def_stmt>pretokenize_english text<block_start>text=pretokenize_general(text)<for_stmt>p ENGLISH_PUNCTUATION<block_start>text=text.replace(p f" {p} ")<block_end>text=re.sub(r'\s*$' '' text)<line_sep><return>text<block_end><def_stmt>detokenize_english text<block_start>text=detokenize_general(text)<for_stmt>p 
ENGLISH_PUNCTUATION<block_start>text=text.replace(f" {p} " p)<block_end><return>text<block_end><def_stmt>bytes_to_string p<block_start><if_stmt>len(p)<eq>0<block_start><return>""<block_end>decode_utf8=np.vectorize(<lambda>v:v.decode("utf-8"))<line_sep>p=decode_utf8(p)<line_sep>s=''.join(p)<line_sep><return>s<block_end># -------------------------------------------------------------------------- # Vocab # -------------------------------------------------------------------------- <class_stmt>Vocab(object)<block_start><def_stmt>__init__ self table:List[str]<block_start>self.table=table<block_end><def_stmt>__contains__ self value<block_start><return>value<in>self.table<block_end><def_stmt>__iter__ self<block_start><return>iter(self.table)<block_end><def_stmt>__len__ self<block_start><return>len(self.table)<block_end># -------------------------------------------------------------------------- # <def_stmt>lookup self value<block_start><try_stmt><block_start><return>self.table.index(value)<block_end><except_stmt>ValueError<block_start><return>UNK_ID<block_end><block_end><def_stmt>inverse_lookup self value<block_start><try_stmt><block_start><return>self.table[value]<block_end><except_stmt>IndexError<block_start><return>UNK<block_end><block_end><def_stmt>ids_to_string self line output_as_array=<false><block_start>d=[self.inverse_lookup(i)<for>i line]<if_stmt>output_as_array<block_start><return>d<block_end><else_stmt><block_start><return>' '.join(d)<block_end><block_end><def_stmt>string_to_ids self line<block_start><return>[self.lookup(i)<for>i line.split(' ')]<block_end><def_stmt>expand_unknowns self line<block_start>unknowns=set(line.split(' '))<line_sep>unknowns<augsub>set(self.table)<line_sep>unknowns<augsub>set([''])<for_stmt>t unknowns<block_start>spaced=''.join([f"<{c}> "<for>c t])<line_sep>line=line.replace(t spaced)<block_end><return>line<block_end><def_stmt>english_to_ids self line# TODO: Make greedy w.r.t. 
tokens with spaces in them <block_start>line=pretokenize_english(line)<line_sep>line=self.expand_unknowns(line)<line_sep>line=self.string_to_ids(line)<line_sep><return>line<block_end><def_stmt>ids_to_english self line<block_start>line=self.ids_to_string(line)<line_sep>line=detokenize_english(line)<line_sep><return>line<block_end><def_stmt>prediction_value_to_string self v output_as_array=<false><block_start>"""Rough 'n' ready get me the hell outta here fn. Tries its best to deal with the mess of datatypes that end up coming out"""<if_stmt>isinstance(v np.int64)<block_start>s=self.inverse_lookup(v)<block_end><elif_stmt>isinstance(v np.ndarray)<block_start><if_stmt>v.dtype<eq>np.int64<block_start>s=self.ids_to_string(v output_as_array)<block_end><elif_stmt>v.dtype<eq>object<block_start>s=bytes_to_string(v)<block_end><else_stmt><block_start><raise>ValueError()<block_end><block_end><else_stmt><block_start><raise>ValueError()<block_end><return>s<block_end><def_stmt>save self args<block_start><with_stmt>tf.gfile.GFile(args["vocab_path"] 'w')<as>out_file<block_start><for_stmt>i self.table<block_start>out_file.write(i+"\n")<block_end><block_end><block_end># -------------------------------------------------------------------------- # Make me a vocab! 
# -------------------------------------------------------------------------- @classmethod<def_stmt>load cls path size<block_start>tokens=list()<with_stmt>tf.gfile.GFile(path)<as>file<block_start><for_stmt>line file.readlines()<block_start>tokens.append(line.replace("\n" ""))<if_stmt>len(tokens)<eq>size<block_start><break><block_end><block_end><block_end><assert_stmt>len(tokens)<eq>len(set(tokens)) f"Duplicate lines in {path}"<line_sep><return>Vocab(tokens)<block_end>@classmethod<def_stmt>load_from_args cls args<block_start><return>Vocab.load(args["vocab_path"] args["vocab_size"])<block_end>@classmethod<def_stmt>build cls args gqa_to_tokens limit=<none><block_start>hits=Counter()<def_stmt>add tokens:List[str]<block_start><for_stmt>token tokens<block_start><if_stmt>token<not><in>["" " " "\n"]<block_start>hits[token]<augadd>1<block_end><block_end><block_end><for_stmt>i tqdm(read_gqa(args limit=limit) total=limit)<block_start>add(gqa_to_tokens(i))<block_end>tokens=list()<line_sep>tokens.extend(SPECIAL_TOKENS)<for_stmt>i,c hits.most_common(args["vocab_size"])<block_start><if_stmt>len(tokens)<eq>args["vocab_size"]<block_start><break><block_end><if_stmt>i<not><in>tokens<block_start>tokens.append(i)<block_end><block_end><assert_stmt>len(tokens)<le>args["vocab_size"]<line_sep>v=Vocab(tokens)<line_sep>v.save(args)<line_sep><return>v<block_end><block_end>
<import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>datetime datetime timedelta<import_from_stmt>io BytesIO<import_from_stmt>stravalib model exc attributes unithelper<as>uh<import_from_stmt>stravalib.client Client<import_from_stmt>stravalib.tests.functional FunctionalTestBase<class_stmt>ClientWriteTest(FunctionalTestBase)<block_start><def_stmt>test_create_activity self<block_start>""" Test Client.create_activity simple case. """<line_sep>now=datetime.now().replace(microsecond=0)<line_sep>a=self.client.create_activity("test_create_activity#simple" activity_type=model.Activity.RIDE start_date_local=now elapsed_time=timedelta(hours=3 minutes=4 seconds=5) distance=uh.miles(15.2))<line_sep>print(a)<line_sep>self.assertIsInstance(a model.Activity)<line_sep>self.assertEquals("test_create_activity#simple" a.name)<line_sep>self.assertEquals(now a.start_date_local)<line_sep>self.assertEquals(round(float(uh.miles(15.2)) 2) round(float(uh.miles(a.distance)) 2))<line_sep>self.assertEquals(timedelta(hours=3 minutes=4 seconds=5) a.elapsed_time)<block_end><def_stmt>test_update_activity self<block_start>""" Test Client.update_activity simple case. 
"""<line_sep>now=datetime.now().replace(microsecond=0)<line_sep>a=self.client.create_activity("test_update_activity#create" activity_type=model.Activity.RIDE start_date_local=now elapsed_time=timedelta(hours=3 minutes=4 seconds=5) distance=uh.miles(15.2))<line_sep>self.assertIsInstance(a model.Activity)<line_sep>self.assertEquals("test_update_activity#create" a.name)<line_sep>update1=self.client.update_activity(a.id name="test_update_activivty#update")<line_sep>self.assertEquals("test_update_activivty#update" update1.name)<line_sep>self.assertFalse(update1.private)<line_sep>self.assertFalse(update1.trainer)<line_sep>self.assertFalse(update1.commute)<line_sep>update2=self.client.update_activity(a.id private=<true>)<line_sep>self.assertTrue(update2.private)<line_sep>update3=self.client.update_activity(a.id trainer=<true>)<line_sep>self.assertTrue(update3.private)<line_sep>self.assertTrue(update3.trainer)<block_end><def_stmt>test_upload_activity self<block_start>""" Test uploading an activity. NOTE: This requires clearing out the uploaded activities from configured writable Strava acct. """<with_stmt>open(os.path.join(RESOURCES_DIR 'sample.tcx'))<as>fp<block_start>uploader=self.client.upload_activity(fp data_type='tcx')<line_sep>self.assertTrue(uploader.is_processing)<line_sep>a=uploader.wait()<line_sep>self.assertTrue(uploader.is_complete)<line_sep>self.assertIsInstance(a model.Activity)<line_sep>self.assertEquals("02/21/2009 Leiden, ZH, The Netherlands" a.name)<line_sep># And we'll get an error if we try the same file again <with_stmt>self.assertRaises(exc.ActivityUploadFailed)<block_start>self.client.upload_activity(fp data_type='tcx')<block_end><block_end><block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>prml.nn.array.ones ones<import_from_stmt>prml.nn.array.zeros zeros<import_from_stmt>prml.nn.config config<import_from_stmt>prml.nn.function Function<import_from_stmt>prml.nn.network Network<class_stmt>BatchNormalizationFunction(Function)<block_start><def_stmt>_forward self x<block_start>self.mean=x.mean(axis=0)<line_sep>self.xc=x-self.mean<line_sep>self.var=np.mean(self.xc<power>2 axis=0)<line_sep>self.std=np.sqrt(self.var+1e-7)<line_sep><return>self.xc/self.std<block_end><def_stmt>_backward self delta x# dstd = -np.mean((delta * self.xc) / (self.std ** 2), axis=0) <block_start>dxc=delta/self.std-self.xc<times>np.mean((delta<times>self.xc)/(self.std<power>3) axis=0)<line_sep><return>dxc-np.mean(dxc axis=0)<line_sep># dstd = -np.mean((delta * self.xc) / (self.std ** 2), axis=0) # dxc = delta / self.std + self.xc * dstd / self.std # return dxc - np.mean(dxc, axis=0) # dxn = delta # dxc = dxn / self.std # dstd = -np.sum((dxn * self.xc) / (self.std ** 2), axis=0) # dvar = 0.5 * dstd / self.std # dxc += 2.0 * self.xc * dvar / delta.shape[0] # dmu = np.sum(dxc, axis=0) # dx = dxc - dmu / delta.shape[0] # return dx <block_end><block_end><class_stmt>BatchNormalization(Network)<block_start><def_stmt>__init__ self ndim scale=<none> bias=<none> momentum=0.9<block_start>super().__init__()<line_sep>self.momentum=momentum<with_stmt>self.set_parameter()<block_start>self.mean=zeros(ndim)<line_sep>self.var=ones(ndim)<block_end><block_end><def_stmt>__call__ self x<block_start>shape=x.shape<line_sep>x=x.reshape(-1 
x.shape[-1])<if_stmt>config.is_updating_bn<block_start>func=BatchNormalizationFunction()<line_sep>out=func.forward(x)<line_sep>self.mean.value=self.momentum<times>self.mean.value+(1-self.momentum)<times>func.mean<line_sep>self.var.value=self.momentum<times>self.var.value+(1-self.momentum)<times>func.var<del_stmt>func.mean<del_stmt>func.var<block_end><else_stmt><block_start>xc=x-self.mean<line_sep>out=xc/np.sqrt(self.var.value+1e-7)<block_end><return>out.reshape(*shape)<block_end><block_end>
# # Copyright (C) [2020] Futurewei Technologies, Inc. # # FORCE-RISCV is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_stmt>common.path_utils PathUtils<import_from_stmt>common.msg_utils Msg<import_from_stmt>common.sys_utils SysUtils<import_from_stmt>common.errors *<import_from_stmt>executors.app_executor *<import_from_stmt>classes.control_item ControlItem CtrlItmKeys<class_stmt>FrunToCtrlExecutor(AppExecutor)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.mFrunToCtrlCmd=<none><line_sep>self.log=<none><line_sep>self.elog=<none><block_end><def_stmt>load self aCtrlItem<block_start>super().load(aCtrlItem)<line_sep>self.mFrunToCtrlCmd=self.ctrl_item.fruntoctrl.get("path" <none>)<block_end><def_stmt>skip self<block_start><if_stmt><not>self.ctrl_item.fruntoctrl.get("run" <false>)<block_start>Msg.user("[FrunToCtrlExecutor::skip] skipping - run is not True...")<line_sep><return><true><block_end>Msg.user("[FrunToCtrlExecutor::skip] not skipping")<line_sep><return><false><block_end><def_stmt>execute self<block_start><if_stmt><not>PathUtils.check_file("./_def_frun.py")<block_start>Msg.user("[FrunToCtrlExecutor::skip] skipping - no _def_frun.py found")<line_sep><return><true><block_end>my_cmd=self.mFrunToCtrlCmd<if_stmt>my_cmd<is><none><block_start>Msg.user("[FrunToCtrlExecutor::skip] skipping - no path was given")<line_sep><return><true><block_end>Msg.user("FrunToCtrlCommand = 
"+str({"frun-to-ctrl-cmd":my_cmd}))<line_sep>Msg.flush()<line_sep>self.log="frun_to_ctrl_result.log"<line_sep>self.elog="frun_to_ctrl_result.err"<line_sep>my_result=SysUtils.exec_process(my_cmd self.log self.elog self.ctrl_item.timeout <true>)<line_sep>Msg.user("FrunToCtrlResult = "+str(my_result))<line_sep>Msg.flush()<line_sep><return>SysUtils.success(int(my_result[0]))<block_end><block_end>
# Generated by Django 2.2.1 on 2019-07-19 10:12 <import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>django.utils.timezone<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('app' '0001_initial') ]<line_sep>operations=[migrations.CreateModel(name='Process' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('name' models.CharField(default='detection' max_length=50)) ('created_at' models.DateTimeField(default=django.utils.timezone.now)) ] options={'db_table':'processes' } ) migrations.CreateModel(name='ProcessStatus' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('name' models.CharField(default='Not Started' max_length=100)) ('created_at' models.DateTimeField(default=django.utils.timezone.now)) ] options={'db_table':'process_statuses' } ) migrations.CreateModel(name='Experiment' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('results_path' models.TextField(default='')) ('created_at' models.DateTimeField(default=django.utils.timezone.now)) ('dataset' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='app.Dataset')) ('process' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='app.Process')) ('process_status' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='app.ProcessStatus')) ] options={'db_table':'experiments' } ) ]<block_end>
<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<import_from_stmt>typing Union List Tuple Any Dict<import_from_stmt>easydict EasyDict<import_from_stmt>.dataset preprocess InferenceDataset InferenceDatasetWithKeypoints<import_from_stmt>.network build_spin<import_from_stmt>.. BasePose3dRunner BasePose3dRefiner ACTIONS<import_from_stmt>iPERCore.tools.human_digitalizer.bodynets SMPL<import_from_stmt>iPERCore.tools.utils.dataloaders build_inference_loader<import_from_stmt>iPERCore.tools.utils.geometry.boxes cal_head_bbox<import_from_stmt>iPERCore.tools.utils.geometry.cam_pose_utils cam_init2orig cam_norm<import_from_stmt>iPERCore.tools.utils.filesio.persistence load_toml_file<line_sep>__all__=["SPINRunner"]<class_stmt>SPINRunner(BasePose3dRunner)<block_start><def_stmt>__init__ self cfg_or_path:Union[EasyDict str] device=torch.device("cpu")<block_start>""" Args: cfg_or_path (EasyDict or str): the configuration EasyDict or the cfg_path with `toml` file. If it is an EasyDict instance, it must contains the followings, --ckpt_path (str): the path of the pre-trained checkpoints; --smpl_path (str): the path of the smpl model; --smpl_mean_params (str): the path of the mean parameters of SMPL. 
Otherwise if it is a `toml` file, an example could be the followings, ckpt_path = "./assets/pretrains/spin_ckpt.pth" smpl_path = "./assets/pretrains/smpl_model.pkl" smpl_mean_params = "./assets/pretrains/smpl_mean_params.npz" device (torch.device): """<line_sep>self.device=device<line_sep># RGB self.MEAN=torch.as_tensor([0.485 0.456 0.406])[<none> : <none> <none>].to(self.device)<line_sep>self.STD=torch.as_tensor([0.229 0.224 0.225])[<none> : <none> <none>].to(self.device)<if_stmt>isinstance(cfg_or_path str)<block_start>cfg=EasyDict(load_toml_file(cfg_or_path))<block_end><else_stmt><block_start>cfg=cfg_or_path<block_end>self.model=build_spin(pretrained=<false>)<line_sep>checkpoint=torch.load(cfg["ckpt_path"])<line_sep>self.model.load_state_dict(checkpoint strict=<true>)<line_sep>self.model.eval()<line_sep>self._smpl=SMPL(cfg["smpl_path"]).to(self.device)<line_sep>self.model=self.model.to(self.device)<block_end><def_stmt>__call__ self image:np.ndarray boxes:Union[np.ndarray List Tuple Any] action:ACTIONS=ACTIONS.SPLIT<arrow>Dict[str Any]<block_start>""" Args: image (np.ndarray): (H, W, C), color intensity [0, 255] with BGR color channel; boxes (np.ndarray or List, or Tuple or None): (N, 4) action: -- 0: only return `cams`, `pose` and `shape` of SMPL; -- 1: return `cams`, `pose`, `shape` and `verts`. -- 2: return `cams`, `pose`, `shape`, `verts`, `j2d` and `j3d`. 
Returns: result (dict): """<line_sep>image=np.copy(image)<line_sep>proc_img,proc_info=preprocess(image boxes)<line_sep>proc_img=torch.tensor(proc_img).to(device=self.device)[<none>]<with_stmt>torch.no_grad()<block_start>proc_img=(proc_img-self.MEAN)/self.STD<line_sep>smpls=self.model(proc_img)<line_sep>cams_orig=cam_init2orig(smpls[: 0:3] proc_info["scale"] torch.tensor(proc_info["start_pt"] device=self.device).float())<line_sep>cams=cam_norm(cams_orig proc_info["im_shape"][0])<line_sep>smpls[: 0:3]=cams<if_stmt>action<eq>ACTIONS.SPLIT<block_start>result=self.body_model.split(smpls)<block_end><elif_stmt>action<eq>ACTIONS.SKIN<block_start>result=self.body_model.skinning(smpls)<block_end><elif_stmt>action<eq>ACTIONS.SMPL<block_start>result={"theta":smpls}<block_end><else_stmt><block_start>result=self.body_model.get_details(smpls)<block_end>result["proc_info"]=proc_info<block_end><return>result<block_end><def_stmt>run_with_smplify self image_paths:List[str] boxes:List[Union[List Tuple np.ndarray]] keypoints_info:Dict smplify_runner:BasePose3dRefiner batch_size:int=16 num_workers:int=4 filter_invalid:bool=<true> temporal:bool=<true><block_start>""" Args: image_paths (list of str): the image paths; boxes (list of Union[np.np.ndarray, list, tuple)): the bounding boxes of each image; keypoints_info (Dict): the keypoints information of each image; smplify_runner (BasePose3dRefiner): the simplify instance, it must contains the keypoint_formater; batch_size (int): the mini-batch size; num_workers (int): the number of processes; filter_invalid (bool): the flag to control whether filter invalid frames or not; temporal (bool): use temporal smooth optimization or not. Returns: smpl_infos (dict): the estimated smpl infomations, it contains, --all_init_smpls (torch.Tensor): (num, 85), the initialized smpls; --all_opt_smpls (torch.Tensor): (num, 85), the optimized smpls; --all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes. 
"""<def_stmt>head_is_valid head_boxes<block_start><return>(head_boxes[: 1]-head_boxes[: 0])<times>(head_boxes[: 3]-head_boxes[: 2])<g>10<times>10<block_end>dataset=InferenceDatasetWithKeypoints(image_paths boxes keypoints_info smplify_runner.keypoint_formater image_size=224 temporal=temporal)<line_sep>data_loader=build_inference_loader(dataset batch_size=batch_size num_workers=num_workers)<line_sep>""" sample (dict): the sample information, it contains, --image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized by MEAN and STD, RGB channel; --orig_image (torch.Tensor): (3, height, width) is the in rage of [0, 1], RGB channel; --im_shape (torch.Tensor): (height, width) --keypoints (dict): (num_joints, 3), and num_joints could be [75,]. --center (torch.Tensor): (2,); --start_pt (torch.Tensor): (2,); --scale (torch.Tensor): (1,); --img_path (str): the image path. """<line_sep>all_init_smpls=[]<line_sep>all_opt_smpls=[]<line_sep>all_pose3d_img_ids=[]<for_stmt>sample tqdm(data_loader)<block_start>images=sample["image"].to(self.device)<line_sep>start_pt=sample["start_pt"].to(self.device)<line_sep>scale=sample["scale"][: <none>].to(self.device).float()<line_sep>im_shape=sample["im_shape"][: 0:1].to(self.device)<line_sep>keypoints_info=sample["keypoints"].to(self.device)<line_sep>img_ids=sample["img_id"]<with_stmt>torch.no_grad()<block_start>init_smpls=self.model(images)<block_end>cams_orig=cam_init2orig(init_smpls[: 0:3] scale start_pt)<line_sep>cams=cam_norm(cams_orig im_shape)<line_sep>init_smpls[: 0:3]=cams<line_sep>smplify_results=smplify_runner(keypoints_info cams init_smpls[: -10:] init_smpls[: 3:-10] proc_kps=<false> temporal=temporal)<line_sep>opt_smpls=torch.cat([cams smplify_results["new_opt_pose"] smplify_results["new_opt_betas"]] dim=1)<if_stmt>filter_invalid<block_start>opt_smpls_info=self.get_details(opt_smpls)<line_sep>head_boxes=cal_head_bbox(opt_smpls_info["j2d"] 
image_size=512)<line_sep>valid=head_is_valid(head_boxes).nonzero(as_tuple=<false>)<line_sep>valid.squeeze_(-1)<line_sep>img_ids=img_ids[valid]<block_end>all_init_smpls.append(init_smpls.cpu())<line_sep>all_opt_smpls.append(opt_smpls.cpu())<line_sep>all_pose3d_img_ids.append(img_ids.cpu())<block_end>all_init_smpls=torch.cat(all_init_smpls dim=0)<line_sep>all_opt_smpls=torch.cat(all_opt_smpls dim=0)<line_sep>all_valid_ids=torch.cat(all_pose3d_img_ids dim=0)<line_sep>smpl_infos={"all_init_smpls":all_init_smpls "all_opt_smpls":all_opt_smpls "all_valid_ids":all_valid_ids}<line_sep><return>smpl_infos<block_end><def_stmt>run self image_paths:List[str] boxes:List[List] batch_size:int=16 num_workers:int=4 filter_invalid:bool=<true> temporal:bool=<true><block_start>""" Args: image_paths (list of str): the image paths; boxes (list of list): the bounding boxes of each image; batch_size (int): the mini-batch size; num_workers (int): the number of processes; filter_invalid (bool): the flag to control whether filter invalid frames or not; temporal (bool): use temporal smooth optimization or not. Returns: smpl_infos (dict): the estimated smpl infomations, it contains, --all_init_smpls (torch.Tensor): (num, 85), the initialized smpls; --all_opt_smpls (torch.Tensor): None --all_valid_ids (torch.Tensor): (num of valid frames,), the valid indexes. 
"""<def_stmt>head_is_valid head_boxes<block_start><return>(head_boxes[: 1]-head_boxes[: 0])<times>(head_boxes[: 3]-head_boxes[: 2])<g>10<times>10<block_end>dataset=InferenceDataset(image_paths boxes image_size=224)<line_sep>data_loader=build_inference_loader(dataset batch_size=batch_size num_workers=num_workers)<line_sep>""" sample (dict): the sample information, it contains, --image (torch.Tensor): (3, 224, 224) is the cropped image range of [0, 1] and normalized by MEAN and STD, RGB channel; --orig_image (torch.Tensor): (3, height, width) is the in rage of [0, 1], RGB channel; --im_shape (torch.Tensor): (height, width) --keypoints (dict): (num_joints, 3), and num_joints could be [75,]. --center (torch.Tensor): (2,); --start_pt (torch.Tensor): (2,); --scale (torch.Tensor): (1,); --img_path (str): the image path. """<line_sep>all_init_smpls=[]<line_sep>all_pose3d_img_ids=[]<for_stmt>sample tqdm(data_loader)<block_start>images=sample["image"].to(self.device)<line_sep>start_pt=sample["start_pt"].to(self.device)<line_sep>scale=sample["scale"][: <none>].to(self.device).float()<line_sep>im_shape=sample["im_shape"][: 0:1].to(self.device)<line_sep>img_ids=sample["img_id"]<with_stmt>torch.no_grad()<block_start>init_smpls=self.model(images)<block_end>cams_orig=cam_init2orig(init_smpls[: 0:3] scale start_pt)<line_sep>cams=cam_norm(cams_orig im_shape)<line_sep>init_smpls[: 0:3]=cams<if_stmt>filter_invalid<block_start>init_smpls_info=self.get_details(init_smpls)<line_sep>head_boxes=cal_head_bbox(init_smpls_info["j2d"] image_size=512)<line_sep>valid=head_is_valid(head_boxes).nonzero(as_tuple=<false>)<line_sep>valid.squeeze_(-1)<line_sep>img_ids=img_ids[valid]<block_end>all_init_smpls.append(init_smpls.cpu())<line_sep>all_pose3d_img_ids.append(img_ids.cpu())<block_end>all_init_smpls=torch.cat(all_init_smpls dim=0)<line_sep>all_valid_ids=torch.cat(all_pose3d_img_ids dim=0)<line_sep>smpl_infos={"all_init_smpls":all_init_smpls "all_opt_smpls":<none> 
"all_valid_ids":all_valid_ids}<line_sep><return>smpl_infos<block_end><def_stmt>get_details self smpls<block_start><return>self._smpl.get_details(smpls)<block_end>@property<def_stmt>mean_theta self<block_start>mean_cam=self.model.init_cam<line_sep>mean_pose=self.model.init_pose<line_sep>mean_shape=self.model.init_shape<line_sep>mean_theta=torch.cat([mean_cam mean_pose mean_shape] dim=-1)[0]<line_sep><return>mean_theta<block_end>@property<def_stmt>body_model self<block_start><return>self._smpl<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>logging<if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig()<block_end>_log=logging.getLogger(__name__)<import_stmt>pyxb.binding.generate<import_stmt>pyxb.binding.datatypes<as>xs<import_stmt>pyxb.binding.basis<import_stmt>pyxb.utils.domutils<import_stmt>os.path<line_sep>xsd='''<?xml version="1.0" encoding="UTF-8"?> <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"> <xs:complexType name="tDescription" mixed="true"> <xs:sequence> <xs:element ref="sub-description" minOccurs="0"/> </xs:sequence> </xs:complexType> <xs:element name="sub-description" type="xs:string"/> <xs:element name="description" type="tDescription"/> </xs:schema>'''<line_sep>code=pyxb.binding.generate.GeneratePython(schema_text=xsd)<line_sep>#open('code.py', 'w').write(code) #print code rv=compile(code 'test' 'exec')<line_sep>eval(rv)<import_from_stmt>pyxb.exceptions_ *<import_stmt>unittest<class_stmt>TestTrac_200907231924(unittest.TestCase)# This verifies that we do not improperly interpret non-element # content as being the content of a nested element. 
<block_start><def_stmt>testSub self<block_start>xml='<sub-description>Floor</sub-description>'<line_sep>instance=CreateFromDocument(xml)<line_sep>self.assertEqual(instance 'Floor')<block_end><def_stmt>testMain self<block_start>xml='<description>Main Office</description>'<line_sep>instance=CreateFromDocument(xml)<line_sep>self.assertEqual(1 len(instance.orderedContent()))<line_sep>self.assertTrue(instance.sub_description<is><none>)<line_sep>self.assertEqual(instance.orderedContent()[0].value 'Main Office')<block_end><def_stmt>testMainSub self<block_start>xml='<description>Main Office<sub-description>Floor</sub-description>State</description>'<line_sep>instance=CreateFromDocument(xml)<line_sep>self.assertTrue(instance.sub_description<is><not><none>)<line_sep>self.assertEqual(instance.sub_description 'Floor')<line_sep>self.assertEqual(3 len(instance.orderedContent()))<line_sep>self.assertEqual(instance.orderedContent()[0].value 'Main Office')<line_sep>self.assertEqual(instance.orderedContent()[2].value 'State')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>pytest<import_stmt>sqlite3<import_from_stmt>unittest.mock call Mock<import_from_stmt>allennlp.common.testing AllenNlpTestCase<import_from_stmt>scripts.ai2_internal.resume_daemon BeakerStatus create_table handler logger resume start_autoresume <line_sep># Don't spam the log in tests. logger.removeHandler(handler)<class_stmt>ResumeDaemonTest(AllenNlpTestCase)<block_start><def_stmt>setup_method self<block_start>super().setup_method()<line_sep>self.connection=sqlite3.connect(":memory:")<line_sep>create_table(self.connection)<block_end><def_stmt>test_create_beaker_status_works self<block_start>status=BeakerStatus("stopped")<assert_stmt>status.name<eq>"stopped"<block_end><def_stmt>test_create_beaker_status_throws self<block_start><with_stmt>pytest.raises(ValueError)<block_start>status=BeakerStatus("garbage")<assert_stmt>status.name<eq>"garbage"<block_end><block_end><def_stmt>test_does_nothing_on_empty_db self<block_start>beaker=Mock()<line_sep>resume(self.connection beaker)<assert_stmt><not>beaker.method_calls<block_end><def_stmt>test_does_not_resume_a_running_experiment self<block_start>beaker=Mock()<line_sep>experiment_id="foo"<line_sep>start_autoresume(self.connection experiment_id 5)<line_sep>beaker.get_status.return_value=BeakerStatus.running<line_sep>resume(self.connection beaker)<line_sep>beaker.get_status.assert_called()<assert_stmt>len(beaker.method_calls)<eq>1<block_end><def_stmt>test_does_not_resume_a_finished_experiment self<block_start>beaker=Mock()<line_sep>experiment_id="foo"<line_sep>start_autoresume(self.connection experiment_id 5)<line_sep>beaker.get_status.return_value=BeakerStatus.succeeded<line_sep>resume(self.connection beaker)<line_sep>beaker.get_status.assert_called()<assert_stmt>len(beaker.method_calls)<eq>1<block_end><def_stmt>test_does_resume_a_preempted_experiment self<block_start>beaker=Mock()<line_sep>experiment_id="foo"<line_sep>start_autoresume(self.connection experiment_id 
5)<line_sep>beaker.get_status.return_value=BeakerStatus.preempted<line_sep>beaker.resume.return_value="foo2"<line_sep>resume(self.connection beaker)<line_sep>beaker.get_status.assert_called()<line_sep>beaker.resume.assert_called()<assert_stmt>len(beaker.method_calls)<eq>2<block_end><def_stmt>test_respects_upper_bound_on_resumes self<block_start>beaker=Mock()<line_sep>experiment_id="foo"<line_sep>start_autoresume(self.connection experiment_id 5)<line_sep>beaker.get_status.return_value=BeakerStatus.preempted<for_stmt>i range(10)<block_start>beaker.resume.return_value=f"foo{i}"<line_sep>resume(self.connection beaker)<block_end>calls=[call.get_status("foo") call.resume("foo") call.get_status("foo0") call.resume("foo0") call.get_status("foo1") call.resume("foo1") call.get_status("foo2") call.resume("foo2") call.get_status("foo3") call.resume("foo3") call.get_status("foo4") ]<line_sep>beaker.assert_has_calls(calls)<block_end><def_stmt>test_handles_a_realistic_scenario self<block_start>beaker=Mock()<line_sep>experiment_id="foo"<line_sep>start_autoresume(self.connection experiment_id 5)<line_sep>beaker.get_status.return_value=BeakerStatus.preempted<for_stmt>i range(10)<block_start>beaker.resume.return_value=f"foo{i}"<if_stmt>i<eq>2<block_start>beaker.get_status.return_value=BeakerStatus.succeeded<block_end>resume(self.connection beaker)<block_end>calls=[call.get_status("foo") call.resume("foo") call.get_status("foo0") call.resume("foo0") call.get_status("foo1") ]<line_sep>beaker.assert_has_calls(calls)<block_end><block_end>
""" Useful geometric operations, e.g. Orthographic projection and a differentiable Rodrigues formula Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR """<import_stmt>torch<def_stmt>rodrigues theta<block_start>"""Convert axis-angle representation to rotation matrix. Args: theta: size = [B, 3] Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] """<line_sep>l1norm=torch.norm(theta+1e-8 p=2 dim=1)<line_sep>angle=torch.unsqueeze(l1norm -1)<line_sep>normalized=torch.div(theta angle)<line_sep>angle=angle<times>0.5<line_sep>v_cos=torch.cos(angle)<line_sep>v_sin=torch.sin(angle)<line_sep>quat=torch.cat([v_cos v_sin<times>normalized] dim=1)<line_sep><return>quat2mat(quat)<block_end><def_stmt>quat2mat quat<block_start>"""Convert quaternion coefficients to rotation matrix. Args: quat: size = [B, 4] 4 <===>(w, x, y, z) Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] """<line_sep>norm_quat=quat<line_sep>norm_quat=norm_quat/norm_quat.norm(p=2 dim=1 keepdim=<true>)<line_sep>w,x,y,z=norm_quat[: 0] norm_quat[: 1] norm_quat[: 2] norm_quat[: 3]<line_sep>B=quat.size(0)<line_sep>w2,x2,y2,z2=w.pow(2) x.pow(2) y.pow(2) z.pow(2)<line_sep>wx,wy,wz=w<times>x w<times>y w<times>z<line_sep>xy,xz,yz=x<times>y x<times>z y<times>z<line_sep>rotMat=torch.stack([w2+x2-y2-z2 2<times>xy-2<times>wz 2<times>wy+2<times>xz 2<times>wz+2<times>xy w2-x2+y2-z2 2<times>yz-2<times>wx 2<times>xz-2<times>wy 2<times>wx+2<times>yz w2-x2-y2+z2] dim=1).view(B 3 3)<line_sep><return>rotMat<block_end><def_stmt>orthographic_projection X camera<block_start>"""Perform orthographic projection of 3D points X using the camera parameters Args: X: size = [B, N, 3] camera: size = [B, 3] Returns: Projected 2D points -- size = [B, N, 2] """<line_sep>camera=camera.view(-1 1 3)<line_sep>X_trans=X[: : :2]+camera[: : 1:]<line_sep>shape=X_trans.shape<line_sep>X_2d=(camera[: : 0]<times>X_trans.view(shape[0] -1)).view(shape)<line_sep><return>X_2d<block_end>
""" Examples -------- Convert existing jupyter notebook to an airbnb knowledge repo format - python convert_to_knowledge_repo.py --ml_repo . --knowledge_repo knowledge-repo Deploying the webapp - knowledge_repo --repo knowledge-repo deploy """<import_stmt>os<import_stmt>re<import_stmt>json<import_stmt>subprocess<import_from_stmt>dateutil parser<as>date_parser<def_stmt>main ml_repo knowledge_repo inplace<block_start>ml_repo_path=os.path.abspath(ml_repo)<line_sep>knowledge_repo_path=os.path.abspath(knowledge_repo)<if_stmt><not>os.path.isdir(knowledge_repo_path)<block_start>init_knowledge_repo(knowledge_repo_path)<block_end>convert_all_posts(ml_repo_path knowledge_repo_path inplace)<block_end><def_stmt>init_knowledge_repo path<block_start>cmd='knowledge_repo --repo {} init'.format(path)<line_sep>subprocess.call(cmd shell=<true>)<block_end><def_stmt>convert_all_posts path knowledge_repo_path inplace<block_start>"""Recursive walk down all directory to perform the conversion"""<if_stmt>os.path.isdir(path)<block_start>files=[os.path.join(path f)<for>f os.listdir(path)]<for_stmt>f files<block_start>convert_all_posts(f knowledge_repo_path inplace)<block_end><block_end><elif_stmt>'-converted'<not><in>path<block_start>head,ext=os.path.splitext(path)<if_stmt>ext<eq>".ipynb"<block_start><try_stmt><block_start>converter=IpynbConverter(knowledge_repo_path inplace)<line_sep>notebook=converter.convert(path)<line_sep>converter.add(notebook)<block_end><except_stmt>Exception<as>e<block_start>print('Skipping: {}'.format(path))<line_sep>print(e)<block_end><block_end><block_end><block_end><class_stmt>IpynbConverter<block_start>""" Converts Jupyter notebook to airbnb knowledge repo format [1]_. Parameters ---------- knowledge_repo_path : str Path to store the airbnb knowledge repo-ed notebook. inplace : bool Whether to perform the conversion inplace or not. If false, then it will create a new notebook that has the '-converted' appended to the file name. 
Attributes ---------- date_created_ : str Input notebook's creation date. date_updated_ : str Input notebook's latest updated date. tags_ : str The notebook's filename is use as the tag in this automated conversion process. e.g. /Users/ethen/machine-learning/trees/decision_tree.ipynb, we would use 'decision_tree' as the tag. github_link_ : str Notebook's original link on github. title_ : str Notebook's title, uses the first level 1 markdown header that's not 'Table of Contents' that could be automatically generated by newer version of notebook. e.g. # Decision Tree (Classification)\n, then Decision Tree (Classification) would be our title. References ---------- .. [1] `Airbnb knowledge repo <https://github.com/airbnb/knowledge-repo>`_ """<line_sep>AUTHOR='<NAME>'<line_sep>DATE_FORMAT='%Y-%m-%d'<line_sep>REPO_NAME='machine-learning'<line_sep>BASE_URL='https://github.com/ethen8181/'<def_stmt>__init__ self knowledge_repo_path inplace<block_start>self.inplace=inplace<line_sep>self.knowledge_repo_path=knowledge_repo_path<block_end><def_stmt>convert self path<block_start>""" Convert the input path's notebook to a knowledge repo. This will add a mandatory raw cell that contains the yaml information needed by the knowledge repo and an additional cell that contains link to the notebook on github. Parameters ---------- path : str Path that has the '.ipynb' extension. Returns ------- notebook : dict Updated Jupyter notebook's raw json represented in dictionary format. Ready to be passed to the .add method to add to the knowledge repo. 
"""<line_sep>self.date_created_=self._date_created(path)<line_sep>self.date_updated_=self._date_updated(path)<line_sep>self.tags_,self.github_link_=self._tags_and_github_link(path)<with_stmt>open(path encoding='utf-8')<as>f<block_start>notebook=json.load(f)<block_end>self.title_=self._title(notebook)<line_sep># prepend the dictionary header to notebook['cells'] notebook['cells']=([self._construct_header()]+[self._construct_github_link_cell()]+notebook['cells'])<if_stmt><not>self.inplace<block_start>head,ext=os.path.splitext(path)<line_sep>head<augadd>'-converted'<line_sep>path=head+ext<block_end>self._path=path<line_sep><return>notebook<block_end><def_stmt>_date_created self path<block_start>"""Grab the date of creation through git log."""<line_sep>cmd='git log --diff-filter=A --follow --format=%cd -1 -- {}'.format(path)<line_sep><return>self._git_date_cmd(cmd)<block_end><def_stmt>_date_updated self path<block_start>"""Grab the last date modified through git log."""<line_sep>cmd='git log --format=%cd -1 -- {}'.format(path)<line_sep><return>self._git_date_cmd(cmd)<block_end><def_stmt>_git_date_cmd self cmd<block_start>"""Run bash command to retrieve and format date string."""<line_sep>date_str=subprocess.check_output(cmd shell=<true>)<line_sep>date_dt=date_parser.parse(date_str)<line_sep>formatted_date=date_dt.strftime(self.DATE_FORMAT)<line_sep><return>formatted_date<block_end><def_stmt>_tags_and_github_link self path<block_start>""" Use file name as tags, e.g. 
/Users/ethen/machine-learning/trees/decision_tree.ipynb we would use 'decision_tree' as the tag """<line_sep>_,file_path=path.split(self.REPO_NAME)<line_sep>_,file_name=os.path.split(file_path)<line_sep>tags,_=os.path.splitext(file_name)<line_sep># /blob/master indicates github master branch link=self.BASE_URL+self.REPO_NAME+'/blob/master'+file_path<line_sep><return>tags link<block_end><def_stmt>_title self notebook<block_start>""" A title in the notebook always starts with the '#' indicating a markdown level 1 header e.g. # Decision Tree (Classification)\n thus we can just parse all the text in between the '#' and the line break '\n' """<line_sep># TODO : we could fall back to the file path if it doesn't exist perhaps? title_pattern=re.compile('# (.*)\n')<for_stmt>cell notebook['cells']<block_start><if_stmt>cell['cell_type']<eq>'markdown'# the [0] indicates the # title pattern # should always appear in the first line <block_start>source=cell['source'][0]<line_sep>matched=title_pattern.match(source)<if_stmt>matched<is><not><none><block_start>title=matched.group(1)<line_sep># newer version of notebooks includes a # Table of Contents automatically in the first # cell, skip that and find the next level 1 header <if_stmt><not>title<eq>'Table of Contents'<block_start><break><block_end><block_end><block_end><block_end><return>title<block_end><def_stmt>_construct_header self<block_start>"""Create a knowledge repo style header as a dictionary."""<def_stmt>flatten_list l<block_start>""" Although not needed for the current version, we could have multiple tags and authors, in that case we would need to flatten them out. 
"""<line_sep>flat=[]<for_stmt>item l<block_start><if_stmt>isinstance(item list)<block_start>flat<augadd>item<block_end><else_stmt><block_start>flat.append(item)<block_end><block_end><return>flat<block_end>header={'cell_type':'raw' 'metadata':{}}<line_sep># header text required by the knowledge repo # a '- ' in front is required for knowledge repo tag header_text=['---' 'title: {}'.format(self.title_) 'authors:' '- {}'.format(self.AUTHOR) 'tags:' '- '+self.tags_ 'created_at: {}'.format(self.date_created_) 'updated_at: {}'.format(self.date_updated_) 'tldr: Nothing for tldr section as of now.' '---']<line_sep>header_text=flatten_list(header_text)<line_sep>header_text=[text+'\n'<for>text header_text[:-1]]+[header_text[-1]]<line_sep>header['source']=header_text<line_sep><return>header<block_end><def_stmt>_construct_github_link_cell self<block_start>"""Add a cell that contains link to original notebook on github"""<line_sep>github_link_cell={'cell_type':'markdown' 'metadata':{} 'source':['Link to original notebook: {}'.format(self.github_link_)]}<line_sep><return>github_link_cell<block_end><def_stmt>add self notebook<block_start>""" Add the converted notebook to the knowledge repo. Parameters ---------- notebook : dict Jupyter notebook's raw json represented in dictionary format. 
"""<with_stmt>open(self._path 'w' encoding='utf-8')<as>f<block_start>json.dump(notebook f)<block_end># create a run knowledge repo command destination=os.path.join(self.knowledge_repo_path 'project' self.tags_)<line_sep>cmd='knowledge_repo --repo {} add {} -p {}'.format(self.knowledge_repo_path self._path destination)<line_sep># communicate with the shell output to enable # continuation of the script execution p=subprocess.Popen(cmd stdout=subprocess.PIPE stdin=subprocess.PIPE stderr=subprocess.STDOUT shell=<true>)<line_sep>p.communicate(input=b'generated by automated airbnb knowledge repo setup')<if_stmt><not>self.inplace<block_start>os.remove(self._path)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser(description='Convert the machine-learning repository to an Airbnb Knowledge Repo.')<line_sep>parser.add_argument('--ml_repo' type=str help='Path to the root directory of the machine-learning repo.')<line_sep>parser.add_argument('--knowledge_repo' type=str help='Path to the knowledge repo.')<line_sep>parser.add_argument('--inplace' action='store_true' help='Modify the existing .ipynb in place.')<line_sep>args=vars(parser.parse_args())<line_sep>main(**args)<block_end>
<import_stmt>urllib.request<as>urllib2<import_stmt>urllib.parse<as>urlparse<import_from_stmt>urllib.request urlretrieve<import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>allennlp.commands.elmo ElmoEmbedder DEFAULT_OPTIONS_FILE DEFAULT_WEIGHT_FILE<import_stmt>pickle<as>pkl<import_stmt>os<import_stmt>gzip<import_stmt>sys<class_stmt>ELMoWordEmbeddings<block_start><def_stmt>__init__ self embeddings_path elmo_options_file=DEFAULT_OPTIONS_FILE elmo_weight_file=DEFAULT_WEIGHT_FILE elmo_mode='average' elmo_cuda_device=-1<block_start>self.embeddings_path=embeddings_path<line_sep>self.embedding_name=os.path.splitext(os.path.basename(embeddings_path))[0]<if>embeddings_path<is><not><none><else>'None'<line_sep>self.word2Idx=<none><line_sep>self.embeddings=<none><line_sep>self.elmo_options_file=elmo_options_file<line_sep>self.elmo_weight_file=elmo_weight_file<line_sep>self.elmo_cuda_device=elmo_cuda_device<line_sep>self.elmo_mode=elmo_mode<line_sep>self.elmo=<none><line_sep>self.cache_computed_elmo_embeddings=<false><line_sep>self.cache={}<line_sep>self.lazyCacheFiles=[]<block_end><def_stmt>getConfig self<block_start><return>{"embeddings_path":self.embeddings_path "elmo_options_file":self.elmo_options_file "elmo_weight_file":self.elmo_weight_file "elmo_mode":self.elmo_mode "elmo_cuda_device":self.elmo_cuda_device}<block_end><def_stmt>sentenceLookup self sentences<block_start>elmo_vectors=<none><line_sep># :: Elmo :: <if_stmt>self.elmo_mode<is><not><none><block_start>elmo_vectors=self.getElmoEmbedding(sentences)<block_end># :: Word Embedding :: tokens_vectors=<none><if_stmt>self.embeddings_path<is><not><none><block_start><if_stmt>self.word2Idx<is><none><or>self.embeddings<is><none><block_start>self.word2Idx,self.embeddings=self.readEmbeddings(self.embeddings_path)<block_end>tokens_vectors=[]<for_stmt>sentence sentences<block_start>per_token_embedding=[]<for_stmt>token 
sentence['tokens']<block_start>vecId=self.word2Idx['UNKNOWN_TOKEN']<if_stmt>token<in>self.word2Idx<block_start>vecId=self.word2Idx[token]<block_end><elif_stmt>token.lower()<in>self.word2Idx<block_start>vecId=self.word2Idx[token.lower()]<block_end>per_token_embedding.append(self.embeddings[vecId])<block_end>per_token_embedding=np.asarray(per_token_embedding)<line_sep>tokens_vectors.append(per_token_embedding)<block_end><block_end>out_vectors={}<if_stmt>tokens_vectors<is><not><none><block_start>out_vectors['tokens']=tokens_vectors<block_end><if_stmt>elmo_vectors<is><not><none><block_start>out_vectors['elmo']=elmo_vectors<block_end><return>out_vectors<block_end><def_stmt>batchLookup self sentences feature_name<block_start><if_stmt>feature_name<eq>'tokens'<block_start><if_stmt>self.word2Idx<is><none><or>self.embeddings<is><none><block_start>self.word2Idx,self.embeddings=self.readEmbeddings(self.embeddings_path)<block_end>tokens_vectors=[]<for_stmt>sentence sentences<block_start>per_token_embedding=[]<for_stmt>token sentence['tokens']<block_start>vecId=self.word2Idx['UNKNOWN_TOKEN']<if_stmt>token<in>self.word2Idx<block_start>vecId=self.word2Idx[token]<block_end><elif_stmt>token.lower()<in>self.word2Idx<block_start>vecId=self.word2Idx[token.lower()]<block_end>per_token_embedding.append(self.embeddings[vecId])<block_end>per_token_embedding=np.asarray(per_token_embedding)<line_sep>tokens_vectors.append(per_token_embedding)<block_end><return>np.asarray(tokens_vectors)<block_end><elif_stmt>feature_name<eq>'elmo'<block_start><return>np.asarray(self.getElmoEmbedding(sentences))<block_end><else_stmt><block_start>print("Unknown feature name was passed to singleSentenceLookup")<assert_stmt>(<false>)<block_end><block_end><def_stmt>applyElmoMode self elmo_vectors<block_start><if_stmt>self.elmo_mode<eq>'average'<block_start><return>np.average(elmo_vectors axis=0).astype(np.float32)<block_end><elif_stmt>self.elmo_mode<eq>'weighted_average'<block_start><return>np.swapaxes(elmo_vectors 
0 1)<block_end><elif_stmt>self.elmo_mode<eq>'last'<block_start><return>elmo_vectors[-1 : :]<block_end><elif_stmt>isinstance(self.elmo_mode int)<block_start><return>elmo_vectors[int(self.elmo_mode) : :]<block_end><else_stmt><block_start>print("Unknown ELMo mode")<assert_stmt>(<false>)<block_end><block_end><def_stmt>getElmoEmbedding self sentences<block_start><if_stmt>len(self.lazyCacheFiles)<g>0<block_start>self._loadLazyCache()<block_end>elmo_embeddings=[]<line_sep>non_cached_sentences=[]<line_sep>non_cached_sentences_indices=[]<line_sep># :: Lookup cached sentences :: <for_stmt>sentence sentences<block_start>tokens=sentence['tokens']<line_sep>cache_key=tuple(tokens)<if_stmt>len(self.cache)<g>0<and>cache_key<in>self.cache<block_start>elmo_embeddings.append(self.applyElmoMode(self.cache[cache_key]))<block_end><else_stmt><block_start>non_cached_sentences.append(tokens)<line_sep>non_cached_sentences_indices.append(len(elmo_embeddings))<line_sep>elmo_embeddings.append(<none>)<block_end><block_end># :: Compute ELMo on the fly :: <if_stmt>len(non_cached_sentences)<g>0<block_start><if_stmt>self.elmo<is><none><block_start>self.loadELMo()<block_end>idx=0<for_stmt>elmo_vectors self.elmo.embed_sentences(non_cached_sentences)<block_start><assert_stmt>(elmo_embeddings[non_cached_sentences_indices[idx]]<eq><none>)<line_sep>elmo_embeddings[non_cached_sentences_indices[idx]]=self.applyElmoMode(elmo_vectors)<if_stmt>self.cache_computed_elmo_embeddings<block_start>tokens=non_cached_sentences[idx]<line_sep>cache_key=tuple(tokens)<line_sep>self.cache[cache_key]=elmo_vectors<block_end>idx<augadd>1<block_end><block_end><return>elmo_embeddings<block_end><def_stmt>getIdentifier self<block_start>"""Returns a unique identifier for this lookup function"""<line_sep><return>"ELMoWordEmbeddings_"+self.embedding_name+"_"+str(self.elmo_mode)<block_end><def_stmt>loadELMo self<block_start>self.elmo=ElmoEmbedder(self.elmo_options_file self.elmo_weight_file 
self.elmo_cuda_device)<block_end><def_stmt>loadCache self inputPath<block_start>self.lazyCacheFiles.append(inputPath)<block_end><def_stmt>storeCache self outputPath<block_start>f=open(outputPath 'wb')<line_sep>pkl.dump(self.cache f -1)<line_sep>f.close()<block_end><def_stmt>addToCache self sentences<block_start><if_stmt>self.elmo<is><none><block_start>self.loadELMo()<block_end>idx=0<for_stmt>elmoEmbedding self.elmo.embed_sentences(sentences)<block_start>cache_key=tuple(sentences[idx])<line_sep>self.cache[cache_key]=elmoEmbedding<line_sep>idx<augadd>1<block_end><block_end><def_stmt>_loadLazyCache self<block_start><while_stmt>len(self.lazyCacheFiles)<g>0<block_start>inputPath=self.lazyCacheFiles.pop()<if_stmt><not>os.path.isfile(inputPath)<block_start>print("ELMo cache file not found:" inputPath)<line_sep><continue><block_end>f=open(inputPath 'rb')<line_sep>loaded_cache=pkl.load(f)<line_sep>f.close()<if_stmt>len(self.cache)<eq>0<block_start>self.cache=loaded_cache<block_end><else_stmt><block_start>self.cache.update(loaded_cache)<block_end><block_end><block_end><def_stmt>readEmbeddings self embeddingsPath<block_start>filename=os.path.basename(embeddingsPath)<if_stmt><not>os.path.isfile(embeddingsPath)<block_start><if_stmt>filename<in>['komninos_english_embeddings.gz' 'levy_english_dependency_embeddings.gz' 'reimers_german_embeddings.gz']<block_start>self.getEmbeddings(filename embeddingsPath)<block_end><else_stmt><block_start>print("The embeddings file %s was not found"%embeddingsPath)<line_sep>exit()<block_end><block_end># :: Read in word embeddings :: logging.info("Read file: %s"%embeddingsPath)<line_sep>word2Idx={}<line_sep>embeddings=[]<line_sep>embeddingsIn=gzip.open(embeddingsPath "rt")<if>embeddingsPath.endswith('.gz')<else>open(embeddingsPath encoding="utf8")<line_sep>embeddingsDimension=<none><for_stmt>line embeddingsIn<block_start>split=line.rstrip().split(" 
")<line_sep>word=split[0]<if_stmt>embeddingsDimension<eq><none><block_start>embeddingsDimension=len(split)-1<block_end><if_stmt>(len(split)-1)<ne>embeddingsDimension# Assure that all lines in the embeddings file are of the same length <block_start>print("ERROR: A line in the embeddings file had more or less dimensions than expected. Skip token.")<line_sep><continue><block_end><if_stmt>len(word2Idx)<eq>0# Add padding+unknown <block_start>word2Idx["PADDING_TOKEN"]=len(word2Idx)<line_sep>vector=np.zeros(embeddingsDimension)<line_sep>embeddings.append(vector)<line_sep>word2Idx["UNKNOWN_TOKEN"]=len(word2Idx)<line_sep>rndState=np.random.RandomState(seed=12345)<line_sep># Fixed rnd seed for unknown token, so that it is always the same vector=rndState.uniform(-0.25 0.25 embeddingsDimension)# Alternativ -sqrt(3/dim) ... sqrt(3/dim) embeddings.append(vector)<block_end>vector=np.array([float(num)<for>num split[1:]])<line_sep>embeddings.append(vector)<line_sep>word2Idx[word]=len(word2Idx)<block_end><return>word2Idx embeddings<block_end><def_stmt>getEmbeddings self filename savePath<block_start><if_stmt><not>os.path.isfile(savePath)<block_start>self.download("https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/"+filename savePath)<block_end><block_end><def_stmt>download self url savePath silent=<false><block_start>filename=os.path.basename(urlparse.urlparse(url).path)<or>'downloaded.file'<def_stmt>get_size <block_start>meta=urllib2.urlopen(url).info()<line_sep>meta_func=meta.getheaders<if>hasattr(meta 'getheaders')<else>meta.get_all<line_sep>meta_length=meta_func('Content-Length')<try_stmt><block_start><return>int(meta_length[0])<block_end><except_stmt><block_start><return>0<block_end><block_end><def_stmt>kb_to_mb kb<block_start><return>kb/1024.0/1024.0<block_end><def_stmt>callback blocks block_size total_size<block_start>current=blocks<times>block_size<line_sep>percent=100.0<times>current/total_size<line_sep>line='[{0}{1}]'.format('='<times>int(percent/2) ' 
'<times>(50-int(percent/2)))<line_sep>status='\r{0:3.0f}%{1} {2:3.1f}/{3:3.1f} MB'<line_sep>sys.stdout.write(status.format(percent line kb_to_mb(current) kb_to_mb(total_size)))<block_end>logging.info('Downloading: {0} ({1:3.1f} MB)'.format(url kb_to_mb(get_size())))<try_stmt><block_start>(savePath headers)=urlretrieve(url savePath <none><if>silent<else>callback)<block_end><except_stmt><block_start>os.remove(savePath)<line_sep><raise>Exception("Can't download {0}".format(savePath))<block_end><else_stmt><block_start>print()<line_sep>logging.info('Downloaded to: {0}'.format(savePath))<block_end><return>savePath<block_end><block_end>
<import_from_future_stmt> print_function<import_from_stmt>tdda.rexpy extract<import_from_stmt>tdda.rexpy.seq common_string_sequence<import_from_stmt>tdda.rexpy.relib re<line_sep>x=extract(['Roger' 'Coger' 'Doger'] tag=<true> as_object=<true>)<line_sep>print(x)<line_sep>patternToExamples=x.pattern_matches()<line_sep>sequences=[]<for_stmt>j,(pattern examples) enumerate(patternToExamples.items())<block_start>N=len(examples)<if_stmt>N<l>1<block_start>print('%s:%s'%(pattern examples))<block_end><else_stmt><block_start>eparts=[re.match(x.results.rex[j] e).groups()<for>e examples]<line_sep>nparts=len(eparts[0])<for_stmt>i range(nparts)<block_start>(L R)=(eparts[0][i] eparts[1][i])<line_sep>n=2<line_sep>s=common_string_sequence(L R)<while_stmt>n<l>N<and>s<ne>''<block_start>s=common_string_sequence(s eparts[n][i])<line_sep>n<augadd>1<block_end>sequences.append(s)<block_end><block_end><block_end>print(sequences)<line_sep>
<import_from_stmt>django.db models<import_from_stmt>django.db.models Case F Q Value When<import_from_stmt>psqlextra.expressions HStoreRef<import_from_stmt>psqlextra.fields HStoreField<import_from_stmt>.fake_model get_fake_model<def_stmt>test_query_annotate_hstore_key_ref <block_start>"""Tests whether annotating using a :see:HStoreRef expression works correctly. This allows you to select an individual hstore key. """<line_sep>model_fk=get_fake_model({"title":HStoreField()})<line_sep>model=get_fake_model({"fk":models.ForeignKey(model_fk on_delete=models.CASCADE)})<line_sep>fk=model_fk.objects.create(title={"en":"english" "ar":"arabic"})<line_sep>model.objects.create(fk=fk)<line_sep>queryset=(model.objects.annotate(english_title=HStoreRef("fk__title" "en")).values("english_title").first())<assert_stmt>queryset["english_title"]<eq>"english"<block_end><def_stmt>test_query_annotate_rename <block_start>"""Tests whether field names can be overwritten with a annotated field."""<line_sep>model=get_fake_model({"title":models.CharField(max_length=12)})<line_sep>model.objects.create(title="swen")<line_sep>obj=model.objects.annotate(title=F("title")).first()<assert_stmt>obj.title<eq>"swen"<block_end><def_stmt>test_query_annotate_rename_chain <block_start>"""Tests whether annotations are behaving correctly after a QuerySet chain."""<line_sep>model=get_fake_model({"name":models.CharField(max_length=10) "value":models.IntegerField() })<line_sep>model.objects.create(name="test" value=23)<line_sep>obj=model.objects.values("name").annotate(value=F("value"))[:1]<assert_stmt>"value"<in>obj[0]<assert_stmt>obj[0]["value"]<eq>23<block_end><def_stmt>test_query_annotate_rename_order <block_start>"""Tests whether annotation order is preserved after a rename."""<line_sep>model=get_fake_model({"name":models.CharField(max_length=10) "value":models.IntegerField() })<line_sep>qs=model.objects.annotate(value=F("value") value_2=F("value"))<assert_stmt>list(qs.query.annotations.keys())<eq>["value" 
"value_2"]<block_end><def_stmt>test_query_annotate_in_expression <block_start>"""Tests whether annotations can be used in expressions."""<line_sep>model=get_fake_model({"name":models.CharField(max_length=10)})<line_sep>model.objects.create(name="henk")<line_sep>result=model.objects.annotate(real_name=F("name") is_he_henk=Case(When(Q(real_name="henk") then=Value("really henk")) default=Value("definitely not henk") output_field=models.CharField() ) ).first()<assert_stmt>result.real_name<eq>"henk"<assert_stmt>result.is_he_henk<eq>"really henk"<block_end><def_stmt>test_query_hstore_value_update_f_ref <block_start>"""Tests whether F(..) expressions can be used in hstore values when performing update queries."""<line_sep>model=get_fake_model({"name":models.CharField(max_length=255) "name_new":HStoreField()})<line_sep>model.objects.create(name="waqas" name_new=dict(en="swen"))<line_sep>model.objects.update(name_new=dict(en=models.F("name")))<line_sep>inst=model.objects.all().first()<assert_stmt>inst.name_new.get("en")<eq>"waqas"<block_end><def_stmt>test_query_hstore_value_update_cast <block_start>"""Tests whether values in a HStore field are automatically cast to strings when doing updates."""<line_sep>model=get_fake_model({"title":HStoreField()})<line_sep>model.objects.create(title=dict(en="test"))<line_sep>model.objects.update(title=dict(en=2))<line_sep>inst=model.objects.all().first()<assert_stmt>inst.title.get("en")<eq>"2"<block_end><def_stmt>test_query_hstore_value_update_escape <block_start>"""Tests whether values in a HStore field are properly escaped using prepared statement values."""<line_sep>model=get_fake_model({"title":HStoreField()})<line_sep>model.objects.create(title=dict(en="test"))<line_sep>model.objects.update(title=dict(en="console.log('test')"))<line_sep>inst=model.objects.all().first()<assert_stmt>inst.title.get("en")<eq>"console.log('test')"<block_end>
<import_stmt>sys<line_sep>sys.path.append("../../")<import_from_stmt>appJar gui<def_stmt>showPositions <block_start><for_stmt>widg app.getContainer().grid_slaves()<block_start>row,column=widg.grid_info()["row"] widg.grid_info()["column"]<line_sep>print(widg row column)<block_end><block_end><with_stmt>gui("Grid Demo" "300x300" sticky="news" expand="both")<as>app<block_start><for_stmt>x range(5)<block_start><for_stmt>y range(5)<block_start>app.label(str(x)+str(y) row=x column=y)<block_end><block_end>app.button("PRESS" showPositions colspan=5)<block_end>
""" This module contains functions which implement conversion between different (neighbouring) versions of RunDescriber. """<import_from_stmt>typing Dict List<import_from_stmt>..dependencies InterDependencies_<import_from_stmt>..param_spec ParamSpec ParamSpecBase<import_from_stmt>.rundescribertypes RunDescriberV0Dict RunDescriberV1Dict RunDescriberV2Dict RunDescriberV3Dict <import_from_stmt>.v0 InterDependencies<def_stmt>old_to_new idps:InterDependencies<arrow>InterDependencies_<block_start>""" Create a new InterDependencies_ object (new style) from an existing InterDependencies object (old style). Leaves the original object unchanged. Incidentally, this function can serve as a validator of the original object """<line_sep>namedict:Dict[str ParamSpec]={ps.name:ps<for>ps idps.paramspecs}<line_sep>dependencies={}<line_sep>inferences={}<line_sep>standalones_mut=[]<line_sep>root_paramspecs:List[ParamSpecBase]=[]<for_stmt>ps idps.paramspecs<block_start>deps=tuple(namedict[n].base_version()<for>n ps.depends_on_)<line_sep>inffs=tuple(namedict[n].base_version()<for>n ps.inferred_from_)<if_stmt>len(deps)<g>0<block_start>dependencies.update({ps.base_version():deps})<line_sep>root_paramspecs<augadd>list(deps)<block_end><if_stmt>len(inffs)<g>0<block_start>inferences.update({ps.base_version():inffs})<line_sep>root_paramspecs<augadd>list(inffs)<block_end><if_stmt>len(deps)<eq>len(inffs)<eq>0<block_start>standalones_mut.append(ps.base_version())<block_end><block_end>standalones=tuple(set(standalones_mut).difference(set(root_paramspecs)))<line_sep>idps_=InterDependencies_(dependencies=dependencies inferences=inferences standalones=standalones)<line_sep><return>idps_<block_end><def_stmt>new_to_old idps:InterDependencies_<arrow>InterDependencies<block_start>""" Create a new InterDependencies object (old style) from an existing InterDependencies_ object (new style). Leaves the original object unchanged. 
Only meant to be used for ensuring backwards-compatibility until we update sqlite module to forget about ParamSpecs """<line_sep>paramspecs:Dict[str ParamSpec]={}<line_sep># first the independent parameters <for_stmt>indeps idps.dependencies.values()<block_start><for_stmt>indep indeps<block_start>paramspecs.update({indep.name:ParamSpec(name=indep.name paramtype=indep.type label=indep.label unit=indep.unit)})<block_end><block_end><for_stmt>inffs idps.inferences.values()<block_start><for_stmt>inff inffs<block_start>paramspecs.update({inff.name:ParamSpec(name=inff.name paramtype=inff.type label=inff.label unit=inff.unit)})<block_end><block_end><for_stmt>ps_base idps._paramspec_to_id.keys()<block_start>paramspecs.update({ps_base.name:ParamSpec(name=ps_base.name paramtype=ps_base.type label=ps_base.label unit=ps_base.unit)})<block_end><for_stmt>ps,indeps idps.dependencies.items()<block_start><for_stmt>indep indeps<block_start>paramspecs[ps.name]._depends_on.append(indep.name)<block_end><block_end><for_stmt>ps,inffs idps.inferences.items()<block_start><for_stmt>inff inffs<block_start>paramspecs[ps.name]._inferred_from.append(inff.name)<block_end><block_end><return>InterDependencies(*tuple(paramspecs.values()))<block_end><def_stmt>v0_to_v1 old:RunDescriberV0Dict<arrow>RunDescriberV1Dict<block_start>""" Convert a v0 RunDescriber Dict to a v1 RunDescriber Dict """<line_sep>old_idps=InterDependencies._from_dict(old["interdependencies"])<line_sep>new_idps_dict=old_to_new(old_idps)._to_dict()<line_sep><return>RunDescriberV1Dict(version=1 interdependencies=new_idps_dict)<block_end><def_stmt>v1_to_v2 old:RunDescriberV1Dict<arrow>RunDescriberV2Dict<block_start>""" Convert a v1 RunDescriber Dict to a v2 RunDescriber Dict """<line_sep>interdeps_dict=old['interdependencies']<line_sep>interdeps_=InterDependencies_._from_dict(interdeps_dict)<line_sep>interdepsdict=new_to_old(interdeps_)._to_dict()<line_sep><return>RunDescriberV2Dict(version=2 interdependencies_=interdeps_dict 
interdependencies=interdepsdict)<block_end><def_stmt>v2_to_v3 old:RunDescriberV2Dict<arrow>RunDescriberV3Dict<block_start><return>RunDescriberV3Dict(version=3 interdependencies=old['interdependencies'] interdependencies_=old['interdependencies_'] shapes=<none>)<block_end><def_stmt>v0_to_v2 old:RunDescriberV0Dict<arrow>RunDescriberV2Dict<block_start>""" Convert a v0 RunDescriber Dict to a v2 RunDescriber Dict """<line_sep><return>v1_to_v2(v0_to_v1(old))<block_end><def_stmt>v0_to_v3 old:RunDescriberV0Dict<arrow>RunDescriberV3Dict<block_start><return>v2_to_v3(v0_to_v2(old))<block_end><def_stmt>v1_to_v3 old:RunDescriberV1Dict<arrow>RunDescriberV3Dict<block_start><return>v2_to_v3(v1_to_v2(old))<block_end><def_stmt>v3_to_v2 new:RunDescriberV3Dict<arrow>RunDescriberV2Dict<block_start><return>RunDescriberV2Dict(version=2 interdependencies=new['interdependencies'] interdependencies_=new['interdependencies_'] )<block_end><def_stmt>v2_to_v1 new:RunDescriberV2Dict<arrow>RunDescriberV1Dict<block_start>""" Convert a v2 RunDescriber Dict to a v1 RunDescriber Dict """<line_sep>rundescriberdictv1=RunDescriberV1Dict(version=1 interdependencies=new['interdependencies_'])<line_sep><return>rundescriberdictv1<block_end><def_stmt>v1_to_v0 new:RunDescriberV1Dict<arrow>RunDescriberV0Dict<block_start>""" Convert a v1 RunDescriber Dict to a v0 RunDescriber Dict """<line_sep>interdeps_dict=new['interdependencies']<line_sep>interdeps_=InterDependencies_._from_dict(interdeps_dict)<line_sep>interdepsdict=new_to_old(interdeps_)._to_dict()<line_sep>rundescriberv0dict=RunDescriberV0Dict(version=0 interdependencies=interdepsdict)<line_sep><return>rundescriberv0dict<block_end><def_stmt>v3_to_v1 new:RunDescriberV3Dict<arrow>RunDescriberV1Dict<block_start><return>v2_to_v1(v3_to_v2(new))<block_end><def_stmt>v2_to_v0 new:RunDescriberV2Dict<arrow>RunDescriberV0Dict<block_start>""" Convert a v2 RunDescriber Dict to a v0 RunDescriber Dict 
"""<line_sep><return>v1_to_v0(v2_to_v1(new))<block_end><def_stmt>v3_to_v0 new:RunDescriberV3Dict<arrow>RunDescriberV0Dict<block_start><return>v1_to_v0(v3_to_v1(new))<block_end>
<import_from_stmt>SeleniumLibrary.base LibraryComponent keyword<class_stmt>my_lib_args(LibraryComponent)<block_start><def_stmt>__init__ self ctx arg1 arg2 *args **kwargs<block_start>LibraryComponent.__init__(self ctx)<line_sep>self.arg1=arg1<line_sep>self.arg2=arg2<line_sep>self.args=args<line_sep>self.kwargs=kwargs<block_end>@keyword(tags=["MyTag"])<def_stmt>foo_1 self<block_start>self.info("foo")<block_end>@keyword<def_stmt>bar_2 self arg<block_start>self.info(arg)<block_end>@keyword<def_stmt>add_cookie self foo bar<block_start>self.info(foo)<line_sep>self.info(bar)<block_end><block_end>
# -*- coding: utf-8 -*- # © 2016 <NAME> <<EMAIL>>, Trustcode # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). <import_from_stmt>. pos_order<import_from_stmt>. pos_session<import_from_stmt>. invoice_eletronic<import_from_stmt>. account_journal<import_from_stmt>. pos_payment_method<line_sep>
"""Script to verify permissions have transferred post groups/guardian. "docker-compose run --rm web python3 -m scripts.remove_after_use.verify_groups_guardian_migration" """<import_stmt>logging<import_from_stmt>random randint<import_from_stmt>website.app setup_django<line_sep>setup_django()<import_from_stmt>django.apps apps<import_from_stmt>django.contrib.auth.models Permission Group<import_from_stmt>osf.utils.permissions PERMISSIONS reduce_permissions<import_from_stmt>osf.models AbstractNode Contributor Preprint Node Registration QuickFilesNode<import_from_stmt>osf.models.node NodeGroupObjectPermission<import_from_stmt>osf.models.preprint PreprintGroupObjectPermission<import_from_stmt>osf.utils.permissions READ WRITE ADMIN<line_sep>logger=logging.getLogger(__name__)<line_sep>logging.basicConfig(level=logging.INFO)<def_stmt>check_expected expected actual error_msg<block_start><if_stmt>expected<ne>actual<block_start>logger.info('{}. Expected {} rows migrated; received {}.'.format(error_msg expected actual))<block_end><else_stmt><block_start>logger.info('{} rows added.'.format(actual))<block_end><block_end><def_stmt>verify_permissions_created <block_start>""" Expecting three permissions added, read, write, admin perms """<line_sep>expected=len(PERMISSIONS)<line_sep>actual=Permission.objects.filter(codename__in=PERMISSIONS).count()<line_sep>check_expected(expected actual 'Discepancy in Permission table.')<block_end><def_stmt>verify_auth_groups <block_start>""" Expecting three groups added for every AbstractNode - read/write/admin """<line_sep>expected=AbstractNode.objects.count()<times>3<line_sep>actual=Group.objects.filter(name__icontains='node_').count()<line_sep>check_expected(expected actual 'Discepancy in auth_group table.')<block_end><def_stmt>verify_expected_node_group_object_permission_counts <block_start>""" For every AbstactNode, three Django groups - admin, write, read are created. Admin group gets admin/write/read perms, write - write/read, and read: read. 
So for every node, 6 line items added to NodeGroupObjectPermission. Linking these groups with their permissions to the given node. """<line_sep>expected_nodegroupobjperm_count=AbstractNode.objects.count()<times>6<line_sep>actual_nodegroupobjperm_count=NodeGroupObjectPermission.objects.count()<line_sep>check_expected(expected_nodegroupobjperm_count actual_nodegroupobjperm_count 'Discrepancy in NodeGroupObjectPermission table.')<block_end><def_stmt>verify_expected_contributor_migration <block_start>""" Based on contributor admin/write/read columns, users are migrated to the osfgroupuser table and added to the appropriate Django group. """<line_sep>OSFUserGroup=apps.get_model('osf' 'osfuser_groups')<line_sep>expected=Contributor.objects.count()<line_sep>actual=OSFUserGroup.objects.filter(group__name__icontains='node_').count()<line_sep>check_expected(expected actual 'Discrepancy in contributor migration to OSFUserGroup table.')<block_end><def_stmt>verify_preprint_foreign_key_migration <block_start>expected_preprintgroupobjperm_count=Preprint.objects.count()<times>6<line_sep>actual_preprintgroupobjperm_count=PreprintGroupObjectPermission.objects.count()<line_sep>check_expected(expected_preprintgroupobjperm_count actual_preprintgroupobjperm_count 'Discrepancy in PreprintGroupObjectPermission table.')<block_end><def_stmt>verify_random_objects <block_start>resources=[Node Registration QuickFilesNode]<for_stmt>resource resources<block_start><for_stmt>i range(1 10)<block_start>random_resource=_get_random_object(resource)<if_stmt>random_resource<block_start>_verify_contributor_perms(random_resource)<block_end><block_end><block_end><block_end><def_stmt>_verify_contributor_perms resource<block_start><for_stmt>user resource.contributors<block_start>contrib=Contributor.objects.get(node=resource 
user=user)<if_stmt>contrib.admin<block_start><if_stmt>contrib.permission<ne>ADMIN<block_start>_suspected_contributor_migration_error(contrib)<block_end><block_end><elif_stmt>contrib.write<block_start><if_stmt>contrib.permission<ne>WRITE<block_start>_suspected_contributor_migration_error(contrib)<block_end><block_end><elif_stmt>contrib.read<block_start><if_stmt>contrib.permission<ne>READ<block_start>_suspected_contributor_migration_error(contrib)<block_end><block_end><block_end><block_end><def_stmt>_suspected_contributor_migration_error contrib<block_start>logger.info('Suspected contributor migration error on {}.'.format(contrib._id))<block_end><def_stmt>_get_random_object model<block_start>model_count=model.objects.count()<if_stmt>model_count<block_start><return>model.objects.all()[randint(1 model_count-1)]<block_end><return><none><block_end><def_stmt>main <block_start>logger.info('Verifying permissions created...')<line_sep>verify_permissions_created()<line_sep>logger.info('Verifying auth groups created...')<line_sep>verify_auth_groups()<line_sep>logger.info('Verifying node groups given permissions to their nodes...')<line_sep>verify_expected_node_group_object_permission_counts()<line_sep>logger.info('Verifying contributors added to node django groups...')<line_sep>verify_expected_contributor_migration()<line_sep>logger.info('Verifying preprint perms migrated to direct foreign key table...')<line_sep>verify_preprint_foreign_key_migration()<line_sep>logger.info('Verifying a selection of random contributor permissions...')<line_sep>verify_random_objects()<line_sep>logger.info('Done!')<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>sys<import_stmt>time<import_stmt>unittest<import_stmt>inspect<import_stmt>nose<import_from_stmt>lib.noseplugin OptionParser parser_option<import_from_stmt>lib base<import_from_stmt>lib.base BGP_FSM_ESTABLISHED local<import_from_stmt>lib.gobgp GoBGPContainer<import_from_stmt>lib.exabgp ExaBGPContainer<line_sep>counter=1<line_sep>_SCENARIOS={}<def_stmt>register_scenario cls<block_start><global>counter<line_sep>_SCENARIOS[counter]=cls<line_sep>counter<augadd>1<block_end><def_stmt>lookup_scenario name<block_start><for_stmt>value list(_SCENARIOS.values())<block_start><if_stmt>value.__name__<eq>name<block_start><return>value<block_end><block_end><return><none><block_end><def_stmt>wait_for f timeout=120<block_start>interval=1<line_sep>count=0<while_stmt><true><block_start><if_stmt>f()<block_start><return><block_end>time.sleep(interval)<line_sep>count<augadd>interval<if_stmt>count<ge>timeout<block_start><raise>Exception('timeout')<block_end><block_end><block_end>@register_scenario<class_stmt>MalformedMpReachNlri(object)<block_start>""" No.1 malformaed mp-reach-nlri """<line_sep>@staticmethod<def_stmt>boot env<block_start>gobgp_ctn_image_name=env.parser_option.gobgp_image<line_sep>log_level=env.parser_option.gobgp_log_level<line_sep>g1=GoBGPContainer(name='g1' asn=65000 router_id='192.168.0.1' ctn_image_name=gobgp_ctn_image_name 
log_level=log_level)<line_sep>e1=ExaBGPContainer(name='e1' asn=65001 router_id='192.168.0.2')<line_sep>e2=ExaBGPContainer(name='e2' asn=65001 router_id='192.168.0.2')<line_sep>ctns=[g1 e1 e2]<line_sep>initial_wait_time=max(ctn.run()<for>ctn ctns)<line_sep>time.sleep(initial_wait_time)<for_stmt>q [e1 e2]<block_start>g1.add_peer(q is_rs_client=<true>)<line_sep>q.add_peer(g1)<block_end>env.g1=g1<line_sep>env.e1=e1<line_sep>env.e2=e2<block_end>@staticmethod<def_stmt>setup env<block_start>g1=env.g1<line_sep>e1=env.e1<line_sep>e2=env.e2<for_stmt>c [e1 e2]<block_start>g1.wait_for(BGP_FSM_ESTABLISHED c)<block_end># advertise malformed MP_REACH_NLRI e1.add_route('10.7.0.17/32' attribute='0x0e 0x60 0x11223344')<block_end>@staticmethod<def_stmt>check env<block_start>g1=env.g1<line_sep>e1=env.e1<line_sep>e2=env.e2<def_stmt>f <block_start><for_stmt>line e1.log().split('\n')<block_start><if_stmt>'UPDATE message error / Attribute Flags Error / 0x600E0411223344'<in>line<block_start><return><true><block_end><block_end><return><false><block_end>wait_for(f)<line_sep># check e2 is still established g1.wait_for(BGP_FSM_ESTABLISHED e2)<block_end>@staticmethod<def_stmt>executor env<block_start>lookup_scenario("MalformedMpReachNlri").boot(env)<line_sep>lookup_scenario("MalformedMpReachNlri").setup(env)<line_sep>lookup_scenario("MalformedMpReachNlri").check(env)<block_end><block_end>@register_scenario<class_stmt>MalformedMpUnReachNlri(object)<block_start>""" No.2 malformaed mp-unreach-nlri """<line_sep>@staticmethod<def_stmt>boot env<block_start>lookup_scenario("MalformedMpReachNlri").boot(env)<block_end>@staticmethod<def_stmt>setup env<block_start>g1=env.g1<line_sep>e1=env.e1<line_sep>e2=env.e2<for_stmt>c [e1 e2]<block_start>g1.wait_for(BGP_FSM_ESTABLISHED c)<block_end># advertise malformed MP_UNREACH_NLRI e1.add_route('10.7.0.17/32' attribute='0x0f 0x60 0x11223344')<block_end>@staticmethod<def_stmt>check env<block_start>g1=env.g1<line_sep>e1=env.e1<line_sep>e2=env.e2<def_stmt>f 
<block_start><for_stmt>line e1.log().split('\n')<block_start><if_stmt>'UPDATE message error / Attribute Flags Error / 0x600F0411223344'<in>line<block_start><return><true><block_end><block_end><return><false><block_end>wait_for(f)<line_sep># check e2 is still established g1.wait_for(BGP_FSM_ESTABLISHED e2)<block_end>@staticmethod<def_stmt>executor env<block_start>lookup_scenario("MalformedMpUnReachNlri").boot(env)<line_sep>lookup_scenario("MalformedMpUnReachNlri").setup(env)<line_sep>lookup_scenario("MalformedMpUnReachNlri").check(env)<block_end><block_end>@register_scenario<class_stmt>MalformedAsPath(object)<block_start>""" No.3 malformaed as-path """<line_sep>@staticmethod<def_stmt>boot env<block_start>lookup_scenario("MalformedMpReachNlri").boot(env)<block_end>@staticmethod<def_stmt>setup env<block_start>g1=env.g1<line_sep>e1=env.e1<line_sep>e2=env.e2<for_stmt>c [e1 e2]<block_start>g1.wait_for(BGP_FSM_ESTABLISHED c)<block_end># advertise malformed AS_PATH # Send the attribute to the length and number of aspath is inconsistent # Attribute Type 0x02 (AS_PATH) # Attribute Flag 0x40 (well-known transitive) # Attribute Value 0x02020000ffdc ( # segment type = 02 # segment length = 02 -> # correct value = 01 # as number = 65500 ) e1.add_route('10.7.0.17/32' attribute='0x02 0x60 0x11223344')<block_end>@staticmethod<def_stmt>check env<block_start>g1=env.g1<line_sep>e1=env.e1<line_sep>e2=env.e2<def_stmt>f <block_start><for_stmt>line e1.log().split('\n')<block_start><if_stmt>'UPDATE message error / Attribute Flags Error / 0x60020411223344'<in>line<block_start><return><true><block_end><block_end><return><false><block_end>wait_for(f)<line_sep># check e2 is still established g1.wait_for(BGP_FSM_ESTABLISHED e2)<block_end>@staticmethod<def_stmt>executor 
# NOTE(review): this chunk is the tail of a larger test module; it opens in the
# middle of MalformedAsPath.executor, whose "def executor(env):" header sits
# above this chunk.  That header is reconstructed below from the identical
# executor pattern used by every other scenario in this file -- confirm
# against the full module.

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedAsPath").boot(env)
        lookup_scenario("MalformedAsPath").setup(env)
        lookup_scenario("MalformedAsPath").check(env)


@register_scenario
class MalformedAs4Path(object):
    """ No.4 malformed as4-path """

    @staticmethod
    def boot(env):
        # Reuse the topology booted by the MalformedMpReachNlri scenario.
        lookup_scenario("MalformedMpReachNlri").boot(env)

    @staticmethod
    def setup(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2
        for c in [e1, e2]:
            g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # advertise malformed AS4_PATH
        e1.add_route('10.7.0.17/32', attribute='0x11 0x60 0x11223344')

    @staticmethod
    def check(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2

        def f():
            # e1 must log the UPDATE error notification (hex dump of the
            # offending attribute) that g1 sent back.
            for line in e1.log().split('\n'):
                if 'UPDATE message error / Attribute Flags Error / 0x60110411223344' in line:
                    return True
            return False
        wait_for(f)
        # check e2 is still established
        g1.wait_for(BGP_FSM_ESTABLISHED, e2)

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedAs4Path").boot(env)
        lookup_scenario("MalformedAs4Path").setup(env)
        lookup_scenario("MalformedAs4Path").check(env)


@register_scenario
class MalformedNexthop(object):
    """ No.5 malformed nexthop """

    @staticmethod
    def boot(env):
        lookup_scenario("MalformedMpReachNlri").boot(env)

    @staticmethod
    def setup(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2
        for c in [e1, e2]:
            g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # advertise malformed NEXT_HOP
        # 0x0e: MP_REACH_NLRI
        # 0x60: Optional, Transitive
        # 0x01: AFI(IPv4)
        # 0x01: SAFI(unicast)
        # 0x10: Length of Next Hop Address
        # 0xffffff00: Network address of Next Hop
        # 0x00: Reserved
        e1.add_route('10.7.0.17/32', attribute='0x0e 0x60 0x010110ffffff0000')

    @staticmethod
    def check(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2

        def f():
            for line in e1.log().split('\n'):
                if 'UPDATE message error / Attribute Flags Error / 0x600E08010110FFFFFF0000' in line:
                    return True
            return False
        wait_for(f)
        # check e2 is still established
        g1.wait_for(BGP_FSM_ESTABLISHED, e2)

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedNexthop").boot(env)
        lookup_scenario("MalformedNexthop").setup(env)
        lookup_scenario("MalformedNexthop").check(env)


@register_scenario
class MalformedRouteFamily(object):
    """ No.6 malformed route family """

    @staticmethod
    def boot(env):
        lookup_scenario("MalformedMpReachNlri").boot(env)

    @staticmethod
    def setup(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2
        for c in [e1, e2]:
            g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # advertise malformed ROUTE_FAMILY
        # 0x0e: MP_REACH_NLRI
        # 0x60: Optional, Transitive
        # 0x01: AFI(IPv4)
        # 0x01: SAFI(unicast)
        # 0x10: Length of Next Hop Address
        # 0xffffff00: Network address of Next Hop
        # 0x00: Reserved
        e1.add_route('10.7.0.17/32', attribute='0x0e 0x60 0x0002011020010db800000000000000000000000100')

    @staticmethod
    def check(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2

        def f():
            for line in e1.log().split('\n'):
                if 'UPDATE message error / Attribute Flags Error / 0x600E150002011020010DB800000000000000000000000100' in line:
                    return True
            return False
        wait_for(f)
        # check e2 is still established
        g1.wait_for(BGP_FSM_ESTABLISHED, e2)

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedRouteFamily").boot(env)
        lookup_scenario("MalformedRouteFamily").setup(env)
        lookup_scenario("MalformedRouteFamily").check(env)


@register_scenario
class MalformedAsPathSegmentLengthInvalid(object):
    """ No.7 malformed aspath segment length invalid """

    @staticmethod
    def boot(env):
        lookup_scenario("MalformedMpReachNlri").boot(env)

    @staticmethod
    def setup(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2
        for c in [e1, e2]:
            g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # advertise malformed AS_PATH SEGMENT LENGTH
        # Send the attribute to the length and number of aspath is inconsistent
        # Attribute Type  0x02 (AS_PATH)
        # Attribute Flag  0x40 (well-known transitive)
        # Attribute Value 0x02020000ffdc (
        #  segment type = 02
        #  segment length = 02 -> # correct value = 01
        #  as number = 65500 )
        e1.add_route('10.7.0.17/32', attribute='0x02 0x40 0x0202ffdc')

    @staticmethod
    def check(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2

        def f():
            for line in e1.log().split('\n'):
                if 'UPDATE message error / Malformed AS_PATH / 0x4002040202FFDC' in line:
                    return True
            return False
        wait_for(f)
        # check e2 is still established
        g1.wait_for(BGP_FSM_ESTABLISHED, e2)

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedAsPathSegmentLengthInvalid").boot(env)
        lookup_scenario("MalformedAsPathSegmentLengthInvalid").setup(env)
        lookup_scenario("MalformedAsPathSegmentLengthInvalid").check(env)


@register_scenario
class MalformedNexthopLoopbackAddr(object):
    """ No.8 malformed nexthop loopback addr """

    @staticmethod
    def boot(env):
        lookup_scenario("MalformedMpReachNlri").boot(env)

    @staticmethod
    def setup(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2
        for c in [e1, e2]:
            g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # Malformed Invalid NEXT_HOP Attribute
        # Send the attribute of invalid nexthop
        # next-hop 127.0.0.1 -> # correct value = other than loopback and
        # 0.0.0.0 address
        e1.add_route('10.7.0.17/32', nexthop='127.0.0.1')

    @staticmethod
    def check(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2

        def f():
            for line in e1.log().split('\n'):
                if 'UPDATE message error / Invalid NEXT_HOP Attribute / 0x4003047F000001' in line:
                    return True
            return False
        wait_for(f)
        # check e2 is still established
        g1.wait_for(BGP_FSM_ESTABLISHED, e2)

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedNexthopLoopbackAddr").boot(env)
        lookup_scenario("MalformedNexthopLoopbackAddr").setup(env)
        lookup_scenario("MalformedNexthopLoopbackAddr").check(env)


@register_scenario
class MalformedOriginType(object):
    """ No.9 malformed origin type """

    @staticmethod
    def boot(env):
        lookup_scenario("MalformedMpReachNlri").boot(env)

    @staticmethod
    def setup(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2
        for c in [e1, e2]:
            g1.wait_for(BGP_FSM_ESTABLISHED, c)
        # Invalid ORIGIN Attribute
        # Send the attribute of origin type 4
        # Attribute Type  0x01 (Origin)
        # Attribute Flag  0x40 (well-known transitive)
        # Attribute Value 0x04 (
        #  origin type = 04 -> # correct value = 01 or 02 or 03 )
        e1.add_route('10.7.0.17/32', attribute='0x1 0x40 0x04')

    @staticmethod
    def check(env):
        g1 = env.g1
        e1 = env.e1
        e2 = env.e2

        def f():
            for line in e1.log().split('\n'):
                if 'UPDATE message error / Invalid ORIGIN Attribute / 0x40010104' in line:
                    return True
            return False
        wait_for(f)
        # check e2 is still established
        g1.wait_for(BGP_FSM_ESTABLISHED, e2)

    @staticmethod
    def executor(env):
        lookup_scenario("MalformedOriginType").boot(env)
        lookup_scenario("MalformedOriginType").setup(env)
        lookup_scenario("MalformedOriginType").check(env)


class TestGoBGPBase(unittest.TestCase):
    # Retry knobs used by the scenario helpers (seconds / attempts).
    wait_per_retry = 5
    retry_limit = 10

    @classmethod
    def setUpClass(cls):
        # test_index selects a single scenario; 0 means run them all.
        idx = parser_option.test_index
        base.TEST_PREFIX = parser_option.test_prefix
        cls.parser_option = parser_option
        cls.executors = []
        if idx == 0:
            print('unset test-index. run all test sequential')
            for _, v in list(_SCENARIOS.items()):
                # Pick up each scenario's 'executor' staticmethod.
                for k, m in inspect.getmembers(v, inspect.isfunction):
                    if k == 'executor':
                        cls.executor = m
                cls.executors.append(cls.executor)
        elif idx not in _SCENARIOS:
            print('invalid test-index. # of scenarios: {0}'.format(len(_SCENARIOS)))
            sys.exit(1)
        else:
            for k, m in inspect.getmembers(_SCENARIOS[idx], inspect.isfunction):
                if k == 'executor':
                    cls.executor = m
            cls.executors.append(cls.executor)

    def test(self):
        # nose-style generator test: yields one executor per scenario.
        for e in self.executors:
            yield e


if __name__ == '__main__':
    # The scenarios drive containers, so docker must be on PATH.
    output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    if int(output) != 0:
        print("docker not found")
        sys.exit(1)
    nose.main(argv=sys.argv, addplugins=[OptionParser()], defaultTest=sys.argv[0])
<import_stmt>demistomock<as>demisto<line_sep>action=demisto.getArg('action')<if_stmt>action<not><in>['link' 'unlink']<block_start>action='link'<block_end>demisto.results(demisto.executeCommand("linkIncidents" {"linkedIncidentIDs":demisto.getArg("linkedIncidentIDs") "action":action}))<line_sep>
# Author: <NAME> # Uncomment the next line to see my email # print "Author's email: ", "61706c69636163696f6e616d656469646140676d61696c2e636f6d".decode("hex") <try_stmt><block_start><import_stmt>Tkinter<as>tk<import_stmt>ttk<block_end><except_stmt>ImportError<block_start><import_stmt>tkinter<as>tk<import_from_stmt>tkinter ttk<block_end><class_stmt>MouseWheel(object)<block_start><def_stmt>__init__ self root factor=0.5<block_start>self.activeArea=<none><line_sep>self.factor=factor<import_stmt>platform<line_sep>os=platform.system()<if_stmt>os<eq>"Linux"<block_start>root.bind_all('<4>' self.onMouseWheel add='+')<line_sep>root.bind_all('<5>' self.onMouseWheel add='+')<block_end><else_stmt># Windows and MacOS <block_start>root.bind_all("<MouseWheel>" self.onMouseWheel add='+')<block_end><block_end><def_stmt>onMouseWheel self event<block_start><if_stmt>self.activeArea<block_start>self.activeArea.onMouseWheel(event.delta)<block_end><block_end><def_stmt>mouseWheel_bind self widget<block_start>self.activeArea=widget<block_end><def_stmt>mouseWheel_unbind self<block_start>self.activeArea=<none><block_end><def_stmt>add_scrolling self scrollingArea xscrollbar=<none> yscrollbar=<none><block_start>scrollingArea.bind('<Enter>' <lambda>event:self.mouseWheel_bind(scrollingArea))<line_sep>scrollingArea.bind('<Leave>' <lambda>event:self.mouseWheel_unbind())<if_stmt>xscrollbar<and><not>hasattr(xscrollbar 'onMouseWheel')<block_start>setattr(xscrollbar 'onMouseWheel' <lambda>delta:scrollingArea.xview("scroll" (-1)<times>int(delta/(120<times>self.factor)) "units"))<block_end><if_stmt>yscrollbar<and><not>hasattr(yscrollbar 'onMouseWheel')<block_start>setattr(yscrollbar 'onMouseWheel' <lambda>delta:scrollingArea.yview("scroll" (-1)<times>int(delta/(120<times>self.factor)) "units"))<block_end>active_scrollbar_on_mouse_wheel=yscrollbar<or>xscrollbar<if_stmt>active_scrollbar_on_mouse_wheel<block_start>setattr(scrollingArea 'onMouseWheel' 
active_scrollbar_on_mouse_wheel.onMouseWheel)<block_end><for_stmt>scrollbar (xscrollbar yscrollbar)<block_start><if_stmt>scrollbar<block_start>scrollbar.bind('<Enter>' <lambda>event scrollbar=scrollbar:self.mouseWheel_bind(scrollbar))<line_sep>scrollbar.bind('<Leave>' <lambda>event:self.mouseWheel_unbind())<block_end><block_end><block_end><block_end><class_stmt>simultaneousScrollbar(ttk.Scrollbar)<block_start><def_stmt>__init__ self master factor=0.5 **kwargs<block_start>self.__scrollableWidgets=[]<if_stmt>'orient'<in>kwargs<block_start><if_stmt>kwargs['orient']<eq>tk.VERTICAL<block_start>self.__orientLabel='y'<block_end><elif_stmt>kwargs['orient']<eq>tk.HORIZONTAL<block_start>self.__orientLabel='x'<block_end><else_stmt><block_start><raise>Exception("Bad 'orient' argument in scrollbar.")<block_end><block_end><else_stmt><block_start>self.__orientLabel='y'<block_end>kwargs['command']=self.onScroll<line_sep>self.factor=factor<line_sep>ttk.Scrollbar.__init__(self master **kwargs)<block_end><def_stmt>add_ScrollableArea self *scrollableWidgets<block_start><for_stmt>widget scrollableWidgets<block_start>self.__scrollableWidgets.append(widget)<line_sep>widget[self.__orientLabel+'scrollcommand']=self.set<block_end><block_end><def_stmt>onScroll self *args<block_start><for_stmt>widget self.__scrollableWidgets<block_start>getattr(widget self.__orientLabel+'view')(*args)<block_end><block_end><def_stmt>onMouseWheel self delta<block_start><for_stmt>widget self.__scrollableWidgets<block_start>getattr(widget self.__orientLabel+'view')("scroll" (-1)<times>int(delta/(120<times>self.factor)) "units")<block_end><block_end><block_end><def_stmt>test <block_start>root=tk.Tk()<line_sep>scrollbar=simultaneousScrollbar(root orient=tk.HORIZONTAL)<line_sep>scrollbar.pack(side=tk.TOP fill=tk.X)<line_sep>emptySpace=tk.Frame(root height=18)<line_sep>emptySpace.pack()<line_sep>tk.Label(root text='First scrolled frame:').pack(anchor=tk.W)<line_sep>canvas1=tk.Canvas(root width=300 
height=100)<line_sep>canvas1.pack(anchor=tk.NW)<line_sep>frame1=tk.Frame(canvas1)<line_sep>frame1.pack()<for_stmt>i range(20)<block_start>tk.Label(frame1 text="Label "+str(i)).pack(side=tk.LEFT)<block_end>canvas1.create_window(0 0 window=frame1 anchor='nw')<line_sep>canvas1.update_idletasks()<line_sep>canvas1['scrollregion']=(0 0 frame1.winfo_reqwidth() frame1.winfo_reqheight())<line_sep>tk.Label(root text='Second scrolled frame:').pack(anchor=tk.W)<line_sep>canvas2=tk.Canvas(root width=300 height=100)<line_sep>canvas2.pack(anchor=tk.NW)<line_sep>frame2=tk.Frame(canvas2)<line_sep>frame2.pack()<for_stmt>i range(20)<block_start>tk.Label(frame2 text="Label "+str(i)).pack(side=tk.LEFT)<block_end>canvas2.create_window(0 0 window=frame2 anchor='nw')<line_sep>canvas2.update_idletasks()<line_sep>canvas2['scrollregion']=(0 0 frame2.winfo_reqwidth() frame2.winfo_reqheight())<line_sep>scrollbar.add_ScrollableArea(canvas1 canvas2)<line_sep>MouseWheel(root).add_scrolling(canvas1 xscrollbar=scrollbar)<line_sep>MouseWheel(root).add_scrolling(canvas2 xscrollbar=scrollbar)<line_sep>root.mainloop()<block_end><if_stmt>__name__<eq>'__main__'<block_start>test()<block_end>
"""Benchmarks the file handler"""<import_from_stmt>logbook Logger FileHandler<import_from_stmt>tempfile NamedTemporaryFile<line_sep>log=Logger('Test logger')<def_stmt>run <block_start>f=NamedTemporaryFile()<with_stmt>FileHandler(f.name)<as>handler<block_start><for_stmt>x xrange(500)<block_start>log.warning('this is handled')<block_end><block_end><block_end>
"""Tests for the ChiSsa51Spider scraper against a saved HTML fixture."""
from datetime import datetime
from os.path import dirname, join

import pytest
from city_scrapers_core.constants import COMMISSION, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time

from city_scrapers.spiders.chi_ssa_51 import ChiSsa51Spider

# Saved copy of the agency page; parsed once at import time so every test
# below can share the same `parsed_items` list.
test_response = file_response(
    join(dirname(__file__), "files", "chi_ssa_51.html"),
    url="http://www.cbatechworks.org/",
)
spider = ChiSsa51Spider()

# Freeze "now" so status calculations (PASSED vs upcoming) are deterministic.
freezer = freeze_time("2019-07-19")
freezer.start()

parsed_items = [item for item in spider.parse(test_response)]

freezer.stop()


def test_start():
    assert parsed_items[0]["start"] == datetime(2019, 3, 13, 12, 0)


def test_end():
    assert parsed_items[0]["end"] == datetime(2019, 3, 13, 13, 0)


def test_id():
    assert parsed_items[0]["id"] == "chi_ssa_51/201903131200/x/commission"


def test_status():
    assert parsed_items[0]["status"] == PASSED


@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
    assert item["all_day"] is False


@pytest.mark.parametrize("item", parsed_items)
def test_title(item):
    assert item["title"] == "Commission"


@pytest.mark.parametrize("item", parsed_items)
def test_description(item):
    assert item["description"] == ""


@pytest.mark.parametrize("item", parsed_items)
def test_time_notes(item):
    assert item["time_notes"] == ""


@pytest.mark.parametrize("item", parsed_items)
def test_location(item):
    assert item["location"] == {
        "address": "806 East 78th Street, Chicago IL 60619",
        "name": "<NAME>",
    }


@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
    assert item["source"] == "http://www.cbatechworks.org/"


@pytest.mark.parametrize("item", parsed_items)
def test_links(item):
    assert item["links"] == []


@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
    assert item["classification"] == COMMISSION
"""Builds and merges SARIF v2.1.0 logs from multiple analysis tools."""
import attr
import pandas

from sarif_om import *

from src.exception.VulnerabilityNotFoundException import VulnerabilityNotFoundException

# SARIF output format version / schema targeted by this module.
VERSION = "2.1.0"
SCHEMA = "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"


class SarifHolder:
    """Accumulates per-tool analysis runs into a single SARIF log and
    serializes it with standard SARIF property names."""

    def __init__(self):
        self.sarif = SarifLog(runs=[], version=VERSION, schema_uri=SCHEMA)
        # Maps sarif_om attribute names -> standard SARIF property names;
        # populated lazily by serialize() during serialization.
        self.translationDict = dict()

    # each analysis is defined by a Run
    def addRun(self, newRun):
        """Merge newRun into an existing run from the same tool, or append it."""
        # Check if already exists an analysis performed by the same tool
        for run in self.sarif.runs:
            if run.tool.driver.name == newRun.tool.driver.name:
                # Append Unique Rules
                for rule in newRun.tool.driver.rules:
                    if isNotDuplicateRule(rule, run.tool.driver.rules):
                        run.tool.driver.rules.append(rule)
                # Append Unique Artifacts
                for artifact in newRun.artifacts:
                    if isNotDuplicateArtifact(artifact, run.artifacts):
                        run.artifacts.append(artifact)
                # Append Unique Logical Locations
                if newRun.logical_locations is not None:
                    for logicalLocation in newRun.logical_locations:
                        if isNotDuplicateLogicalLocation(logicalLocation, run.logical_locations):
                            run.logical_locations.append(logicalLocation)
                # Append Results (results are never deduplicated)
                for result in newRun.results:
                    run.results.append(result)
                return
        self.sarif.runs.append(newRun)

    # to print the analysis from a given tool
    def printToolRun(self, tool):
        """Serialize only the run produced by `tool` (case-insensitive match)."""
        run = -1
        for i in range(len(self.sarif.runs)):
            if self.sarif.runs[i].tool.driver.name.lower() == tool.lower():
                run = i
        sarifIndividual = SarifLog(runs=[], version=VERSION, schema_uri=SCHEMA)
        if run != -1:
            sarifIndividual.runs.append(self.sarif.runs[run])
        return self.serializeSarif(sarifIndividual)

    # print json formatted the SARIF file
    def print(self):
        return self.serializeSarif(self.sarif)

    # creates dictionary to fix variable names from sarif_om to standard sarif
    def serialize(self, inst, field, value):
        if field is not None:
            self.translationDict[field.name] = field.metadata['schema_property_name']
        return value

    # filters SARIF keys to discard default values in output
    def filterUnusedKeys(self, field, value):
        # "level" is always kept, even when it equals its default.
        return not (value is None
                    or (field.default == value and field.name != "level")
                    or (isinstance(field.default, attr.Factory)
                        and field.default.factory() == value))

    # returns a dictionary based on the schema_property_name and the values of the SARIF object
    def serializeSarif(self, sarifObj):
        valuesDict = attr.asdict(sarifObj,
                                 filter=self.filterUnusedKeys,
                                 value_serializer=self.serialize)
        return self.recursiveSarif(valuesDict)

    # uses translationDict to fix variable names from sarif_om to standard SARIF
    def recursiveSarif(self, serializedSarif):
        if isinstance(serializedSarif, (int, str)):
            return serializedSarif
        if isinstance(serializedSarif, dict):
            dic = dict()
            for key, value in serializedSarif.items():
                dic[self.translationDict[key]] = self.recursiveSarif(value)
            return dic
        if isinstance(serializedSarif, list):
            lis = list()
            for item in serializedSarif:
                lis.append(self.recursiveSarif(item))
            return lis


def parseRule(tool, vulnerability, full_description=None):
    """Build a SARIF ReportingDescriptor for a tool finding."""
    vuln_info = findVulnerabilityOnTable(tool, vulnerability)
    if full_description is None:
        return ReportingDescriptor(id=vuln_info["RuleId"],
                                   short_description=MultiformatMessageString(vuln_info["Vulnerability"]),
                                   name=vuln_info["Type"] + "Vulnerability")
    return ReportingDescriptor(id=vuln_info["RuleId"],
                               short_description=MultiformatMessageString(vuln_info["Vulnerability"]),
                               full_description=MultiformatMessageString(full_description),
                               name=vuln_info["Type"] + "Vulnerability")


def parseResult(tool, vulnerability, level="warning", uri=None, line=None,
                end_line=None, column=None, snippet=None, logicalLocation=None):
    """Build a SARIF Result with an optional physical/logical location."""
    vuln_info = findVulnerabilityOnTable(tool, vulnerability)
    level = parseLevel(level)
    locations = [
        Location(physical_location=PhysicalLocation(
            artifact_location=ArtifactLocation(uri=uri),
            region=Region(start_line=line,
                          end_line=end_line,
                          start_column=column,
                          snippet=ArtifactContent(text=snippet))))
    ]
    if logicalLocation is not None:
        locations[0].logical_locations = [logicalLocation]
    return Result(rule_id=vuln_info["RuleId"],
                  message=Message(text=vulnerability),
                  level=level,
                  locations=locations)


def parseArtifact(uri, source_language="Solidity"):
    return Artifact(location=ArtifactLocation(uri=uri),
                    source_language=source_language)


def parseLogicalLocation(name, kind="contract"):
    return LogicalLocation(name=name, kind=kind)


# returns the row from the table for a given vulnerability and tool
def findVulnerabilityOnTable(tool, vulnerability_found):
    """Look up the mapping row whose vulnerability text matches (either way)."""
    table = pandas.read_csv("src/output_parser/sarif_vulnerability_mapping.csv")
    tool_table = table.loc[table["Tool"] == tool]
    # Due to messages that have extra information (for example the line where
    # the vulnerability was found) this loop will search if the vulnerability
    # expressed on table exist inside vulnerability found
    for index, row in tool_table.iterrows():
        if row["Vulnerability"] in vulnerability_found or vulnerability_found in row["Vulnerability"]:
            return row
    raise VulnerabilityNotFoundException(tool=tool, vulnerability=vulnerability_found)


# given a level produced by a tool, returns the level in SARIF format
def parseLevel(level):
    """Map a tool-specific severity to a SARIF level; default is "warning"."""
    if isinstance(level, int):
        return "warning"
    if level.lower() == "warning" or level.lower() == "warnings" or level.lower() == "medium":
        return "warning"
    if level.lower() == "error" or level.lower() == "violations" or level.lower() == "high":
        return "error"
    if level.lower() == "note" or level.lower() == "conflicts" or level.lower() == "informational":
        return "note"
    # BUG FIX: the original compared the bound method itself
    # (`level.lower == "none"`), which is always False, so "none"/"safe"
    # levels silently fell through to the default "warning".
    if level.lower() == "none" or level.lower() == "safe":
        return "none"
    return "warning"


# Returns True when rule is unique
def isNotDuplicateRule(newRule, rulesList):
    for rule in rulesList:
        if rule.id == newRule.id:
            return False
    return True


# Returns True when artifact is unique
def isNotDuplicateArtifact(newArtifact, artifactsList):
    for artifact in artifactsList:
        if artifact.location.uri == newArtifact.location.uri:
            return False
    return True


# Returns True when LogicalLocation is unique
def isNotDuplicateLogicalLocation(newLogicalLocation, logicalLocationList):
    for logicalLocation in logicalLocationList:
        if logicalLocation.name == newLogicalLocation.name:
            return False
    return True
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
import time

from kubernetes import client

from tests.k8st.test_base import TestBase
from tests.k8st.utils.utils import retry_until_success, DiagsCollector, kubectl, node_info, run

_log = logging.getLogger(__name__)


class TestGracefulRestart(TestBase):
    """Verifies BGP graceful-restart: restarting calico-node must not cause
    route churn, whereas killing BIRD outright must."""

    def get_restart_node_pod_name(self):
        # Resolve the calico-node pod running on self.restart_node_ip.
        self.restart_pod_name = run("kubectl get po -n kube-system"
                                    " -l k8s-app=calico-node"
                                    " --field-selector status.podIP=" + self.restart_node_ip +
                                    " -o jsonpath='{.items[*].metadata.name}'")
        if self.restart_pod_name == "":
            raise Exception('pod name not found')

    def _test_restart_route_churn(self, num_repeats, restart_func, expect_churn):
        """Restart calico-node num_repeats times via restart_func while a
        second node monitors IPv4 route changes; assert churn or no churn."""
        with DiagsCollector():
            # Get 2 worker node names, one to monitor routes and one
            # to have its calico-node restarted.  The first name
            # returned is always the master, so skip that.
            nodes, ips, _ = node_info()
            self.assertGreater(len(nodes), 2)
            monitor_node = nodes[1]
            self.restart_node = nodes[2]
            self.restart_node_ip = ips[2]

            # Start running ip monitor on the monitor node, to monitor
            # IPv4 route changes.  We use "fd00:10:244" to identify
            # and exclude IPv6 workload block routes like
            # fd00:10:244:0:1cc0:b1ac:ad47:e7c0/122.  These definitely
            # _do_ flap when the host of that block restarts, but it
            # is not yet clear why this is; specifically it is not yet
            # known if it indicates anything wrong with calico/node's
            # GR setup.  See
            # https://marc.info/?l=bird-users&m=158298182509702&w=2
            # for the mailing list discussion so far.
            run("docker exec -d %s sh -c 'stdbuf -oL ip -ts monitor route"
                " | stdbuf -oL grep -v fd00:10:244 > rmon.txt'" % monitor_node)

            # Find the name of the calico-node pod on the restart node.
            self.get_restart_node_pod_name()

            # Restart the calico-node several times, on the other node.
            for i in range(num_repeats):
                # Restart it.
                _log.info("Iteration %d: restart pod %s", i, self.restart_pod_name)
                restart_func(self)

            # Kill the ip monitor process.
            run("docker exec %s pkill ip" % monitor_node)

            # Dump the monitor output.
            monitor_output = run("docker exec %s cat rmon.txt" % monitor_node)

            if expect_churn:
                # Assert that it is not empty.
                self.assertNotEqual(monitor_output, "")
            else:
                # Assert that it is empty.
                self.assertEqual(monitor_output, "")

    def test_methodology(self):
        # Test the methodology here, by verifying that we _do_ observe
        # route churn if we kill BIRD with SIGTERM.
        def kill_bird(self):
            run("docker exec %s pkill bird" % self.restart_node)

            def check_bird_running():
                run("docker exec %s pgrep bird" % self.restart_node)
            retry_until_success(check_bird_running, retries=10, wait_time=1)
            time.sleep(5)

        # Expect non-GR behaviour, i.e. route churn.
        self._test_restart_route_churn(3, kill_bird, True)

    def test_graceful_restart(self):
        # Test that we do _not_ observe route churn when Kubernetes
        # deletes and restarts a pod.
        def delete_calico_node_pod(self):
            run("kubectl delete po %s -n kube-system" % self.restart_pod_name)

            # Wait until a replacement calico-node pod has been created.
            retry_until_success(self.get_restart_node_pod_name, retries=10, wait_time=1)

            # Wait until it is ready, before returning.
            run("kubectl wait po %s -n kube-system --timeout=2m --for=condition=ready"
                % self.restart_pod_name)

        # Expect GR behaviour, i.e. no route churn.
        self._test_restart_route_churn(8, delete_calico_node_pod, False)


class TestAllRunning(TestBase):
    """Smoke checks: every pod in key namespaces must be running."""

    def test_kubesystem_pods_running(self):
        with DiagsCollector():
            self.check_pod_status('kube-system')

    def test_default_pods_running(self):
        with DiagsCollector():
            self.check_pod_status('default')

    def test_calico_monitoring_pods_running(self):
        with DiagsCollector():
            self.check_pod_status('calico-monitoring')


class TestSimplePolicy(TestBase):
    """Exercises NetworkPolicy enforcement against an nginx service."""

    def setUp(self):
        TestBase.setUp(self)
        self.create_namespace("policy-demo")
        self.deploy("nginx:1.7.9", "nginx", "policy-demo", 80)

        # Create two client pods that live for the duration of the
        # test.  We will use 'kubectl exec' to try wgets from these at
        # particular times.
        #
        # We do it this way - instead of one-shot pods that are
        # created, try wget, and then exit - because it takes a
        # relatively long time (7 seconds?) in this test setup for
        # Calico routing and policy to be set up correctly for a newly
        # created pod.  In particular it's possible that connection
        # from a just-created pod will fail because that pod's IP has
        # not yet propagated to the IP set for the ingress policy on
        # the server pod - which can confuse test code that is
        # expecting connection failure for some other reason.
        kubectl("run access -n policy-demo"
                " --overrides='{\"metadata\": {\"annotations\": {\"cni.projectcalico.org/floatingIPs\":\"[\\\"172.16.31.10\\\", \\\"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b\\\"]\"}}}' "
                " --image busybox --command /bin/sleep -- 3600")
        kubectl("run no-access -n policy-demo"
                " --image busybox --command /bin/sleep -- 3600")
        kubectl("wait --timeout=2m --for=condition=available"
                " deployment/nginx -n policy-demo")
        kubectl("wait --timeout=2m --for=condition=ready"
                " pod/access -n policy-demo")
        kubectl("wait --timeout=2m --for=condition=ready"
                " pod/no-access -n policy-demo")

    def tearDown(self):
        # Delete deployment
        kubectl("delete --grace-period 0 pod access -n policy-demo")
        kubectl("delete --grace-period 0 pod no-access -n policy-demo")
        self.delete_and_confirm("policy-demo", "ns")

    def test_simple_policy(self):
        with DiagsCollector():
            # Check we can talk to service.
            retry_until_success(self.can_connect, retries=10, wait_time=1, function_args=["access"])
            _log.info("Client 'access' connected to open service")
            retry_until_success(self.can_connect, retries=10, wait_time=1, function_args=["no-access"])
            _log.info("Client 'no-access' connected to open service")

            # Create default-deny policy
            policy = client.V1NetworkPolicy(
                metadata=client.V1ObjectMeta(
                    name="default-deny",
                    namespace="policy-demo"
                ),
                spec={
                    "podSelector": {
                        "matchLabels": {},
                    },
                }
            )
            client.NetworkingV1Api().create_namespaced_network_policy(
                body=policy,
                namespace="policy-demo",
            )
            _log.debug("Isolation policy created")

            # Check we cannot talk to service
            retry_until_success(self.cannot_connect, retries=10, wait_time=1, function_args=["access"])
            _log.info("Client 'access' failed to connect to isolated service")
            retry_until_success(self.cannot_connect, retries=10, wait_time=1, function_args=["no-access"])
            _log.info("Client 'no-access' failed to connect to isolated service")

            # Create allow policy
            policy = client.V1NetworkPolicy(
                metadata=client.V1ObjectMeta(
                    name="access-nginx",
                    namespace="policy-demo"
                ),
                spec={
                    'ingress': [{
                        'from': [{
                            'podSelector': {
                                'matchLabels': {
                                    'run': 'access'
                                }
                            }
                        }]
                    }],
                    'podSelector': {
                        'matchLabels': {
                            'app': 'nginx'
                        }
                    }
                }
            )
            client.NetworkingV1Api().create_namespaced_network_policy(
                body=policy,
                namespace="policy-demo",
            )
            _log.debug("Allow policy created.")

            # Check we can talk to service as 'access'
            retry_until_success(self.can_connect, retries=10, wait_time=1, function_args=["access"])
            _log.info("Client 'access' connected to protected service")

            # Check we cannot talk to service as 'no-access'
            retry_until_success(self.cannot_connect, retries=10, wait_time=1, function_args=["no-access"])
            _log.info("Client 'no-access' failed to connect to protected service")

    def can_connect(self, name):
        # Raises ConnectionError unless `name` CAN reach nginx.
        if not self.check_connected(name):
            _log.warning("'%s' failed to connect, when connection was expected", name)
            raise self.ConnectionError
        _log.info("'%s' connected, as expected", name)

    def cannot_connect(self, name):
        # Raises ConnectionError unless `name` CANNOT reach nginx.
        if self.check_connected(name):
            _log.warning("'%s' unexpectedly connected", name)
            raise self.ConnectionError
        _log.info("'%s' failed to connect, as expected", name)

    @staticmethod
    def check_connected(name):
        # One wget attempt from inside pod `name`; True iff it succeeds.
        try:
            kubectl("exec " + name + " -n policy-demo"
                    " -- /bin/wget -O /dev/null -q --timeout=1 nginx")
        except subprocess.CalledProcessError:
            _log.exception("Failed to wget from nginx service")
            return False
        _log.debug("Contacted service")
        return True

    class ConnectionError(Exception):
        pass
<def_stmt>extractIsogashiineetoWordpressCom item<block_start>''' Parser for 'isogashiineeto.wordpress.com' '''<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol)<or>"preview"<in>item['title'].lower()<block_start><return><none><block_end>tagmap=[('NEET Hello Work' 'NEET dakedo Hello Work ni Ittara Isekai ni Tsuretekareta' 'translated') ('Dark Magician Hero' 'Dark Magician as a Hero' 'translated') ('Hatena☆Illusion' 'Hatena☆Illusion' 'translated') ('PRC' 'PRC' 'translated') ('Loiterous' 'Loiterous' 'oel') ]<for_stmt>tagname,name,tl_type tagmap<block_start><if_stmt>tagname<in>item['tags']<block_start><return>buildReleaseMessageWithType(item name vol chp frag=frag postfix=postfix tl_type=tl_type)<block_end><block_end><return><false><block_end>
from powerlift.bench import Experiment, Store
from powerlift.executors.docker import InsecureDocker
from powerlift.executors.localmachine import LocalMachine
from powerlift.executors.azure_ci import AzureContainerInstance
import pytest
import os


def _add(x, y):
    """Trivial worker used by the multiprocessing smoke test."""
    return x + y


def _err_handler(e):
    """Re-raise pool errors so failures are not silently swallowed."""
    raise e


def _trials(task):
    """Pick trial methods for a task: small binary problems get rf and svm."""
    if task.problem == "binary" and task.scalar_measure("n_rows") <= 10000:
        return ["rf", "svm"]
    return []


def _benchmark(trial):
    """Train and AUC-score one trial on an OpenML binary-classification task."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import LinearSVC
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import roc_auc_score
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OneHotEncoder, FunctionTransformer
    from sklearn.compose import ColumnTransformer
    from sklearn.impute import SimpleImputer

    if trial.task.problem != "binary" or trial.task.origin != "openml":
        return

    X, y, meta = trial.task.data(["X", "y", "meta"])

    # Holdout split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

    # Build preprocessor: one-hot encode categorical columns, pass numeric
    # columns through unchanged, then impute remaining missing values.
    categorical_mask = meta["categorical_mask"]
    categorical_columns = [i for i in range(X.shape[1]) if categorical_mask[i]]
    numeric_columns = [i for i in range(X.shape[1]) if not categorical_mask[i]]
    categorical_pipe = Pipeline(
        [("ohe", OneHotEncoder(sparse=True, handle_unknown="ignore"))]
    )
    numeric_pipe = Pipeline([("identity", FunctionTransformer())])
    preprocessor = Pipeline(
        [
            (
                "ct",
                ColumnTransformer(
                    transformers=[
                        ("cat", categorical_pipe, categorical_columns),
                        ("num", numeric_pipe, numeric_columns),
                    ]
                ),
            ),
            (
                "missing",
                SimpleImputer(add_indicator=True, strategy="most_frequent"),
            ),
        ]
    )

    # Connect preprocessor with target learner
    if trial.method.name == "svm":
        estimator = CalibratedClassifierCV(LinearSVC())
    else:
        estimator = RandomForestClassifier()
    clf = Pipeline([("ct", preprocessor), ("est", estimator)])

    # Train, predict probabilities for the positive class, and log the AUC.
    clf.fit(X_train, y_train)
    predicted = clf.predict_proba(X_test)[:, 1]
    trial.log("auc", roc_auc_score(y_test, predicted))


def test_multiprocessing():
    """This tests exists to ensure there is no hang in pytest."""
    from multiprocessing.pool import Pool

    num_tasks = 32
    pool = Pool()
    async_results = [
        pool.apply_async(_add, (i, i), error_callback=_err_handler)
        for i in range(num_tasks)
    ]
    total = sum(res.get() for res in async_results)
    # sum(2 * i for i in range(32)) == 992
    assert total == 992
    pool.close()


# def test_scikit_experiment_aci(populated_azure_store):
@pytest.mark.skip("Remove this when testing ACI.")
def test_scikit_experiment_aci():
    """
    As of 2022-06-09:
    - Takes roughly 20 seconds to submit 10 tasks.
    - Roughly 80 seconds for first runs to return.
    - 180 seconds to complete (5 parallel containers).
    """
    from dotenv import load_dotenv

    load_dotenv()
    azure_tenant_id = os.getenv("AZURE_TENANT_ID")
    azure_client_id = os.getenv("AZURE_CLIENT_ID")
    azure_client_secret = os.getenv("AZURE_CLIENT_SECRET")
    subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
    resource_group = os.getenv("AZURE_RESOURCE_GROUP")
    store = Store(os.getenv("AZURE_DB_URL"), force_recreate=False)
    # store = populated_azure_store

    executor = AzureContainerInstance(
        store,
        azure_tenant_id,
        azure_client_id,
        azure_client_secret,
        subscription_id,
        resource_group,
        n_running_containers=5,
        num_cores=1,
        mem_size_gb=2,
        raise_exception=True,
    )
    experiment = Experiment(store)
    executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
    executor.join()


def test_scikit_experiment_debug(populated_store):
    executor = LocalMachine(populated_store, n_cpus=1, raise_exception=True)
    experiment = Experiment(populated_store, name="scikit")
    executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
    executor.join()


def test_scikit_experiment_local(populated_store):
    executor = LocalMachine(populated_store, n_cpus=2)
    experiment = Experiment(populated_store, name="scikit")
    executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
    executor.join()


def test_scikit_experiment_docker(populated_store):
    from dotenv import load_dotenv

    load_dotenv()
    db_uri = os.getenv("DOCKER_DB_URL")
    executor = InsecureDocker(
        populated_store, n_running_containers=2, docker_db_uri=db_uri
    )
    experiment = Experiment(populated_store, name="scikit")
    executor = experiment.run(_benchmark, _trials, timeout=10, executor=executor)
    executor.join()
"""Defines the factory for creating monitors"""<import_from_future_stmt> unicode_literals<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>_SCANNERS={}<def_stmt>add_scanner_type scanner_class<block_start>"""Registers a scanner class so it can be used for Scale Scans :param scanner_class: The class definition for a scanner :type scanner_class: class:`ingest.scan.scanners.scanner.Scanner` """<line_sep>scanner=scanner_class()<if_stmt>scanner.scanner_type<in>_SCANNERS<block_start>logger.warning('Duplicate scanner registration: %s' scanner.scanner_type)<block_end>_SCANNERS[scanner.scanner_type]=scanner_class<block_end><def_stmt>get_scanner scanner_type<block_start>"""Returns a scanner of the given type that is set to scan the given workspace :param scanner_type: The unique identifier of a registered scanner :type scanner_type: string :returns: A scanner for storing and retrieving files. :rtype: :class:`ingest.scan.scanners.scanner.Scanner` """<if_stmt>scanner_type<in>_SCANNERS<block_start><return>_SCANNERS[scanner_type]()<block_end><raise>KeyError('\'%s\' is an invalid scanner type'%scanner_type)<block_end><def_stmt>get_scanner_types <block_start>"""Returns a list of type identifiers for all registered scanners :returns: A list of scanner types :rtype: [string] """<line_sep><return>_SCANNERS.keys()<block_end>
import logging.config
import os

import structlog

from node_launcher.constants import NODE_LAUNCHER_DATA_PATH, OPERATING_SYSTEM

timestamper = structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S')

# Add the log level and a timestamp to the event_dict if the log entry
# is not from structlog.
pre_chain = [
    structlog.stdlib.add_log_level,
    timestamper,
]

# Both formatters render through structlog; they differ only in coloring.
_plain_formatter = {
    '()': structlog.stdlib.ProcessorFormatter,
    'processor': structlog.dev.ConsoleRenderer(colors=False),
    'foreign_pre_chain': pre_chain,
}
_colored_formatter = {
    '()': structlog.stdlib.ProcessorFormatter,
    'processor': structlog.dev.ConsoleRenderer(colors=True),
    'foreign_pre_chain': pre_chain,
}
_debug_log_path = os.path.join(
    NODE_LAUNCHER_DATA_PATH[OPERATING_SYSTEM], 'debug.log'
)

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'plain': _plain_formatter,
        'colored': _colored_formatter,
    },
    'handlers': {
        # Colored output on the console ...
        'default': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'colored',
        },
        # ... plain output in the debug log file.
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': _debug_log_path,
            'formatter': 'plain',
        },
    },
    'loggers': {
        '': {
            'handlers': ['default', 'file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
})


def dropper(logger, method_name, event_dict):
    """Censor RPC password values before they reach any log output."""
    positional_kwargs = event_dict[0][0]
    for key in positional_kwargs.keys():
        if 'rpcpass' in key:
            positional_kwargs[key] = '<PASSWORD>'
    return event_dict


structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        timestamper,
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        dropper,
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

log = structlog.get_logger()
import argparse

import cv2
import numpy as np
import torch

import kornia as K
from kornia.contrib import FaceDetector, FaceDetectorResult, FaceKeypoint


def draw_keypoint(img: np.ndarray, det: FaceDetectorResult, kpt_type: FaceKeypoint) -> np.ndarray:
    """Draw one facial keypoint of *det* onto *img* as a small circle."""
    kpt = det.get_keypoint(kpt_type).int().tolist()
    return cv2.circle(img, kpt, 2, (255, 0, 0), 2)


def scale_image(img: np.ndarray, size: int) -> np.ndarray:
    """Resize *img* so its width equals *size*, preserving aspect ratio."""
    h, w = img.shape[:2]
    scale = 1. * size / w
    return cv2.resize(img, (int(w * scale), int(h * scale)))


def my_app():
    """Run webcam face detection, showing and recording annotated frames.

    Reads CLI options from the module-level ``args`` (set under
    ``__main__``).
    """
    # select the device
    device = torch.device('cpu')
    if args.cuda and torch.cuda.is_available():
        device = torch.device('cuda:0')
        torch.backends.cudnn.benchmark = True

    # create the video capture object
    cap = cv2.VideoCapture(0)

    # compute scale
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print(f"Video: h/w: {height}/{width} fps:{fps}")

    scale = 1. * args.image_size / width
    w, h = int(width * scale), int(height * scale)

    # create the video writer object
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    out = cv2.VideoWriter(args.video_out, fourcc, fps, (w, h))

    # create the detector object
    face_detection = FaceDetector().to(device)

    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)

    # FIX: honor the --vis_keypoints flag; it was parsed but never used
    # (keypoint drawing previously always started disabled).
    draw_keypoints: bool = args.vis_keypoints

    while True:
        # Capture the video frame by frame
        _, frame = cap.read()

        start = cv2.getTickCount()

        # preprocess
        frame = scale_image(frame, args.image_size)
        img = K.image_to_tensor(frame, keepdim=False).to(device)
        img = K.color.bgr_to_rgb(img.float())

        # detect !
        with torch.no_grad():
            dets = face_detection(img)
        dets = [FaceDetectorResult(o) for o in dets]

        fps: float = cv2.getTickFrequency() / (cv2.getTickCount() - start)

        # show image
        frame_vis = frame.copy()

        frame_vis = cv2.putText(frame_vis, f"FPS: {fps:.1f}", (10, 20),
                                cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))

        for b in dets:
            if b.score < args.vis_threshold:
                continue

            # draw face bounding box as four corner brackets;
            # (dx, dy) gives the inward direction for each corner.
            line_thickness = 2
            line_length = 10

            for corner, dx, dy in (
                (b.top_left, 1, 1),
                (b.top_right, -1, 1),
                (b.bottom_right, -1, -1),
                (b.bottom_left, 1, -1),
            ):
                x1, y1 = corner.int().tolist()
                frame_vis = cv2.line(
                    frame_vis, (x1, y1), (x1 + dx * line_length, y1),
                    (0, 255, 0), thickness=line_thickness)
                frame_vis = cv2.line(
                    frame_vis, (x1, y1), (x1, y1 + dy * line_length),
                    (0, 255, 0), thickness=line_thickness)

            if draw_keypoints:
                # draw facial keypoints
                for kpt_type in (FaceKeypoint.EYE_LEFT,
                                 FaceKeypoint.EYE_RIGHT,
                                 FaceKeypoint.NOSE,
                                 FaceKeypoint.MOUTH_LEFT,
                                 FaceKeypoint.MOUTH_RIGHT):
                    frame_vis = draw_keypoint(frame_vis, b, kpt_type)

                # draw the detection score next to the box
                pt = b.top_left.int().tolist()
                frame_vis = cv2.putText(
                    frame_vis, f"{b.score:.2f}", (pt[0], pt[1] - 12),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))

        # write the processed frame
        out.write(frame_vis)

        # Display the resulting frame
        cv2.imshow('frame', frame_vis)

        # FIX: poll the keyboard once per frame. The original called
        # cv2.waitKey(1) twice, so a key consumed by the first check was
        # invisible to the second one and 'q' presses could be dropped.
        key = cv2.waitKey(1)
        if key == ord('s'):
            # 's' toggles drawing of the face keypoints
            draw_keypoints = not draw_keypoints
        elif key == ord('q'):
            # 'q' quits
            break

    # After the loop release the cap and writing objects
    cap.release()
    out.release()

    # Destroy all the windows
    cv2.destroyAllWindows()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Face and Landmark Detection')
    parser.add_argument('--video_out', required=True, type=str,
                        help='the file path to write the output.')
    parser.add_argument('--image_size', default=320, type=int,
                        help='the image size to process.')
    parser.add_argument('--vis_threshold', default=0.8, type=float,
                        help='visualization_threshold')
    parser.add_argument('--vis_keypoints', dest='vis_keypoints',
                        action='store_true')
    parser.add_argument('--cuda', dest='cuda', action='store_true')
    args = parser.parse_args()
    my_app()
# encoding: utf-8

"""Test data builders for text XML elements."""

from ...unitdata import BaseBuilder
from .shared import CT_OnOffBuilder, CT_StringBuilder


class CT_BrBuilder(BaseBuilder):
    __tag__ = 'w:br'
    __nspfxs__ = ('w',)
    __attrs__ = ('w:type', 'w:clear')


class CT_EmptyBuilder(BaseBuilder):
    __nspfxs__ = ('w',)
    __attrs__ = ()

    def __init__(self, tag):
        # Tag is supplied per instance rather than fixed on a subclass.
        self.__tag__ = tag
        super(CT_EmptyBuilder, self).__init__()


class CT_JcBuilder(BaseBuilder):
    __tag__ = 'w:jc'
    __nspfxs__ = ('w',)
    __attrs__ = ('w:val',)


class CT_PBuilder(BaseBuilder):
    __tag__ = 'w:p'
    __nspfxs__ = ('w',)
    __attrs__ = ()


class CT_PPrBuilder(BaseBuilder):
    __tag__ = 'w:pPr'
    __nspfxs__ = ('w',)
    __attrs__ = ()


class CT_RBuilder(BaseBuilder):
    __tag__ = 'w:r'
    __nspfxs__ = ('w',)
    __attrs__ = ()


class CT_RPrBuilder(BaseBuilder):
    __tag__ = 'w:rPr'
    __nspfxs__ = ('w',)
    __attrs__ = ()


class CT_SectPrBuilder(BaseBuilder):
    __tag__ = 'w:sectPr'
    __nspfxs__ = ('w',)
    __attrs__ = ()


class CT_TextBuilder(BaseBuilder):
    __tag__ = 'w:t'
    __nspfxs__ = ('w',)
    __attrs__ = ()

    def with_space(self, value):
        """Set the ``xml:space`` attribute and return self for chaining."""
        self._set_xmlattr('xml:space', str(value))
        return self


class CT_UnderlineBuilder(BaseBuilder):
    __tag__ = 'w:u'
    __nspfxs__ = ('w',)
    __attrs__ = (
        'w:val', 'w:color', 'w:themeColor', 'w:themeTint', 'w:themeShade'
    )


# --- paragraph-level element builders ---------------------------------------

def a_jc(): return CT_JcBuilder()
def a_p(): return CT_PBuilder()
def a_pPr(): return CT_PPrBuilder()
def a_pStyle(): return CT_StringBuilder('w:pStyle')
def a_sectPr(): return CT_SectPrBuilder()


# --- run and run-content element builders ------------------------------------

def a_br(): return CT_BrBuilder()
def a_cr(): return CT_EmptyBuilder('w:cr')
def a_t(): return CT_TextBuilder()
def a_tab(): return CT_EmptyBuilder('w:tab')
def a_u(): return CT_UnderlineBuilder()
def an_r(): return CT_RBuilder()
def an_rPr(): return CT_RPrBuilder()
def an_rStyle(): return CT_StringBuilder('w:rStyle')


# --- boolean (on/off) run property builders ----------------------------------

def a_b(): return CT_OnOffBuilder('w:b')
def a_bCs(): return CT_OnOffBuilder('w:bCs')
def a_caps(): return CT_OnOffBuilder('w:caps')
def a_cs(): return CT_OnOffBuilder('w:cs')
def a_dstrike(): return CT_OnOffBuilder('w:dstrike')
def a_noProof(): return CT_OnOffBuilder('w:noProof')
def a_shadow(): return CT_OnOffBuilder('w:shadow')
def a_smallCaps(): return CT_OnOffBuilder('w:smallCaps')
def a_snapToGrid(): return CT_OnOffBuilder('w:snapToGrid')
def a_specVanish(): return CT_OnOffBuilder('w:specVanish')
def a_strike(): return CT_OnOffBuilder('w:strike')
def a_vanish(): return CT_OnOffBuilder('w:vanish')
def a_webHidden(): return CT_OnOffBuilder('w:webHidden')
def an_emboss(): return CT_OnOffBuilder('w:emboss')
def an_i(): return CT_OnOffBuilder('w:i')
def an_iCs(): return CT_OnOffBuilder('w:iCs')
def an_imprint(): return CT_OnOffBuilder('w:imprint')
def an_oMath(): return CT_OnOffBuilder('w:oMath')
def an_outline(): return CT_OnOffBuilder('w:outline')
def an_rtl(): return CT_OnOffBuilder('w:rtl')
<import_from_future_stmt> unicode_literals<import_stmt>codecs<def_stmt>encode_hex value<block_start><return>'0x'+codecs.decode(codecs.encode(value 'hex') 'utf8')<block_end><def_stmt>decode_hex value<block_start>_,_,hex_part=value.rpartition('x')<line_sep><return>codecs.decode(hex_part 'hex')<block_end>
from unittest.mock import patch

import graphene

from .....payment import TransactionKind
from .....payment.gateways.dummy_credit_card import (
    TOKEN_EXPIRED,
    TOKEN_VALIDATION_MAPPING,
)
from .....payment.models import ChargeStatus
from ....tests.utils import get_graphql_content

CAPTURE_QUERY = """
    mutation PaymentCapture($paymentId: ID!, $amount: PositiveDecimal) {
        paymentCapture(paymentId: $paymentId, amount: $amount) {
            payment {
                id, chargeStatus
            }
            errors {
                field
                message
                code
            }
        }
    }
"""


def test_payment_capture_success(
    staff_api_client, permission_manage_orders, payment_txn_preauth
):
    # given: a pre-authorized, not-yet-charged payment
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_node_id = graphene.Node.to_global_id("Payment", payment.pk)

    # when: the full amount is captured
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY,
        {"paymentId": payment_node_id, "amount": str(payment_txn_preauth.total)},
        permissions=[permission_manage_orders],
    )

    # then: the payment is fully charged and a CAPTURE transaction recorded
    data = get_graphql_content(response)["data"]["paymentCapture"]
    assert not data["errors"]
    payment_txn_preauth.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment.transactions.count() == 2
    assert payment.transactions.last().kind == TransactionKind.CAPTURE


def test_payment_capture_with_invalid_argument(
    staff_api_client, permission_manage_orders, payment_txn_preauth
):
    # given: a zero capture amount, which is not allowed
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_node_id = graphene.Node.to_global_id("Payment", payment.pk)

    # when
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY,
        {"paymentId": payment_node_id, "amount": 0},
        permissions=[permission_manage_orders],
    )

    # then
    data = get_graphql_content(response)["data"]["paymentCapture"]
    assert len(data["errors"]) == 1
    assert data["errors"][0]["message"] == "Amount should be a positive number."


def test_payment_capture_with_payment_non_authorized_yet(
    staff_api_client, permission_manage_orders, payment_dummy
):
    """Ensure capture a payment that is set as authorized is failing with
    the proper error message.
    """
    payment = payment_dummy
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_node_id = graphene.Node.to_global_id("Payment", payment.pk)

    response = staff_api_client.post_graphql(
        CAPTURE_QUERY,
        {"paymentId": payment_node_id, "amount": 1},
        permissions=[permission_manage_orders],
    )

    data = get_graphql_content(response)["data"]["paymentCapture"]
    assert data["errors"] == [
        {
            "field": None,
            "message": "Cannot find successful auth transaction.",
            "code": "PAYMENT_ERROR",
        }
    ]


def test_payment_capture_gateway_error(
    staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
    # given: the dummy gateway is forced to fail
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_node_id = graphene.Node.to_global_id("Payment", payment.pk)
    monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)

    # when
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY,
        {"paymentId": payment_node_id, "amount": str(payment_txn_preauth.total)},
        permissions=[permission_manage_orders],
    )

    # then: the error surfaces and the failed CAPTURE transaction is recorded
    data = get_graphql_content(response)["data"]["paymentCapture"]
    assert data["errors"] == [
        {"field": None, "message": "Unable to process capture", "code": "PAYMENT_ERROR"}
    ]
    payment_txn_preauth.refresh_from_db()
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    assert payment.transactions.count() == 2
    failed_txn = payment.transactions.last()
    assert failed_txn.kind == TransactionKind.CAPTURE
    assert not failed_txn.is_success


@patch(
    "saleor.payment.gateways.dummy_credit_card.plugin."
    "DummyCreditCardGatewayPlugin.DEFAULT_ACTIVE",
    True,
)
def test_payment_capture_gateway_dummy_credit_card_error(
    staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
    # given: an auth transaction carrying an expired card token
    token = TOKEN_EXPIRED
    expected_error = TOKEN_VALIDATION_MAPPING[token]

    payment = payment_txn_preauth
    payment.gateway = "mirumee.payments.dummy_credit_card"
    payment.save()
    auth_txn = payment.transactions.last()
    auth_txn.token = token
    auth_txn.save()

    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_node_id = graphene.Node.to_global_id("Payment", payment.pk)
    monkeypatch.setattr(
        "saleor.payment.gateways.dummy_credit_card.dummy_success", lambda: False
    )

    # when
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY,
        {"paymentId": payment_node_id, "amount": str(payment_txn_preauth.total)},
        permissions=[permission_manage_orders],
    )

    # then: the token-specific validation error is returned
    data = get_graphql_content(response)["data"]["paymentCapture"]
    assert data["errors"] == [
        {"field": None, "message": expected_error, "code": "PAYMENT_ERROR"}
    ]
    payment_txn_preauth.refresh_from_db()
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    assert payment.transactions.count() == 2
    failed_txn = payment.transactions.last()
    assert failed_txn.kind == TransactionKind.CAPTURE
    assert not failed_txn.is_success
# See URL: https://hakibenita.com/fast-load-data-python-postgresql
import time
from functools import wraps

from memory_profiler import memory_usage  # type: ignore


def profile(fn):
    """Decorator that prints wall-clock time and memory delta of each call.

    NOTE(review): the wrapped function is executed twice per call — once
    for the timing measurement and once under ``memory_usage`` — so any
    side effects happen twice.
    """
    @wraps(fn)
    def inner(*args, **kwargs):
        kwargs_repr = ", ".join(f"{k}={v}" for k, v in kwargs.items())
        print(f"\n{fn.__name__}({kwargs_repr})")

        # Measure time
        started = time.perf_counter()
        return_value = fn(*args, **kwargs)
        elapsed = time.perf_counter() - started
        print(f"Time spent: {elapsed:0.4}")

        # Measure memory (re-runs fn; retval replaces the earlier result)
        mem, return_value = memory_usage(
            (fn, args, kwargs), retval=True, timeout=200, interval=1e-7)
        print(f"Memory used: {max(mem) - min(mem)}")
        return return_value

    return inner
import random

from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *

"""
    Generating squares

    This example will generate 25 squares each in a randomly chosen
    grayvalue. The grayvalue is chosen out of 25 different possiblities.
    Every redraw of the window will create a new set of squares.

    http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLSquares
"""


def initFun():
    """Set a white background, black draw color and a 640x480 ortho view."""
    glClearColor(1.0, 1.0, 1.0, 0.0)
    glColor3f(0.0, 0.0, 0.0)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluOrtho2D(0.0, 640.0, 0.0, 480.0)


def displayFun():
    """Draw 25 randomly placed rectangles in random gray shades."""
    glClear(GL_COLOR_BUFFER_BIT)
    for _ in range(25):
        # FIX: randint(0, 25) yielded 26 shades including pure white,
        # which is invisible on the white background, and the duplicate
        # `idx` binding was never used; pick one of exactly 25 shades.
        gray = random.randint(0, 24) / 25.0
        glColor3f(gray, gray, gray)
        glRecti(random.randint(0, 640), random.randint(0, 480),
                random.randint(0, 640), random.randint(0, 480))
    glFlush()


if __name__ == '__main__':
    glutInit()
    # FIX: the display mode must be selected before the window is created
    # for it to take effect.
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowSize(640, 480)
    glutCreateWindow(b"DrawSquares")
    glutDisplayFunc(displayFun)
    initFun()
    glutMainLoop()
# Generated by Django 2.0.3 on 2018-07-19 10:20
from django.db import migrations


class Migration(migrations.Migration):
    """Replace the custom permission set declared on the User model."""

    dependencies = [
        ("account", "0022_auto_20180718_0956"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="user",
            options={
                "permissions": (
                    ("manage_users", "Manage customers."),
                    ("manage_staff", "Manage staff."),
                    ("impersonate_users", "Impersonate customers."),
                )
            },
        ),
    ]
""" Python wrapper for libui. """<import_from_stmt>pylibui libui<import_from_stmt>.control Control<class_stmt>Tab(Control)<block_start><def_stmt>__init__ self<block_start>""" Creates a new tab. """<line_sep>super().__init__()<line_sep>self.control=libui.uiNewTab()<block_end><def_stmt>append self name control<block_start>""" Appends a control to the tab. :param name: str :param control: uiControl :return: None """<line_sep>libui.uiTabAppend(self.control name control.pointer())<block_end><def_stmt>insertAt self name before control<block_start>""" Deletes a control from the tab. :param name: str :param before: int :param control: uiControl :return: None """<line_sep>libui.uiTabInsertAt(self.control name before control.pointer())<block_end><def_stmt>delete self index<block_start>""" Deletes a control from the tab. :param tab: uiTab :param index: int :return: None """<line_sep>libui.uiTabDelete(self.control index)<block_end><def_stmt>setMargined self page margined<block_start>""" Sets whether the tab's page is margined or not. :param page: int :param margined: bool :return: None """<line_sep>libui.uiTabSetMargined(self.control page int(margined))<block_end><def_stmt>getMargined self page<block_start>""" Returns whether the tab's page is margined or not. :param page: int :return: bool """<line_sep><return>bool(libui.uiTabMargined(self.control page))<block_end><def_stmt>getNumPages self<block_start>""" Returns the number of pages in the tab. :return: int """<line_sep><return>libui.uiTabNumPages(self.control)<block_end><block_end>
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for metric utils."""<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_from_stmt>language.mentionmemory.utils metric_utils<import_stmt>numpy<as>np<line_sep>_LARGE_NUMBER=1e12<class_stmt>ComputeMetricsTest(absltest.TestCase)<block_start>"""Test whether metrics computations produce expected values."""<line_sep>batch_size=32<line_sep>seq_len=20<line_sep>vocab_size=100<def_stmt>test_logit_values_as_expected self<block_start>"""Test whether metrics computations produce expected values."""<line_sep>logits=np.random.rand(self.batch_size self.seq_len self.vocab_size)<line_sep>targets=np.random.randint(self.vocab_size size=(self.batch_size self.seq_len))<line_sep>dense_targets=jax.nn.one_hot(targets self.vocab_size)<line_sep>weights=np.random.randint(2 size=(self.batch_size self.seq_len))<line_sep># Check loss and denominator make sense for random values loss,denominator=metric_utils.compute_weighted_cross_entropy(logits targets weights )<line_sep>expected_loss=-jax.nn.log_softmax(logits axis=-1)<times>dense_targets<line_sep>expected_loss=(expected_loss<times>np.expand_dims(weights axis=-1)).sum()<line_sep>self.assertAlmostEqual(loss expected_loss 1)<line_sep>self.assertAlmostEqual(denominator weights.sum() 1)<line_sep># Check loss makes sense for uniform and degenerate scores 
logits=np.ones(shape=(self.batch_size self.seq_len self.vocab_size))<line_sep>loss,denominator=metric_utils.compute_weighted_cross_entropy(logits targets weights )<line_sep>expected_loss=np.log(self.vocab_size)<line_sep>self.assertAlmostEqual(loss/denominator expected_loss 4)<line_sep>logits=np.zeros(shape=(self.batch_size self.seq_len self.vocab_size))<line_sep>logits=logits+(_LARGE_NUMBER<times>dense_targets-_LARGE_NUMBER<times>(1-dense_targets))<line_sep>loss,denominator=metric_utils.compute_weighted_cross_entropy(logits targets weights )<line_sep>self.assertAlmostEqual(loss/denominator 0.0 4)<block_end><def_stmt>test_prob_values_as_expected self<block_start>probs=np.random.rand(self.batch_size self.seq_len self.vocab_size)<line_sep>targets=np.random.randint(self.vocab_size size=(self.batch_size self.seq_len))<line_sep>dense_targets=jax.nn.one_hot(targets self.vocab_size)<line_sep>weights=np.random.randint(2 size=(self.batch_size self.seq_len))<line_sep># Check loss and denominator make sense with probs as inputs loss,denominator=metric_utils.compute_weighted_cross_entropy(probs targets weights inputs_are_prob=<true> )<line_sep>expected_loss=-np.log(probs)<times>dense_targets<line_sep>expected_loss=(expected_loss<times>np.expand_dims(weights axis=-1)).sum()<line_sep>self.assertAlmostEqual(loss expected_loss 1)<line_sep>self.assertAlmostEqual(denominator weights.sum() 1)<line_sep># Check loss makes sense for uniform and degenerate probabilities probs=np.ones(shape=(self.batch_size self.seq_len self.vocab_size))<line_sep>probs=probs/self.vocab_size<line_sep>loss,denominator=metric_utils.compute_weighted_cross_entropy(probs targets weights inputs_are_prob=<true> )<line_sep>expected_loss=np.log(self.vocab_size)<line_sep>self.assertAlmostEqual(loss/denominator expected_loss 4)<line_sep>probs=np.zeros(shape=(self.batch_size self.seq_len self.vocab_size))<line_sep>probs=probs+dense_targets<line_sep>loss,denominator=metric_utils.compute_weighted_cross_entropy(probs 
targets weights inputs_are_prob=<true> )<line_sep>self.assertAlmostEqual(loss/denominator 0.0 4)<block_end><def_stmt>test_accuracy_as_expected self<block_start>logits=np.random.rand(self.batch_size self.seq_len self.vocab_size)<line_sep>targets=np.random.randint(self.vocab_size size=(self.batch_size self.seq_len))<line_sep>dense_targets=jax.nn.one_hot(targets self.vocab_size)<line_sep>weights=np.random.randint(2 size=(self.batch_size self.seq_len))<line_sep># Check accuracy and denominator make sense logits=np.ones((self.batch_size self.seq_len self.vocab_size) dtype=np.float32)<line_sep>correct=np.random.randint(2 size=(self.batch_size self.seq_len 1))<line_sep>logits=logits+dense_targets<times>(0.5<times>correct-0.5<times>(1-correct))<line_sep>acc,denominator=metric_utils.compute_weighted_accuracy(logits targets weights )<line_sep>expected_accuracy=(np.squeeze(correct)<times>weights).sum()/weights.sum()<line_sep>self.assertAlmostEqual(acc/denominator expected_accuracy 1)<line_sep>self.assertAlmostEqual(denominator weights.sum() 1)<block_end><block_end><class_stmt>ComputeCrossEntropyTest(parameterized.TestCase)<block_start>"""Test whether loss and metrics computations produce expected values."""<line_sep>@parameterized.parameters((0 1 29 31 31) # Tests with large score values (1 1000000 29 31) (2 1000000 29 31) # Tests with large number of positive, negatives and neutral classes (3 100 29 1001) (4 100 323 31) # Tests whether lack of positives affects the numerical stability (5 1 29 31 1 31) (6 1 29 31 0 31) (7 1 29 31 31 1) (8 1 29 31 31 0) (9 1 29 31 1 1) (10 1 29 31 0 0) (11 1000000 29 31 0 0) (12 100 29 1001 0 0) (13 100 323 31 0 0) )<def_stmt>test_loss_and_metrics_as_expected self seed scale local_n_mentions global_n_mentions max_num_positives=<none> max_num_negatives=<none><block_start>"""Test whether loss and metrics computation produces expected 
values."""<line_sep>np.random.seed(seed)<line_sep>max_num_negatives=max_num_negatives<or>global_n_mentions<line_sep>max_num_positives=max_num_positives<or>global_n_mentions<line_sep>shape=(local_n_mentions global_n_mentions)<line_sep>scores=np.random.random(shape)<times>scale<line_sep>num_positives=np.random.randint(max_num_positives+1 size=(local_n_mentions))<line_sep>num_positives[0]=0<line_sep>num_positives[-1]=global_n_mentions<line_sep>num_negatives=np.random.randint(max_num_negatives+1 size=(local_n_mentions))<line_sep>num_negatives=np.minimum(num_negatives global_n_mentions-num_positives)<line_sep>positives=np.zeros(shape dtype=np.bool_)<line_sep>negatives=np.zeros(shape dtype=np.bool_)<for_stmt>index range(local_n_mentions)<block_start>ids=np.random.choice(global_n_mentions num_positives[index]+num_negatives[index] replace=<false>)<line_sep>positives[index ids[:num_positives[index]]]=<true><line_sep>negatives[index ids[num_positives[index]:]]=<true><block_end>self.assertEqual(np.logical_and(positives negatives).sum() 0)<line_sep>weights=np.logical_and(num_positives<g>0 num_negatives<g>0)<line_sep>(actual_loss actual_metrics (actual_acc_per_sample actual_weights_per_sample))=metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks(scores positives negatives)<line_sep>expected_loss,expected_acc,expected_denom=0 0 0<line_sep>expected_acc_per_sample=[]<line_sep># Consider every sample independently <for_stmt>i range(local_n_mentions)<block_start><if_stmt><not>weights[i]<block_start>expected_acc_per_sample.append(0)<line_sep><continue><block_end># Collect positive and negative scores positive_scores,negative_scores=[] []<for_stmt>j range(global_n_mentions)<block_start><if_stmt>positives[i j]<block_start>positive_scores.append(scores[i j])<block_end><if_stmt>negatives[i j]<block_start>negative_scores.append(scores[i 
j])<block_end><block_end>self.assertNotEmpty(positive_scores)<line_sep>self.assertNotEmpty(negative_scores)<line_sep>n_pos=len(positive_scores)<line_sep>max_negative_scores=max(negative_scores)<line_sep>current_loss,current_acc=0 0<line_sep># Consider positive class per sample independently # and compute loss using a naive softmax op <for_stmt>pos_index range(n_pos)<block_start>current_scores=np.array([positive_scores[pos_index]]+negative_scores)<line_sep>current_scores=jax.nn.log_softmax(current_scores)<line_sep>current_loss<augadd>-current_scores[0]<line_sep>current_acc<augadd>int(positive_scores[pos_index]<g>max_negative_scores)<block_end>expected_loss<augadd>current_loss/n_pos<line_sep>expected_acc<augadd>current_acc/n_pos<line_sep>expected_denom<augadd>1<line_sep>expected_acc_per_sample.append(current_acc/n_pos)<block_end>self.assertAlmostEqual(actual_loss expected_loss places=2)<line_sep>self.assertAlmostEqual(actual_metrics['loss'] expected_loss places=2)<line_sep>self.assertAlmostEqual(actual_metrics['acc'] expected_acc places=4)<line_sep>self.assertAlmostEqual(actual_metrics['denominator'] expected_denom places=4)<line_sep>self.assertTrue(np.all(weights<eq>actual_weights_per_sample))<line_sep>self.assertSequenceAlmostEqual(actual_acc_per_sample expected_acc_per_sample places=4)<block_end><block_end><class_stmt>ComputeMetricsFromDuplicatesTest(absltest.TestCase)<block_start>"""Test whether metrics computation produces expected values."""<line_sep>batch_size=32<line_sep>seq_len=20<line_sep>num_items=100<line_sep>num_classes=200<def_stmt>test_values_as_expected self<block_start>"""Test whether metrics computation produces expected values."""<line_sep>probs=np.ones((self.batch_size self.seq_len self.num_items) dtype=np.float32)/self.num_items<line_sep>classes=np.ones((self.batch_size self.seq_len self.num_items) dtype=np.int32)<line_sep>targets=np.ones((self.batch_size self.seq_len) dtype=np.int32)<line_sep>weights=np.random.randint(2 size=(self.batch_size 
self.seq_len))<line_sep># Check case where all classes are targets loss,avg_prob,denominator=metric_utils.compute_loss_and_prob_from_probs_with_duplicates(probs classes targets weights )<line_sep>self.assertAlmostEqual(loss/denominator 0.0 4)<line_sep>self.assertAlmostEqual(avg_prob/denominator 1.0 4)<line_sep>self.assertAlmostEqual(denominator weights.sum() 4)<line_sep># Check case where no classes are targets targets=np.zeros((self.batch_size self.seq_len) dtype=np.int32)<line_sep>loss,avg_prob,denominator=metric_utils.compute_loss_and_prob_from_probs_with_duplicates(probs classes targets weights )<line_sep>self.assertAlmostEqual(avg_prob/denominator 0.0 4)<line_sep># Check random cases classes=np.random.randint(self.num_classes size=(self.batch_size self.seq_len self.num_items))<line_sep>targets=np.random.randint(self.num_classes size=(self.batch_size self.seq_len))<line_sep>loss,avg_prob,denominator=metric_utils.compute_loss_and_prob_from_probs_with_duplicates(probs classes targets weights )<line_sep>correct_probs=(classes<eq>np.expand_dims(targets axis=-1))<times>probs<line_sep>expected_avg_prob=(correct_probs<times>np.expand_dims(weights axis=-1)).sum()/weights.sum()<line_sep>self.assertAlmostEqual(avg_prob/denominator expected_avg_prob 4)<block_end><block_end><class_stmt>ProcessMetricsTest(absltest.TestCase)<block_start>"""Test metrics processing."""<def_stmt>test_values_as_expected self<block_start>"""Test whether processed dictionaries match expected values."""<line_sep>metric_dict={'cat1':{'key':2.0 'denominator':1.0} 'cat2':{'key':2.0 'denominator':2.0} }<line_sep>processed_metrics=metric_utils.process_metrics(metric_dict)<line_sep>expected_result={'cat1_key':2.0 'cat1_denom':1.0 'cat2_key':1.0 'cat2_denom':2.0 }<line_sep>self.assertEqual(processed_metrics expected_result)<line_sep>metric_dict={'cat1':{'key':2.0 'denominator':1.0} 'cat2':{'key':2.0 'denominator':2.0} }<line_sep>processed_metrics=metric_utils.process_metrics(metric_dict 
prefix='pref')<line_sep>expected_result={'pref/cat1_key':2.0 'pref/cat1_denom':1.0 'pref/cat2_key':1.0 'pref/cat2_denom':2.0 }<line_sep>self.assertEqual(processed_metrics expected_result)<block_end><block_end><class_stmt>UpdateMetricsDTypeTest(absltest.TestCase)<block_start>"""Test metrics processing."""<def_stmt>test_types_as_expected self<block_start>"""Test whether updated metrics match expected types."""<line_sep>metric_dict={'cat1':{'key':jnp.asarray([1] dtype=jnp.int32) 'denominator':jnp.asarray([1] dtype=jnp.int16)} 'cat2':{'key':2.0 'denominator':jnp.asarray([1] dtype=jnp.bfloat16)} }<line_sep>processed_metrics=metric_utils.update_metrics_dtype(metric_dict)<line_sep>self.assertEqual(processed_metrics['cat1']['key'].dtype jnp.float32)<line_sep>self.assertEqual(processed_metrics['cat1']['denominator'].dtype jnp.float32)<line_sep>self.assertIsInstance(processed_metrics['cat2']['key'] float)<line_sep>self.assertEqual(processed_metrics['cat2']['denominator'].dtype jnp.float32)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
####################################################
## Tool name: BetterBusBuffers
## Created by: <NAME>, Esri, <EMAIL>
## Last updated: 5 December 2017
####################################################
''' BetterBusBuffers Polygon Tool: Step 1 - Preprocess Buffers

BetterBusBuffers provides a quantitative measure of access to public transit
in your city. It creates buffers around the transit stops and weights them by
the number of trips that pass that stop during the time window you select,
accounting for areas served by more than one stop.
Output can be shown as the total number of trips or the average number of
trips per hour during the time window. You can use the symbology settings of
the resulting feature class to highlight the frequency of service in
different areas of town. Note that the tool tells you nothing about the
destination of the buses that pass by the stops, only how many of them there
are.
BetterBusBuffers uses GTFS public transit data and ArcGIS Network Analyst.

Step 1 does the following:
- Creates service areas around your transit stops
- Runs some post-processing on those service areas to prepare them for
  further analysis
You should only have to run Step 1 once for the geography and buffer size
you are analyzing. Step 1 will take a while to run for larger transit
systems.
'''
################################################################################
'''Copyright 2017 Esri
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.'''
################################################################################

import os
import sqlite3
from shutil import copyfile

import arcpy

import BBB_SharedFunctions


def runTool(outDir, outGDB, inSQLDbase, inNetworkDataset, imp, BufferSize,
            restrictions, TrimSettings):
    """Run BetterBusBuffers polygon Step 1: create and post-process service areas.

    Parameters:
        outDir: Folder in which the output file geodatabase is created.
        outGDB: Name of the output geodatabase; ".gdb" is appended if absent.
        inSQLDbase: Path of the preprocessed GTFS SQL database, which is
            copied into the output geodatabase as Step1_GTFS.sql.
        inNetworkDataset: Network dataset used to generate service areas.
        imp: Impedance attribute (normalized via BBB_SharedFunctions).
        BufferSize: Service area size around each stop (network units).
        restrictions: Network restriction attributes to honor.
        TrimSettings: Service area trim settings (normalized via
            BBB_SharedFunctions).

    Side effects: writes Step1_Stops, Step1_FlatPolys, and Step1_GTFS.sql into
    the new geodatabase and sets tool output parameters 8-10.
    """
    # Initialized up front so the finally-block can clean up even if setup
    # fails partway through.
    OverwriteOutput = None  # caller's original arcpy.env.overwriteOutput
    conn = None             # sqlite3 connection to the copied GTFS database
    try:
        # ----- Set up the run -----
        try:
            BBB_SharedFunctions.CheckArcVersion(min_version_pro="1.2")
            BBB_SharedFunctions.CheckArcInfoLicense()
            BBB_SharedFunctions.CheckOutNALicense()
            BBB_SharedFunctions.CheckWorkspace()

            # It's okay to overwrite in-memory stuff. Save the original value
            # so it can be restored when the tool finishes (see finally).
            OverwriteOutput = arcpy.env.overwriteOutput
            arcpy.env.overwriteOutput = True

            # Append .gdb to geodatabase name.
            if not outGDB.lower().endswith(".gdb"):
                outGDB += ".gdb"
            outGDBwPath = os.path.join(outDir, outGDB)
            # Create a file geodatabase for the results.
            arcpy.management.CreateFileGDB(outDir, outGDB)

            # Make a copy of the input SQL file in the Step 1 output so we
            # can modify it.
            SQLDbase = os.path.join(outGDBwPath, "Step1_GTFS.sql")
            copyfile(inSQLDbase, SQLDbase)

            # Connect to the copied SQL file. The cursor is also published on
            # BBB_SharedFunctions, which the shared helpers read.
            conn = sqlite3.connect(SQLDbase)
            c = BBB_SharedFunctions.c = conn.cursor()

            impedanceAttribute = BBB_SharedFunctions.CleanUpImpedance(imp)
            TrimPolys, TrimPolysValue = BBB_SharedFunctions.CleanUpTrimSettings(TrimSettings)
        except:
            arcpy.AddError("Error setting up run.")
            raise

        # ----- Make a feature class of GTFS stops that we can use for buffers -----
        try:
            # Create a feature class of transit stops.
            arcpy.AddMessage("Creating a feature class of GTFS stops...")
            StopsLayer, StopIDList = BBB_SharedFunctions.MakeStopsFeatureClass(
                os.path.join(outGDBwPath, "Step1_Stops"))
        except:
            arcpy.AddError("Error creating a feature class of GTFS stops.")
            raise

        # ----- Create Service Areas around all stops in the system -----
        try:
            arcpy.AddMessage("Creating service areas around stops...")
            arcpy.AddMessage("(This step will take a while for large networks.)")
            polygons = BBB_SharedFunctions.MakeServiceAreasAroundStops(
                StopsLayer, inNetworkDataset, impedanceAttribute, BufferSize,
                restrictions, TrimPolys, TrimPolysValue)
        except:
            arcpy.AddError("Error creating service areas around stops.")
            raise

        # ----- Post-process the polygons to prepare for Step 2 -----
        try:
            arcpy.AddMessage("Reformatting polygons for further analysis...")
            arcpy.AddMessage("(This step will take a while for large networks.)")

            # ----- Flatten the overlapping service area polygons -----

            # Use World Cylindrical Equal Area (WKID 54034) to ensure proper
            # use of cluster tolerance in meters.
            arcpy.env.outputCoordinateSystem = BBB_SharedFunctions.WorldCylindrical

            # Dummy points to use in FeatureToPolygon to get rid of
            # unnecessary fields.
            dummypoints = arcpy.management.CreateFeatureclass(
                "in_memory", "DummyPoints", "POINT")

            # The flattened polygons will be our ultimate output in the end
            # (final output of step 2).
            FlatPolys = os.path.join(outGDBwPath, "Step1_FlatPolys")

            # FeatureToPolygon flattens overlapping polys.
            # Set a large cluster tolerance to eliminate small sliver
            # polygons and to keep the output file size down. Boundaries may
            # move up to the distance specified in the cluster tolerance, but
            # some amount of movement is acceptable, as service area polygons
            # are inexact anyway. The large cluster tolerance may cause some
            # geometry issues with the output later, but this is the best
            # solution found so far that doesn't eat up too much analysis
            # time and memory.
            clusTol = "5 meters"
            arcpy.management.FeatureToPolygon(polygons, FlatPolys, clusTol, "", dummypoints)
            arcpy.management.Delete(dummypoints)

            # Add fields to the output file for number of trips and trips per
            # hour, plus a polygon id field so we can keep track of them.
            arcpy.management.AddField(FlatPolys, "PolyID", "LONG")
            arcpy.management.AddField(FlatPolys, "NumTrips", "LONG")
            arcpy.management.AddField(FlatPolys, "NumTripsPerHr", "DOUBLE")
            arcpy.management.AddField(FlatPolys, "NumStopsInRange", "LONG")
            arcpy.management.AddField(FlatPolys, "MaxWaitTime", "DOUBLE")

            # ----- Create stacked points, one for each original SA polygon -----

            # Create points for use in the Identity tool (one point per poly).
            FlattenedPoints = os.path.join(outGDBwPath, "Step1_FlattenedPoints")
            arcpy.management.FeatureToPoint(FlatPolys, FlattenedPoints, "INSIDE")

            # Use Identity to stack points and keep the stop_ids from the
            # original SAs. Results in a points layer with fields ORIG_FID
            # for the IDs of the flattened polygons and a stop_id column with
            # the stop ids. Points are stacked, and each has only one
            # stop_id.
            StackedPoints = os.path.join(outGDBwPath, "Step1_StackedPoints")
            arcpy.analysis.Identity(FlattenedPoints, polygons, StackedPoints)
            arcpy.management.Delete(FlattenedPoints)

            # ----- Read the Stacked Points into an SQL table -----

            # Create a SQL table associating the Polygon FID with the
            # stop_ids that serve it.
            c.execute("DROP TABLE IF EXISTS StackedPoints;")
            schema = "Polygon_FID LONG, stop_id TEXT"
            create_stmt = "CREATE TABLE StackedPoints (%s);" % schema
            c.execute(create_stmt)

            # Add data to the table. Track Polygon IDs with no associated
            # stop_ids so we can delete them.
            FIDsToDelete = []
            AddToStackedPts = []
            with arcpy.da.SearchCursor(StackedPoints, ["ORIG_FID", "stop_id"]) as StackedPtCursor:
                for row in StackedPtCursor:
                    if not row[1]:
                        FIDsToDelete.append(row[0])
                    else:
                        AddToStackedPts.append((row[0], row[1],))
            # Add the OD items to the SQL table.
            c.executemany('''INSERT INTO StackedPoints \
                          (Polygon_FID, stop_id) \
                          VALUES (?, ?);''', AddToStackedPts)
            conn.commit()
            arcpy.management.Delete(StackedPoints)
            FIDsToDelete = set(FIDsToDelete)

            # ----- Delete polygons not associated with any stop_ids -----

            # These were generated by the FeatureToPolygon tool in areas
            # completely surrounded by other polygons and aren't associated
            # with any stops.

            # Make feature layer containing only the polygons we want to
            # delete. Anything with 0 area will just cause problems later.
            desc2 = arcpy.Describe(FlatPolys)
            OutputOIDName = desc2.OIDFieldName
            WhereClause = '"Shape_Area" = 0'
            if FIDsToDelete:
                WhereClause += (' OR "' + OutputOIDName + '" IN (' +
                                ", ".join(str(FID) for FID in FIDsToDelete) + ")")
            arcpy.management.MakeFeatureLayer(FlatPolys, "FlatPolysLayer", WhereClause)
            # Delete the polygons that don't correspond to any stop_ids.
            arcpy.management.DeleteFeatures("FlatPolysLayer")

            # ----- Populate the PolyID field -----

            # Set PolyID equal to the OID.
            expression = "!" + OutputOIDName + "!"
            arcpy.management.CalculateField(FlatPolys, "PolyID", expression, "PYTHON")
        except:
            arcpy.AddError("Error post-processing polygons")
            raise

        arcpy.AddMessage("Done!")
        arcpy.AddMessage("Files written to output geodatabase " + outGDBwPath + ":")
        arcpy.AddMessage("- Step1_Stops")
        arcpy.AddMessage("- Step1_FlatPolys")
        arcpy.AddMessage("- Step1_GTFS.sql")

        # Tell the tool that this is output. This will add the output to the map.
        arcpy.SetParameterAsText(8, os.path.join(outGDBwPath, "Step1_Stops"))
        arcpy.SetParameterAsText(9, os.path.join(outGDBwPath, "Step1_FlatPolys"))
        arcpy.SetParameterAsText(10, os.path.join(outGDBwPath, "Step1_GTFS.sql"))

    except BBB_SharedFunctions.CustomError:
        # CustomError has already been reported in detail by the helper that
        # raised it; just add the summary message and swallow it.
        arcpy.AddError("Failed to create BetterBusBuffers polygons.")
        pass
    except:
        arcpy.AddError("Failed to create BetterBusBuffers polygons.")
        raise
    finally:
        # Fix: the original saved the user's overwriteOutput setting but
        # never restored it, and left the sqlite connection open.
        if OverwriteOutput is not None:
            arcpy.env.overwriteOutput = OverwriteOutput
        if conn is not None:
            conn.close()
"""
Discover SNMPv3 SecurityEngineId
++++++++++++++++++++++++++++++++

Send SNMP GET request using the following scenario and options:

* try to communicate with a SNMPv3 Engine using:

* * a non-existing user
* * over IPv4/UDP
* * to an Agent at demo.snmplabs.com:161

* if remote SNMP Engine ID is discovered, send SNMP GET request:

* * with SNMPv3, user 'usr-md5-none', MD5 authentication, no privacy
    at discovered securityEngineId
* * to the same SNMP Engine ID
* * for an OID in text form

"""#
from pysnmp.hlapi import *

snmpEngine = SnmpEngine()

transportTarget = UdpTransportTarget(('demo.snmplabs.com', 161))

#
# To discover remote SNMP EngineID we will tap on SNMP engine inner workings
# by setting up execution point observer setup on INTERNAL class PDU processing
#

observerContext = {}

# Register a callback to be invoked at specified execution point of
# SNMP Engine and passed local variables at execution point's local scope
snmpEngine.observer.registerObserver(
    lambda e, p, v, c: c.update(securityEngineId=v['securityEngineId']),
    'rfc3412.prepareDataElements:internal',
    cbCtx=observerContext
)

# Send probe SNMP request with invalid credentials
authData = UsmUserData('non-existing-user')

errorIndication, errorStatus, errorIndex, varBinds = next(
    getCmd(snmpEngine, authData, transportTarget, ContextData(),
           ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
)

# See if our SNMP engine received REPORT PDU containing securityEngineId
if 'securityEngineId' not in observerContext:
    print("Can't discover peer EngineID, errorIndication: %s" % errorIndication)
    # Fix: carry the failure reason in the exception instead of raising a
    # bare, message-less Exception() -- the traceback is now self-explanatory.
    raise Exception("Can't discover peer EngineID, errorIndication: %s"
                    % errorIndication)

securityEngineId = observerContext.pop('securityEngineId')

print('Remote securityEngineId = %s' % securityEngineId.prettyPrint())

#
# Query remote SNMP Engine using usmUserTable entry configured for it
#

authData = UsmUserData('usr-md5-none', 'authkey1',
                       securityEngineId=securityEngineId)

iterator = getCmd(snmpEngine, authData, transportTarget, ContextData(),
                  ObjectType(ObjectIdentity('1.3.6.1.2.1.1.1.0')))

errorIndication, errorStatus, errorIndex, varBinds = next(iterator)

if errorIndication:
    print(errorIndication)
elif errorStatus:
    print('%s at %s' % (errorStatus.prettyPrint(),
                        errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
    for name, val in varBinds:
        print('%s = %s' % (name.prettyPrint(), val.prettyPrint()))
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-WiFiHotspotService
GUID : 814182fe-58f7-11e1-853c-78e7d1ca7337
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid

# Every event class in this module is registered under the same provider GUID.
_PROVIDER_GUID = guid("814182fe-58f7-11e1-853c-78e7d1ca7337")

# The most common payload layout in this provider (a Unicode string followed
# by a 32-bit unsigned value). Defined once and shared; construct Structs are
# stateless parsers, so reuse across classes is safe.
_USTRING_DWORD = Struct(
    "uString" / WString,
    "Dword" / Int32ul)


@declare(guid=_PROVIDER_GUID, event_id=1003, version=0)
class Microsoft_Windows_WiFiHotspotService_1003_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1004, version=0)
class Microsoft_Windows_WiFiHotspotService_1004_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1005, version=0)
class Microsoft_Windows_WiFiHotspotService_1005_0(Etw):
    pattern = Struct(
        "Ptr1" / Int64ul,
        "Ptr2" / Int64ul)


@declare(guid=_PROVIDER_GUID, event_id=1006, version=0)
class Microsoft_Windows_WiFiHotspotService_1006_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1007, version=0)
class Microsoft_Windows_WiFiHotspotService_1007_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1008, version=0)
class Microsoft_Windows_WiFiHotspotService_1008_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1009, version=0)
class Microsoft_Windows_WiFiHotspotService_1009_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1010, version=0)
class Microsoft_Windows_WiFiHotspotService_1010_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1011, version=0)
class Microsoft_Windows_WiFiHotspotService_1011_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=1012, version=0)
class Microsoft_Windows_WiFiHotspotService_1012_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=2000, version=0)
class Microsoft_Windows_WiFiHotspotService_2000_0(Etw):
    pattern = Struct(
        "uString1" / WString,
        "uString2" / WString,
        "Dword1" / Int32ul,
        "Dword2" / Int32ul,
        "Dword3" / Int32ul)


@declare(guid=_PROVIDER_GUID, event_id=3000, version=0)
class Microsoft_Windows_WiFiHotspotService_3000_0(Etw):
    pattern = Struct(
        "Dword1" / Int32ul,
        "Dword2" / Int32ul)


@declare(guid=_PROVIDER_GUID, event_id=3001, version=0)
class Microsoft_Windows_WiFiHotspotService_3001_0(Etw):
    pattern = Struct(
        "Ptr" / Int64ul,
        "Dword" / Int32ul)


@declare(guid=_PROVIDER_GUID, event_id=3002, version=0)
class Microsoft_Windows_WiFiHotspotService_3002_0(Etw):
    pattern = Struct(
        "Ptr" / Int64ul)


@declare(guid=_PROVIDER_GUID, event_id=3003, version=0)
class Microsoft_Windows_WiFiHotspotService_3003_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=3004, version=0)
class Microsoft_Windows_WiFiHotspotService_3004_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=4000, version=0)
class Microsoft_Windows_WiFiHotspotService_4000_0(Etw):
    pattern = Struct(
        "aString" / CString)


@declare(guid=_PROVIDER_GUID, event_id=4001, version=0)
class Microsoft_Windows_WiFiHotspotService_4001_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=4002, version=0)
class Microsoft_Windows_WiFiHotspotService_4002_0(Etw):
    pattern = _USTRING_DWORD


@declare(guid=_PROVIDER_GUID, event_id=4003, version=0)
class Microsoft_Windows_WiFiHotspotService_4003_0(Etw):
    pattern = Struct(
        "Dword1" / Int32ul,
        "Dword2" / Int32ul,
        "aString1" / CString,
        "Dword3" / Int32ul,
        "Dword4" / Int32ul,
        "Dword5" / Int32ul,
        "uString1" / WString)


@declare(guid=_PROVIDER_GUID, event_id=4004, version=0)
class Microsoft_Windows_WiFiHotspotService_4004_0(Etw):
    pattern = _USTRING_DWORD
""" :codeauthor: :email:`<NAME> <<EMAIL>>` """<import_stmt>datetime<import_stmt>salt.utils.ssdp<as>ssdp<import_stmt>salt.utils.stringutils<import_from_stmt>tests.support.mock MagicMock patch<import_from_stmt>tests.support.unit TestCase skipIf<try_stmt><block_start><import_stmt>pytest<block_end><except_stmt>ImportError<block_start>pytest=<none><block_end><class_stmt>Mocks<block_start><def_stmt>get_socket_mock self expected_ip expected_hostname<block_start>""" Get a mock of a socket :return: """<line_sep>sck=MagicMock()<line_sep>sck.getsockname=MagicMock(return_value=(expected_ip 123456))<line_sep>sock_mock=MagicMock()<line_sep>sock_mock.socket=MagicMock(return_value=sck)<line_sep>sock_mock.gethostname=MagicMock(return_value=expected_hostname)<line_sep>sock_mock.gethostbyname=MagicMock(return_value=expected_ip)<line_sep><return>sock_mock<block_end><def_stmt>get_ssdp_factory self expected_ip=<none> expected_hostname=<none> **config<block_start><if_stmt>expected_ip<is><none><block_start>expected_ip="127.0.0.1"<block_end><if_stmt>expected_hostname<is><none><block_start>expected_hostname="localhost"<block_end>sock_mock=self.get_socket_mock(expected_ip expected_hostname)<with_stmt>patch("salt.utils.ssdp.socket" sock_mock)<block_start>factory=ssdp.SSDPFactory(**config)<block_end><return>factory<block_end><def_stmt>get_ssdp_discovery_client self expected_ip=<none> expected_hostname=<none> **config<block_start><if_stmt>expected_ip<is><none><block_start>expected_ip="127.0.0.1"<block_end><if_stmt>expected_hostname<is><none><block_start>expected_hostname="localhost"<block_end>sock_mock=self.get_socket_mock(expected_ip expected_hostname)<with_stmt>patch("salt.utils.ssdp.socket" sock_mock)<block_start>factory=ssdp.SSDPDiscoveryClient(**config)<block_end><return>factory<block_end><def_stmt>get_ssdp_discovery_server self expected_ip=<none> expected_hostname=<none> 
**config<block_start><if_stmt>expected_ip<is><none><block_start>expected_ip="127.0.0.1"<block_end><if_stmt>expected_hostname<is><none><block_start>expected_hostname="localhost"<block_end>sock_mock=self.get_socket_mock(expected_ip expected_hostname)<with_stmt>patch("salt.utils.ssdp.socket" sock_mock)<block_start>factory=ssdp.SSDPDiscoveryServer(**config)<block_end><return>factory<block_end><block_end>@skipIf(pytest<is><none> "PyTest is missing")<class_stmt>SSDPBaseTestCase(TestCase Mocks)<block_start>""" TestCase for SSDP-related parts. """<line_sep>@staticmethod<def_stmt>exception_generic *args **kwargs<block_start>""" Side effect :return: """<line_sep><raise>Exception("some network error")<block_end>@staticmethod<def_stmt>exception_attr_error *args **kwargs<block_start>""" Side effect :return: """<line_sep><raise>AttributeError("attribute error: {}. {}".format(args kwargs))<block_end>@patch("salt.utils.ssdp._json" <none>)@patch("salt.utils.ssdp.asyncio" <none>)<def_stmt>test_base_avail self<block_start>""" Test SSDP base class availability method. :return: """<line_sep>base=ssdp.SSDPBase()<assert_stmt><not>base._is_available()<with_stmt>patch("salt.utils.ssdp._json" <true>)<block_start><assert_stmt><not>base._is_available()<block_end><with_stmt>patch("salt.utils.ssdp.asyncio" <true>)<block_start><assert_stmt><not>base._is_available()<block_end><with_stmt>patch("salt.utils.ssdp._json" <true>) patch("salt.utils.ssdp.asyncio" <true>)<block_start><assert_stmt>base._is_available()<block_end><block_end><def_stmt>test_base_protocol_settings self<block_start>""" Tests default constants data. 
:return: """<line_sep>base=ssdp.SSDPBase()<line_sep>v_keys=["signature" "answer" "port" "listen_ip" "timeout"]<line_sep>v_vals=["__salt_master_service" {} 4520 "0.0.0.0" 3]<for_stmt>key v_keys<block_start><assert_stmt>key<in>base.DEFAULTS<block_end><for_stmt>key base.DEFAULTS<block_start><assert_stmt>key<in>v_keys<block_end><for_stmt>key,value zip(v_keys v_vals)<block_start><assert_stmt>base.DEFAULTS[key]<eq>value<block_end><block_end><def_stmt>test_base_self_ip self<block_start>""" Test getting self IP method. :return: """<line_sep>base=ssdp.SSDPBase()<line_sep>expected_ip="192.168.1.10"<line_sep>expected_host="oxygen"<line_sep>sock_mock=self.get_socket_mock(expected_ip expected_host)<with_stmt>patch("salt.utils.ssdp.socket" sock_mock)<block_start><assert_stmt>base.get_self_ip()<eq>expected_ip<block_end>sock_mock.socket().getsockname.side_effect=SSDPBaseTestCase.exception_generic<with_stmt>patch("salt.utils.ssdp.socket" sock_mock)<block_start><assert_stmt>base.get_self_ip()<eq>expected_ip<block_end><block_end><block_end>@skipIf(pytest<is><none> "PyTest is missing")<class_stmt>SSDPFactoryTestCase(TestCase Mocks)<block_start>""" Test socket protocol """<def_stmt>test_attr_check self<block_start>""" Tests attributes are set to the base class :return: """<line_sep>config={ssdp.SSDPBase.SIGNATURE:"-signature-" ssdp.SSDPBase.ANSWER:{"this-is":"the-answer"} }<line_sep>expected_ip="10.10.10.10"<line_sep>factory=self.get_ssdp_factory(expected_ip=expected_ip **config)<for_stmt>attr [ssdp.SSDPBase.SIGNATURE ssdp.SSDPBase.ANSWER]<block_start><assert_stmt>hasattr(factory attr)<assert_stmt>getattr(factory attr)<eq>config[attr]<block_end><assert_stmt><not>factory.disable_hidden<assert_stmt>factory.my_ip<eq>expected_ip<block_end><def_stmt>test_transport_sendto_success self<block_start>""" Test transport send_to. 
:return: """<line_sep>transport=MagicMock()<line_sep>log=MagicMock()<line_sep>factory=self.get_ssdp_factory()<with_stmt>patch.object(factory "transport" transport) patch.object(factory "log" log)<block_start>data={"some":"data"}<line_sep>addr="10.10.10.10"<line_sep>factory._sendto(data=data addr=addr)<assert_stmt>factory.transport.sendto.called<assert_stmt>factory.transport.sendto.mock_calls[0][1][0]["some"]<eq>"data"<assert_stmt>factory.transport.sendto.mock_calls[0][2]["addr"]<eq>"10.10.10.10"<assert_stmt>factory.log.debug.called<assert_stmt>factory.log.debug.mock_calls[0][1][0]<eq>"Sent successfully"<block_end><block_end><def_stmt>test_transport_sendto_retry self<block_start>""" Test transport send_to. :return: """<with_stmt>patch("salt.utils.ssdp.time.sleep" MagicMock())<block_start>transport=MagicMock()<line_sep>transport.sendto=MagicMock(side_effect=SSDPBaseTestCase.exception_attr_error)<line_sep>log=MagicMock()<line_sep>factory=self.get_ssdp_factory()<with_stmt>patch.object(factory "transport" transport) patch.object(factory "log" log)<block_start>data={"some":"data"}<line_sep>addr="10.10.10.10"<line_sep>factory._sendto(data=data addr=addr)<assert_stmt>factory.transport.sendto.called<assert_stmt>ssdp.time.sleep.called<assert_stmt>(ssdp.time.sleep.call_args[0][0]<g>0<and>ssdp.time.sleep.call_args[0][0]<l>0.5)<assert_stmt>factory.log.debug.called<assert_stmt>"Permission error"<in>factory.log.debug.mock_calls[0][1][0]<block_end><block_end><block_end><def_stmt>test_datagram_signature_bad self<block_start>""" Test datagram_received on bad signature :return: """<line_sep>factory=self.get_ssdp_factory()<line_sep>data="nonsense"<line_sep>addr="10.10.10.10" "foo.suse.de"<with_stmt>patch.object(factory "log" MagicMock())<block_start>factory.datagram_received(data=data addr=addr)<assert_stmt>factory.log.debug.called<assert_stmt>"Received bad signature 
from"<in>factory.log.debug.call_args[0][0]<assert_stmt>factory.log.debug.call_args[0][1]<eq>addr[0]<assert_stmt>factory.log.debug.call_args[0][2]<eq>addr[1]<block_end><block_end><def_stmt>test_datagram_signature_wrong_timestamp_quiet self<block_start>""" Test datagram receives a wrong timestamp (no reply). :return: """<line_sep>factory=self.get_ssdp_factory()<line_sep>data="{}nonsense".format(ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE])<line_sep>addr="10.10.10.10" "foo.suse.de"<with_stmt>patch.object(factory "log" MagicMock()) patch.object(factory "_sendto" MagicMock())<block_start>factory.datagram_received(data=data addr=addr)<assert_stmt>factory.log.debug.called<assert_stmt>("Received invalid timestamp in package"<in>factory.log.debug.call_args[0][0])<assert_stmt><not>factory._sendto.called<block_end><block_end><def_stmt>test_datagram_signature_wrong_timestamp_reply self<block_start>""" Test datagram receives a wrong timestamp. :return: """<line_sep>factory=self.get_ssdp_factory()<line_sep>factory.disable_hidden=<true><line_sep>signature=ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]<line_sep>data="{}nonsense".format(signature)<line_sep>addr="10.10.10.10" "foo.suse.de"<with_stmt>patch.object(factory "log" MagicMock()) patch.object(factory "_sendto" MagicMock())<block_start>factory.datagram_received(data=data addr=addr)<assert_stmt>factory.log.debug.called<assert_stmt>("Received invalid timestamp in package"<in>factory.log.debug.call_args[0][0])<assert_stmt>factory._sendto.called<assert_stmt>("{}:E:Invalid timestamp".format(signature)<eq>factory._sendto.call_args[0][0])<block_end><block_end><def_stmt>test_datagram_signature_outdated_timestamp_quiet self<block_start>""" Test if datagram processing reacts on outdated message (more than 20 seconds). Quiet mode. 
:return: """<line_sep>factory=self.get_ssdp_factory()<line_sep>signature=ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]<line_sep>data="{}{}".format(signature "1516623820")<line_sep>addr="10.10.10.10" "foo.suse.de"<line_sep>ahead_dt=datetime.datetime.fromtimestamp(1516623841)<line_sep>curnt_dt=datetime.datetime.fromtimestamp(1516623820)<line_sep>delta=datetime.timedelta(0 20)<with_stmt>patch.object(factory "log" MagicMock()) patch.object(factory "_sendto") patch("salt.utils.ssdp.datetime.datetime" MagicMock()) patch("salt.utils.ssdp.datetime.datetime.now" MagicMock(return_value=ahead_dt)) patch("salt.utils.ssdp.datetime.datetime.fromtimestamp" MagicMock(return_value=curnt_dt) ) patch("salt.utils.ssdp.datetime.timedelta" MagicMock(return_value=delta))<block_start>factory.datagram_received(data=data addr=addr)<assert_stmt>factory.log.debug.called<assert_stmt><not>factory.disable_hidden<assert_stmt><not>factory._sendto.called<assert_stmt>"Received outdated package"<in>factory.log.debug.call_args[0][0]<block_end><block_end><def_stmt>test_datagram_signature_outdated_timestamp_reply self<block_start>""" Test if datagram processing reacts on outdated message (more than 20 seconds). Reply mode. 
:return: """<line_sep>factory=self.get_ssdp_factory()<line_sep>factory.disable_hidden=<true><line_sep>signature=ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]<line_sep>data="{}{}".format(signature "1516623820")<line_sep>addr="10.10.10.10" "foo.suse.de"<line_sep>ahead_dt=datetime.datetime.fromtimestamp(1516623841)<line_sep>curnt_dt=datetime.datetime.fromtimestamp(1516623820)<line_sep>delta=datetime.timedelta(0 20)<with_stmt>patch.object(factory "log" MagicMock()) patch.object(factory "_sendto") patch("salt.utils.ssdp.datetime.datetime" MagicMock()) patch("salt.utils.ssdp.datetime.datetime.now" MagicMock(return_value=ahead_dt)) patch("salt.utils.ssdp.datetime.datetime.fromtimestamp" MagicMock(return_value=curnt_dt) ) patch("salt.utils.ssdp.datetime.timedelta" MagicMock(return_value=delta))<block_start>factory.datagram_received(data=data addr=addr)<assert_stmt>factory.log.debug.called<assert_stmt>factory.disable_hidden<assert_stmt>factory._sendto.called<assert_stmt>factory._sendto.call_args[0][0]<eq>"{}:E:Timestamp is too old".format(signature)<assert_stmt>"Received outdated package"<in>factory.log.debug.call_args[0][0]<block_end><block_end><def_stmt>test_datagram_signature_correct_timestamp_reply self<block_start>""" Test if datagram processing sends out correct reply within 20 seconds. 
:return: """<line_sep>factory=self.get_ssdp_factory()<line_sep>factory.disable_hidden=<true><line_sep>signature=ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]<line_sep>data="{}{}".format(signature "1516623820")<line_sep>addr="10.10.10.10" "foo.suse.de"<line_sep>ahead_dt=datetime.datetime.fromtimestamp(1516623840)<line_sep>curnt_dt=datetime.datetime.fromtimestamp(1516623820)<line_sep>delta=datetime.timedelta(0 20)<with_stmt>patch.object(factory "log" MagicMock()) patch.object(factory "_sendto") patch("salt.utils.ssdp.datetime.datetime" MagicMock()) patch("salt.utils.ssdp.datetime.datetime.now" MagicMock(return_value=ahead_dt)) patch("salt.utils.ssdp.datetime.datetime.fromtimestamp" MagicMock(return_value=curnt_dt) ) patch("salt.utils.ssdp.datetime.timedelta" MagicMock(return_value=delta))<block_start>factory.datagram_received(data=data addr=addr)<assert_stmt>factory.log.debug.called<assert_stmt>factory.disable_hidden<assert_stmt>factory._sendto.called<assert_stmt>factory._sendto.call_args[0][0]<eq>salt.utils.stringutils.to_bytes("{}:@:{{}}".format(signature))<assert_stmt>'Received "%s" from %s:%s'<in>factory.log.debug.call_args[0][0]<block_end><block_end><block_end>@skipIf(pytest<is><none> "PyTest is missing")<class_stmt>SSDPServerTestCase(TestCase Mocks)<block_start>""" Server-related test cases """<def_stmt>test_config_detached self<block_start>""" Test if configuration is not a reference. :return: """<line_sep>old_ip="10.10.10.10"<line_sep>new_ip="20.20.20.20"<line_sep>config={"answer":{"master":old_ip}}<with_stmt>patch("salt.utils.ssdp.SSDPDiscoveryServer.get_self_ip" MagicMock(return_value=new_ip) )<block_start>srv=ssdp.SSDPDiscoveryServer(**config)<assert_stmt>srv._config["answer"]["master"]<eq>new_ip<assert_stmt>config["answer"]["master"]<eq>old_ip<block_end><block_end><def_stmt>test_run self<block_start>""" Test server runner. 
:return: """<with_stmt>patch("salt.utils.ssdp.SSDPFactory" MagicMock())<block_start>config={"answer":{"master":"10.10.10.10"} ssdp.SSDPBase.LISTEN_IP:"10.10.10.10" ssdp.SSDPBase.PORT:12345 }<line_sep>srv=self.get_ssdp_discovery_server(**config)<line_sep>srv.create_datagram_endpoint=MagicMock()<line_sep>srv.log=MagicMock()<line_sep>trnsp=MagicMock()<line_sep>proto=MagicMock()<line_sep>loop=MagicMock()<line_sep>loop.run_until_complete=MagicMock(return_value=(trnsp proto))<line_sep>io=MagicMock()<line_sep>io.ported=<false><line_sep>io.get_event_loop=MagicMock(return_value=loop)<with_stmt>patch("salt.utils.ssdp.asyncio" io)<block_start>srv.run()<line_sep>cde_args=io.get_event_loop().create_datagram_endpoint.call_args[1]<line_sep>cfg_ip_addr,cfg_port=cde_args["local_addr"]<assert_stmt>io.get_event_loop.called<assert_stmt>io.get_event_loop().run_until_complete.called<assert_stmt>io.get_event_loop().create_datagram_endpoint.called<assert_stmt>io.get_event_loop().run_forever.called<assert_stmt>trnsp.close.called<assert_stmt>loop.close.called<assert_stmt>srv.log.info.called<assert_stmt>(srv.log.info.call_args[0][0]<eq>"Stopping service discovery listener.")<assert_stmt>"allow_broadcast"<in>cde_args<assert_stmt>cde_args["allow_broadcast"]<assert_stmt>"local_addr"<in>cde_args<assert_stmt>(<not>cfg_ip_addr<eq>ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.LISTEN_IP]<and>cfg_ip_addr<eq>"10.10.10.10")<assert_stmt>(<not>cfg_port<eq>ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.PORT]<and>cfg_port<eq>12345)<block_end><block_end><block_end><block_end>@skipIf(pytest<is><none> "PyTest is missing")<class_stmt>SSDPClientTestCase(TestCase Mocks)<block_start>""" Client-related test cases """<class_stmt>Resource<block_start>""" Fake network reader """<def_stmt>__init__ self<block_start>self.pool=[("some" "10.10.10.10") ("data" "20.20.20.20") ("data" "10.10.10.10") (<none> <none>) ]<block_end><def_stmt>read self *args 
**kwargs<block_start><return>self.pool.pop(0)<block_end><block_end><def_stmt>test_config_passed self<block_start>""" Test if the configuration is passed. :return: """<line_sep>config={ssdp.SSDPBase.SIGNATURE:"SUSE Enterprise Server" ssdp.SSDPBase.TIMEOUT:5 ssdp.SSDPBase.PORT:12345 }<line_sep>clnt=self.get_ssdp_discovery_client(**config)<assert_stmt>clnt._config[ssdp.SSDPBase.SIGNATURE]<eq>config[ssdp.SSDPBase.SIGNATURE]<assert_stmt>clnt._config[ssdp.SSDPBase.PORT]<eq>config[ssdp.SSDPBase.PORT]<assert_stmt>clnt._config[ssdp.SSDPBase.TIMEOUT]<eq>config[ssdp.SSDPBase.TIMEOUT]<block_end><def_stmt>test_config_detached self<block_start>""" Test if the passed configuration is not a reference. :return: """<line_sep>config={ssdp.SSDPBase.SIGNATURE:"SUSE Enterprise Server" }<line_sep>clnt=self.get_ssdp_discovery_client(**config)<line_sep>clnt._config["foo"]="bar"<assert_stmt>"foo"<in>clnt._config<assert_stmt>"foo"<not><in>config<block_end><def_stmt>test_query self<block_start>""" Test if client queries the broadcast :return: """<line_sep>config={ssdp.SSDPBase.SIGNATURE:"SUSE Enterprise Server" ssdp.SSDPBase.PORT:4000 }<line_sep>f_time=1111<line_sep>_socket=MagicMock()<with_stmt>patch("salt.utils.ssdp.socket" _socket) patch("salt.utils.ssdp.time.time" MagicMock(return_value=f_time))<block_start>clnt=ssdp.SSDPDiscoveryClient(**config)<line_sep>clnt._query()<assert_stmt>clnt._socket.sendto.called<line_sep>message,target=clnt._socket.sendto.call_args[0]<assert_stmt>message<eq>salt.utils.stringutils.to_bytes("{}{}".format(config[ssdp.SSDPBase.SIGNATURE] f_time))<assert_stmt>target[0]<eq>"<broadcast>"<assert_stmt>target[1]<eq>config[ssdp.SSDPBase.PORT]<block_end><block_end><def_stmt>test_get_masters_map self<block_start>""" Test getting map of the available masters on the network :return: """<line_sep>_socket=MagicMock()<line_sep>response={}<with_stmt>patch("salt.utils.ssdp.socket" 
_socket)<block_start>clnt=ssdp.SSDPDiscoveryClient()<line_sep>clnt._socket.recvfrom=SSDPClientTestCase.Resource().read<line_sep>clnt.log=MagicMock()<line_sep>clnt._collect_masters_map(response=response)<assert_stmt>"10.10.10.10"<in>response<assert_stmt>"20.20.20.20"<in>response<assert_stmt>response["10.10.10.10"]<eq>["some" "data"]<assert_stmt>response["20.20.20.20"]<eq>["data"]<block_end><block_end><def_stmt>test_get_masters_map_error_handling self<block_start>""" Test getting map handles timeout network exception :return: """<line_sep>_socket=MagicMock()<line_sep>response={}<line_sep>error_msg="fake testing timeout just had happened"<with_stmt>patch("salt.utils.ssdp.socket" _socket)<block_start>clnt=ssdp.SSDPDiscoveryClient()<line_sep>clnt._socket.recvfrom=MagicMock(side_effect=Exception(error_msg))<line_sep>clnt.log=MagicMock()<line_sep>clnt._collect_masters_map(response=response)<assert_stmt>clnt.log.error.called<assert_stmt>("Discovery master collection failure"<in>clnt.log.error.call_args[0][0])<assert_stmt>error_msg<eq>str(clnt.log.error.call_args[0][1])<assert_stmt><not>response<block_end><block_end><def_stmt>test_discover_no_masters self<block_start>""" Test discover available master on the network (none found). 
:return: """<line_sep>clnt=self.get_ssdp_discovery_client()<line_sep>clnt._query=MagicMock()<line_sep>clnt._collect_masters_map=MagicMock()<line_sep>clnt.log=MagicMock()<line_sep>clnt.discover()<assert_stmt>clnt.log.info.called<assert_stmt>clnt.log.info.call_args[0][0]<eq>"No master has been discovered."<block_end><def_stmt>test_discover_general_error self<block_start>""" Test discover available master on the network (erroneous found) :return: """<line_sep>_socket=MagicMock()<line_sep>error="Admins on strike due to broken coffee machine"<line_sep>signature=ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]<line_sep>fake_resource=SSDPClientTestCase.Resource()<line_sep>fake_resource.pool=[("{}:E:{}".format(signature error) "10.10.10.10") (<none> <none>) ]<with_stmt>patch("salt.utils.ssdp.socket" _socket)<block_start>clnt=ssdp.SSDPDiscoveryClient()<line_sep>clnt._socket.recvfrom=fake_resource.read<line_sep>clnt._query=MagicMock()<line_sep>clnt.log=MagicMock()<line_sep>clnt.discover()<assert_stmt>len(clnt.log.error.mock_calls)<eq>1<assert_stmt>("Error response from the service publisher"<in>clnt.log.error.call_args[0][0])<assert_stmt>"10.10.10.10"<eq>clnt.log.error.call_args[0][1]<assert_stmt>clnt.log.error.call_args[1]<eq>{}<assert_stmt>clnt.log.error.call_args[0][2]<eq>error<block_end><block_end><def_stmt>test_discover_timestamp_error self<block_start>""" Test discover available master on the network (outdated timestamp) :return: """<line_sep>_socket=MagicMock()<line_sep>error=("We only support a 1200 bps connection. 
Routing timestamp problems on"<concat>" neural net.")<line_sep>signature=ssdp.SSDPBase.DEFAULTS[ssdp.SSDPBase.SIGNATURE]<line_sep>fake_resource=SSDPClientTestCase.Resource()<line_sep>fake_resource.pool=[("{}:E:{}".format(signature error) "10.10.10.10") (<none> <none>) ]<with_stmt>patch("salt.utils.ssdp.socket" _socket)<block_start>clnt=ssdp.SSDPDiscoveryClient()<line_sep>clnt._socket.recvfrom=fake_resource.read<line_sep>clnt._query=MagicMock()<line_sep>clnt.log=MagicMock()<line_sep>clnt.discover()<assert_stmt>len(clnt.log.error.mock_calls)<eq>2<assert_stmt>("Error response from the service publisher"<in>clnt.log.error.mock_calls[0][1][0])<assert_stmt>clnt.log.error.mock_calls[0][1][2]<eq>error<assert_stmt>clnt.log.error.mock_calls[0][2]<eq>{}<assert_stmt>("Publisher sent shifted timestamp"<in>clnt.log.error.mock_calls[1][1][0])<assert_stmt>(clnt.log.error.mock_calls[1][1][1]<eq>clnt.log.error.mock_calls[0][1][1]<eq>"10.10.10.10")<block_end><block_end><block_end>
<import_stmt>time<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>chainer serializers Variable<import_stmt>chainer.functions<as>F<import_stmt>argparse<import_from_stmt>darknet19 *<import_from_stmt>yolov2 *<import_from_stmt>yolov2_grid_prob *<import_from_stmt>yolov2_bbox *<line_sep>n_classes=10<line_sep>n_boxes=5<line_sep>partial_layer=18<def_stmt>copy_conv_layer src dst layers<block_start><for_stmt>i layers<block_start>src_layer=eval("src.conv%d"%i)<line_sep>dst_layer=eval("dst.conv%d"%i)<line_sep>dst_layer.W=src_layer.W<line_sep>dst_layer.b=src_layer.b<block_end><block_end><def_stmt>copy_bias_layer src dst layers<block_start><for_stmt>i layers<block_start>src_layer=eval("src.bias%d"%i)<line_sep>dst_layer=eval("dst.bias%d"%i)<line_sep>dst_layer.b=src_layer.b<block_end><block_end><def_stmt>copy_bn_layer src dst layers<block_start><for_stmt>i layers<block_start>src_layer=eval("src.bn%d"%i)<line_sep>dst_layer=eval("dst.bn%d"%i)<line_sep>dst_layer.N=src_layer.N<line_sep>dst_layer.avg_var=src_layer.avg_var<line_sep>dst_layer.avg_mean=src_layer.avg_mean<line_sep>dst_layer.gamma=src_layer.gamma<line_sep>dst_layer.eps=src_layer.eps<block_end><block_end># load model print("loading original model...")<line_sep>input_weight_file="./backup/darknet19_448_final.model"<line_sep>output_weight_file="./backup/partial.model"<line_sep>model=Darknet19Predictor(Darknet19())<line_sep>serializers.load_hdf5(input_weight_file model)# load saved model yolov2=YOLOv2(n_classes=n_classes n_boxes=n_boxes)<line_sep>copy_conv_layer(model.predictor yolov2 range(1 partial_layer+1))<line_sep>copy_bias_layer(model.predictor yolov2 range(1 partial_layer+1))<line_sep>copy_bn_layer(model.predictor yolov2 range(1 partial_layer+1))<line_sep>model=YOLOv2Predictor(yolov2)<line_sep>print("saving model to %s"%(output_weight_file))<line_sep>serializers.save_hdf5("%s"%(output_weight_file) model)<line_sep>
<import_from_stmt>pl_bolts.models.rl.advantage_actor_critic_model AdvantageActorCritic<import_from_stmt>pl_bolts.models.rl.double_dqn_model DoubleDQN<import_from_stmt>pl_bolts.models.rl.dqn_model DQN<import_from_stmt>pl_bolts.models.rl.dueling_dqn_model DuelingDQN<import_from_stmt>pl_bolts.models.rl.noisy_dqn_model NoisyDQN<import_from_stmt>pl_bolts.models.rl.per_dqn_model PERDQN<import_from_stmt>pl_bolts.models.rl.reinforce_model Reinforce<import_from_stmt>pl_bolts.models.rl.sac_model SAC<import_from_stmt>pl_bolts.models.rl.vanilla_policy_gradient_model VanillaPolicyGradient<line_sep>__all__=["AdvantageActorCritic" "DoubleDQN" "DQN" "DuelingDQN" "NoisyDQN" "PERDQN" "Reinforce" "SAC" "VanillaPolicyGradient" ]<line_sep>
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>unittest<import_from_stmt>telemetry story<import_from_stmt>telemetry.story shared_state<line_sep># pylint: disable=abstract-method <class_stmt>SharedStateBar(shared_state.SharedState)<block_start><pass><block_end><class_stmt>StoryFoo(story.Story)<block_start><def_stmt>__init__ self name='' labels=<none><block_start>super(StoryFoo self).__init__(SharedStateBar name labels)<block_end><block_end><class_stmt>StoryTest(unittest.TestCase)<block_start><def_stmt>testStoriesHaveDifferentIds self<block_start>s0=story.Story(SharedStateBar 'foo')<line_sep>s1=story.Story(SharedStateBar 'bar')<line_sep>self.assertNotEqual(s0.id s1.id)<block_end><def_stmt>testNamelessStoryDisplayName self<block_start>s=StoryFoo()<line_sep>self.assertEquals('StoryFoo' s.display_name)<block_end><def_stmt>testNamedStoryDisplayName self<block_start>s=StoryFoo('Bar')<line_sep>self.assertEquals('Bar' s.display_name)<block_end><def_stmt>testStoryFileSafeName self<block_start>s=StoryFoo('Foo Bar:Baz~0')<line_sep>self.assertEquals('Foo_Bar_Baz_0' s.file_safe_name)<block_end><def_stmt>testNamelessStoryAsDict self<block_start>s=story.Story(SharedStateBar)<line_sep>s_dict=s.AsDict()<line_sep>self.assertEquals(s_dict['id'] s.id)<line_sep>self.assertNotIn('name' s_dict)<block_end><def_stmt>testNamedStoryAsDict self<block_start>s=story.Story(SharedStateBar 'Foo')<line_sep>s_dict=s.AsDict()<line_sep>self.assertEquals(s_dict['id'] s.id)<line_sep>self.assertEquals('Foo' s_dict['name'])<block_end><def_stmt>testMakeJavaScriptDeterministic self<block_start>s=story.Story(SharedStateBar)<line_sep>self.assertTrue(s.make_javascript_deterministic)<line_sep>s=story.Story(SharedStateBar make_javascript_deterministic=<false>)<line_sep>self.assertFalse(s.make_javascript_deterministic)<line_sep>s=story.Story(SharedStateBar 
make_javascript_deterministic=<true>)<line_sep>self.assertTrue(s.make_javascript_deterministic)<block_end><block_end>
""" Rename this file to 'secret.py' once all settings are defined """<line_sep>SECRET_KEY="..."<line_sep>HOSTNAME="example.com"<line_sep>DATABASE_URL="mysql://<user>:<password>@<host>/<database>"<line_sep>AWS_ACCESS_KEY_ID="12345"<line_sep>AWS_SECRET_ACCESS_KEY="12345"<line_sep>
<import_stmt>datetime<import_stmt>numpy<as>np<import_stmt>os<import_stmt>pandas<as>pd<import_stmt>shutil<import_stmt>urllib.request<import_stmt>zipfile<line_sep>__all__=['fetch_ml_ratings' ]<line_sep>VARIANTS={'100k':{'filename':'u.data' 'sep':'\t'} '1m':{'filename':'ratings.dat' 'sep':r'::'} '10m':{'filename':'ratings.dat' 'sep':r'::'} '20m':{'filename':'ratings.csv' 'sep':','}}<def_stmt>fetch_ml_ratings data_dir_path=<none> variant='20m' verbose=<false><block_start>"""Fetches MovieLens ratings dataset. Parameters ---------- data_dir_path : str, default=None Explicit data directory path to MovieLens ratings file. variant : {'100k', '1m', '10m', '20m'}, default='20m' Movie lens dataset variant. verbose : bool, default=False Whether or not downloading and unzipping the dataset with verbose. Returns ------- df : pandas.DataFrame The MovieLens ratings dataset. """<if_stmt>data_dir_path<is><none><block_start>data_dir_path=_get_data_dir_path(data_dir_path)<line_sep>dirname='ml-'+variant<line_sep>filename=VARIANTS[variant]['filename']<line_sep>csv_path=os.path.join(data_dir_path dirname filename)<line_sep>zip_path=os.path.join(data_dir_path dirname)+'.zip'<line_sep>url='http://files.grouplens.org/datasets/movielens/ml-'+variant+'.zip'<block_end><else_stmt><block_start>csv_path=data_dir_path<block_end><if_stmt>os.path.exists(csv_path)# Return data loaded into a DataFrame <block_start>df=_ml_ratings_csv_to_df(csv_path variant)<line_sep><return>df<block_end><elif_stmt>os.path.exists(zip_path)# Unzip file before calling back itself <block_start><if_stmt>verbose<block_start>print('Unzipping data...')<block_end><with_stmt>zipfile.ZipFile(zip_path 'r')<as>zf<block_start>zf.extractall(data_dir_path)<block_end><if_stmt>variant<eq>'10m'<block_start>os.rename(os.path.join(data_dir_path 'ml-10M100K') os.path.join(data_dir_path dirname))<block_end>os.remove(zip_path)<line_sep><return>fetch_ml_ratings(variant=variant verbose=verbose)<block_end><else_stmt># Download the ZIP file before 
calling back itself <block_start><if_stmt>verbose<block_start>print('Downloading data...')<block_end><with_stmt>urllib.request.urlopen(url)<as>r open(zip_path 'wb')<as>f<block_start>shutil.copyfileobj(r f)<block_end><return>fetch_ml_ratings(variant=variant verbose=verbose)<block_end><block_end><def_stmt>_get_data_dir_path data_dir_path=<none><block_start>"""Returns the path of the funk-svd data directory. This folder is used to store large datasets to avoid downloading them several times. By default the data dir is set to a folder named 'funk_svd_data' in the user home folder. Alternatively, it can be set by the `FUNK_SVD_DATA` environment variable or programmatically by giving an explicit `data_dir_path`. If the folder does not already exist, it is automatically created. Parameters ---------- data_dir_path : str, default=None Explicit data directory path for large datasets. Returns ------- data_dir_path: str Explicit data directory path for large datasets. """<if_stmt>data_dir_path<is><none><block_start>default=os.path.join('~' 'funk_svd_data')<line_sep>data_dir_path=os.environ.get('FUNK_SVD_DATA' default=default)<line_sep>data_dir_path=os.path.expanduser(data_dir_path)<block_end><if_stmt><not>os.path.exists(data_dir_path)<block_start>os.makedirs(data_dir_path)<block_end><return>data_dir_path<block_end><def_stmt>_ml_ratings_csv_to_df csv_path variant<block_start>names=['u_id' 'i_id' 'rating' 'timestamp']<line_sep>dtype={'u_id':np.uint32 'i_id':np.uint32 'rating':np.float64}<def_stmt>date_parser time<block_start><return>datetime.datetime.fromtimestamp(float(time))<block_end>df=pd.read_csv(csv_path names=names dtype=dtype header=0 sep=VARIANTS[variant]['sep'] parse_dates=['timestamp'] date_parser=date_parser engine='python')<line_sep>df.sort_values(by='timestamp' inplace=<true>)<line_sep>df.reset_index(drop=<true> inplace=<true>)<line_sep><return>df<block_end>
<import_stmt>os<import_stmt>scipy<as>sp<import_stmt>scipy.misc<import_stmt>imreg_dft<as>ird<line_sep>basedir=os.path.join('..' 'examples')<line_sep># the TEMPLATE im0=sp.misc.imread(os.path.join(basedir "sample1.png") <true>)<line_sep># the image to be transformed im1=sp.misc.imread(os.path.join(basedir "sample2.png") <true>)<line_sep>result=ird.translation(im0 im1)<line_sep>tvec=result["tvec"].round(4)<line_sep># the Transformed IMaGe. timg=ird.transform_img(im1 tvec=tvec)<line_sep># Maybe we don't want to show plots all the time <if_stmt>os.environ.get("IMSHOW" "yes")<eq>"yes"<block_start><import_stmt>matplotlib.pyplot<as>plt<line_sep>ird.imshow(im0 im1 timg)<line_sep>plt.show()<block_end>print("Translation is {}, success rate {:.4g}".format(tuple(tvec) result["success"]))<line_sep>
<import_stmt>os<line_sep>DNS_OVER_TLS_PORT=853<line_sep>CHUNK_SIZE=128<line_sep>CHECK_CERT=<true># We recommend using valid certificates. An invalid certificate (self-signed) might trigger alerts on some systems. LOCAL_HOST='localhost'<line_sep>MAX_BUFFER=4096<line_sep>MAX_CLIENTS=5<if_stmt>os.getcwd()<eq>'DNSoTLS'<block_start>CERT_FILE='cert.ccc'<block_end><elif_stmt>os.getcwd()<eq>'PyExfil'<block_start>CERT_FILE='pyexfil/Comm/DNSoTLS/cert.ccc'<block_end><else_stmt><block_start>CERT_FILE='pyexfil/Comm/DNSoTLS/cert.ccc'<block_end>
""" Given an integer n, count the total number of digit 1 appearing in all non-negative integers less than or equal to n. Example: Input: 13 Output: 6 Explanation: Digit 1 occurred in the following numbers: 1, 10, 11, 12, 13. """<class_stmt>Solution<block_start><def_stmt>countDigitOne self n:int<arrow>int<block_start>cnt=0<line_sep>mark=1<while_stmt>n<ge>mark<block_start>c,r=divmod(n (mark<times>10))<line_sep>cnt<augadd>c<times>mark<if_stmt>r<ge>mark<block_start>cnt<augadd>min(r-mark+1 mark)<block_end>mark<augmul>10<block_end><return>cnt<block_end><block_end>
# Generated by Django 2.1.7 on 2019-02-21 15:50 <import_stmt>django.contrib.postgres.fields.jsonb<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('modelchimp' '0045_remove_machinelearningmodel_deep_learning_parameters') ]<line_sep>operations=[migrations.CreateModel(name='ExperimentAsset' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('meta_dict' django.contrib.postgres.fields.jsonb.JSONField(null=<true>)) ('asset' models.FileField(null=<true> upload_to='asset/')) ('custom_file_name' models.CharField(blank=<true> default='' max_length=200 null=<true>)) ('date_created' models.DateTimeField(auto_now_add=<true>)) ('date_modified' models.DateTimeField(auto_now=<true>)) ('ml_model' models.ForeignKey(blank=<true> on_delete=django.db.models.deletion.CASCADE related_name='asset_experiment' to='modelchimp.MachineLearningModel')) ('project' models.ForeignKey(blank=<true> on_delete=django.db.models.deletion.CASCADE related_name='asset_project' to='modelchimp.Project')) ] ) ]<block_end>
# source ./bin/activate <import_stmt>subprocess<as>cmd<line_sep>response=input("[d]ev, [t]est?\n")<if_stmt>response.startswith("t")<block_start>cp=cmd.run(f"pytest" check=<true> shell=<true>)<block_end><else_stmt><block_start>cp=cmd.run(f"uvicorn main:app --reload" check=<true> shell=<true>)<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. fb_native=struct(android_aar=native.android_aar android_app_modularity=native.android_app_modularity android_binary=native.android_binary android_build_config=native.android_build_config android_bundle=native.android_bundle android_instrumentation_apk=native.android_instrumentation_apk android_instrumentation_test=native.android_instrumentation_test android_library=native.android_library android_manifest=native.android_manifest android_prebuilt_aar=native.android_prebuilt_aar android_resource=native.android_resource apk_genrule=native.apk_genrule apple_asset_catalog=native.apple_asset_catalog apple_binary=native.apple_binary apple_bundle=native.apple_bundle apple_library=native.apple_library apple_package=native.apple_package apple_resource=native.apple_resource apple_test=native.apple_test cgo_library=native.cgo_library command_alias=native.command_alias config_setting=native.config_setting constraint_setting=native.constraint_setting constraint_value=native.constraint_value core_data_model=native.core_data_model csharp_library=native.csharp_library cxx_binary=native.cxx_binary cxx_genrule=native.cxx_genrule cxx_library=native.cxx_library cxx_lua_extension=native.cxx_lua_extension cxx_precompiled_header=native.cxx_precompiled_header cxx_python_extension=native.cxx_python_extension cxx_test=native.cxx_test d_binary=native.d_binary d_library=native.d_library d_test=native.d_test export_file=native.export_file filegroup=native.filegroup gen_aidl=native.gen_aidl genrule=native.genrule go_binary=native.go_binary go_library=native.go_library go_test=native.go_test groovy_library=native.groovy_library groovy_test=native.groovy_test gwt_binary=native.gwt_binary halide_library=native.halide_library haskell_binary=native.haskell_binary haskell_ghci=native.haskell_ghci 
haskell_haddock=native.haskell_haddock haskell_library=native.haskell_library haskell_prebuilt_library=native.haskell_prebuilt_library http_archive=native.http_archive http_file=native.http_file jar_genrule=native.jar_genrule java_annotation_processor=native.java_annotation_processor java_binary=native.java_binary java_library=native.java_library java_test=native.java_test js_bundle=native.js_bundle js_bundle_genrule=native.js_bundle_genrule js_library=native.js_library keystore=native.keystore kotlin_library=native.kotlin_library kotlin_test=native.kotlin_test lua_binary=native.lua_binary lua_library=native.lua_library ndk_library=native.ndk_library ocaml_binary=native.ocaml_binary ocaml_library=native.ocaml_library platform=native.platform prebuilt_apple_framework=native.prebuilt_apple_framework prebuilt_cxx_library=native.prebuilt_cxx_library prebuilt_cxx_library_group=native.prebuilt_cxx_library_group prebuilt_dotnet_library=native.prebuilt_dotnet_library prebuilt_go_library=native.prebuilt_go_library prebuilt_jar=native.prebuilt_jar prebuilt_native_library=native.prebuilt_native_library prebuilt_ocaml_library=native.prebuilt_ocaml_library prebuilt_python_library=native.prebuilt_python_library prebuilt_rust_library=native.prebuilt_rust_library python_binary=native.python_binary python_library=native.python_library python_test=native.python_test remote_file=native.remote_file robolectric_test=native.robolectric_test rust_binary=native.rust_binary rust_library=native.rust_library rust_test=native.rust_test scala_library=native.scala_library scala_test=native.scala_test scene_kit_assets=native.scene_kit_assets sh_binary=native.sh_binary sh_test=native.sh_test swift_library=native.swift_library test_suite=native.test_suite versioned_alias=native.versioned_alias worker_tool=native.worker_tool xcode_postbuild_script=native.xcode_postbuild_script xcode_prebuild_script=native.xcode_prebuild_script xcode_workspace_config=native.xcode_workspace_config 
zip_file=native.zip_file )<line_sep>
<import_stmt>http.client<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>ssl<import_stmt>threading<import_stmt>urllib.parse<import_from_stmt>base64 b64encode<import_from_stmt>datetime datetime<import_from_stmt>http.client HTTPConnection HTTPResponse<import_from_stmt>typing Optional Dict<import_from_stmt>pyctuator.auth Auth BasicAuth<line_sep># pylint: disable=too-many-instance-attributes <class_stmt>BootAdminRegistrationHandler<block_start><def_stmt>__init__ self registration_url:str registration_auth:Optional[Auth] application_name:str pyctuator_base_url:str start_time:datetime service_url:str registration_interval_sec:float application_metadata:Optional[dict]=<none> ssl_context:Optional[ssl.SSLContext]=<none> <arrow><none><block_start>self.registration_url=registration_url<line_sep>self.registration_auth=registration_auth<line_sep>self.application_name=application_name<line_sep>self.pyctuator_base_url=pyctuator_base_url<line_sep>self.start_time=start_time<line_sep>self.service_url=service_url<if>service_url.endswith("/")<else>service_url+"/"<line_sep>self.registration_interval_sec=registration_interval_sec<line_sep>self.instance_id=<none><line_sep>self.application_metadata=application_metadata<if>application_metadata<else>{}<line_sep>self.ssl_context=ssl_context<line_sep>self.should_continue_registration_schedule:bool=<false><line_sep>self.disable_certificate_validation_for_https_registration:bool=os.getenv("PYCTUATOR_REGISTRATION_NO_CERT")<is><not><none><block_end><def_stmt>_schedule_next_registration self registration_interval_sec:float<arrow><none><block_start>timer=threading.Timer(registration_interval_sec self._register_with_admin_server [])<line_sep>timer.setDaemon(<true>)<line_sep>timer.start()<block_end><def_stmt>_register_with_admin_server self<arrow><none># When waking up, make sure registration is still needed 
<block_start><if_stmt><not>self.should_continue_registration_schedule<block_start><return><block_end>registration_data={"name":self.application_name "managementUrl":self.pyctuator_base_url "healthUrl":f"{self.pyctuator_base_url}/health" "serviceUrl":self.service_url "metadata":{"startup":self.start_time.isoformat() **self.application_metadata}}<line_sep>logging.debug("Trying to post registration data to %s: %s" self.registration_url registration_data)<line_sep>conn:Optional[HTTPConnection]=<none><try_stmt><block_start>headers={"Content-type":"application/json"}<line_sep>self.authenticate(headers)<line_sep>response=self._http_request(self.registration_url "POST" headers json.dumps(registration_data))<if_stmt>response.status<l>200<or>response.status<ge>300<block_start>logging.warning("Failed registering with boot-admin, got %s - %s" response.status response.read())<block_end><else_stmt><block_start>self.instance_id=json.loads(response.read().decode('utf-8'))["id"]<block_end><block_end><except_stmt>Exception<as>e# pylint: disable=broad-except <block_start>logging.warning("Failed registering with boot-admin, %s (%s)" e type(e))<block_end><finally_stmt><block_start><if_stmt>conn<block_start>conn.close()<block_end><block_end># Schedule the next registration unless asked to abort <if_stmt>self.should_continue_registration_schedule<block_start>self._schedule_next_registration(self.registration_interval_sec)<block_end><block_end><def_stmt>deregister_from_admin_server self<arrow><none><block_start><if_stmt>self.instance_id<is><none><block_start><return><block_end>headers={}<line_sep>self.authenticate(headers)<line_sep>deregistration_url=f"{self.registration_url}/{self.instance_id}"<line_sep>logging.info("Deregistering from %s" deregistration_url)<line_sep>conn:Optional[HTTPConnection]=<none><try_stmt><block_start>response=self._http_request(deregistration_url "DELETE" headers)<if_stmt>response.status<l>200<or>response.status<ge>300<block_start>logging.warning("Failed 
deregistering from boot-admin, got %s - %s" response.status response.read())<block_end><block_end><except_stmt>Exception<as>e# pylint: disable=broad-except <block_start>logging.warning("Failed deregistering from boot-admin, %s (%s)" e type(e))<block_end><finally_stmt><block_start><if_stmt>conn<block_start>conn.close()<block_end><block_end><block_end><def_stmt>authenticate self headers:Dict<arrow><none><block_start><if_stmt>isinstance(self.registration_auth BasicAuth)<block_start>password=self.registration_auth.password<if>self.registration_auth.password<else>""<line_sep>authorization_string=self.registration_auth.username+":"+password<line_sep>encoded_authorization:str=b64encode(bytes(authorization_string "utf-8")).decode("ascii")<line_sep>headers["Authorization"]=f"Basic {encoded_authorization}"<block_end><block_end><def_stmt>start self initial_delay_sec:float=<none><arrow><none><block_start>logging.info("Starting recurring registration of %s with %s" self.pyctuator_base_url self.registration_url)<line_sep>self.should_continue_registration_schedule=<true><line_sep>self._schedule_next_registration(initial_delay_sec<or>self.registration_interval_sec)<block_end><def_stmt>stop self<arrow><none><block_start>logging.info("Stopping recurring registration")<line_sep>self.should_continue_registration_schedule=<false><block_end><def_stmt>_http_request self url:str method:str headers:Dict[str str] body:Optional[str]=<none><arrow>HTTPResponse<block_start>url_parts=urllib.parse.urlsplit(url)<if_stmt>url_parts.scheme<eq>"http"<block_start>conn=http.client.HTTPConnection(url_parts.hostname url_parts.port)<block_end><elif_stmt>url_parts.scheme<eq>"https"<block_start>context=self.ssl_context<if_stmt><not>context<and>self.disable_certificate_validation_for_https_registration<block_start>context=ssl.SSLContext()<line_sep>context.verify_mode=ssl.CERT_NONE<block_end>conn=http.client.HTTPSConnection(url_parts.hostname url_parts.port 
context=context)<block_end><else_stmt><block_start><raise>ValueError(f"Unknown scheme in {url}")<block_end>conn.request(method url_parts.path body=body headers=headers )<line_sep><return>conn.getresponse()<block_end><block_end>
import torch
from torch import nn
from torchvision.models import vgg11, vgg16, resnet34

""" Code heavily adapted from ternaus robot-surgery-segmentation
https://github.com/ternaus/robot-surgery-segmentation """


class MultiClass_Resnet34(nn.Module):
    """U-Net-style segmentation network with a ResNet-34 encoder.

    The (optionally ImageNet-pretrained) ResNet-34 layers form the
    contracting path; the expanding path is built from
    ``MultiClass_DecoderBlock`` modules with skip connections
    (channel-wise ``torch.cat``) from the encoder stages.

    Args:
        num_classes: number of output channels of the final 1x1 conv.
        num_filters: base channel width of the decoder.
        pretrained: load ImageNet weights for the encoder (downloads on
            first use when True).
        is_deconv: if True the decoder upsamples with ConvTranspose2d,
            otherwise with bilinear nn.Upsample.
    """

    def __init__(self, num_classes=1, num_filters=32, pretrained=True, is_deconv=False):
        super().__init__()
        self.num_classes = num_classes
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = resnet34(pretrained=pretrained)
        self.relu = nn.ReLU(inplace=True)
        # conv1 bundles the ResNet stem (conv + bn + relu) with a pool, so
        # forward() below does NOT pool between encoder stages (the ResNet
        # layers stride internally).
        self.conv1 = nn.Sequential(self.encoder.conv1,
                                   self.encoder.bn1,
                                   self.encoder.relu,
                                   self.pool)
        self.conv2 = self.encoder.layer1
        self.conv3 = self.encoder.layer2
        self.conv4 = self.encoder.layer3
        self.conv5 = self.encoder.layer4
        # Decoder: input channels = encoder skip channels + previous decoder
        # output channels (except center/dec1/dec0, which take no skip).
        self.center = MultiClass_DecoderBlock(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec5 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec4 = MultiClass_DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec3 = MultiClass_DecoderBlock(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
        self.dec2 = MultiClass_DecoderBlock(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
        self.dec1 = MultiClass_DecoderBlock(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
        self.dec0 = MultiClass_ConvRelu(num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)

    def forward(self, x):
        """Return raw (un-activated) per-class logits for input batch x."""
        # Encoder path.
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        # Bottleneck, then decoder with skip connections.
        center = self.center(self.pool(conv5))
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        # dec1/dec0 take no skip connection.
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)
        x_out = self.final(dec0)
        return x_out


class MultiClass_UNet_VGG16(nn.Module):
    """U-Net with a VGG-16 encoder (``vgg16().features`` convolution stack).

    Args:
        num_classes: number of output channels of the final 1x1 conv.
        num_filters: base channel width of the decoder.
        pretrained: load ImageNet weights for the encoder.
    """

    def __init__(self, num_classes=1, num_filters=32, pretrained=True):
        super().__init__()
        self.num_classes = num_classes
        self.encoder = vgg16(pretrained=pretrained).features
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU(inplace=True)
        # Indices below address the conv layers inside vgg16().features;
        # a shared (inplace) ReLU is interleaved between them.
        self.conv1 = nn.Sequential(self.encoder[0], self.relu,
                                   self.encoder[2], self.relu)
        self.conv2 = nn.Sequential(self.encoder[5], self.relu,
                                   self.encoder[7], self.relu)
        self.conv3 = nn.Sequential(self.encoder[10], self.relu,
                                   self.encoder[12], self.relu,
                                   self.encoder[14], self.relu)
        self.conv4 = nn.Sequential(self.encoder[17], self.relu,
                                   self.encoder[19], self.relu,
                                   self.encoder[21], self.relu)
        self.conv5 = nn.Sequential(self.encoder[24], self.relu,
                                   self.encoder[26], self.relu,
                                   self.encoder[28], self.relu)
        # Decoder blocks use the default is_deconv=True (ConvTranspose2d).
        self.center = MultiClass_DecoderBlock(512, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
        self.dec4 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
        self.dec3 = MultiClass_DecoderBlock(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2)
        self.dec2 = MultiClass_DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters)
        self.dec1 = MultiClass_ConvRelu(64 + num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)

    def forward(self, x):
        """Return raw per-class logits for input batch x."""
        # Encoder path: pool between stages (VGG features hold no stride).
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))
        # Bottleneck, then decoder with skip connections at every stage.
        center = self.center(self.pool(conv5))
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))
        x_out = self.final(dec1)
        return x_out


class MultiClass_UNet_VGG11(nn.Module):
    """U-Net with a VGG-11 encoder (``vgg11().features`` convolution stack).

    Args:
        num_classes: number of output channels of the final 1x1 conv.
        num_filters: base channel width of the decoder.
        pretrained: load ImageNet weights for the encoder.
    """

    def __init__(self, num_classes=1, num_filters=32, pretrained=True):
        super().__init__()
        self.num_classes = num_classes
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = vgg11(pretrained=pretrained).features
        self.relu = nn.ReLU(inplace=True)
        # VGG-11 has one conv in the first two stages, two in the rest.
        self.conv1 = nn.Sequential(self.encoder[0], self.relu)
        self.conv2 = nn.Sequential(self.encoder[3], self.relu)
        self.conv3 = nn.Sequential(self.encoder[6], self.relu,
                                   self.encoder[8], self.relu,
                                   )
        self.conv4 = nn.Sequential(self.encoder[11], self.relu,
                                   self.encoder[13], self.relu,
                                   )
        self.conv5 = nn.Sequential(self.encoder[16], self.relu,
                                   self.encoder[18], self.relu,
                                   )
        # NOTE(review): unlike the VGG16 variant, the center block input here
        # is 256 + num_filters * 8 -- confirm against the upstream ternausnet
        # source if in doubt; reproduced as found.
        self.center = MultiClass_DecoderBlock(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
        self.dec5 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv=True)
        self.dec4 = MultiClass_DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 4, is_deconv=True)
        self.dec3 = MultiClass_DecoderBlock(256 + num_filters * 4, num_filters * 4 * 2, num_filters * 2, is_deconv=True)
        self.dec2 = MultiClass_DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv=True)
        self.dec1 = MultiClass_ConvRelu(64 + num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)

    def forward(self, x):
        """Return raw per-class logits for input batch x."""
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))
        center = self.center(self.pool(conv5))
        # Decoder with skip connections at every stage.
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))
        x_out = self.final(dec1)
        return x_out


class MultiClass_LinkNet34(nn.Module):
    """LinkNet with a ResNet-34 encoder.

    Unlike the U-Net variants above, skip connections are merged by
    element-wise addition (LinkNet style) rather than concatenation.

    Args:
        num_classes: number of output channels of the final conv.
        num_channels: input channels; only 3 (RGB) is supported because the
            pretrained ResNet stem expects 3 channels.
        pretrained: load ImageNet weights for the encoder.
    """

    def __init__(self, num_classes=1, num_channels=3, pretrained=True):
        super().__init__()
        assert num_channels == 3
        self.num_classes = num_classes
        filters = [64, 128, 256, 512]
        resnet = resnet34(pretrained=pretrained)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4

        # Decoder
        self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])
        self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])
        self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])
        self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])

        # Final Classifier
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)

    # noinspection PyCallingNonCallable
    def forward(self, x):
        """Return raw per-class logits for input batch x."""
        # Encoder
        x = self.firstconv(x)
        x = self.firstbn(x)
        x = self.firstrelu(x)
        x = self.firstmaxpool(x)
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)
        e4 = self.encoder4(e3)

        # Decoder with Skip Connections (additive, LinkNet style)
        d4 = self.decoder4(e4) + e3
        d3 = self.decoder3(d4) + e2
        d2 = self.decoder2(d3) + e1
        d1 = self.decoder1(d2)

        # Final Classification
        f1 = self.finaldeconv1(d1)
        f2 = self.finalrelu1(f1)
        f3 = self.finalconv2(f2)
        f4 = self.finalrelu2(f3)
        f5 = self.finalconv3(f4)
        x_out = f5
        return x_out


class MultiClass_ConvRelu(nn.Module):
    """3x3 same-padding convolution followed by an in-place ReLU."""

    def __init__(self, in_, out):
        super().__init__()
        self.conv = nn.Conv2d(in_, out, 3, padding=1)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.activation(x)
        return x


class MultiClass_DecoderBlock(nn.Module):
    """Decoder stage that doubles spatial resolution.

    Args:
        in_channels: input channels (skip + previous decoder output).
        middle_channels: width of the intermediate conv.
        out_channels: output channels.
        is_deconv: if True upsample with a stride-2 ConvTranspose2d;
            otherwise with bilinear nn.Upsample followed by two convs.
    """

    def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
        super().__init__()
        self.in_channels = in_channels

        if is_deconv:
            self.block = nn.Sequential(
                MultiClass_ConvRelu(in_channels, middle_channels),
                nn.ConvTranspose2d(middle_channels, out_channels,
                                   kernel_size=4, stride=2, padding=1),
                nn.ReLU(inplace=True))
        else:
            self.block = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear'),
                MultiClass_ConvRelu(in_channels, middle_channels),
                MultiClass_ConvRelu(middle_channels, out_channels),
            )

    def forward(self, x):
        return self.block(x)


class DecoderBlockLinkNet(nn.Module):
    """LinkNet decoder block: 1x1 reduce -> 2x upsample deconv -> 1x1 expand,
    each followed by BatchNorm and ReLU."""

    def __init__(self, in_channels, n_filters):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)

        # B, C, H, W -> B, C/4, H, W
        self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
        self.norm1 = nn.BatchNorm2d(in_channels // 4)

        # B, C/4, H, W -> B, C/4, 2 * H, 2 * W
        self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4,
                                          kernel_size=4, stride=2, padding=1,
                                          output_padding=0)
        self.norm2 = nn.BatchNorm2d(in_channels // 4)

        # B, C/4, H, W -> B, C, H, W
        self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
        self.norm3 = nn.BatchNorm2d(n_filters)

    def forward(self, x):
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.deconv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.norm3(x)
        x = self.relu(x)
        return x
from shadowlands.sl_dapp import SLDapp, SLFrame
import pyperclip, os
import schedule
from shadowlands.tui.debug import debug
import pdb


class NetworkConnection(SLDapp):
    """Dapp that lets the user choose an Ethereum node connection strategy
    and then (re)establishes the node connection."""

    def initialize(self):
        # SLDapp entry point: open the strategy picker.
        self.add_sl_frame(NetworkStrategies(self, 10, 26, title="Network Options"))
        self.connection_strategy = None

    def attempt_connection(self):
        # NOTE(review): self.conn_fn and self.args are never assigned in this
        # module -- presumably set elsewhere before this is called; confirm
        # against callers.
        fn = self._interface.node.__getattribute__(self.conn_fn)
        # Stop the node heartbeat thread while the connection is switched.
        self._interface.node.thread_shutdown = True
        self._interface.node.heartbeat_thread.join()
        self._interface.node.thread_shutdown = False
        try:
            if len(self.args) > 0:
                return fn(self.args)
            else:
                return fn()
        except StaleBlockchain:
            # NOTE(review): StaleBlockchain and MessageDialog are not imported
            # in this module; this handler would itself raise NameError if
            # reached -- verify imports in the full file.
            self._scene.add_effect(
                MessageDialog(self._screen, "Stale blockchain on selected Node"))
            return
        # NOTE(review): unreachable -- every path above returns first.
        self._interface.node.start_heartbeat_thread()


class NetworkStrategies(SLFrame):
    """Menu frame listing the available connection strategies; selecting one
    opens the matching detail frame (or connects directly)."""

    def initialize(self):
        options = [
            ('Local node', 'connect_w3_local'),
            ('Custom infura', 'connect_w3_custom_infura'),
            ('Custom http', 'connect_w3_custom_http'),
            ('Custom websocket', 'connect_w3_custom_websocket'),
            ('Custom ipc', 'connect_w3_custom_ipc'),
        ]
        self.listbox_value = self.add_listbox(
            5, options, on_select=self._select
            #default_value=self.dapp.config.connection_strategy
        )
        self.add_button(self.close, "Cancel")

    def _select(self):
        # The listbox yields the strategy's node-method name.
        connect_fn = self.listbox_value()
        self.dapp.connection_strategy = connect_fn
        if connect_fn == 'connect_w3_custom_http':
            self.dapp.add_sl_frame(CustomHttpUri(self.dapp, 5, 30, title="Custom Http URI"))
        elif connect_fn == 'connect_w3_custom_ipc':
            self.dapp.add_sl_frame(CustomIpc(self.dapp, 5, 30, title="Custom IPC path"))
        elif connect_fn == 'connect_w3_custom_websocket':
            self.dapp.add_sl_frame(CustomWebsocket(self.dapp, 5, 30, title="Custom Websocket URI"))
        elif connect_fn == 'connect_w3_custom_infura':
            self.dapp.add_sl_frame(CustomInfura(self.dapp, 12, 45, title="Custom Infura Credentials"))
            # NOTE(review): only this branch closes the menu frame; the other
            # branches leave it open -- confirm whether that is intentional.
            self.close()


class CustomInfura(SLFrame):
    """Frame collecting Infura project credentials, prefilled from the
    WEB3_INFURA_* environment variables."""

    def initialize(self):
        self.add_divider()
        self.add_label(" WEB3_INFURA_PROJECT_ID")
        self.id_value = self.add_textbox(
            '', default_value=os.environ.get('WEB3_INFURA_PROJECT_ID'))
        self.add_label(" WEB3_INFURA_API_SECRET")
        self.secret_value = self.add_textbox(
            '', default_value=os.environ.get('WEB3_INFURA_API_SECRET'))
        self.add_button_row([
            ("Connect", self._connect, 0),
            ("Cancel", self.close, 3)
        ])

    def _connect(self):
        # NOTE(review): these two locals are read again below via the same
        # accessors and are otherwise unused.
        id_value = self.id_value()
        secret_value = self.secret_value()
        self.dapp.config.connection_args = (self.id_value(), self.secret_value())
        self.dapp.config.connection_strategy = self.dapp.connection_strategy
        #debug(); pdb.set_trace()
        # NOTE(review): once() is not part of the PyPI `schedule` API --
        # presumably a project-local scheduler; verify.
        schedule.once().do(self.dapp.node.poll)
        self.close()


class CustomHttpUri(SLFrame):
    """Frame collecting a custom HTTP provider URI."""

    def initialize(self):
        self.add_label("Ex: http://192.168.1.150:8545")
        self.text_value = self.add_textbox()
        self.add_button(self.close, "Cancel")


class CustomIpc(SLFrame):
    """Frame collecting a custom IPC path."""

    def initialize(self):
        # NOTE(review): example text shows an HTTP URI for an IPC path --
        # looks copy-pasted, but it is a runtime string so it is left as-is.
        self.add_label("Ex: http://192.168.1.150:8545")
        self.text_value = self.add_textbox()
        self.add_button(self.close, "Cancel")


class CustomWebsocket(SLFrame):
    """Frame collecting a custom websocket URI."""

    def initialize(self):
        # NOTE(review): example text shows an HTTP URI for a websocket --
        # looks copy-pasted, but it is a runtime string so it is left as-is.
        self.add_label("Ex: http://192.168.1.150:8545")
        self.text_value = self.add_textbox()
        self.add_button(self.close, "Cancel")
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements.  See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership.  The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License.  You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

# Miscellaneous helpers for Hadoop-on-Demand (HOD).  Python 2 code.
import errno, sys, os, traceback, stat, socket, re, warnings, signal

from hodlib.Common.tcp import tcpSocket, tcpError
from hodlib.Common.threads import simpleCommand

# Bit positions used by get_perms() for the setuid/setgid/sticky digit.
setUGV = {'S_ISUID': 2, 'S_ISGID': 1, 'S_ISVTX': 0}
reEscapeSeq = r"\\(.)?"
reEscapeSeq = re.compile(reEscapeSeq)

HOD_INTERRUPTED_CODE = 127
HOD_INTERRUPTED_MESG = "Hod interrupted. Cleaning up and exiting"
TORQUE_USER_LIMITS_COMMENT_FIELD = "User-limits exceeded. " + \
      "Requested:([0-9]*) Used:([0-9]*) MaxLimit:([0-9]*)"
TORQUE_USER_LIMITS_EXCEEDED_MSG = "Requested number of nodes exceeded " + \
                                  "maximum user limits. "

class AlarmException(Exception):
  # Raised on SIGALRM timeouts; carries a plain message string.
  def __init__(self, msg=''):
    self.message = msg
    Exception.__init__(self, msg)

  def __repr__(self):
    return self.message

def isProcessRunning(pid):
  '''Check if a process is running, by sending it a 0 signal, and checking for errors'''
  # This method is documented in some email threads on the python mailing list.
  # For e.g.: http://mail.python.org/pipermail/python-list/2002-May/144522.html
  try:
    os.kill(pid, 0)
    return True
  except OSError, err:
    # EPERM means the process exists but belongs to another user.
    return err.errno == errno.EPERM

def untar(file, targetDir):
  # Extract a gzipped tarball into targetDir; returns True on success.
  status = False
  command = 'tar -C %s -zxf %s' % (targetDir, file)
  commandObj = simpleCommand('untar', command)
  commandObj.start()
  commandObj.wait()
  commandObj.join()
  if commandObj.exit_code() == 0:
    status = True

  return status

def tar(tarFile, tarDirectory, tarList):
  # Create a gzipped tarball of tarList (paths relative to tarDirectory).
  # Returns True on success, or the command's exit status string on failure.
  currentDir = os.getcwd()
  os.chdir(tarDirectory)
  status = False
  command = 'tar -czf %s ' % (tarFile)

  for file in tarList:
    command = "%s%s " % (command, file)

  commandObj = simpleCommand('tar', command)
  commandObj.start()
  commandObj.wait()
  commandObj.join()

  if commandObj.exit_code() == 0:
    status = True
  else:
    status = commandObj.exit_status_string()

  # Restore the working directory whatever the outcome.
  os.chdir(currentDir)

  return status

def to_http_url(list):
  """convert [hostname, port] to a http url"""
  str = ''
  str = "http://%s:%s" % (list[0], list[1])

  return str

def get_exception_string():
  # Format the current exception's full traceback as a single string.
  (type, value, tb) = sys.exc_info()
  exceptList = traceback.format_exception(type, value, tb)
  exceptString = ''
  for line in exceptList:
    exceptString = "%s%s" % (exceptString, line)

  return exceptString

def get_exception_error_string():
  # One-line "<type> <value>" description of the current exception.
  (type, value, tb) = sys.exc_info()
  if value:
    exceptString = "%s %s" % (type, value)
  else:
    exceptString = type

  return exceptString

def check_timestamp(timeStamp):
  """ Checks the validity of a timeStamp.

      timeStamp - (YYYY-MM-DD HH:MM:SS in UTC)

      returns True or False
  """
  isValid = True

  try:
    # NOTE(review): `time` is not imported in this chunk -- unless it is
    # imported elsewhere in the file, the NameError is swallowed by the
    # bare except and this always returns False.  Confirm.
    timeStruct = time.strptime(timeStamp, "%Y-%m-%d %H:%M:%S")
  except:
    isValid = False

  return isValid

def sig_wrapper(sigNum, handler, *args):
  # Adapt a signal callback to a handler that may or may not take args.
  if args:
    handler(args)
  else:
    handler()

def get_perms(filename):
  # Return the file's permissions as a 4-digit octal-style string,
  # e.g. "0755"; the leading digit encodes setuid/setgid/sticky bits.
  mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE])
  permsString = ''
  permSet = 0
  place = 2
  for who in "USR", "GRP", "OTH":
    for what in "R", "W", "X":
      # Compose the stat flag name, e.g. S_IRUSR, and test it.
      if mode & getattr(stat, "S_I" + what + who):
        permSet = permSet + 2**place
      place = place - 1
    permsString = "%s%s" % (permsString, permSet)
    permSet = 0
    place = 2

  permSet = 0
  for permFlag in setUGV.keys():
    if mode & getattr(stat, permFlag):
      permSet = permSet + 2**setUGV[permFlag]

  permsString = "%s%s" % (permSet, permsString)

  return permsString

def local_fqdn():
  """Return a system's true FQDN rather than any aliases, which are
     occasionally returned by socket.gethostname."""

  fqdn = None
  me = os.uname()[1]
  nameInfo = socket.gethostbyname_ex(me)
  nameInfo[1].append(nameInfo[0])
  for name in nameInfo[1]:
    # Prefer a dotted name that extends the bare hostname.
    if name.count(".") and name.startswith(me):
      fqdn = name
  if fqdn == None:
    fqdn = me

  return(fqdn)

def need_to_allocate(allocated, config, command):
  # Decide whether a cluster allocation is required before running command.
  status = True

  if allocated.isSet():
    status = False
  elif re.search("\s*dfs.*$", command) and \
        config['gridservice-hdfs']['external']:
    status = False
  elif config['gridservice-mapred']['external']:
    status = False

  return status

def filter_warnings():
  # Silence the py2 'with will become a reserved keyword' DeprecationWarning.
  warnings.filterwarnings('ignore',
    message=".*?'with' will become a reserved keyword.*")

def args_to_string(list):
  """return a string argument space seperated"""
  arg = ''
  for item in list:
    arg = "%s%s " % (arg, item)
  return arg[:-1]

def replace_escapes(object):
  """ replace any escaped character. e.g \, with , \= with = and so on """
  # here object is either a config object or a options object
  for section in object._mySections:
    for option in object._configDef[section].keys():
      if object[section].has_key(option):
        if object._configDef[section][option]['type'] == 'keyval':
          keyValDict = object[section][option]
          object[section][option] = {}
          for (key, value) in keyValDict.iteritems():
            match = reEscapeSeq.search(value)
            if match:
              # Strip the backslash, keeping the escaped character.
              value = reEscapeSeq.sub(r"\1", value)
            object[section][option][key] = value

def hadoopVersion(hadoopDir, java_home, log):
  # Determine the version of hadoop being used by executing the
  # hadoop version command. Code earlier in idleTracker.py
  # Returns {'major': ..., 'minor': ...} (strings), or Nones on failure.
  hadoopVersion = { 'major' : None, 'minor' : None }
  hadoopPath = os.path.join(hadoopDir, 'bin', 'hadoop')
  cmd = "%s version" % hadoopPath
  log.debug('Executing command %s to find hadoop version' % cmd)
  env = os.environ
  env['JAVA_HOME'] = java_home
  hadoopVerCmd = simpleCommand('HadoopVersion', cmd, env)
  hadoopVerCmd.start()
  hadoopVerCmd.wait()
  hadoopVerCmd.join()
  if hadoopVerCmd.exit_code() == 0:
    verLine = hadoopVerCmd.output()[0]
    log.debug('Version from hadoop command: %s' % verLine)
    hadoopVerRegExp = re.compile("Hadoop ([0-9]+)\.([0-9]+).*")
    verMatch = hadoopVerRegExp.match(verLine)
    if verMatch != None:
      hadoopVersion['major'] = verMatch.group(1)
      hadoopVersion['minor'] = verMatch.group(2)
  return hadoopVersion

def get_cluster_status(hdfsAddress, mapredAddress):
  """Determine the status of the cluster based on socket availability
     of HDFS and Map/Reduce."""
  # Status codes: 0 = both up, 14 = M/R down, 13 = HDFS down, 10 = both down.
  status = 0

  mapredSocket = tcpSocket(mapredAddress)
  try:
    mapredSocket.open()
    mapredSocket.close()
  except tcpError:
    status = 14

  hdfsSocket = tcpSocket(hdfsAddress)
  try:
    hdfsSocket.open()
    hdfsSocket.close()
  except tcpError:
    if status > 0:
      status = 10
    else:
      status = 13

  return status

def parseEquals(list):
  # takes in a list of keyval pairs e.g ['a=b','c=d'] and returns a
  # dict e.g {'a'='b','c'='d'}. Used in GridService/{mapred.py/hdfs.py} and
  # HodRing/hodring.py. No need for specially treating escaped =. as in \=,
  # since all keys are generated by hod and don't contain such anomalies
  dict = {}
  for elems in list:
    splits = elems.split('=')
    dict[splits[0]] = splits[1]
  return dict

def getMapredSystemDirectory(mrSysDirRoot, userid, jobid):
  # Build <root>/<user>/mapredsystem/<jobid> for the M/R system directory.
  return os.path.join(mrSysDirRoot, userid, 'mapredsystem', jobid)

class HodInterrupt:
  # Process-wide flag set when HOD receives a termination signal.
  def __init__(self):
    self.HodInterruptFlag = False
    self.log = None

  def set_log(self, log):
    self.log = log

  def init_signals(self):

    def sigStop(sigNum, handler):
      sig_wrapper(sigNum, self.setFlag)

    signal.signal(signal.SIGTERM, sigStop)   # 15 : software termination signal
    signal.signal(signal.SIGQUIT, sigStop)   # 3  : Quit program
    signal.signal(signal.SIGINT, sigStop)    # 2 ^C : Interrupt program

    # Local sig_wrapper shadows the module-level one for sigStop (scoping
    # makes the name local to init_signals); it also logs the signal.
    def sig_wrapper(sigNum, handler, *args):
      self.log.critical("Caught signal %s." % sigNum )

      if args:
        handler(args)
      else:
        handler()

  def setFlag(self, val = True):
    self.HodInterruptFlag = val

  def isSet(self):
    return self.HodInterruptFlag

class HodInterruptException(Exception):
  def __init__(self, value = ""):
    self.value = value

  def __str__(self):
    return repr(self.value)

# Module-level singleton used throughout HOD.
hodInterrupt = HodInterrupt()
from math import sqrt, pow, sin, pi, cos

from jmetal.core.problem import FloatProblem
from jmetal.core.solution import FloatSolution

"""
.. module:: ZDT
   :platform: Unix, Windows
   :synopsis: ZDT problem family of multi-objective problems.

.. moduleauthor:: <NAME> <<EMAIL>>
"""


class ZDT1(FloatProblem):
    """ Problem ZDT1.

    .. note:: Bi-objective unconstrained problem. The default number of variables is 30.
    .. note:: Continuous problem having a convex Pareto front
    """

    def __init__(self, number_of_variables: int = 30):
        """ :param number_of_variables: Number of decision variables of the problem.
        """
        super(ZDT1, self).__init__()
        self.number_of_variables = number_of_variables
        self.number_of_objectives = 2
        self.number_of_constraints = 0

        self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['x', 'y']

        # All variables range over [0, 1].
        self.lower_bound = self.number_of_variables * [0.0]
        self.upper_bound = self.number_of_variables * [1.0]

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        """Evaluate both objectives in place: f1 = x1, f2 = h(f1, g) * g."""
        g = self.eval_g(solution)
        h = self.eval_h(solution.variables[0], g)

        solution.objectives[0] = solution.variables[0]
        solution.objectives[1] = h * g

        return solution

    def eval_g(self, solution: FloatSolution):
        # g(x) = 1 + 9 * sum(x_2..x_n) / (n - 1); the first variable is
        # excluded by subtracting it from the full sum.
        g = sum(solution.variables) - solution.variables[0]

        constant = 9.0 / (solution.number_of_variables - 1)

        return constant * g + 1.0

    def eval_h(self, f: float, g: float) -> float:
        # Convex front shape: h = 1 - sqrt(f / g).
        return 1.0 - sqrt(f / g)

    def get_name(self):
        return 'ZDT1'


class ZDT1Modified(ZDT1):
    """ Problem ZDT1Modified.

    .. note:: Version including a loop for increasing the computing time of the evaluation functions.
    """
    def __init__(self, number_of_variables = 30):
        super(ZDT1Modified, self).__init__(number_of_variables)

    def evaluate(self, solution:FloatSolution) -> FloatSolution:
        # Deliberate busy-work to make each evaluation expensive; the
        # accumulated value is discarded.
        s: float = 0.0
        for i in range(1000):
            for j in range(10000):
                s += i * 0.235 / 1.234 + 1.23525 * j
        return super().evaluate(solution)


class ZDT2(ZDT1):
    """ Problem ZDT2.

    .. note:: Bi-objective unconstrained problem. The default number of variables is 30.
    .. note:: Continuous problem having a non-convex Pareto front
    """

    def eval_h(self, f: float, g: float) -> float:
        # Non-convex front shape: h = 1 - (f / g)^2.
        return 1.0 - pow(f / g, 2.0)

    def get_name(self):
        return 'ZDT2'


class ZDT3(ZDT1):
    """ Problem ZDT3.

    .. note:: Bi-objective unconstrained problem. The default number of variables is 30.
    .. note:: Continuous problem having a partitioned Pareto front
    """

    def eval_h(self, f: float, g: float) -> float:
        # Disconnected front: sinusoidal term partitions the Pareto set.
        return 1.0 - sqrt(f / g) - (f / g) * sin(10.0 * f * pi)

    def get_name(self):
        return 'ZDT3'


class ZDT4(ZDT1):
    """ Problem ZDT4.

    .. note:: Bi-objective unconstrained problem. The default number of variables is 10.
    .. note:: Continuous multi-modal problem having a convex Pareto front
    """

    def __init__(self, number_of_variables: int = 10):
        """ :param number_of_variables: Number of decision variables of the problem.
        """
        super(ZDT4, self).__init__(number_of_variables=number_of_variables)

        # x1 in [0, 1]; the remaining variables range over [-5, 5].
        self.lower_bound = self.number_of_variables * [-5.0]
        self.upper_bound = self.number_of_variables * [5.0]
        self.lower_bound[0] = 0.0
        self.upper_bound[0] = 1.0

    def eval_g(self, solution: FloatSolution):
        # Rastrigin-like multi-modal g over x_2..x_n.
        g = 0.0

        for i in range(1, solution.number_of_variables):
            g += pow(solution.variables[i], 2.0) - 10.0 * cos(4.0 * pi * solution.variables[i])

        g += 1.0 + 10.0 * (solution.number_of_variables - 1)

        return g

    def eval_h(self, f: float, g: float) -> float:
        return 1.0 - sqrt(f / g)

    def get_name(self):
        return 'ZDT4'


class ZDT6(ZDT1):
    """ Problem ZDT6.

    .. note:: Bi-objective unconstrained problem. The default number of variables is 10.
    .. note:: Continuous problem having a non-convex Pareto front
    """

    def __init__(self, number_of_variables: int = 10):
        """ :param number_of_variables: Number of decision variables of the problem.
        """
        super(ZDT6, self).__init__(number_of_variables=number_of_variables)

    def eval_g(self, solution: FloatSolution):
        # g = 1 + 9 * (sum(x_2..x_n) / (n - 1))^0.25
        g = sum(solution.variables) - solution.variables[0]
        g = g / (solution.number_of_variables - 1)
        g = pow(g, 0.25)
        g = 9.0 * g
        g = 1.0 + g

        return g

    def eval_h(self, f: float, g: float) -> float:
        return 1.0 - pow(f / g, 2.0)

    def get_name(self):
        return 'ZDT6'
""" Train a network."""<import_stmt>logging<import_stmt>argparse<line_sep># This is a weird hack to avoid Intel MKL issues on the cluster when this is called as a subprocess of a process that has itself initialized PyTorch. # Since numpy gets imported later anyway for dataset stuff, this shouldn't affect performance. <import_stmt>numpy<as>np# noqa: F401 <import_from_stmt>os.path isdir<import_from_stmt>pathlib Path<import_stmt>torch<import_stmt>e3nn<import_stmt>e3nn.util.jit<import_from_stmt>nequip.model model_from_config<import_from_stmt>nequip.utils Config<import_from_stmt>nequip.data dataset_from_config<import_from_stmt>nequip.utils.test assert_AtomicData_equivariant set_irreps_debug<import_from_stmt>nequip.utils load_file dtype_from_name<import_from_stmt>nequip.scripts.logger set_up_script_logger<line_sep>default_config=dict(root="./" run_name="NequIP" wandb=<false> wandb_project="NequIP" compile_model=<false> model_builders=["EnergyModel" "PerSpeciesRescale" "ForceOutput" "RescaleEnergyEtc" ] dataset_statistics_stride=1 default_dtype="float32" allow_tf32=<false> # TODO: until we understand equivar issues verbose="INFO" model_debug_mode=<false> equivariance_test=<false> grad_anomaly_mode=<false> append=<false> _jit_bailout_depth=2 # avoid 20 iters of pain, see https://github.com/pytorch/pytorch/issues/52286 )<def_stmt>main args=<none> running_as_script:bool=<true><block_start>config=parse_command_line(args)<if_stmt>running_as_script<block_start>set_up_script_logger(config.get("log" <none>) config.verbose)<block_end>found_restart_file=isdir(f"{config.root}/{config.run_name}")<if_stmt>found_restart_file<and><not>config.append<block_start><raise>RuntimeError(f"Training instance exists at {config.root}/{config.run_name}; "<concat>"either set append to True or use a different root or runname")<block_end># for fresh new train <if_stmt><not>found_restart_file<block_start>trainer=fresh_start(config)<block_end><else_stmt><block_start>trainer=restart(config)<block_end># 
Train trainer.save()<line_sep>trainer.train()<line_sep><return><block_end><def_stmt>parse_command_line args=<none><block_start>parser=argparse.ArgumentParser(description="Train a NequIP model.")<line_sep>parser.add_argument("config" help="configuration file")<line_sep>parser.add_argument("--equivariance-test" help="test the model's equivariance before training on n (default 1) random frames from the dataset" const=1 type=int nargs="?" )<line_sep>parser.add_argument("--model-debug-mode" help="enable model debug mode, which can sometimes give much more useful error messages at the cost of some speed. Do not use for production training!" action="store_true" )<line_sep>parser.add_argument("--grad-anomaly-mode" help="enable PyTorch autograd anomaly mode to debug NaN gradients. Do not use for production training!" action="store_true" )<line_sep>parser.add_argument("--log" help="log file to store all the screen logging" type=Path default=<none> )<line_sep>args=parser.parse_args(args=args)<line_sep>config=Config.from_file(args.config defaults=default_config)<for_stmt>flag ("model_debug_mode" "equivariance_test" "grad_anomaly_mode")<block_start>config[flag]=getattr(args flag)<or>config[flag]<block_end><return>config<block_end><def_stmt>_set_global_options config<block_start>"""Configure global options of libraries like `torch` and `e3nn` based on `config`."""<line_sep># Set TF32 support # See https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices <if_stmt>torch.cuda.is_available()<block_start><if_stmt>torch.torch.backends.cuda.matmul.allow_tf32<and><not>config.allow_tf32# it is enabled, and we dont want it to, so disable: <block_start>torch.backends.cuda.matmul.allow_tf32=<false><line_sep>torch.backends.cudnn.allow_tf32=<false><block_end><block_end># For avoiding 20 steps of painfully slow JIT recompilation # See https://github.com/pytorch/pytorch/issues/52286 
torch._C._jit_set_bailout_depth(config["_jit_bailout_depth"])<if_stmt>config.model_debug_mode<block_start>set_irreps_debug(enabled=<true>)<block_end>torch.set_default_dtype(dtype_from_name(config.default_dtype))<if_stmt>config.grad_anomaly_mode<block_start>torch.autograd.set_detect_anomaly(<true>)<block_end>e3nn.set_optimization_defaults(**config.get("e3nn_optimization_defaults" {}))<block_end><def_stmt>fresh_start config<block_start>_set_global_options(config)<line_sep># = Make the trainer = <if_stmt>config.wandb<block_start><import_stmt>wandb# noqa: F401 <import_from_stmt>nequip.train.trainer_wandb TrainerWandB<line_sep># download parameters from wandb in case of sweeping <import_from_stmt>nequip.utils.wandb init_n_update<line_sep>config=init_n_update(config)<line_sep>trainer=TrainerWandB(model=<none> **dict(config))<block_end><else_stmt><block_start><import_from_stmt>nequip.train.trainer Trainer<line_sep>trainer=Trainer(model=<none> **dict(config))<block_end># what is this # to update wandb data? 
config.update(trainer.params)<line_sep># = Load the dataset = dataset=dataset_from_config(config prefix="dataset")<line_sep>logging.info(f"Successfully loaded the data set of type {dataset}...")<try_stmt><block_start>validation_dataset=dataset_from_config(config prefix="validation_dataset")<line_sep>logging.info(f"Successfully loaded the validation data set of type {validation_dataset}...")<block_end><except_stmt>KeyError# It couldn't be found <block_start>validation_dataset=<none><block_end># = Train/test split = trainer.set_dataset(dataset validation_dataset)<line_sep># = Build model = final_model=model_from_config(config=config initialize=<true> dataset=trainer.dataset_train)<line_sep>logging.info("Successfully built the network...")<if_stmt>config.compile_model<block_start>final_model=e3nn.util.jit.script(final_model)<line_sep>logging.info("Successfully compiled model...")<block_end># Equivar test <if_stmt>config.equivariance_test<g>0<block_start>n_train:int=len(trainer.dataset_train)<assert_stmt>config.equivariance_test<le>n_train<line_sep>final_model.eval()<line_sep>indexes=torch.randperm(n_train)[:config.equivariance_test]<line_sep>errstr=assert_AtomicData_equivariant(final_model [trainer.dataset_train[i]<for>i indexes])<line_sep>final_model.train()<line_sep>logging.info("Equivariance test passed; equivariance errors:\n"<concat>" Errors are in real units, where relevant.\n"<concat>" Please note that the large scale of the typical\n"<concat>" shifts to the (atomic) energy can cause\n"<concat>" catastrophic cancellation and give incorrectly\n"<concat>" the equivariance error as zero for those fields.\n"<concat>f"{errstr}")<del_stmt>errstr indexes n_train<block_end># Set the trainer trainer.model=final_model<line_sep># Store any updated config information in the trainer trainer.update_kwargs(config)<line_sep><return>trainer<block_end><def_stmt>restart config# load the dictionary 
<block_start>restart_file=f"{config.root}/{config.run_name}/trainer.pth"<line_sep>dictionary=load_file(supported_formats=dict(torch=["pt" "pth"]) filename=restart_file enforced_format="torch" )<line_sep># compare dictionary to config and update stop condition related arguments <for_stmt>k config.keys()<block_start><if_stmt>config[k]<ne>dictionary.get(k "")<block_start><if_stmt>k<eq>"max_epochs"<block_start>dictionary[k]=config[k]<line_sep>logging.info(f'Update "{k}" to {dictionary[k]}')<block_end><elif_stmt>k.startswith("early_stop")<block_start>dictionary[k]=config[k]<line_sep>logging.info(f'Update "{k}" to {dictionary[k]}')<block_end><elif_stmt>isinstance(config[k] type(dictionary.get(k "")))<block_start><raise>ValueError(f'Key "{k}" is different in config and the result trainer.pth file. Please double check')<block_end><block_end><block_end># recursive loop, if same type but different value # raise error config=Config(dictionary exclude_keys=["state_dict" "progress"])<line_sep># dtype, etc. 
_set_global_options(config)<if_stmt>config.wandb<block_start><import_from_stmt>nequip.train.trainer_wandb TrainerWandB<import_from_stmt>nequip.utils.wandb resume<line_sep>resume(config)<line_sep>trainer=TrainerWandB.from_dict(dictionary)<block_end><else_stmt><block_start><import_from_stmt>nequip.train.trainer Trainer<line_sep>trainer=Trainer.from_dict(dictionary)<block_end># = Load the dataset = dataset=dataset_from_config(config prefix="dataset")<line_sep>logging.info(f"Successfully re-loaded the data set of type {dataset}...")<try_stmt><block_start>validation_dataset=dataset_from_config(config prefix="validation_dataset")<line_sep>logging.info(f"Successfully re-loaded the validation data set of type {validation_dataset}...")<block_end><except_stmt>KeyError# It couldn't be found <block_start>validation_dataset=<none><block_end>trainer.set_dataset(dataset validation_dataset)<line_sep><return>trainer<block_end><if_stmt>__name__<eq>"__main__"<block_start>main(running_as_script=<true>)<block_end>
<import_stmt>numpy<as>np<import_from_stmt>scipy.stats chi2_contingency ks_2samp<import_from_stmt>typing Callable Dict List Optional Tuple Union<import_from_stmt>alibi_detect.cd.base BaseUnivariateDrift<class_stmt>TabularDrift(BaseUnivariateDrift)<block_start><def_stmt>__init__ self x_ref:Union[np.ndarray list] p_val:float=.05 categories_per_feature:Dict[int Optional[int]]=<none> preprocess_x_ref:bool=<true> update_x_ref:Optional[Dict[str int]]=<none> preprocess_fn:Optional[Callable]=<none> correction:str='bonferroni' alternative:str='two-sided' n_features:Optional[int]=<none> input_shape:Optional[tuple]=<none> data_type:Optional[str]=<none><arrow><none><block_start>""" Mixed-type tabular data drift detector with Bonferroni or False Discovery Rate (FDR) correction for multivariate data. Kolmogorov-Smirnov (K-S) univariate tests are applied to continuous numerical data and Chi-Squared (Chi2) univariate tests to categorical data. Parameters ---------- x_ref Data used as reference distribution. p_val p-value used for significance of the K-S and Chi2 test for each feature. If the FDR correction method is used, this corresponds to the acceptable q-value. categories_per_feature Dictionary with as keys the column indices of the categorical features and optionally as values the number of possible categorical values for that feature or a list with the possible values. If you know which features are categorical and simply want to infer the possible values of the categorical feature from the reference data you can pass a Dict[int, NoneType] such as {0: None, 3: None} if features 0 and 3 are categorical. If you also know how many categories are present for a given feature you could pass this in the `categories_per_feature` dict in the Dict[int, int] format, e.g. *{0: 3, 3: 2}*. If you pass N categories this will assume the possible values for the feature are [0, ..., N-1]. You can also explicitly pass the possible categories in the Dict[int, List[int]] format, e.g. 
{0: [0, 1, 2], 3: [0, 55]}. Note that the categories can be arbitrary int values. preprocess_x_ref Whether to already preprocess and infer categories and frequencies for categorical reference data. update_x_ref Reference data can optionally be updated to the last n instances seen by the detector or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while for reservoir sampling {'reservoir_sampling': n} is passed. preprocess_fn Function to preprocess the data before computing the data drift metrics. Typically a dimensionality reduction technique. correction Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate). alternative Defines the alternative hypothesis for the K-S tests. Options are 'two-sided', 'less' or 'greater'. n_features Number of features used in the combined K-S/Chi-Squared tests. No need to pass it if no preprocessing takes place. In case of a preprocessing step, this can also be inferred automatically but could be more expensive to compute. input_shape Shape of input data. data_type Optionally specify the data type (tabular, image or time-series). Added to metadata. 
"""<line_sep>super().__init__(x_ref=x_ref p_val=p_val preprocess_x_ref=preprocess_x_ref update_x_ref=update_x_ref preprocess_fn=preprocess_fn correction=correction n_features=n_features input_shape=input_shape data_type=data_type)<line_sep>self.alternative=alternative<line_sep>self.x_ref_categories,self.cat_vars={} []# no categorical features assumed present <if_stmt>isinstance(categories_per_feature dict)<block_start>vals=list(categories_per_feature.values())<line_sep>int_types=(int np.int16 np.int32 np.int64)<if_stmt>all(v<is><none><for>v vals)# categories_per_feature = Dict[int, NoneType] <block_start>x_flat=self.x_ref.reshape(self.x_ref.shape[0] -1)<line_sep>categories_per_feature={f:list(np.unique(x_flat[: f]))# type: ignore <for>f categories_per_feature.keys()}<block_end><elif_stmt>all(isinstance(v int_types)<for>v vals)# categories_per_feature = Dict[int, int] <block_start>categories_per_feature={f:list(np.arange(v))# type: ignore <for>f,v categories_per_feature.items()}<block_end><elif_stmt><not>all(isinstance(v list)<for>v vals)<and>all(isinstance(v int_types)<for>val vals<for>v val)# type: ignore <block_start><raise>ValueError('categories_per_feature needs to be None or one of '<concat>'Dict[int, NoneType], Dict[int, int], Dict[int, List[int]]')<block_end>self.x_ref_categories=categories_per_feature<line_sep>self.cat_vars=list(self.x_ref_categories.keys())<block_end><block_end><def_stmt>feature_score self x_ref:np.ndarray x:np.ndarray<arrow>Tuple[np.ndarray np.ndarray]<block_start>""" Compute K-S or Chi-Squared test statistics and p-values per feature. Parameters ---------- x_ref Reference instances to compare distribution with. x Batch of instances. Returns ------- Feature level p-values and K-S or Chi-Squared statistics. 
"""<line_sep>x_ref=x_ref.reshape(x_ref.shape[0] -1)<line_sep>x=x.reshape(x.shape[0] -1)<line_sep># apply counts on union of categories per variable in both the reference and test data <if_stmt>self.cat_vars<block_start>x_categories={f:list(np.unique(x[: f]))<for>f self.cat_vars}<line_sep>all_categories={f:list(set().union(self.x_ref_categories[f] x_categories[f]))# type: ignore <for>f self.cat_vars}<line_sep>x_ref_count=self._get_counts(x_ref all_categories)<line_sep>x_count=self._get_counts(x all_categories)<block_end>p_val=np.zeros(self.n_features dtype=np.float32)<line_sep>dist=np.zeros_like(p_val)<for_stmt>f range(self.n_features)<block_start><if_stmt>f<in>self.cat_vars<block_start>contingency_table=np.vstack((x_ref_count[f] x_count[f]))<line_sep>dist[f],p_val[f],_,_=chi2_contingency(contingency_table)<block_end><else_stmt><block_start>dist[f],p_val[f]=ks_2samp(x_ref[: f] x[: f] alternative=self.alternative mode='asymp')<block_end><block_end><return>p_val dist<block_end><def_stmt>_get_counts self x:np.ndarray categories:Dict[int List[int]]<arrow>Dict[int List[int]]<block_start>""" Utility method for getting the counts of categories for each categorical variable. """<line_sep><return>{f:[(x[: f]<eq>v).sum()<for>v vals]<for>f,vals categories.items()}<block_end><block_end>
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# """ Tests for rate forms """<import_stmt>pytest<import_from_stmt>pyomo.environ Block ConcreteModel Var units<as>pyunits<import_from_stmt>idaes.generic_models.properties.core.generic.generic_reaction GenericReactionParameterBlock ConcentrationForm<import_from_stmt>idaes.generic_models.properties.core.reactions.rate_forms *<import_from_stmt>idaes.core.util.testing PhysicalParameterTestBlock<import_from_stmt>idaes.core.util.misc add_object_reference<line_sep>@pytest.mark.unit<def_stmt>test_power_law_rate_no_order <block_start>m=ConcreteModel()<line_sep># # Add a test thermo package for validation m.pparams=PhysicalParameterTestBlock()<line_sep>m.thermo=m.pparams.build_state_block([1])<line_sep>m.rparams=GenericReactionParameterBlock(default={"property_package":m.pparams "base_units":{"time":pyunits.s "mass":pyunits.kg "amount":pyunits.mol "length":pyunits.m "temperature":pyunits.K} "rate_reactions":{"r1":{"stoichiometry":{("p1" "c1"):-1 ("p1" "c2"):2} "rate_form":power_law_rate "concentration_form":ConcentrationForm.moleFraction}}})<line_sep># Create a dummy state block m.rxn=Block([1])<line_sep>add_object_reference(m.rxn[1] "phase_component_set" 
m.pparams._phase_component_set)<line_sep>add_object_reference(m.rxn[1] "params" m.rparams)<line_sep>add_object_reference(m.rxn[1] "state_ref" m.thermo[1])<line_sep>m.rxn[1].k_rxn=Var(["r1"] initialize=1)<line_sep>power_law_rate.build_parameters(m.rparams.reaction_r1 m.rparams.config.rate_reactions["r1"])<line_sep># Check parameter construction <assert_stmt>isinstance(m.rparams.reaction_r1.reaction_order Var)<assert_stmt>len(m.rparams.reaction_r1.reaction_order)<eq>4<for_stmt>i,v m.rparams.reaction_r1.reaction_order.items()<block_start><try_stmt><block_start>stoic=m.rparams.config.rate_reactions.r1.stoichiometry[i]<block_end><except_stmt>KeyError<block_start>stoic=0<block_end><if_stmt>stoic<l>1<block_start><assert_stmt>v.value<eq>-stoic<block_end><else_stmt><block_start><assert_stmt>v.value<eq>0<block_end><block_end># Check reaction form rform=power_law_rate.return_expression(m.rxn[1] m.rparams.reaction_r1 "r1" 300)<assert_stmt>str(rform)<eq>str(m.rxn[1].k_rxn["r1"]<times>m.thermo[1].mole_frac_phase_comp["p1" "c1"]<power>m.rparams.reaction_r1.reaction_order["p1" "c1"])<block_end>@pytest.mark.unit<def_stmt>test_power_law_rate_with_order <block_start>m=ConcreteModel()<line_sep># # Add a test thermo package for validation m.pparams=PhysicalParameterTestBlock()<line_sep>m.thermo=m.pparams.build_state_block([1])<line_sep>m.rparams=GenericReactionParameterBlock(default={"property_package":m.pparams "base_units":{"time":pyunits.s "mass":pyunits.kg "amount":pyunits.mol "length":pyunits.m "temperature":pyunits.K} "rate_reactions":{"r1":{"stoichiometry":{("p1" "c1"):-1 ("p1" "c2"):2} "rate_form":power_law_rate "concentration_form":ConcentrationForm.moleFraction "parameter_data":{"reaction_order":{("p1" "c1"):1 ("p1" "c2"):2 ("p2" "c1"):3 ("p2" "c2"):4}}}}})<line_sep># Create a dummy state block m.rxn=Block([1])<line_sep>add_object_reference(m.rxn[1] "phase_component_set" m.pparams._phase_component_set)<line_sep>add_object_reference(m.rxn[1] "params" 
m.rparams)<line_sep>add_object_reference(m.rxn[1] "state_ref" m.thermo[1])<line_sep>m.rxn[1].k_rxn=Var(["r1"] initialize=1)<line_sep>power_law_rate.build_parameters(m.rparams.reaction_r1 m.rparams.config.rate_reactions["r1"])<line_sep># Check parameter construction <assert_stmt>isinstance(m.rparams.reaction_r1.reaction_order Var)<assert_stmt>len(m.rparams.reaction_r1.reaction_order)<eq>4<assert_stmt>m.rparams.reaction_r1.reaction_order["p1" "c1"].value<eq>1<assert_stmt>m.rparams.reaction_r1.reaction_order["p1" "c2"].value<eq>2<assert_stmt>m.rparams.reaction_r1.reaction_order["p2" "c1"].value<eq>3<assert_stmt>m.rparams.reaction_r1.reaction_order["p2" "c2"].value<eq>4<line_sep># Check reaction form rform=power_law_rate.return_expression(m.rxn[1] m.rparams.reaction_r1 "r1" 300)<assert_stmt>str(rform)<eq>str(m.rxn[1].k_rxn["r1"]<times>(m.thermo[1].mole_frac_phase_comp["p1" "c1"]<power>m.rparams.reaction_r1.reaction_order["p1" "c1"]<times>m.thermo[1].mole_frac_phase_comp["p1" "c2"]<power>m.rparams.reaction_r1.reaction_order["p1" "c2"]<times>m.thermo[1].mole_frac_phase_comp["p2" "c1"]<power>m.rparams.reaction_r1.reaction_order["p2" "c1"]<times>m.thermo[1].mole_frac_phase_comp["p2" "c2"]<power>m.rparams.reaction_r1.reaction_order["p2" "c2"]))<block_end>
<import_stmt>logging<import_stmt>socket<import_from_stmt>typing Optional<import_from_stmt>tracardi.config tracardi<line_sep>logger=logging.getLogger('utils.network')<line_sep>logger.setLevel(tracardi.logging_level)<def_stmt>get_local_ip <arrow>Optional[str]<block_start><try_stmt><block_start>s=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>s.connect(("8.8.8.8" 80))<line_sep><return>s.getsockname()[0]<block_end><except_stmt>OSError<as>e<block_start>logger.error(str(e))<line_sep><return><none><block_end><block_end>local_ip=get_local_ip()<line_sep>
<import_from_stmt>typing Optional<import_from_stmt>tortoise.exceptions DoesNotExist<import_from_stmt>chapter7.authentication.models AccessToken AccessTokenTortoise UserDB UserTortoise <import_from_stmt>chapter7.authentication.password verify_password<async_keyword><def_stmt>authenticate email:str password:str<arrow>Optional[UserDB]<block_start><try_stmt><block_start>user=<await>UserTortoise.get(email=email)<block_end><except_stmt>DoesNotExist<block_start><return><none><block_end><if_stmt><not>verify_password(password user.hashed_password)<block_start><return><none><block_end><return>UserDB.from_orm(user)<block_end><async_keyword><def_stmt>create_access_token user:UserDB<arrow>AccessToken<block_start>access_token=AccessToken(user_id=user.id)<line_sep>access_token_tortoise=<await>AccessTokenTortoise.create(**access_token.dict())<line_sep><return>AccessToken.from_orm(access_token_tortoise)<block_end>
"""Mel cepstral distortion (MCD) computations in python."""<line_sep># Copyright 2014, 2015, 2016, 2017 <NAME> # This file is part of mcd. # See `License` for details of license and warranty. __version__='0.5.dev1'<line_sep>
# -*- coding: utf-8 -*- <import_stmt>json<import_stmt>os<import_stmt>datetime<import_stmt>time<import_stmt>random<import_from_stmt>copy deepcopy<import_from_stmt>bson ObjectId json_util<import_from_stmt>itertools chain<import_from_stmt>flask_admin BaseView expose<import_from_stmt>flask_babel gettext<as>_<import_from_stmt>flask_login current_user<import_from_stmt>mongoengine.queryset Q<import_from_stmt>flask request current_app redirect jsonify Response Markup flash url_for make_response<import_stmt>application.models<as>Models<import_from_stmt>application.controllers.admin AdminView<import_from_stmt>application.extensions admin<import_stmt>application.services.jobs<as>Jobs<import_from_stmt>application.utils Pagination format_date<import_from_stmt>configs.config TEMPLATE_DIR<line_sep>num_per_page=50<line_sep>delay_status_by_date={'PAYMENT_RECEIVED':3 'PROCESSING':1 'SHIPPING':5 'PORT_ARRIVED':4 }<def_stmt>to_json lo<block_start>dt={}<line_sep>dt['id']=str(lo.id)<line_sep>dt['is_closed']=lo.is_closed<line_sep>dt['close_reason']=lo.close_reason<line_sep>dt['created_at']=lo.created_at<line_sep>dt['detail']=lo.detail.to_mongo()<line_sep>dt['detail']['partner']=(<lambda>p:p<and>p.name)(lo.detail.partner)<line_sep>dt['address']=lo.order.address.to_json()<line_sep>dt['order_id']=lo.order.short_id<line_sep>dt['logistic_provider']=lo.order.logistic_provider<line_sep>dt['entries']=[entry_to_json(entry)<for>entry lo.entries]<line_sep>dt['estimated_weight']=lo.estimated_weight<line_sep>dt['returned_entries']=[entry_to_json(entry)<for>entry lo.returned_entries]<line_sep><return>dt<block_end><def_stmt>entry_to_json 
entry<block_start>dt={}<line_sep>dt['id']=str(entry.id)<line_sep>dt['item']=entry.item_snapshot.to_mongo()<line_sep>dt['spec']=entry.item_spec_snapshot.to_mongo()<try_stmt><block_start>dt['item']['weight']=entry.item_snapshot.weight<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>dt['item']['title_en']=entry.item_snapshot.title_en<block_end><except_stmt><block_start><pass><block_end>dt['amount_usd']=entry.amount_usd<line_sep>dt['amount']=entry.amount<line_sep>dt['quantity']=entry.quantity<line_sep>dt['unit_price']=entry.unit_price<line_sep>dt['created_at']=entry.created_at<line_sep>dt['remark']=entry.remark<line_sep>dt['shipping_info']=entry.shipping_info<line_sep><return>dt<block_end><def_stmt>restruct_query data<block_start>format_date=<lambda>d:datetime.datetime.strptime(d '%Y-%m-%dT%H:%M:%S.%fZ')<line_sep>status=data.get('status')<line_sep>query={}<for_stmt>k,v data.items()<block_start><if_stmt>v<in>[<none> u"None" "" "null"]<block_start><continue><block_end><if_stmt>k[-3:]<eq>'_no'<block_start>query.update({'detail__%s'%k:v})<block_end><elif_stmt>k<in>['status']<block_start>query.update({'detail__%s'%k:v})<block_end><elif_stmt>k<eq>'start'<block_start><if_stmt>status<block_start>date_field=Models.LogisticDetail.attr_by_log_stat[status]<line_sep>query.update({'detail__%s__gte'%date_field:format_date(v)})<block_end><else_stmt><block_start>query.update({'created_at__gte':format_date(v)})<block_end><block_end><elif_stmt>k<eq>'end'<block_start><if_stmt>status<block_start>date_field=Models.LogisticDetail.attr_by_log_stat[status]<line_sep>query.update({'detail__%s__lt'%date_field:format_date(v)})<block_end><else_stmt><block_start>query.update({'created_at__lt':format_date(v)})<block_end><block_end><elif_stmt>k<eq>'query'<block_start><if_stmt>v.startswith('MB')<block_start>query.update({'detail__partner_tracking_no':v})<block_end><elif_stmt>ObjectId.is_valid(v)<block_start>query.update({'id':v})<block_end><else_stmt><block_start>query.update
({'tracking_no':v})<block_end><block_end><elif_stmt>k<eq>'partner'<block_start>partner=Models.Partner.objects(name=v).first()<line_sep>query.update({'detail__partner':partner})<block_end><elif_stmt>k<eq>'channel'<block_start>query.update({'detail__channel':v})<block_end><else_stmt><block_start>query.update({'%s'%k:v})<block_end><block_end><return>query<block_end><class_stmt>N(AdminView)<block_start>_permission='logistic'<line_sep>@expose('/' methods=['GET' 'POST' 'DELETE' 'PATCH'])<def_stmt>index self status="ALL"<block_start><def_stmt>render_tpml status<block_start><return>make_response(open(os.path.join(TEMPLATE_DIR 'admin/logistic/index.html')).read())<block_end><def_stmt>render_json lid<block_start><return>jsonify(message="OK")<block_end><return>request.is_xhr<and>{'GET':<lambda>f:render_json(f.get('id')) }[request.method](request.form)<or>render_tpml(status)<block_end>@expose("/logistics" methods=["GET"])<def_stmt>logistics self<block_start>items_range=request.headers.get('Range' "0-9")<line_sep>start,end=items_range.split('-')<line_sep>per_page=int(end)-int(start)+1<line_sep>query=restruct_query(request.args)<line_sep>tracking_no=query.pop("tracking_no" "")<line_sep>include_closed=query.get('include_closed')<and>query.pop('include_closed')<try_stmt><block_start><if_stmt>include_closed<block_start>los=Models.Logistic.objects(**query)<block_end><else_stmt><block_start>los=Models.Logistic.objects(is_closed=<false> 
**query)<block_end><if_stmt>tracking_no<block_start>los=los.filter(Q(detail__us_tracking_no=tracking_no)|Q(detail__cn_tracking_no=tracking_no))<block_end><if_stmt>request.args.get('status')<block_start>los=los.order_by('detail__%s'%Models.LogisticDetail.attr_by_log_stat[request.args.get('status')])<block_end><block_end><except_stmt><block_start><pass><block_end><if_stmt>query.get('receiver')<block_start>addrs=Models.Address.objects(receiver=query.get('receiver')).distinct('id')<line_sep>orders=Models.Order.commodities(address__in=addrs)<line_sep>los=list(chain.from_iterable(order.logistics<for>order orders))<block_end><if_stmt>query.get('order_id')<block_start>orders=Models.Order.commodities(short_id=int(query.get('order_id')))<line_sep>los=list(chain.from_iterable(order.logistics<for>order orders))<block_end><try_stmt><block_start>los_size=los.count()<block_end><except_stmt><block_start>los_size=len(los)<block_end>data=los[int(start):int(end)]<line_sep>data=[to_json(l)<for>l data]<line_sep>resp=make_response(json_util.dumps(data) 200)<line_sep>resp.headers['Accept-Range']='items'<line_sep>resp.headers['Content-Range']='%s-%s/%s'%(start end los_size)<line_sep>resp.headers['Content-Type']='application/json'<line_sep><return>resp<block_end>@expose("/logistics_delay/<status>/<delay_type>" methods=["GET"])@expose("/logistics_delay/<status>/" methods=["GET"])@expose("/logistics_delay/" methods=["GET"])<def_stmt>logistics_delay self status=<none> delay_type=<none><block_start>utcnow=datetime.datetime.utcnow()<if_stmt>status<block_start>items_range=request.headers.get('Range' "0-9")<line_sep>start,end=items_range.split('-')<line_sep>per_page=int(end)-int(start)+1<line_sep>query=restruct_query(request.args)<line_sep>tracking_no=query.pop("tracking_no" "")<line_sep>date_field=Models.LogisticDetail.attr_by_log_stat[status]<line_sep>delay_days=datetime.timedelta(days=delay_status_by_date[status])<line_sep>query.update({'detail__%s__lt'%date_field:utcnow-delay_days 
'detail__status':status })<line_sep>los=Models.Logistic.objects(is_closed=<false> **query).order_by('detail__%s'%date_field)<if_stmt>tracking_no<block_start>los=los.filter(Q(detail__us_tracking_no=tracking_no)|Q(detail__cn_tracking_no=tracking_no))<block_end><if_stmt>delay_type<block_start>los=los.filter(detail__delay_details__reason__contains=delay_type)<block_end>data=los[int(start):int(end)]<line_sep>data=[to_json(l)<for>l data]<line_sep>resp=make_response(json_util.dumps(data) 200)<line_sep>resp.headers['Accept-Range']='items'<line_sep>resp.headers['Content-Range']='%s-%s/%s'%(start end los.count())<line_sep>resp.headers['Content-Type']='application/json'<line_sep><return>resp<block_end>data={}<for_stmt>status ["PAYMENT_RECEIVED" 'PROCESSING' 'SHIPPING' "PORT_ARRIVED"]<block_start>los=Models.Logistic.objects(is_closed=<false>)<line_sep>date_field=Models.LogisticDetail.attr_by_log_stat[status]<line_sep>delay_days=datetime.timedelta(days=delay_status_by_date[status])<line_sep>query={'detail__%s__lt'%date_field:utcnow-delay_days 'detail__status':status }<line_sep>count=los.filter(**query).count()<line_sep>data.update({status:count})<block_end><return>jsonify(results=data)<block_end>@expose("/logistics_irregular/<process_status>/<irr_type>" methods=["GET"])@expose("/logistics_irregular/<process_status>/" methods=["GET"])@expose("/logistics_irregular" methods=["GET"])<def_stmt>logistics_irregular self process_status=<none> irr_type=<none><block_start>utcnow=datetime.datetime.utcnow()<if_stmt>process_status<block_start>items_range=request.headers.get('Range' "0-9")<line_sep>start,end=items_range.split('-')<line_sep>query=restruct_query(request.args)<line_sep>tracking_no=query.pop('tracking_no' '')<line_sep>los=Models.Logistic.objects(detail__irregular_details__process_status=process_status 
**query).order_by('-detail.irregular_details.created_at')<if_stmt>irr_type<block_start>los=los.filter(detail__irregular_details__irr_type=irr_type).order_by('-detail.irregular_details.created_at')<block_end><if_stmt>tracking_no<block_start>los=los.filter(Q(detail__us_tracking_no=tracking_no)|Q(detail__cn_tracking_no=tracking_no))<block_end>data=los[int(start):int(end)]<line_sep>data=[to_json(l)<for>l data]<line_sep>resp=make_response(json_util.dumps(data) 200)<line_sep>resp.headers['Accept-Range']='items'<line_sep>resp.headers['Content-Range']='%s-%s/%s'%(start end los.count())<line_sep>resp.headers['Content-Type']='application/json'<line_sep><return>resp<block_end>data={}<for_stmt>status ["WAITING_PROCESS" "PROCESSING" "PROCESSED"]<block_start>los=Models.Logistic.objects(detail__irregular_details__process_status=status)<line_sep>data.update({status:los.count()})<block_end><return>jsonify(results=data)<block_end>@expose("/update" methods=["PUT"])<def_stmt>update self<block_start>query=request.get_json()<line_sep>dt={}<for_stmt>k,v query.items()<block_start><if_stmt>v<in>[<none> u"None" "" "null"]<block_start><continue><block_end><if_stmt>'date'<in>k<block_start>val=datetime.datetime.strptime(v '%Y-%m-%d')<block_end><elif_stmt>k.startswith('real')<block_start>val=float(v)<block_end><elif_stmt>k<eq>'partner'<block_start>val=Models.Partner.objects(name=v).first()<block_end><elif_stmt>k<eq>'irregularity'<block_start>val=Models.LogisticIrregular(irr_at_status=v.get('status') irr_type=v.get('type') reason=v.get('reason') desc=v.get('desc'))<block_end><else_stmt><block_start>val=v.strip()<block_end>dt.update({k:val})<block_end><try_stmt><block_start>lo=Models.Logistic.objects.get(id=dt.pop('lid'))<line_sep>lo.update_logistic(dt)<line_sep><return>jsonify(message="OK" remarks=lo.detail.remarks delays=lo.detail.delay_details irregularities=lo.detail.irregular_details)<block_end><except_stmt>Exception<as>e<block_start><return>jsonify(message="Failed" 
desc=e.message)<block_end><block_end>@expose("/update_delay" methods=["PUT"])<def_stmt>update_delay self<block_start>query=request.get_json()<try_stmt><block_start>lo=Models.Logistic.objects.get(id=query['lid'])<line_sep>delays=lo.detail.delay_details.filter(status=query['status'])<line_sep>delays.update(is_done=query['is_done'])<line_sep>lo.save()<line_sep><return>jsonify(message="OK")<block_end><except_stmt>Exception<as>e<block_start><return>jsonify(message="Failed" desc=e.message)<block_end><block_end>@expose("/update_irr_step" methods=["PUT"])<def_stmt>update_irr_step self<block_start>query=request.get_json()<line_sep>dt={}<for_stmt>k,v query.items()<block_start>dt.update({k:v})<block_end><try_stmt><block_start>lo=Models.Logistic.objects.get(id=dt['lid'])<line_sep>irregular=lo.detail.irregular_details.filter(irr_type=dt['irr_type']).first()<line_sep>irregular.steps=dt['solutions']<line_sep>lo.save()<line_sep><return>jsonify(message="OK" irr_detail=irregular)<block_end><except_stmt>Exception<as>e<block_start><return>jsonify(message="Failed" desc=e.message)<block_end><block_end>@expose("/set_irr_done" methods=["PUT"])<def_stmt>set_irr_done self<block_start>query=request.get_json()<line_sep>dt={}<for_stmt>k,v query.items()<block_start>dt.update({k:v})<block_end><try_stmt><block_start>lo=Models.Logistic.objects.get(id=dt['lid'])<line_sep>irregular=lo.detail.irregular_details.filter(irr_type=dt['irr_type']).first()<line_sep>irregular.process_status=dt['process_status']<line_sep>lo.save()<line_sep><return>jsonify(message="OK" irr_detail=irregular)<block_end><except_stmt>Exception<as>e<block_start><return>jsonify(message="Failed" desc=e.message)<block_end><block_end>@expose("/update_irr_remark" methods=["PUT"])<def_stmt>update_irr_remark self<block_start>query=request.get_json()<line_sep>dt={}<for_stmt>k,v 
query.items()<block_start>dt.update({k:v})<block_end><try_stmt><block_start>lo=Models.Logistic.objects.get(id=dt['lid'])<line_sep>irregular=lo.detail.irregular_details.filter(irr_type=dt['irr_type']).first()<line_sep>remark=Models.LogisticRemark(content=dt['irr_remark'] creator=current_user.name)<line_sep>irregular.remarks.append(remark)<line_sep>lo.save()<line_sep><return>jsonify(message="OK" irr_detail=irregular)<block_end><except_stmt>Exception<as>e<block_start><return>jsonify(message="Failed" desc=e.message)<block_end><block_end>@expose("/merge" methods=["POST"])<def_stmt>merge self<block_start>lids=request.json.get('lids')<if_stmt><not>lids<block_start><return>jsonify(message="Failed" desc="error~~~")<block_end>los=[Models.Logistic.objects(id=lid).first()<for>lid lids]<if_stmt><not>type(los)<is>list<block_start><return>jsonify(message="Failed" desc="please select more than 2 logistics")<block_end>start=0<for_stmt>index range(len(los)-1)<block_start><if_stmt>los[index+1].detail.cn_tracking_no<ne>los[start].detail.cn_tracking_no<or>los[index+1].order<ne>los[0].order<block_start><return>jsonify(message="Failed" desc="CTN and OrderID should be the same")<block_end><block_end><for_stmt>index range(len(los)-1)<block_start>map(<lambda>e:los[index+1].entries.append(e) los[index].entries)<line_sep>los[index].entries=[]<line_sep>los[index].save()<line_sep>los[index].close('merged with %s'%los[index+1].id datetime.datetime.utcnow())<line_sep>los[index+1].save()<if_stmt>index+1<eq>len(los)-1<block_start>comment=Models.LogisticRemark(content=u"合并单" creator=current_user.name)<line_sep>los[index+1].detail.remarks.append(comment)<line_sep>los[index+1].save()<block_end><block_end><return>jsonify(message="OK" lid=str(los[index+1].id))<block_end>@expose("/split_entries" methods=["POST"])<def_stmt>split_entries self<block_start>entries=request.json.get('selected')<if_stmt><not>entries<block_start><return>jsonify(message="Failed" desc="Please select 
entries!")<block_end>lids=[]<line_sep>entry_ids=[]<for_stmt>l entries<block_start>c=l.split(':')<line_sep>lids.append(c[1])<line_sep>entry_ids.append(c[0])<block_end>los=[Models.Logistic.objects(id=lid).first()<for>lid set(lids)]<line_sep>e_lst=[]<for_stmt>i entry_ids<block_start>e=Models.OrderEntry.objects(id=str(i)).first()<line_sep>e_lst.append(e)<block_end>entries_groups=map(<lambda>lo:filter(<lambda>e:e<in>lo.entries e_lst) los)<for_stmt>lo,lst zip(los entries_groups)<block_start>lo.fork_by_entries([e.id<for>e lst])<block_end><return>jsonify(message="OK" oid=lo.order.short_id)<block_end>@expose('/split_quantity' methods=['POST'])<def_stmt>split_quantity self<block_start>lid=request.json.get('lid')<line_sep>eid=request.json.get('eid')<line_sep>quantity=request.json.get('quantity')<line_sep>lo=Models.Logistic.objects(id=lid).first()<line_sep>entry=Models.OrderEntry.objects(id=eid).first()<if_stmt>entry.quantity<g>1<and>entry.quantity-int(quantity)<ge>1<and>entry<and>lo<block_start>entry.quantity<augsub>int(quantity)<line_sep>entry.update_snapshot()<line_sep>entry.update_amount()<line_sep>new_entry=deepcopy(entry)<line_sep>new_entry.__class__=Models.OrderEntry<line_sep>new_entry.id=<none><line_sep>new_entry.quantity=int(quantity)<line_sep>new_entry.update_snapshot()<line_sep>new_entry.update_amount()<line_sep>new_entry.save()<line_sep>lo.entries.append(new_entry)<line_sep>lo.save()<line_sep>order=lo.order<line_sep>order.entries.append(new_entry)<line_sep>order.save()<block_end><else_stmt><block_start><return>jsonify(message="Failed" desc="quantity error~~~~~~")<block_end><return>jsonify(message="OK" entries=[json.loads(json_util.dumps(entry_to_json(entry)))<for>entry lo.entries])<block_end>@expose('/download' methods=["GET"])<def_stmt>download self<block_start>FIELDS=[u"包裹ID" u'IMG No' u'CTN' u"下单日期" u"订单ID" u'订单短号' u'收件人' u'手机号' u'合作物流商' u'remark' u"下单备注" u"估重" 
u"渠道"]<line_sep>now=datetime.datetime.now()<line_sep>status=request.args.get('status')<line_sep>query=restruct_query(request.args)<line_sep>delay_export=query.get('delay_export')<and>query.pop('delay_export')<line_sep>delay_type=query.get('delay_type')<and>query.pop('delay_type')<try_stmt><block_start>los=Models.Logistic.objects(is_closed=<false> **query)<if_stmt>status<block_start>los=los.order_by('detail__%s'%Models.LogisticDetail.attr_by_log_stat[status])<block_end><block_end><except_stmt><block_start><pass><block_end><if_stmt>delay_export<block_start>date_field=Models.LogisticDetail.attr_by_log_stat[status]<line_sep>delay_days=datetime.timedelta(days=delay_status_by_date[status])<line_sep>query={'detail__%s__lt'%date_field:datetime.datetime.utcnow()-delay_days 'detail__status':status }<line_sep>los=los.filter(**query).order_by('detail__%s'%date_field)<if_stmt>delay_type<block_start>los=los.filter(detail__delay_details__reason__contains=delay_type)<block_end><block_end><if_stmt>query.get('receiver')<block_start>addrs=Models.Address.objects(receiver=query.get('receiver')).distinct('id')<line_sep>orders=Models.Order.commodities(address__in=addrs)<line_sep>los=list(chain.from_iterable(order.logistics<for>order orders))<block_end><if_stmt>query.get('order_id')<block_start>orders=Models.Order.commodities(short_id=int(query.get('order_id')))<line_sep>los=list(chain.from_iterable(order.logistics<for>order orders))<block_end><def_stmt>generate <block_start><yield>','.join(st<for>st FIELDS)+'\n'<for_stmt>log los<block_start><yield>','.join([str(log.id) log.detail.partner_tracking_no log.detail.carrier_tracking_no log.detail.cn_tracking_no log.detail.cn_logistic_name format_date(log.detail.payment_received_date) str(log.order.id) str(log.order.short_id) log.order.address.receiver log.order.address.mobile_number format_date(log.detail.processing_date) format_date(log.detail.shipping_date) format_date(log.detail.port_arrived_date) format_date(log.detail.received_date) 
format_date(log.detail.modified) log.detail.partner.name<if>log.detail.partner<else>'' '; '.join([r.content<for>r log.detail.remarks]) log.detail.extra<or>'' str(log.estimated_weight) log.detail.channel ])+'\n'<block_end><block_end><return>Response(generate() mimetype="text/csv" headers={"Content-Disposition":"attachment;filename=%s %s.csv"%(format_date(now '%Y-%m-%d') 'dumps_file')})<block_end>@expose('/partner' methods=["GET"])<def_stmt>partner self<block_start>partners=Models.Partner.objects().distinct('name')<line_sep><return>jsonify(results=partners message="OK")<block_end>@expose('/close/<lid>' methods=['GET'])<def_stmt>close self lid<block_start>lo=Models.Logistic.objects(id=lid).first()<line_sep>lo.close("Closed By %s"%current_user.name)<line_sep><return>jsonify(message="OK")<block_end>@expose('/logs/<ltype>/<lid>' methods=['GET'])<def_stmt>logs self ltype lid<block_start><if_stmt>ltype<eq>'express'<block_start>logs=Models.Logistic.objects(id=lid).first().express_tracking<line_sep><return>self.render('admin/logistic/express.html' logs=logs)<block_end><elif_stmt>ltype<eq>'logistic'<block_start>logs=Models.LogisticLog.objects(logistic_id=lid log_type__ne='API')<line_sep>user=<lambda>i:getattr(Models.User.objects(id=i).first() 'name' '')<if>i<and>i<ne>'system'<else>i<line_sep><return>self.render('admin/logistic/logs.html' logs=logs user=user)<block_end><elif_stmt>ltype<eq>'print'<block_start>lo=Models.Logistic.objects(id=lid).first()<if_stmt>lo.is_closed<block_start><return>Response('this logistics id has been closed.')<block_end><return>self.render('admin/logistic/print_page.html' lo=lo)<block_end><block_end>@expose('/refresh/<company>/<number>' methods=['GET'])<def_stmt>refresh self company number<block_start>Jobs.express.kuaidi_request(company number)<line_sep><return>jsonify(message="OK")<block_end>@expose('/back_status' methods=['GET'])<def_stmt>back_status 
self<block_start>lid=request.args.get('lid')<line_sep>status=request.args.get('status')<line_sep>l=Models.Logistic.objects(id=lid).first()<line_sep>l.detail.status=status<line_sep>setattr(l.detail Models.LogisticDetail.attr_by_log_stat[status] datetime.datetime.utcnow())<line_sep>l.save()<line_sep>order=l.order<line_sep>order.update_logistic_status()<line_sep><return>jsonify(message="OK")<block_end><block_end>admin.add_view(N(name=_('Logistics Backend') category='Logistics' menu_icon_type="fa" menu_icon_value="truck"))<line_sep>
import sys
import struct
import ctypes


def set_float(obj, value):
    """Overwrite the value stored inside an existing float object, in place.

    CPython-only hack: a ``float`` keeps its C ``double`` payload in the last
    8 bytes of the object, so we locate that payload via ``sys.getsizeof`` and
    patch it byte-by-byte through a raw ``ctypes`` pointer at ``id(obj)``.

    Args:
        obj: The float object whose payload is mutated.
        value: The new float value to write into ``obj``.

    Raises:
        AssertionError: If either argument is not a ``float``.
    """
    assert isinstance(obj, float), 'Object must be a float!'
    assert isinstance(value, float), 'Value must be a float!'
    # The double payload occupies the last struct.calcsize('d') bytes of the
    # object's memory footprint.
    size = sys.getsizeof(obj)
    offset = size - struct.calcsize('d')
    raw = ctypes.cast(id(obj), ctypes.POINTER(ctypes.c_ubyte))
    # Copy the packed native-endian bytes of `value` over the payload.
    for index, byte in enumerate(struct.pack('d', value), start=offset):
        raw[index] = byte
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5X model subclasses for prompt tuning."""

import functools
from typing import Optional, Mapping, MutableMapping, Any, Tuple

import jax
import jax.numpy as jnp
from t5x import models

from flaxformer.types import Array

PyTreeDef = type(jax.tree_structure(None))


class PromptDecoderOnlyModel(models.DecoderOnlyModel):
  """A prompted DecoderOnly Model that uses the prefill cache for prompting."""

  def __init__(self, prompt_length: int, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.prompt_length = prompt_length

  def _compute_logits(self,
                      params: Mapping[str, Array],
                      batch: Mapping[str, jnp.ndarray],
                      dropout_rng: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Hack off the prompt when calculating logits."""
    logits = super()._compute_logits(params, batch, dropout_rng)
    return logits[:, self.prompt_length:]

  def predict_batch_with_aux(
      self,
      params: Mapping[str, Array],
      batch: Mapping[str, jnp.ndarray],
      rng: Optional[jax.random.KeyArray] = None,
      *,
      return_all_decodes: bool = False,
      num_decodes: int = 1,
      decoder_params: Optional[MutableMapping[str, Any]] = None,
  ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
    """Prefill the cache (prompt + inputs) and decode the remaining targets."""
    # Maximum shape of an example: [B, input_tokens + target_tokens].
    target_shape = batch['decoder_input_tokens'].shape
    target_type = batch['decoder_input_tokens'].dtype
    # The cache must cover the full sequence *including* the prompt so that
    # masks and biases line up when used with single slices.
    max_decode_length = target_shape[1] + self.prompt_length

    # Initialize a zero'd out auto-regressive cache. Calling apply with
    # `decode=True` ensures the relpos-bias cache is created here rather than
    # inside a `lax.while_loop` (which would error). With `decode=True` the
    # prompt is not added to the input, and `prefill=True` cannot be combined
    # with `decode=True`, so we instead widen the dummy input shape ourselves
    # to leave room for the prompt.
    prompted_shape = target_shape[:1] + (max_decode_length,)
    _, variables_with_cache = self.module.apply(
        {'params': params},
        jnp.ones(prompted_shape, target_type),
        jnp.ones(prompted_shape, target_type),
        enable_dropout=False,
        decode=True,
        prefill=False,
        mutable=['cache'])
    cache = variables_with_cache['cache']

    # Per-example input length = sum of the causal attention mask; the mask
    # has one extra 1 beyond the number of input tokens, hence the -1.
    inputs_lengths = jnp.sum(batch['decoder_causal_attention'], axis=1) - 1

    # `decoder_input_tokens` is right-shifted and the causal mask has one more
    # 1 than the inputs, so this product masks out the targets portion.
    inputs = batch['decoder_input_tokens'] * batch['decoder_causal_attention']

    # Prefill both the prompt representations and the input representations.
    prefill_lengths = inputs_lengths + self.prompt_length

    # When `self._inputs_bidirectional_attention = False` this returns None
    # instead of batch['decoder_causal_attention'].
    maybe_decoder_causal_attention = self._get_decoder_causal_attention(batch)

    # Prefill the cache; because prefill=True, layers.PromptDecoderOnly adds
    # the prompt to the input. `decoder_causal_attention` is passed as the
    # targets so the attention mask covers the whole input (including the BOS
    # 0) without including target positions.
    _, variables_with_cache = self.module.apply(
        {
            'params': params,
            'cache': cache,
        },
        inputs,
        batch['decoder_causal_attention'],
        decoder_causal_attention=maybe_decoder_causal_attention,
        mutable=['cache'],
        enable_dropout=False,
        prefill=True,
        prefill_lengths=prefill_lengths,
    )
    # The cache index is now a vector of shape [B].
    prefilled_cache = variables_with_cache['cache']

    tokens_ids_to_logits = functools.partial(
        self._compute_logits_from_slice,
        params=params,
        max_decode_length=max_decode_length)

    # Make sure `decoder_params` can be unpacked with `**decoder_params`.
    if decoder_params is None:
      decoder_params = {}
    if rng is not None:
      if decoder_params.get('decode_rng') is not None:
        raise ValueError(
            f'Got RNG both from the `rng` argument ({rng}) and '
            f"`decoder_params['decode_rng']` ({decoder_params['decode_rng']}). "
            'Please specify one or the other.')
      decoder_params['decode_rng'] = rng

    # The decode function indexes into the inputs using the prefilled cache
    # index (which includes the prompt length), so pad the inputs as if the
    # prompt tokens were present. The padded positions are never read as real
    # tokens — only the shift of the EOS position matters — so the filler
    # value 2 is arbitrary (chosen to avoid multiple-EOS complications). This
    # also keeps the padding valid for experiments where the prompt is not
    # prepended.
    prompt_pad = jnp.full((inputs.shape[0], self.prompt_length),
                          2,
                          dtype=inputs.dtype)
    inputs_with_fake_prompts = jnp.concatenate([prompt_pad, inputs], axis=1)

    # Run the decoding loop with the single-step logits function; produces a
    # [batch, num_decodes, max_decode_length] output.
    decoded_sequences, scores = self._decode_fn(
        inputs=inputs_with_fake_prompts,
        cache=prefilled_cache,
        tokens_to_logits=tokens_ids_to_logits,
        eos_id=self.output_vocabulary.eos_id,
        num_decodes=num_decodes,
        initial_index=prefill_lengths,
        **decoder_params)

    if not return_all_decodes:
      # The decode dimension is sorted in increasing log-probability, so take
      # the highest scoring sequence (-1) and its score.
      decoded_sequences = decoded_sequences[:, -1, :]
      aux = {'scores': scores[:, -1]}
    else:
      # Return all samples and scores, not just the top ones.
      aux = {'scores': scores}

    # Shuffle the prompt + input tokens to the back of each sequence, then
    # drop the (now zeroed) space the prompt occupied. `...` works whether we
    # kept all decodes or just one.
    sequences = models.remove_prefix(decoded_sequences, prefill_lengths)
    trimmed_sequences = sequences[..., :-self.prompt_length]
    return trimmed_sequences, aux
""" 相比于原始的plot.py文件,增加了如下的功能: 1.可以直接在pycharm或者vscode执行,也可以用命令行传参; 2.按exp_name排序,而不是按时间排序; 3.固定好每个exp_name的颜色; 4.可以调节曲线的线宽,便于观察; 5.保存图片到本地,便于远程ssh画图~ 6.自动显示全屏 7.图片自适应 8.针对颜色不敏感的人群,可以在每条legend上注明性能值,和性能序号 9.对图例legend根据性能从高到低排序,便于分析比较 10.提供clip_xaxis值,对训练程度进行统一截断,图看起来更整洁。 seaborn版本0.8.1 """<import_stmt>seaborn<as>sns<import_stmt>pandas<as>pd<import_stmt>matplotlib.pyplot<as>plt<import_stmt>json<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>numpy<as>np<line_sep>DIV_LINE_WIDTH=50<line_sep># Global vars for tracking and labeling data at load time. exp_idx=0<line_sep>units=dict()<def_stmt>plot_data data xaxis='Epoch' value="TestEpRet" condition="Condition1" smooth=1 linewidth=4 rank=<true> performance=<true> **kwargs<block_start>performance_rank_dict={}<line_sep>condition2_list=[]<if_stmt>smooth<g>1<block_start>""" smooth data with moving window average. that is, smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k]) where the "smooth" param is width of that window (2k+1) """<line_sep>y=np.ones(smooth)<for_stmt>datum data<block_start>condition2_list.append(datum["Condition2"].values[0])<line_sep>x=np.asarray(datum[value])<line_sep>z=np.ones(len(x))<line_sep>smoothed_x=np.convolve(x y 'same')/np.convolve(z y 'same')<line_sep>datum[value]=smoothed_x<line_sep># add mean performance to performance_rank{dict} print("rank-add:" datum[condition].values[0])<if_stmt>datum[condition].values[0]<not><in>performance_rank_dict.keys()<block_start>performance_rank_dict[datum[condition].values[0]]=np.mean(smoothed_x[-len(smoothed_x)<floordiv>10:])<block_end><else_stmt><block_start>performance_rank_dict[datum[condition].values[0]]<augadd>np.mean(smoothed_x[-len(smoothed_x)<floordiv>10:])<block_end><block_end><block_end># concern the multi-seeds: <for_stmt>key performance_rank_dict.keys()<block_start>seed_num=sum([1<for>cond condition2_list<if>key<in>cond])<line_sep>performance_rank_dict[key]<augdiv>seed_num<block_end># value list 获取性能值排序序号 
performance_list=[]<line_sep>performance_rank_keys=[]<for_stmt>key,val performance_rank_dict.items()<block_start>print(key val)<line_sep>performance_list.append(val)<line_sep>performance_rank_keys.append(key)<block_end># 获取列表排序序号,一定要argsort2次~ performance_rank_list=np.argsort(np.argsort(-np.array(performance_list)))<line_sep>performance_rank_sort_dict={performance_rank_keys[index]:performance_rank_list[index]<for>index range(len(performance_rank_list))}<line_sep>print("performance_rank_list:" performance_rank_list)<line_sep># 修改data[condition]的名字 <for_stmt>index,datum enumerate(data)<block_start>origin_key=datum[condition].values[0]<if_stmt>performance<block_start>p=performance_rank_dict[origin_key]<line_sep>datum[condition]='P-'+str(np.round(p 3))+"-"+datum[condition]<block_end><if_stmt>rank<block_start>rank_value=performance_rank_sort_dict[origin_key]<line_sep>datum[condition]='Rank-'+str(rank_value)+"-"+datum[condition]<block_end><block_end><if_stmt>isinstance(data list)<block_start>data=pd.concat(data ignore_index=<true>)<block_end>sns.set(style="darkgrid" font_scale=1.75 )<line_sep># # data按照lenged排序; data.sort_values(by='Condition1' axis=0)<line_sep>sns.tsplot(data=data time=xaxis value=value unit="Unit" condition=condition ci='sd' linewidth=linewidth color=sns.color_palette("Paired" len(data)) # palette=sns.color_palette("hls", 8), **kwargs)<line_sep>""" If you upgrade to any version of Seaborn greater than 0.8.1, switch from tsplot to lineplot replacing L29 with: sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs) Changes the colorscheme and the default legend style, though. 
plt.legend() loc:图例位置,可取('best', 'upper right', 'upper left', 'lower left', 'lower right', 'right', 'center left', 'center , right', 'lower center', 'upper center', 'center') 若是使用了bbox_to_anchor,则这项就无效了 fontsize: int或float或{'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'},字体大小; frameon: 是否显示图例边框, ncol: 图例的列的数量,默认为1, title: 为图例添加标题 shadow: 是否为图例边框添加阴影, markerfirst: True表示图例标签在句柄右侧,false反之, markerscale: 图例标记为原图标记中的多少倍大小, numpoints: 表示图例中的句柄上的标记点的个数,一般设为1, fancybox: 是否将图例框的边角设为圆形 framealpha: 控制图例框的透明度 borderpad: 图例框内边距 labelspacing: 图例中条目之间的距离 handlelength: 图例句柄的长度 bbox_to_anchor: (横向看右,纵向看下),如果要自定义图例位置或者将图例画在坐标外边,用它, 比如bbox_to_anchor=(1.4,0.8),这个一般配合着ax.get_position(), set_position([box.x0, box.y0, box.width*0.8 , box.height])使用 """<line_sep># 对图例legend也做一个排序,这样看起来更直观~ handles,labels=plt.gca().get_legend_handles_labels()<line_sep>sorted_handles=[]<line_sep>sorted_labels=[]<for_stmt>index range(len(handles))<block_start>order_index=list(performance_rank_list).index(index)<line_sep>sorted_handles.append(handles[order_index])<line_sep>sorted_labels.append(labels[order_index])<block_end>plt.legend(sorted_handles sorted_labels loc='upper center' labelspacing=0.25 ncol=1 handlelength=6 mode="expand" borderaxespad=0. )<line_sep># plt.legend(loc='upper center', # ncol=1, # handlelength=6, # mode="expand", # borderaxespad=0., # ) """ For the version of the legend used in the Spinning Up benchmarking page, swap L38 with: plt.legend(loc='upper center', ncol=6, handlelength=1, mode="expand", borderaxespad=0., prop={'size': 13}) """<line_sep>xscale=np.max(np.asarray(data[xaxis]))<g>5e3<if_stmt>xscale# Just some formatting niceness: x-axis scale in scientific notation if max x is large <block_start>plt.ticklabel_format(style='sci' axis='x' scilimits=(0 0))<block_end>plt.tight_layout(pad=0.5)<block_end><def_stmt>get_datasets logdir condition=<none><block_start>""" Recursively look through logdir for output files produced by spinup.logx.Logger. 
Assumes that any file "progress.txt" is a valid hit. """<line_sep><global>exp_idx<line_sep><global>units<line_sep>datasets=[]<line_sep>roots=[]<line_sep>exp_names=[]<for_stmt>root,_,files os.walk(logdir)<block_start><if_stmt>'progress.txt'<in>files<block_start>exp_name=<none><try_stmt><block_start>config_path=open(os.path.join(root 'config.json'))<line_sep>config=json.load(config_path)<if_stmt>'exp_name'<in>config<block_start>exp_name=config['exp_name']<block_end>exp_names.append(exp_name)<line_sep>roots.append(root)<block_end><except_stmt>Exception<as>e<block_start>print("e:" e)<line_sep>print('No file named config.json')<block_end><block_end><block_end># just leave one seed: # roots_names_dict = {exp_names[index]: roots[index] for index in range(len(exp_names))} # exp_name(str) --> roots(list) with diff seeds roots_names_dict={exp_names[index]:roots<for>index range(len(exp_names))}<for_stmt>key,value roots_names_dict.items()<block_start>print(key value)<block_end># 按照实验名排序 roots_names_list=sorted(roots_names_dict.items() key=<lambda>x:x[0])<line_sep>print("roots_names_list:" roots_names_list)<line_sep>roots_names_dict={tup[0]:tup[1]<for>tup roots_names_list}<line_sep>print("roots_names_dict:" roots_names_dict)<for_stmt>exp_name,roots roots_names_dict.items()<block_start><for_stmt>root roots<block_start>condition1=condition<or>exp_name<or>'exp'<line_sep>condition2=condition1+'-'+str(exp_idx)<line_sep>exp_idx<augadd>1<if_stmt>condition1<not><in>units<block_start>units[condition1]=0<block_end>unit=units[condition1]<line_sep>units[condition1]<augadd>1<line_sep># x轴截断值,默认为None,如果设置的为具体值,则直接统一截断。需要根据当前的x轴坐标手动添加,比如steps,1e6,epochs数量级是500。 # 以epoch=300截断为例,直接修改clip_xaxis=300即可 clip_xaxis=<none><try_stmt><block_start>exp_data=pd.read_table(os.path.join(root 'progress.txt'))<if_stmt>clip_xaxis<is><not><none><block_start>exp_data=exp_data[:clip_xaxis]<block_end>line_num=len(exp_data)<line_sep>print('line num:{}, read from {}'.format(line_num os.path.join(root 
'progress.txt')))<block_end><except_stmt><block_start>print('Could not read from %s'%os.path.join(root 'progress.txt'))<line_sep><continue><block_end>performance='TestEpRet'<if>'TestEpRet'<in>exp_data<else>'AverageTestEpRet'<line_sep>exp_data.insert(len(exp_data.columns) 'Unit' unit)<line_sep>exp_data.insert(len(exp_data.columns) 'Condition1' condition1)<line_sep>exp_data.insert(len(exp_data.columns) 'Condition2' condition2)<line_sep>exp_data.insert(len(exp_data.columns) 'Performance' exp_data[performance])<line_sep>datasets.append(exp_data)<block_end><block_end># # 默认按照时间顺序获取文件夹数据 # print("-"*10, 'sorted by time', '-'*10) # for root, _, files in os.walk(logdir): # if 'progress.txt' in files: # exp_name = None # try: # config_path = open(os.path.join(root, 'config.json')) # config = json.load(config_path) # if 'exp_name' in config: # exp_name = config['exp_name'] # except: # print('No file named config.json') # condition1 = condition or exp_name or 'exp' # condition2 = condition1 + '-' + str(exp_idx) # exp_idx += 1 # if condition1 not in units: # units[condition1] = 0 # unit = units[condition1] # units[condition1] += 1 # # try: # exp_data = pd.read_table(os.path.join(root, 'progress.txt')) # line_num = len(exp_data) # print('line num:{}, read from {}'.format(line_num, # os.path.join(root, 'progress.txt'))) # except: # print('Could not read from %s' % os.path.join(root, 'progress.txt')) # continue # # performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'TestEpRet' # # performance = 'AverageEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet' # performance = 'TestSuccess' if 'TestSuccess' in exp_data else 'AverageEpRet' # exp_data.insert(len(exp_data.columns),'Unit',unit) # exp_data.insert(len(exp_data.columns),'Condition1',condition1) # exp_data.insert(len(exp_data.columns),'Condition2',condition2) # exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance]) # datasets.append(exp_data) 
<return>datasets<block_end><def_stmt>get_all_datasets all_logdirs legend=<none> select=<none> exclude=<none><block_start>""" For every entry in all_logdirs, 1) check if the entry is a real directory and if it is, pull data from it; 2) if not, check to see if the entry is a prefix for a real directory, and pull data from that. """<line_sep>logdirs=[]<for_stmt>logdir all_logdirs<block_start><if_stmt>osp.isdir(logdir)<and>logdir[-1]<eq>os.sep<block_start>logdirs<augadd>[logdir]<block_end><else_stmt><block_start>basedir=osp.dirname(logdir)<line_sep>fulldir=<lambda>x:osp.join(basedir x)<line_sep>prefix=logdir.split(os.sep)[-1]<line_sep>print("basedir:" basedir)<line_sep>listdir=os.listdir(basedir)<line_sep>logdirs<augadd>sorted([fulldir(x)<for>x listdir<if>prefix<in>x])<block_end><block_end>""" Enforce selection rules, which check logdirs for certain substrings. Makes it easier to look at graphs from particular ablations, if you launch many jobs at once with similar names. """<if_stmt>select<is><not><none><block_start>logdirs=[log<for>log logdirs<if>all(x<in>log<for>x select)]<block_end><if_stmt>exclude<is><not><none><block_start>logdirs=[log<for>log logdirs<if>all(<not>(x<in>log)<for>x exclude)]<block_end># Verify logdirs print('Plotting from...\n'+'='<times>DIV_LINE_WIDTH+'\n')<for_stmt>logdir logdirs<block_start>print(logdir)<block_end>print('\n'+'='<times>DIV_LINE_WIDTH)<line_sep># Make sure the legend is compatible with the logdirs <assert_stmt><not>(legend)<or>(len(legend)<eq>len(logdirs)) "Must give a legend title for each set of experiments."<line_sep># Load data from logdirs data=[]<if_stmt>legend<block_start><for_stmt>log,leg zip(logdirs legend)<block_start>data<augadd>get_datasets(log leg)<block_end><block_end><else_stmt><block_start><for_stmt>log logdirs<block_start>data<augadd>get_datasets(log)<block_end><block_end><return>data<block_end><def_stmt>make_plots all_logdirs legend=<none> xaxis=<none> values=<none> count=<false> font_scale=1.5 smooth=1 
linewidth=4 select=<none> exclude=<none> estimator='mean' rank=<true> performance=<true> <block_start>data=get_all_datasets(all_logdirs legend select exclude)<line_sep>values=values<if>isinstance(values list)<else>[values]<line_sep>condition='Condition2'<if>count<else>'Condition1'<line_sep>estimator=getattr(np estimator)# choose what to show on main curve: mean? max? min? <for_stmt>value values<block_start>plt.figure()<line_sep>plot_data(data xaxis=xaxis value=value condition=condition smooth=smooth estimator=estimator linewidth=linewidth rank=rank performance=performance)<block_end># 默认最大化图片 manager=plt.get_current_fig_manager()<try_stmt># matplotlib3.3.4 work <block_start>manager.resize(*manager.window.maxsize())<block_end><except_stmt># matplotlib3.2.1//2.2.3 work <block_start>manager.window.showMaximized()<block_end>fig=plt.gcf()<line_sep>fig.set_size_inches((16 9) forward=<false>)<line_sep>select_str=''<line_sep>exclude_str=''<line_sep>print("select:" select)<line_sep>print("select_str:" select_str)<if_stmt>select<is><not><none><and>type(select)<is>list<block_start><for_stmt>s_str select<block_start>select_str<augadd>s_str<block_end><block_end><if_stmt>exclude<is><not><none><and>type(exclude)<is>list<block_start><for_stmt>s_str exclude<block_start>exclude_str<augadd>s_str<block_end><block_end>print("select_str:" select_str)<try_stmt># 如果非远程,则显示图片 <block_start>plt.show()<block_end><except_stmt><block_start><pass><block_end>fig.savefig(all_logdirs[0]+'ep_reward_'+select_str+exclude_str+'.png' bbox_inches='tight' dpi=300)<line_sep># plt.savefig(all_logdirs[0] + 'ep_reward.png', # bbox_inches='tight', # dpi=300, # ) <block_end><def_stmt>main <block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<import_stmt>sys<line_sep># 如果是命令行启动,调用下面的语句,必须要输入数据路径! 
<if_stmt>len(sys.argv)<g>1<block_start>print("run in command: \n argv:" sys.argv '\n' '-'<times>30)<line_sep>parser.add_argument('logdir' nargs='*')<line_sep># other nargs parser.add_argument('--select' nargs='*' help='在当前路径下,选择特定关键词,不能是下一个文件夹,'<concat>'在idle中不能是字符串,在终端,不用加双引号,多个关键词可以用空格隔开')<line_sep>parser.add_argument('--exclude' nargs='*' help='同select')<block_end><else_stmt># 如果是idle启动,用于debug,则需要将路径加入到下面的语句! <block_start>print("run in pycharm\n" '-'<times>30)<line_sep>parser.add_argument('--logdir' '-r' type=list default=[# windows路径示例:这个2020的意思是,要保留子文件夹的一些前缀,比如子文件夹的名叫"2020-reach-*",不能只是"plot_demo_files\" r"plot_demo_files\2020" # Ubuntu路径示例: # "plot_demo_files/2020", ])<line_sep># other nargs parser.add_argument('--select' default=[] )<line_sep>parser.add_argument('--exclude' default=[] )<block_end>parser.add_argument('--legend' '-l' nargs='*')<line_sep>parser.add_argument('--xaxis' '-x' default='TotalEnvInteracts' help='选择什么为横坐标,默认为TotalEnvInteracts')<line_sep>parser.add_argument('--value' '-y' default='Performance' nargs='*' help='选择特定变量为性能指标,默认为AverageTestEpRet')<line_sep>parser.add_argument('--count' action='store_true' help='是否显示每个随机种子,加--count为显示')<line_sep># parser.add_argument('--count', default="False") parser.add_argument('--smooth' '-s' type=int default=20 help='滑动平均,20看起来会更平滑些')<line_sep>parser.add_argument('--linewidth' '-lw' type=float default=4 help='实验线宽,粗点容易分清')<line_sep>parser.add_argument('--rank' type=bool default=<true> help='是否在legend上显示性能排序')<line_sep>parser.add_argument('--performance' type=bool default=<true> help='是否在legend上显示性能值')<line_sep>parser.add_argument('--est' default='mean')<line_sep>args=parser.parse_args()<line_sep>print("args:" args)<line_sep>make_plots(args.logdir args.legend args.xaxis args.value args.count smooth=args.smooth select=args.select exclude=args.exclude estimator=args.est linewidth=args.linewidth rank=args.rank 
performance=args.performance)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>torch<import_stmt>meshzoo<def_stmt>generate_2D_mesh H W<block_start>_,faces=meshzoo.rectangle(xmin=-1. xmax=1. ymin=-1. ymax=1. nx=W ny=H zigzag=<true>)<line_sep>x=torch.arange(0 W 1).float().cuda()<line_sep>y=torch.arange(0 H 1).float().cuda()<line_sep>xx=x.repeat(H 1)<line_sep>yy=y.view(H 1).repeat(1 W)<line_sep>grid=torch.stack([xx yy] dim=0)<line_sep><return>grid faces<block_end>
<import_from_stmt>rest_framework serializers<import_from_stmt>baserow.contrib.database.views.models GridViewFieldOptions<class_stmt>GridViewFieldOptionsSerializer(serializers.ModelSerializer)<block_start><class_stmt>Meta<block_start>model=GridViewFieldOptions<line_sep>fields=("width" "hidden" "order")<block_end><block_end><class_stmt>GridViewFilterSerializer(serializers.Serializer)<block_start>field_ids=serializers.ListField(allow_empty=<false> required=<false> default=<none> child=serializers.IntegerField() help_text="Only the fields related to the provided ids are added to the "<concat>"response. If None are provided all fields will be returned." )<line_sep>row_ids=serializers.ListField(allow_empty=<false> child=serializers.IntegerField() help_text="Only rows related to the provided ids are added to the response." )<block_end>
MainWindow.clearData()<line_sep>MainWindow.openPost3D()<line_sep>PostProcess.script_openFile(-1 "Post3D" "%examplesPath%/water.vtk")<line_sep>PostProcess.script_openFile(-1 "Post3D" "%examplesPath%/platform.vtk")<line_sep>PostProcess.script_applyClicked(-1 "Post3D")<line_sep>PostProcess.script_Properties_streamline_integration_direction(-1 "Post3D" 3 2)<line_sep>PostProcess.script_Properties_streamline_integration_type(-1 "Post3D" 3 1)<line_sep>PostProcess.script_Properties_streamline_integration_stepUnit(-1 "Post3D" 3 2)<line_sep>PostProcess.script_Properties_streamline_seeds_num_points(-1 "Post3D" 3 100)<line_sep>PostProcess.script_FilterStreamLine(-1 "Post3D" 1)<line_sep>PostProcess.script_applyClicked(-1 "Post3D")<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("PROD1")<line_sep>process.source=cms.Source("IntSource")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(3))<line_sep>process.out=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('testEdmProvDump.root') outputCommands=cms.untracked.vstring('keep *' 'drop *_intProducerA_*_*'))<line_sep>process.a1=cms.EDAnalyzer("TestFindProduct" inputTags=cms.untracked.VInputTag(cms.InputTag("source")) expectedSum=cms.untracked.int32(12) inputTagsNotFound=cms.untracked.VInputTag(cms.InputTag("source" processName=cms.InputTag.skipCurrentProcess()) cms.InputTag("intProducer" processName=cms.InputTag.skipCurrentProcess()) cms.InputTag("intProducerU" processName=cms.InputTag.skipCurrentProcess())))<line_sep>process.a2=cms.EDAnalyzer("TestFindProduct" inputTags=cms.untracked.VInputTag(cms.InputTag("intProducerA")) expectedSum=cms.untracked.int32(300))<line_sep>process.a3=cms.EDAnalyzer("TestFindProduct" inputTags=cms.untracked.VInputTag(cms.InputTag("aliasForInt")) expectedSum=cms.untracked.int32(300))<line_sep>process.intProducer=cms.EDProducer("IntProducer" ivalue=cms.int32(1))<line_sep>process.intProducerU=cms.EDProducer("IntProducer" ivalue=cms.int32(10))<line_sep>process.intProducerA=cms.EDProducer("IntProducer" ivalue=cms.int32(100))<line_sep>process.aliasForInt=cms.EDAlias(intProducerA=cms.VPSet(cms.PSet(type=cms.string('edmtestIntProduct'))))<line_sep>process.intVectorProducer=cms.EDProducer("IntVectorProducer" count=cms.int32(9) ivalue=cms.int32(11))<line_sep>process.t=cms.Task(process.intProducerU process.intProducerA process.intVectorProducer)<line_sep>process.p=cms.Path(process.intProducer<times>process.a1<times>process.a2<times>process.a3 process.t)<line_sep>process.e=cms.EndPath(process.out)<line_sep>
<import_from_stmt>fractions Fraction<import_from_stmt>typing Tuple<import_stmt>pytest<import_from_stmt>hypothesis HealthCheck assume example given settings<import_from_stmt>hypothesis.strategies integers<import_from_stmt>raiden.tests.unit.transfer.test_channel make_hash_time_lock_state<import_from_stmt>raiden.tests.utils factories<import_from_stmt>raiden.tests.utils.factories NettingChannelEndStateProperties NettingChannelStateProperties <import_from_stmt>raiden.tests.utils.mediation_fees get_amount_with_fees get_initial_amount_for_amount_after_fees <import_from_stmt>raiden.transfer.mediated_transfer.initiator calculate_safe_amount_with_fee<import_from_stmt>raiden.transfer.mediated_transfer.mediation_fee NUM_DISCRETISATION_POINTS FeeScheduleState Interpolate calculate_imbalance_fees linspace <import_from_stmt>raiden.transfer.mediated_transfer.mediator get_amount_without_fees<import_from_stmt>raiden.transfer.state NettingChannelState<import_from_stmt>raiden.utils.mediation_fees ppm_fee_per_channel<import_from_stmt>raiden.utils.typing Balance FeeAmount PaymentAmount PaymentWithFeeAmount ProportionalFeeAmount TokenAmount <def_stmt>test_interpolation <block_start>interp=Interpolate((0 100) (0 100))<for_stmt>i range(101)<block_start><assert_stmt>interp(i)<eq>i<block_end>interp=Interpolate((0 50 100) (0 100 200))<for_stmt>i range(101)<block_start><assert_stmt>interp(i)<eq>2<times>i<block_end>interp=Interpolate((0 50 100) (0 -50 50))<assert_stmt>interp(40)<eq>-40<assert_stmt>interp(60)<eq>-30<assert_stmt>interp(90)<eq>30<assert_stmt>interp(99)<eq>48<line_sep>interp=Interpolate((0 100) (Fraction("12.35") Fraction("67.2")))<assert_stmt>interp(0)<eq>Fraction("12.35")<assert_stmt>interp(50)<eq>pytest.approx((12.35+67.2)/2)<assert_stmt>interp(100)<eq>Fraction("67.2")<block_end><def_stmt>test_imbalance_penalty <block_start>r"""Test an imbalance penalty by moving back and forth The imbalance fee looks like 20 | / | / 10 |\. / | \. 
/ 0 | \/ --------------- 0 50 100 For each input, we first assume the channel is used to forward tokens to a payee, which moves the capacity from x1 to x2. The we assume the same amount is mediated in the opposite direction (moving from x2 to x1) and check that the calculated fee is the same as before just with the opposite sign. """<line_sep>v_schedule=FeeScheduleState(imbalance_penalty=[(TokenAmount(0) FeeAmount(10)) (TokenAmount(50) FeeAmount(0)) (TokenAmount(100) FeeAmount(20)) ])<line_sep>reverse_schedule=FeeScheduleState(imbalance_penalty=[(TokenAmount(0) FeeAmount(20)) (TokenAmount(50) FeeAmount(0)) (TokenAmount(100) FeeAmount(10)) ])<for_stmt>cap_fees,x1,amount,expected_fee_in,expected_fee_out [# Uncapped fees (<false> 0 50 -8 -10) (<false> 50 30 20 12) (<false> 0 10 -2 -2) (<false> 10 10 -2 -2) (<false> 0 20 -3 -4) (<false> 40 15 0 0) (<false> 50 31 <none> 12) (<false> 100 1 <none> <none>) # Capped fees (<true> 0 50 0 0) (<true> 50 30 20 12) (<true> 0 10 0 0) (<true> 10 10 0 0) (<true> 0 20 0 0) (<true> 40 15 0 0) ]<block_start>v_schedule.cap_fees=cap_fees<line_sep>amount_with_fees=get_amount_with_fees(amount_without_fees=PaymentWithFeeAmount(amount) balance_in=Balance(x1) balance_out=Balance(100) schedule_in=v_schedule schedule_out=FeeScheduleState(cap_fees=cap_fees) receivable_amount=TokenAmount(100-x1) )<if_stmt>expected_fee_in<is><none><block_start><assert_stmt>amount_with_fees<is><none><block_end><else_stmt><block_start><assert_stmt>amount_with_fees<is><not><none><assert_stmt>amount_with_fees-amount<eq>FeeAmount(expected_fee_in)<block_end>reverse_schedule.cap_fees=cap_fees<line_sep>amount_with_fees=get_amount_with_fees(amount_without_fees=PaymentWithFeeAmount(amount) balance_in=Balance(0) balance_out=Balance(100-x1) schedule_in=FeeScheduleState(cap_fees=cap_fees) schedule_out=reverse_schedule receivable_amount=TokenAmount(100) 
)<if_stmt>expected_fee_out<is><none><block_start><assert_stmt>amount_with_fees<is><none><block_end><else_stmt><block_start><assert_stmt>amount_with_fees<is><not><none><assert_stmt>amount_with_fees-amount<eq>FeeAmount(expected_fee_out)<block_end><block_end><block_end><def_stmt>test_fee_capping <block_start>r""" Test the capping when one section of the fee function crossed from the positive into negative fees. Here, our fee curve looks like: Fee | 5 + |\ | \ 0 +--+-----+-> incoming_amount | 25\ 100 | \ | \ | \ | \ -15 + \ 0 When capping it, we need to insert the intersection point of (25, 0) into our piecewise linear function before capping all y values to zero. Otherwise we would just interpolate between (0, 5) and (100, 0). """<line_sep>schedule=FeeScheduleState(imbalance_penalty=[(TokenAmount(0) FeeAmount(0)) (TokenAmount(100) FeeAmount(20))] flat=FeeAmount(5) )<line_sep>fee_func=FeeScheduleState.mediation_fee_func(schedule_in=FeeScheduleState() schedule_out=schedule balance_in=Balance(0) balance_out=Balance(100) receivable=TokenAmount(100) amount_with_fees=PaymentWithFeeAmount(5) cap_fees=<true> )<assert_stmt>fee_func(30)<eq>0# 5 - 6, capped <assert_stmt>fee_func(20)<eq>5-4<block_end><def_stmt>test_linspace <block_start><assert_stmt>linspace(TokenAmount(0) TokenAmount(4) 5)<eq>[0 1 2 3 4]<assert_stmt>linspace(TokenAmount(0) TokenAmount(4) 4)<eq>[0 1 3 4]<assert_stmt>linspace(TokenAmount(0) TokenAmount(4) 3)<eq>[0 2 4]<assert_stmt>linspace(TokenAmount(0) TokenAmount(4) 2)<eq>[0 4]<assert_stmt>linspace(TokenAmount(0) TokenAmount(0) 3)<eq>[0 0 0]<with_stmt>pytest.raises(AssertionError)<block_start><assert_stmt>linspace(TokenAmount(0) TokenAmount(4) 1)<block_end><with_stmt>pytest.raises(AssertionError)<block_start><assert_stmt>linspace(TokenAmount(4) TokenAmount(0) 2)<block_end><block_end><def_stmt>test_rebalancing_fee_calculation <block_start>sample=calculate_imbalance_fees(TokenAmount(200) ProportionalFeeAmount(50_000))# 5% 
<assert_stmt>sample<is><not><none><assert_stmt>len(sample)<eq>NUM_DISCRETISATION_POINTS<assert_stmt>all(0<le>x<le>200<for>x,_ sample)<assert_stmt>max(x<for>x,_ sample)<eq>200<assert_stmt>all(0<le>y<le>10<for>_,y sample)<assert_stmt>max(y<for>_,y sample)<eq>10# 5% of the 200 TokenAmount capacity sample=calculate_imbalance_fees(TokenAmount(100) ProportionalFeeAmount(20_000))# 2% <assert_stmt>sample<is><not><none><assert_stmt>len(sample)<eq>NUM_DISCRETISATION_POINTS<assert_stmt>all(0<le>x<le>100<for>x,_ sample)<assert_stmt>max(x<for>x,_ sample)<eq>100<assert_stmt>all(0<le>y<le>2<for>_,y sample)<assert_stmt>max(y<for>_,y sample)<eq>2# 2% of the 100 TokenAmount capacity sample=calculate_imbalance_fees(TokenAmount(15) ProportionalFeeAmount(50_000))# 5% <assert_stmt>sample<is><not><none><assert_stmt>len(sample)<eq>16<assert_stmt>all(0<le>x<le>16<for>x,_ sample)<assert_stmt>max(x<for>x,_ sample)<eq>15<assert_stmt>all(0<le>y<le>1<for>_,y sample)<assert_stmt>max(y<for>_,y sample)<eq>1# 5% of the 5 rounded up # test rounding of the max_balance_fee calculation sample=calculate_imbalance_fees(TokenAmount(1000) ProportionalFeeAmount(5_490))# 0.549% <assert_stmt>sample<is><not><none><assert_stmt>len(sample)<eq>NUM_DISCRETISATION_POINTS<assert_stmt>all(0<le>x<le>1000<for>x,_ sample)<assert_stmt>max(x<for>x,_ sample)<eq>1000<assert_stmt>all(0<le>y<le>5<for>_,y sample)<assert_stmt>max(y<for>_,y sample)<eq>5# 5.49 is rounded to 5 sample=calculate_imbalance_fees(TokenAmount(1000) ProportionalFeeAmount(5_500))# 0.55% <assert_stmt>sample<is><not><none><assert_stmt>len(sample)<eq>NUM_DISCRETISATION_POINTS<assert_stmt>all(0<le>x<le>1000<for>x,_ sample)<assert_stmt>max(x<for>x,_ sample)<eq>1000<assert_stmt>all(0<le>y<le>6<for>_,y sample)<assert_stmt>max(y<for>_,y sample)<eq>6# 5.5 is rounded to 6 # test cases where no imbalance fee is created <assert_stmt>calculate_imbalance_fees(TokenAmount(0) ProportionalFeeAmount(1))<is><none><assert_stmt>calculate_imbalance_fees(TokenAmount(10) 
ProportionalFeeAmount(0))<is><none><block_end>@pytest.mark.parametrize("flat_fee, prop_fee, initial_amount, expected_amount" [# pure flat fee (50 0 1000 1000-50-50) # proportional fee (0 1_000_000 2000 1000) # 100% per hop mediation fee (0 100_000 1100 1000) # 10% per hop mediation fee (0 50_000 1050 1000) # 5% per hop mediation fee (0 10_000 1010 1000) # 1% per hop mediation fee (0 10_000 101 100) # 1% per hop mediation fee (0 4_990 100 100) # 0,499% per hop mediation fee gets rounded away # mixed tests (1 500_000 1000+500+2 1000) (10 500_000 1000+500+20 997) (100 500_000 1000+500+200 967) # - (1 100_000 1000+100+2 1000) (10 100_000 1000+100+20 999) (100 100_000 1000+100+200 991) # - (1 10_000 1000+10+2 1000) (10 10_000 1000+10+20 1000) (100 10_000 1000+10+200 999) # - (100 500_000 1000+750 1000) # - values found in run_test_mediated_transfer_with_fees (0 200_000 47+9 47) (0 200_000 39+8 39) ] )<def_stmt>test_get_lock_amount_after_fees flat_fee prop_fee initial_amount expected_amount<block_start>"""Tests mediation fee deduction."""<line_sep>prop_fee_per_channel=ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))<line_sep>lock=make_hash_time_lock_state(amount=initial_amount)<line_sep>channel_in=factories.create(NettingChannelStateProperties(partner_state=NettingChannelEndStateProperties(balance=TokenAmount(2000)) fee_schedule=FeeScheduleState(flat=flat_fee proportional=prop_fee_per_channel) ))<line_sep>channel_out=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(2000)) fee_schedule=FeeScheduleState(flat=flat_fee proportional=prop_fee_per_channel) ))<line_sep>locked_after_fees=get_amount_without_fees(amount_with_fees=lock.amount channel_in=channel_in channel_out=channel_out)<assert_stmt>locked_after_fees<eq>expected_amount<block_end>@pytest.mark.parametrize("cap_fees, flat_fee, prop_fee, imbalance_fee, initial_amount, expected_amount" [# No capping of the mediation fees # The higher the imbalance fee, the 
stronger the impact of the fee iteration (<false> 0 0 10_000 50_000 50_000+2_000) (<false> 0 0 20_000 50_000 50_000+3_995) (<false> 0 0 30_000 50_000 50_000+5_910) (<false> 0 0 40_000 50_000 50_000+7_613) (<false> 0 0 50_000 50_000 50_000+9_091) # Capping of mediation fees (<true> 0 0 10_000 50_000 50_000) (<true> 0 0 20_000 50_000 50_000) (<true> 0 0 30_000 50_000 50_000) (<true> 0 0 40_000 50_000 50_000) (<true> 0 0 50_000 50_000 50_000) ] )<def_stmt>test_get_lock_amount_after_fees_imbalanced_channel cap_fees flat_fee prop_fee imbalance_fee initial_amount expected_amount<block_start>"""Tests mediation fee deduction."""<line_sep>balance=TokenAmount(100_000)<line_sep>prop_fee_per_channel=ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))<line_sep>imbalance_fee=calculate_imbalance_fees(channel_capacity=balance proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee))<line_sep>lock=make_hash_time_lock_state(amount=initial_amount)<line_sep>channel_in=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(0)) partner_state=NettingChannelEndStateProperties(balance=balance) fee_schedule=FeeScheduleState(cap_fees=cap_fees flat=FeeAmount(flat_fee) proportional=prop_fee_per_channel imbalance_penalty=imbalance_fee ) ))<line_sep>channel_out=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance) partner_state=NettingChannelEndStateProperties(balance=TokenAmount(0)) fee_schedule=FeeScheduleState(cap_fees=cap_fees flat=FeeAmount(flat_fee) proportional=prop_fee_per_channel imbalance_penalty=imbalance_fee ) ))<line_sep>locked_after_fees=get_amount_without_fees(amount_with_fees=lock.amount channel_in=channel_in channel_out=channel_out)<assert_stmt>locked_after_fees<eq>expected_amount<block_end>@given(integers(min_value=0 max_value=100) integers(min_value=0 max_value=10_000) integers(min_value=0 max_value=50_000) integers(min_value=1 max_value=90_000_000_000_000_000) 
integers(min_value=1 max_value=100_000_000_000_000_000) integers(min_value=1 max_value=100_000_000_000_000_000) )@settings(suppress_health_check=[HealthCheck.filter_too_much])<def_stmt>test_fee_round_trip flat_fee prop_fee imbalance_fee amount balance1 balance2<block_start>"""Tests mediation fee deduction. First we're doing a PFS-like calculation going backwards from the target amount to get the amount that the initiator has to send. Then we calculate the fees from a mediator's point of view and check if `amount_with_fees - fees = amount`. """<line_sep># Find examples where there is a reasonable chance of succeeding amount=int(min(amount balance1<times>0.95-1 balance2<times>0.95-1))<line_sep>assume(amount<g>0)<line_sep>total_balance=TokenAmount(100_000_000_000_000_000_000)<line_sep>prop_fee_per_channel=ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))<line_sep>imbalance_fee=calculate_imbalance_fees(channel_capacity=total_balance proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee) )<line_sep>channel_in=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=total_balance-balance1) partner_state=NettingChannelEndStateProperties(balance=balance1) fee_schedule=FeeScheduleState(cap_fees=<false> flat=FeeAmount(flat_fee) proportional=prop_fee_per_channel imbalance_penalty=imbalance_fee ) ))<line_sep>channel_out=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance2) partner_state=NettingChannelEndStateProperties(balance=total_balance-balance2) fee_schedule=FeeScheduleState(cap_fees=<false> flat=FeeAmount(flat_fee) proportional=prop_fee_per_channel imbalance_penalty=imbalance_fee ) ))<line_sep># How much do we need to send so that the target receives `amount`? PFS-like calculation. 
fee_calculation=get_initial_amount_for_amount_after_fees(amount_after_fees=PaymentAmount(amount) channels=[(channel_in channel_out)])<line_sep>assume(fee_calculation)# There is not enough capacity for the payment in all cases <assert_stmt>fee_calculation<line_sep># How much would a mediator send to the target? Ideally exactly `amount`. amount_without_margin_after_fees=get_amount_without_fees(amount_with_fees=fee_calculation.total_amount channel_in=channel_in channel_out=channel_out )<line_sep>assume(amount_without_margin_after_fees)# We might lack capacity for the payment <assert_stmt>abs(amount-amount_without_margin_after_fees)<le>1# Equal except for rounding errors # If we add the fee margin, the mediator must always send at least `amount` to the target! amount_with_fee_and_margin=calculate_safe_amount_with_fee(fee_calculation.amount_without_fees FeeAmount(sum(fee_calculation.mediation_fees)))<line_sep>amount_with_margin_after_fees=get_amount_without_fees(amount_with_fees=amount_with_fee_and_margin channel_in=channel_in channel_out=channel_out)<line_sep>assume(amount_with_margin_after_fees)# We might lack capacity to add margins <assert_stmt>amount_with_margin_after_fees<ge>amount<block_end>@example(flat_fee=0 prop_fee=0 imbalance_fee=1277 amount=1 balance1=33 balance2=481)@given(integers(min_value=0 max_value=100) integers(min_value=0 max_value=10_000) integers(min_value=0 max_value=50_000) integers(min_value=1 max_value=90_000_000_000_000_000_000) integers(min_value=1 max_value=100_000_000_000_000_000_000) integers(min_value=1 max_value=100_000_000_000_000_000_000) )@settings(suppress_health_check=[HealthCheck.filter_too_much])<def_stmt>test_fee_add_remove_invariant flat_fee prop_fee imbalance_fee amount balance1 balance2<block_start>"""First adding and then removing fees must yield the original 
value"""<line_sep>total_balance=TokenAmount(100_000_000_000_000_000_000)<line_sep>prop_fee_per_channel=ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))<line_sep>imbalance_fee=calculate_imbalance_fees(channel_capacity=total_balance proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee) )<line_sep>fee_schedule=FeeScheduleState(cap_fees=<false> flat=FeeAmount(flat_fee) proportional=prop_fee_per_channel imbalance_penalty=imbalance_fee )<line_sep>channel_in=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=total_balance-balance1) partner_state=NettingChannelEndStateProperties(balance=balance1) fee_schedule=fee_schedule ))<line_sep>channel_out=factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance2) partner_state=NettingChannelEndStateProperties(balance=total_balance-balance2) fee_schedule=fee_schedule ))<line_sep>amount_with_fees=get_amount_with_fees(amount_without_fees=amount schedule_in=channel_in.fee_schedule schedule_out=channel_out.fee_schedule receivable_amount=balance1 balance_in=total_balance-balance1 balance_out=balance2 )<line_sep>assume(amount_with_fees)<assert_stmt>amount_with_fees<line_sep>amount_without_fees=get_amount_without_fees(amount_with_fees=amount_with_fees channel_in=channel_in channel_out=channel_out)<line_sep>assume(amount_without_fees)<assert_stmt>amount-1<le>amount_without_fees<le>amount+1<block_end><def_stmt>running_sum a<block_start>total=0<for_stmt>item a<block_start>total<augadd>item<line_sep><yield>total<block_end><block_end><def_stmt>make_channel_pair fee_schedule:FeeScheduleState balance1:int=0 balance2:int=0<arrow>Tuple[NettingChannelState NettingChannelState]<block_start>balance1=TokenAmount(balance1)<line_sep>balance2=TokenAmount(balance2)<line_sep><return>(factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance2) 
partner_state=NettingChannelEndStateProperties(balance=balance1) fee_schedule=fee_schedule )) factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance1) partner_state=NettingChannelEndStateProperties(balance=balance2) fee_schedule=fee_schedule )) )<block_end><def_stmt>test_mfee1 <block_start>"""Unit test for the fee calculation in the mfee1_flat_fee scenario"""<line_sep>amount=10_000<line_sep>deposit=100_000<line_sep>flat_fee=100<floordiv>2<line_sep>fee_schedule=FeeScheduleState(flat=FeeAmount(flat_fee))<line_sep>channels=make_channel_pair(fee_schedule deposit)<line_sep># How much do we need to send so that the target receives `amount`? PFS-like calculation. fee_calculation=get_initial_amount_for_amount_after_fees(amount_after_fees=PaymentAmount(amount) channels=[channels channels])<assert_stmt>fee_calculation<line_sep>amount_with_margin=calculate_safe_amount_with_fee(fee_calculation.amount_without_fees FeeAmount(sum(fee_calculation.mediation_fees)))<assert_stmt>amount_with_margin<eq>10_211<line_sep># print values for scenario print(deposit-amount_with_margin amount_with_margin)<for_stmt>med_fee running_sum(fee_calculation.mediation_fees)<block_start>print(deposit-amount_with_margin+med_fee amount_with_margin-med_fee)<block_end><block_end><def_stmt>test_mfee2 <block_start>"""Unit test for the fee calculation in the mfee2_proportional_fees scenario"""<line_sep>amount=10_000<line_sep>deposit=100_000<line_sep>prop_fee=ppm_fee_per_channel(ProportionalFeeAmount(10_000))<line_sep>fee_schedule=FeeScheduleState(proportional=ProportionalFeeAmount(prop_fee))<line_sep>channels=make_channel_pair(fee_schedule deposit)<line_sep># How much do we need to send so that the target receives `amount`? PFS-like calculation. 
fee_calculation=get_initial_amount_for_amount_after_fees(amount_after_fees=PaymentAmount(amount) channels=[channels channels])<assert_stmt>fee_calculation<line_sep>amount_with_margin=calculate_safe_amount_with_fee(fee_calculation.amount_without_fees FeeAmount(sum(fee_calculation.mediation_fees)))<assert_stmt>amount_with_margin<eq>10_213<line_sep># print values for scenario print(deposit-amount_with_margin amount_with_margin)<for_stmt>med_fee running_sum(fee_calculation.mediation_fees)<block_start>print(deposit-amount_with_margin+med_fee amount_with_margin-med_fee)<block_end><block_end><def_stmt>test_mfee3 <block_start>"""Unit test for the fee calculation in the mfee3_only_imbalance_fees scenario"""<line_sep>amount=500_000_000_000_000_000<line_sep>deposit=TokenAmount(1_000_000_000_000_000_000)<line_sep>imbalance_penalty=calculate_imbalance_fees(deposit ProportionalFeeAmount(10_000))<line_sep>fee_schedule=FeeScheduleState(imbalance_penalty=imbalance_penalty cap_fees=<false>)<line_sep>channels=make_channel_pair(fee_schedule deposit)<line_sep># How much do we need to send so that the target receives `amount`? PFS-like calculation. 
fee_calculation=get_initial_amount_for_amount_after_fees(amount_after_fees=PaymentAmount(amount) channels=[channels])<assert_stmt>fee_calculation<line_sep>amount_with_margin=calculate_safe_amount_with_fee(fee_calculation.amount_without_fees FeeAmount(sum(fee_calculation.mediation_fees)))<assert_stmt>amount_with_margin<eq>480_850_038_799_922_400<line_sep># print values for scenario print("{:_} {:_}".format(deposit-amount_with_margin amount_with_margin))<for_stmt>med_fee running_sum(fee_calculation.mediation_fees)<block_start>print("{:_} {:_}".format(deposit-amount_with_margin+med_fee amount_with_margin-med_fee))<block_end><block_end><def_stmt>test_mfee4 <block_start>"""Unit test for the fee calculation in the mfee4_combined_fees scenario"""<line_sep>amount=PaymentAmount(500_000_000_000_000_000)<line_sep>deposit=1_000_000_000_000_000_000<line_sep>prop_fee=ppm_fee_per_channel(ProportionalFeeAmount(10_000))<line_sep>imbalance_penalty=calculate_imbalance_fees(TokenAmount(deposit<times>2) ProportionalFeeAmount(20_000))<line_sep>fee_schedule=FeeScheduleState(flat=FeeAmount(100<floordiv>2) proportional=prop_fee imbalance_penalty=imbalance_penalty cap_fees=<false> )<line_sep>channels=make_channel_pair(fee_schedule deposit deposit)<line_sep># How much do we need to send so that the target receives `amount`? PFS-like calculation. 
fee_calculation=get_initial_amount_for_amount_after_fees(amount_after_fees=PaymentAmount(amount) channels=[channels channels])<assert_stmt>fee_calculation<line_sep>amount_with_margin=calculate_safe_amount_with_fee(amount FeeAmount(sum(fee_calculation.mediation_fees)))<line_sep># Calculate mediation fees for both mediators med_fees=[]<line_sep>incoming_amount=amount_with_margin<for_stmt>_ range(2)<block_start>outgoing_amount=get_amount_without_fees(amount_with_fees=incoming_amount channel_in=channels[0] channel_out=channels[1])<assert_stmt>outgoing_amount<line_sep>med_fees.append(incoming_amount-outgoing_amount)<line_sep>incoming_amount=outgoing_amount<block_end><assert_stmt>amount_with_margin<eq>543_503_066_141_505_551<line_sep># print values for scenario print("{:_} {:_}".format(deposit-amount_with_margin deposit+amount_with_margin))<for_stmt>med_fee running_sum(med_fees)<block_start>print("{:_} {:_}".format(deposit-amount_with_margin+med_fee deposit+amount_with_margin-med_fee))<block_end><block_end>
<import_from_stmt>typing List Tuple<import_stmt>numpy<as>np<import_from_stmt>pyrep.objects.shape Shape<import_from_stmt>pyrep.objects.dummy Dummy<import_from_stmt>pyrep.objects.proximity_sensor ProximitySensor<import_from_stmt>rlbench.backend.task Task<import_from_stmt>rlbench.backend.conditions DetectedCondition NothingGrasped<import_from_stmt>rlbench.backend.spawn_boundary SpawnBoundary<line_sep>NUM_SHELVES_IN_SAFE=3<class_stmt>PutMoneyInSafe(Task)<block_start><def_stmt>init_task self<arrow><none><block_start>self.index_dic={0:'bottom' 1:'middle' 2:'top'}<line_sep>self.money=Shape('dollar_stack')<line_sep>self.money_boundary=Shape('dollar_stack_boundary')<line_sep>self.register_graspable_objects([self.money])<line_sep>self.success_conditions=[NothingGrasped(self.robot.gripper)]<line_sep>self.w1_rel_pos=[-2.7287<times>10<power>(-4) -2.3246<times>10<power>(-6) +4.5627<times>10<power>(-2)]<line_sep>self.w1_rel_ori=[-3.1416 7.2824<times>10<power>(-1) -2.1265<times>10<power>(-2)]<block_end><def_stmt>init_episode self index:int<arrow>List[str]<block_start>self.target_shelf=index<line_sep>w4=Dummy('waypoint4')<line_sep>target_dummy_name='dummy_shelf'+str(self.target_shelf)<line_sep>target_pos_dummy=Dummy(target_dummy_name)<line_sep>target_pos=target_pos_dummy.get_position()<line_sep>w4.set_position(target_pos reset_dynamics=<false>)<line_sep>self.success_detector=ProximitySensor(('success_detector'+str(self.target_shelf)))<while_stmt>len(self.success_conditions)<g>1<block_start>self.success_conditions.pop()<block_end>self.success_conditions.append(DetectedCondition(self.money self.success_detector))<line_sep>self.register_success_conditions(self.success_conditions)<line_sep>b=SpawnBoundary([self.money_boundary])<line_sep>b.sample(self.money min_rotation=(0.00 0.00 0.00) max_rotation=(0.00 0.00 +0.5<times>np.pi))<line_sep><return>['put the money away in the safe on the %s shelf'%self.index_dic[index] 'leave the money on the %s shelf on the safe'%self.index_dic[index] 
'place the stack of bank notes on the %s shelf of the safe'%self.index_dic[index]]<block_end><def_stmt>variation_count self<arrow>int<block_start><return>NUM_SHELVES_IN_SAFE<block_end><def_stmt>base_rotation_bounds self<arrow>Tuple[List[float] List[float]]<block_start><return>[0.0 0.0 0.0] [0.0 0.0 +0.5<times>np.pi]<block_end><block_end>
<import_from_stmt>aerosandbox.common ExplicitAnalysis<import_stmt>aerosandbox.numpy<as>np<import_stmt>subprocess<import_from_stmt>pathlib Path<import_from_stmt>aerosandbox.geometry Airplane<import_from_stmt>aerosandbox.performance OperatingPoint<import_from_stmt>typing Union List Dict<import_stmt>tempfile<import_stmt>warnings<class_stmt>AVL(ExplicitAnalysis)<block_start>""" An interface to AVL, a 3D vortex lattice aerodynamics code developed by <NAME> at MIT. Requires AVL to be on your computer; AVL is available here: https://web.mit.edu/drela/Public/web/avl/ It is recommended (but not required) that you add AVL to your system PATH environment variable such that it can be called with the command `avl`. If this is not the case, you need to specify the path to your AVL executable using the `avl_command` argument of the constructor. Usage example: >>>avl = asb.AVL( >>> airplane=my_airplane, >>> op_point=asb.OperatingPoint( >>> velocity=100, # m/s >>> alpha=5, # deg >>> beta=4, # deg >>> p=0.01, # rad/sec >>> q=0.02, # rad/sec >>> r=0.03, # rad/sec >>> ) >>>) >>>outputs = avl.run() """<def_stmt>__init__ self airplane:Airplane op_point:OperatingPoint=OperatingPoint() avl_command:str="avl" verbose:bool=<false> working_directory:str=<none> <block_start>""" Interface to AVL. Args: airplane: The airplane object you wish to analyze. op_point: The operating point you wish to analyze at. avl_command: The command-line argument to call AVL. * If AVL is on your system PATH, then you can just leave this as "avl". * If AVL is not on your system PATH, thjen you should provide a filepath to the AVL executable. Note that AVL is not on your PATH by default. To tell if AVL is on your system PATH, open up a terminal and type "avl". * If the AVL menu appears, it's on your PATH. * If you get something like "'avl' is not recognized as an internal or external command..." 
or "Command 'avl' not found, did you mean...", then it is not on your PATH and you'll need to specify the location of your AVL executable as a string. To add AVL to your path, modify your system's environment variables. (Google how to do this for your OS.) verbose: working_directory: """<line_sep>self.airplane=airplane<line_sep>self.op_point=op_point<line_sep>self.avl_command=avl_command<line_sep>self.verbose=verbose<line_sep>self.working_directory=working_directory<block_end><def_stmt>run self<arrow>Dict<block_start><return>self._run_avl()<block_end><def_stmt>_default_keystroke_file_contents self<arrow>List[str]<block_start>run_file_contents=[]<line_sep># Disable graphics run_file_contents<augadd>["plop" "g" "" ]<line_sep># Enter oper mode run_file_contents<augadd>["oper" ]<line_sep># Set parameters run_file_contents<augadd>["m"<concat>f"mn {self.op_point.mach()}" f"v {self.op_point.velocity}" f"d {self.op_point.atmosphere.density()}" "g 9.81" ""]<line_sep># Set analysis state p_bar=self.op_point.p<times>self.airplane.b_ref/(2<times>self.op_point.velocity)<line_sep>q_bar=self.op_point.q<times>self.airplane.c_ref/(2<times>self.op_point.velocity)<line_sep>r_bar=self.op_point.r<times>self.airplane.b_ref/(2<times>self.op_point.velocity)<line_sep>run_file_contents<augadd>[f"a a {self.op_point.alpha}" f"b b {self.op_point.beta}" f"r r {p_bar}" f"p p {q_bar}" f"y y {r_bar}"]<line_sep><return>run_file_contents<block_end><def_stmt>_run_avl self run_command:str=<none> <arrow>Dict[str np.ndarray]<block_start>""" Private function to run AVL. Args: run_command: A string with any AVL keystroke inputs that you'd like. By default, you start off within the OPER menu. All of the inputs indicated in the constructor have been set already, but you can override them here ( for this run only) if you want. Returns: A dictionary containing all of your results. 
"""<with_stmt>tempfile.TemporaryDirectory()<as>directory<block_start>directory=Path(directory)<line_sep>### Alternatively, work in another directory: <if_stmt>self.working_directory<is><not><none><block_start>directory=Path(self.working_directory)<block_end># For debugging # Designate an intermediate file for file I/O output_filename="output.txt"<with_stmt>open(directory/output_filename "w+")<as>f<block_start><pass><block_end># Handle the airplane file airplane_file="airplane.avl"<line_sep>self.airplane.write_avl(directory/airplane_file)<line_sep># Handle the run file keystroke_file_contents=self._default_keystroke_file_contents()<if_stmt>run_command<is><not><none><block_start>keystroke_file_contents<augadd>[run_command]<block_end>keystroke_file_contents<augadd>["x" "st" f"{output_filename}" "o" "" "" "quit"]<line_sep>keystroke_file="keystroke_file.txt"<with_stmt>open(directory/keystroke_file "w+")<as>f<block_start>f.write("\n".join(keystroke_file_contents))<block_end>command=f'{self.avl_command} {airplane_file} < {keystroke_file}'<line_sep>### Execute subprocess.call(command shell=<true> cwd=directory stdout=<none><if>self.verbose<else>subprocess.DEVNULL)<line_sep>##### Parse the output file # Read the file <with_stmt>open(directory/output_filename "r")<as>f<block_start>output_data=f.read()<block_end># Trim off the first few lines that contain name, # of panels, etc. output_data="\n".join(output_data.split("\n")[8:])<line_sep>### Iterate through the string to find all the numeric values, based on where "=" appears. 
values=[]<line_sep>index=output_data.find("=")<while_stmt>index<ne>-1<block_start>output_data=output_data[index+1:]<line_sep>number=output_data[:12].split("\n")[0]<line_sep>number=float(number)<line_sep>values.append(number)<line_sep>index=output_data.find("=")<block_end>### Record the keys associated with those values: keys=["Sref" "Cref" "Bref" "Xref" "Yref" "Zref" "alpha" "pb/2V" "p'b/2V" "beta" "qc/2V" "mach" "rb/2V" "r'b/2V" "CX" # Note: these refer to "CXtot", etc. in AVL, but the "tot" is redundant. "Cl" "Cl'" "CY" "Cm" "CZ" "Cn" "Cn'" "CL" "CD" "CDvis" "CDind" "CLff" "CDff" "Cyff" "e" "CLa" "CLb" "CYa" "CYb" "Cla" "Clb" "Cma" "Cmb" "Cna" "Cnb" "CLp" "CLq" "CLr" "CYp" "CYq" "CYr" "Clp" "Clq" "Clr" "Cmp" "Cmq" "Cmr" "Cnp" "Cnq" "Cnr" "Xnp" "Clb Cnr / Clr Cnb"]<if_stmt>len(values)<ne>57<and>len(values)<ne>56# Sometimes the spiral mode term is inexplicably not displayed by AVL <block_start><raise>RuntimeError("AVL could not run for some reason!\n"<concat>"Investigate by turning on the `verbose` flag and looking at the output.\n"<concat>"(Common culprit: angular rates too high.)")<block_end>res={k:v<for>k,v zip(keys values)}<line_sep>##### Add a few more outputs for ease of use res["p"]=res["pb/2V"]<times>(2<times>self.op_point.velocity/self.airplane.b_ref)<line_sep>res["q"]=res["qc/2V"]<times>(2<times>self.op_point.velocity/self.airplane.c_ref)<line_sep>res["r"]=res["rb/2V"]<times>(2<times>self.op_point.velocity/self.airplane.b_ref)<line_sep><return>res<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'### Import Vanilla Airplane <block_start><import_stmt>aerosandbox<as>asb<import_from_stmt>pathlib Path<line_sep>geometry_folder=Path(asb.__file__).parent.parent/"tutorial"/"04 - Geometry"/"example_geometry"<import_stmt>sys<line_sep>sys.path.insert(0 str(geometry_folder))<import_from_stmt>vanilla airplane<as>vanilla<line_sep>### Do the AVL run avl=AVL(airplane=vanilla op_point=OperatingPoint(atmosphere=asb.Atmosphere(altitude=0) velocity=1 
alpha=0.433476 beta=0 p=0 q=0 r=0 ) )<line_sep>res=avl.run()<for_stmt>k,v res.items()<block_start>print(f"{str(k).rjust(10)} : {v}")<block_end><block_end>
""" Code for acquisition strategies. """<line_sep>
# pycrc -- parameterisable CRC calculation utility and C source code generator # # Copyright (c) 2017 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ This modules simplifies an expression. import pycrc.expr as exp my_expr = exp.Xor('var', exp.Parenthesis(exp.And('0x700', 4))) print('"{}" -> "{}"'.format(my_expr, my_expr.simplify())) """<def_stmt>_classify val<block_start>""" Creates a Terminal object if the parameter is a string or an integer. """<if_stmt>type(val)<is>int<block_start><return>Terminal(val)<block_end><if_stmt>type(val)<is>str<block_start><if_stmt>val.isdigit()<block_start><return>Terminal(int(val) val)<block_end><if_stmt>val[:2].lower()<eq>'0x'<block_start><return>Terminal(int(val 16) val)<block_end><return>Terminal(val)<block_end><return>val<block_end><class_stmt>Expression(object)<block_start>""" Base class for all expressions. 
"""<def_stmt>is_int self val=<none><block_start><return><false><block_end><block_end><class_stmt>Terminal(Expression)<block_start>""" A terminal object. """<def_stmt>__init__ self val pretty=<none><block_start>""" Construct a Terminal. The val variable is usually a string or an integer. Integers may also be passed as strings. The pretty-printer will use the string when formatting the expression. """<line_sep>self.val=val<line_sep>self.pretty=pretty<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<if_stmt>self.pretty<is><none><block_start><return>str(self.val)<block_end><return>self.pretty<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep><return>self<block_end><def_stmt>is_int self val=<none><block_start>""" Return True if the value of this Terminal is an integer. """<if_stmt>type(self.val)<is>int<block_start><return>val<is><none><or>self.val<eq>val<block_end><return><false><block_end><block_end><class_stmt>FunctionCall(Expression)<block_start>""" Represent a function call """<def_stmt>__init__ self name args<block_start>""" Construct a function call object. """<line_sep>self.name=_classify(name)<line_sep>self.args=[_classify(arg)<for>arg args]<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.name)+'('+', '.join([str(arg)<for>arg self.args])+')'<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>args=[arg.simplify()<for>arg self.args]<line_sep><return>FunctionCall(self.name args)<block_end><block_end><class_stmt>Parenthesis(Expression)<block_start>""" Represent a pair of round brackets. """<def_stmt>__init__ self val<block_start>""" Construct a parenthesis object. """<line_sep>self.val=_classify(val)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. 
"""<line_sep>val=self.val.simplify()<if_stmt>type(val)<is>Terminal<block_start><return>val<block_end><return>Parenthesis(val)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>'('+str(self.val)+')'<block_end><block_end><class_stmt>Add(Expression)<block_start>""" Represent an addition of operands. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct an addition object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val+rhs.val)<block_end><if_stmt>lhs.is_int(0)<block_start><return>rhs<block_end><if_stmt>rhs.is_int(0)<block_start><return>lhs<block_end><return>Add(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' + '+str(self.rhs)<block_end><block_end><class_stmt>Sub(Expression)<block_start>""" Represent a subtraction of operands. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct subtraction object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val-rhs.val)<block_end><if_stmt>lhs.is_int(0)<block_start><return>rhs<block_end><if_stmt>rhs.is_int(0)<block_start><return>lhs<block_end><return>Sub(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' - '+str(self.rhs)<block_end><block_end><class_stmt>Mul(Expression)<block_start>""" Represent the multiplication of operands. 
"""<def_stmt>__init__ self lhs rhs<block_start>""" Construct a multiplication object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val<times>rhs.val)<block_end><if_stmt>lhs.is_int(0)<or>rhs.is_int(0)<block_start><return>Terminal(0)<block_end><if_stmt>lhs.is_int(1)<block_start><return>rhs<block_end><if_stmt>rhs.is_int(1)<block_start><return>lhs<block_end><return>Mul(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' * '+str(self.rhs)<block_end><block_end><class_stmt>Shl(Expression)<block_start>""" Shift left operation. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct a shift left object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val<lshift>rhs.val)<block_end><if_stmt>lhs.is_int(0)<block_start><return>Terminal(0)<block_end><if_stmt>rhs.is_int(0)<block_start><return>lhs<block_end><return>Shl(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' << '+str(self.rhs)<block_end><block_end><class_stmt>Shr(Expression)<block_start>""" Shift right operation. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct a shift right object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. 
"""<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val<rshift>rhs.val)<block_end><if_stmt>lhs.is_int(0)<block_start><return>Terminal(0)<block_end><if_stmt>rhs.is_int(0)<block_start><return>lhs<block_end><return>Shr(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' >> '+str(self.rhs)<block_end><block_end><class_stmt>Or(Expression)<block_start>""" Logical or operation. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct a logical and object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val|rhs.val)<block_end><if_stmt>lhs.is_int(0)<block_start><return>rhs<block_end><if_stmt>rhs.is_int(0)<block_start><return>lhs<block_end><return>Or(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' | '+str(self.rhs)<block_end><block_end><class_stmt>And(Expression)<block_start>""" Logical and operation. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct a logical and object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val&rhs.val)<block_end><if_stmt>lhs.is_int(0)<or>rhs.is_int(0)<block_start><return>Terminal(0)<block_end><return>And(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. 
"""<line_sep><return>str(self.lhs)+' & '+str(self.rhs)<block_end><block_end><class_stmt>Xor(Expression)<block_start>""" Logical xor operation. """<def_stmt>__init__ self lhs rhs<block_start>""" Construct a logical xor object. """<line_sep>self.lhs=_classify(lhs)<line_sep>self.rhs=_classify(rhs)<block_end><def_stmt>simplify self<block_start>""" Return a simplified version of this sub-expression. """<line_sep>lhs=self.lhs.simplify()<line_sep>rhs=self.rhs.simplify()<if_stmt>lhs.is_int()<and>rhs.is_int()<block_start><return>Terminal(lhs.val^rhs.val)<block_end><if_stmt>lhs.is_int(0)<block_start><return>rhs<block_end><if_stmt>rhs.is_int(0)<block_start><return>lhs<block_end><return>Xor(lhs rhs)<block_end><def_stmt>__str__ self<block_start>""" Return the string expression of this object. """<line_sep><return>str(self.lhs)+' ^ '+str(self.rhs)<block_end><block_end>
""" --- title: Hierarchical Transformers Are More Efficient Language Models summary: > This is an annotated implementation/tutorial of hourglass model in PyTorch. --- # Hierarchical Transformers Are More Efficient Language Models This is a [PyTorch](https://pytorch.org) implementation of the paper [Hierarchical Transformers Are More Efficient Language Models](https://papers.labml.ai/paper/2110.13711). This paper introduces a hierarchical transformer architecture to handle long sequences efficiently. The first half of the transformer layers down-sample tokens and the second half up-samples with direct skip connections between layers of the same resolution. This is a little similar to [U-Net](../../diffusion/ddpm/unet.html) for vision tasks. They try different up-sampling and down-sampling techniques and build a model with the best performing up and down-sampling techniques which they call the hourglass model. Here we have implemented the simplest up-sampling and down-sampling techniques for simplicity. We will consider adding more complex (and better performing) implementations later. Here is [the training code](experiment.html) for the hourglass model. [![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://app.labml.ai/run/855b82363e4911ec9ae4a5b9c69d5061) """<import_from_stmt>typing List<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>labml_helpers.module Module<import_from_stmt>labml_nn.transformers MultiHeadAttention TransformerLayer<import_from_stmt>labml_nn.transformers.feed_forward FeedForward<import_from_stmt>labml_nn.transformers.utils subsequent_mask<class_stmt>HourGlass(Module)<block_start>""" ## Hourglass model This model recursively adds layers to the middle while shortening the sequence by down-sampling. The shortened sequence processed by another hourglass model is sandwiched between two normal transformer layers. 
(A transformer layer has a [self-attention layer](../mha.html) and a [position-wise feed-forward layer](../feed_forward.html)). """<def_stmt>__init__ self n_heads:int d_model:int dropout:float d_ff:int shortening_factors:List[int]<block_start>""" * `n_heads` is the number of heads in [multi-head attention layers](../mha.html) * `d_model` is the size of the token embeddings * `dropout` is the dropout probability * `d_ff` is the dimensionality of the hidden layer in [position-wise feed-forward layers](../feed_forward.html) * `shortening_factors` is the list of shortening factors """<line_sep>super().__init__()<line_sep># The transformer layer before down-sampling self.pre=TransformerLayer(d_model=d_model # [Multi-head attention layer](../mha.html) self_attn=MultiHeadAttention(n_heads d_model dropout) # [Position wise feed-forward layers](.. / feed_forward.html) feed_forward=FeedForward(d_model d_ff dropout) # dropout_prob=dropout)<line_sep># Auto-regressive mask self.mask=AutoregressiveMask()<line_sep># The shortening factor $k$ (or the down-sampling rate) k=shortening_factors[0]<line_sep># We shift the tokens to the right by $k - 1$ steps to make sure # information doesn't leak from the future tokens to past tokens # as a result of down-sampling and up-sampling self.shift_right=ShiftRight(k-1)<line_sep># Shortening or the down-sampling layer. We use the simplest form - average pooling. # The paper shows that attention based down sampling works best, which we haven't implemented yet. 
self.shortening=AvgPoolShortening(k)<line_sep># If there are no more shortening (middle of the hourglass) <if_stmt>len(shortening_factors)<eq>1# The center layer is another transformer layer <block_start>self.shortened=TransformerLayer(d_model=d_model self_attn=MultiHeadAttention(n_heads d_model dropout) feed_forward=FeedForward(d_model d_ff dropout) dropout_prob=dropout)<line_sep># Autoregressive mask self.mask_short=AutoregressiveMask()<line_sep>self.hour_glass=<none><block_end><else_stmt># Insert another hourglass model recursively <block_start>self.hour_glass=HourGlass(n_heads d_model dropout d_ff shortening_factors[1:])<block_end># Up-sampling layer. We use naive up-sampling for simplicity and the paper shows attention based up sampling # works better. self.up_sampling=NaiveUpSampling(k)<line_sep># The final transformer layer after up-sampling self.post=TransformerLayer(d_model=d_model self_attn=MultiHeadAttention(n_heads d_model dropout) feed_forward=FeedForward(d_model d_ff dropout) dropout_prob=dropout)<block_end><def_stmt>forward self x:torch.Tensor# Initial transformer layer # $$x \leftarrow PreVanillaLayers(x)$$ <block_start>x=self.pre(x=x mask=self.mask(x))<line_sep># Shifting and shortening # $$x' \leftarrow Shortening(ShiftRight(x,k−1),k)$$ x_short=self.shortening(self.shift_right(x))<line_sep># If we are at the center of the hourglass, # $$\textbf{\small if } \text{\small E\scriptsize MPTY}(shorten\_factors) \textbf{\small then}$$ <if_stmt>self.hour_glass<is><none># Center transformer layer # $$x' \leftarrow ShortenedLayers(x')$$ <block_start>x_short=self.shortened(x=x_short mask=self.mask_short(x_short))<block_end># $$\textbf{else}$$ <else_stmt># $$x' \leftarrow \text{\small H\scriptsize OURGLASS}(x, shorten\_factors)$$ <block_start>x_short=self.hour_glass(x_short)<block_end># Up-sample the shortened sequence and add a skip connection # $$x \leftarrow x + Upsampling(x, x', k)$$ x=x+self.up_sampling(x x_short)<line_sep># Final transformer layer # $$x 
\leftarrow PostVanillaLayers(x)$$ x=self.post(x=x mask=self.mask(x))<line_sep># <return>x<block_end><block_end><class_stmt>ShiftRight(Module)<block_start>""" ### Shift right operation This shifts the sequence to the right by the given number of steps """<def_stmt>__init__ self shift:int<block_start>""" * `shift` is the number of steps to shift by """<line_sep>super().__init__()<line_sep># cannot be negative <assert_stmt>shift<ge>0<line_sep># self.shift=shift<block_end><def_stmt>forward self x:torch.Tensor<block_start>""" * `x` is a tensor of shape `[seq_len, ...]` """<line_sep># If the shift is $0$ return the original <if_stmt>self.shift<eq>0<block_start><return>x<block_end># Zeros to be appended to the left prefix=x.new_zeros([self.shift *x.shape[1:]])<line_sep># Concatenate the zeros and truncate the right <return>torch.cat([prefix x[:-self.shift]])<block_end><block_end><class_stmt>AvgPoolShortening(Module)<block_start>""" ### Average pool shortening This down-samples by a given factor with average pooling """<def_stmt>__init__ self k:int<block_start>""" * `k` is the shortening factor """<line_sep>super().__init__()<line_sep># Average pooling layer self.pool=nn.AvgPool1d(k ceil_mode=<true>)<block_end><def_stmt>forward self x:torch.Tensor<block_start>""" * `x` is of shape `[seq_len, batch_size, d_model]` """<line_sep># Pooling layer accepts shape `[batch_size, d_model, seq_len]` so we # permute axes. 
<return>self.pool(x.permute(1 2 0)).permute(2 0 1)<block_end><block_end><class_stmt>NaiveUpSampling(Module)<block_start>""" ### Naive up-sampling This up-samples by repeating """<def_stmt>__init__ self k:int<block_start>""" * `k` is the shortening factor """<line_sep>super().__init__()<line_sep>self.k=k<block_end><def_stmt>forward self x:torch.Tensor x_short:torch.Tensor<block_start>""" * `x` is the tensor with embeddings before down-sampling * `x_short` is the tensor of higher density (to be up-sampled) representations """<line_sep># Repeat across the sequence dimension expanded=torch.repeat_interleave(x_short self.k dim=0)<line_sep># Truncate the extra embeddings at the end expanded=expanded[:x.shape[0]]<line_sep># <return>expanded<block_end><block_end><class_stmt>AutoregressiveMask(Module)<block_start>""" ### Generate auto-regressive mask """<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.mask=<none><block_end><def_stmt>forward self x:torch.Tensor# Create a mask if we haven't created or sizes have changed <block_start><if_stmt>self.mask<is><none><or>self.mask.size(0)<ne>len(x)# [Subsequent mask](../utils.html), will mask out tokens from seeing future tokens <block_start>self.mask=subsequent_mask(len(x)).to(x.device)<block_end># <return>self.mask<block_end><block_end><class_stmt>LinearPoolingShortening(Module)<block_start>""" ### 🚧 Linear pooling for down-sampling This concatenates the consecutive tokens embeddings that need to be merged and do a linear transformation to map it to the size of a single token embedding. """<def_stmt>__init__ self<block_start>super().__init__()<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>AttentionBasedShortening(Module)<block_start>""" ### 🚧 Down-sampling with attention \begin{align} x' &= S(x) + Attention \Big(Q=S(x),K = x, V =x \Big) \\ x' &= x' + FFN(x') \end{align} where $S(x)$ is average pooling or linear pooling. 
"""<def_stmt>__init__ self<block_start>super().__init__()<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>LinearUpSampling(Module)<block_start>""" ### 🚧 Linear projection for up-sampling Make a linear projection of dense token embeddings to a size of $d_{\text{model}} k$. """<def_stmt>__init__ self<block_start>super().__init__()<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>AttentionBasedUpSampling(Module)<block_start>""" ### 🚧 Attention based up-sampling \begin{align} x &= U(x,x') + Attention \Big(Q=U(x,x'),K = x', V = x' \Big) \\ x &= x + FFN(x) \end{align} where $U(x,x') = x + LinearUpsampling(x')$ """<def_stmt>__init__ self<block_start>super().__init__()<line_sep><raise>NotImplementedError<block_end><block_end>
# FIXME: need to break down config manager testing a bit more # @pytest.mark.parametrize('pass_del_cfg', (True, False)) # def test_config_manager_init(mocker, pass_del_cfg): # """NOTE: unlike other configs this one validates itself on creation # """ # # Mocks # patch_del_cfg = mocker.patch('jobfunnel.config.manager.DelayConfig') # patch_os = mocker.patch('jobfunnel.config.manager.os') # patch_os.path.exists.return_value = False # check it makes all paths # mock_master_csv = mocker.Mock() # mock_block_list = mocker.Mock() # mock_dupe_list = mocker.Mock() # mock_cache_folder = mocker.Mock() # mock_search_cfg = mocker.Mock() # mock_proxy_cfg = mocker.Mock() # mock_del_cfg = mocker.Mock() # # FUT # cfg = JobFunnelConfigManager( # master_csv_file=mock_master_csv, # user_block_list_file=mock_block_list, # duplicates_list_file=mock_dupe_list, # cache_folder=mock_cache_folder, # search_config=mock_search_cfg, # delay_config=mock_del_cfg if pass_del_cfg else None, # proxy_config=mock_proxy_cfg, # log_file='', # TODO optional? # ) # # Assertions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # United States Government Sponsorship acknowledged. This software is subject to # U.S. export control laws and regulations and has been classified as 'EAR99 NLR' # (No [Export] License Required except when exporting to an embargoed country, # end user, or in support of a prohibited end use). By downloading this software, # the user agrees to comply with all applicable U.S. export laws and regulations. # The user has the responsibility to obtain export licenses, or other export # authority as may be required before exporting this software to any 'EAR99' # embargoed foreign country or citizen of those countries. # # Author: <NAME> #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <import_stmt>isce<import_stmt>stdproc<import_stmt>isceobj<import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>Poly2d Polynomial<import_from_stmt>stdproc.stdproc.offsetpoly.Offsetpoly Offsetpoly<line_sep>logger=logging.getLogger('dense')<def_stmt>load_pickle step='outliers1'<block_start><import_stmt>cPickle<line_sep>insarObj=cPickle.load(open('PICKLE/{0}'.format(step) 'rb'))<line_sep><return>insarObj<block_end><def_stmt>runOffPolyISCE offField<block_start>''' Estimate polynomial here. 
'''<line_sep>inArr=np.array(offField.unpackOffsets())<line_sep>x=inArr[: 0]<line_sep>y=inArr[: 2]<line_sep>dx=inArr[: 1]<line_sep>dy=inArr[: 3]<line_sep>sig=inArr[: 4]<line_sep>obj=Offsetpoly()<line_sep>obj.setLocationAcross(list(x))<line_sep>obj.setLocationDown(list(y))<line_sep>obj.setSNR(list(sig))<line_sep>obj.setOffset(list(dy))<line_sep>obj.offsetpoly()<line_sep>val=obj.offsetPoly<line_sep># print('Range: ', val) azpol=Polynomial(rangeOrder=2 azimuthOrder=2)<line_sep>azpol.setCoeffs([[val[0] val[1] val[4]] [val[2] val[3]] [val[5]]])<line_sep>obj.setOffset(list(dx))<line_sep>obj.offsetpoly()<line_sep>val=obj.offsetPoly<line_sep># print('Azimuth: ', val) rgpol=Polynomial(rangeOrder=2 azimuthOrder=2)<line_sep>rgpol.setCoeffs([[val[0] val[1] val[4]] [val[2] val[3]] [val[5]]])<line_sep><return>azpol rgpol<block_end><def_stmt>runOffPoly offField<block_start>''' Estimate polynomial here. '''<line_sep>inArr=np.array(offField.unpackOffsets())<line_sep>x=inArr[: 0]<line_sep>y=inArr[: 2]<line_sep>dx=inArr[: 1]<line_sep>dy=inArr[: 3]<line_sep>sig=inArr[: 4]<line_sep>snr=1.0+1.0/sig<line_sep>xOrder=2<line_sep>yOrder=2<line_sep>#####Normalization factors ymin=np.min(y)<line_sep>ynorm=np.max(y)-ymin<if_stmt>ynorm<eq>0<block_start>ynorm=1.0<block_end>yoff=np.int(np.round(np.mean(dy)))<line_sep>y=(y-ymin)/ynorm<line_sep>xmin=np.min(x)<line_sep>xnorm=np.max(x)-xmin<if_stmt>xnorm<eq>0<block_start>xnorm=1.0<block_end>x=(x-xmin)/xnorm<line_sep>arrList=[]<for_stmt>ii range(yOrder+1)<block_start>yfact=np.power(y ii)<for_stmt>jj range(yOrder+1-ii)<block_start>temp=np.power(x jj)<times>yfact<line_sep>arrList.append(temp.reshape((temp.size 1)))<block_end><block_end>A=np.hstack(arrList)<line_sep>A=A/snr[: <none>]<line_sep>b=dy/snr<line_sep>val,res,rank,eigs=np.linalg.lstsq(A b rcond=1.0e-12)<line_sep>print('Az Chi : ' np.sqrt(res/(1.0<times>len(b))))<line_sep>azpol=Polynomial(rangeOrder=2 azimuthOrder=2)<line_sep>azpol.setCoeffs([val[0:3] val[3:5] 
val[5:]])<line_sep>azpol._meanRange=xmin<line_sep>azpol._normRange=xnorm<line_sep>azpol._meanAzimuth=ymin<line_sep>azpol._normAzimuth=ynorm<line_sep>b=dx/snr<line_sep>val,res,rank,eigs=np.linalg.lstsq(A b rcond=1.0e-12)<line_sep>print('Rg chi : ' np.sqrt(res/(1.0<times>len(b))))<line_sep>rgpol=Polynomial(rangeOrder=2 azimuthOrder=2)<line_sep>rgpol.setCoeffs([val[0:3] val[3:5] val[5:]])<line_sep>rgpol._meanRange=xmin<line_sep>rgpol._normRange=xnorm<line_sep>rgpol._meanAzimuth=ymin<line_sep>rgpol._normAzimuth=ynorm<line_sep><return>azpol rgpol<block_end><if_stmt>__name__<eq>'__main__'<block_start>iObj=load_pickle()<line_sep>print('Done loading pickle')<line_sep>width=iObj.getReferenceSlcImage().getWidth()<line_sep>length=iObj.getReferenceSlcImage().getLength()<line_sep>print('Image Dimensions: ' length width)<line_sep>print('Results from numpy code')<line_sep>azpol,rgpol=runOffPoly(iObj.getRefinedOffsetField())<line_sep>print('Upper Left: ' rgpol(1 0) azpol(1 0))<line_sep>print('Upper Right: ' rgpol(1 width-1) azpol(1 width-1))<line_sep>print('Lower Left: ' rgpol(length+1 0) azpol(length+1 0))<line_sep>print('Lower Right: ' rgpol(length+1 width-1) azpol(length+1 width-1))<line_sep>print('Results from old method')<line_sep>az1,rg1=runOffPolyISCE(iObj.getRefinedOffsetField())<line_sep>print('Upper Left: ' rg1(1 0) az1(1 0))<line_sep>print('Upper Right: ' rg1(1 width-1) az1(1 width-1))<line_sep>print('Lower Left: ' rg1(length+1 0) az1(length+1 0))<line_sep>print('Lower Right: ' rg1(length+1 width-1) az1(length+1 width-1))<block_end>
# ---------------------------------------------------------------------------
# tf2-trade-bot: automated Steam/TF2 trade bot.
# Loads buy/sell prices from trade.csv, polls Steam for incoming trade offers,
# values each offer in a packed "ref.decimal" currency encoding (see
# add_values), and accepts/declines accordingly, with backpack.tf/steamrep
# scammer checks and backpack.tf heartbeats.
# ---------------------------------------------------------------------------
import os
import sys
import json
import time
from distutils.version import LooseVersion
import importlib
import pip
from enum import Enum
import logging
import csv
import subprocess

# pip >= 10 removed the public pip.main; fall back to the internal entry point
# so check_install() can still auto-install missing packages.
try:
    main = pip.main
except AttributeError:
    # module 'pip' has no attribute 'main'
    from pip._internal import main

# Credentials and pricing globals. All of these are (re)assigned inside the
# __main__ block below from settings.json / backpack.tf; module-level values
# are only placeholders.
apikey = ''
password = ''
username = ''
bkey = ''
buy_trades = {}    # market_name -> price (ref-encoded float) for items we buy
sell_trades = {}   # market_name -> price (ref-encoded float) for items we sell
items = {}         # NOTE(review): never used anywhere below
key_price = 0      # value of one key in the ref-encoded float format (see add_values)
bud_price = 0      # value of one earbud, same encoding
escrow = None      # NOTE(review): never used; the live flag is accept_escrow (set in __main__)
whitelist = []     # steam id64 strings whose offers are accepted unconditionally
currencies = {'bud': 'Earbuds', 'ref': 'Refined Metal', 'rec': 'Reclaimed Metal',
              'scrap': 'Scrap Metal', 'key': 'Mann Co. Supply Crate Key'}
packages = ['steampy', 'requests']  # third-party packages verified by check_install()
declined_trades = None
past_time = time.time()    # timestamp of the last backpack.tf heartbeat
start_time = time.time()

logging.basicConfig(filename='trade.log', level=logging.DEBUG,
                    format='[%(asctime)s][%(levelname)s][%(name)s]: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')

# Start-up banner printed by __main__ (runtime string: keep byte-for-byte).
start_text = """ _____ _____ ____ _____ ____ _ ____ U _____ u ____ U ___ u _____ |_ " _| |" ___||___"\ |_ " _|U | _"\ u U /"\ u | _"\ \| ___"|/ U | __")u \/"_ \/|_ " _| | | U| |_ uU __) | | | \| |_) |/ \/ _ \/ /| | | | | _|" \| _ \/ | | | | | | /| |\ \| _|/ \/ __/ \ /| |\ | _ < / ___ \ U| |_| |\| |___ | |_) |.-,_| |_| | /| |\ u |_|U |_| |_____|u u |_|U |_| \_\ /_/ \_\ |____/ u|_____| |____/ \_)-\___/ u |_|U _// \\\_ )(\\\,- << // _// \\\_ // \\\_ \\\ >> |||_ << >> _|| \\\_ \\\ _// \\\_ (__) (__)(__)(_/(__)(__) (__) (__)(__) (__)(__) (__)(__)_) (__) (__) (__) (__) (__) (__) (__) Created by: Zwork101 Github: https://github.com/Zwork101 Steam: https://steamcommunity.com/id/ZWORK101 THIS VERSION IS NO LONGER UNDER DEVELOPMENT AND BUGS WILL NOT BE FIXED. IT IS HIGHLY RECOMMENDED TO SWITCH TO THE NEW VERSION. 
YOU CAN FIND THIS AT: https://github.com/mninc/tf2-trade-bot-2\n """


class TradeOfferStatus(Enum):
    """Steam trade-offer state codes, as returned by get_trade_offer.

    Values mirror Steam's ETradeOfferState (note 5 and 7 are intentionally
    absent here).
    """
    INVALID = 1
    ACTIVE = 2
    ACCEPTED = 3
    EXPIRED = 4
    CANCELED = 6
    INVALID_ITEMS = 8
    WAIT_CONF = 9
    WAIT_SFAC = 10
    ESCROW = 11


class TradeManager:
    """Organizes the lifecycle of incoming trade offers.

    Pipeline (driven by the __main__ loop):
      get_new_trades -> check_trades_content -> check_bad_trades ->
      check_good_trades -> confirm_check

    Attributes:
        client: steampy SteamClient used for all Steam web API calls.
        conf: steampy ConfirmationExecutor for mobile confirmations.
        _trades: offers accepted (or being accepted), awaiting final state.
        _pending_trades: offers received but not yet valued.
        _try_confs: offer ids whose confirmation failed once and is retried.
        _declined_trades: offer ids we declined (kept to avoid re-processing).
    """

    def __init__(self, client, conf):
        self._trades = []
        self._pending_trades = []
        self._try_confs = []
        self._declined_trades = []
        self.client = client
        self.conf = conf

    def decline(self, trade):
        # decline_trades is a global loaded from settings.json in __main__
        # (default 1). When falsy, offers are only remembered, not declined.
        if decline_trades:
            self.client.decline_trade_offer(trade.id)
        if trade.id not in self._declined_trades:
            self._declined_trades.append(trade.id)

    def accept(self, trade):
        """Try to accept `trade` on Steam.

        Returns True on success, False on any failure. On KeyError (seen when
        confirmation data is missing) the trade is queued again in
        _pending_trades for another attempt.

        NOTE(review): catching BaseException also swallows KeyboardInterrupt /
        SystemExit here — consider narrowing.
        """
        try:
            self.client.accept_trade_offer(trade.id)
            return True
        except BaseException as BE:
            if BE.__class__ == KeyError:
                print(f'ERROR: Issue confirming trade: {trade.id}, trying again')
                #self._trades.remove(trade)
                self._pending_trades.append(trade)
            logging.warning(f'TRADE ACCEPT ERROR: {type(BE).__name__}: {BE}')
            return False

    def check_trades_content(self):
        """Value every pending offer and accept/decline it.

        An offer is acceptable when the total value of what we give
        (sell prices + any pure-currency items) is <= the total value of what
        we receive (buy prices + pure currency). Gift offers (nothing given)
        are accepted immediately. Offers giving an item that is neither listed
        for sale nor pure currency are declined.
        """
        # Iterate in reverse by index so removal during iteration is safe.
        for trade in range(len(self._pending_trades)-1, -1, -1):
            trade = self._pending_trades[trade]
            sell_value = 0
            buy_value = 0
            extra_sell = []   # currency items we give, valued via calculate()
            extra_buy = []    # currency items we receive
            # Pure gift: we give nothing, accept unconditionally.
            if not trade.items_to_give:
                self._pending_trades.remove(trade)
                self._trades.append(trade)
                self.accept(trade)
                continue
            exit_trade = False
            for item in trade.items_to_give:
                if not exit_trade:
                    if item not in sell_trades:
                        if item in currencies.values():
                            extra_sell.append(item)
                        else:
                            print('[TRADE]: Unknown item we\'re giving, declining')
                            self.decline(trade)
                            self._pending_trades.remove(trade)
                            logging.info("DECLINING TRADE WITH UN-KNOWN ITEM")
                            exit_trade = True
                    else:
                        sell_value = add_values(float(sell_trades[item]), float(sell_value))
            if exit_trade:
                continue
            # Unknown received items are simply ignored (count as 0 value).
            for item in trade.items_to_receive:
                if item in buy_trades:
                    buy_value = add_values(float(buy_trades[item]), float(buy_value))
                elif item in currencies.values():
                    extra_buy.append(item)
            sell_curr = sort(extra_sell)
            buy_curr = sort(extra_buy)
            # NOTE(review): += on the packed encoding bypasses add_values'
            # carry logic; exact only while the fractional parts don't overflow.
            sell_value += calculate(sell_curr[0], sell_curr[1], sell_curr[2], sell_curr[3], sell_curr[4])
            buy_value += calculate(buy_curr[0], buy_curr[1], buy_curr[2], buy_curr[3], buy_curr[4])
            if sell_value <= buy_value:
                print(f'[TRADE]: Looks good! They gave us:\n{str(trade.items_to_receive)}')
                print(f'[TRADE]: We gave them:\n{str(trade.items_to_give)}')
                print('[TRADE]: Attempting to accept offer')
                try:
                    logging.info(f"ATTEMPTING TRADE: {trade.id}\nSELL: {sell_value} BUY:{buy_value}\n{trade.trade}")
                    self._trades.append(trade)
                    self._pending_trades.remove(trade)
                    self.accept(trade)
                except ConfirmationExpected:
                    # Mobile confirmation wasn't ready yet; retry in confirm_check().
                    logging.warning(f'FAILED TO CONFIRM TRADE: {trade.id} (FIRST TRY)')
                    self._try_confs.append(trade.id)
            else:
                print(f'[TRADE]: No good! They offered us:\n{str(trade.items_to_receive)}')
                print(f'[TRADE]: For our:\n{str(trade.items_to_give)}')
                print('[TRADE]: Declining offer')
                logging.info(f"DECLINING INVALID TRADE: {trade.id}\nSELL: {sell_value} BUY:{buy_value}\n{trade.trade}")
                self.decline(trade)
                self._pending_trades.remove(trade)

    def check_good_trades(self):
        """Drop offers from _trades once Steam reports them ACCEPTED."""
        for trade_index in range(len(self._trades)-1, -1, -1):
            trade = self._trades[trade_index]
            status = trade.status()
            if status == TradeOfferStatus.ACCEPTED.value:
                print(f'[TRADE]: Accepted trade {trade.id}')
                self._trades.remove(trade)
                logging.info(f'TRADE {trade.id} WAS ACCEPTED')

    def get_new_trades(self):
        """Fetch received offers from Steam and triage them.

        Whitelisted senders are accepted immediately. Others go through
        _check_partner (scammer check); escrow offers are declined when
        accept_escrow is falsy; the rest join _pending_trades for valuation.

        NOTE(review): uses the module-level `client`, not self.client, so this
        only works after __main__ has created the global.
        """
        new_trades = client.get_trade_offers()['response']
        #logging.debug(new_trades)
        for new_trade in new_trades['trade_offers_received']:
            # Skip offers we already track; re-examine ones we declined
            # earlier (the `or` keeps them flowing back through triage).
            if (not new_trade['tradeofferid'] in [t.id for t in self._trades]) or (new_trade['tradeofferid'] in self._declined_trades):
                # Convert 32-bit account id to 64-bit steam id.
                id64 = 76561197960265728 + new_trade['accountid_other']
                trade = Trade(new_trade, id64)
                logging.info(f"FOUND NEW TRADE: {trade.id}")
                if str(id64) in whitelist:
                    print(f"[WHITELIST]: Neat! The user sending this trade is whitelisted! Attempting confirmation (STEAM ID:{id64})")
                    logging.info(f'TRADE WHITELISTED ATTEMPTING TRADE: {trade.id}')
                    self.accept(trade)
                    self._trades.append(trade)
                    continue
                print(f'[TRADE]: Found trade (ID: {trade.id})')
                if self._check_partner(trade):
                    if not accept_escrow and trade.escrow:
                        print("[TRADE]: Trade is escrow, declining")
                        logging.info(f'DECLINING ESCROW TRADE: {trade.trade}')
                        self.decline(trade)
                    else:
                        self._pending_trades.append(trade)

    def _check_partner(self, trade):
        """Return False (and decline) if the partner is flagged as a scammer.

        Consults backpack.tf's users/info endpoint, which embeds steamrep data.

        NOTE(review): `data=` puts the key/steamids in the request body; a GET
        normally carries these as query parameters (`params=`) — verify the
        endpoint actually honours this, otherwise every user looks "clean".
        """
        print("[TRADE]: Checking for trade bans on backpack.tf and steamrep.com")
        rJson = requests.get(f"https://backpack.tf/api/users/info/v1?",
                             data={'key': bkey, 'steamids': trade.other_steamid}).json()
        logging.debug(str(rJson))
        if "bans" in rJson['users'][trade.other_steamid].keys():
            if "steamrep_caution" in rJson['users'][trade.other_steamid]['bans'] or "steamrep_scammer" in rJson['users'][trade.other_steamid]['bans']:
                print("[steamrep.com]: SCAMMER")
                print('[TRADE]: Ending trade...')
                logging.info(f"DECLINED SCAMMER (ID:{trade.other_steamid})")
                self.decline(trade)
                return False
            print('[steamrep.com]: User is not banned')
            if "all" in rJson['users'][trade.other_steamid]['bans']:
                print('[backpack.tf]: SCAMMER')
                print('[TRADE]: Ending trade...')
                logging.info(f"DECLINED SCAMMER (ID:{trade.other_steamid})")
                self.decline(trade)
                return False
            print('[backpack.tf]: User is clean')
        print("[backpack.tf/steamrep.com]: User is clean")
        return True

    def check_bad_trades(self):
        """Purge offers from _trades that reached a terminal bad state."""
        for trade_index in range(len(self._trades)-1, -1, -1):
            trade = self._trades[trade_index]
            status = trade.status()
            if status == TradeOfferStatus.INVALID.value:
                print(f'[ERROR]: Trade offer id {trade.id} seems to be invalid')
                self._trades.remove(trade)
                logging.warning(f'TRADE {trade.id} BECAME invalid')
            elif status == TradeOfferStatus.CANCELED.value:
                print(f'[TRADE]: Trade {trade.id} was canceled.')
                self._trades.remove(trade)
                logging.warning(f'TRADE {trade.id} BECAME canceled')
            elif status == TradeOfferStatus.EXPIRED.value:
                print(f'[TRADE]: Trade {trade.id} has expired... How did that happen?')
                self._trades.remove(trade)
                logging.warning(f'TRADE {trade.id} BECAME expired')
            elif status == TradeOfferStatus.INVALID_ITEMS.value:
                print(f'[TRADE]: Items attempting to trade became invalid. {trade.id}')
                self._trades.remove(trade)
                logging.warning(f'TRADE {trade.id} BECAME invalid_items')
            elif status == TradeOfferStatus.ESCROW.value and not accept_escrow:
                # Should have been filtered in get_new_trades; log loudly.
                print('[ERROR]: Whoops, escrow trade was confirmed. Sorry about that')
                self._trades.remove(trade)
                logging.fatal(f'ACCEPTED ESCROW TRADE')

    def confirm_check(self):
        """Process mobile confirmations per the confirm_settings global.

        'all'   -> confirm every pending confirmation via steampy internals.
        'trade' -> retry only the offer ids queued in _try_confs.
        """
        if confirm_settings == 'all':
            logging.debug('ACCEPTING EVERYTHING')
            # NOTE(review): loop variable shadows the imported steampy
            # `confirmation` module; harmless here, but confusing.
            for confirmation in self.conf._get_confirmations():
                self.conf._send_confirmation(confirmation)
                logging.info(f'SENT CONFIRMATION FOR CONF WITH ID OF {confirmation.id}')
        elif confirm_settings == 'trade':
            for tradeid in self._try_confs:
                try:
                    self.conf.send_trade_allow_request(tradeid)
                    print(f'[TRADE]: Accepted trade {tradeid}')
                    logging.info(f'TRADE {tradeid} WAS ACCEPTED (after manual confirmation)')
                    self._try_confs.remove(tradeid)
                except ConfirmationExpected:
                    logging.debug(f'CONFIRMATION FAILED ON {tradeid}')


class Trade:
    """Immutable snapshot of one incoming trade offer.

    Attributes:
        trade: raw offer dict from Steam's get_trade_offers.
        escrow: escrow end date as int (0 / falsy means no escrow hold).
        items_to_give / items_to_receive: market names on each side.
        id: the offer id string.
        other_steamid: partner's 64-bit steam id, as a string.
    """

    def __init__(self, trade_json: dict, other_steamid: int):
        self.trade = trade_json
        self.escrow = int(trade_json['escrow_end_date'])
        self.items_to_give = self._items_to_give()
        self.items_to_receive = self._items_to_receive()
        self.id = trade_json["tradeofferid"]
        self.other_steamid = str(other_steamid)

    def _items_to_receive(self):
        """Market names of all items offered to us (init helper)."""
        item_names = []
        for assetID in self.trade['items_to_receive']:
            item_names.append(self.trade['items_to_receive'][assetID]['market_name'])
        return item_names

    def _items_to_give(self):
        """Market names of all items requested from us (init helper)."""
        item_names = []
        for assetID in self.trade['items_to_give']:
            item_names.append(self.trade['items_to_give'][assetID]['market_name'])
        return item_names

    def sort(self, typ):
        """Count currency items on one side of the trade.

        typ 'sell' counts what we receive; anything else counts what we give.
        Returns the [scrap, rec, ref, key, bud] list from the module-level
        sort() helper.
        """
        if typ == 'sell':
            return sort(self.items_to_receive)
        else:
            return sort(self.items_to_give)

    def status(self):
        """Fetch the live trade_offer_state code from Steam.

        A KeyError means the offer no longer exists, which is mapped to
        state 1 (INVALID).
        """
        try:
            trade_json = client.get_trade_offer(self.id)['response']['offer']
        except KeyError:
            # If key error, the trade doesn't exist anymore. If so, it's invalid
            trade_json = {'trade_offer_state': 1}
        return trade_json['trade_offer_state']


def add_values(v1, v2):
    """Add two prices in the bot's packed "ref.decimal" float encoding.

    The integer part counts Refined; the decimal digits encode
    rec*33 + scrap*11 (so one scrap is .11, one rec .33, three rec carry into
    1 ref). Carries are applied so the result stays canonical.

    NOTE(review): parsing via str(v).split('.') is fragile — it assumes both
    inputs are already canonical floats produced by this encoding.
    """
    v1_rem, v2_rem = int(str(v1).split('.')[1]), int(str(v2).split('.')[1])
    ref = int(v1) + int(v2)
    # Split each fractional part into rec units (33) and leftover scrap units.
    v1_rec, v2_rec = v1_rem // 33, v2_rem // 33
    v1_rem, v2_rem = v1_rem - v1_rec*33, v2_rem - v2_rec*33
    srp_added = v1_rem + v2_rem
    # Carry scrap -> rec, then rec -> ref.
    v1_rec += srp_added // 33
    srp_added -= (srp_added // 33) * 33
    rec_added = v1_rec + v2_rec
    ref += rec_added // 3
    rec_added -= (rec_added // 3) * 3
    return float(str(ref) + '.' + str(rec_added * 33 + srp_added))


def sort(items: list):
    """Count currency items by kind.

    Returns [scrap, rec, ref, key, bud] counts; non-currency names are ignored.
    (Despite the name, nothing is ordered — it tallies.)
    """
    curr = [0, 0, 0, 0, 0]
    for item in items:
        if item == currencies['scrap']:
            curr[0] += 1
        elif item == currencies['rec']:
            curr[1] += 1
        elif item == currencies['ref']:
            curr[2] += 1
        elif item == currencies['key']:
            curr[3] += 1
        elif item == currencies['bud']:
            curr[4] += 1
    return curr


def check_for_updates():
    """Compare local __version__ against GitHub and self-update on consent.

    Overwrites bot.py and __version__ in place, then exits so the user can
    restart the new version.
    """
    with open('__version__', 'r') as file:
        curr_version = file.read()
    r = requests.get('https://raw.githubusercontent.com/Zwork101/tf2-trade-bot/master/__version__')
    new_version = r.text
    if LooseVersion(new_version) > LooseVersion(curr_version):
        print('[PROGRAM]: A new version is available, would you like to install?')
        yn = input('[y/n]: ')
        if yn[0].lower() == 'y':
            print('[Installer]: Starting installation...', end='')
            bot_update = requests.get('https://raw.githubusercontent.com/Zwork101/tf2-trade-bot/master/bot.py')
            with open('__version__', 'w') as file:
                file.write(new_version)
                print('.', end='')
            with open('bot.py', 'w') as file:
                file.write(bot_update.text)
                print('.')
            print('Update complete! Restart now.')
            input('press enter to close program...\n')
            os._exit(0)


def calculate(scrapC, recC, refC, keyC, budC):
    """Total value of a currency bundle in the packed encoding.

    Repeatedly folds each unit through add_values so all carries are applied.
    Relies on the key_price / bud_price globals fetched in __main__.
    """
    # For each currency, add it using add_values function
    total_value = 0.0
    for scrap in range(scrapC):
        total_value = add_values(total_value, .11)
    for rec in range(recC):
        total_value = add_values(total_value, .33)
    for ref in range(refC):
        total_value = add_values(total_value, 1.0)
    for key in range(keyC):
        total_value = add_values(total_value, float(key_price))
    for bud in range(budC):
        total_value = add_values(total_value, float(bud_price))
    return total_value


def check_install(pkg, c, imp=''):
    """Ensure importable package `pkg` exists; pip-install and exit otherwise.

    c is the 1-based progress counter for the printout; imp overrides the pip
    package name when it differs from the import name.

    NOTE(review): the bare `except:` also hides real import-time errors inside
    an installed package, triggering a pointless reinstall.
    """
    try:
        importlib.import_module(pkg)
        print(f'[PROGRAM]: Required package is installed {c}/{len(packages)}')
        logging.debug(f"MODULE {pkg} IS INSTALLED")
    except:
        logging.info(f"MODULE {pkg} IS NOT INSTALLED")
        if imp:
            pkg = imp
        print('[PROGRAM]: A required package is not installed, installing...')
        main(['install', pkg])
        print('[PROGRAM]: Installed package! Please restart this program to continue.')
        input('press enter to close program...\n')
        os._exit(0)


# def check_trade(trade_obj, items_value, typ):
#     curr = trade_obj.sort(typ)
#     value = calculate(curr[0], curr[1], curr[2], curr[3], curr[4])
#     if typ == 'sell':
#         b_curr = trade_obj.sort('buy')
#         items_value += calculate(b_curr[0], b_curr[1], b_curr[2], b_curr[3], b_curr[4])
#     else:
#         s_curr = trade_obj.sort('sell')
#         items_value += calculate(s_curr[0], s_curr[1], s_curr[2], s_curr[3], s_curr[4])
#
#     logging.debug(f"TRADE {trade_obj.id} is a {typ} trade, and is worth {value}, with items being {items_value}")
#     if typ == 'sell':
#         if value >= items_value:
#             return True
#         else:
#             return False
#     else:
#         if value <= items_value:
#             return True
#         else:
#             return False


def heartbeat():
    """Send a backpack.tf heartbeat at most every 90 seconds.

    Uses/updates the past_time global; the `token` global is fetched from
    backpack.tf in __main__.
    """
    global past_time
    print(f"[HEARTBEAT]: ~{90-int(time.time()-past_time)} seconds until next heartbeat")
    if int(time.time() - past_time) >= 90:
        p = requests.post(f"https://backpack.tf/api/aux/heartbeat/v1?",
                          data={"token": token, "automatic": "all"})
        if p.status_code != 200:
            print(f'[HEARTBEAT]: Error when sending heartbeat: {p.json()["message"]}')
            logging.warning(f"ERROR SENDING HEARTBEAT: {p.json()['message']}")
        else:
            print("[HEARTBEAT]: Sent heartbeat to backpack.tf")
            logging.info("HEARTBEAT SENT")
            past_time = time.time()


if __name__ == '__main__':
    print(start_text)
    # Verify/install third-party deps before importing them below.
    for pkg in packages:
        check_install(pkg, packages.index(pkg)+1, '' if pkg != 'backpackpy' else 'backpack.py')
    # Imported here (not at top) so check_install can run first; the functions
    # above rely on these names being module-global by the time they're called.
    from steampy.client import SteamClient
    from steampy import confirmation
    from steampy.exceptions import InvalidCredentials, ConfirmationExpected
    #from backpackpy import listings
    import requests
    check_for_updates()
    # --- Load settings.json (create it interactively on first run) ---
    try:
        with open('settings.json', 'r') as cfg:
            try:
                data = json.load(cfg)
                try:
                    apikey, password, username, bkey, accept_escrow = data['apikey'], data['password'], data['username'], data['bkey'], data['accept_escrow']
                    token = requests.get(f"https://backpack.tf/api/aux/token/v1?key={bkey}").json()['token']
                    decline_trades = data.get('decline_trades', 1)
                    confirm_settings = data.get('confirm_options', 'trades')
                except KeyError as k:
                    logging.warning(f'SETTINGS FILE MISSING {k} VALUE')
                    print(f'[settings.json]: Whoops! You are missing the {k} value')
                    input('Press enter to close program...\n')
                    os._exit(1)
            except json.JSONDecodeError:
                logging.warning('INVALID SETTINGS FILE')
                print('[PROGRAM]: Whoops! It would seem that you settings.json file is invalid!')
                input('press enter to close program...\n')
                os._exit(1)
        logging.debug("LOADED SETTINGS")
    except FileNotFoundError:
        logging.warning("SETTINGS NOT FOUND, CREATING")
        print('[PROGRAM]: File settings.json not found! Would you like to make one?')
        yn = input('[y/n]: ')
        if yn[0].lower() == 'y':
            apikey = input('[settings.json]: Enter your steam API key. (https://steamcommunity.com/dev/apikey)\n')
            password = input('[settings.json]: Enter your password. \n')
            username = input('[settings.json]: Enter your username. \n')
            bkey = input('[settings.json]: Enter your backpack.tf API key. (https://backpack.tf/api/register)\n')
            accept_escrow = input('[settings.json]: Accept escrow trades? (0 for no, 1 for yes)\n')
            print('[PROGRAM]: Writing data to file...')
            with open('settings.json', 'w') as file:
                json.dump({'apikey': apikey, 'password': password, 'username': username,
                           'bkey': bkey, "accept_escrow": accept_escrow}, file)
            print('[PROGRAM]: Wrote to file')
            # NOTE(review): this first-run path never sets token /
            # decline_trades / confirm_settings, so the bot will NameError
            # later; the user is expected to restart after creating settings.
        else:
            print("[PROGRAM]: Can't run without user information.")
            input('Press enter to close program...\n')
            os._exit(1)
    client = SteamClient(apikey)
    conf = None
    # --- Fetch key/earbud prices used by calculate() ---
    print('[PROGRAM]: Obtaining bud and key values from backpack.tf...')
    rJson = requests.get(f'https://backpack.tf/api/IGetCurrencies/v1?key={bkey}').json()['response']
    logging.debug(f"KEY VALUE RESPONSE: {rJson}")
    if rJson['success']:
        key_price = rJson['currencies']['keys']['price']['value']
        bud_price = rJson['currencies']['earbuds']['price']['value']
        print(f'[PROGRAM]: Obtained values! KEY <{key_price} ref>, BUD <{bud_price} keys>.')
        logging.debug("OBTAINED KEY AND BUD VALUES")
    else:
        logging.fatal("FAILED TO OBTAIN KEY AND BUG VALUES")
        print(f'[backpack.tf]: {rJson["message"]}')
        input('Press enter to close program...\n')
        os._exit(1)
    # --- Log in to Steam (third argument is the steamguard file path;
    #     '<PASSWORD>' looks like a redacted placeholder — verify). ---
    try:
        client.login(username, password, '<PASSWORD>')
    except json.decoder.JSONDecodeError:
        logging.warning("STEAMGUARD FILE INVALID")
        print('[steamguard.json]: Unable to read file.')
        input('Press enter to close program...\n')
        os._exit(1)
    except FileNotFoundError:
        logging.warning("UNABLE TO FIND STEAMGAURD FILE")
        print('[steamguard.json]: Unable to find file.')
        input('Press enter to close program...\n')
        os._exit(1)
    except InvalidCredentials:
        logging.info("CREDENTIALS INVALID")
        print('[PROGRAM]: Your username, password, ID and/or secrets are invalid.')
        input('Press enter to close program...\n')
        os._exit(1)
    else:
        conf = confirmation.ConfirmationExecutor(client.steam_guard['identity_secret'],
                                                 client.steam_guard['steamid'],
                                                 client._session)
        logging.info("CREATED CLIENT AND CONFIRMATION MANAGER")
    print(f'[PROGRAM]: Connected to steam! Logged in as {username}')
    # --- Load buy/sell prices from trade.csv ---
    # Columns: type (buy/sell), item_name ("$$" stands in for commas), and a
    # price of the form scrap.rec.ref.key.bud fed to calculate().
    try:
        with open('trade.csv', 'r') as file:
            reader = csv.DictReader(file)
            # count starts at 1 so the first data row reports as line 2
            # (header is line 1).
            count = 1
            fails = []
            for row in reader:
                count += 1
                try:
                    if row['type'].strip()[0].lower() == 's':
                        p = row['price'].split('.')
                        p = [int(i) for i in p]
                        price = calculate(p[0], p[1], p[2], p[3], p[4])
                        sell_trades[row['item_name'].strip().replace("$$", ",")] = price
                    elif row['type'].strip()[0].lower() == 'b':
                        p = row['price'].split('.')
                        p = [int(i) for i in p]
                        price = calculate(p[0], p[1], p[2], p[3], p[4])
                        buy_trades[row['item_name'].strip().replace("$$", ",")] = price
                except AttributeError:
                    # Malformed row (missing column -> None.strip()).
                    fails.append(count)
        logging.info(f'LOADED TRADE DATA: BUY: {buy_trades} SELL: {sell_trades}')
    except FileNotFoundError:
        logging.warning("TRADE FILE NOT FOUND")
        # NOTE(review): message says trade.data but the file opened is trade.csv.
        print('[trade.data]: Unable to find file.')
        input('Press enter to close program...\n')
        os._exit(1)
    print(f'[CSV]: Failed to load these lines: {fails}')
    print('[PROGRAM]: Finished loading trading data.')
    # yn = input("Would you like to sync to backpack.tf listings?\n[y/n]: ")
    # if yn[0].lower() == 'y':
    #     steamid = client.steam_guard['steamid']
    #     steam_inv = requests.get(f'http://steamcommunity.com/inventory/{steamid}/440/2?l=english&count=5000').json()
    #     bp_listings = requests.get("https://backpack.tf/api/classifieds/listings/v1?", data={'token':token}).json()
    #     class_id = False
    #     for classified in bp_listings["listings"]:
    #         asset_id = classified['id']
    #         for item in steam_inv['assets']:
    #             if item['assetid'] == classified['id']:
    #                 class_id = item['classid']
    #         if class_id:
    #             for item in steam_inv['descriptions']:
    #                 if item['classid'] == class_id:
    #                     market_name = item['market_name']
    #                     market_type = classified['intent']
    #                     ref, keys = classified['currencies']['metal'], classified['currencies']['keys']
    #                     sep = str(ref).split('.')
    #                     if len(sep) == 2:
    #                         price = calculate(int(sep[0])/11, 0, int(sep[0]), keys, 0)
    #                     else:
    #                         price = calculate(0, 0, int(ref), keys, 0)
    #                     if market_type:
    #                         sell_trades[market_name] = price
    #                     else:
    #                         buy_trades[market_name] = price
    #     print(buy_trades)
    #     print(sell_trades)
    #     os._exit(0)
    # --- Optional whitelist of trusted steam id64s (comma separated) ---
    try:
        with open('whitelist.data', 'r') as file:
            steam_ids = file.read()
            if steam_ids:
                for steam_id in steam_ids.split(','):
                    whitelist.append(steam_id)
        print(f'[WHITELIST]: Whitelist created with the following ids: {whitelist}')
        logging.info(f"LOADED WHITELIST: {whitelist}")
    except FileNotFoundError:
        logging.debug("WHITELIST NOT FOUND")
    print('[PROGRAM]: Everything ready, starting trading.')
    print('[PROGRAM]: Press Ctrl+C to close at any time.')
    manager = TradeManager(client, conf)
    # --- Main polling loop: heartbeat + the five manager steps, every ~10s ---
    while True:
        if time.time() - start_time >= 3600:
            # Hourly self-restart is disabled (left as a stub).
            pass
            #subprocess.call(["python", os.path.join(sys.path[0], __file__)] + sys.argv[1:])
        try:
            heartbeat()
            try:
                manager.get_new_trades()
                print('[TRADE-MANAGER] STEP 1 (get new trades) COMPLETE')
                logging.debug("(STEP 1 COMPLETE)")
            except json.decoder.JSONDecodeError:
                print("[PROGRAM]: Unexpected error, taking a break (10 seconds).")
                time.sleep(10)
                print('Starting again...')
                continue
            manager.check_trades_content()
            print('[TRADE-MANAGER]: STEP 2 (check new trades) COMPLETE')
            logging.debug("(STEP 2 COMPLETE)")
            manager.check_bad_trades()
            print('[TRADE-MANAGER]: STEP 3 (check for trades gone bad) COMPLETE')
            logging.debug("(STEP 3 COMPLETE)")
            manager.check_good_trades()
            print('[TRADE-MANAGER]: STEP 4 (check for successful trades) COMPLETE')
            logging.debug("(STEP 4 COMPLETE)")
            manager.confirm_check()
            print('[TRADE-MANAGER]: STEP 5 (check confirmations) COMPLETE')
            logging.debug("(STEP 5 COMPLETE)")
            print('[PROGRAM]: Cooling down... (10)')
        except InterruptedError:
            os._exit(0)
        except BaseException as BE:
            # NOTE(review): BaseException also swallows KeyboardInterrupt,
            # so Ctrl+C is only honoured via InterruptedError above.
            print(f'[ERROR]: {type(BE).__name__}: {BE}')
            logging.warning(f"UNEXPECTED ERROR: {type(BE).__name__}: {BE}")
        time.sleep(10)
"""
Example of defining a custom (image) transform using FFCV.

For tutorial, see https://docs.ffcv.io/ffcv_examples/custom_transforms.html.
"""
import time

import numpy as np
import torchvision

from ffcv.fields import IntField, RGBImageField
from ffcv.fields.decoders import SimpleRGBImageDecoder
from ffcv.loader import Loader, OrderOption
from ffcv.pipeline.compiler import Compiler
from ffcv.pipeline.operation import Operation, AllocationQuery
from ffcv.transforms import ToTensor
from ffcv.writer import DatasetWriter
from dataclasses import replace


class PickACorner(Operation):
    """FFCV transform that crops each image to one of two opposite corners,
    chosen uniformly at random per image."""

    def generate_code(self):
        """Return the compiled per-batch kernel (runs under FFCV's JIT)."""
        parallel_range = Compiler.get_iterator()

        def pick_a_corner(images, dst):
            # One uniform draw per image; `< 0.5` is a fair coin flip between
            # the two corners. (The previous `== 0` compared a float from
            # [0, 1) to exactly 0, which is essentially never true, so the
            # top-left corner was never selected.)
            which_corner = np.random.rand(images.shape[0])
            for i in parallel_range(images.shape[0]):
                if which_corner[i] < 0.5:
                    # Top-left quadrant.
                    dst[i] = images[i, :images.shape[1] // 2, :images.shape[2] // 2]
                else:
                    # Bottom-right quadrant.
                    dst[i] = images[i, -images.shape[1] // 2:, -images.shape[2] // 2:]
            return dst

        pick_a_corner.is_parallel = True
        return pick_a_corner

    def declare_state_and_memory(self, previous_state):
        """Advertise the halved output shape and request a matching buffer."""
        h, w, c = previous_state.shape
        new_shape = (h // 2, w // 2, c)
        new_state = replace(previous_state, shape=new_shape)
        mem_allocation = AllocationQuery(new_shape, previous_state.dtype)
        return (new_state, mem_allocation)


# Step 1: Create an FFCV-compatible CIFAR-10 dataset
ds = torchvision.datasets.CIFAR10('/tmp', train=True, download=True)
writer = DatasetWriter('/tmp/cifar.beton', {'image': RGBImageField(), 'label': IntField()})
writer.from_indexed_dataset(ds)

# Step 2: Create data loaders
BATCH_SIZE = 512

# Compare a pipeline with the custom transform against a plain decode pipeline.
image_pipelines = {'with': [SimpleRGBImageDecoder(), PickACorner(), ToTensor()],
                   'without': [SimpleRGBImageDecoder(), ToTensor()]}

for name, pipeline in image_pipelines.items():
    loader = Loader(f'/tmp/cifar.beton', batch_size=BATCH_SIZE, num_workers=16,
                    order=OrderOption.RANDOM, drop_last=True,
                    pipelines={'image': pipeline})

    # First epoch includes compilation time
    for ims, labs in loader:
        pass
    start_time = time.time()
    for _ in range(100):
        for ims, labs in loader:
            pass
    print(f'Method: {name} | Shape: {ims.shape} | Time per epoch: {(time.time() - start_time) / 100:.5f}s')
import unittest.mock

from programy.clients.config import ClientConfigurationData
from programy.clients.events.client import EventBotClient

from programytest.clients.arguments import MockArgumentParser


class MockEventBotClient(EventBotClient):
    """Minimal concrete EventBotClient: supplies a configuration and skips
    license-key loading, but leaves wait_and_answer abstract."""

    def __init__(self, id, argument_parser=None):
        EventBotClient.__init__(self, id, argument_parser)

    def get_client_configuration(self):
        return ClientConfigurationData("events")

    def load_license_keys(self):
        pass


class MockRunningEventBotClient(EventBotClient):
    """EventBotClient test double that records which run-loop hooks fired."""

    def __init__(self, id, argument_parser=None):
        EventBotClient.__init__(self, id, argument_parser)
        # Flipped to True by the corresponding hook below.
        self.prior = False
        self.ran = False
        self.post = False

    def get_client_configuration(self):
        return ClientConfigurationData("events")

    def load_license_keys(self):
        pass

    def prior_to_run_loop(self):
        self.prior = True

    def wait_and_answer(self):
        self.ran = True

    def post_run_loop(self):
        self.post = True


class EventBotClientTests(unittest.TestCase):

    def test_init_raw(self):
        """Instantiating the abstract base directly must be rejected."""
        args = MockArgumentParser()
        with self.assertRaises(NotImplementedError):
            client = EventBotClient("testevents", args)

    def test_init_actual(self):
        """A concrete subclass constructs, but wait_and_answer stays abstract."""
        args = MockArgumentParser()
        created = MockEventBotClient("testevents", args)
        self.assertIsNotNone(created)
        with self.assertRaises(NotImplementedError):
            created.wait_and_answer()

    def test_init_running(self):
        """run() must invoke the prior/loop/post hooks exactly as expected."""
        args = MockArgumentParser()
        runner = MockRunningEventBotClient("testevents", args)
        self.assertIsNotNone(runner)
        runner.run()
        self.assertTrue(runner.prior)
        self.assertTrue(runner.ran)
        self.assertTrue(runner.post)
import torch
from torch import nn as nn

from basicsr.archs.arch_util import ResidualBlockNoBN, Upsample, make_layer
from basicsr.utils.registry import ARCH_REGISTRY


@ARCH_REGISTRY.register()
class EDSR(nn.Module):
    """EDSR super-resolution network.

    Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution.
    Reference implementation: https://github.com/thstkdgus35/EDSR-PyTorch

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        num_feat (int): Channel number of intermediate features. Default: 64.
        num_block (int): Block number in the trunk network. Default: 16.
        upscale (int): Upsampling factor. Support 2^n and 3. Default: 4.
        res_scale (float): Residual scaling inside each block. Default: 1.
        img_range (float): Image range. Default: 255.
        rgb_mean (tuple[float]): Per-channel RGB mean subtracted before the
            trunk. Default: (0.4488, 0.4371, 0.4040), the DIV2K statistics.
    """

    def __init__(self, num_in_ch, num_out_ch, num_feat=64, num_block=16, upscale=4,
                 res_scale=1, img_range=255., rgb_mean=(0.4488, 0.4371, 0.4040)):
        super(EDSR, self).__init__()
        self.img_range = img_range
        # Broadcastable (1, 3, 1, 1) constant; not a registered buffer, so it
        # is re-cast to the input's dtype/device on every forward pass.
        self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)

        # Shallow feature extraction -> residual trunk -> upsampler -> output.
        self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(
            ResidualBlockNoBN, num_block, num_feat=num_feat, res_scale=res_scale, pytorch_init=True)
        self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.upsample = Upsample(upscale, num_feat)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)

    def forward(self, x):
        # Keep the normalization constant on the input's device/dtype.
        self.mean = self.mean.type_as(x)

        normalized = (x - self.mean) * self.img_range
        shallow = self.conv_first(normalized)
        deep = self.conv_after_body(self.body(shallow))
        deep += shallow  # global residual connection around the trunk
        restored = self.conv_last(self.upsample(deep))
        # Undo the input normalization on the way out.
        return restored / self.img_range + self.mean
# encoding: utf-8

import json
import yaml
import click

import googleanalytics as ga
from googleanalytics import utils
from .common import cli


# TODO: the blueprint stuff can probably be simplified so that
# it's little more than just a call to ga.describe
def from_blueprint(scope, src):
    """Build queries from a YAML blueprint file object.

    Authenticates interactively with the credentials described by the
    blueprint and returns the list of queries it defines.
    """
    # safe_load: blueprint files come from the user's filesystem; plain
    # yaml.load without an explicit Loader can execute arbitrary objects
    # and is deprecated since PyYAML 5.1.
    description = yaml.safe_load(src)
    blueprint = ga.Blueprint(description)
    credentials = {}
    credentials.update(blueprint.identity or {})
    credentials.update(blueprint.scope)
    profile = ga.authenticate(interactive=True, save=True, **credentials)
    return blueprint.queries(profile)


# TODO: add any query generation improvements not associated with
# string parsing back into blueprint generation and query.refine
# so they apply across the board
def from_args(scope, metrics,
              start, stop, days, limit,
              dimensions, filter, segment,
              **description):
    """Build a single query from command-line style arguments.

    Returns a one-element list so the result has the same shape as
    ``from_blueprint``.
    """
    # LIMIT can be a plain limit or start and length
    if limit:
        limit = list(map(int, limit.split(',')))

    description.update({
        'range': {
            'start': start,
            'stop': stop,
            'days': days,
        },
        'metrics': utils.cut(metrics, ','),
        'limit': limit,
    })

    if dimensions:
        description['dimensions'] = utils.cut(dimensions, ',')

    query = ga.query.describe(scope, description)

    # Filters and segments may be passed multiple times; each refines
    # the query further.
    for f in filter:
        query = ga.query.refine(query, {'filter': dict(utils.cut(f, '=', ','))})

    for s in segment:
        query = ga.query.refine(query, {'segment': dict(utils.cut(s, '=', ','))})

    return [query]


# TODO: maybe include an --interactive option, which defers
# to `shell` but with a prefilled query?
@cli.command()
@click.argument('metrics')
@click.option('--dimensions')
@click.option('--start',
              help='Start date in ISO format, e.g. 2016-01-01.')
@click.option('--stop')
@click.option('--days',
              help='Days to count forward from start date, counts backwards when negative.',
              default=0,
              type=int)
@click.option('--limit',
              help='Return only the first <n> or <start>,<n> results.')
@click.option('--sort',
              help='Sort by a metric; prefix with - to sort from high to low.')
@click.option('--debug',
              is_flag=True)
@click.option('--filter',
              multiple=True)
@click.option('--segment',
              multiple=True)
@click.option('--precision',
              type=click.IntRange(0, 2),
              default=1,
              help='Increase or decrease query precision.')
@click.option('-i', '--interval',
              type=click.Choice(['hour', 'day', 'week', 'month', 'year', 'total']),
              default='total',
              help='Return hourly, daily etc. numbers.')
@click.option('-o', '--output',
              type=click.Choice(['csv', 'json', 'ascii']),
              default='ascii',
              help='Output format; human-readable ascii table by default.')
@click.option('--with-metadata',
              is_flag=True)
@click.option('-b', '--blueprint',
              type=click.File('r'))
@click.option('--realtime',
              is_flag=True,
              help='Use the RealTime API instead of the Core API.')
@click.pass_obj
def query(scope, blueprint, debug, output, with_metadata, realtime, **description):
    """
    e.g.

        googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
            query pageviews \
            --start yesterday --limit -10 --sort -pageviews \
            --dimensions pagepath \
            --debug

    """
    if realtime:
        description['type'] = 'realtime'

    if blueprint:
        queries = from_blueprint(scope, blueprint)
    else:
        if not isinstance(scope, ga.account.Profile):
            raise ValueError("Account and webproperty needed for query.")
        queries = from_args(scope, **description)

    for query in queries:
        if debug:
            click.echo(query.build())

        report = query.serialize(format=output, with_metadata=with_metadata)
        click.echo(report)
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.appearance.hunt` module.
"""

import numpy as np
from itertools import permutations

from colour.appearance import (VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt,
                               XYZ_to_Hunt)
from colour.appearance.tests.common import AbstractColourAppearanceModelTest
from colour.utilities import (as_float_array, domain_range_scale,
                              ignore_numpy_errors, tstack)

__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'

__all__ = ['TestHuntColourAppearanceModel']


class TestHuntColourAppearanceModel(AbstractColourAppearanceModelTest):
    """
    Defines :mod:`colour.appearance.hunt` module unit tests methods for
    *Hunt* colour appearance model.
    """

    FIXTURE_BASENAME = 'hunt.csv'

    OUTPUT_ATTRIBUTES = {
        'J': 'J',
        'C_94': 'C',
        'h_S': 'h',
        's': 's',
        'Q': 'Q',
        'M94': 'M'
    }

    def output_specification_from_data(self, data):
        """
        Returns the *Hunt* colour appearance model output specification
        from given data.

        Parameters
        ----------
        data : list
            Fixture data.

        Returns
        -------
        CAM_Specification_Hunt
            Hunt colour appearance model specification.
        """

        XYZ = tstack([data['X'], data['Y'], data['Z']])
        XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])
        # Background luminance is 20% of the reference white's.
        XYZ_b = tstack([data['X_w'], 0.2 * data['Y_w'], data['Z_w']])

        specification = XYZ_to_Hunt(
            XYZ,
            XYZ_w,
            XYZ_b,
            data['L_A'],
            InductionFactors_Hunt(data['N_c'], data['N_b']),
            CCT_w=data['T'])

        return specification

    def test_domain_range_scale_XYZ_to_Hunt(self):
        """
        Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition domain
        and range scale support.
        """

        XYZ = np.array([19.01, 20.00, 21.78])
        XYZ_w = np.array([95.05, 100.00, 108.88])
        XYZ_b = np.array([95.05, 100.00, 108.88])
        L_A = 318.31
        surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
        CCT_w = 6504.0
        specification = XYZ_to_Hunt(
            XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)

        # Each entry is (scale, input factor, output factor); hue is the
        # only degree-valued attribute, hence the 1 / 360 scaling.
        d_r = (
            ('reference', 1, 1),
            (1, 0.01, np.array([1, 1, 1 / 360, 1, 1, 1, np.nan, np.nan])),
            (100, 1, np.array([1, 1, 100 / 360, 1, 1, 1, np.nan, np.nan])),
        )
        for scale, factor_a, factor_b in d_r:
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    XYZ_to_Hunt(
                        XYZ * factor_a,
                        XYZ_w * factor_a,
                        XYZ_b * factor_a,
                        L_A,
                        surround,
                        CCT_w=CCT_w),
                    as_float_array(specification) * factor_b,
                    decimal=7)

    @ignore_numpy_errors
    def test_raise_exception_CIECAM02_to_XYZ(self):
        """
        Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition raised
        exception.
        """

        XYZ = np.array([19.01, 20.00, 21.78])
        XYZ_w = np.array([95.05, 100.00, 108.88])
        XYZ_b = np.array([95.05, 100.00, 108.88])
        L_A = 318.31
        surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
        CCT_w = 6504.0
        S = S_w = 0.5

        # assertRaises asserts the exception *is* raised; the previous
        # try/except-pass pattern passed silently when nothing was raised.
        with self.assertRaises(ValueError):
            # Neither "CCT_w" nor "S" / "S_w" given.
            XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround)

        with self.assertRaises(ValueError):
            # "S" given without its counterpart "S_w".
            XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S=S)

        with self.assertRaises(ValueError):
            # "S_w" given without its counterpart "S".
            XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S_w=S_w)

    @ignore_numpy_errors
    def test_XYZ_p_CIECAM02_to_XYZ(self):
        """
        Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition *XYZ_p*
        argument handling.
        """

        XYZ = np.array([19.01, 20.00, 21.78])
        XYZ_w = np.array([95.05, 100.00, 108.88])
        XYZ_b = XYZ_p = np.array([95.05, 100.00, 108.88])
        L_A = 318.31
        surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
        CCT_w = 6504.0

        np.testing.assert_almost_equal(
            XYZ_to_Hunt(
                XYZ,
                XYZ_w,
                XYZ_b,
                L_A,
                surround,
                XYZ_p=XYZ_p,
                CCT_w=CCT_w,
            ),
            np.array([
                30.046267861960700, 0.121050839936350, 269.273759446144600,
                0.019909320692942, 22.209765491265024, 0.123896438259997,
                np.nan, np.nan
            ]),
            decimal=7)

    @ignore_numpy_errors
    def test_nan_XYZ_to_Hunt(self):
        """
        Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition nan
        support.
        """

        # Exercise every 3-permutation of degenerate values; the decorator
        # silences the numpy warnings these inputs trigger.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            XYZ = np.array(case)
            XYZ_w = np.array(case)
            XYZ_b = np.array(case)
            L_A = case[0]
            surround = InductionFactors_Hunt(case[0], case[0])
            CCT_w = case[0]
            XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)
#------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------

import sys
import requests
import httpretty
import json

from datetime import datetime

try:
    import unittest2 as unittest
except ImportError:
    import unittest

try:
    from unittest import mock
except ImportError:
    import mock

import adal
from adal.authority import Authority
from adal import self_signed_jwt
from adal.self_signed_jwt import SelfSignedJwt
from adal.authentication_context import AuthenticationContext
from tests import util
from tests.util import parameters as cp


class TestSelfSignedJwt(unittest.TestCase):
    """Unit tests for :class:`adal.self_signed_jwt.SelfSignedJwt` creation."""

    # Shared fixtures drawn from the test parameter file.
    testNowDate = cp['nowDate']
    testJwtId = cp['jwtId']
    expectedJwtWithThumbprint = cp['expectedJwtWithThumbprint']
    expectedJwtWithPublicCert = cp['expectedJwtWithPublicCert']
    unexpectedJwt = 'unexpectedJwt'
    testAuthority = Authority(
        'https://login.microsoftonline.com/naturalcauses.com', False)
    testClientId = 'd6835713-b745-48d1-bb62-7a8248477d35'
    testCert = cp['cert']
    testPublicCert = cp['publicCert']

    def _create_jwt(self, cert, thumbprint, public_certificate=None,
                    encodeError=None):
        """Create a JWT with the date/id/encode internals mocked out.

        When ``encodeError`` is truthy, ``_encode_jwt`` is mocked to return
        a value that does NOT match any expected JWT, so mismatch paths can
        be exercised.
        """
        ssjwt = SelfSignedJwt(cp['callContext'], self.testAuthority,
                              self.testClientId)

        # Pin the non-deterministic inputs so the produced JWT is stable.
        self_signed_jwt._get_date_now = mock.MagicMock(
            return_value=self.testNowDate)
        self_signed_jwt._get_new_jwt_id = mock.MagicMock(
            return_value=self.testJwtId)

        if encodeError:
            self_signed_jwt._encode_jwt = mock.MagicMock(
                return_value=self.unexpectedJwt)
        else:
            expected = (self.expectedJwtWithPublicCert
                        if public_certificate
                        else self.expectedJwtWithThumbprint)
            self_signed_jwt._encode_jwt = mock.MagicMock(return_value=expected)

        jwt = ssjwt.create(cert, thumbprint,
                           public_certificate=public_certificate)
        return jwt

    def _create_jwt_and_match_expected_err(self, testCert, thumbprint,
                                           encodeError=None):
        """Assert that JWT creation fails for the given cert/thumbprint."""
        # Broad on purpose: the library raises different exception types
        # depending on which validation fails.
        with self.assertRaises(Exception):
            self._create_jwt(testCert, thumbprint, encodeError=encodeError)

    def _create_jwt_and_match_expected_jwt(self, cert, thumbprint):
        """Assert that JWT creation succeeds and matches the fixture JWT."""
        jwt = self._create_jwt(cert, thumbprint)
        self.assertTrue(jwt, 'No JWT generated')
        # assertEqual (not assertTrue(a == b)) so failures show the diff.
        self.assertEqual(jwt, self.expectedJwtWithThumbprint,
                         'Generated JWT does not match expected:{}'.format(jwt))

    def test_jwt_hash_with_public_cert(self):
        jwt = self._create_jwt(self.testCert, cp['certHash'],
                               public_certificate=self.testPublicCert)
        self.assertEqual(jwt, self.expectedJwtWithPublicCert,
                         'Generated JWT does not match expected:{}'.format(jwt))

    def test_create_jwt_hash_colons(self):
        self._create_jwt_and_match_expected_jwt(self.testCert, cp['certHash'])

    def test_create_jwt_hash_spaces(self):
        # Thumbprint separators may be spaces instead of colons.
        thumbprint = cp['certHash'].replace(':', ' ')
        self._create_jwt_and_match_expected_jwt(self.testCert, thumbprint)

    def test_create_jwt_hash_straight_hex(self):
        # Thumbprint may also be unseparated hex.
        thumbprint = cp['certHash'].replace(':', '')
        self._create_jwt_and_match_expected_jwt(self.testCert, thumbprint)

    def test_create_jwt_invalid_cert(self):
        self._create_jwt_and_match_expected_err('foobar', cp['certHash'],
                                                encodeError=True)

    def test_create_jwt_invalid_thumbprint_1(self):
        self._create_jwt_and_match_expected_err(self.testCert, 'zzzz')

    def test_create_jwt_invalid_thumbprint_wrong_size(self):
        thumbprint = 'C1:5D:EA:86:56:AD:DF:67:BE:80:31:D8:5E:BD:DC:5A:D6:C4:36:E7:AA'
        self._create_jwt_and_match_expected_err(self.testCert, thumbprint)

    def test_create_jwt_invalid_thumbprint_invalid_char(self):
        thumbprint = 'C1:5D:EA:86:56:AD:DF:67:BE:80:31:D8:5E:BD:DC:5A:D6:C4:36:Ez'
        self._create_jwt_and_match_expected_err(self.testCert, thumbprint)


if __name__ == '__main__':
    unittest.main()