content stringlengths 0 1.55M |
|---|
"""
Convert between text notebook metadata and jupyter cell metadata.
Standard cell metadata are documented here:
See also https://ipython.org/ipython-doc/3/notebook/nbformat.html#cell-metadata
"""<import_stmt>ast<import_stmt>re<import_from_stmt>json dumps loads<try_stmt><block_start><import_from_stmt>json JSONDecodeError<block_end><except_stmt>ImportError<block_start>JSONDecodeError=ValueError<block_end><import_from_stmt>.languages _JUPYTER_LANGUAGES<line_sep># Map R Markdown's "echo", "results" and "include" to "hide_input" and "hide_output", that are understood by the
# `runtools` extension for Jupyter notebook, and by nbconvert (use the `hide_input_output.tpl` template).
# See http://jupyter-contrib-nbextensions.readthedocs.io/en/latest/nbextensions/runtools/readme.html
_RMARKDOWN_TO_RUNTOOLS_OPTION_MAP=[(("include" "FALSE") [("hide_input" <true>) ("hide_output" <true>)]) (("echo" "FALSE") [("hide_input" <true>)]) (("results" "'hide'") [("hide_output" <true>)]) (("results" '"hide"') [("hide_output" <true>)]) ]<line_sep># Alternatively, Jupytext can also map the Jupyter Book options to R Markdown
_RMARKDOWN_TO_JUPYTER_BOOK_MAP=[(("include" "FALSE") "remove_cell") (("echo" "FALSE") "remove_input") (("results" "'hide'") "remove_output") (("results" '"hide"') "remove_output") ]<line_sep>_JUPYTEXT_CELL_METADATA=[# Pre-jupytext metadata
"skipline" "noskipline" # Jupytext metadata
"cell_marker" "lines_to_next_cell" "lines_to_end_of_cell_marker" ]<line_sep>_IGNORE_CELL_METADATA=",".join("-{}".format(name)<for>name [# Frequent cell metadata that should not enter the text representation
# (these metadata are preserved in the paired Jupyter notebook).
"autoscroll" "collapsed" "scrolled" "trusted" "execution" "ExecuteTime" ]+_JUPYTEXT_CELL_METADATA)<line_sep>_IDENTIFIER_RE=re.compile(r"^[a-zA-Z_\.]+[a-zA-Z0-9_\.]*$")<class_stmt>RLogicalValueError(Exception)<block_start>"""Incorrect value for R boolean"""<block_end><class_stmt>RMarkdownOptionParsingError(Exception)<block_start>"""Error when parsing Rmd cell options"""<block_end><def_stmt>_py_logical_values rbool<block_start><if_stmt>rbool<in>["TRUE" "T"]<block_start><return><true><block_end><if_stmt>rbool<in>["FALSE" "F"]<block_start><return><false><block_end><raise>RLogicalValueError<block_end><def_stmt>metadata_to_rmd_options language metadata use_runtools=<false><block_start>"""Convert language and metadata information to their rmd representation"""<line_sep>options=(language<or>"R").lower()<if_stmt>"name"<in>metadata<block_start>options<augadd>" "+metadata["name"]+","<del_stmt>metadata["name"]<block_end><if_stmt>use_runtools<block_start><for_stmt>rmd_option,jupyter_options _RMARKDOWN_TO_RUNTOOLS_OPTION_MAP<block_start><if_stmt>all([metadata.get(opt_name)<eq>opt_value<for>opt_name,opt_value jupyter_options])<block_start>options<augadd>" {}={},".format(rmd_option[0] "FALSE"<if>rmd_option[1]<is><false><else>rmd_option[1])<for_stmt>opt_name,_ jupyter_options<block_start>metadata.pop(opt_name)<block_end><block_end><block_end><block_end><else_stmt><block_start><for_stmt>rmd_option,tag _RMARKDOWN_TO_JUPYTER_BOOK_MAP<block_start><if_stmt>tag<in>metadata.get("tags" [])<block_start>options<augadd>" {}={},".format(rmd_option[0] "FALSE"<if>rmd_option[1]<is><false><else>rmd_option[1])<line_sep>metadata["tags"]=[i<for>i metadata["tags"]<if>i<ne>tag]<if_stmt><not>metadata["tags"]<block_start>metadata.pop("tags")<block_end><block_end><block_end><block_end><for_stmt>opt_name metadata<block_start>opt_value=metadata[opt_name]<line_sep>opt_name=opt_name.strip()<if_stmt>opt_name<eq>"active"<block_start>options<augadd>' {}="{}",'.format(opt_name 
str(opt_value))<block_end><elif_stmt>isinstance(opt_value bool)<block_start>options<augadd>" {}={},".format(opt_name "TRUE"<if>opt_value<else>"FALSE")<block_end><elif_stmt>isinstance(opt_value list)<block_start>options<augadd>" {}={},".format(opt_name "c({})".format(", ".join(['"{}"'.format(str(v))<for>v opt_value])) )<block_end><elif_stmt>isinstance(opt_value str)<block_start><if_stmt>opt_value.startswith("#R_CODE#")<block_start>options<augadd>" {}={},".format(opt_name opt_value[8:])<block_end><elif_stmt>'"'<not><in>opt_value<block_start>options<augadd>' {}="{}",'.format(opt_name opt_value)<block_end><else_stmt><block_start>options<augadd>" {}='{}',".format(opt_name opt_value)<block_end><block_end><else_stmt><block_start>options<augadd>" {}={},".format(opt_name str(opt_value))<block_end><block_end><if_stmt><not>language<block_start>options=options[2:]<block_end><return>options.strip(",").strip()<block_end><def_stmt>update_metadata_from_rmd_options name value metadata use_runtools=<false><block_start>"""Map the R Markdown cell visibility options to the Jupyter ones"""<if_stmt>use_runtools<block_start><for_stmt>rmd_option,jupyter_options _RMARKDOWN_TO_RUNTOOLS_OPTION_MAP<block_start><if_stmt>name<eq>rmd_option[0]<and>value<eq>rmd_option[1]<block_start><for_stmt>opt_name,opt_value jupyter_options<block_start>metadata[opt_name]=opt_value<block_end><return><true><block_end><block_end><block_end><else_stmt><block_start><for_stmt>rmd_option,tag _RMARKDOWN_TO_JUPYTER_BOOK_MAP<block_start><if_stmt>name<eq>rmd_option[0]<and>value<eq>rmd_option[1]<block_start>metadata.setdefault("tags" []).append(tag)<line_sep><return><true><block_end><block_end><block_end><return><false><block_end><class_stmt>ParsingContext<block_start>"""
Class for determining where to split rmd options
"""<line_sep>parenthesis_count=0<line_sep>curly_bracket_count=0<line_sep>square_bracket_count=0<line_sep>in_single_quote=<false><line_sep>in_double_quote=<false><def_stmt>__init__ self line<block_start>self.line=line<block_end><def_stmt>in_global_expression self<block_start>"""Currently inside an expression"""<line_sep><return>(self.parenthesis_count<eq>0<and>self.curly_bracket_count<eq>0<and>self.square_bracket_count<eq>0<and><not>self.in_single_quote<and><not>self.in_double_quote)<block_end><def_stmt>count_special_chars self char prev_char<block_start>"""Update parenthesis counters"""<if_stmt>char<eq>"("<block_start>self.parenthesis_count<augadd>1<block_end><elif_stmt>char<eq>")"<block_start>self.parenthesis_count<augsub>1<if_stmt>self.parenthesis_count<l>0<block_start><raise>RMarkdownOptionParsingError('Option line "{}" has too many '<concat>"closing parentheses".format(self.line))<block_end><block_end><elif_stmt>char<eq>"{"<block_start>self.curly_bracket_count<augadd>1<block_end><elif_stmt>char<eq>"}"<block_start>self.curly_bracket_count<augsub>1<if_stmt>self.curly_bracket_count<l>0<block_start><raise>RMarkdownOptionParsingError('Option line "{}" has too many '<concat>"closing curly brackets".format(self.line))<block_end><block_end><elif_stmt>char<eq>"["<block_start>self.square_bracket_count<augadd>1<block_end><elif_stmt>char<eq>"]"<block_start>self.square_bracket_count<augsub>1<if_stmt>self.square_bracket_count<l>0<block_start><raise>RMarkdownOptionParsingError('Option line "{}" has too many '<concat>"closing square brackets".format(self.line))<block_end><block_end><elif_stmt>char<eq>"'"<and>prev_char<ne>"\\"<block_start>self.in_single_quote=<not>self.in_single_quote<block_end><elif_stmt>char<eq>'"'<and>prev_char<ne>"\\"<block_start>self.in_double_quote=<not>self.in_double_quote<block_end><block_end><block_end><def_stmt>parse_rmd_options line<block_start>"""
Given a R markdown option line, returns a list of pairs name,value
:param line:
:return:
"""<line_sep>parsing_context=ParsingContext(line)<line_sep>result=[]<line_sep>prev_char=""<line_sep>name=""<line_sep>value=""<for_stmt>char ","+line+","<block_start><if_stmt>parsing_context.in_global_expression()<block_start><if_stmt>char<eq>","<block_start><if_stmt>name<ne>""<or>value<ne>""<block_start><if_stmt>result<and>name<eq>""<block_start><raise>RMarkdownOptionParsingError('Option line "{}" has no name for '<concat>"option value {}".format(line value))<block_end>result.append((name.strip() value.strip()))<line_sep>name=""<line_sep>value=""<block_end><block_end><elif_stmt>char<eq>"="<block_start><if_stmt>name<eq>""<block_start>name=value<line_sep>value=""<block_end><else_stmt><block_start>value<augadd>char<block_end><block_end><else_stmt><block_start>parsing_context.count_special_chars(char prev_char)<line_sep>value<augadd>char<block_end><block_end><else_stmt><block_start>parsing_context.count_special_chars(char prev_char)<line_sep>value<augadd>char<block_end>prev_char=char<block_end><if_stmt><not>parsing_context.in_global_expression()<block_start><raise>RMarkdownOptionParsingError('Option line "{}" is not properly terminated'.format(line))<block_end><return>result<block_end><def_stmt>rmd_options_to_metadata options use_runtools=<false><block_start>"""Parse rmd options and return a metadata dictionary"""<line_sep>options=re.split(r"\s|," options 1)<if_stmt>len(options)<eq>1<block_start>language=options[0]<line_sep>chunk_options=[]<block_end><else_stmt><block_start>language,others=options<line_sep>language=language.rstrip(" ,")<line_sep>others=others.lstrip(" ,")<line_sep>chunk_options=parse_rmd_options(others)<block_end>language="R"<if>language<eq>"r"<else>language<line_sep>metadata={}<for_stmt>i,opt enumerate(chunk_options)<block_start>name,value=opt<if_stmt>i<eq>0<and>name<eq>""<block_start>metadata["name"]=value<line_sep><continue><block_end><if_stmt>update_metadata_from_rmd_options(name value metadata 
use_runtools=use_runtools)<block_start><continue><block_end><try_stmt><block_start>metadata[name]=_py_logical_values(value)<line_sep><continue><block_end><except_stmt>RLogicalValueError<block_start>metadata[name]=value<block_end><block_end><for_stmt>name metadata<block_start>try_eval_metadata(metadata name)<block_end><if_stmt>"eval"<in>metadata<and><not>is_active(".Rmd" metadata)<block_start><del_stmt>metadata["eval"]<block_end><return>metadata.get("language")<or>language metadata<block_end><def_stmt>try_eval_metadata metadata name<block_start>"""Evaluate the metadata to a python object, if possible"""<line_sep>value=metadata[name]<if_stmt><not>isinstance(value str)<block_start><return><block_end><if_stmt>(value.startswith('"')<and>value.endswith('"'))<or>(value.startswith("'")<and>value.endswith("'"))<block_start>metadata[name]=value[1:-1]<line_sep><return><block_end><if_stmt>value.startswith("c(")<and>value.endswith(")")<block_start>value="["+value[2:-1]+"]"<block_end><elif_stmt>value.startswith("list(")<and>value.endswith(")")<block_start>value="["+value[5:-1]+"]"<block_end><try_stmt><block_start>metadata[name]=ast.literal_eval(value)<block_end><except_stmt>(SyntaxError ValueError)<block_start><if_stmt>name<ne>"name"<block_start>metadata[name]="#R_CODE#"+value<block_end><return><block_end><block_end><def_stmt>is_active ext metadata default=<true><block_start>"""Is the cell active for the given file extension?"""<if_stmt>metadata.get("run_control" {}).get("frozen")<is><true><block_start><return>ext<eq>".ipynb"<block_end><for_stmt>tag metadata.get("tags" [])<block_start><if_stmt>tag.startswith("active-")<block_start><return>ext.replace("." "")<in>tag.split("-")<block_end><block_end><if_stmt>"active"<not><in>metadata<block_start><return>default<block_end><return>ext.replace("." 
"")<in>re.split(r"\.|," metadata["active"])<block_end><def_stmt>metadata_to_double_percent_options metadata plain_json<block_start>"""Metadata to double percent lines"""<line_sep>text=[]<if_stmt>"title"<in>metadata<block_start>text.append(metadata.pop("title"))<block_end><if_stmt>"cell_depth"<in>metadata<block_start>text.insert(0 "%"<times>metadata.pop("cell_depth"))<block_end><if_stmt>"cell_type"<in>metadata<block_start>text.append("[{}]".format(metadata.pop("region_name" metadata.pop("cell_type"))))<block_end><return>metadata_to_text(" ".join(text) metadata plain_json=plain_json)<block_end><def_stmt>incorrectly_encoded_metadata text<block_start>"""Encode a text that Jupytext cannot parse as a cell metadata"""<line_sep><return>{"incorrectly_encoded_metadata":text}<block_end><def_stmt>isidentifier text<block_start>"""Can this text be a proper key?"""<line_sep><return>_IDENTIFIER_RE.match(text)<block_end><def_stmt>is_jupyter_language language<block_start>"""Is this a jupyter language?"""<for_stmt>lang _JUPYTER_LANGUAGES<block_start><if_stmt>language.lower()<eq>lang.lower()<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>parse_key_equal_value text<block_start>"""Parse a string of the form 'key1=value1 key2=value2'"""<line_sep># Empty metadata?
text=text.strip()<if_stmt><not>text<block_start><return>{}<block_end>last_space_pos=text.rfind(" ")<line_sep># Just an identifier?
<if_stmt><not>text.startswith("--")<and>isidentifier(text[last_space_pos+1:])<block_start>key=text[last_space_pos+1:]<line_sep>value=<none><line_sep>result={key:value}<if_stmt>last_space_pos<g>0<block_start>result.update(parse_key_equal_value(text[:last_space_pos]))<block_end><return>result<block_end># Iterate on the '=' signs, starting from the right
equal_sign_pos=<none><while_stmt><true><block_start>equal_sign_pos=text.rfind("=" <none> equal_sign_pos)<if_stmt>equal_sign_pos<l>0<block_start><return>incorrectly_encoded_metadata(text)<block_end># Do we have an identifier on the left of the equal sign?
prev_whitespace=text[:equal_sign_pos].rstrip().rfind(" ")<line_sep>key=text[prev_whitespace+1:equal_sign_pos].strip()<if_stmt><not>isidentifier(key.replace("." ""))<block_start><continue><block_end><try_stmt><block_start>value=relax_json_loads(text[equal_sign_pos+1:])<block_end><except_stmt>(ValueError SyntaxError)# try with a longer expression
<block_start><continue><block_end># Combine with remaining metadata
metadata=(parse_key_equal_value(text[:prev_whitespace])<if>prev_whitespace<g>0<else>{})<line_sep># Append our value
metadata[key]=value<line_sep># And return
<return>metadata<block_end><block_end><def_stmt>relax_json_loads text catch=<false><block_start>"""Parse a JSON string or similar"""<line_sep>text=text.strip()<try_stmt><block_start><return>loads(text)<block_end><except_stmt>JSONDecodeError<block_start><pass><block_end><if_stmt><not>catch<block_start><return>ast.literal_eval(text)<block_end><try_stmt><block_start><return>ast.literal_eval(text)<block_end><except_stmt>(ValueError SyntaxError)<block_start><pass><block_end><return>incorrectly_encoded_metadata(text)<block_end><def_stmt>is_json_metadata text<block_start>"""Is this a JSON metadata?"""<line_sep>first_curly_bracket=text.find("{")<if_stmt>first_curly_bracket<l>0<block_start><return><false><block_end>first_equal_sign=text.find("=")<if_stmt>first_equal_sign<l>0<block_start><return><true><block_end><return>first_curly_bracket<l>first_equal_sign<block_end><def_stmt>text_to_metadata text allow_title=<false><block_start>"""Parse the language/cell title and associated metadata"""<line_sep># Parse the language or cell title = everything before the last blank space before { or =
text=text.strip()<line_sep>first_curly_bracket=text.find("{")<line_sep>first_equal_sign=text.find("=")<if_stmt>first_curly_bracket<l>0<or>(0<le>first_equal_sign<l>first_curly_bracket)# this is a key=value metadata line
# case one = the options may be preceded with a language
<block_start><if_stmt><not>allow_title<block_start><if_stmt>is_jupyter_language(text)<block_start><return>text {}<block_end><if_stmt>" "<not><in>text<block_start><return>"" parse_key_equal_value(text)<block_end>language,options=text.split(" " 1)<if_stmt>is_jupyter_language(language)<block_start><return>language parse_key_equal_value(options)<block_end><return>"" parse_key_equal_value(text)<block_end># case two = a title may be before the options
# we split the title into words
<if_stmt>first_equal_sign<ge>0<block_start>words=text[:first_equal_sign].split(" ")<line_sep># last word is the key before the equal sign!
<while_stmt>words<and><not>words[-1]<block_start>words.pop()<block_end><if_stmt>words<block_start>words.pop()<block_end><block_end><else_stmt><block_start>words=text.split(" ")<block_end># and we remove words on the right that are attributes (they start with '.')
<while_stmt>words<and>(<not>words[-1].strip()<or>words[-1].startswith("."))<block_start>words.pop()<block_end>title=" ".join(words)<line_sep><return>title parse_key_equal_value(text[len(title):])<block_end># json metadata line
<return>(text[:first_curly_bracket].strip() relax_json_loads(text[first_curly_bracket:] catch=<true>) )<block_end><def_stmt>metadata_to_text language_or_title metadata=<none> plain_json=<false><block_start>"""Write the cell metadata in the format key=value"""<line_sep># Was metadata the first argument?
<if_stmt>metadata<is><none><block_start>metadata,language_or_title=language_or_title metadata<block_end>metadata={key:metadata[key]<for>key metadata<if>key<not><in>_JUPYTEXT_CELL_METADATA}<line_sep>text=[language_or_title]<if>language_or_title<else>[]<if_stmt>language_or_title<is><none><block_start><if_stmt>("title"<in>metadata<and>"{"<not><in>metadata["title"]<and>"="<not><in>metadata["title"])<block_start>text.append(metadata.pop("title"))<block_end><block_end><if_stmt>plain_json<block_start><if_stmt>metadata<block_start>text.append(dumps(metadata))<block_end><block_end><else_stmt><block_start><for_stmt>key metadata<block_start><if_stmt>key<eq>"incorrectly_encoded_metadata"<block_start>text.append(metadata[key])<block_end><elif_stmt>metadata[key]<is><none><block_start>text.append(key)<block_end><else_stmt><block_start>text.append("{}={}".format(key dumps(metadata[key])))<block_end><block_end><block_end><return>" ".join(text)<block_end> |
<import_stmt>numpy<as>np<class_stmt>Renderer<block_start><def_stmt>__init__ self height width config<block_start>self.height=height<line_sep>self.width=width<line_sep>self.content=<none><line_sep>self.zbuffer=<none><line_sep>self.m=<none><line_sep>self.f=1.0<line_sep>self.resize(height width)<line_sep>self.colors=config.colors<line_sep>self.bonds=config.bonds<line_sep>self.btoggle=len(self.bonds)<g>0<line_sep>self.pos,self.sym=np.array(config.coordinates) config.symbols<line_sep>self.ztoggle=<true><line_sep>self.zoom=1.0<line_sep>self.rot=np.identity(3)<line_sep>self.rotcounter=[0 0 0]<line_sep>self.draw_scene()<block_end><def_stmt>draw_scene self<block_start>"""
A super simple rasterizer. For now, just draw single character atom symbols at their rounded x and y
positions.
:return: True if nothing bad happened.
"""<line_sep>mx,my=self.m<line_sep>rot=np.matmul(self.pos self.rot)<line_sep>self.clear()<line_sep># Draw bonds
<for_stmt>bond self.bonds<block_start>i,j=bond<line_sep># if bond is (i, j) with i == j, just draw the label (no bonds)
<if_stmt>i<eq>j<block_start>x,y,z=rot[i]<line_sep>xp,yp=round(float(x)<times>self.f<times>self.zoom+mx) round(float(y)<times>self.zoom+my)<if_stmt>1<l>xp<l>self.width-2<and>1<l>yp<l>self.height-3<and>float(z)<l>self.zbuffer[yp][xp]<block_start>self.zbuffer[yp][xp]=float(z)<line_sep>self.content[yp][xp]=self.sym[i][0].upper()+","+self.colors[self.sym[i].upper()]<block_end><block_end># else draw the bond with the labels at the end points
<else_stmt># Draw the two labels at the end points
<block_start>xa,ya,za=rot[i]<line_sep>xa=float(xa)<times>self.f<times>self.zoom+mx<line_sep>ya=float(ya)<times>self.zoom+my<line_sep>xb,yb,zb=rot[j]<line_sep>xb=float(xb)<times>self.f<times>self.zoom+mx<line_sep>yb=float(yb)<times>self.zoom+my<line_sep>xap,yap=round(xa) round(ya)<line_sep>xbp,ybp=round(xb) round(yb)<if_stmt>1<l>xap<l>self.width-2<and>1<l>yap<l>self.height-3<and>float(za)<l>self.zbuffer[yap][xap]<block_start>self.zbuffer[yap][xap]=float(za)<line_sep>self.content[yap][xap]=self.sym[i][0].upper()+","+self.colors[self.sym[i].upper()]<block_end><if_stmt>1<l>xbp<l>self.width-2<and>1<l>ybp<l>self.height-3<and>float(zb)<l>self.zbuffer[ybp][xbp]<block_start>self.zbuffer[ybp][xbp]=float(zb)<line_sep>self.content[ybp][xbp]=self.sym[j][0].upper()+","+self.colors[self.sym[j].upper()]<block_end><if_stmt><not>self.btoggle<block_start><continue><block_end># Then start at xap+1 and go to xbp-1, drawing line segments
sy=-1<if>ya<g>yb<else>1<line_sep>sx=-1<if>xa<g>xb<else>1<line_sep>sz=-1<if>za<g>zb<else>1<line_sep>dx=float((xb-xa)/(yb-ya))<if>abs(yb-ya)<g>0<else>0<line_sep>dy=float((yb-ya)/(xb-xa))<if>abs(xb-xa)<g>0<else>0<line_sep>dz=float((zb-za)/(xb-xa))<if>abs(xb-xa)<g>0<else>0<if_stmt>abs(dy)<le>1<block_start><for_stmt>k range(1 abs(xap-xbp))<block_start>xk=xap+sx<times>k<line_sep>yk=round(float(ya)+sx<times>k<times>dy)<line_sep>zk=round((float(za)+sz<times>k<times>dz))<if_stmt>1<l>xk<l>self.width-2<and>1<l>yk<l>self.height-3<and>float(zk)<l>self.zbuffer[yk][xk]<block_start>col=self.colors[self.sym[i].upper()]<if>k<l>abs(xap-xbp)/2<else>self.colors[self.sym[j].upper()]<line_sep>self.zbuffer[yk][xk]=float(zk)<line_sep>self.content[yk][xk]="·,%s"%col<block_end><block_end><block_end><else_stmt><block_start><for_stmt>k range(1 abs(yap-ybp))<block_start>xk=round((float(xa)+sy<times>k<times>dx))<line_sep>yk=yap+sy<times>k<line_sep>zk=round((float(za)+sz<times>k<times>dz))<if_stmt>1<l>xk<l>self.width-2<and>1<l>yk<l>self.height-3<and>float(zk)<l>self.zbuffer[yk][xk]<block_start>col=self.colors[self.sym[i].upper()]<if>k<l>abs(yap-ybp)/2<else>self.colors[self.sym[j].upper()]<line_sep>self.zbuffer[yk][xk]=float(zk)<line_sep>self.content[yk][xk]="·,%s"%col<block_end><block_end><block_end><block_end><block_end><return><true><block_end><def_stmt>rotate self direction<block_start>"""
Set an internal rotation matrix that is applied to the coordinates before every render.
:param direction: 1 and -1 are x and -x, 2 is either z/y, depending on whether the ztoggle is active or not
"""<if_stmt>direction<eq>1<block_start>self.rot=np.matmul(self.rot [[1.0 0.0 0.0] [0.0 0.9962 -0.0872] [0.0 0.0872 0.9962]])<if_stmt>self.rotcounter[0]+5<g>360<block_start>self.rotcounter[0]=0<block_end>self.rotcounter[0]<augadd>5<block_end><elif_stmt>direction<eq>-1<block_start>self.rot=np.matmul(self.rot [[1.0 0.0 0.0] [0.0 0.9962 0.0872] [0.0 -0.0872 0.9962]])<if_stmt>self.rotcounter[0]-5<l>0<block_start>self.rotcounter[0]=360<block_end>self.rotcounter[0]<augsub>5<block_end><elif_stmt>direction<eq>2<and>self.ztoggle<block_start>self.rot=np.matmul(self.rot [[0.9962 -0.0872 0.0] [0.0872 0.9962 0.0] [0.0 0.0 1.0]])<if_stmt>self.rotcounter[2]+5<g>360<block_start>self.rotcounter[2]=0<block_end><else_stmt><block_start>self.rotcounter[2]<augadd>5<block_end><block_end><elif_stmt>direction<eq>-2<and>self.ztoggle<block_start>self.rot=np.matmul(self.rot [[0.9962 0.0872 0.0] [-0.0872 0.9962 0.0] [0.0 0.0 1.0]])<if_stmt>self.rotcounter[2]-5<l>0<block_start>self.rotcounter[2]=360<block_end><else_stmt><block_start>self.rotcounter[2]<augsub>5<block_end><block_end><elif_stmt>direction<eq>2<block_start>self.rot=np.matmul(self.rot [[0.9962 0.0 0.0872] [0.0 1.0 0.0] [-0.0872 0.0 0.9962]])<if_stmt>self.rotcounter[1]+5<g>360<block_start>self.rotcounter[1]=0<block_end><else_stmt><block_start>self.rotcounter[1]<augadd>5<block_end><block_end><elif_stmt>direction<eq>-2<block_start>self.rot=np.matmul(self.rot [[0.9962 0.0 -0.0872] [0.0 1.0 0.0] [0.0872 0.0 0.9962]])<if_stmt>self.rotcounter[1]-5<l>0<block_start>self.rotcounter[1]=360<block_end><else_stmt><block_start>self.rotcounter[1]<augsub>5<block_end><block_end><block_end><def_stmt>reset_view self<block_start>"""
Reset the view to the starting values.
"""<line_sep>self.zoom=1.0<line_sep>self.rotcounter=[0 0 0]<line_sep>self.rot=np.identity(3)<line_sep>self.m=round(self.width/2) round(self.height/2)<block_end><def_stmt>resize self height width<block_start>"""
Resize the screen. Known issue: crashes if the resize is faster than the framerate.
"""<line_sep>self.height=height<line_sep>self.width=width<line_sep>self.content=[[" ,0"]<times>self.width<for>n range(self.height-2)]<line_sep>self.zbuffer=[[10000.0]<times>self.width<for>n range(self.height-2)]<line_sep>self.m=round(self.width/2) round(self.height/2)<line_sep># Since terminal characters are higher than wide, I correct for this by multiplying the x by f
# so that it appears wider. 2.25 is what looks good on my terminals, but might be
# nice to have a general way of determining the optimal value
self.f=2<block_end><def_stmt>clear self<block_start>"""
Clear the canvas and redraw the border.
"""<for_stmt>i range(self.height-2)<block_start><for_stmt>j range(self.width)<block_start>self.zbuffer[i][j]=10000.0<block_end><block_end><for_stmt>i range(self.height-2)<block_start><for_stmt>j range(self.width)<block_start><if_stmt>i<eq>0<and>j<eq>0<block_start>self.content[i][j]="┌,0"<block_end><elif_stmt>(i<eq>0<or>i<eq>self.height-3)<and>0<l>j<l>self.width-1<block_start>self.content[i][j]="─,0"<block_end><elif_stmt>i<eq>0<and>j<eq>self.width-1<block_start>self.content[i][j]="┐,0"<block_end><elif_stmt>i<l>self.height-3<and>(j<eq>0<or>j<eq>self.width-1)<block_start>self.content[i][j]="│,0"<block_end><elif_stmt>i<eq>self.height-3<and>j<eq>0<block_start>self.content[i][j]="└,0"<block_end><elif_stmt>i<eq>self.height-3<and>j<eq>self.width-1<block_start>self.content[i][j]="┘,0"<block_end><else_stmt><block_start>self.content[i][j]=" ,0"<block_end><block_end><block_end><block_end><block_end> |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>collections<import_stmt>json<import_stmt>random<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>image_retrieval.common preproces_image depreprocess_image fit_to_max_size from_list<def_stmt>blur image<block_start>kernel=np.ones((3 3) np.float32)/9<line_sep>image=cv2.filter2D(image -1 kernel)<line_sep><return>image<block_end><def_stmt>gray_noise image<block_start><if_stmt>np.mean(image)<g>100<block_start>gray=np.random.uniform(0.0 100.0 image.shape[0:2])<line_sep>gray3=np.array([gray gray gray])<line_sep>gray3=np.transpose(gray3 (1 2 0))<line_sep>gray3=cv2.blur(gray3 ksize=(7 7))<line_sep>image<augsub>gray3<line_sep>image=np.clip(image 0.0 255.0)<block_end><return>image<block_end>@tf.function<def_stmt>tf_random_crop_and_resize image input_size<block_start>min_size=tf.minimum(tf.shape(image)[0] tf.shape(image)[1])<line_sep>crop_size=tf.random.uniform(() min_size<floordiv>2 min_size dtype=tf.int32)<line_sep>crop=tf.image.random_crop(image (crop_size crop_size 3))<line_sep>var_thr=100<for_stmt>_ tf.range(10)<block_start>moments=tf.nn.moments(tf.reshape(crop (-1 3)) axes=0)<if_stmt>tf.less(tf.reduce_sum(moments[1]) tf.constant(var_thr dtype=tf.float32))<block_start>crop=tf.image.random_crop(image (crop_size crop_size 3))<block_end><else_stmt><block_start><break><block_end><block_end>moments=tf.nn.moments(tf.reshape(crop (-1 3)) axes=0)<if_stmt>tf.less(tf.reduce_sum(moments[1]) tf.constant(var_thr dtype=tf.float32))<block_start>crop=tf.image.random_crop(image (tf.shape(image)[0] tf.shape(image)[1] 3))<block_end>crop=tf.cast(tf.expand_dims(crop axis=0) tf.float32)<line_sep>crop=tf.image.resize(crop (input_size input_size))<line_sep>crop=tf.squeeze(crop axis=0)<line_sep><return>crop<block_end>@tf.function<def_stmt>tf_distort_color image<block_start>""" Distorts color. 
"""<line_sep>image=image/255.0<line_sep>image=image[: : ::-1]<line_sep>brightness_max_delta=16./255.<line_sep>color_ordering=tf.random.uniform([] maxval=5 dtype=tf.int32)<if_stmt>tf.equal(color_ordering 0)<block_start>image=tf.image.random_brightness(image max_delta=brightness_max_delta)<line_sep>image=tf.image.random_saturation(image lower=0.5 upper=1.5)<line_sep>image=tf.image.random_hue(image max_delta=0.1)<line_sep>image=tf.image.random_contrast(image lower=0.5 upper=1.5)<block_end><elif_stmt>tf.equal(color_ordering 1)<block_start>image=tf.image.random_saturation(image lower=0.5 upper=1.5)<line_sep>image=tf.image.random_brightness(image max_delta=brightness_max_delta)<line_sep>image=tf.image.random_contrast(image lower=0.5 upper=1.5)<line_sep>image=tf.image.random_hue(image max_delta=0.1)<block_end><elif_stmt>tf.equal(color_ordering 2)<block_start>image=tf.image.random_contrast(image lower=0.5 upper=1.5)<line_sep>image=tf.image.random_hue(image max_delta=0.1)<line_sep>image=tf.image.random_brightness(image max_delta=brightness_max_delta)<line_sep>image=tf.image.random_saturation(image lower=0.5 upper=1.5)<block_end><elif_stmt>tf.equal(color_ordering 3)<block_start>image=tf.image.random_hue(image max_delta=0.1)<line_sep>image=tf.image.random_saturation(image lower=0.5 upper=1.5)<line_sep>image=tf.image.random_contrast(image lower=0.5 upper=1.5)<line_sep>image=tf.image.random_brightness(image max_delta=brightness_max_delta)<block_end>image=tf.clip_by_value(image 0.0 1.0)<line_sep>image=image<times>255<line_sep>image=image[: : ::-1]<line_sep><return>image<block_end><class_stmt>Dataset<block_start><def_stmt>__init__ self images_paths labels is_real input_size batch_size params 
return_original=<false><block_start>self.images_paths=images_paths<line_sep>self.input_size=input_size<line_sep>self.batch_size=batch_size<line_sep>self.params=params<line_sep>self.return_original=return_original<line_sep>self.loaded_images=[]<line_sep>self.labels=Dataset.reassign_labels(labels)<line_sep>self.is_real=is_real<if_stmt>self.params['preload']<block_start>self.preload()<if_stmt>self.params['pretile']<block_start>self.pretile()<block_end><block_end>self.images_indexes_per_class=collections.defaultdict(list)<for_stmt>index,label enumerate(self.labels)<block_start>self.images_indexes_per_class[label].append(index)<block_end><if_stmt>self.params['weighted_sampling']<block_start>self.calc_sampling_probs()<block_end><block_end><def_stmt>calc_sampling_probs self<block_start>''' Counts number of images per class and returns probability distribution so that
distribution of images classes becomes uniform.
'''<line_sep>frequency={l:self.labels.count(l)<for>l set(self.labels)}<line_sep>probs=np.empty((len(self.labels)) dtype=np.float32)<for_stmt>idx,l enumerate(self.labels)<block_start>probs[idx]=1.0/frequency[l]<block_end>self.probs=probs/np.sum(probs)<block_end><def_stmt>preload self<block_start>''' Pre-loads images in RAM. '''<for_stmt>image_path self.images_paths<block_start>self.loaded_images.append(cv2.imread(image_path))<block_end><block_end><def_stmt>pretile self<block_start>''' Pre-tiles images in RAM. Makes training faster but requires huge amount of RAM. '''<line_sep>tiled_labels=[]<line_sep>tiled_is_real=[]<line_sep>tiled_loaded_images=[]<for_stmt>read_image,label,real zip(self.loaded_images self.labels self.is_real)<block_start><if_stmt><not>real<block_start><for_stmt>n range(2 self.params['max_tiling']+1)<block_start>image=self.tile(read_image n)<line_sep>tiled_labels.append(label)<line_sep>tiled_is_real.append(real)<line_sep>tiled_loaded_images.append(image)<block_end><block_end><block_end>self.labels.extend(tiled_labels)<line_sep>self.is_real.extend(tiled_is_real)<line_sep>self.loaded_images.extend(tiled_loaded_images)<block_end><def_stmt>tile self image n<block_start>''' Tiles images taking their aspect ratios into account. '''<line_sep>aspect_ratio=image.shape[1]/image.shape[0]<if_stmt>aspect_ratio<l>1<block_start>w_repeats=n<line_sep>h_repeats=max(1<if>n<ne>self.params['max_tiling']<else>2 int(n<times>aspect_ratio))<block_end><else_stmt><block_start>h_repeats=n<line_sep>w_repeats=max(1<if>n<ne>self.params['max_tiling']<else>2 int(n/aspect_ratio))<block_end>image=np.tile(image (h_repeats w_repeats 1))<line_sep>fit_size=self.input_size<times>3<if_stmt>image.shape[0]<g>fit_size<or>image.shape[1]<g>fit_size<block_start>image=fit_to_max_size(image self.input_size<times>3)<block_end><return>image<block_end><def_stmt>sample_index self<block_start>''' Samples indexes. 
'''<line_sep>choices=list(range(len(self.labels)))<if_stmt>self.params['weighted_sampling']<block_start>choices=np.random.choice(choices len(self.labels) p=self.probs)<block_end><elif_stmt>self.params['shuffle']<block_start>np.random.shuffle(choices)<block_end># duplication is required for triplet loss at least.
duplicated_choices=[]<for_stmt>choice choices<block_start><for_stmt>_ range(self.params['duplicate_n_times'])<block_start>duplicated_choices.append(int(np.random.choice(self.images_indexes_per_class[self.labels[choice]] 1)))<block_end><block_end><for_stmt>choice duplicated_choices<block_start><yield>[choice]<block_end><block_end><def_stmt>read self index<block_start>''' Reads an image from RAM or disk and returns it with corresponding class label. '''<if_stmt>self.params['preload']<block_start>image=self.loaded_images[index[0]].astype(np.float32)<block_end><else_stmt><block_start>image=cv2.imread(self.images_paths[index[0]]).astype(np.float32)<block_end><if_stmt><not>self.params['pretile']<and><not>self.is_real[index[0]]<block_start>n=random.randint(1 self.params['max_tiling'])<line_sep>image=self.tile(image n)<block_end><return>image self.labels[index[0]]<block_end><def_stmt>cv2_rotate self image<block_start>''' Rotates images on random angle using opencv. '''<line_sep>c_xy=image.shape[1]/2 image.shape[0]/2<line_sep>angle=random.uniform(-self.params['add_rot_angle'] self.params['add_rot_angle'])<times>57.2958<if_stmt>self.params['rot90']<block_start>angle<augadd>random.randint(0 3)<times>180<block_end>rotation_matrix=cv2.getRotationMatrix2D(c_xy angle 1)<line_sep>img_rotation=cv2.warpAffine(image rotation_matrix (image.shape[1] image.shape[0]))<line_sep><return>img_rotation<block_end><def_stmt>cv2_noise_and_blur self image<block_start>''' Adds noise making image darker and blur.'''<line_sep>image=image.astype(np.float32)<if_stmt>self.params['apply_gray_noise']<and>np.random.choice([<true> <false>])<block_start>image=gray_noise(image)<block_end><if_stmt>self.params['blur']<and>np.random.choice([<true> <false>])<block_start>image=blur(image)<block_end><return>image<block_end><def_stmt>train_preprocess self choice<block_start>''' Applies training preprocessing. 
'''<line_sep>original,label=tf.numpy_function(self.read [choice] [tf.float32 tf.int64])<line_sep>image=tf_random_crop_and_resize(original self.input_size)<line_sep>image,=tf.numpy_function(self.cv2_noise_and_blur [image] [tf.float32])<if_stmt>self.params['horizontal_flip']<block_start>image=tf.image.random_flip_left_right(image)<block_end><if_stmt>self.params['vertical_flip']<block_start>image=tf.image.random_flip_up_down(image)<block_end><if_stmt>self.params['add_rot_angle']<g>0<or>self.params['rot90']<block_start>image,=tf.numpy_function(self.cv2_rotate [image] [tf.float32])<block_end>image=tf_distort_color(image)<line_sep>image=preproces_image(image)<if_stmt>self.return_original<block_start><return>image label original<block_end><return>image label<block_end><def_stmt>__call__ self *args **kwargs<block_start>''' Returns tf.data.Dataset instance as well as number of classes in training set. '''<line_sep>dataset=tf.data.Dataset.from_generator(self.sample_index (tf.int32) (tf.TensorShape([1])))<line_sep>dataset=dataset.map(self.train_preprocess num_parallel_calls=tf.data.experimental.AUTOTUNE)<if_stmt><not>self.return_original<block_start>dataset=dataset.batch(self.batch_size drop_remainder=<true>)<line_sep>dataset=dataset.prefetch(tf.data.experimental.AUTOTUNE)<line_sep>dataset=dataset.repeat()<block_end><return>dataset len(set(self.labels))<block_end>@staticmethod<def_stmt>create_from_list path input_size batch_size params return_original=<false><block_start>''' Creates Dataset instance from path to images list.
Images list has following format:
<relative_path_to_image> <class_label>
'''<line_sep>impaths,labels,is_real,_=from_list(path)<line_sep><return>Dataset(impaths labels is_real input_size batch_size params return_original)()<block_end>@staticmethod<def_stmt>reassign_labels labels<block_start>''' Re-assign class labels so that they starts from 0 and ends with (num_classes - 1). '''<line_sep>unique_labels=list(set(labels))<line_sep><return>[unique_labels.index(l)<for>l labels]<block_end><block_end><def_stmt>main <block_start><import_stmt>argparse<import_stmt>time<line_sep>args=argparse.ArgumentParser()<line_sep>args.add_argument('--gallery' required=<true>)<line_sep>args.add_argument('--input_size' default=224 type=int)<line_sep>args.add_argument('--augmentation_config' required=<true>)<line_sep>args=args.parse_args()<with_stmt>open(args.augmentation_config)<as>f<block_start>augmentation_config=json.load(f)<block_end>dataset,_=Dataset.create_from_list(args.gallery args.input_size 1 augmentation_config <true>)<line_sep>t=time.time()<for_stmt>preprocessed,label,original dataset.take(1000)<block_start>cv2.imshow('preprocessed' depreprocess_image(preprocessed.numpy()))<line_sep>cv2.imshow('original' original.numpy().astype(np.uint8))<line_sep>print(label)<if_stmt>cv2.waitKey(0)<eq>27<block_start><break><block_end><block_end>print(time.time()-t)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_stmt>sys<import_stmt>unittest<import_from_stmt>jnius.reflect autoclass<try_stmt><block_start>long<block_end><except_stmt>NameError# Python 3
<block_start>long=int<block_end><def_stmt>py2_encode uni<block_start><if_stmt>sys.version_info<l>(3 0)<block_start>uni=uni.encode('utf-8')<block_end><return>uni<block_end><class_stmt>BasicsTest(unittest.TestCase)<block_start><def_stmt>test_static_methods self<block_start>Test=autoclass('org.jnius.BasicsTest')<line_sep>self.assertEqual(Test.methodStaticZ() <true>)<line_sep>self.assertEqual(Test.methodStaticB() 127)<line_sep>self.assertEqual(Test.methodStaticC() 'k')<line_sep>self.assertEqual(Test.methodStaticS() 32767)<line_sep>self.assertEqual(Test.methodStaticI() 2147483467)<line_sep>self.assertEqual(Test.methodStaticJ() 9223372036854775807)<line_sep>self.assertAlmostEqual(Test.methodStaticF() 1.23456789)<line_sep>self.assertEqual(Test.methodStaticD() 1.23456789)<line_sep>self.assertEqual(Test.methodStaticString() py2_encode(u'hello \U0001F30E!'))<block_end><def_stmt>test_static_fields self<block_start>Test=autoclass('org.jnius.BasicsTest')<line_sep>self.assertEqual(Test.fieldStaticZ <true>)<line_sep>self.assertEqual(Test.fieldStaticB 127)<line_sep>self.assertEqual(Test.fieldStaticC 'k')<line_sep>self.assertEqual(Test.fieldStaticS 32767)<line_sep>self.assertEqual(Test.fieldStaticI 2147483467)<line_sep>self.assertEqual(Test.fieldStaticJ 9223372036854775807)<line_sep>self.assertAlmostEqual(Test.fieldStaticF 1.23456789)<line_sep>self.assertEqual(Test.fieldStaticD 1.23456789)<line_sep>self.assertEqual(Test.fieldStaticString py2_encode(u'hello \U0001F30E!'))<block_end><def_stmt>test_instance_methods self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodZ() <true>)<line_sep>self.assertEqual(test.methodB() 127)<line_sep>self.assertEqual(test.methodC() 'k')<line_sep>self.assertEqual(test.methodS() 32767)<line_sep>self.assertEqual(test.methodI() 2147483467)<line_sep>self.assertEqual(test.methodJ() 9223372036854775807)<line_sep>self.assertAlmostEqual(test.methodF() 1.23456789)<line_sep>self.assertEqual(test.methodD() 
1.23456789)<line_sep>self.assertEqual(test.methodString() py2_encode(u'hello \U0001F30E!'))<block_end><def_stmt>test_instance_fields self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.fieldZ <true>)<line_sep>self.assertEqual(test.fieldB 127)<line_sep>self.assertEqual(test.fieldC 'k')<line_sep>self.assertEqual(test.fieldS 32767)<line_sep>self.assertEqual(test.fieldI 2147483467)<line_sep>self.assertEqual(test.fieldJ 9223372036854775807)<line_sep>self.assertAlmostEqual(test.fieldF 1.23456789)<line_sep>self.assertEqual(test.fieldD 1.23456789)<line_sep>self.assertEqual(test.fieldString py2_encode(u'hello \U0001F30E!'))<line_sep>test2=autoclass('org.jnius.BasicsTest')(10)<line_sep>self.assertEqual(test2.fieldB 10)<line_sep>self.assertEqual(test.fieldB 127)<line_sep>self.assertEqual(test2.fieldB 10)<block_end><def_stmt>test_instance_getter_naming self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.disabled <true>)<line_sep>self.assertEqual(test.enabled <false>)<block_end><def_stmt>test_instance_set_fields self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>test.fieldSetZ=<true><line_sep>test.fieldSetB=127<line_sep>test.fieldSetC=ord('k')<line_sep>test.fieldSetS=32767<line_sep>test.fieldSetI=2147483467<line_sep>test.fieldSetJ=9223372036854775807<line_sep>test.fieldSetF=1.23456789<line_sep>test.fieldSetD=1.23456789<line_sep>self.assertTrue(test.testFieldSetZ())<line_sep>self.assertTrue(test.testFieldSetB())<line_sep>self.assertTrue(test.testFieldSetC())<line_sep>self.assertTrue(test.testFieldSetS())<line_sep>self.assertTrue(test.testFieldSetI())<line_sep>self.assertTrue(test.testFieldSetJ())<line_sep>self.assertTrue(test.testFieldSetF())<line_sep>self.assertTrue(test.testFieldSetD())<block_end><def_stmt>test_instances_methods_array self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodArrayZ() 
[<true>]<times>3)<line_sep>self.assertEqual(test.methodArrayB()[0] 127)<if_stmt>sys.version_info<ge>(3 0)<block_start>self.assertEqual(test.methodArrayB() [127]<times>3)<block_end>self.assertEqual(test.methodArrayC() ['k']<times>3)<line_sep>self.assertEqual(test.methodArrayS() [32767]<times>3)<line_sep>self.assertEqual(test.methodArrayI() [2147483467]<times>3)<line_sep>self.assertEqual(test.methodArrayJ() [9223372036854775807]<times>3)<line_sep>ret=test.methodArrayF()<line_sep>ref=[1.23456789]<times>3<line_sep>self.assertAlmostEqual(ret[0] ref[0])<line_sep>self.assertAlmostEqual(ret[1] ref[1])<line_sep>self.assertAlmostEqual(ret[2] ref[2])<line_sep>self.assertEqual(test.methodArrayD() [1.23456789]<times>3)<line_sep>self.assertEqual(test.methodArrayString() [py2_encode(u'hello \U0001F30E!')]<times>3)<block_end><def_stmt>test_instances_methods_params self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodParamsZBCSIJFD(<true> 127 'k' 32767 2147483467 9223372036854775807 1.23456789 1.23456789) <true>)<line_sep>self.assertEqual(test.methodParamsZBCSIJFD(<true> long(127) 'k' long(32767) long(2147483467) 9223372036854775807 1.23456789 1.23456789) <true>)<line_sep>self.assertEqual(test.methodParamsString(py2_encode(u'hello \U0001F30E!')) <true>)<line_sep>self.assertEqual(test.methodParamsArrayI([1 2 3]) <true>)<line_sep>self.assertEqual(test.methodParamsArrayString([py2_encode(u'hello') py2_encode(u'\U0001F30E')]) <true>)<block_end><def_stmt>test_instances_methods_params_object_list_str self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodParamsObject(['hello' 'world']) <true>)<block_end><def_stmt>test_instances_methods_params_object_list_int self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodParamsObject([1 2]) <true>)<block_end><def_stmt>test_instances_methods_params_object_list_float 
self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodParamsObject([3.14 1.61]) <true>)<block_end><def_stmt>test_instances_methods_params_object_list_long self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodParamsObject([1 2]) <true>)<block_end><def_stmt>test_instances_methods_params_array_byte self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodParamsArrayByte([127 127 127]) <true>)<line_sep>ret=test.methodArrayB()<line_sep>self.assertEqual(test.methodParamsArrayByte(ret) <true>)<block_end><def_stmt>test_return_array_as_object_array_of_strings self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodReturnStrings() [py2_encode(u'Hello') py2_encode(u'\U0001F30E')])<block_end><def_stmt>test_return_array_as_object_of_integers self<block_start>test=autoclass('org.jnius.BasicsTest')()<line_sep>self.assertEqual(test.methodReturnIntegers() [1 2])<block_end><block_end> |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>oslo_policy policy<line_sep>_READER="role:reader"<line_sep>_SYSTEM_ADMIN="role:admin and system_scope:all"<line_sep>_SYSTEM_READER="role:reader and system_scope:all"<line_sep>rules=[policy.DocumentedRuleDefault(name='quotas:get' check_str=f'rule:all_users or {_READER}' scope_types=['project'] description='List quotas for the project the user belongs to.' operations=[{'path':'/v1/quotas' 'method':'GET'}]) policy.DocumentedRuleDefault(name='project_quotas:get' check_str=f'rule:service_admin or {_SYSTEM_READER}' scope_types=['system'] description='List quotas for the specified project.' operations=[{'path':'/v1/project-quotas' 'method':'GET'} {'path':'/v1/project-quotas/{uuid}' 'method':'GET'}]) policy.DocumentedRuleDefault(name='project_quotas:put' check_str=f'rule:service_admin or {_SYSTEM_ADMIN}' scope_types=['system'] description='Create or update the configured project quotas for '<concat>'the project with the specified UUID.' operations=[{'path':'/v1/project-quotas/{uuid}' 'method':'PUT'}]) policy.DocumentedRuleDefault(name='project_quotas:delete' check_str=f'rule:service_admin or {_SYSTEM_ADMIN}' scope_types=['system'] description='Delete the project quotas configuration for the '<concat>'project with the requested UUID.' operations=[{'path':'/v1/quotas}' 'method':'DELETE'}]) ]<def_stmt>list_rules <block_start><return>rules<block_end> |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>typing Any Dict List Union<import_stmt>torch<import_from_stmt>pytorch_lightning.accelerators.accelerator Accelerator<import_from_stmt>pytorch_lightning.utilities device_parser<import_from_stmt>pytorch_lightning.utilities.exceptions MisconfigurationException<import_from_stmt>pytorch_lightning.utilities.imports _PSUTIL_AVAILABLE<import_from_stmt>pytorch_lightning.utilities.types _DEVICE<class_stmt>CPUAccelerator(Accelerator)<block_start>"""Accelerator for CPU devices."""<def_stmt>setup_environment self root_device:torch.device<arrow><none><block_start>"""
Raises:
MisconfigurationException:
If the selected device is not CPU.
"""<line_sep>super().setup_environment(root_device)<if_stmt>root_device.type<ne>"cpu"<block_start><raise>MisconfigurationException(f"Device should be CPU, got {root_device} instead.")<block_end><block_end><def_stmt>get_device_stats self device:_DEVICE<arrow>Dict[str Any]<block_start>"""Get CPU stats from ``psutil`` package."""<line_sep><return>get_cpu_stats()<block_end>@staticmethod<def_stmt>parse_devices devices:Union[int str List[int]]<arrow>int<block_start>"""Accelerator device parsing logic."""<line_sep>devices=device_parser.parse_cpu_cores(devices)<line_sep><return>devices<block_end>@staticmethod<def_stmt>get_parallel_devices devices:Union[int str List[int]]<arrow>List[torch.device]<block_start>"""Gets parallel devices for the Accelerator."""<line_sep>devices=device_parser.parse_cpu_cores(devices)<line_sep><return>[torch.device("cpu")]<times>devices<block_end>@staticmethod<def_stmt>auto_device_count <arrow>int<block_start>"""Get the devices when set to auto."""<line_sep><return>1<block_end>@staticmethod<def_stmt>is_available <arrow>bool<block_start>"""CPU is always available for execution."""<line_sep><return><true><block_end>@classmethod<def_stmt>register_accelerators cls accelerator_registry:Dict<arrow><none><block_start>accelerator_registry.register("cpu" cls description=f"{cls.__class__.__name__}" )<block_end><block_end># CPU device metrics
_CPU_VM_PERCENT="cpu_vm_percent"<line_sep>_CPU_PERCENT="cpu_percent"<line_sep>_CPU_SWAP_PERCENT="cpu_swap_percent"<def_stmt>get_cpu_stats <arrow>Dict[str float]<block_start><if_stmt><not>_PSUTIL_AVAILABLE<block_start><raise>ModuleNotFoundError("Fetching CPU device stats requires `psutil` to be installed."<concat>" Install it by running `pip install -U psutil`.")<block_end><import_stmt>psutil<line_sep><return>{_CPU_VM_PERCENT:psutil.virtual_memory().percent _CPU_PERCENT:psutil.cpu_percent() _CPU_SWAP_PERCENT:psutil.swap_memory().percent }<block_end> |
<import_stmt>pytest<import_from_stmt>ruptures.datasets pw_constant<import_from_stmt>ruptures.show display<import_from_stmt>ruptures.show.display MatplotlibMissingError<line_sep>@pytest.fixture(scope="module")<def_stmt>signal_bkps <block_start>signal,bkps=pw_constant()<line_sep><return>signal bkps<block_end><def_stmt>test_display_with_options signal_bkps<block_start><try_stmt><block_start>signal,bkps=signal_bkps<line_sep>fig,axarr=display(signal bkps)<line_sep>fig,axarr=display(signal bkps bkps)<line_sep>figsize=(20 10)# figure size
fig,axarr=display(signal bkps figsize=figsize )<line_sep>fig,axarr=display(signal[: 0] bkps figsize=figsize )<block_end><except_stmt>MatplotlibMissingError<block_start>pytest.skip("matplotlib is not installed")<block_end><block_end><def_stmt>test_display_without_options signal_bkps<block_start><try_stmt><block_start>signal,bkps=signal_bkps<line_sep>fig,axarr=display(signal bkps)<line_sep>fig,axarr=display(signal bkps bkps)<line_sep>figsize=(20 10)# figure size
fig,axarr=display(signal bkps)<line_sep>fig,axarr=display(signal[: 0] bkps)<block_end><except_stmt>MatplotlibMissingError<block_start>pytest.skip("matplotlib is not installed")<block_end><block_end><def_stmt>test_display_with_new_options signal_bkps<block_start><try_stmt><block_start>signal,bkps=signal_bkps<line_sep>fig,axarr=display(signal bkps)<line_sep>fig,axarr=display(signal bkps bkps)<line_sep>fig,axarr=display(signal bkps facecolor="k" edgecolor="b")<line_sep>fig,axarr=display(signal[: 0] bkps facecolor="k" edgecolor="b")<block_end><except_stmt>MatplotlibMissingError<block_start>pytest.skip("matplotlib is not installed")<block_end><block_end><def_stmt>test_display_with_computed_chg_pts_options signal_bkps<block_start><try_stmt><block_start>signal,bkps=signal_bkps<line_sep>fig,axarr=display(signal bkps)<line_sep>fig,axarr=display(signal bkps bkps)<line_sep>fig,axarr=display(signal bkps bkps computed_chg_pts_color="k")<line_sep>fig,axarr=display(signal bkps bkps computed_chg_pts_color="k" computed_chg_pts_linewidth=3)<line_sep>fig,axarr=display(signal bkps bkps computed_chg_pts_color="k" computed_chg_pts_linewidth=3 computed_chg_pts_linestyle="--" )<line_sep>fig,axarr=display(signal bkps bkps computed_chg_pts_color="k" computed_chg_pts_linewidth=3 computed_chg_pts_linestyle="--" computed_chg_pts_alpha=1.0 )<block_end><except_stmt>MatplotlibMissingError<block_start>pytest.skip("matplotlib is not installed")<block_end><block_end> |
expected_output={'nodes':{1:{'te_router_id':'192.168.0.4' 'host_name':'rtrD' 'isis_system_id':['1921.68ff.1004 level-1' '1921.68ff.1004 level-2' '1921.68ff.1004 level-2'] 'asn':[65001 65001 65001] 'domain_id':[1111 1111 9999] 'advertised_prefixes':['192.168.0.4' '192.168.0.4' '192.168.0.4' '192.168.0.6']} 2:{'te_router_id':'192.168.0.1' 'host_name':'rtrA' 'isis_system_id':['1921.68ff.1001 level-2'] 'advertised_prefixes':['192.168.0.1']}}}<line_sep> |
<import_stmt>sys<import_stmt>os<import_stmt>electronDbsDiscovery<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("testEgammaAnalyzers")<line_sep>process.DQMStore=cms.Service("DQMStore")<line_sep>process.load("DQMServices.Components.DQMStoreStats_cfi")<line_sep>#from DQMServices.Components.DQMStoreStats_cfi import *
#dqmStoreStats.runOnEndJob = cms.untracked.bool(True)
process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(-1))<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring() secondaryFileNames=cms.untracked.vstring())<line_sep>process.source.fileNames.extend(electronDbsDiscovery.search())<line_sep>process.load("Configuration.StandardSequences.GeometryRecoDB_cff")<line_sep>process.load("DQMOffline.EGamma.egammaDQMOffline_cff")<line_sep>process.dqmElectronTagProbeAnalysis.OutputFile=cms.string(os.environ['TEST_HISTOS_FILE'])<line_sep>process.p=cms.Path(process.egammaDQMOffline<times>process.dqmStoreStats)<line_sep> |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 08:43
<import_stmt>datetime<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>seahub.base.fields<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[]<line_sep>operations=[migrations.CreateModel(name='AnonymousShare' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('repo_owner' seahub.base.fields.LowerCaseCharField(max_length=255)) ('repo_id' models.CharField(max_length=36)) ('anonymous_email' seahub.base.fields.LowerCaseCharField(max_length=255)) ('token' models.CharField(max_length=25 unique=<true>)) ] ) migrations.CreateModel(name='ExtraGroupsSharePermission' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('repo_id' models.CharField(db_index=<true> max_length=36)) ('group_id' models.IntegerField(db_index=<true>)) ('permission' models.CharField(max_length=30)) ] ) migrations.CreateModel(name='ExtraSharePermission' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('repo_id' models.CharField(db_index=<true> max_length=36)) ('share_to' models.CharField(db_index=<true> max_length=255)) ('permission' models.CharField(max_length=30)) ] ) migrations.CreateModel(name='FileShare' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('username' seahub.base.fields.LowerCaseCharField(db_index=<true> max_length=255)) ('repo_id' models.CharField(db_index=<true> max_length=36)) ('path' models.TextField()) ('token' models.CharField(max_length=100 unique=<true>)) ('ctime' models.DateTimeField(default=datetime.datetime.now)) ('view_cnt' models.IntegerField(default=0)) ('s_type' models.CharField(db_index=<true> default=b'f' max_length=2)) ('password' models.CharField(max_length=128 null=<true>)) ('expire_date' models.DateTimeField(null=<true>)) ('permission' 
models.CharField(choices=[(b'view_download' b'View and download') (b'view_only' b'Disable download')] db_index=<true> default=b'view_download' max_length=50)) ] ) migrations.CreateModel(name='OrgFileShare' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('org_id' models.IntegerField(db_index=<true>)) ('file_share' models.OneToOneField(on_delete=django.db.models.deletion.CASCADE to='share.FileShare')) ] ) migrations.CreateModel(name='PrivateFileDirShare' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('from_user' seahub.base.fields.LowerCaseCharField(db_index=<true> max_length=255)) ('to_user' seahub.base.fields.LowerCaseCharField(db_index=<true> max_length=255)) ('repo_id' models.CharField(db_index=<true> max_length=36)) ('path' models.TextField()) ('token' models.CharField(max_length=10 unique=<true>)) ('permission' models.CharField(max_length=5)) ('s_type' models.CharField(default=b'f' max_length=5)) ] ) migrations.CreateModel(name='UploadLinkShare' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('username' seahub.base.fields.LowerCaseCharField(db_index=<true> max_length=255)) ('repo_id' models.CharField(db_index=<true> max_length=36)) ('path' models.TextField()) ('token' models.CharField(max_length=100 unique=<true>)) ('ctime' models.DateTimeField(default=datetime.datetime.now)) ('view_cnt' models.IntegerField(default=0)) ('password' models.CharField(max_length=128 null=<true>)) ('expire_date' models.DateTimeField(null=<true>)) ] ) ]<block_end> |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Compilation of DML exclusive constraint conflict handling."""<import_from_future_stmt> annotations<import_from_stmt>typing *<import_from_stmt>edb errors<import_from_stmt>edb.common context<as>pctx<import_from_stmt>edb.ir ast<as>irast<import_from_stmt>edb.ir typeutils<import_from_stmt>edb.schema constraints<as>s_constr<import_from_stmt>edb.schema name<as>s_name<import_from_stmt>edb.schema objtypes<as>s_objtypes<import_from_stmt>edb.schema pointers<as>s_pointers<import_from_stmt>edb.schema utils<as>s_utils<import_from_stmt>edb.edgeql ast<as>qlast<import_from_stmt>edb.edgeql utils<as>qlutils<import_from_stmt>. astutils<import_from_stmt>. context<import_from_stmt>. dispatch<import_from_stmt>. inference<import_from_stmt>. setgen<import_from_stmt>. typegen<def_stmt>_compile_conflict_select stmt:irast.MutatingStmt subject_typ:s_objtypes.ObjectType * for_inheritance:bool fake_dml_set:Optional[irast.Set] obj_constrs:Sequence[s_constr.Constraint] constrs:Dict[str Tuple[s_pointers.Pointer List[s_constr.Constraint]]] parser_context:Optional[pctx.ParserContext] ctx:context.ContextLevel <arrow>Optional[qlast.Expr]<block_start>"""Synthesize a select of conflicting objects
... for a single object type. This gets called once for each ancestor
type that provides constraints to the type being inserted.
`cnstrs` contains the constraints to consider.
"""<line_sep># Find which pointers we need to grab
needed_ptrs=set(constrs)<for_stmt>constr obj_constrs<block_start>subjexpr=constr.get_subjectexpr(ctx.env.schema)<assert_stmt>subjexpr<line_sep>needed_ptrs<augor>qlutils.find_subject_ptrs(subjexpr.qlast)<block_end>wl=list(needed_ptrs)<line_sep>ptr_anchors={}<while_stmt>wl<block_start>p=wl.pop()<line_sep>ptr=subject_typ.getptr(ctx.env.schema s_name.UnqualName(p))<if_stmt>expr:=ptr.get_expr(ctx.env.schema)<block_start><assert_stmt>isinstance(expr.qlast qlast.Expr)<line_sep>ptr_anchors[p]=expr.qlast<for_stmt>ref qlutils.find_subject_ptrs(expr.qlast)<block_start><if_stmt>ref<not><in>needed_ptrs<block_start>wl.append(ref)<line_sep>needed_ptrs.add(ref)<block_end><block_end><block_end><block_end>ctx.anchors=ctx.anchors.copy()<line_sep># If we are given a fake_dml_set to directly represent the result
# of our DML, use that instead of populating the result.
<if_stmt>fake_dml_set<block_start><for_stmt>p needed_ptrs|{'id'}<block_start>ptr=subject_typ.getptr(ctx.env.schema s_name.UnqualName(p))<line_sep>val=setgen.extend_path(fake_dml_set ptr ctx=ctx)<line_sep>ptr_anchors[p]=ctx.create_anchor(val p)<block_end><block_end># Find the IR corresponding to the fields we care about and
# produce anchors for them
ptrs_in_shape=set()<for_stmt>elem,_ stmt.subject.shape<block_start><assert_stmt>elem.rptr<is><not><none><line_sep>name=elem.rptr.ptrref.shortname.name<line_sep>ptrs_in_shape.add(name)<if_stmt>name<in>needed_ptrs<and>name<not><in>ptr_anchors<block_start><assert_stmt>elem.expr<if_stmt>inference.infer_volatility(elem.expr ctx.env).is_volatile()<block_start><if_stmt>for_inheritance<block_start>error=('INSERT does not support volatile properties with '<concat>'exclusive constraints when another statement in '<concat>'the same query modifies a related type')<block_end><else_stmt><block_start>error=('INSERT UNLESS CONFLICT ON does not support volatile '<concat>'properties')<block_end><raise>errors.UnsupportedFeatureError(error context=parser_context)<block_end># FIXME: The wrong thing will definitely happen if there are
# volatile entries here
ptr_anchors[name]=ctx.create_anchor(setgen.ensure_set(elem.expr ctx=ctx) name)<block_end><block_end><if_stmt>for_inheritance<and><not>ptrs_in_shape<block_start><return><none><block_end># Fill in empty sets for pointers that are needed but not present
present_ptrs=set(ptr_anchors)<for_stmt>p (needed_ptrs-present_ptrs)<block_start>ptr=subject_typ.getptr(ctx.env.schema s_name.UnqualName(p))<line_sep>typ=ptr.get_target(ctx.env.schema)<assert_stmt>typ<line_sep>ptr_anchors[p]=qlast.TypeCast(expr=qlast.Set(elements=[]) type=typegen.type_to_ql_typeref(typ ctx=ctx))<block_end><if_stmt><not>ptr_anchors<block_start><raise>errors.QueryError('INSERT UNLESS CONFLICT property requires matching shape' context=parser_context )<block_end>conds:List[qlast.Expr]=[]<for_stmt>ptrname,(ptr ptr_cnstrs) constrs.items()<block_start><if_stmt>ptrname<not><in>present_ptrs<block_start><continue><block_end>anchor=qlutils.subject_paths_substitute(ptr_anchors[ptrname] ptr_anchors)<line_sep>ptr_val=qlast.Path(partial=<true> steps=[qlast.Ptr(ptr=qlast.ObjectRef(name=ptrname))])<line_sep>ptr,ptr_cnstrs=constrs[ptrname]<line_sep>ptr_card=ptr.get_cardinality(ctx.env.schema)<for_stmt>cnstr ptr_cnstrs<block_start>lhs:qlast.Expr=anchor<line_sep>rhs:qlast.Expr=ptr_val<line_sep># If there is a subjectexpr, substitute our lhs and rhs in
# for __subject__ in the subjectexpr and compare *that*
<if_stmt>(subjectexpr:=cnstr.get_subjectexpr(ctx.env.schema))<block_start><assert_stmt>isinstance(subjectexpr.qlast qlast.Expr)<line_sep>lhs=qlutils.subject_substitute(subjectexpr.qlast lhs)<line_sep>rhs=qlutils.subject_substitute(subjectexpr.qlast rhs)<block_end>conds.append(qlast.BinOp(op='='<if>ptr_card.is_single()<else>'IN' left=lhs right=rhs ))<block_end><block_end>insert_subject=qlast.Path(steps=[s_utils.name_to_ast_ref(subject_typ.get_name(ctx.env.schema))])<for_stmt>constr obj_constrs# TODO: learn to skip irrelevant ones for UPDATEs at least?
<block_start>subjectexpr=constr.get_subjectexpr(ctx.env.schema)<assert_stmt>subjectexpr<and>isinstance(subjectexpr.qlast qlast.Expr)<line_sep>lhs=qlutils.subject_paths_substitute(subjectexpr.qlast ptr_anchors)<line_sep>rhs=qlutils.subject_substitute(subjectexpr.qlast insert_subject)<line_sep>conds.append(qlast.BinOp(op='=' left=lhs right=rhs))<block_end><if_stmt><not>conds<block_start><return><none><block_end># We use `any` to compute the disjunction here because some might
# be empty.
<if_stmt>len(conds)<eq>1<block_start>cond=conds[0]<block_end><else_stmt><block_start>cond=qlast.FunctionCall(func='any' args=[qlast.Set(elements=conds)] )<block_end># For the result filtering we need to *ignore* the same object
<if_stmt>fake_dml_set<block_start>anchor=qlutils.subject_paths_substitute(ptr_anchors['id'] ptr_anchors)<line_sep>ptr_val=qlast.Path(partial=<true> steps=[qlast.Ptr(ptr=qlast.ObjectRef(name='id'))])<line_sep>cond=qlast.BinOp(op='AND' left=cond right=qlast.BinOp(op='!=' left=anchor right=ptr_val) )<block_end># Produce a query that finds the conflicting objects
select_ast=qlast.DetachedExpr(expr=qlast.SelectQuery(result=insert_subject where=cond))<line_sep><return>select_ast<block_end><def_stmt>_constr_matters constr:s_constr.Constraint ctx:context.ContextLevel <arrow>bool<block_start>schema=ctx.env.schema<line_sep><return>(<not>constr.generic(schema)<and><not>constr.get_delegated(schema)<and>(constr.get_owned(schema)<or>all(anc.get_delegated(schema)<or>anc.generic(schema)<for>anc constr.get_ancestors(schema).objects(schema))))<block_end>PointerConstraintMap=Dict[str Tuple[s_pointers.Pointer List[s_constr.Constraint]] ]<line_sep>ConstraintPair=Tuple[PointerConstraintMap List[s_constr.Constraint]]<line_sep>ConflictTypeMap=Dict[s_objtypes.ObjectType ConstraintPair]<def_stmt>_split_constraints obj_constrs:Sequence[s_constr.Constraint] constrs:PointerConstraintMap ctx:context.ContextLevel <arrow>ConflictTypeMap<block_start>schema=ctx.env.schema<line_sep>type_maps:ConflictTypeMap={}<line_sep># Split up pointer constraints by what object types they come from
<for_stmt>name,(_ p_constrs) constrs.items()<block_start><for_stmt>p_constr p_constrs<block_start>ancs=(p_constr )+p_constr.get_ancestors(schema).objects(schema)<for_stmt>anc ancs<block_start><if_stmt><not>_constr_matters(anc ctx)<block_start><continue><block_end>p_ptr=anc.get_subject(schema)<assert_stmt>isinstance(p_ptr s_pointers.Pointer)<line_sep>obj=p_ptr.get_source(schema)<assert_stmt>isinstance(obj s_objtypes.ObjectType)<line_sep>map,_=type_maps.setdefault(obj ({} []))<line_sep>_,entry=map.setdefault(name (p_ptr []))<line_sep>entry.append(anc)<block_end><block_end><block_end># Split up object constraints by what object types they come from
<for_stmt>obj_constr obj_constrs<block_start>ancs=(obj_constr )+obj_constr.get_ancestors(schema).objects(schema)<for_stmt>anc ancs<block_start><if_stmt><not>_constr_matters(anc ctx)<block_start><continue><block_end>obj=anc.get_subject(schema)<assert_stmt>isinstance(obj s_objtypes.ObjectType)<line_sep>_,o_constr_entry=type_maps.setdefault(obj ({} []))<line_sep>o_constr_entry.append(anc)<block_end><block_end><return>type_maps<block_end><def_stmt>compile_conflict_select stmt:irast.MutatingStmt subject_typ:s_objtypes.ObjectType * for_inheritance:bool=<false> fake_dml_set:Optional[irast.Set]=<none> obj_constrs:Sequence[s_constr.Constraint] constrs:PointerConstraintMap parser_context:Optional[pctx.ParserContext] ctx:context.ContextLevel <arrow>Tuple[irast.Set bool bool]<block_start>"""Synthesize a select of conflicting objects
This teases apart the constraints we care about based on which
type they originate from, generates a SELECT for each type, and
unions them together.
`cnstrs` contains the constraints to consider.
"""<line_sep>schema=ctx.env.schema<if_stmt>for_inheritance<block_start>type_maps={subject_typ:(constrs list(obj_constrs))}<block_end><else_stmt><block_start>type_maps=_split_constraints(obj_constrs constrs ctx=ctx)<block_end># Generate a separate query for each type
from_parent=<false><line_sep>frags=[]<for_stmt>a_obj,(a_constrs a_obj_constrs) type_maps.items()<block_start>frag=_compile_conflict_select(stmt a_obj obj_constrs=a_obj_constrs constrs=a_constrs for_inheritance=for_inheritance fake_dml_set=fake_dml_set parser_context=parser_context ctx=ctx )<if_stmt>frag<block_start><if_stmt>a_obj<ne>subject_typ<block_start>from_parent=<true><block_end>frags.append(frag)<block_end><block_end>always_check=from_parent<or>any(<not>child.is_view(schema)<for>child subject_typ.children(schema))<line_sep># Union them all together
select_ast=qlast.Set(elements=frags)<with_stmt>ctx.new()<as>ectx<block_start>ectx.implicit_limit=0<line_sep>select_ir=dispatch.compile(select_ast ctx=ectx)<line_sep>select_ir=setgen.scoped_set(select_ir force_reassign=<true> ctx=ectx)<assert_stmt>isinstance(select_ir irast.Set)<block_end><return>select_ir always_check from_parent<block_end><def_stmt>_get_exclusive_ptr_constraints typ:s_objtypes.ObjectType * ctx:context.ContextLevel <arrow>Dict[str Tuple[s_pointers.Pointer List[s_constr.Constraint]]]<block_start>schema=ctx.env.schema<line_sep>pointers={}<line_sep>exclusive_constr=schema.get('std::exclusive' type=s_constr.Constraint)<for_stmt>ptr typ.get_pointers(schema).objects(schema)<block_start>ptr=ptr.get_nearest_non_derived_parent(schema)<line_sep>ex_cnstrs=[c<for>c ptr.get_constraints(schema).objects(schema)<if>c.issubclass(schema exclusive_constr)]<if_stmt>ex_cnstrs<block_start>name=ptr.get_shortname(schema).name<if_stmt>name<ne>'id'<block_start>pointers[name]=ptr ex_cnstrs<block_end><block_end><block_end><return>pointers<block_end><def_stmt>compile_insert_unless_conflict stmt:irast.InsertStmt typ:s_objtypes.ObjectType * ctx:context.ContextLevel <arrow>irast.OnConflictClause<block_start>"""Compile an UNLESS CONFLICT clause with no ON
This requires synthesizing a conditional based on all the exclusive
constraints on the object.
"""<line_sep>pointers=_get_exclusive_ptr_constraints(typ ctx=ctx)<line_sep>obj_constrs=typ.get_constraints(ctx.env.schema).objects(ctx.env.schema)<line_sep>select_ir,always_check,_=compile_conflict_select(stmt typ constrs=pointers obj_constrs=obj_constrs parser_context=stmt.context ctx=ctx)<line_sep><return>irast.OnConflictClause(constraint=<none> select_ir=select_ir always_check=always_check else_ir=<none>)<block_end><def_stmt>compile_insert_unless_conflict_on stmt:irast.InsertStmt typ:s_objtypes.ObjectType constraint_spec:qlast.Expr else_branch:Optional[qlast.Expr] * ctx:context.ContextLevel <arrow>irast.OnConflictClause<block_start><with_stmt>ctx.new()<as>constraint_ctx<block_start>constraint_ctx.partial_path_prefix=stmt.subject<line_sep># We compile the name here so we can analyze it, but we don't do
# anything else with it.
cspec_res=dispatch.compile(constraint_spec ctx=constraint_ctx)<block_end># We accept a property, link, or a list of them in the form of a
# tuple.
<if_stmt>cspec_res.rptr<is><none><and>isinstance(cspec_res.expr irast.Tuple)<block_start>cspec_args=[elem.val<for>elem cspec_res.expr.elements]<block_end><else_stmt><block_start>cspec_args=[cspec_res]<block_end><for_stmt>cspec_arg cspec_args<block_start><if_stmt><not>cspec_arg.rptr<block_start><raise>errors.QueryError('UNLESS CONFLICT argument must be a property, link, '<concat>'or tuple of properties and links' context=constraint_spec.context )<block_end><if_stmt>cspec_arg.rptr.source.path_id<ne>stmt.subject.path_id<block_start><raise>errors.QueryError('UNLESS CONFLICT argument must be a property of the '<concat>'type being inserted' context=constraint_spec.context )<block_end><block_end>schema=ctx.env.schema<line_sep>ptrs=[]<line_sep>exclusive_constr=schema.get('std::exclusive' type=s_constr.Constraint)<for_stmt>cspec_arg cspec_args<block_start><assert_stmt>cspec_arg.rptr<is><not><none><line_sep>schema,ptr=(typeutils.ptrcls_from_ptrref(cspec_arg.rptr.ptrref schema=schema))<if_stmt><not>isinstance(ptr s_pointers.Pointer)<block_start><raise>errors.QueryError('UNLESS CONFLICT property must be a property' context=constraint_spec.context )<block_end>ptr=ptr.get_nearest_non_derived_parent(schema)<line_sep>ptr_card=ptr.get_cardinality(schema)<if_stmt><not>ptr_card.is_single()<block_start><raise>errors.QueryError('UNLESS CONFLICT property must be a SINGLE property' context=constraint_spec.context )<block_end>ptrs.append(ptr)<block_end>obj_constrs=inference.cardinality.get_object_exclusive_constraints(typ set(ptrs) ctx.env)<line_sep>field_constrs=[]<if_stmt>len(ptrs)<eq>1<block_start>field_constrs=[c<for>c ptrs[0].get_constraints(schema).objects(schema)<if>c.issubclass(schema exclusive_constr)]<block_end>all_constrs=list(obj_constrs)+field_constrs<if_stmt>len(all_constrs)<ne>1<block_start><raise>errors.QueryError('UNLESS CONFLICT property must have a single exclusive constraint' context=constraint_spec.context )<block_end>ds={ptr.get_shortname(schema).name:(ptr 
field_constrs)<for>ptr ptrs}<line_sep>select_ir,always_check,from_anc=compile_conflict_select(stmt typ constrs=ds obj_constrs=list(obj_constrs) parser_context=stmt.context ctx=ctx)<line_sep># Compile an else branch
else_ir=<none><if_stmt>else_branch# TODO: We should support this, but there is some semantic and
# implementation trickiness.
<block_start><if_stmt>from_anc<block_start><raise>errors.UnsupportedFeatureError('UNLESS CONFLICT can not use ELSE when constraint is from a '<concat>'parent type' context=constraint_spec.context )<block_end># The ELSE needs to be able to reference the subject in an
# UPDATE, even though that would normally be prohibited.
ctx.path_scope.factoring_allowlist.add(stmt.subject.path_id)<line_sep># Compile else
else_ir=dispatch.compile(astutils.ensure_qlstmt(else_branch) ctx=ctx)<assert_stmt>isinstance(else_ir irast.Set)<block_end><return>irast.OnConflictClause(constraint=irast.ConstraintRef(id=all_constrs[0].id) select_ir=select_ir always_check=always_check else_ir=else_ir)<block_end><def_stmt>compile_inheritance_conflict_selects stmt:irast.MutatingStmt conflict:irast.MutatingStmt typ:s_objtypes.ObjectType subject_type:s_objtypes.ObjectType * ctx:context.ContextLevel <arrow>List[irast.OnConflictClause]<block_start>"""Compile the selects needed to resolve multiple DML to related types
Generate a SELECT that finds all objects of type `typ` that conflict with
the insert `stmt`. The backend will use this to explicitly check that
no conflicts exist, and raise an error if they do.
This is needed because we mostly use triggers to enforce these
cross-type exclusive constraints, and they use a snapshot
beginning at the start of the statement.
"""<line_sep>pointers=_get_exclusive_ptr_constraints(typ ctx=ctx)<line_sep>obj_constrs=typ.get_constraints(ctx.env.schema).objects(ctx.env.schema)<line_sep># This is a little silly, but for *this* we need to do one per
# constraint (so that we can properly identify which constraint
# failed in the error messages)
entries:List[Tuple[s_constr.Constraint ConstraintPair]]=[]<for_stmt>name,(ptr ptr_constrs) pointers.items()<block_start><for_stmt>ptr_constr ptr_constrs<block_start><if_stmt>_constr_matters(ptr_constr ctx)<block_start>entries.append((ptr_constr ({name:(ptr [ptr_constr])} [])))<block_end><block_end><block_end><for_stmt>obj_constr obj_constrs<block_start><if_stmt>_constr_matters(obj_constr ctx)<block_start>entries.append((obj_constr ({} [obj_constr])))<block_end><block_end># For updates, we need to pull from the actual result overlay,
# since the final row can depend on things not in the query.
fake_dml_set=<none><if_stmt>isinstance(stmt irast.UpdateStmt)<block_start>fake_subject=qlast.DetachedExpr(expr=qlast.Path(steps=[s_utils.name_to_ast_ref(subject_type.get_name(ctx.env.schema))]))<line_sep>fake_dml_set=dispatch.compile(fake_subject ctx=ctx)<block_end>clauses=[]<for_stmt>cnstr,(p o) entries<block_start>select_ir,_,_=compile_conflict_select(stmt typ for_inheritance=<true> fake_dml_set=fake_dml_set constrs=p obj_constrs=o parser_context=stmt.context ctx=ctx)<if_stmt>isinstance(select_ir irast.EmptySet)<block_start><continue><block_end>cnstr_ref=irast.ConstraintRef(id=cnstr.id)<line_sep>clauses.append(irast.OnConflictClause(constraint=cnstr_ref select_ir=select_ir always_check=<false> else_ir=<none> else_fail=conflict update_query_set=fake_dml_set))<block_end><return>clauses<block_end><def_stmt>compile_inheritance_conflict_checks stmt:irast.MutatingStmt subject_stype:s_objtypes.ObjectType * ctx:context.ContextLevel <arrow>Optional[List[irast.OnConflictClause]]<block_start><if_stmt><not>ctx.env.dml_stmts<block_start><return><none><block_end><assert_stmt>isinstance(subject_stype s_objtypes.ObjectType)<line_sep># TODO: when the conflicting statement is an UPDATE, only
# look at things it updated
modified_ancestors=set()<line_sep>base_object=ctx.env.schema.get('std::BaseObject' type=s_objtypes.ObjectType)<line_sep>subject_stypes=[subject_stype]<line_sep># For updates, we need to also consider all descendants, because
# those could also have interesting constraints of their own.
<if_stmt>isinstance(stmt irast.UpdateStmt)<block_start>subject_stypes.extend(subject_stype.descendants(ctx.env.schema))<block_end># N.B that for updates, the update itself will be in dml_stmts,
# since an update can conflict with itself if there are subtypes.
<for_stmt>ir ctx.env.dml_stmts<block_start>typ=setgen.get_set_type(ir.subject ctx=ctx)<assert_stmt>isinstance(typ s_objtypes.ObjectType)<line_sep>typ=typ.get_nearest_non_derived_parent(ctx.env.schema)<line_sep>typs=[typ]<line_sep># As mentioned above, need to consider descendants of updates
<if_stmt>isinstance(ir irast.UpdateStmt)<block_start>typs.extend(typ.descendants(ctx.env.schema))<block_end><for_stmt>typ typs<block_start><if_stmt>typ.is_view(ctx.env.schema)<block_start><continue><block_end><for_stmt>subject_stype subject_stypes<block_start><if_stmt>subject_stype.is_view(ctx.env.schema)<block_start><continue><block_end># If the earlier DML has a shared ancestor that isn't
# BaseObject and isn't (if it's an insert) the same type,
# then we need to see if we need a conflict select
<if_stmt>(subject_stype<eq>typ<and><not>isinstance(ir irast.UpdateStmt)<and><not>isinstance(stmt irast.UpdateStmt))<block_start><continue><block_end>ancs=s_utils.get_class_nearest_common_ancestors(ctx.env.schema [subject_stype typ])<for_stmt>anc ancs<block_start><if_stmt>anc<ne>base_object<block_start>modified_ancestors.add((subject_stype anc ir))<block_end><block_end><block_end><block_end><block_end>conflicters=[]<for_stmt>subject_stype,anc_type,ir modified_ancestors<block_start>conflicters.extend(compile_inheritance_conflict_selects(stmt ir anc_type subject_stype ctx=ctx))<block_end><return>conflicters<or><none><block_end> |
# Generated by Django 2.2.17 on 2021-01-30 08:35
<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>django_extensions.db.fields<import_from_stmt>django.db.models JSONField<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('dojo' '0074_notifications_close_engagement')]<line_sep>operations=[migrations.CreateModel(name='Test_Import' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('created' django_extensions.db.fields.CreationDateTimeField(auto_now_add=<true> verbose_name='created')) ('modified' django_extensions.db.fields.ModificationDateTimeField(auto_now=<true> verbose_name='modified')) ('import_settings' JSONField(null=<true>)) ('version' models.CharField(blank=<true> max_length=100 null=<true>)) ('type' models.CharField(default='unknown' max_length=64 null=<false>)) ] options={'ordering':('-id' ) } ) migrations.CreateModel(name='Test_Import_Finding_Action' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('created' django_extensions.db.fields.CreationDateTimeField(auto_now_add=<true> verbose_name='created')) ('modified' django_extensions.db.fields.ModificationDateTimeField(auto_now=<true> verbose_name='modified')) ('action' models.CharField(blank=<true> choices=[('N' 'created') ('C' 'closed') ('R' 'reactivated') ('U' 'updated')] max_length=100 null=<true>)) ('finding' models.ForeignKey(editable=<false> on_delete=django.db.models.deletion.CASCADE to='dojo.Finding')) ('test_import' models.ForeignKey(editable=<false> on_delete=django.db.models.deletion.CASCADE to='dojo.Test_Import')) ] options={'ordering':('test_import' 'action' 'finding') 'unique_together':{('test_import' 'finding')} } ) migrations.AddField(model_name='test_import' name='findings_affected' field=models.ManyToManyField(through='dojo.Test_Import_Finding_Action' to='dojo.Finding') ) migrations.AddField(model_name='test_import' name='test' 
field=models.ForeignKey(editable=<false> on_delete=django.db.models.deletion.CASCADE to='dojo.Test') ) migrations.AddIndex(model_name='test_import' index=models.Index(fields=['created' 'test' 'type'] name='dojo_test_i_created_951f4e_idx') ) ]<block_end> |
<import_from_stmt>flask_restful Resource<import_from_stmt>flask_restful.reqparse RequestParser<import_from_stmt>pajbot.managers.db DBManager<import_from_stmt>pajbot.models.playsound Playsound<import_from_stmt>pajbot.models.sock SocketClientManager<import_from_stmt>pajbot.modules PlaysoundModule<import_from_stmt>pajbot.web.utils requires_level<import_from_stmt>pajbot.managers.adminlog AdminLogManager<class_stmt>PlaysoundAPI(Resource)<block_start>@requires_level(500)<def_stmt>put self playsound_name **options<block_start>playsound_name=PlaysoundModule.massage_name(playsound_name)<if_stmt><not>PlaysoundModule.validate_name(playsound_name)<block_start><return>({"error":"Invalid Playsound name. The playsound name may only contain lowercase latin letters, 0-9, -, or _. No spaces :rage:"} 400 )<block_end>post_parser=RequestParser()<line_sep>post_parser.add_argument("link" required=<true>)<line_sep>args=post_parser.parse_args()<try_stmt><block_start>link=args["link"]<block_end><except_stmt>(ValueError KeyError)<block_start><return>{"error":"Invalid `link` parameter."} 400<block_end><with_stmt>DBManager.create_session_scope()<as>db_session<block_start>count=db_session.query(Playsound).filter(Playsound.name<eq>playsound_name).count()<if_stmt>count<ge>1<block_start><return>"Playsound already exists" 400<block_end># the rest of the parameters are initialized with defaults
playsound=Playsound(name=playsound_name link=link)<line_sep>db_session.add(playsound)<line_sep>log_msg=f"The {playsound_name} playsound has been added"<line_sep>AdminLogManager.add_entry("Playsound added" options["user"] log_msg)<line_sep><return>"OK" 200<block_end><block_end>@requires_level(500)<def_stmt>post self playsound_name **options# require JSON so the cooldown can be null
<block_start>post_parser=RequestParser()<line_sep>post_parser.add_argument("link" required=<true>)<line_sep>post_parser.add_argument("volume" type=int required=<true>)<line_sep>post_parser.add_argument("cooldown" type=int required=<false>)<line_sep>post_parser.add_argument("enabled" type=bool required=<false>)<line_sep>args=post_parser.parse_args()<line_sep>link=args["link"]<if_stmt><not>PlaysoundModule.validate_link(link)<block_start><return>"Empty or bad link, links must start with https:// and must not contain spaces" 400<block_end>volume=args["volume"]<if_stmt><not>PlaysoundModule.validate_volume(volume)<block_start><return>"Bad volume argument" 400<block_end># cooldown is allowed to be null/None
cooldown=args.get("cooldown" <none>)<if_stmt><not>PlaysoundModule.validate_cooldown(cooldown)<block_start><return>"Bad cooldown argument" 400<block_end>enabled=args["enabled"]<if_stmt>enabled<is><none><block_start><return>"Bad enabled argument" 400<block_end><with_stmt>DBManager.create_session_scope()<as>db_session<block_start>playsound=db_session.query(Playsound).filter(Playsound.name<eq>playsound_name).one_or_none()<if_stmt>playsound<is><none><block_start><return>"Playsound does not exist" 404<block_end>raw_edited_data={"link":(playsound.link link) "volume":(playsound.volume volume) "cooldown":(playsound.cooldown cooldown) }<line_sep># make a dictionary with all the changed values (except for enabled, which has a special case below)
filtered_edited_data={k:v<for>k,v raw_edited_data.items()<if>v[0]<ne>v[1]}<line_sep>log_msg=f"The {playsound_name} playsound has been updated: "<line_sep>log_msg_changes=[]<if_stmt>playsound.enabled<ne>enabled<block_start>log_msg_changes.append("enabled"<if>enabled<else>"disabled")<block_end># iterate over changed values and push them to the log msg
<for_stmt>edited_key,values filtered_edited_data.items()<block_start>log_msg_changes.append(f"{edited_key} {values[0]} to {values[1]}")<block_end>log_msg<augadd>", ".join(log_msg_changes)<line_sep>playsound.link=link<line_sep>playsound.volume=volume<line_sep>playsound.cooldown=cooldown<line_sep>playsound.enabled=enabled<line_sep>db_session.add(playsound)<if_stmt>len(log_msg_changes)<block_start>AdminLogManager.add_entry("Playsound edited" options["user"] log_msg)<block_end><block_end><return>"OK" 200<block_end>@requires_level(500)<def_stmt>delete self playsound_name **options<block_start><with_stmt>DBManager.create_session_scope()<as>db_session<block_start>playsound=db_session.query(Playsound).filter(Playsound.name<eq>playsound_name).one_or_none()<if_stmt>playsound<is><none><block_start><return>"Playsound does not exist" 404<block_end>log_msg=f"The {playsound.name} playsound has been removed"<line_sep>AdminLogManager.add_entry("Playsound removed" options["user"] log_msg)<line_sep>db_session.delete(playsound)<line_sep><return>"OK" 200<block_end><block_end><block_end><class_stmt>PlayPlaysoundAPI(Resource)<block_start>@requires_level(500)<def_stmt>post self playsound_name **options<block_start><with_stmt>DBManager.create_session_scope()<as>db_session<block_start>count=db_session.query(Playsound).filter(Playsound.name<eq>playsound_name).count()<if_stmt>count<le>0<block_start><return>"Playsound does not exist" 404<block_end># explicitly don't check for disabled
<block_end>SocketClientManager.send("playsound.play" {"name":playsound_name})<line_sep><return>"OK" 200<block_end><block_end><def_stmt>init api<block_start>api.add_resource(PlaysoundAPI "/playsound/<playsound_name>")<line_sep>api.add_resource(PlayPlaysoundAPI "/playsound/<playsound_name>/play")<block_end> |
<import_stmt>PIL.Image<as>Image<import_stmt>scipy.misc<import_stmt>sys<line_sep>sys.path.append('./python')<import_from_stmt>dehaze load_model transform cuda# pylint: disable=E0401
<def_stmt>run_test <block_start>net=load_model()<line_sep>input_image='./download/canyon1.jpg'<line_sep>output_filename='./download/canyon1_dh.jpg'<line_sep>#===== Load input image =====
img=Image.open(input_image).convert('RGB')<line_sep>imgIn=transform(img).unsqueeze_(0)<line_sep>#===== Test procedures =====
<if_stmt>cuda<block_start>imgIn=imgIn.cuda()<block_end>prediction=net(imgIn)<line_sep>prediction=prediction.data.cpu().numpy().squeeze().transpose((1 2 0))<line_sep>scipy.misc.toimage(prediction).save(output_filename)<block_end><if_stmt>__name__<eq>'__main__'<block_start>print('dehaze')<line_sep>run_test()<block_end> |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Train the Attention-based model (ERATO corpus)."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>os.path join isfile abspath<import_stmt>sys<import_stmt>time<import_stmt>tensorflow<as>tf<import_from_stmt>setproctitle setproctitle<import_stmt>yaml<import_stmt>shutil<line_sep>sys.path.append(abspath('../../../'))<import_from_stmt>experiments.erato.data.load_dataset_attention Dataset<import_from_stmt>experiments.erato.metrics.attention do_eval_cer do_eval_fmeasure<import_from_stmt>utils.io.labels.sparsetensor list2sparsetensor<import_from_stmt>utils.training.learning_rate_controller Controller<import_from_stmt>utils.training.plot plot_loss plot_ler<import_from_stmt>utils.directory mkdir_join mkdir<import_from_stmt>utils.parameter count_total_parameters<import_from_stmt>models.attention.attention_seq2seq AttentionSeq2Seq<def_stmt>do_train model params<block_start>"""Run training.
Args:
model: the model to train
params (dict): A dictionary of parameters
"""<line_sep>map_file_path='../metrics/mapping_files/'+params['label_type']+'_'+params['ss_type']+'.txt'<line_sep># Load dataset
train_data=Dataset(data_type='train' label_type=params['label_type'] ss_type=params['ss_type'] batch_size=params['batch_size'] map_file_path=map_file_path max_epoch=params['num_epoch'] splice=params['splice'] num_stack=params['num_stack'] num_skip=params['num_skip'] sort_utt=<true> sort_stop_epoch=params['sort_stop_epoch'])<line_sep>dev_data=Dataset(data_type='dev' label_type=params['label_type'] ss_type=params['ss_type'] batch_size=params['batch_size'] map_file_path=map_file_path splice=params['splice'] num_stack=params['num_stack'] num_skip=params['num_skip'] sort_utt=<false>)<line_sep>test_data=Dataset(data_type='test' label_type=params['label_type'] ss_type=params['ss_type'] batch_size=params['batch_size'] map_file_path=map_file_path splice=params['splice'] num_stack=params['num_stack'] num_skip=params['num_skip'] sort_utt=<false>)<line_sep># Tell TensorFlow that the model will be built into the default graph
<with_stmt>tf.Graph().as_default()# Define placeholders
<block_start>model.create_placeholders()<line_sep>learning_rate_pl=tf.placeholder(tf.float32 name='learning_rate')<line_sep># Add to the graph each operation (including model definition)
loss_op,logits,decoder_outputs_train,decoder_outputs_infer=model.compute_loss(model.inputs_pl_list[0] model.labels_pl_list[0] model.inputs_seq_len_pl_list[0] model.labels_seq_len_pl_list[0] model.keep_prob_encoder_pl_list[0] model.keep_prob_decoder_pl_list[0] model.keep_prob_embedding_pl_list[0])<line_sep>train_op=model.train(loss_op optimizer=params['optimizer'] learning_rate=learning_rate_pl)<line_sep>_,decode_op_infer=model.decode(decoder_outputs_train decoder_outputs_infer)<line_sep>ler_op=model.compute_ler(model.labels_st_true_pl model.labels_st_pred_pl)<line_sep># Define learning rate controller
lr_controller=Controller(learning_rate_init=params['learning_rate'] decay_start_epoch=params['decay_start_epoch'] decay_rate=params['decay_rate'] decay_patient_epoch=params['decay_patient_epoch'] lower_better=<true>)<line_sep># Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train=tf.summary.merge(model.summaries_train)<line_sep>summary_dev=tf.summary.merge(model.summaries_dev)<line_sep># Add the variable initializer operation
init_op=tf.global_variables_initializer()<line_sep># Create a saver for writing training checkpoints
saver=tf.train.Saver(max_to_keep=<none>)<line_sep># Count total param
parameters_dict,total_parameters=count_total_parameters(tf.trainable_variables())<for_stmt>parameter_name sorted(parameters_dict.keys())<block_start>print("%s %d"%(parameter_name parameters_dict[parameter_name]))<block_end>print("Total %d variables, %s M param"%(len(parameters_dict.keys()) "{:,}".format(total_parameters/1000000)))<line_sep>csv_steps,csv_loss_train,csv_loss_dev=[] [] []<line_sep>csv_ler_train,csv_ler_dev=[] []<line_sep># Create a session for running operation on the graph
<with_stmt>tf.Session()<as>sess# Instantiate a SummaryWriter to output summaries and the graph
<block_start>summary_writer=tf.summary.FileWriter(model.save_path sess.graph)<line_sep># Initialize param
sess.run(init_op)<line_sep># Train model
start_time_train=time.time()<line_sep>start_time_epoch=time.time()<line_sep>start_time_step=time.time()<line_sep>cer_dev_best=1<line_sep>not_improved_epoch=0<line_sep>learning_rate=float(params['learning_rate'])<for_stmt>step,(data is_new_epoch) enumerate(train_data)# Create feed dictionary for next mini batch (train)
<block_start>inputs,labels_train,inputs_seq_len,labels_seq_len,_=data<line_sep>feed_dict_train={model.inputs_pl_list[0]:inputs[0] model.labels_pl_list[0]:labels_train[0] model.inputs_seq_len_pl_list[0]:inputs_seq_len[0] model.labels_seq_len_pl_list[0]:labels_seq_len[0] model.keep_prob_encoder_pl_list[0]:1-float(params['dropout_encoder']) model.keep_prob_decoder_pl_list[0]:1-float(params['dropout_decoder']) model.keep_prob_embedding_pl_list[0]:1-float(params['dropout_embedding']) learning_rate_pl:learning_rate}<line_sep># Update parameters
sess.run(train_op feed_dict=feed_dict_train)<if_stmt>(step+1)%params['print_step']<eq>0# Create feed dictionary for next mini batch (dev)
<block_start>(inputs labels_dev inputs_seq_len labels_seq_len _),_=dev_data.next()<line_sep>feed_dict_dev={model.inputs_pl_list[0]:inputs[0] model.labels_pl_list[0]:labels_dev[0] model.inputs_seq_len_pl_list[0]:inputs_seq_len[0] model.labels_seq_len_pl_list[0]:labels_seq_len[0] model.keep_prob_encoder_pl_list[0]:1.0 model.keep_prob_decoder_pl_list[0]:1.0 model.keep_prob_embedding_pl_list[0]:1.0}<line_sep># Compute loss
loss_train=sess.run(loss_op feed_dict=feed_dict_train)<line_sep>loss_dev=sess.run(loss_op feed_dict=feed_dict_dev)<line_sep>csv_steps.append(step)<line_sep>csv_loss_train.append(loss_train)<line_sep>csv_loss_dev.append(loss_dev)<line_sep># Change to evaluation mode
feed_dict_train[model.keep_prob_encoder_pl_list[0]]=1.0<line_sep>feed_dict_train[model.keep_prob_decoder_pl_list[0]]=1.0<line_sep>feed_dict_train[model.keep_prob_embedding_pl_list[0]]=1.0<line_sep># Predict class ids & update even files
predicted_ids_train,summary_str_train=sess.run([decode_op_infer summary_train] feed_dict=feed_dict_train)<line_sep>predicted_ids_dev,summary_str_dev=sess.run([decode_op_infer summary_dev] feed_dict=feed_dict_dev)<line_sep>summary_writer.add_summary(summary_str_train step+1)<line_sep>summary_writer.add_summary(summary_str_dev step+1)<line_sep>summary_writer.flush()<line_sep># Convert to sparsetensor to compute LER
feed_dict_ler_train={model.labels_st_true_pl:list2sparsetensor(labels_train[0] padded_value=train_data.padded_value) model.labels_st_pred_pl:list2sparsetensor(predicted_ids_train padded_value=train_data.padded_value)}<line_sep>feed_dict_ler_dev={model.labels_st_true_pl:list2sparsetensor(labels_dev[0] padded_value=dev_data.padded_value) model.labels_st_pred_pl:list2sparsetensor(predicted_ids_dev padded_value=dev_data.padded_value)}<line_sep># Compute accuracy
ler_train=sess.run(ler_op feed_dict=feed_dict_ler_train)<line_sep>ler_dev=sess.run(ler_op feed_dict=feed_dict_ler_dev)<line_sep>csv_ler_train.append(ler_train)<line_sep>csv_ler_dev.append(ler_dev)<line_sep>duration_step=time.time()-start_time_step<line_sep>print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / ler = %.3f (%.3f) / lr = %.5f (%.3f min)"%(step+1 train_data.epoch_detail loss_train loss_dev ler_train ler_dev learning_rate duration_step/60))<line_sep>sys.stdout.flush()<line_sep>start_time_step=time.time()<block_end># Save checkpoint and evaluate model per epoch
<if_stmt>is_new_epoch<block_start>duration_epoch=time.time()-start_time_epoch<line_sep>print('-----EPOCH:%d (%.3f min)-----'%(train_data.epoch duration_epoch/60))<line_sep># Save fugure of loss & ler
plot_loss(csv_loss_train csv_loss_dev csv_steps save_path=model.save_path)<line_sep>plot_ler(csv_ler_train csv_ler_dev csv_steps label_type=params['label_type'] save_path=model.save_path)<if_stmt>train_data.epoch<ge>params['eval_start_epoch']<block_start>start_time_eval=time.time()<line_sep>print('=== Dev Data Evaluation ===')<line_sep>cer_dev_epoch=do_eval_cer(session=sess decode_op=decode_op_infer model=model dataset=dev_data label_type=params['label_type'] ss_type=params['ss_type'] eval_batch_size=1)<line_sep>print(' CER: %f %%'%(cer_dev_epoch<times>100))<if_stmt>cer_dev_epoch<l>cer_dev_best<block_start>cer_dev_best=cer_dev_epoch<line_sep>not_improved_epoch=0<line_sep>print('■■■ ↑Best Score (CER)↑ ■■■')<line_sep># Save model (check point)
checkpoint_file=join(model.save_path 'model.ckpt')<line_sep>save_path=saver.save(sess checkpoint_file global_step=train_data.epoch)<line_sep>print("Model saved in file: %s"%save_path)<line_sep>print('=== Test Data Evaluation ===')<line_sep>ler_test=do_eval_cer(session=sess decode_op=decode_op_infer model=model dataset=test_data label_type=params['label_type'] ss_type=params['ss_type'] is_test=<true> eval_batch_size=1)<line_sep>print(' CER: %f %%'%(ler_test<times>100))<if_stmt>params['ss_type']<ne>'remove'<block_start>df_acc=do_eval_fmeasure(session=sess decode_op=decode_op_infer model=model dataset=test_data label_type=params['label_type'] ss_type=params['ss_type'] is_test=<true> eval_batch_size=1)<line_sep>print(df_acc)<block_end><block_end><else_stmt><block_start>not_improved_epoch<augadd>1<block_end>duration_eval=time.time()-start_time_eval<line_sep>print('Evaluation time: %.3f min'%(duration_eval/60))<line_sep># Early stopping
<if_stmt>not_improved_epoch<eq>params['not_improved_patient_epoch']<block_start><break><block_end># Update learning rate
learning_rate=lr_controller.decay_lr(learning_rate=learning_rate epoch=train_data.epoch value=cer_dev_epoch)<block_end>start_time_epoch=time.time()<block_end><block_end>duration_train=time.time()-start_time_train<line_sep>print('Total time: %.3f hour'%(duration_train/3600))<line_sep># Training was finished correctly
<with_stmt>open(join(model.save_path 'complete.txt') 'w')<as>f<block_start>f.write('')<block_end><block_end><block_end><block_end><def_stmt>main config_path model_save_path# Load a config file (.yml)
<block_start><with_stmt>open(config_path "r")<as>f<block_start>config=yaml.load(f)<line_sep>params=config['param']<block_end># Except for a <SOS> and <EOS> class
<if_stmt>params['ss_type']<eq>'remove'<block_start>params['num_classes']=147<block_end><elif_stmt>params['ss_type']<in>['insert_left' 'insert_right']<block_start>params['num_classes']=151<block_end><elif_stmt>params['ss_type']<eq>'insert_both'<block_start>params['num_classes']=155<block_end><else_stmt><block_start>TypeError<block_end># Model setting
model=AttentionSeq2Seq(input_size=params['input_size']<times>params['num_stack'] encoder_type=params['encoder_type'] encoder_num_units=params['encoder_num_units'] encoder_num_layers=params['encoder_num_layers'] encoder_num_proj=params['encoder_num_proj'] attention_type=params['attention_type'] attention_dim=params['attention_dim'] decoder_type=params['decoder_type'] decoder_num_units=params['decoder_num_units'] decoder_num_layers=params['decoder_num_layers'] embedding_dim=params['embedding_dim'] num_classes=params['num_classes'] sos_index=params['num_classes'] eos_index=params['num_classes']+1 max_decode_length=params['max_decode_length'] lstm_impl='LSTMBlockCell' use_peephole=params['use_peephole'] parameter_init=params['weight_init'] clip_grad_norm=params['clip_grad_norm'] clip_activation_encoder=params['clip_activation_encoder'] clip_activation_decoder=params['clip_activation_decoder'] weight_decay=params['weight_decay'] time_major=<true> sharpening_factor=params['sharpening_factor'] logits_temperature=params['logits_temperature'] sigmoid_smoothing=params['sigmoid_smoothing'])<line_sep># Set process name
setproctitle('tf_erato_'+model.name+'_'+params['label_type']+'_'+params['ss_type']+'_'+params['attention_type'])<line_sep>model.name='en'+str(params['encoder_num_units'])<line_sep>model.name<augadd>'_'+str(params['encoder_num_layers'])<line_sep>model.name<augadd>'_att'+str(params['attention_dim'])<line_sep>model.name<augadd>'_de'+str(params['decoder_num_units'])<line_sep>model.name<augadd>'_'+str(params['decoder_num_layers'])<line_sep>model.name<augadd>'_'+params['optimizer']<line_sep>model.name<augadd>'_lr'+str(params['learning_rate'])<line_sep>model.name<augadd>'_'+params['attention_type']<if_stmt>params['dropout_encoder']<ne>0<block_start>model.name<augadd>'_dropen'+str(params['dropout_encoder'])<block_end><if_stmt>params['dropout_decoder']<ne>0<block_start>model.name<augadd>'_dropde'+str(params['dropout_decoder'])<block_end><if_stmt>params['dropout_embedding']<ne>0<block_start>model.name<augadd>'_dropem'+str(params['dropout_embedding'])<block_end><if_stmt>params['num_stack']<ne>1<block_start>model.name<augadd>'_stack'+str(params['num_stack'])<block_end><if_stmt>params['weight_decay']<ne>0<block_start>model.name<augadd>'wd'+str(params['weight_decay'])<block_end><if_stmt>params['sharpening_factor']<ne>1<block_start>model.name<augadd>'_sharp'+str(params['sharpening_factor'])<block_end><if_stmt>params['logits_temperature']<ne>1<block_start>model.name<augadd>'_temp'+str(params['logits_temperature'])<block_end><if_stmt>bool(params['sigmoid_smoothing'])<block_start>model.name<augadd>'_smoothing'<block_end># Set save path
model.save_path=mkdir_join(model_save_path 'attention' params['label_type'] params['ss_type'] model.name)<line_sep># Reset model directory
model_index=0<line_sep>new_model_path=model.save_path<while_stmt><true><block_start><if_stmt>isfile(join(new_model_path 'complete.txt'))# Training of the first model have been finished
<block_start>model_index<augadd>1<line_sep>new_model_path=model.save_path+'_'+str(model_index)<block_end><elif_stmt>isfile(join(new_model_path 'config.yml'))# Training of the first model have not been finished yet
<block_start>model_index<augadd>1<line_sep>new_model_path=model.save_path+'_'+str(model_index)<block_end><else_stmt><block_start><break><block_end><block_end>model.save_path=mkdir(new_model_path)<line_sep># Save config file
shutil.copyfile(config_path join(model.save_path 'config.yml'))<line_sep>sys.stdout=open(join(model.save_path 'train.log') 'w')<line_sep># TODO(hirofumi): change to logger
do_train(model=model params=params)<block_end><if_stmt>__name__<eq>'__main__'<block_start>args=sys.argv<if_stmt>len(args)<ne>3<block_start><raise>ValueError('Length of args should be 3.')<block_end>main(config_path=args[1] model_save_path=args[2])<block_end> |
<class_stmt>Solution<block_start>"""
The Brute Force Solution
Time Complexity: O(N^3)
Space Complexity: O(1)
"""<def_stmt>countGoodTriplets self arr:List[int] a:int b:int c:int<arrow>int<block_start>triplet_count=0<line_sep># for each i, for each j, check if the first condition is satisfied
<for_stmt>i range(len(arr)-2)<block_start><for_stmt>j range(i+1 len(arr)-1)<block_start><if_stmt>abs(arr[i]-arr[j])<le>a# for each k, check if the last two conditions are satisfied
<block_start><for_stmt>k range(j+1 len(arr))<block_start><if_stmt>abs(arr[j]-arr[k])<le>b<and>abs(arr[i]-arr[k])<le>c# the triplet is Good, increment the count!
<block_start>triplet_count<augadd>1<block_end><block_end><block_end><block_end><block_end><return>triplet_count<block_end><block_end> |
"""
Main file for streaming Multicam tracker for 360 degree usecase
"""<line_sep>__version__='0.2'<import_stmt>argparse<import_stmt>json<import_stmt>logging<import_stmt>signal<import_stmt>sys<import_from_stmt>mctrack mctrackstream<line_sep>logging.basicConfig(filename='mctracker360.log' level=logging.INFO)<line_sep>DEFAULT_CONSUMER_KAFKA_BOOTSTRAP_SERVER_URL="kafka"<line_sep>DEFAULT_PRODUCER_KAFKA_BOOTSTRAP_SERVER_URL="kafka"<line_sep>DEFAULT_CONSUMER_KAFKA_TOPIC="metromind-raw"<line_sep>DEFAULT_PRODUCER_KAFKA_TOPIC="metromind-start"<line_sep>DEFAULT_MCTRACKER_CONFIG_FILE="config/config_360d.json"<line_sep>DEFAULT_STREAM_CONFIG_FILE="config/config_360d_stream.json"<line_sep>mctrack_obj=<none><def_stmt>signal_handler signum _<block_start>"""Signal handler. This function will dump all tracker stats and exit
Arguments:
signum {int} -- The signal number
frame {list} -- Stack frame
"""<line_sep>logging.error("Multicam tracker got a signal: %d" signum)<try_stmt><block_start><if_stmt>mctrack_obj<is><not><none><block_start>mctrack_obj.dump_stats()<block_end><block_end><except_stmt>Exception<block_start><pass><block_end>exit()<block_end><def_stmt>main <block_start>"""Main function. Starts multicam tracker and runs continiously
until killed
"""<line_sep><global>mctrack_obj<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("-c" "--config" help="Config file for mctracker" default=DEFAULT_MCTRACKER_CONFIG_FILE)<line_sep>parser.add_argument("-s" "--sconfig" help="Config file for streaming setup" default=DEFAULT_STREAM_CONFIG_FILE)<line_sep>args=parser.parse_args()<line_sep>stream_config=<none><try_stmt><block_start>stream_config=json.load(open(args.sconfig))<block_end><except_stmt>IOError<as>ioe<block_start>err_msg="ERROR: Stream Config I/O Error({}): {}: {}. Quitting".format(ioe.errno args.sconfig ioe.strerror)<line_sep>logging.error(err_msg)<line_sep>print(err_msg)<line_sep>exit()<block_end><except_stmt><block_start>err_msg="ERROR: Stream Config Error: {}: {}. Quitting".format(args.sconfig sys.exc_info()[0])<line_sep>logging.error(err_msg)<line_sep>print(err_msg)<line_sep>exit()<block_end>print(stream_config)<line_sep>ckafka=(stream_config.get("msgBrokerConfig" {}).get("inputKafkaServerUrl" DEFAULT_CONSUMER_KAFKA_BOOTSTRAP_SERVER_URL))<line_sep>pkafka=(stream_config.get("msgBrokerConfig" {}).get("outputKafkaServerUrl" DEFAULT_PRODUCER_KAFKA_BOOTSTRAP_SERVER_URL))<line_sep>itopic=(stream_config.get("msgBrokerConfig" {}).get("inputKafkaTopic" DEFAULT_CONSUMER_KAFKA_TOPIC))<line_sep>otopic=(stream_config.get("msgBrokerConfig" {}).get("outputKafkaTopic" DEFAULT_CONSUMER_KAFKA_TOPIC))<line_sep>time_it_flag=stream_config.get("profileTime" <false>)<line_sep>print("Starting MC-Streaming app with following args:\n"<concat>"consumer kafka server={}\n"<concat>"consumer kafka topic={}\n"<concat>"producer kafka server={}\n"<concat>"producer kafka topic={}\n"<concat>"Time profile={}\n"<concat>"MC Tracker Config File={}\n".format(ckafka itopic pkafka otopic time_it_flag args.config))<line_sep># Set the signal handler for ctrl-c. Since the program runs indefinitely,
# we need to dump some stats when sigint is received
# (when profiling is enabled)
signal.signal(signal.SIGINT signal_handler)<line_sep>mctrack_obj=mctrackstream.McTrackerStream(ckafka itopic pkafka otopic args.config time_it_flag)<line_sep>mctrack_obj.start_mctracker()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
# copytrue (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>pickle<import_stmt>os.path<as>osp<import_stmt>random<import_from_stmt>.utils copy_directory<class_stmt>DatasetBase(object)<block_start><def_stmt>__init__ self dataset_id path<block_start>self.id=dataset_id<line_sep>self.path=path<line_sep>self.all_files=list()<line_sep>self.file_info=dict()<line_sep>self.label_info=dict()<line_sep>self.labels=list()<line_sep>self.train_files=list()<line_sep>self.val_files=list()<line_sep>self.test_files=list()<line_sep>self.class_train_file_list=dict()<line_sep>self.class_val_file_list=dict()<line_sep>self.class_test_file_list=dict()<block_end><def_stmt>copy_dataset self source_path files# 将原数据集拷贝至目标路径
<block_start>copy_directory(source_path self.path files)<block_end><def_stmt>dump_statis_info self# info['fields']指定了需要dump的信息
<block_start>info=dict()<line_sep>info['fields']=['file_info' 'label_info' 'labels' 'train_files' 'val_files' 'test_files' 'class_train_file_list' 'class_val_file_list' 'class_test_file_list']<for_stmt>field info['fields']<block_start><if_stmt>hasattr(self field)<block_start>info[field]=getattr(self field)<block_end><block_end><with_stmt>open(osp.join(self.path 'statis.pkl') 'wb')<as>f<block_start>pickle.dump(info f)<block_end><block_end><def_stmt>load_statis_info self<block_start><with_stmt>open(osp.join(self.path 'statis.pkl') 'rb')<as>f<block_start>info=pickle.load(f)<block_end><for_stmt>field info['fields']<block_start><if_stmt>field<in>info<block_start>setattr(self field info[field])<block_end><block_end><block_end><def_stmt>split self val_split test_split<block_start>all_files=list(self.file_info.keys())<line_sep>random.shuffle(all_files)<line_sep>val_num=int(len(all_files)<times>val_split)<line_sep>test_num=int(len(all_files)<times>test_split)<line_sep>train_num=len(all_files)-val_num-test_num<assert_stmt>train_num<g>0 "训练集样本数量需大于0"<assert_stmt>val_num<g>0 "验证集样本数量需大于0"<line_sep>self.train_files=all_files[:train_num]<line_sep>self.val_files=all_files[train_num:train_num+val_num]<line_sep>self.test_files=all_files[train_num+val_num:]<line_sep>self.train_set=set(self.train_files)<line_sep>self.val_set=set(self.val_files)<line_sep>self.test_set=set(self.test_files)<for_stmt>label,file_list self.label_info.items()<block_start>self.class_train_file_list[label]=list()<line_sep>self.class_val_file_list[label]=list()<line_sep>self.class_test_file_list[label]=list()<for_stmt>f file_list<block_start><if_stmt>f<in>self.test_set<block_start>self.class_test_file_list[label].append(f)<block_end><if_stmt>f<in>self.val_set<block_start>self.class_val_file_list[label].append(f)<block_end><if_stmt>f<in>self.train_set<block_start>self.class_train_file_list[label].append(f)<block_end><block_end><block_end><block_end><block_end> |
"""
This module is used to return latest OCS internal build for specified OCS
version.
"""<import_stmt>argparse<import_stmt>os<import_from_stmt>ocs_ci.framework config<import_from_stmt>ocs_ci.framework.main load_config<import_from_stmt>ocs_ci.ocs.constants OCS_VERSION_CONF_DIR<import_from_stmt>ocs_ci.utility.utils get_latest_ds_olm_tag<def_stmt>init_arg_parser <block_start>"""
Init argument parser.
Returns:
object: Parsed arguments
"""<line_sep>parser=argparse.ArgumentParser(description="OCS Internal build version")<line_sep>parser.add_argument("--ocs-version" action="store" required=<false> default=config.ENV_DATA["ocs_version"] help=f"""
OCS version in format X.Y (e.g. 4.7). If not specified, the default
value {config.ENV_DATA['ocs_version']} will be used.
""" )<line_sep>parser.add_argument("--image" action="store_true" required=<false> default=<false> help="If used the whole image of OCS internal build will be returned" )<line_sep><return>parser.parse_args()<block_end><def_stmt>main <block_start>"""
Main function
"""<line_sep>parser=init_arg_parser()<line_sep>ocs_version=parser.ocs_version<line_sep>image=parser.image<line_sep>config.ENV_DATA["ocs_version"]=ocs_version<line_sep>version_config_file=os.path.join(OCS_VERSION_CONF_DIR f"ocs-{ocs_version}.yaml")<line_sep>load_config([version_config_file])<line_sep>latest_ocs_build=get_latest_ds_olm_tag()<if_stmt>image<block_start>base_image=config.DEPLOYMENT["default_ocs_registry_image"].split(":")[0]<line_sep>print(f"{base_image}:{latest_ocs_build}")<line_sep><return><block_end>print(latest_ocs_build)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>nova.notifications.objects base<import_from_stmt>nova.notifications.objects request_spec<as>reqspec_payload<import_from_stmt>nova.objects base<as>nova_base<import_from_stmt>nova.objects fields<line_sep>@nova_base.NovaObjectRegistry.register_notification<class_stmt>ComputeTaskPayload(base.NotificationPayloadBase)# Version 1.0: Initial version
<block_start>VERSION='1.0'<line_sep>fields={'instance_uuid':fields.UUIDField() # There are some cases that request_spec is None.
# e.g. Old instances can still have no RequestSpec object
# attached to them.
'request_spec':fields.ObjectField('RequestSpecPayload' nullable=<true>) 'state':fields.InstanceStateField(nullable=<true>) 'reason':fields.ObjectField('ExceptionPayload')}<def_stmt>__init__ self instance_uuid request_spec state reason<block_start>super(ComputeTaskPayload self).__init__()<line_sep>self.instance_uuid=instance_uuid<line_sep>self.request_spec=reqspec_payload.RequestSpecPayload(request_spec)<if>request_spec<is><not><none><else><none><line_sep>self.state=state<line_sep>self.reason=reason<block_end><block_end>@base.notification_sample('compute_task-build_instances-error.json')@base.notification_sample('compute_task-migrate_server-error.json')@base.notification_sample('compute_task-rebuild_server-error.json')@nova_base.NovaObjectRegistry.register_notification<class_stmt>ComputeTaskNotification(base.NotificationBase)# Version 1.0: Initial version
<block_start>VERSION='1.0'<line_sep>fields={'payload':fields.ObjectField('ComputeTaskPayload')}<block_end> |
"""
This file is part of flatlib - (C) FlatAngle
Author: <NAME> (<EMAIL>)
This module implements a class to represent an
astrology Chart. It provides methods to handle
the chart, as well as three relevant properties:
- objects: a list with the chart's objects
- houses: a list with the chart's houses
- angles: a list with the chart's angles
Since houses 1 and 10 may not match the Asc and
MC in some house systems, the Chart class
includes the list of angles. The angles should be
used when you want to deal with angle's longitudes.
There are also methods to access fixed stars.
"""<import_from_stmt>. angle<import_from_stmt>. const<import_from_stmt>. utils<import_from_stmt>.ephem ephem<import_from_stmt>.datetime Datetime<line_sep># ------------------ #
# Chart Class #
# ------------------ #
<class_stmt>Chart<block_start>""" This class represents an astrology chart. """<def_stmt>__init__ self date pos **kwargs<block_start>""" Creates an astrology chart for a given
date and location.
Optional arguments are:
- hsys: house system
- IDs: list of objects to include
"""<line_sep># Handle optional arguments
hsys=kwargs.get('hsys' const.HOUSES_DEFAULT)<line_sep>IDs=kwargs.get('IDs' const.LIST_OBJECTS_TRADITIONAL)<line_sep>self.date=date<line_sep>self.pos=pos<line_sep>self.hsys=hsys<line_sep>self.objects=ephem.getObjectList(IDs date pos)<line_sep>self.houses,self.angles=ephem.getHouses(date pos hsys)<block_end><def_stmt>copy self<block_start>""" Returns a deep copy of this chart. """<line_sep>chart=Chart.__new__(Chart)<line_sep>chart.date=self.date<line_sep>chart.pos=self.pos<line_sep>chart.hsys=self.hsys<line_sep>chart.objects=self.objects.copy()<line_sep>chart.houses=self.houses.copy()<line_sep>chart.angles=self.angles.copy()<line_sep><return>chart<block_end># === Properties === #
<def_stmt>getObject self ID<block_start>""" Returns an object from the chart. """<line_sep><return>self.objects.get(ID)<block_end><def_stmt>getHouse self ID<block_start>""" Returns an house from the chart. """<line_sep><return>self.houses.get(ID)<block_end><def_stmt>getAngle self ID<block_start>""" Returns an angle from the chart. """<line_sep><return>self.angles.get(ID)<block_end><def_stmt>get self ID<block_start>""" Returns an object, house or angle
from the chart.
"""<if_stmt>ID.startswith('House')<block_start><return>self.getHouse(ID)<block_end><elif_stmt>ID<in>const.LIST_ANGLES<block_start><return>self.getAngle(ID)<block_end><else_stmt><block_start><return>self.getObject(ID)<block_end><block_end># === Fixed stars === #
# The computation of fixed stars is inefficient,
# so the access must be made directly to the
# ephemeris only when needed.
<def_stmt>getFixedStar self ID<block_start>""" Returns a fixed star from the ephemeris. """<line_sep><return>ephem.getFixedStar(ID self.date)<block_end><def_stmt>getFixedStars self<block_start>""" Returns a list with all fixed stars. """<line_sep>IDs=const.LIST_FIXED_STARS<line_sep><return>ephem.getFixedStarList(IDs self.date)<block_end># === Houses and angles === #
<def_stmt>isHouse1Asc self<block_start>""" Returns true if House1 is the same as the Asc. """<line_sep>house1=self.getHouse(const.HOUSE1)<line_sep>asc=self.getAngle(const.ASC)<line_sep>dist=angle.closestdistance(house1.lon asc.lon)<line_sep><return>abs(dist)<l>0.0003<block_end># 1 arc-second
<def_stmt>isHouse10MC self<block_start>""" Returns true if House10 is the same as the MC. """<line_sep>house10=self.getHouse(const.HOUSE10)<line_sep>mc=self.getAngle(const.MC)<line_sep>dist=angle.closestdistance(house10.lon mc.lon)<line_sep><return>abs(dist)<l>0.0003<block_end># 1 arc-second
# === Other properties === #
<def_stmt>isDiurnal self<block_start>""" Returns true if this chart is diurnal. """<line_sep>sun=self.getObject(const.SUN)<line_sep>mc=self.getAngle(const.MC)<line_sep># Get ecliptical positions and check if the
# sun is above the horizon.
lat=self.pos.lat<line_sep>sunRA,sunDecl=utils.eqCoords(sun.lon sun.lat)<line_sep>mcRA,mcDecl=utils.eqCoords(mc.lon 0)<line_sep><return>utils.isAboveHorizon(sunRA sunDecl mcRA lat)<block_end><def_stmt>getMoonPhase self<block_start>""" Returns the phase of the moon. """<line_sep>sun=self.getObject(const.SUN)<line_sep>moon=self.getObject(const.MOON)<line_sep>dist=angle.distance(sun.lon moon.lon)<if_stmt>dist<l>90<block_start><return>const.MOON_FIRST_QUARTER<block_end><elif_stmt>dist<l>180<block_start><return>const.MOON_SECOND_QUARTER<block_end><elif_stmt>dist<l>270<block_start><return>const.MOON_THIRD_QUARTER<block_end><else_stmt><block_start><return>const.MOON_LAST_QUARTER<block_end><block_end># === Solar returns === #
<def_stmt>solarReturn self year<block_start>""" Returns this chart's solar return for a
given year.
"""<line_sep>sun=self.getObject(const.SUN)<line_sep>date=Datetime('{0}/01/01'.format(year) '00:00' self.date.utcoffset)<line_sep>srDate=ephem.nextSolarReturn(date sun.lon)<line_sep><return>Chart(srDate self.pos hsys=self.hsys)<block_end><block_end> |
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
<import_from_stmt>collections namedtuple<import_stmt>numpy<as>np<import_from_stmt>action_detection.postprocessing.metrics matrix_iou<line_sep>Detections=namedtuple('Detections' 'loc, scores')<line_sep>Actions=namedtuple('Detections' 'loc, scores, action_labels, action_scores, id')<def_stmt>nms input_bboxes input_scores threshold keep_top_k min_score=0.01<block_start>"""Carry out default NMS algorithm over the input boxes.
:param input_bboxes: Input boxes
:param input_scores: Detection scores of boxes
:param threshold: Min IoU value to merge boxes
:param keep_top_k: Max number of boxes to output
:param min_score: Min score value to output box
:return: Filtered box IDs
"""<if_stmt>len(input_bboxes)<eq>0<block_start><return>[]<block_end><if_stmt>len(input_bboxes)<g>keep_top_k<block_start>indices=np.argsort(-input_scores)[:keep_top_k]<line_sep>scores=input_scores[indices]<line_sep>bboxes=input_bboxes[indices]<block_end><else_stmt><block_start>scores=np.copy(input_scores)<line_sep>indices=np.arange(len(scores))<line_sep>bboxes=input_bboxes<block_end>similarity_matrix=matrix_iou(bboxes bboxes)<line_sep>out_ids=[]<for_stmt>_ xrange(len(bboxes))<block_start>bbox_id=np.argmax(scores)<line_sep>bbox_score=scores[bbox_id]<if_stmt>bbox_score<l>min_score<block_start><break><block_end>out_ids.append(indices[bbox_id])<line_sep>scores[bbox_id]=0.0<line_sep>iou_values=similarity_matrix[bbox_id]<line_sep>scores[iou_values<g>threshold]=0.0<block_end><return>np.array(out_ids dtype=np.int32)<block_end><def_stmt>soft_nms input_bboxes input_scores keep_top_k sigma min_score<block_start>"""Carry out Soft-NMS algorithm over the input boxes.
:param input_bboxes: Input boxes
:param input_scores: Detection scores of boxes
:param keep_top_k: Max number of boxes to output
:param sigma: Algorithm parameter
:param min_score: Min score value to output box
:return: Filtered box IDs
"""<if_stmt>len(input_bboxes)<eq>0<block_start><return>[] []<block_end><if_stmt>len(input_bboxes)<g>keep_top_k<block_start>indices=np.argsort(-input_scores)[:keep_top_k]<line_sep>scores=input_scores[indices]<line_sep>bboxes=input_bboxes[indices]<block_end><else_stmt><block_start>scores=np.copy(input_scores)<line_sep>indices=np.arange(len(scores))<line_sep>bboxes=input_bboxes<block_end>similarity_matrix=matrix_iou(bboxes bboxes)<line_sep>out_ids=[]<line_sep>out_scores=[]<for_stmt>_ xrange(len(bboxes))<block_start>bbox_id=np.argmax(scores)<line_sep>bbox_score=scores[bbox_id]<if_stmt>bbox_score<l>min_score<block_start><break><block_end>out_ids.append(indices[bbox_id])<line_sep>out_scores.append(bbox_score)<line_sep>scores[bbox_id]=0.0<line_sep>iou_values=similarity_matrix[bbox_id]<line_sep>scores<augmul>np.exp(np.negative(np.square(iou_values)/sigma))<block_end><return>np.array(out_ids dtype=np.int32) np.array(out_scores dtype=np.float32)<block_end><def_stmt>ssd_detection_output batch_bboxes batch_conf bg_class min_conf=0.01 out_top_k=200 nms_overlap=0.45 nms_top_k=400<block_start>"""Process network output to translate it into the bboxes with labels.
:param batch_bboxes: All bboxes
:param batch_conf: All detection scores
:param bg_class: ID of background class
:param min_conf: Min score value to output box
:param out_top_k: Max number of boxes per image to output
:param nms_overlap: NMS parameter
:param nms_top_k: NMS parameter
:return: List of detections
"""<assert_stmt>batch_bboxes.shape[:2]<eq>batch_conf.shape[:2]<assert_stmt>batch_bboxes.shape[2]<eq>4<line_sep>num_classes=batch_conf.shape[-1]<assert_stmt>num_classes<g>1<line_sep>all_detections=[]<for_stmt>sample_id xrange(batch_bboxes.shape[0])<block_start>sample_bboxes=batch_bboxes[sample_id]<line_sep>sample_conf=batch_conf[sample_id]<line_sep>all_sample_detections=[]<for_stmt>label xrange(num_classes)<block_start><if_stmt>label<eq>bg_class<block_start><continue><block_end>sample_scores=sample_conf[: label]<line_sep>valid_mask=sample_scores<g>min_conf<line_sep># noinspection PyTypeChecker
<if_stmt>np.sum(valid_mask)<eq>0<block_start><continue><block_end>valid_bboxes=sample_bboxes[valid_mask]<line_sep>valid_scores=sample_scores[valid_mask]<line_sep>merged_ids=nms(valid_bboxes valid_scores nms_overlap nms_top_k)<if_stmt>len(merged_ids)<g>0<block_start>out_bboxes=valid_bboxes[merged_ids].reshape([-1 4])<line_sep>out_scores=valid_scores[merged_ids].reshape([-1])<for_stmt>i xrange(len(out_scores))<block_start>all_sample_detections.append((out_bboxes[i] label out_scores[i]))<block_end><block_end><block_end><if_stmt>len(all_sample_detections)<g>out_top_k<block_start>all_sample_detections.sort(key=<lambda>tup:tup[2] reverse=<true>)<line_sep>all_sample_detections=all_sample_detections[:out_top_k]<block_end>sample_detections={}<for_stmt>bbox,label,score all_sample_detections<block_start><if_stmt>label<not><in>sample_detections<block_start>sample_detections[label]={'loc':[bbox] 'scores':[score]}<block_end><else_stmt><block_start>last_data=sample_detections[label]<line_sep>last_data['loc'].append(bbox)<line_sep>last_data['scores'].append(score)<block_end><block_end>out_sample_detections={label:Detections(loc=np.stack(sample_detections[label]['loc']) scores=np.stack(sample_detections[label]['scores']))<for>label sample_detections}<line_sep>all_detections.append(out_sample_detections)<block_end><return>all_detections<block_end><def_stmt>ssd_warp_gt batch_bboxes batch_labels bg_class<block_start>"""Translates Ground truth boxes and labels into the internal format.
:param batch_bboxes: Bbox coordinates
:param batch_labels: Bbox labels
:param bg_class: ID of background label
:return: List of boxes
"""<assert_stmt>batch_bboxes.shape[0]<eq>batch_labels.shape[0]<line_sep>all_gt=[]<for_stmt>sample_id xrange(batch_bboxes.shape[0])<block_start>sample_bboxes=batch_bboxes[sample_id]<line_sep>sample_labels=batch_labels[sample_id]<line_sep>valid_mask=np.logical_and(sample_labels<ge>0 sample_labels<ne>bg_class)<if_stmt>np.sum(valid_mask)<eq>0<block_start>all_gt.append([])<line_sep><continue><block_end>valid_bboxes=sample_bboxes[valid_mask]<line_sep>valid_labels=sample_labels[valid_mask]<line_sep>unique_labels=np.unique(valid_labels)<line_sep>sample_detections={}<for_stmt>label unique_labels<block_start>label_mask=valid_labels<eq>label<line_sep>class_bboxes=valid_bboxes[label_mask]<line_sep>sample_detections[label]=Detections(loc=class_bboxes scores=<none>)<block_end>all_gt.append(sample_detections)<block_end><return>all_gt<block_end><def_stmt>action_detection_output batch_bboxes batch_det_conf batch_action_conf bg_class min_det_conf=0.01 min_action_conf=0.01 out_top_k=400 nms_top_k=400 nms_sigma=0.6 do_nms=<true><block_start>"""Process network output to translate it into the bboxes with detection scores and action labels.
:param batch_bboxes: All bboxes
:param batch_det_conf: All detection scores
:param batch_action_conf: All action scores
:param bg_class: ID of background class
:param min_det_conf: Min score value to output box
:param min_action_conf: Min score value for action confidence
:param out_top_k: Max number of boxes per image to output
:param nms_top_k: NMS parameter
:param nms_sigma: NMS parameter
:param do_nms: Whether to run NMS algorithm
:return: List of detections
"""<assert_stmt>batch_bboxes.shape[:2]<eq>batch_det_conf.shape[:2]<assert_stmt>batch_bboxes.shape[:2]<eq>batch_action_conf.shape[:2]<assert_stmt>batch_bboxes.shape[2]<eq>4<line_sep>num_det_classes=batch_det_conf.shape[-1]<assert_stmt>num_det_classes<eq>2<line_sep>num_action_classes=batch_action_conf.shape[-1]<assert_stmt>num_action_classes<g>1<line_sep>det_class=(bg_class+1)%2<line_sep>all_detections=[]<for_stmt>sample_id xrange(batch_bboxes.shape[0])<block_start>sample_bboxes=batch_bboxes[sample_id]<line_sep>sample_det_scores=batch_det_conf[sample_id : det_class]<line_sep>sample_action_conf=batch_action_conf[sample_id]<line_sep>valid_mask=sample_det_scores<g>min_det_conf<line_sep># noinspection PyTypeChecker
<if_stmt>np.sum(valid_mask)<eq>0<block_start>all_detections.append({det_class:[]})<line_sep><continue><block_end>valid_bboxes=sample_bboxes[valid_mask]<line_sep>valid_det_scores=sample_det_scores[valid_mask]<line_sep>valid_det_conf=sample_action_conf[valid_mask]<if_stmt>do_nms<block_start>filtered_ids,filtered_scores=soft_nms(valid_bboxes valid_det_scores nms_top_k nms_sigma min_det_conf)<block_end><else_stmt><block_start>filtered_scores=np.copy(valid_det_scores)<line_sep>filtered_ids=np.argsort(-filtered_scores)<block_end><if_stmt>len(filtered_ids)<g>0<block_start>out_bboxes=valid_bboxes[filtered_ids].reshape([-1 4])<line_sep>out_det_scores=filtered_scores.reshape([-1])<line_sep>out_action_conf=valid_det_conf[filtered_ids].reshape([-1 num_action_classes])<if_stmt>0<l>out_top_k<l>len(out_det_scores)<block_start>out_bboxes=out_bboxes[:out_top_k]<line_sep>out_det_scores=out_det_scores[:out_top_k]<line_sep>out_action_conf=out_action_conf[:out_top_k]<block_end>out_action_label=np.argmax(out_action_conf axis=-1)<line_sep>out_action_score=np.max(out_action_conf axis=-1)<if_stmt>min_action_conf<is><not><none><and>min_action_conf<g>0.0<block_start>out_action_label[out_action_score<l>min_action_conf]=0<block_end>sample_detections=Actions(loc=out_bboxes scores=out_det_scores action_labels=out_action_label action_scores=out_action_score id=<none>)<line_sep>all_detections.append({det_class:sample_detections})<block_end><else_stmt><block_start>all_detections.append({det_class:[]})<line_sep><continue><block_end><block_end><return>all_detections<block_end><def_stmt>action_warp_gt batch_bboxes batch_labels bg_class batch_track_ids=<none><block_start>"""Translates Ground truth boxes and actions into the internal format.
:param batch_bboxes: Bbox coordinates
:param batch_labels: Bbox labels
:param bg_class: ID of background label
:param batch_track_ids: ID of track in a batch
:return: List of boxes
"""<assert_stmt>batch_bboxes.shape[0]<eq>batch_labels.shape[0]<line_sep>det_class=(bg_class+1)%2<line_sep>all_gt=[]<for_stmt>sample_id xrange(batch_bboxes.shape[0])<block_start>sample_bboxes=batch_bboxes[sample_id]<line_sep>sample_labels=batch_labels[sample_id]<line_sep>sample_track_ids=batch_track_ids[sample_id]<if>batch_track_ids<is><not><none><else><none><line_sep>valid_mask=sample_labels<ge>0<line_sep># noinspection PyTypeChecker
<if_stmt>np.sum(valid_mask)<eq>0<block_start>all_gt.append([])<line_sep><continue><block_end>valid_bboxes=sample_bboxes[valid_mask]<line_sep>valid_labels=sample_labels[valid_mask]<line_sep>valid_track_ids=sample_track_ids[valid_mask]<if>sample_track_ids<is><not><none><else><none><line_sep>sample_detections={det_class:Actions(loc=valid_bboxes scores=<none> action_labels=valid_labels action_scores=<none> id=valid_track_ids)}<line_sep>all_gt.append(sample_detections)<block_end><return>all_gt<block_end> |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
<import_stmt>os<import_stmt>sys<line_sep>sys.path.append('../../')<import_from_stmt>convlab.agent Body<import_from_stmt>convlab.agent DialogAgent<import_from_stmt>convlab.spec spec_util<import_from_stmt>convlab.env make_env<import_stmt>numpy<as>np<import_stmt>copy<import_from_stmt>flask Flask request jsonify<import_from_stmt>queue PriorityQueue<import_from_stmt>threading Thread<import_stmt>time<line_sep>rgi_queue=PriorityQueue(maxsize=0)<line_sep>rgo_queue=PriorityQueue(maxsize=0)<line_sep>app=Flask(__name__)<line_sep>os.environ['lab_mode']='eval'<line_sep>spec_file=sys.argv[1]<line_sep>spec_name=sys.argv[2]<line_sep>lab_mode=sys.argv[3]<if_stmt>'@'<in>lab_mode<block_start>lab_mode,prename=lab_mode.split('@')<line_sep>spec=spec_util.get_eval_spec(spec_file spec_name prename)<block_end><else_stmt><block_start>spec=spec_util.get(spec_file spec_name)<block_end># # lab_mode, prename = sys.argv[3].split('@')
# spec = spec_util.get_eval_spec(spec_file, prename)
spec=spec_util.override_eval_spec(spec)<line_sep>agent_spec=spec['agent'][0]<line_sep>env=make_env(spec)<line_sep>body=Body(env spec['agent'])<line_sep>agent=DialogAgent(spec body)<line_sep># last_obs = 'hi'
# agent.reset(last_obs)
# obs = 'hi can you find me a hotel in the west?'
# action = agent.act(obs)
# next_obs = 'we have six people'
# agent.update(obs, action, 0, next_obs, 0)
# action = agent.act(next_obs)
@app.route('/' methods=['GET' 'POST'])<def_stmt>process <block_start><try_stmt><block_start>in_request=request.json<line_sep>print(in_request)<block_end><except_stmt><block_start><return>"invalid input: {}".format(in_request)<block_end>rgi_queue.put((time.time() in_request))<line_sep>rgi_queue.join()<line_sep>output=rgo_queue.get()<line_sep>print(output['response'])<line_sep>rgo_queue.task_done()<line_sep># return jsonify({'response': response})
<return>jsonify(output)<block_end><def_stmt>generate_response in_queue out_queue<block_start><while_stmt><true># pop input
<block_start>last_action='null'<line_sep>in_request=in_queue.get()<line_sep>obs=in_request['input']<if_stmt>in_request['agent_state']<eq>{}<block_start>agent.reset(obs)<block_end><else_stmt><block_start>encoded_state,dst_state,last_action=in_request['agent_state']<line_sep>agent.body.encoded_state=np.asarray(encoded_state)<if>isinstance(encoded_state list)<else>encoded_state<line_sep>agent.dst.state=copy.deepcopy(dst_state)<line_sep>agent.update(obs last_action 0 obs 0)<block_end><try_stmt><block_start>action=agent.act(obs)<line_sep>encoded_state=agent.body.encoded_state.tolist()<if>isinstance(agent.body.encoded_state np.ndarray)<else>agent.body.encoded_state<line_sep>dst_state=copy.deepcopy(agent.dst.state)<block_end><except_stmt>Exception<as>e<block_start>print('agent error' e)<block_end><try_stmt><block_start><if_stmt>action<eq>''<block_start>response='Sorry I do not understand, can you paraphrase?'<block_end><else_stmt><block_start>response=action<block_end><block_end><except_stmt>Exception<as>e<block_start>print('Response generation error' e)<line_sep>response='What did you say?'<block_end>last_action=action<line_sep>out_queue.put({'response':response 'agent_state':(encoded_state dst_state last_action)})<line_sep>in_queue.task_done()<line_sep>out_queue.join()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>worker=Thread(target=generate_response args=(rgi_queue rgo_queue ))<line_sep>worker.setDaemon(<true>)<line_sep>worker.start()<line_sep>app.run(host='0.0.0.0' port=10004)<block_end> |
<import_from_stmt>jaxrl.agents.awac.awac_learner AWACLearner<import_from_stmt>jaxrl.agents.bc.bc_learner BCLearner<import_from_stmt>jaxrl.agents.ddpg.ddpg_learner DDPGLearner<import_from_stmt>jaxrl.agents.drq.drq_learner DrQLearner<import_from_stmt>jaxrl.agents.sac.sac_learner SACLearner<import_from_stmt>jaxrl.agents.sac_v1.sac_v1_learner SACV1Learner<line_sep> |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""This module implements 2d rotation matrix functionalities.
Given an angle of rotation $$\theta$$ a 2d rotation matrix can be expressed as
$$
\mathbf{R} =
\begin{bmatrix}
\cos(\theta) & -\sin(\theta) \\
\sin(\theta) & \cos(\theta)
\end{bmatrix}.
$$
More details rotation matrices can be found on [this page.]
(https://en.wikipedia.org/wiki/Rotation_matrix)
Note: This matrix rotates points in the $$xy$$-plane counterclockwise.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>typing Optional<import_from_stmt>six.moves range<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow_graphics.geometry.transformation rotation_matrix_common<import_from_stmt>tensorflow_graphics.util export_api<import_from_stmt>tensorflow_graphics.util shape<import_from_stmt>tensorflow_graphics.util type_alias<def_stmt>from_euler angle:type_alias.TensorLike name:str="rotation_matrix_2d_from_euler_angle"<arrow>tf.Tensor<block_start>r"""Converts an angle to a 2d rotation matrix.
Converts an angle $$\theta$$ to a 2d rotation matrix following the equation
$$
\mathbf{R} =
\begin{bmatrix}
\cos(\theta) & -\sin(\theta) \\
\sin(\theta) & \cos(\theta)
\end{bmatrix}.
$$
Note:
The resulting matrix rotates points in the $$xy$$-plane counterclockwise.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents an angle in radians.
name: A name for this op that defaults to
"rotation_matrix_2d_from_euler_angle".
Returns:
A tensor of shape `[A1, ..., An, 2, 2]`, where the last dimension represents
a 2d rotation matrix.
Raises:
ValueError: If the shape of `angle` is not supported.
"""<with_stmt>tf.name_scope(name)<block_start>angle=tf.convert_to_tensor(value=angle)<line_sep>shape.check_static(tensor=angle tensor_name="angle" has_dim_equals=(-1 1))<line_sep>cos_angle=tf.cos(angle)<line_sep>sin_angle=tf.sin(angle)<line_sep>matrix=tf.stack((cos_angle -sin_angle sin_angle cos_angle) axis=-1)<line_sep># pyformat: disable
output_shape=tf.concat((tf.shape(input=angle)[:-1] (2 2)) axis=-1)<line_sep><return>tf.reshape(matrix shape=output_shape)<block_end><block_end><def_stmt>from_euler_with_small_angles_approximation angles:type_alias.TensorLike name:str="rotation_matrix_2d_from_euler_with_small_angles_approximation"<arrow>tf.Tensor<block_start>r"""Converts an angle to a 2d rotation matrix under the small angle assumption.
Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be
approximated by their second order Taylor expansions, where
$$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. The 2d
rotation matrix will then be approximated as
$$
\mathbf{R} =
\begin{bmatrix}
1.0 - 0.5\theta^2 & -\theta \\
\theta & 1.0 - 0.5\theta^2
\end{bmatrix}.
$$
In the current implementation, the smallness of the angles is not verified.
Note:
The resulting matrix rotates points in the $$xy$$-plane counterclockwise.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents a small angle in radians.
name: A name for this op that defaults to
"rotation_matrix_2d_from_euler_with_small_angles_approximation".
Returns:
A tensor of shape `[A1, ..., An, 2, 2]`, where the last dimension represents
a 2d rotation matrix.
Raises:
ValueError: If the shape of `angle` is not supported.
"""<with_stmt>tf.name_scope(name)<block_start>angles=tf.convert_to_tensor(value=angles)<line_sep>shape.check_static(tensor=angles tensor_name="angles" has_dim_equals=(-1 1))<line_sep>cos_angle=1.0-0.5<times>angles<times>angles<line_sep>sin_angle=angles<line_sep>matrix=tf.stack((cos_angle -sin_angle sin_angle cos_angle) axis=-1)<line_sep># pyformat: disable
output_shape=tf.concat((tf.shape(input=angles)[:-1] (2 2)) axis=-1)<line_sep><return>tf.reshape(matrix shape=output_shape)<block_end><block_end><def_stmt>inverse matrix:type_alias.TensorLike name:str="rotation_matrix_2d_inverse"<arrow>tf.Tensor<block_start>"""Computes the inverse of a 2D rotation matrix.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
matrix: A tensor of shape `[A1, ..., An, 2, 2]`, where the last two
dimensions represent a 2d rotation matrix.
name: A name for this op that defaults to "rotation_matrix_2d_inverse".
Returns:
A tensor of shape `[A1, ..., An, 2, 2]`, where the last dimension represents
a 2d rotation matrix.
Raises:
ValueError: If the shape of `matrix` is not supported.
"""<with_stmt>tf.name_scope(name)<block_start>matrix=tf.convert_to_tensor(value=matrix)<line_sep>shape.check_static(tensor=matrix tensor_name="matrix" has_rank_greater_than=1 has_dim_equals=((-2 2) (-1 2)))<line_sep>ndims=matrix.shape.ndims<line_sep>perm=list(range(ndims-2))+[ndims-1 ndims-2]<line_sep><return>tf.transpose(a=matrix perm=perm)<block_end><block_end><def_stmt>is_valid matrix:type_alias.TensorLike atol:type_alias.Float=1e-3 name:str="rotation_matrix_2d_is_valid"<arrow>tf.Tensor<block_start>r"""Determines if a matrix is a valid rotation matrix.
Determines if a matrix $$\mathbf{R}$$ is a valid rotation matrix by checking
that $$\mathbf{R}^T\mathbf{R} = \mathbf{I}$$ and $$\det(\mathbf{R}) = 1$$.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
matrix: A tensor of shape `[A1, ..., An, 2, 2]`, where the last two
dimensions represent a 2d rotation matrix.
atol: The absolute tolerance parameter.
name: A name for this op that defaults to "rotation_matrix_2d_is_valid".
Returns:
A tensor of type `bool` and shape `[A1, ..., An, 1]` where False indicates
that the input is not a valid rotation matrix.
"""<with_stmt>tf.name_scope(name)<block_start>matrix=tf.convert_to_tensor(value=matrix)<line_sep>shape.check_static(tensor=matrix tensor_name="matrix" has_rank_greater_than=1 has_dim_equals=((-2 2) (-1 2)))<line_sep><return>rotation_matrix_common.is_valid(matrix atol)<block_end><block_end><def_stmt>rotate point:type_alias.TensorLike matrix:type_alias.TensorLike name:str="rotation_matrix_2d_rotate"<arrow>tf.Tensor<block_start>"""Rotates a 2d point using a 2d rotation matrix.
Note:
In the following, A1 to An are optional batch dimensions, which must be
identical.
Args:
point: A tensor of shape `[A1, ..., An, 2]`, where the last dimension
represents a 2d point.
matrix: A tensor of shape `[A1, ..., An, 2, 2]`, where the last two
dimensions represent a 2d rotation matrix.
name: A name for this op that defaults to "rotation_matrix_2d_rotate".
Returns:
A tensor of shape `[A1, ..., An, 2]`, where the last dimension
represents a 2d point.
Raises:
ValueError: If the shape of `point` or `matrix` is not supported.
"""<with_stmt>tf.name_scope(name)<block_start>point=tf.convert_to_tensor(value=point)<line_sep>matrix=tf.convert_to_tensor(value=matrix)<line_sep>shape.check_static(tensor=point tensor_name="point" has_dim_equals=(-1 2))<line_sep>shape.check_static(tensor=matrix tensor_name="matrix" has_rank_greater_than=1 has_dim_equals=((-2 2) (-1 2)))<line_sep>shape.compare_batch_dimensions(tensors=(point matrix) tensor_names=("point" "matrix") last_axes=(-2 -3) broadcast_compatible=<true>)<line_sep>point=tf.expand_dims(point axis=-1)<line_sep>common_batch_shape=shape.get_broadcasted_shape(point.shape[:-2] matrix.shape[:-2])<def_stmt>dim_value dim:Optional[int]=<none><arrow>int<block_start><return>1<if>dim<is><none><else>tf.compat.dimension_value(dim)<block_end>common_batch_shape=[dim_value(dim)<for>dim common_batch_shape]<line_sep>point=tf.broadcast_to(point common_batch_shape+[2 1])<line_sep>matrix=tf.broadcast_to(matrix common_batch_shape+[2 2])<line_sep>rotated_point=tf.matmul(matrix point)<line_sep><return>tf.squeeze(rotated_point axis=-1)<block_end><block_end># API contains all public functions and classes.
__all__=export_api.get_functions_and_classes()<line_sep> |
<import_from_stmt>typing List Optional Tuple<import_from_stmt>django.http HttpRequest<import_from_stmt>django_scim.filters UserFilterQuery<import_from_stmt>zerver.lib.request RequestNotes<line_sep># This is in a separate file due to circular import issues django-scim2 runs into
# when this is placed in zerver.lib.scim.
<class_stmt>ZulipUserFilterQuery(UserFilterQuery)<block_start>"""This class implements the filter functionality of SCIM2.
E.g. requests such as
/scim/v2/Users?filter=userName eq "<EMAIL>"
can be made to refer to resources via their properties.
This gets fairly complicated in its full scope
(https://datatracker.ietf.org/doc/html/rfc7644#section-3.4.2.2)
and django-scim2 implements an entire mechanism of converting
this SCIM2 filter syntax into SQL queries.
What we have to do in this class is to customize django-scim2 so
that it knows which SCIM attributes map to which UserProfile
fields. We can assume that get_extra_model_filter_kwargs_getter
has already ensured that we will only interact with non-bot user
accounts in the realm associated with this SCIM configuration.
"""<line_sep># attr_map describes which table.column the given SCIM2 User
# attributes refer to.
attr_map={# attr, sub attr, uri
("userName" <none> <none>):"zerver_userprofile.delivery_email" # We can only reasonably support filtering by name.formatted
# as UserProfile.full_name is its equivalent. We don't store
# first/last name information for UserProfile, so we can't
# support filtering based on name.givenName or name.familyName.
("name" "formatted" <none>):"zerver_userprofile.full_name" ("active" <none> <none>):"zerver_userprofile.is_active" }<line_sep># joins tells django-scim2 to always add the specified JOINS
# to the formed SQL queries. We need to JOIN the Realm table
# because we need to limit the results to the realm (subdomain)
# of the request.
joins=("INNER JOIN zerver_realm ON zerver_realm.id = realm_id" )<line_sep>@classmethod<def_stmt>get_extras cls q:str request:Optional[HttpRequest]=<none><arrow>Tuple[str List[object]]<block_start>"""
Return extra SQL and params to be attached to end of current Query's
SQL and params. The return format matches the format that should be used
for providing raw SQL with params to Django's .raw():
https://docs.djangoproject.com/en/3.2/topics/db/sql/#passing-parameters-into-raw
Here we ensure that results are limited to the subdomain of the request
and also exclude bots, as we currently don't want them to be managed by SCIM2.
"""<assert_stmt>request<is><not><none><line_sep>realm=RequestNotes.get_notes(request).realm<assert_stmt>realm<is><not><none><line_sep><return>"AND zerver_realm.id = %s AND zerver_userprofile.is_bot = False" [realm.id]<block_end><block_end> |
"""
Copyright (c) 2021 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""<import_from_stmt>absl app<import_from_stmt>datetime date timedelta<import_from_stmt>core.orbit_e2e E2ETestSuite<import_from_stmt>test_cases.capture_window Capture CheckTimers CheckThreadStates FilterTracks ToggleCollapsedStateOfAllTracks VerifyTracksExist<import_from_stmt>test_cases.connection_window ConnectToStadiaInstance FilterAndSelectFirstProcess LoadCapture LoadLatestCapture<import_from_stmt>test_cases.live_tab AddIterator VerifyFunctionCallCount<import_from_stmt>test_cases.main_window EndSession<line_sep>"""Verify loading a capture in Orbit using pywinauto.
Before this script is run there needs to be a gamelet reserved and
"hello_ggp_standalone" has to be started. Further, Orbit needs to be started.
Also, the captures directory should be cleared.
The script requires absl and pywinauto. Since pywinauto requires the bitness of
the python installation to match the bitness of the program under test, it needs
to be run from 64 bit python.
This automation script covers a basic workflow:
- load an old, unsupported capture and verify this fails with a message
- load a supported capture
- verify that the scheduler track is present and contains timers
- verify that the frame track is present and contains timers
- verify that the tracks from manual instrumentation are present
- verify that the memory tracks are present
- verify that an iterator can be added to "TestFunc2"
- verify that "TestFunc2" was called exactly 1257 times
- take a capture and verify there is a corresponding capture in the latest captures list which contains the tracks
"""<def_stmt>main argv# During the tests, we want to verify that captures get automatically saved. We will do so by filtering the recent
# captures list with the current date (in addition to also deleting old captures before this script runs). However,
# if it is around midnight when this code gets executed and we store the date string, it can be that the capture
# actually gets taken on the next day. Therefore, we will also check for the next day.
<block_start>today=date.today()<line_sep>tomorrow=today+timedelta(days=1)<line_sep>today_string=today.strftime("%Y_%m_%d")<line_sep>tomorrow_string=tomorrow.strftime("%Y_%m_%d")<line_sep>test_cases=[LoadCapture(capture_file_path="testdata\\OrbitTest_1-64.orbit" expect_fail=<true>) LoadCapture(capture_file_path="testdata\\OrbitTest_1-72.orbit") FilterTracks(filter_string="Scheduler" expected_track_count=1) CheckTimers(track_name_filter='Scheduler*') FilterTracks(filter_string="Frame" expected_track_count=1) CheckTimers(track_name_filter='Frame track*') # Verify the frame track has timers
FilterTracks(filter_string="DynamicName_" expected_track_count=5) FilterTracks(filter_string="_var" expected_track_count=6) FilterTracks(filter_string="OrbitThread_" expected_track_count=1) ToggleCollapsedStateOfAllTracks() CheckTimers(track_name_filter="OrbitThread_*") CheckThreadStates(track_name_filter='OrbitThread_*') FilterTracks(filter_string="ORBIT_ASYNC_TASKS" expected_track_count=1) CheckTimers(track_name_filter="ORBIT_ASYNC_TASKS") FilterTracks(filter_string="ORBIT_START_ASYNC_TEST" expected_track_count=1) CheckTimers(track_name_filter="ORBIT_START_ASYNC_TEST") FilterTracks(filter_string="") VerifyTracksExist(track_names=["Page*" "*System*" "*CGroup*"] allow_duplicates=<true>) AddIterator(function_name="TestFunc2") VerifyFunctionCallCount(function_name="TestFunc2" min_calls=1257 max_calls=1257) # Let's take a capture with the current version and verify this can be loaded
EndSession() ConnectToStadiaInstance() FilterAndSelectFirstProcess(process_filter="hello_ggp") Capture() VerifyTracksExist(track_names="hello_ggp_stand*" allow_duplicates=<true>) EndSession() # If we took the capture around midnight, we need to ensure to also look for the next day. Remember, the strings
# get created before the tests run. Thus the `today_string` might be actually from the day before the capture
# gets auto-saved.
LoadLatestCapture(filter_strings=[f"hello_ggp_stand_{today_string}" f"hello_ggp_stand_{tomorrow_string}"]) VerifyTracksExist(track_names="hello_ggp_stand*" allow_duplicates=<true>)]<line_sep>suite=E2ETestSuite(test_name="Capture Loading" test_cases=test_cases)<line_sep>suite.execute()<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end> |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
<import_stmt>importlib<import_stmt>traceback<import_from_stmt>modnas.registry.backend build<import_from_stmt>. predefined<import_from_stmt>typing Optional<line_sep>_backend=<none><line_sep>_backend_keys=[]<def_stmt>use backend:Optional[str] *args imported=<false> **kwargs<arrow><none><block_start>"""Switch to backend by name."""<line_sep><global>_backend _backend_keys<if_stmt>backend<eq>_backend<or>backend<eq>'none'<or>backend<is><none><block_start><return><block_end><try_stmt><block_start><if_stmt>imported<block_start>bk_mod=importlib.import_module(backend)<block_end><else_stmt><block_start>bk_mod=build(backend *args **kwargs)<block_end><block_end><except_stmt>ImportError<block_start>traceback.print_exc()<line_sep><return><block_end>bk_vars=vars(bk_mod)<line_sep>bk_keys=bk_vars.keys()<line_sep>ns=globals()<for_stmt>k _backend_keys<block_start>ns.pop(k <none>)<block_end><for_stmt>k bk_keys<block_start><if_stmt>k.startswith('__')<block_start><continue><block_end>ns[k]=bk_vars[k]<block_end>_backend_keys=list(bk_keys)<line_sep>_backend=backend<block_end><def_stmt>backend <block_start>"""Return name of current backend."""<line_sep><return>_backend<block_end><def_stmt>is_backend backend:str<arrow>bool<block_start>"""Return if the current backend is the given one."""<line_sep><return>_backend<eq>backend<block_end> |
# -*- coding: utf-8 -*-
# base settings - imported by other settings files, then overridden
<import_stmt>copy<import_from_stmt>datetime timedelta<import_stmt>os.path<import_stmt>posixpath<import_stmt>bleach<import_from_stmt>django.core.urlresolvers reverse_lazy<def_stmt>env_or_default NAME default<block_start><return>os.environ.get(NAME default)<block_end>CONFERENCE_YEAR="2019"<line_sep># Top level of our source / repository
PROJECT_ROOT=os.path.abspath(os.path.join(os.path.dirname(__file__) os.pardir os.pardir))<line_sep># Symposion package
PACKAGE_ROOT=os.path.join(PROJECT_ROOT "symposion")<line_sep>DEBUG=<false><line_sep>TEMPLATE_DEBUG=DEBUG<line_sep># tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA=DEBUG<line_sep># django-compressor is turned off by default due to deployment overhead for
# most users. See <URL> for more information
COMPRESS=<false><line_sep>DATABASES={"default":{"ENGINE":"django.db.backends.postgresql_psycopg2" "NAME":env_or_default("DB_NAME" "pycon") "USER":env_or_default("DB_USER" "") "PASSWORD":env_or_default("DB_PASSWORD" "") "HOST":env_or_default("DB_HOST" "") "PORT":env_or_default("DB_PORT" "") # https://docs.djangoproject.com/en/1.8/ref/databases/#persistent-connections
"CONN_MAX_AGE":int(env_or_default("CONN_MAX_AGE" 300)) }}<line_sep>INTERNAL_IPS=["127.0.0.1" ]<line_sep>ADMINS=[# ("<NAME>", "<EMAIL>"),
]<line_sep>MANAGERS=ADMINS<line_sep># Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE="US/Eastern"<line_sep># Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE="en-us"<line_sep>SITE_ID=1<line_sep># Conference ID and any URL prefixes
CONFERENCE_ID=1<line_sep>CONFERENCE_URL_PREFIXES={1:CONFERENCE_YEAR }<line_sep># If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N=<false><line_sep>gettext=<lambda>s:s<line_sep>LANGUAGES=(('en' gettext('English')) # ('fr', gettext('French')),
)<line_sep>LOCALE_PATHS=[os.path.join(PROJECT_ROOT "locale")]<line_sep># Absolute path to the directory that holds media - this is files uploaded
# by users, such as attachments.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT=env_or_default("MEDIA_ROOT" os.path.join(PROJECT_ROOT "site_media" "media"))<line_sep># URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL="/%s/site_media/media/"%CONFERENCE_URL_PREFIXES[CONFERENCE_ID]<line_sep># Absolute path to the directory where static files will be gathered
# at deploy time and served from in production. Should NOT be
# in version control, or contain anything before deploying.
STATIC_ROOT=os.path.join(PROJECT_ROOT "site_media" "static")<line_sep># URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL="/%s/site_media/static/"%CONFERENCE_URL_PREFIXES[CONFERENCE_ID]<line_sep># Additional directories which hold static files
STATICFILES_DIRS=[os.path.join(PACKAGE_ROOT "static") ]<line_sep>STATICFILES_FINDERS=["django.contrib.staticfiles.finders.FileSystemFinder" "django.contrib.staticfiles.finders.AppDirectoriesFinder" "compressor.finders.CompressorFinder" ]<line_sep># URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX=posixpath.join(STATIC_URL "admin/")<line_sep># Subdirectory of COMPRESS_ROOT to store the cached media files in
COMPRESS_OUTPUT_DIR="cache"<line_sep># List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS=["django.template.loaders.filesystem.Loader" "django.template.loaders.app_directories.Loader" ]<line_sep>MIDDLEWARE_CLASSES=["djangosecure.middleware.SecurityMiddleware" "django.contrib.sessions.middleware.SessionMiddleware" # LocaleMiddleware must follow session middleware and cache middleware,
# and precede commonmiddleware
"django.middleware.locale.LocaleMiddleware" "django.middleware.common.CommonMiddleware" "django.middleware.csrf.CsrfViewMiddleware" "django.contrib.auth.middleware.AuthenticationMiddleware" "django.contrib.messages.middleware.MessageMiddleware" "reversion.middleware.RevisionMiddleware" # "debug_toolbar.middleware.DebugToolbarMiddleware",
]<if_stmt>os.getenv('NOINDEX')<eq>'1'<block_start>MIDDLEWARE_CLASSES.append('pycon.noindexmiddleware.NoIndexMiddleware')<block_end>ROOT_URLCONF="symposion.urls"<line_sep>TEMPLATE_DIRS=[os.path.join(PROJECT_ROOT "pycon/templates") os.path.join(PACKAGE_ROOT "templates") ]<line_sep>TEMPLATE_CONTEXT_PROCESSORS=["django.contrib.auth.context_processors.auth" "django.core.context_processors.debug" "django.core.context_processors.i18n" "django.core.context_processors.media" "django.core.context_processors.static" "django.core.context_processors.tz" "django.core.context_processors.request" "django.contrib.messages.context_processors.messages" "pycon.context_processors.global_settings" "pinax_utils.context_processors.settings" "account.context_processors.account" "symposion.reviews.context_processors.reviews" "constance.context_processors.config" "pinax_theme_bootstrap.context_processors.theme" ]<line_sep>INSTALLED_APPS=[# Django
"django.contrib.admin" "django.contrib.auth" "django.contrib.contenttypes" "django.contrib.sessions" "django.contrib.sites" "django.contrib.messages" "django.contrib.staticfiles" "django.contrib.humanize" # theme
"pinax_theme_bootstrap" "django_forms_bootstrap" # external
"compressor" "mailer" "timezones" "metron" "easy_thumbnails" "account" "sitetree" "taggit" "reversion" "biblion" "djangosecure" "raven.contrib.django.raven_compat" "constance.backends.database" "constance" "uni_form" "gunicorn" "multi_email_field" "email_log" "djcelery_email" "multiselectfield" "markdownify" "storages" # symposion
"symposion.conference" "symposion.cms" "symposion.boxes" "symposion.speakers" "symposion.proposals" "symposion.reviews" "symposion.teams" "symposion.schedule" # custom
"markedit" "pycon" "pycon.bulkemail" "pycon.sponsorship" "pycon.registration" "pycon.schedule" "pycon.profile" "pycon.finaid" "pycon.pycon_api" "pycon.tutorials" "pycon.mentorship" ]<line_sep>FIXTURE_DIRS=[os.path.join(PROJECT_ROOT "fixtures") ]<line_sep>MESSAGE_STORAGE="django.contrib.messages.storage.session.SessionStorage"<line_sep>EMAIL_BACKEND='djcelery_email.backends.CeleryEmailBackend'<line_sep>CELERY_EMAIL_BACKEND='email_log.backends.EmailBackend'<line_sep>EMAIL_LOG_BACKEND="django.core.mail.backends.console.EmailBackend"<line_sep>ACCOUNT_OPEN_SIGNUP=<true><line_sep>ACCOUNT_USE_OPENID=<false><line_sep>ACCOUNT_REQUIRED_EMAIL=<false><line_sep>ACCOUNT_EMAIL_VERIFICATION=<false><line_sep>ACCOUNT_EMAIL_AUTHENTICATION=<false><line_sep>ACCOUNT_UNIQUE_EMAIL=EMAIL_CONFIRMATION_UNIQUE_EMAIL=<false><line_sep>ACCOUNT_CREATE_ON_SAVE=<true><line_sep>AUTHENTICATION_BACKENDS=[# Permissions backends
"symposion.teams.backends.TeamPermissionsBackend" # Django User Accounts
"account.auth_backends.EmailAuthenticationBackend" 'django.contrib.auth.backends.ModelBackend' ]<line_sep>LOGIN_URL=reverse_lazy("account_login")<line_sep>ACCOUNT_SIGNUP_REDIRECT_URL="dashboard"<line_sep>ACCOUNT_LOGIN_REDIRECT_URL="dashboard"<line_sep>ACCOUNT_LOGOUT_REDIRECT_URL="home"<line_sep>ACCOUNT_USER_DISPLAY=<lambda>user:user.get_full_name()<line_sep>LOGIN_ERROR_URL=reverse_lazy("account_login")<line_sep>EMAIL_CONFIRMATION_DAYS=2<line_sep>EMAIL_DEBUG=DEBUG<line_sep>DEBUG_TOOLBAR_CONFIG={"INTERCEPT_REDIRECTS":<false> }<line_sep>CONSTANCE_BACKEND="constance.backends.database.DatabaseBackend"<line_sep>CONSTANCE_CONFIG={# "SETTING_NAME": (default_value, "help text")
"CDN_PURGE_BASE_URL":("" "Base URL for CDN 'PURGE' requests"<concat>" when pages are edited through the web.") "CTE_SECRET":("" "Shared secret for CTE integration") "CTE_BASICAUTH_USER":("" "Shared User for accessing CTE Registration data") "CTE_BASICAUTH_PASS":("" "Shared User password for accessing CTE Registration data") "CTE_TUTORIAL_DATA_URL":("" "URL for the CSV of CTE Tutorial Registration Data") "REGISTRATION_INTRODUCTION_URL":("" "URL for introduction to registration domain") "REGISTRATION_URL":("" "URL for registration") "SPONSOR_FROM_EMAIL":("" "From address for emails to sponsors") "REGISTRATION_STATUS":("" "Used in the home page template. Valid values are 'soon', 'open' and 'closed'") }<line_sep># Instead of expecting blog posts to be typed as markup, simply expect
# raw HTML to be typed into the "Teaser:" and "Content:" fields of each
# Biblion Post in the Django admin interface. By using the identity
# function unicode() as the filter, the HTML winds up being saved to the
# database intact and unchanged.
BIBLION_PARSER=["__builtin__.unicode" {}]<line_sep>BIBLION_SECTIONS=[("general" "General") ]<line_sep>SYMPOSION_PAGE_REGEX=r"(([\w-]{1,})(/[\w-]{1,})*)/$"<line_sep>USE_X_ACCEL_REDIRECT=<false><line_sep>MARKEDIT_DEFAULT_SETTINGS={'preview':'below' 'toolbar':{'backgroundMode':'dark' }}<line_sep>CACHES={'default':{'BACKEND':'django.core.cache.backends.dummy.DummyCache' }}<line_sep># Is somebody clobbering this? We shouldn't have to set it ourselves,
# but if we don't, gunicorn's django_wsgi blows up trying to configure
# logging with an empty dictionary.
<import_from_stmt>django.utils.log DEFAULT_LOGGING<line_sep>LOGGING=copy.deepcopy(DEFAULT_LOGGING)<line_sep>LOGGING.setdefault('root' {# Default root logger, just so everything has a handler and we don't see warnings
'handlers':['null'] # null handler is defined in the default logging config
})<line_sep>BLEACH_ALLOWED_TAGS=bleach.ALLOWED_TAGS+['p']<line_sep># Django issues a nasty warning in 1.7 if you don't
# declare a runner explicitly, even though it works...
# This can be removed in 1.8, the warning has been
# removed.
TEST_RUNNER='django.test.runner.DiscoverRunner'<line_sep># Need to switch from the now-default JSON serializer, or OAuth2 breaks trying
# to serialize a datetime to JSON
SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'<line_sep># Celery
REDIS_HOST=os.environ.get('REDIS_HOST' 'localhost')<line_sep>BROKER_URL='redis://{}:6379/0'.format(REDIS_HOST)# Redis DB 0 for Celery. (Cache will use DB 1)
# We deliberately do not set CELERY_RESULT_BACKEND because we are discarding results.
# Pickle is fine, our redis is only accessible on localhost
CELERY_ACCEPT_CONTENT=['pickle']<line_sep># Some other options Celery docs say we should set when using Redis:
BROKER_TRANSPORT_OPTIONS={'fanout_prefix':<true> 'fanout_patterns':<true>}<line_sep># NOTE: to start the worker, activate the venv and run "celery -A pycon worker [options]"
# Send bulk emails every 5 minutes
CELERYBEAT_SCHEDULE={'send_bulk_emails':{'task':'pycon.bulkemail.tasks.send_bulk_emails' 'schedule':timedelta(minutes=5) }}<line_sep># EMAIL ADDRESSES
# Override in more specific settings files, please.
DEFAULT_FROM_EMAIL='<EMAIL>'<line_sep>FINANCIAL_AID_EMAIL='<EMAIL>'<line_sep>ORGANIZERS_EMAIL='<EMAIL>'<line_sep>REGISTRATION_EMAIL='<EMAIL>'<line_sep>SPONSORSHIP_EMAIL='<EMAIL>'<line_sep>THEME_CONTACT_EMAIL='<EMAIL>'<line_sep>FINANCIAL_AID_WEEKLY_REPORT_EMAIL=['<EMAIL>']<line_sep># django easy_thumbnails
THUMBNAIL_ALIASES={"":{'sponsor_homepage':{'size':(300 300)} 'sponsor_jobs':{'size':(150 80)} 'sponsor_list':{'size':(260 240)} 'sponsor_link':{'size':(150 150)} 'speaker_profile':{'size':(128 128)} }}<line_sep># fixer.io, currency conversion
FIXER_ACCESS_KEY=os.environ.get('FIXER_ACCESS_KEY')<line_sep> |
<import_stmt>functools<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>torch.utils checkpoint<import_from_stmt>torch_scatter scatter_mean scatter_add scatter_max<import_from_stmt>torchdrug data layers utils<import_from_stmt>torchdrug.layers functional<class_stmt>MessagePassingBase(nn.Module)<block_start>"""
Base module for message passing.
Any custom message passing module should be derived from this class.
"""<line_sep>gradient_checkpoint=<false><def_stmt>message self graph input<block_start>"""
Compute edge messages for the graph.
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
Returns:
Tensor: edge messages of shape :math:`(|E|, ...)`
"""<line_sep><raise>NotImplementedError<block_end><def_stmt>aggregate self graph message<block_start>"""
Aggregate edge messages to nodes.
Parameters:
graph (Graph): graph(s)
message (Tensor): edge messages of shape :math:`(|E|, ...)`
Returns:
Tensor: node updates of shape :math:`(|V|, ...)`
"""<line_sep><raise>NotImplementedError<block_end><def_stmt>message_and_aggregate self graph input<block_start>"""
Fused computation of message and aggregation over the graph.
This may provide better time or memory complexity than separate calls of
:meth:`message <MessagePassingBase.message>` and :meth:`aggregate <MessagePassingBase.aggregate>`.
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
Returns:
Tensor: node updates of shape :math:`(|V|, ...)`
"""<line_sep>message=self.message(graph input)<line_sep>update=self.aggregate(graph message)<line_sep><return>update<block_end><def_stmt>_message_and_aggregate self *tensors<block_start>graph=data.Graph.from_tensors(tensors[:-1])<line_sep>input=tensors[-1]<line_sep>update=self.message_and_aggregate(graph input)<line_sep><return>update<block_end><def_stmt>combine self input update<block_start>"""
Combine node input and node update.
Parameters:
input (Tensor): node representations of shape :math:`(|V|, ...)`
update (Tensor): node updates of shape :math:`(|V|, ...)`
"""<line_sep><raise>NotImplementedError<block_end><def_stmt>forward self graph input<block_start>"""
Perform message passing over the graph(s).
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
"""<if_stmt>self.gradient_checkpoint<block_start>update=checkpoint.checkpoint(self._message_and_aggregate *graph.to_tensors() input)<block_end><else_stmt><block_start>update=self.message_and_aggregate(graph input)<block_end>output=self.combine(input update)<line_sep><return>output<block_end><block_end><class_stmt>GraphConv(MessagePassingBase)<block_start>"""
Graph convolution operator from `Semi-Supervised Classification with Graph Convolutional Networks`_.
.. _Semi-Supervised Classification with Graph Convolutional Networks:
https://arxiv.org/pdf/1609.02907.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<def_stmt>__init__ self input_dim output_dim edge_input_dim=<none> batch_norm=<false> activation="relu"<block_start>super(GraphConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.edge_input_dim=edge_input_dim<if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end>self.linear=nn.Linear(input_dim output_dim)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim input_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end><block_end><def_stmt>message self graph input# add self loop
<block_start>node_in=torch.cat([graph.edge_list[: 0] torch.arange(graph.num_node device=graph.device)])<line_sep>degree_in=graph.degree_in.unsqueeze(-1)+1<line_sep>message=input[node_in]<if_stmt>self.edge_linear<block_start>edge_input=self.edge_linear(graph.edge_feature.float())<line_sep>edge_input=torch.cat([edge_input torch.zeros(graph.num_node self.input_dim device=graph.device)])<line_sep>message<augadd>edge_input<block_end>message<augdiv>degree_in[node_in].sqrt()<line_sep><return>message<block_end><def_stmt>aggregate self graph message# add self loop
<block_start>node_out=torch.cat([graph.edge_list[: 1] torch.arange(graph.num_node device=graph.device)])<line_sep>edge_weight=torch.cat([graph.edge_weight torch.ones(graph.num_node device=graph.device)])<line_sep>edge_weight=edge_weight.unsqueeze(-1)<line_sep>degree_out=graph.degree_out.unsqueeze(-1)+1<line_sep>update=scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node)<line_sep>update=update/degree_out.sqrt()<line_sep><return>update<block_end><def_stmt>message_and_aggregate self graph input<block_start>node_in,node_out=graph.edge_list.t()[:2]<line_sep>node_in=torch.cat([node_in torch.arange(graph.num_node device=graph.device)])<line_sep>node_out=torch.cat([node_out torch.arange(graph.num_node device=graph.device)])<line_sep>edge_weight=torch.cat([graph.edge_weight torch.ones(graph.num_node device=graph.device)])<line_sep>degree_in=graph.degree_in+1<line_sep>degree_out=graph.degree_out+1<line_sep>edge_weight=edge_weight/(degree_in[node_in]<times>degree_out[node_out]).sqrt()<line_sep>adjacency=utils.sparse_coo_tensor(torch.stack([node_in node_out]) edge_weight (graph.num_node graph.num_node))<line_sep>update=torch.sparse.mm(adjacency.t() input)<if_stmt>self.edge_linear<block_start>edge_input=graph.edge_feature.float()<if_stmt>self.edge_linear.in_features<g>self.edge_linear.out_features<block_start>edge_input=self.edge_linear(edge_input)<block_end>edge_weight=edge_weight.unsqueeze(-1)<line_sep>edge_update=scatter_add(edge_input<times>edge_weight graph.edge_list[: 1] dim=0 dim_size=graph.num_node)<if_stmt>self.edge_linear.in_features<le>self.edge_linear.out_features<block_start>edge_update=self.edge_linear(edge_update)<block_end>update<augadd>edge_update<block_end><return>update<block_end><def_stmt>combine self input 
update<block_start>output=self.linear(update)<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>GraphAttentionConv(MessagePassingBase)<block_start>"""
Graph attentional convolution operator from `Graph Attention Networks`_.
.. _Graph Attention Networks:
https://arxiv.org/pdf/1710.10903.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
num_head (int, optional): number of attention heads
negative_slope (float, optional): negative slope of leaky relu activation
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<line_sep>eps=1e-10<def_stmt>__init__ self input_dim output_dim edge_input_dim=<none> num_head=1 negative_slope=0.2 concat=<true> batch_norm=<false> activation="relu"<block_start>super(GraphAttentionConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.edge_input_dim=edge_input_dim<line_sep>self.num_head=num_head<line_sep>self.concat=concat<line_sep>self.leaky_relu=functools.partial(F.leaky_relu negative_slope=negative_slope)<if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end><if_stmt>output_dim%num_head<ne>0<block_start><raise>ValueError("Expect output_dim to be a multiplier of num_head, but found `%d` and `%d`"%(output_dim num_head))<block_end>self.linear=nn.Linear(input_dim output_dim)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim output_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end>self.query=nn.Parameter(torch.zeros(num_head output_dim<times>2<floordiv>num_head))<line_sep>nn.init.kaiming_uniform_(self.query negative_slope mode="fan_in")<block_end><def_stmt>message self graph input# add self loop
<block_start>node_in=torch.cat([graph.edge_list[: 0] torch.arange(graph.num_node device=graph.device)])<line_sep>node_out=torch.cat([graph.edge_list[: 1] torch.arange(graph.num_node device=graph.device)])<line_sep>edge_weight=torch.cat([graph.edge_weight torch.ones(graph.num_node device=graph.device)])<line_sep>edge_weight=edge_weight.unsqueeze(-1)<line_sep>hidden=self.linear(input)<line_sep>key=torch.stack([hidden[node_in] hidden[node_out]] dim=-1)<if_stmt>self.edge_linear<block_start>edge_input=self.edge_linear(graph.edge_feature.float())<line_sep>edge_input=torch.cat([edge_input torch.zeros(graph.num_node self.output_dim device=graph.device)])<line_sep>key<augadd>edge_input.unsqueeze(-1)<block_end>key=key.view(-1 *self.query.shape)<line_sep>weight=torch.einsum("hd, nhd -> nh" self.query key)<line_sep>weight=self.leaky_relu(weight)<line_sep>weight=weight-scatter_max(weight node_out dim=0 dim_size=graph.num_node)[0][node_out]<line_sep>attention=weight.exp()<times>edge_weight<line_sep># why mean? because with mean we have normalized message scale across different node degrees
normalizer=scatter_mean(attention node_out dim=0 dim_size=graph.num_node)[node_out]<line_sep>attention=attention/(normalizer+self.eps)<line_sep>value=hidden[node_in].view(-1 self.num_head self.query.shape[-1]<floordiv>2)<line_sep>attention=attention.unsqueeze(-1).expand_as(value)<line_sep>message=(attention<times>value).flatten(1)<line_sep><return>message<block_end><def_stmt>aggregate self graph message# add self loop
<block_start>node_out=torch.cat([graph.edge_list[: 1] torch.arange(graph.num_node device=graph.device)])<line_sep>update=scatter_mean(message node_out dim=0 dim_size=graph.num_node)<line_sep><return>update<block_end><def_stmt>combine self input update<block_start>output=update<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>GraphIsomorphismConv(MessagePassingBase)<block_start>"""
Graph isomorphism convolution operator from `How Powerful are Graph Neural Networks?`_
.. _How Powerful are Graph Neural Networks?:
https://arxiv.org/pdf/1810.00826.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
hidden_dims (list of int, optional): hidden dimensions
eps (float, optional): initial epsilon
learn_eps (bool, optional): learn epsilon or not
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<def_stmt>__init__ self input_dim output_dim edge_input_dim=<none> hidden_dims=<none> eps=0 learn_eps=<false> batch_norm=<false> activation="relu"<block_start>super(GraphIsomorphismConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.edge_input_dim=edge_input_dim<line_sep>eps=torch.tensor([eps] dtype=torch.float32)<if_stmt>learn_eps<block_start>self.eps=nn.Parameter(eps)<block_end><else_stmt><block_start>self.register_buffer("eps" eps)<block_end><if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end><if_stmt>hidden_dims<is><none><block_start>hidden_dims=[]<block_end>self.mlp=layers.MLP(input_dim list(hidden_dims)+[output_dim] activation)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim input_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end><block_end><def_stmt>message self graph input<block_start>node_in=graph.edge_list[: 0]<line_sep>message=input[node_in]<if_stmt>self.edge_linear<block_start>message<augadd>self.edge_linear(graph.edge_feature.float())<block_end><return>message<block_end><def_stmt>aggregate self graph message<block_start>node_out=graph.edge_list[: 1]<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<line_sep>update=scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node)<line_sep><return>update<block_end><def_stmt>message_and_aggregate self graph input<block_start>adjacency=utils.sparse_coo_tensor(graph.edge_list.t()[:2] graph.edge_weight (graph.num_node graph.num_node))<line_sep>update=torch.sparse.mm(adjacency.t() 
input)<if_stmt>self.edge_linear<block_start>edge_input=graph.edge_feature.float()<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<if_stmt>self.edge_linear.in_features<g>self.edge_linear.out_features<block_start>edge_input=self.edge_linear(edge_input)<block_end>edge_update=scatter_add(edge_input<times>edge_weight graph.edge_list[: 1] dim=0 dim_size=graph.num_node)<if_stmt>self.edge_linear.in_features<le>self.edge_linear.out_features<block_start>edge_update=self.edge_linear(edge_update)<block_end>update<augadd>edge_update<block_end><return>update<block_end><def_stmt>combine self input update<block_start>output=self.mlp((1+self.eps)<times>input+update)<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>RelationalGraphConv(MessagePassingBase)<block_start>"""
Relational graph convolution operator from `Modeling Relational Data with Graph Convolutional Networks`_.
.. _Modeling Relational Data with Graph Convolutional Networks:
https://arxiv.org/pdf/1703.06103.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
num_relation (int): number of relations
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<line_sep>eps=1e-10<def_stmt>__init__ self input_dim output_dim num_relation edge_input_dim=<none> batch_norm=<false> activation="relu"<block_start>super(RelationalGraphConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.num_relation=num_relation<line_sep>self.edge_input_dim=edge_input_dim<if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end>self.self_loop=nn.Linear(input_dim output_dim)<line_sep>self.linear=nn.Linear(num_relation<times>input_dim output_dim)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim input_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end><block_end><def_stmt>message self graph input<block_start>node_in=graph.edge_list[: 0]<line_sep>message=input[node_in]<if_stmt>self.edge_linear<block_start>message<augadd>self.edge_linear(graph.edge_feature.float())<block_end><return>message<block_end><def_stmt>aggregate self graph message<block_start><assert_stmt>graph.num_relation<eq>self.num_relation<line_sep>node_out=graph.edge_list[: 1]<times>self.num_relation+graph.edge_list[: 2]<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<line_sep>update=scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node<times>self.num_relation)/(scatter_add(edge_weight node_out dim=0 dim_size=graph.num_node<times>self.num_relation)+self.eps)<line_sep><return>update.view(graph.num_node self.num_relation<times>self.input_dim)<block_end><def_stmt>message_and_aggregate self graph 
input<block_start><assert_stmt>graph.num_relation<eq>self.num_relation<line_sep>node_in,node_out,relation=graph.edge_list.t()<line_sep>node_out=node_out<times>self.num_relation+relation<line_sep>degree_out=scatter_add(graph.edge_weight node_out dim_size=graph.num_node<times>graph.num_relation)<line_sep>edge_weight=graph.edge_weight/degree_out[node_out]<line_sep>adjacency=utils.sparse_coo_tensor(torch.stack([node_in node_out]) edge_weight (graph.num_node graph.num_node<times>graph.num_relation))<line_sep>update=torch.sparse.mm(adjacency.t() input)<if_stmt>self.edge_linear<block_start>edge_input=graph.edge_feature.float()<if_stmt>self.edge_linear.in_features<g>self.edge_linear.out_features<block_start>edge_input=self.edge_linear(edge_input)<block_end>edge_weight=edge_weight.unsqueeze(-1)<line_sep>edge_update=scatter_add(edge_input<times>edge_weight node_out dim=0 dim_size=graph.num_node<times>graph.num_relation)<if_stmt>self.edge_linear.in_features<le>self.edge_linear.out_features<block_start>edge_update=self.edge_linear(edge_update)<block_end>update<augadd>edge_update<block_end><return>update.view(graph.num_node self.num_relation<times>self.input_dim)<block_end><def_stmt>combine self input update<block_start>output=self.linear(update)+self.self_loop(input)<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>NeuralFingerprintConv(MessagePassingBase)<block_start>"""
Graph neural network operator from `Convolutional Networks on Graphs for Learning Molecular Fingerprints`_.
Note this operator doesn't include the sparsifying step of the original paper.
.. _Convolutional Networks on Graphs for Learning Molecular Fingerprints:
https://arxiv.org/pdf/1509.09292.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<def_stmt>__init__ self input_dim output_dim edge_input_dim=<none> batch_norm=<false> activation="relu"<block_start>super(NeuralFingerprintConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.edge_input_dim=edge_input_dim<if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end>self.linear=nn.Linear(input_dim output_dim)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim input_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end><block_end><def_stmt>message self graph input<block_start>node_in=graph.edge_list[: 0]<line_sep>message=input[node_in]<if_stmt>self.edge_linear<block_start>message<augadd>self.edge_linear(graph.edge_feature.float())<block_end><return>message<block_end><def_stmt>aggregate self graph message<block_start>node_out=graph.edge_list[: 1]<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<line_sep>update=scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node)<line_sep><return>update<block_end><def_stmt>message_and_aggregate self graph input<block_start>adjacency=utils.sparse_coo_tensor(graph.edge_list.t()[:2] graph.edge_weight (graph.num_node graph.num_node))<line_sep>update=torch.sparse.mm(adjacency.t() input)<if_stmt>self.edge_linear<block_start>edge_input=graph.edge_feature.float()<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<if_stmt>self.edge_linear.in_features<g>self.edge_linear.out_features<block_start>edge_input=self.edge_linear(edge_input)<block_end>edge_update=scatter_add(edge_input<times>edge_weight graph.edge_list[: 1] dim=0 
dim_size=graph.num_node)<if_stmt>self.edge_linear.in_features<le>self.edge_linear.out_features<block_start>edge_update=self.edge_linear(edge_update)<block_end>update<augadd>edge_update<block_end><return>update<block_end><def_stmt>combine self input update<block_start>output=self.linear(input+update)<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>ContinuousFilterConv(MessagePassingBase)<block_start>"""
Continuous filter operator from
`SchNet: A continuous-filter convolutional neural network for modeling quantum interactions`_.
.. _SchNet\: A continuous-filter convolutional neural network for modeling quantum interactions:
https://arxiv.org/pdf/1706.08566.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
hidden_dim (int, optional): hidden dimension. By default, same as :attr:`output_dim`
cutoff (float, optional): maximal scale for RBF kernels
num_gaussian (int, optional): number of RBF kernels
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<def_stmt>__init__ self input_dim output_dim edge_input_dim=<none> hidden_dim=<none> cutoff=5 num_gaussian=100 batch_norm=<false> activation="shifted_softplus"<block_start>super(ContinuousFilterConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.edge_input_dim=edge_input_dim<if_stmt>hidden_dim<is><none><block_start>hidden_dim=output_dim<block_end>self.hidden_dim=hidden_dim<line_sep>self.rbf=layers.RBF(stop=cutoff num_kernel=num_gaussian)<if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>activation<eq>"shifted_softplus"<block_start>self.activation=functional.shifted_softplus<block_end><elif_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end>self.input_layer=nn.Linear(input_dim hidden_dim)<line_sep>self.rbf_layer=nn.Linear(num_gaussian hidden_dim)<line_sep>self.output_layer=nn.Linear(hidden_dim output_dim)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim input_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end><block_end><def_stmt>message self graph input<block_start>node_in,node_out=graph.edge_list.t()[:2]<line_sep>position=graph.node_position<line_sep>message=self.input_layer(input)[node_in]<if_stmt>self.edge_linear<block_start>message<augadd>self.edge_linear(graph.edge_feature.float())<block_end>weight=self.rbf_layer(self.rbf(position[node_in] position[node_out]))<line_sep>message<augmul>weight<line_sep><return>message<block_end><def_stmt>aggregate self graph message<block_start>node_out=graph.edge_list[: 1]<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<line_sep>update=scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node)<line_sep><return>update<block_end><def_stmt>message_and_aggregate self graph 
input<block_start>node_in,node_out=graph.edge_list.t()[:2]<line_sep>position=graph.node_position<line_sep>rbf_weight=self.rbf_layer(self.rbf(position[node_in] position[node_out]))<line_sep>indices=torch.stack([node_out node_in torch.arange(graph.num_edge device=graph.device)])<line_sep>adjacency=utils.sparse_coo_tensor(indices graph.edge_weight (graph.num_node graph.num_node graph.num_edge))<line_sep>update=functional.generalized_rspmm(adjacency rbf_weight self.input_layer(input))<if_stmt>self.edge_linear<block_start>edge_input=graph.edge_feature.float()<if_stmt>self.edge_linear.in_features<g>self.edge_linear.out_features<block_start>edge_input=self.edge_linear(edge_input)<block_end>edge_weight=graph.edge_weight.unsqueeze(-1)<times>rbf_weight<line_sep>edge_update=scatter_add(edge_input<times>edge_weight graph.edge_list[: 1] dim=0 dim_size=graph.num_node)<if_stmt>self.edge_linear.in_features<le>self.edge_linear.out_features<block_start>edge_update=self.edge_linear(edge_update)<block_end>update<augadd>edge_update<block_end><return>update<block_end><def_stmt>combine self input update<block_start>output=self.output_layer(update)<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>MessagePassing(MessagePassingBase)<block_start>"""
Message passing operator from `Neural Message Passing for Quantum Chemistry`_.
This implements the edge network variant in the original paper.
.. _Neural Message Passing for Quantum Chemistry:
https://arxiv.org/pdf/1704.01212.pdf
Parameters:
input_dim (int): input dimension
edge_input_dim (int): dimension of edge features
hidden_dims (list of int, optional): hidden dims of edge network
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<def_stmt>__init__ self input_dim edge_input_dim hidden_dims=<none> batch_norm=<false> activation="relu"<block_start>super(MessagePassing self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=input_dim<line_sep>self.edge_input_dim=edge_input_dim<if_stmt>hidden_dims<is><none><block_start>hidden_dims=[]<block_end><if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(input_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end>self.edge_mlp=layers.MLP(edge_input_dim list(hidden_dims)+[input_dim<times>input_dim] activation)<block_end><def_stmt>message self graph input<block_start>node_in=graph.edge_list[: 0]<line_sep>transform=self.edge_mlp(graph.edge_feature.float()).view(-1 self.input_dim self.input_dim)<if_stmt>graph.num_edge<block_start>message=torch.einsum("bed, bd -> be" transform input[node_in])<block_end><else_stmt><block_start>message=torch.zeros(0 self.input_dim device=graph.device)<block_end><return>message<block_end><def_stmt>aggregate self graph message<block_start>node_out=graph.edge_list[: 1]<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<line_sep>update=scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node)<line_sep><return>update<block_end><def_stmt>combine self input update<block_start>output=update<if_stmt>self.batch_norm<block_start>output=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><block_end><class_stmt>ChebyshevConv(MessagePassingBase)<block_start>"""
Chebyshev spectral graph convolution operator from
`Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering`_.
.. _Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering:
https://arxiv.org/pdf/1606.09375.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
k (int, optional): number of Chebyshev polynomials.
This also corresponds to the radius of the receptive field.
hidden_dims (list of int, optional): hidden dims of edge network
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""<def_stmt>__init__ self input_dim output_dim edge_input_dim=<none> k=1 batch_norm=<false> activation="relu"<block_start>super(ChebyshevConv self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.output_dim=output_dim<line_sep>self.k=k<line_sep>self.edge_input_dim=edge_input_dim<if_stmt>batch_norm<block_start>self.batch_norm=nn.BatchNorm1d(output_dim)<block_end><else_stmt><block_start>self.batch_norm=<none><block_end><if_stmt>isinstance(activation str)<block_start>self.activation=getattr(F activation)<block_end><else_stmt><block_start>self.activation=activation<block_end>self.linear=nn.Linear((k+1)<times>input_dim output_dim)<if_stmt>edge_input_dim<block_start>self.edge_linear=nn.Linear(edge_input_dim input_dim)<block_end><else_stmt><block_start>self.edge_linear=<none><block_end><block_end><def_stmt>message self graph input<block_start>node_in=graph.edge_list[: 0]<line_sep>degree_in=graph.degree_in.unsqueeze(-1)<line_sep># because self-loop messages have a different scale, they are processed in combine()
message=input[node_in]<if_stmt>self.edge_linear<block_start>message<augadd>self.edge_linear(graph.edge_feature.float())<block_end>message<augdiv>degree_in[node_in].sqrt()<line_sep><return>message<block_end><def_stmt>aggregate self graph message<block_start>node_out=graph.edge_list[: 1]<line_sep>edge_weight=graph.edge_weight.unsqueeze(-1)<line_sep>degree_out=graph.degree_out.unsqueeze(-1)<line_sep># because self-loop messages have a different scale, they are processed in combine()
update=-scatter_add(message<times>edge_weight node_out dim=0 dim_size=graph.num_node)<line_sep>update=update/degree_out.sqrt()<line_sep><return>update<block_end><def_stmt>message_and_aggregate self graph input<block_start>node_in,node_out=graph.edge_list.t()[:2]<line_sep>edge_weight=-graph.edge_weight/(graph.degree_in[node_in]<times>graph.degree_out[node_out]).sqrt()<line_sep>adjacency=utils.sparse_coo_tensor(graph.edge_list.t()[:2] edge_weight (graph.num_node graph.num_node))<line_sep>update=torch.sparse.mm(adjacency.t() input)<if_stmt>self.edge_linear<block_start>edge_input=graph.edge_feature.float()<if_stmt>self.edge_linear.in_features<g>self.edge_linear.out_features<block_start>edge_input=self.edge_linear(edge_input)<block_end>edge_weight=edge_weight.unsqueeze(-1)<line_sep>edge_update=scatter_add(edge_input<times>edge_weight graph.edge_list[: 1] dim=0 dim_size=graph.num_node)<if_stmt>self.edge_linear.in_features<le>self.edge_linear.out_features<block_start>edge_update=self.edge_linear(edge_update)<block_end>update<augadd>edge_update<block_end><return>update<block_end><def_stmt>forward self graph input# Chebyshev polynomial bases
<block_start>bases=[input]<for_stmt>i range(self.k)<block_start>x=super(ChebyshevConv self).forward(graph bases[-1])<if_stmt>i<g>0<block_start>x=2<times>x-bases[-2]<block_end>bases.append(x)<block_end>bases=torch.cat(bases dim=-1)<line_sep>output=self.linear(bases)<if_stmt>self.batch_norm<block_start>x=self.batch_norm(output)<block_end><if_stmt>self.activation<block_start>output=self.activation(output)<block_end><return>output<block_end><def_stmt>combine self input update<block_start>output=input+update<line_sep><return>output<block_end><block_end> |
<import_stmt>itertools<import_from_stmt>contextlib suppress<import_from_stmt>copy deepcopy<import_from_stmt>pymongo MongoClient<import_from_stmt>tinydb_serialization SerializationMiddleware<import_from_stmt>tinymongo TinyMongoClient<import_from_stmt>tinymongo.serializers DateTimeSerializer<import_from_stmt>tinymongo.tinymongo generate_id<import_from_stmt>quokka.utils.text split_all_category_roots<class_stmt>QuokkaTinyMongoClient(TinyMongoClient)<block_start>@property<def_stmt>_storage self<block_start>serialization=SerializationMiddleware()<line_sep>serialization.register_serializer(DateTimeSerializer() 'TinyDate')<line_sep># TODO: Read custom serializers from settings and extensions
<return>serialization<block_end><block_end><class_stmt>QuokkaDB(object)<block_start>config={}<line_sep>system='tinydb'<line_sep>folder='databases'<line_sep>host='localhost'<line_sep>port=27017<line_sep>name='quokka_db'<line_sep>collections={'index':'index' 'contents':'contents' 'uploads':'uploads' 'users':'users' }<def_stmt>__init__ self app=<none><block_start>self.app=<none><if_stmt>app<is><not><none><block_start>self.init_app(app)<block_end><block_end><def_stmt>init_app self app<block_start>self.config=app.config.get('DATABASE' {})<line_sep># update atributes with config counterparts
<for_stmt>key,value self.config.items()<block_start><if_stmt>key.lower()<ne>'collections'<block_start>setattr(self key.lower() value)<block_end><else_stmt><block_start>self.collections.update(value)<block_end><block_end>self._register(app)<block_end><def_stmt>_register self app<block_start><if_stmt><not>hasattr(app 'extensions')<block_start>app.extensions={}<block_end><if_stmt>'db'<in>app.extensions<block_start><raise>RuntimeError("Flask extension already initialized")<block_end>app.extensions['db']=self<line_sep>self.app=app<block_end><def_stmt>get_db_name self collection<block_start>"""return db_name for collection"""<if_stmt>self.system<eq>"mongo"<block_start><return>self.name<block_end><return>collection<block_end><def_stmt>get_collection self collection<block_start>"""Get the corresponding database collection/table"""<line_sep>col_name=self.collections.get(collection collection)<line_sep>db_name=self.get_db_name(col_name)<line_sep><return>self.connection[db_name][col_name]<block_end><def_stmt>get_content_collection self content_id<block_start><return>self.connection[content_id]['contents']<block_end><def_stmt>get_content_collection_mongo self content_id<block_start><return>self.connection[self.name]['contents']<block_end>@property<def_stmt>connection self<block_start><if_stmt>getattr(self '_connection' <none>)<is><none><block_start><if_stmt>self.system<eq>'tinydb'<block_start>self._connection=QuokkaTinyMongoClient(self.folder)<block_end><elif_stmt>self.system<eq>'mongo'<block_start>self._connection=MongoClient(host=self.host port=self.port)<block_end><block_end><return>self._connection<block_end><def_stmt>__dir__ self<block_start>"""Return existing attributes + collection names"""<line_sep>attrs=[]<for_stmt>attr super().__dir__()<block_start><if_stmt>attr.endswith(('_mongo' 
'_tinydb'))<block_start>attrs.append(attr.rpartition('_')[0])<block_end><else_stmt><block_start>attrs.append(attr)<block_end><block_end><return>sorted(list(set(attrs))+list(self.collections.keys()))<block_end><def_stmt>__getattribute__ self name<block_start>collections=super().__getattribute__('collections')<line_sep>get_collection=super().__getattribute__('get_collection')<if_stmt>name<in>collections<block_start><return>get_collection(name)<block_end># Try to get system specific method e.g: self.categories_mongo
<try_stmt><block_start>system=super().__getattribute__('system')<line_sep><return>super().__getattribute__(f'{name}_{system}')<block_end><except_stmt>AttributeError<block_start><return>super().__getattribute__(name)<block_end><block_end># [ <-- DB query helpers --> ]
<def_stmt>generate_id self<block_start><return>generate_id()<block_end><def_stmt>value_set self colname key filter=<none> sort=<true> flat=<false> **kwargs<block_start>"""Return a set of all values in a key"""<if_stmt>filter<is><not><none><block_start>data=self.get_collection(colname).find(filter **kwargs)<block_end><else_stmt><block_start>data=self.get_collection(colname).find(**kwargs)<block_end>values=[item.get(key)<for>item data<if>item.get(key)<is><not><none>]<if_stmt>flat<is><true><block_start>values=list(itertools.chain(*values))<block_end><with_stmt>suppress(TypeError)<block_start>values=list(set(values))<block_end><return>sorted(values)<if>sort<is><true><else>values<block_end><def_stmt>author_set self sort=<true> **kwargs<block_start>users=[item.get('fullname' item.get('username'))<for>item self.users.find()]<line_sep>authors=self.value_set('index' 'authors' flat=<true> **kwargs)<line_sep>values=list(set(users+authors))<line_sep><return>sorted(values)<if>sort<is><true><else>values<block_end><def_stmt>tag_set self sort=<true> **kwargs<block_start><return>self.value_set('index' 'tags' flat=<true> sort=sort **kwargs)<block_end><def_stmt>category_set self sort=<true> **kwargs<block_start>results=self.value_set('index' 'category' sort=sort **kwargs)<line_sep>cats=[]<for_stmt>result results<block_start>cats.extend(split_all_category_roots(result))<block_end><return>sorted(set(cats))<if>sort<is><true><else>set(cats)<block_end><def_stmt>content_set self *args **kwargs<block_start><return>self.index.find(*args **kwargs)<block_end><def_stmt>article_set self *args **kwargs<block_start>kwargs.setdefault('sort' self.app.theme_context.get('ARTICLE_ORDER_BY' [('date' -1)]))<if_stmt><not>args<block_start>args=[{'content_type':'article'}]<block_end><elif_stmt>isinstance(args[0] dict)<block_start>args[0]['content_type']='article'<block_end><return>self.content_set(*args **kwargs)<block_end><def_stmt>page_set self *args **kwargs<block_start>kwargs.setdefault('sort' 
self.app.theme_context.get('PAGE_ORDER_BY' [('title' -1)]))<if_stmt><not>args<block_start>args=[{'content_type':'page'}]<block_end><elif_stmt>isinstance(args[0] dict)<block_start>args[0]['content_type']='page'<block_end><return>self.content_set(*args **kwargs)<block_end><def_stmt>block_set self *args **kwargs<block_start>kwargs.setdefault('sort' self.app.theme_context.get('BLOCK_ORDER_BY' [('title' -1)]))<if_stmt><not>args<block_start>args=[{'content_type':'block'}]<block_end><elif_stmt>isinstance(args[0] dict)<block_start>args[0]['content_type']='block'<block_end><return>self.content_set(*args **kwargs)<block_end><def_stmt>select self colname *args **kwargs<block_start><return>self.get_collection(colname).find(*args **kwargs)<block_end><def_stmt>count self colname *args **kwargs<block_start><return>self.get_collection(colname).find(*args **kwargs).count()<block_end><def_stmt>get self colname *args **kwargs<block_start><return>self.get_collection(colname).find_one(*args **kwargs)<block_end><def_stmt>insert self colname *args **kwargs<block_start><return>self.get_collection(colname).insert(*args **kwargs)<block_end><def_stmt>update self colname query doc<block_start><return>self.get_collection(colname).update_one(query doc)<block_end><def_stmt>push_content self model<block_start>"""Insert or Update content related to model"""<line_sep>collection=self.get_content_collection(model['_id'])<line_sep>current_saved=collection.find_one({'content_id':model['_id'] 'version':model.get('version' 0)})<if_stmt>is_equal(model current_saved)<block_start>model.pop('content' <none>)<line_sep><return><block_end>model_to_save=deepcopy(model)<if_stmt><not>current_saved<block_start>version=0<block_end><else_stmt><block_start>version=model.get('version' 0)+1<block_end>model['version']=model_to_save['version']=version<line_sep>model_to_save['content_id']=model_to_save.pop('_id')<line_sep>collection.insert(model_to_save)<line_sep>model.pop('content' <none>)<block_end><def_stmt>pull_content 
self model<block_start><if_stmt><not>isinstance(model dict)<block_start>model=self.get('index' {'_id':model})<block_end><if_stmt><not>model<or>(model.get('version')<eq>0<and><not>model.get('_isclone'))<block_start><return><block_end>collection=self.get_content_collection(model['_id'])<line_sep>record=collection.find_one({'content_id':model['_id'] 'version':model['version']})<line_sep><return>record['content']<if>record<else><none><block_end><def_stmt>get_with_content self **kwargs<block_start>model=self.get('index' kwargs)<if_stmt>model<block_start>model['content']=self.pull_content(model)<block_end><return>model<block_end><block_end><def_stmt>is_equal model other<block_start><if_stmt><not>other<block_start><return><false><block_end>versioned_keys=['title' 'summary' 'tags' 'category' 'date' 'content' 'authors' 'slug' 'status' 'published' 'comments' 'block_items']<for_stmt>key versioned_keys<block_start><if_stmt>model.get(key)<ne>other.get(key)<block_start><return><false><block_end><block_end><return><true><block_end> |
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>libtbx.utils write_this_is_auto_generated<import_from_stmt>libtbx.str_utils line_breaker<import_stmt>libtbx.load_env<import_stmt>libtbx.path<import_stmt>os<import_stmt>sys<import_from_stmt>six.moves range<line_sep>this="gltbx.generate_functions_bpl"<line_sep>return_types={"GLenum":0 "GLboolean":0 "GLint":0 "GLuint":0 "const GLubyte*":0 "GLUnurbs*":0 "GLUquadric*":0 "GLUtesselator*":0 }<line_sep>arg_types={"const void*":0 "GLbitfield":0 "GLboolean":0 "GLboolean*":0 "GLbyte":0 "GLclampd":0 "GLclampf":0 "GLdouble":0 "GLdouble*":0 "GLenum":0 "GLfloat":0 "GLfloat*":0 "GLint":0 "GLint*":0 "GLshort":0 "GLsizei":0 "GLubyte":0 "GLubyte*":0 "GLuint":0 "GLuint*":0 "GLushort":0 "GLushort*":0 "GLvoid*":0 "GLvoid**":0 "const GLboolean*":0 "const GLbyte*":0 "const GLclampf*":0 "const GLdouble*":0 "const GLfloat*":0 "const GLint*":0 "const GLshort*":0 "const GLubyte*":0 "const GLuint*":0 "const GLushort*":0 "const GLvoid*":0 "GLUnurbs*":0 "GLUquadric*":0 "GLUtesselator*":0 "glu_function_pointer":0 }<line_sep>opaque_pointers=["GLUnurbs*" "GLUquadric*" "GLUtesselator*" ]<line_sep>pointee_sizes={"glAreTexturesResident textures":0 "glAreTexturesResident residences":0 "glBitmap bitmap":0 "glCallLists lists":"?n*sizeof(type)" "glClipPlane equation":4 "glColor3bv v":3 "glColor3dv v":3 "glColor3fv v":3 "glColor3iv v":3 "glColor3sv v":3 "glColor3ubv v":3 "glColor3uiv v":3 "glColor3usv v":3 "glColor4bv v":4 "glColor4dv v":4 "glColor4fv v":4 "glColor4iv v":4 "glColor4sv v":4 "glColor4ubv v":4 "glColor4uiv v":4 "glColor4usv v":4 "glColorPointer pointer":0 "glDeleteTextures textures":0 "glDrawElements indices":0 "glDrawPixels pixels":0 "glEdgeFlagv flag":1 "glEdgeFlagPointer pointer":0 "glEvalCoord1dv u":1 "glEvalCoord1fv u":1 "glEvalCoord2dv u":2 "glEvalCoord2fv u":2 "glFeedbackBuffer buffer":"size" "glFogfv params":"?pname=GL_FOG_COLOR: 4, default: 1" "glFogiv params":"?pname=GL_FOG_COLOR: 4, default: 1" 
"glGenTextures textures":"n" "glGetClipPlane equation":4 "glGetBooleanv params":"?1..16 depending on pname" "glGetDoublev params":"?1..16 depending on pname" "glGetFloatv params":"?1..16 depending on pname" "glGetIntegerv params":"?1..16 depending on pname" "glGetLightfv params":"?1..4 depending on pname" "glGetLightiv params":"?1..4 depending on pname" "glGetMapdv v":0 "glGetMapfv v":0 "glGetMapiv v":0 "glGetMaterialfv params":"?1..4 depending on pname" "glGetMaterialiv params":0 "glGetPixelMapfv values":"?glGet(map)" "glGetPixelMapuiv values":0 "glGetPixelMapusv values":0 "glGetPointerv params":0 "glGetPolygonStipple mask":0 "glGetTexEnvfv params":0 "glGetTexEnviv params":0 "glGetTexGendv params":0 "glGetTexGenfv params":0 "glGetTexGeniv params":0 "glGetTexImage pixels":0 "glGetTexLevelParameterfv params":0 "glGetTexLevelParameteriv params":0 "glGetTexParameterfv params":0 "glGetTexParameteriv params":0 "glIndexdv c":0 "glIndexfv c":0 "glIndexiv c":0 "glIndexsv c":0 "glIndexubv c":0 "glIndexPointer pointer":0 "glInterleavedArrays pointer":0 "glLightfv params":0 "glLightiv params":0 "glLightModelfv params":0 "glLightModeliv params":0 "glLoadMatrixd m":0 "glLoadMatrixf m":0 "glMap1d points":0 "glMap1f points":0 "glMap2d points":0 "glMap2f points":0 "glMaterialfv params":0 "glMaterialiv params":0 "glMultMatrixd m":0 "glMultMatrixf m":0 "glNormal3bv v":3 "glNormal3dv v":3 "glNormal3fv v":3 "glNormal3iv v":3 "glNormal3sv v":3 "glNormalPointer pointer":0 "glPixelMapfv values":0 "glPixelMapuiv values":0 "glPixelMapusv values":0 "glPolygonStipple mask":0 "glPrioritizeTextures textures":0 "glPrioritizeTextures priorities":0 "glRasterPos2dv v":2 "glRasterPos2fv v":2 "glRasterPos2iv v":2 "glRasterPos2sv v":2 "glRasterPos3dv v":3 "glRasterPos3fv v":3 "glRasterPos3iv v":3 "glRasterPos3sv v":3 "glRasterPos4dv v":4 "glRasterPos4fv v":4 "glRasterPos4iv v":4 "glRasterPos4sv v":4 "glReadPixels pixels":0 "glRectdv v1":2 "glRectdv v2":2 "glRectfv v1":2 "glRectfv v2":2 "glRectiv 
v1":2 "glRectiv v2":2 "glRectsv v1":2 "glRectsv v2":2 "glSelectBuffer buffer":0 "glTexCoord1dv v":1 "glTexCoord1fv v":1 "glTexCoord1iv v":1 "glTexCoord1sv v":1 "glTexCoord2dv v":2 "glTexCoord2fv v":2 "glTexCoord2iv v":2 "glTexCoord2sv v":2 "glTexCoord3dv v":3 "glTexCoord3fv v":3 "glTexCoord3iv v":3 "glTexCoord3sv v":3 "glTexCoord4dv v":4 "glTexCoord4fv v":4 "glTexCoord4iv v":4 "glTexCoord4sv v":4 "glTexCoordPointer pointer":0 "glTexEnvfv params":0 "glTexEnviv params":0 "glTexGendv params":0 "glTexGenfv params":0 "glTexGeniv params":0 "glTexImage1D pixels":0 "glTexImage2D pixels":0 "glTexParameterfv params":0 "glTexParameteriv params":0 "glTexSubImage1D pixels":0 "glTexSubImage2D pixels":0 "gluBeginCurve nurb":0 "gluEndCurve nurb":0 "gluBeginPolygon tess":0 "gluEndPolygon tess":0 "gluBeginSurface nurb":0 "gluEndSurface nurb":0 "gluBeginTrim nurb":0 "gluEndTrim nurb":0 "gluBuild1DMipmaps data":0 "gluBuild2DMipmaps data":0 "gluCylinder quad":0 "gluDeleteNurbsRenderer nurb":0 "gluDeleteQuadric quad":0 "gluDeleteTess tess":0 "gluDisk quad":0 "gluGetNurbsProperty nurb":0 "gluGetNurbsProperty data":0 "gluGetTessProperty tess":0 "gluGetTessProperty data":0 "gluLoadSamplingMatrices nurb":0 "gluLoadSamplingMatrices model":16 "gluLoadSamplingMatrices perspective":16 "gluLoadSamplingMatrices view":4 "gluNextContour tess":0 "gluNurbsCallbackDataEXT nurb":0 "gluNurbsCallbackDataEXT userData":0 "gluNurbsCallback nurb":0 "gluNurbsCurve nurb":0 "gluNurbsCurve knots":0 "gluNurbsCurve control":0 "gluNurbsProperty nurb":0 "gluNurbsSurface nurb":0 "gluNurbsSurface sKnots":0 "gluNurbsSurface tKnots":0 "gluNurbsSurface control":0 "gluPartialDisk quad":0 "gluPickMatrix viewport":4 "gluProject model":16 "gluProject proj":16 "gluProject view":4 "gluProject winX":1 "gluProject winY":1 "gluProject winZ":1 "gluPwlCurve nurb":0 "gluPwlCurve data":0 "gluQuadricCallback quad":0 "gluQuadricDrawStyle quad":0 "gluQuadricNormals quad":0 "gluQuadricOrientation quad":0 "gluQuadricTexture quad":0 
"gluScaleImage dataIn":0 "gluScaleImage dataOut":0 "gluSphere quad":0 "gluTessBeginContour tess":0 "gluTessEndContour tess":0 "gluTessBeginPolygon tess":0 "gluTessBeginPolygon data":0 "gluTessCallback tess":0 "gluTessEndPolygon tess":0 "gluTessNormal tess":0 "gluTessProperty tess":0 "gluTessVertex tess":0 "gluTessVertex location":0 "gluTessVertex data":0 "gluUnProject model":16 "gluUnProject proj":16 "gluUnProject view":4 "gluUnProject objX":1 "gluUnProject objY":1 "gluUnProject objZ":1 "glVertex2dv v":2 "glVertex2fv v":2 "glVertex2iv v":2 "glVertex2sv v":2 "glVertex3dv v":3 "glVertex3fv v":3 "glVertex3iv v":3 "glVertex3sv v":3 "glVertex4dv v":4 "glVertex4fv v":4 "glVertex4iv v":4 "glVertex4sv v":4 "glVertexPointer pointer":0 }<line_sep>version_guards={"glBlendColorEXT":"GL_XXX" "glEdgeFlagPointer":"GLTBX_XXX" "gluNurbsCallbackDataEXT":"GL_XXX" }<line_sep>special_wrappers={"glGetString":["""\
boost::python::str
gl_GetString(boost::python::object const& py_name)
{
boost::python::extract<GLenum> name_proxy(py_name);
GLenum name = name_proxy();
boost::python::str result(
reinterpret_cast<const char*>(glGetString(name)));
return result;
}
""" <none>] "gluGetString":["""\
boost::python::str
glu_GetString(boost::python::object const& py_name)
{
boost::python::extract<GLenum> name_proxy(py_name);
GLenum name = name_proxy();
boost::python::str result(
reinterpret_cast<const char*>(gluGetString(name)));
return result;
}
""" <none>] "gluErrorString":["""\
boost::python::str
glu_ErrorString(boost::python::object const& py_error)
{
boost::python::extract<GLenum> error_proxy(py_error);
GLenum error = error_proxy();
return boost::python::str(
reinterpret_cast<const char*>(gluErrorString(error)));
}
""" <none>] }<def_stmt>bytes_converters signature expected_size="0" post_extract=""<block_start><assert_stmt>signature.return_type<eq>"void"<line_sep>function_name=signature.function_name<line_sep>arg_type=signature.args[-1].type<line_sep>arg_name=signature.args[-1].name<line_sep>arg_type_name=arg_type+" "+arg_name<line_sep>is_const=arg_type.startswith("const ")<line_sep>call="\n".join(signature.format_call(return_directly=is_const prefix=" "))<if_stmt>(<not>is_const)<block_start>call<augadd>"\n %s_proxy.write_back();"%arg_name<line_sep>is_const="false"<block_end><else_stmt><block_start>is_const="true"<block_end>extracts=[""]<for_stmt>arg signature.args[:-1]<block_start><assert_stmt><not>arg.type.startswith("const ")<line_sep>extracts.append("boost::python::extract<%s> %s_proxy(py_%s);"%(arg.type arg.name arg.name))<line_sep>extracts.append("%s %s = %s_proxy();"%(arg.type arg.name arg.name))<block_end>extracts="\n ".join(extracts)<line_sep><return>"""\
%(extracts)s%(post_extract)s
if (type == GL_BYTE) {
boost_python::converter_str<GLubyte> %(arg_name)s_proxy(
"%(arg_name)s", py_%(arg_name)s, %(expected_size)s, %(is_const)s);
%(arg_type_name)s = reinterpret_cast<%(arg_type)s>(
%(arg_name)s_proxy.get());
%(call)s
}
else if (type == GL_UNSIGNED_BYTE) {
boost_python::converter_str<GLbyte> %(arg_name)s_proxy(
"%(arg_name)s", py_%(arg_name)s, %(expected_size)s, %(is_const)s);
%(arg_type_name)s = reinterpret_cast<%(arg_type)s>(
%(arg_name)s_proxy.get());
%(call)s
}
else {
throw std::runtime_error(
"Conversion not implemented for given GLenum type:"
" %(function_name)s(): %(arg_type_name)s");
}"""%vars()<block_end><def_stmt>glReadPixels_wrapper_body signature<block_start><return>bytes_converters(signature=signature expected_size="expected_size" post_extract="""
boost::python::ssize_t expected_size = glReadPixels_pixels_expected_size(
width, height, format, type);""")<block_end>special_wrapper_bodies={"glCallLists":bytes_converters "glDrawPixels":bytes_converters "glGetTexImage":bytes_converters "glReadPixels":glReadPixels_wrapper_body "glTexImage1D":bytes_converters "glTexImage2D":bytes_converters "glTexSubImage1D":bytes_converters "glTexSubImage2D":bytes_converters }<class_stmt>argument<block_start><def_stmt>__init__ self function_name string<block_start>fields=string.split()<line_sep>self.type=" ".join(fields[:-1])<line_sep>self.name=fields[-1]<assert_stmt>self.type<in>arg_types<line_sep>arg_types[self.type]<augadd>1<if_stmt>(self.type[-1]<ne>"*")<block_start>self.pointee_size=<none><block_end><else_stmt><block_start>self.pointee_size=pointee_sizes[function_name+" "+self.name]<block_end><block_end><block_end><class_stmt>signature<block_start><def_stmt>__init__ self string<block_start><assert_stmt>string.endswith(" )")<line_sep>fields=string[:-2].split("(")<assert_stmt>len(fields)<eq>2<line_sep>arg_strings=[]<for_stmt>arg fields[1].split(",")<block_start>arg_strings.append(" ".join(arg.replace("*" " * ").split()).replace(" *" "*"))<block_end>fields=fields[0].split()<line_sep>self.return_type=" ".join(" ".join(fields[:-1]).replace("*" " * ").split()).replace(" *" "*")<line_sep>self.function_name=fields[-1]<if_stmt>(self.return_type<ne>"void")<block_start><assert_stmt>self.return_type<in>return_types<line_sep>return_types[self.return_type]<augadd>1<block_end>self.args=[]<if_stmt>(arg_strings<ne>["void"])<block_start><for_stmt>arg arg_strings<block_start>self.args.append(argument(self.function_name arg))<block_end><block_end>self.version_guard=version_guards.get(self.function_name <none>)<line_sep>self.have_opaque_pointer=self.return_type<in>opaque_pointers<if_stmt>(<not>self.have_opaque_pointer)<block_start><for_stmt>arg self.args<block_start><if_stmt>(arg.type<in>opaque_pointers)<block_start>self.have_opaque_pointer=<true><block_end><block_end><block_end><block_end><def_stmt>show self 
f=<none><block_start><if_stmt>(f<is><none>)<block_start>f=sys.stdout<block_end>print("function name:" self.function_name file=f)<line_sep>print(" return type:" self.return_type file=f)<for_stmt>arg self.args<block_start>print(" arg type:" arg.type "name:" arg.name file=f)<block_end><block_end><def_stmt>wrapper_function_name self<block_start>i=2<if_stmt>(self.function_name.startswith("glu"))<block_start>i=3<block_end><return>self.function_name[:i]+"_"+self.function_name[i:]<block_end><def_stmt>write_no_opaque_pointers_guard_if self f<block_start><if_stmt>(self.have_opaque_pointer)<block_start>print("#if !defined(GLTBX_NO_OPAQUE_POINTERS)" file=f)<block_end><block_end><def_stmt>write_no_opaque_pointers_guard_endif self f<block_start><if_stmt>(self.have_opaque_pointer)<block_start>print("#endif" file=f)<block_end><block_end><def_stmt>write_version_guard_if self f<block_start><if_stmt>(self.version_guard<is><not><none>)<block_start>print("#if defined(%s)"%self.version_guard file=f)<block_end><block_end><def_stmt>write_version_guard_endif self f<block_start><if_stmt>(self.version_guard<is><not><none>)<block_start>print("#endif" file=f)<block_end><block_end><def_stmt>format_call self return_directly prefix<block_start>s=""<if_stmt>(self.return_type<ne>"void")<block_start><if_stmt>(return_directly)<block_start>s<augadd>"return "<block_end><else_stmt><block_start>s<augadd>self.return_type+" result = "<block_end><block_end>s<augadd>self.function_name+"("<line_sep>s<augadd>", ".join([arg.name<for>arg self.args])<line_sep>s<augadd>");"<line_sep>result=[]<line_sep>indent=""<for_stmt>line line_breaker(s 70)<block_start>result.append(prefix+indent+line)<line_sep>indent=" "<block_end><return>result<block_end><def_stmt>write_wrapper self f<block_start>special=special_wrappers.get(self.function_name <none>)<if_stmt>(special<is><not><none><and>special[0]<is><not><none>)<block_start>print(special[0] file=f)<line_sep><return><block_end>lines=[self.return_type 
self.wrapper_function_name()+"("]<for_stmt>arg self.args<block_start>lines.append(" %s %s,"%("boost::python::object const&" "py_"+arg.name))<block_end><if_stmt>(lines[-1][-1]<eq>",")<block_start>lines[-1]=lines[-1][:-1]<block_end>lines[-1]<augadd>")"<line_sep>lines.append("{")<line_sep>special_body=special_wrapper_bodies.get(self.function_name <none>)<if_stmt>(special_body<is><not><none>)<block_start>lines.extend(special_body(self).splitlines())<block_end><else_stmt><block_start>not_implemented=["const void*" "GLvoid*" "GLvoid**" "const GLvoid*" "glu_function_pointer"]<line_sep>to_write_back=[]<line_sep>ss=""<for_stmt>arg self.args<block_start><if_stmt>((arg.pointee_size<is><not><none><and>arg.type<not><in>opaque_pointers)<or>arg.type<eq>"glu_function_pointer")<block_start><if_stmt>(arg.type<in>not_implemented)<block_start>lines.append(ss+" throw std::runtime_error(")<line_sep>lines.append(ss+' "Conversion not implemented:"')<line_sep>lines.append(ss+' " %s(): %s %s");'%(self.function_name arg.type arg.name))<line_sep>ss="//"<line_sep>lines.append(ss+" %s %s = 0;"%(arg.type arg.name))<block_end><else_stmt><block_start>expected_size=arg.pointee_size<if_stmt>(isinstance(expected_size str))<block_start><if_stmt>(expected_size[0]<eq>"?")<block_start>expected_size="0"<block_end><block_end><else_stmt><block_start><assert_stmt>isinstance(expected_size int)<line_sep>expected_size=str(expected_size)<block_end><if_stmt>(arg.type.startswith("const "))<block_start>is_const="true"<line_sep>converter_t=arg.type[6:-1]<block_end><else_stmt><block_start>is_const="false"<line_sep>converter_t=arg.type[:-1]<block_end><if_stmt>(arg.type.endswith("GLbyte*")<or>arg.type.endswith("GLubyte*"))<block_start>converter="boost_python::converter_str"<block_end><else_stmt><block_start>converter="boost_python::converter"<block_end>lines.append(ss+' %s<%s> %s_proxy('%(converter converter_t arg.name))<line_sep>lines.append(ss+' "%s", py_%s, %s, %s);'%(arg.name arg.name expected_size 
is_const))<line_sep>lines.append(ss+" %s %s = %s_proxy.get();"%(arg.type arg.name arg.name))<if_stmt>(is_const<eq>"false")<block_start>to_write_back.append(arg)<block_end><block_end><block_end><else_stmt><block_start><assert_stmt><not>arg.type.startswith("const ")<line_sep>lines.append(ss+' boost::python::extract<%s> %s_proxy(py_%s);'%(arg.type arg.name arg.name))<line_sep>lines.append(ss+" %s %s = %s_proxy();"%(arg.type arg.name arg.name))<block_end><block_end>return_directly=len(to_write_back)<eq>0<line_sep>lines.extend([ss+line<for>line self.format_call(return_directly=return_directly prefix=" ")])<for_stmt>arg to_write_back<block_start>lines.append(ss+" %s_proxy.write_back();"%arg.name)<block_end><if_stmt>(self.return_type<ne>"void"<and><not>return_directly)<block_start>lines.append(ss+" return result;")<block_end><block_end>lines.append("}")<line_sep>self.write_no_opaque_pointers_guard_if(f=f)<line_sep>self.write_version_guard_if(f=f)<for_stmt>line lines<block_start>print(" " line file=f)<block_end>self.write_version_guard_endif(f=f)<line_sep>self.write_no_opaque_pointers_guard_endif(f=f)<line_sep>print(file=f)<block_end><def_stmt>write_def self f<block_start>special=special_wrappers.get(self.function_name <none>)<if_stmt>(special<is><not><none><and>special[1]<is><not><none>)<block_start>print(special[1] file=f)<line_sep><return><block_end>return_opaque=self.return_type<in>opaque_pointers<line_sep>def_args=(self.function_name self.wrapper_function_name())<line_sep>self.write_no_opaque_pointers_guard_if(f=f)<line_sep>self.write_version_guard_if(f=f)<if_stmt>(len(self.args)<eq>0)<block_start><if_stmt>(<not>return_opaque)<block_start>print(' def("%s", %s);'%def_args file=f)<block_end><else_stmt><block_start>print(' def("%s", %s,'%def_args file=f)<line_sep>print(" return_value_policy<return_opaque_pointer>());" file=f)<block_end><block_end><else_stmt><block_start><assert_stmt><not>return_opaque<line_sep>print(' def("%s", %s, ('%def_args 
file=f)<line_sep>s=""<for_stmt>arg self.args<block_start>s<augadd>', arg("%s")'%arg.name<block_end>s=s[2:]+"));"<for_stmt>line line_breaker(s 73)<block_start>print(" "+line file=f)<block_end><block_end>self.write_version_guard_endif(f=f)<line_sep>self.write_no_opaque_pointers_guard_endif(f=f)<block_end><block_end><def_stmt>get_signatures <block_start>result=[]<line_sep>specs_file=libtbx.env.under_dist("gltbx" "opengl_specs.txt")<for_stmt>line open(specs_file).read().splitlines()<block_start><if_stmt>(<not>(line.startswith("GL_")<or>line.startswith("GLU_")))<block_start>result.append(signature(line))<block_end><block_end><return>result<block_end><def_stmt>write_function_wrappers f namespace signatures i_fragment<block_start>write_this_is_auto_generated(f this)<line_sep>print("""\
#include <gltbx/special_wrapper_support.h>
#include <gltbx/pointer_args_bpl.h>
#include <gltbx/error.h>
""" file=f)<if_stmt>(namespace<eq>"glu")<block_start>print("#if defined(__GNUC__) && __GNUC__ == 2 \\" file=f)<line_sep>print(" && __GNUC_MINOR__ == 96 && __GNUC_PATCHLEVEL__ == 0" file=f)<line_sep>print("#define GLTBX_NO_OPAQUE_POINTERS" file=f)<line_sep>print("#else" file=f)<line_sep>print("#include <boost/python/return_value_policy.hpp>" file=f)<line_sep>print("#include <boost/python/return_opaque_pointer.hpp>" file=f)<for_stmt>opaque_pointer opaque_pointers<block_start>print("BOOST_PYTHON_OPAQUE_SPECIALIZED_TYPE_ID(%s)"%(opaque_pointer[:-1]) file=f)<block_end>print("#endif" file=f)<line_sep>print(file=f)<block_end>print("""\
namespace gltbx { namespace %s { namespace {
"""%namespace file=f)<for_stmt>signature signatures<block_start>signature.write_wrapper(f=f)<block_end>print("""\
} // namespace <anonymous>
namespace boost_python {
void
wrap_functions_%02d()
{
using namespace boost::python;"""%i_fragment file=f)<for_stmt>signature signatures<block_start>signature.write_def(f=f)<block_end>print("""\
}
}}} // namespace gltbx::%s::boost_python"""%namespace file=f)<block_end><def_stmt>run target_dir<block_start><if_stmt>(<not>os.path.isdir(target_dir))<block_start>os.makedirs(target_dir)<block_end>gl_signatures=[]<line_sep>glu_signatures=[]<for_stmt>signature get_signatures()<block_start><if_stmt>(signature.function_name.startswith("glu"))<block_start>glu_signatures.append(signature)<block_end><else_stmt><block_start>gl_signatures.append(signature)<block_end><block_end><for_stmt>namespace,signatures,n_fragments [("gl" gl_signatures 16) ("glu" glu_signatures 4)]<block_start>block_size=len(signatures)<floordiv>n_fragments<if_stmt>(block_size<times>n_fragments<l>len(signatures))<block_start>block_size<augadd>1<block_end><for_stmt>i_fragment range(n_fragments)<block_start>file_name=libtbx.path.norm_join(target_dir namespace+"_functions_%02d_bpl.cpp"%i_fragment)<with_stmt>open(file_name "w")<as>f<block_start>write_function_wrappers(f=f namespace=namespace signatures=signatures[i_fragment<times>block_size:(i_fragment+1)<times>block_size] i_fragment=i_fragment)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>run(".")<block_end> |
<def_stmt>zigZag_Fashion array length<block_start>flag=<true><for_stmt>i range(length-1)<block_start><if_stmt>flag<is><true><block_start><if_stmt>array[i]<g>array[i+1]<block_start>array[i],array[i+1]=array[i+1] array[i]<block_end><block_end><else_stmt><block_start><if_stmt>array[i]<l>array[i+1]<block_start>array[i],array[i+1]=array[i+1] array[i]<block_end><block_end>flag=bool(1-flag)<block_end>print(array)<block_end>arraySize=int(input("Enter Array Size:- "))<line_sep>array=[]<line_sep>print("Enter Array Elements")<for_stmt>i range(arraySize)<block_start>array.append(int(input()))<block_end>length=len(array)<line_sep>zigZag_Fashion(array length)<line_sep> |
<import_stmt>json<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>requests<import_from_stmt>tqdm tqdm<line_sep>api_url="http://127.0.0.1/api"<line_sep>df=pd.read_csv("Backend regression test cases.csv")<def_stmt>extract_params case<block_start>search={}<line_sep>search["service"]=case.get("data integration" "").lower()<line_sep>search["q"]=case.get("search query" "")<line_sep>search["from"]=case.get("from" "")<line_sep>search["to"]=case.get("to" "")<line_sep>search["sorting"]=case.get("sorting" "")<line_sep>search["article_types"]=case.get("article types" "[]")<line_sep><return>search<block_end><def_stmt>get_input_data search raw=<false><block_start>params=search<line_sep>service=params.pop('service')<if_stmt>service<not><in>["base" "pubmed"]<block_start><return><none><block_end><if_stmt>service<eq>"pubmed"<block_start>params.pop("article_types")<line_sep>params["limit"]=100<block_end><if_stmt>service<eq>"base"<block_start>params["limit"]=120<line_sep>doctypes=eval(params["article_types"])<if_stmt>isinstance(doctypes list)<block_start>params["document_types"]=[a<for>a doctypes]<block_end><else_stmt><block_start>params["document_types"]=[121]<block_end>params.pop("article_types" [])<block_end><if_stmt>raw<block_start>params["raw"]=<true><block_end>url="/".join([api_url service "search"])<line_sep>res=requests.post(url json=params)<line_sep><return>res<block_end><for_stmt>r tqdm(df.iterrows())<block_start>case=dict(r[1])<if_stmt>np.isnan(case["case id"])<block_start><continue><block_end>s=extract_params(case)<line_sep>res=get_input_data(s raw=<true>)<if_stmt>res<is><none><block_start><continue><block_end>res_json=res.json()<line_sep>res_json.pop("id")<line_sep>input_data=res_json["input_data"]<line_sep>params=res_json["params"]<with_stmt>open("knowncases/testcase%d.json"%case["case id"] "w")<as>outfile<block_start>json.dump(res_json outfile indent=4 separators=(',' ': ') sort_keys=<true>)<block_end><block_end> |
<import_from_stmt>paiargparse PAIArgumentParser<import_from_stmt>tfaip.util.logging logger<import_from_stmt>calamari_ocr.ocr.training.cross_fold_trainer CrossFoldTrainer CrossFoldTrainerParams <line_sep>logger=logger(__name__)<def_stmt>run <block_start><return>main(parse_args())<block_end><def_stmt>parse_args args=<none><block_start>parser=PAIArgumentParser()<line_sep>parser.add_root_argument("root" CrossFoldTrainerParams CrossFoldTrainerParams())<line_sep>params:CrossFoldTrainerParams=parser.parse_args(args).root<line_sep># TODO: add the training args (omit those params, that are set by the cross fold training)
# setup_train_args(parser, omit=["files", "validation", "weights",
# "early_stopping_best_model_output_dir", "early_stopping_best_model_prefix",
# "output_dir"])
<return>params<block_end><def_stmt>main params<block_start>trainer=CrossFoldTrainer(params)<line_sep>logger.info("Running cross fold train with params")<line_sep>logger.info(params.to_json(indent=2))<line_sep>trainer.run()<block_end><if_stmt>__name__<eq>"__main__"<block_start>run()<block_end> |
<import_stmt>sys<import_stmt>unittest<import_stmt>pendulum<import_from_stmt>src Stocks StocksCommandService <import_from_stmt>minos.networks InMemoryRequest Response <import_from_stmt>tests.utils build_dependency_injector <class_stmt>TestStocksCommandService(unittest.IsolatedAsyncioTestCase)<block_start><def_stmt>setUp self<arrow><none><block_start>self.injector=build_dependency_injector()<block_end><async_keyword><def_stmt>asyncSetUp self<arrow><none><block_start><await>self.injector.wire(modules=[sys.modules[__name__]])<block_end><async_keyword><def_stmt>asyncTearDown self<arrow><none><block_start><await>self.injector.unwire()<block_end><def_stmt>test_constructor self<block_start>service=StocksCommandService()<line_sep>self.assertIsInstance(service StocksCommandService)<block_end><async_keyword><def_stmt>test_get_remote_quotes self<block_start>service=StocksCommandService()<line_sep>now=pendulum.now()<line_sep>now_minus_one_month=now.subtract(months=1)<line_sep>response=service.call_remote("AAPL" now_minus_one_month.to_date_string() now.to_date_string())<line_sep>self.assertIsInstance(response list)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_from_stmt>fastapi FastAPI<import_from_stmt>.config settings<line_sep>app=FastAPI()<line_sep>@app.get("/info")<async_keyword><def_stmt>info <block_start><return>{"app_name":settings.app_name "admin_email":settings.admin_email "items_per_user":settings.items_per_user }<block_end> |
<import_stmt>pytest<import_from_stmt>libp2p.peer.peerstore PeerStore PeerStoreError<line_sep># Testing methods from IPeerStore base class.
<def_stmt>test_peer_info_empty <block_start>store=PeerStore()<with_stmt>pytest.raises(PeerStoreError)<block_start>store.peer_info("peer")<block_end><block_end><def_stmt>test_peer_info_basic <block_start>store=PeerStore()<line_sep>store.add_addr("peer" "/foo" 10)<line_sep>info=store.peer_info("peer")<assert_stmt>info.peer_id<eq>"peer"<assert_stmt>info.addrs<eq>["/foo"]<block_end><def_stmt>test_add_get_protocols_basic <block_start>store=PeerStore()<line_sep>store.add_protocols("peer1" ["p1" "p2"])<line_sep>store.add_protocols("peer2" ["p3"])<assert_stmt>set(store.get_protocols("peer1"))<eq>set(["p1" "p2"])<assert_stmt>set(store.get_protocols("peer2"))<eq>set(["p3"])<block_end><def_stmt>test_add_get_protocols_extend <block_start>store=PeerStore()<line_sep>store.add_protocols("peer1" ["p1" "p2"])<line_sep>store.add_protocols("peer1" ["p3"])<assert_stmt>set(store.get_protocols("peer1"))<eq>set(["p1" "p2" "p3"])<block_end><def_stmt>test_set_protocols <block_start>store=PeerStore()<line_sep>store.add_protocols("peer1" ["p1" "p2"])<line_sep>store.add_protocols("peer2" ["p3"])<line_sep>store.set_protocols("peer1" ["p4"])<line_sep>store.set_protocols("peer2" [])<assert_stmt>set(store.get_protocols("peer1"))<eq>set(["p4"])<assert_stmt>set(store.get_protocols("peer2"))<eq>set([])<block_end># Test with methods from other Peer interfaces.
<def_stmt>test_peers <block_start>store=PeerStore()<line_sep>store.add_protocols("peer1" [])<line_sep>store.put("peer2" "key" "val")<line_sep>store.add_addr("peer3" "/foo" 10)<assert_stmt>set(store.peer_ids())<eq>set(["peer1" "peer2" "peer3"])<block_end> |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['ServiceDnsConfigArgs' 'ServiceDnsConfigDnsRecordArgs' 'ServiceHealthCheckConfigArgs' 'ServiceHealthCheckCustomConfigArgs' ]<line_sep>@pulumi.input_type<class_stmt>ServiceDnsConfigArgs<block_start><def_stmt>__init__ __self__ * dns_records:pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]] namespace_id:pulumi.Input[str] routing_policy:Optional[pulumi.Input[str]]=<none><block_start>"""
:param pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]] dns_records: An array that contains one DnsRecord object for each resource record set.
:param pulumi.Input[str] namespace_id: The ID of the namespace to use for DNS configuration.
:param pulumi.Input[str] routing_policy: The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
"""<line_sep>pulumi.set(__self__ "dns_records" dns_records)<line_sep>pulumi.set(__self__ "namespace_id" namespace_id)<if_stmt>routing_policy<is><not><none><block_start>pulumi.set(__self__ "routing_policy" routing_policy)<block_end><block_end>@property@pulumi.getter(name="dnsRecords")<def_stmt>dns_records self<arrow>pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]]<block_start>"""
An array that contains one DnsRecord object for each resource record set.
"""<line_sep><return>pulumi.get(self "dns_records")<block_end>@dns_records.setter<def_stmt>dns_records self value:pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]]<block_start>pulumi.set(self "dns_records" value)<block_end>@property@pulumi.getter(name="namespaceId")<def_stmt>namespace_id self<arrow>pulumi.Input[str]<block_start>"""
The ID of the namespace to use for DNS configuration.
"""<line_sep><return>pulumi.get(self "namespace_id")<block_end>@namespace_id.setter<def_stmt>namespace_id self value:pulumi.Input[str]<block_start>pulumi.set(self "namespace_id" value)<block_end>@property@pulumi.getter(name="routingPolicy")<def_stmt>routing_policy self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
"""<line_sep><return>pulumi.get(self "routing_policy")<block_end>@routing_policy.setter<def_stmt>routing_policy self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "routing_policy" value)<block_end><block_end>@pulumi.input_type<class_stmt>ServiceDnsConfigDnsRecordArgs<block_start><def_stmt>__init__ __self__ * ttl:pulumi.Input[int] type:pulumi.Input[str]<block_start>"""
:param pulumi.Input[int] ttl: The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
:param pulumi.Input[str] type: The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""<line_sep>pulumi.set(__self__ "ttl" ttl)<line_sep>pulumi.set(__self__ "type" type)<block_end>@property@pulumi.getter<def_stmt>ttl self<arrow>pulumi.Input[int]<block_start>"""
The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
"""<line_sep><return>pulumi.get(self "ttl")<block_end>@ttl.setter<def_stmt>ttl self value:pulumi.Input[int]<block_start>pulumi.set(self "ttl" value)<block_end>@property@pulumi.getter<def_stmt>type self<arrow>pulumi.Input[str]<block_start>"""
The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""<line_sep><return>pulumi.get(self "type")<block_end>@type.setter<def_stmt>type self value:pulumi.Input[str]<block_start>pulumi.set(self "type" value)<block_end><block_end>@pulumi.input_type<class_stmt>ServiceHealthCheckConfigArgs<block_start><def_stmt>__init__ __self__ * failure_threshold:Optional[pulumi.Input[int]]=<none> resource_path:Optional[pulumi.Input[str]]=<none> type:Optional[pulumi.Input[str]]=<none><block_start>"""
:param pulumi.Input[int] failure_threshold: The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
:param pulumi.Input[str] resource_path: The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
:param pulumi.Input[str] type: The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""<if_stmt>failure_threshold<is><not><none><block_start>pulumi.set(__self__ "failure_threshold" failure_threshold)<block_end><if_stmt>resource_path<is><not><none><block_start>pulumi.set(__self__ "resource_path" resource_path)<block_end><if_stmt>type<is><not><none><block_start>pulumi.set(__self__ "type" type)<block_end><block_end>@property@pulumi.getter(name="failureThreshold")<def_stmt>failure_threshold self<arrow>Optional[pulumi.Input[int]]<block_start>"""
The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""<line_sep><return>pulumi.get(self "failure_threshold")<block_end>@failure_threshold.setter<def_stmt>failure_threshold self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "failure_threshold" value)<block_end>@property@pulumi.getter(name="resourcePath")<def_stmt>resource_path self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
"""<line_sep><return>pulumi.get(self "resource_path")<block_end>@resource_path.setter<def_stmt>resource_path self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_path" value)<block_end>@property@pulumi.getter<def_stmt>type self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""<line_sep><return>pulumi.get(self "type")<block_end>@type.setter<def_stmt>type self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "type" value)<block_end><block_end>@pulumi.input_type<class_stmt>ServiceHealthCheckCustomConfigArgs<block_start><def_stmt>__init__ __self__ * failure_threshold:Optional[pulumi.Input[int]]=<none><block_start>"""
:param pulumi.Input[int] failure_threshold: The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""<if_stmt>failure_threshold<is><not><none><block_start>pulumi.set(__self__ "failure_threshold" failure_threshold)<block_end><block_end>@property@pulumi.getter(name="failureThreshold")<def_stmt>failure_threshold self<arrow>Optional[pulumi.Input[int]]<block_start>"""
The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""<line_sep><return>pulumi.get(self "failure_threshold")<block_end>@failure_threshold.setter<def_stmt>failure_threshold self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "failure_threshold" value)<block_end><block_end> |
"""Image utils."""<import_from_stmt>PIL Image<def_stmt>pil_loader path:str<arrow>Image<block_start>"""Load image from pathes.
Args:
path: Image path.
Returns:
Loaded PIL Image in rgb.
"""<with_stmt>open(path "rb")<as>f<block_start>img=Image.open(f)<line_sep><return>img.convert("RGB")<block_end><block_end> |
<import_from_stmt>matplotlib pyplot<as>plt<line_sep>plt.xkcd()<line_sep>ages_x=[18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55]<line_sep>py_dev_y=[20046 17100 20000 24744 30500 37732 41247 45372 48876 53850 57287 63016 65998 70003 70000 71496 75370 83640 84666 84392 78254 85000 87038 91991 100000 94796 97962 93302 99240 102736 112285 100771 104708 108423 101407 112542 122870 120000]<line_sep>plt.plot(ages_x py_dev_y label='Python')<line_sep>js_dev_y=[16446 16791 18942 21780 25704 29000 34372 37810 43515 46823 49293 53437 56373 62375 66674 68745 68746 74583 79000 78508 79996 80403 83820 88833 91660 87892 96243 90000 99313 91660 102264 100000 100000 91660 99240 108000 105000 104000]<line_sep>plt.plot(ages_x js_dev_y label='JavaScript')<line_sep>dev_y=[17784 16500 18012 20628 25206 30252 34368 38496 42000 46752 49320 53200 56000 62316 64928 67317 68748 73752 77232 78000 78508 79536 82488 88935 90000 90056 95000 90000 91633 91660 98150 98964 100000 98988 100000 108923 105000 103117]<line_sep>plt.plot(ages_x dev_y color='#444444' linestyle='--' label='All Devs')<line_sep>plt.xlabel('Ages')<line_sep>plt.ylabel('Median Salary (USD)')<line_sep>plt.title('Median Salary (USD) by Age')<line_sep>plt.legend()<line_sep>plt.tight_layout()<line_sep>plt.savefig('plot.png')<line_sep>plt.show()<line_sep> |
<import_stmt>os<import_from_stmt>string punctuation<import_from_stmt>.util is_module_available get_module_or_attr<line_sep>""" default paths """<line_sep>DEFAULT_DATA_PATH=os.path.join(os.path.split(__file__)[0] "../data")<line_sep>print(f"data folder is set to `{DEFAULT_DATA_PATH}` script")<if_stmt><not>os.path.exists(DEFAULT_DATA_PATH)<block_start>os.makedirs(DEFAULT_DATA_PATH)<block_end>DEFAULT_TRAINTEST_DATA_PATH=os.path.join(DEFAULT_DATA_PATH "traintest")<line_sep>ALLENNLP_ELMO_PRETRAINED_FOLDER=os.path.join(DEFAULT_DATA_PATH "allennlp_elmo_pretrained")<line_sep>""" special tokenizers """<line_sep>_SPACY_TOKENIZER,_SPACY_TAGGER=<none> <none><def_stmt>_load_spacy_tokenizer <block_start><global>_SPACY_TOKENIZER _SPACY_TAGGER<if_stmt><not>_SPACY_TOKENIZER<block_start><if_stmt>is_module_available("spacy")<block_start><if_stmt><not>is_module_available("en_core_web_sm")<block_start><raise>ImportError("run `python -m spacy download en_core_web_sm`")<block_end>print("creating spacy models ...")<line_sep>spacy_nlp=get_module_or_attr("en_core_web_sm").load(disable=["tagger" "ner" "lemmatizer"])<line_sep>_SPACY_TOKENIZER=<lambda>inp:[token.text<for>token spacy_nlp(inp)]<line_sep># spacy_nlp = get_module_or_attr("en_core_web_sm").load(disable=["ner", "lemmatizer"])
# _SPACY_TAGGER = lambda inp: [token.tag for token in spacy_nlp(inp)]
print("spacy models initialized")<block_end><else_stmt><block_start><raise>ImportError("`pip install spacy` to use spacy retokenizer")<block_end><block_end><return>_SPACY_TOKENIZER<block_end><def_stmt>_custom_tokenizer inp:str<block_start><try_stmt><block_start>_spacy_tokenizer=_load_spacy_tokenizer()<line_sep>get_tokens=<lambda>inp:_spacy_tokenizer(inp)<block_end><except_stmt>ImportError<as>e<block_start>print(e)<line_sep>get_tokens=<lambda>inp:inp.split()<block_end><def_stmt>_is_punct inp<block_start><return>all([i<in>punctuation<for>i inp])<block_end>tokens=get_tokens(inp)<line_sep>new_tokens=[]<line_sep>str_=""<for_stmt>token tokens<block_start><if_stmt>_is_punct(token)<block_start>str_<augadd>token<block_end><else_stmt><block_start>new_tokens.append(str_)<line_sep>str_=""<line_sep>new_tokens.append(token)<block_end><block_end><if_stmt>str_<block_start>new_tokens.append(str_)<block_end><return>" ".join(new_tokens)<block_end>spacy_tokenizer=_custom_tokenizer<line_sep> |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
<import_from_stmt>.instance_agent_command_execution_output_content InstanceAgentCommandExecutionOutputContent<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401
<import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>InstanceAgentCommandExecutionOutputViaTextDetails(InstanceAgentCommandExecutionOutputContent)<block_start>"""
The execution output from a command when returned in plain text.
"""<def_stmt>__init__ self **kwargs<block_start>"""
Initializes a new InstanceAgentCommandExecutionOutputViaTextDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.compute_instance_agent.models.InstanceAgentCommandExecutionOutputViaTextDetails.output_type` attribute
of this class is ``TEXT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param output_type:
The value to assign to the output_type property of this InstanceAgentCommandExecutionOutputViaTextDetails.
Allowed values for this property are: "TEXT", "OBJECT_STORAGE_URI", "OBJECT_STORAGE_TUPLE"
:type output_type: str
:param exit_code:
The value to assign to the exit_code property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type exit_code: int
:param message:
The value to assign to the message property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type message: str
:param text:
The value to assign to the text property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type text: str
:param text_sha256:
The value to assign to the text_sha256 property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type text_sha256: str
"""<line_sep>self.swagger_types={'output_type':'str' 'exit_code':'int' 'message':'str' 'text':'str' 'text_sha256':'str'}<line_sep>self.attribute_map={'output_type':'outputType' 'exit_code':'exitCode' 'message':'message' 'text':'text' 'text_sha256':'textSha256'}<line_sep>self._output_type=<none><line_sep>self._exit_code=<none><line_sep>self._message=<none><line_sep>self._text=<none><line_sep>self._text_sha256=<none><line_sep>self._output_type='TEXT'<block_end>@property<def_stmt>text self<block_start>"""
Gets the text of this InstanceAgentCommandExecutionOutputViaTextDetails.
The command output.
:return: The text of this InstanceAgentCommandExecutionOutputViaTextDetails.
:rtype: str
"""<line_sep><return>self._text<block_end>@text.setter<def_stmt>text self text<block_start>"""
Sets the text of this InstanceAgentCommandExecutionOutputViaTextDetails.
The command output.
:param text: The text of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type: str
"""<line_sep>self._text=text<block_end>@property<def_stmt>text_sha256 self<block_start>"""
Gets the text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
SHA-256 checksum value of the text content.
:return: The text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
:rtype: str
"""<line_sep><return>self._text_sha256<block_end>@text_sha256.setter<def_stmt>text_sha256 self text_sha256<block_start>"""
Sets the text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
SHA-256 checksum value of the text content.
:param text_sha256: The text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type: str
"""<line_sep>self._text_sha256=text_sha256<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end> |
<import_from_stmt>trading_ig IGService<import_from_stmt>trading_ig.config config<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>logger.setLevel(logging.INFO)<line_sep># if you need to cache to DB your requests
<import_from_stmt>datetime timedelta<import_stmt>requests_cache<import_from_stmt>predefined_functions.initialisation Initialisation<class_stmt>Order_Management()<block_start><def_stmt>__init__ self<block_start>logging.basicConfig(level=logging.INFO)<line_sep>self.log=logging.getLogger(__name__)<line_sep># set object and then set connection
self.initial=Initialisation()<line_sep>self.initialise_connection()<block_end><def_stmt>initialise_connection self<block_start>self.ig_service=self.initial.initialise_connection()<line_sep>self.ig_service.create_session()<block_end># limit orders
<def_stmt>create_working_order self direction epic size price stop_distance limit_distance force_open=<false><block_start>currency_code="GBP"<line_sep>direction=direction<line_sep>epic=epic<line_sep>expiry="DFB"<if_stmt>force_open<eq><true><block_start>guaranteed_stop=<false><block_end><else_stmt><block_start>guaranteed_stop=<true><block_end># entering price
level=price<line_sep># Pound per point size
size=size<line_sep>time_in_force="GOOD_TILL_CANCELLED"<line_sep># LIMIT orders are now STOP
order_type="STOP"<line_sep>limit_distance=limit_distance<line_sep>stop_distance=stop_distance<line_sep># currency_code = "GBP"
# direction = "SELL"
# epic = "CS.D.BITCOIN.TODAY.IP"
# expiry = "DFB"
# guaranteed_stop = False
# # entering price
# level = 13109
# # Pound per point size
# size = 0.50
# time_in_force = "GOOD_TILL_CANCELLED"
# order_type = "LIMIT"
# limit_distance = 4000.0
# stop_distance = 160.0
# """Creates an OTC working order"""
<try_stmt><block_start>response=self.ig_service.create_working_order(currency_code=currency_code direction=direction epic=epic expiry=expiry guaranteed_stop=guaranteed_stop level=level size=size time_in_force=time_in_force order_type=order_type limit_distance=limit_distance stop_distance=stop_distance force_open=force_open)<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when creating a working order")<block_end><return><none><block_end># market orders
<def_stmt>create_open_position self direction epic size limit_distance stop_distance force_open<block_start>currency_code="GBP"<line_sep>direction=direction<line_sep>epic=epic<line_sep>expiry="DFB"<line_sep># no matter what you are doing force open always has to be True other wise stop losses do not work
force_open=force_open<if_stmt>force_open<block_start>guaranteed_stop=<false><block_end><else_stmt><block_start>guaranteed_stop=<true><block_end>stop_distance=stop_distance<line_sep>size=size<line_sep>trailing_stop=<false><line_sep>trailing_stop_increment=<none><line_sep>trailing_stop_distance=<none><line_sep>time_in_force="FILL_OR_KILL"<line_sep>order_type="MARKET"<line_sep>limit_distance=limit_distance<try_stmt><block_start>response=self.ig_service.create_open_position(currency_code=currency_code direction=direction epic=epic expiry=expiry # no matter what you are doing force open always has to be True other wise stop losses do not work
force_open=<true> guaranteed_stop=guaranteed_stop stop_distance=stop_distance size=size trailing_stop=trailing_stop trailing_stop_increment=trailing_stop_increment # trailing_stop_distance = trailing_stop_distance,
# time_in_force=time_in_force,
order_type=order_type limit_distance=limit_distance)<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when opening a position")<block_end><return><none><block_end># market orders to close positions
<def_stmt>close_open_position self position size# set randomly
<block_start><try_stmt><block_start>direction="BUY"<line_sep>position_direction=position["direction"]<if_stmt>position_direction<eq>"BUY"<block_start>direction="SELL"<block_end>deal_id=position["dealId"]<line_sep>order_type="MARKET"<line_sep>size=size<line_sep>response=self.ig_service.close_open_position(deal_id=deal_id direction=direction order_type=order_type size=size)<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when closing position")<block_end><return><none><block_end><def_stmt>delete_working_order self deal_id<block_start><try_stmt><block_start>deal_id=deal_id<line_sep>response=self.ig_service.delete_working_order(deal_id)<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when deleting working order")<block_end><return><none><block_end><def_stmt>update_position self limit_level stop_level deal_id guaranteed_stop<block_start>limit_level=limit_level<line_sep>guaranteed_stop=guaranteed_stop<line_sep>stop_level=stop_level<line_sep>deal_id=deal_id<line_sep>trailing_stop=<false><line_sep>trailing_stop_distance=<none><line_sep>trailing_stop_increment=<none><try_stmt><block_start>response=self.ig_service.update_open_position(limit_level=limit_level stop_level=stop_level # guaranteed_stop=guaranteed_stop,
deal_id=deal_id # trailing_stop=trailing_stop,
# trailing_stop_distance=trailing_stop_distance,
# trailing_stop_increment=trailing_stop_increment
)<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when updating position or maybe the order is no longer open")<block_end><return><none><block_end><def_stmt>get_open_positions self<block_start><while_stmt>(<true>)<block_start><try_stmt><block_start><return>self.ig_service.fetch_open_positions()<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when getting open positions")<line_sep># resets the connection
self.initialise_connection()<block_end><block_end><block_end><def_stmt>get_working_orders self<block_start><while_stmt>(<true>)<block_start><try_stmt><block_start><return>self.ig_service.fetch_working_orders()<block_end><except_stmt>Exception<as>e<block_start>self.log.info(str(e)+" error occurred when getting working orders")<line_sep>self.initialise_connection()<block_end><block_end><block_end><block_end> |
<import_stmt>time<import_stmt>copy<import_stmt>logging<import_from_stmt>milvus_benchmark parser<import_from_stmt>milvus_benchmark.runners utils<import_from_stmt>milvus_benchmark.runners.base BaseRunner<line_sep>logger=logging.getLogger("milvus_benchmark.runners.get")<def_stmt>get_ids length size<block_start>ids_list=[]<line_sep>step=size<floordiv>length<for_stmt>i range(length)<block_start>ids_list.append(step<times>i)<block_end><return>ids_list<block_end><class_stmt>GetRunner(BaseRunner)<block_start>"""run get"""<line_sep>name="get_performance"<def_stmt>__init__ self env metric<block_start>super(GetRunner self).__init__(env metric)<block_end><def_stmt>extract_cases self collection<block_start>collection_name=collection["collection_name"]<if>"collection_name"<in>collection<else><none><line_sep>(data_type collection_size dimension metric_type)=parser.collection_parser(collection_name)<line_sep>ni_per=collection["ni_per"]<line_sep>vector_type=utils.get_vector_type(data_type)<line_sep>other_fields=collection["other_fields"]<if>"other_fields"<in>collection<else><none><line_sep>ids_length_list=collection["ids_length_list"]<line_sep>collection_info={"dimension":dimension "metric_type":metric_type "dataset_name":collection_name "collection_size":collection_size "other_fields":other_fields "ni_per":ni_per}<line_sep>index_field_name=utils.get_default_field_name(vector_type)<line_sep>index_type=collection["index_type"]<line_sep>index_param=collection["index_param"]<line_sep>index_info={"index_type":index_type "index_param":index_param}<line_sep>flush=<true><if_stmt>"flush"<in>collection<and>collection["flush"]<eq>"no"<block_start>flush=<false><block_end>self.init_metric(self.name collection_info index_info search_info=<none>)<line_sep>case_metrics=list()<for_stmt>ids_length ids_length_list<block_start>ids=get_ids(ids_length 
collection_size)<line_sep>case_metric=copy.deepcopy(self.metric)<line_sep>case_metric.set_case_metric_type()<line_sep>case_params=list()<line_sep>case_metric.run_params={"ids_length":ids_length}<line_sep>case_metrics.append(case_metric)<line_sep>case_param={"collection_name":collection_name "data_type":data_type "dimension":dimension "collection_size":collection_size "ni_per":ni_per "metric_type":metric_type "vector_type":vector_type "other_fields":other_fields "flush_after_insert":flush "index_field_name":index_field_name "index_type":index_type "index_param":index_param "ids":ids}<line_sep>case_params.append(case_param)<block_end><return>case_params case_metrics<block_end><def_stmt>prepare self **case_param<block_start>collection_name=case_param["collection_name"]<line_sep>self.milvus.set_collection(collection_name)<if_stmt><not>self.milvus.exists_collection()<block_start>logger.info("collection not exist")<block_end>logger.debug({"collection count":self.milvus.count()})<block_end><def_stmt>run_case self case_metric **case_param<block_start>ids=case_param["ids"]<line_sep>start_time=time.time()<line_sep>self.milvus.get(ids)<line_sep>get_time=round(time.time()-start_time 2)<line_sep>tmp_result={"get_time":get_time}<line_sep><return>tmp_result<block_end><block_end><class_stmt>InsertGetRunner(GetRunner)<block_start>"""run insert and get"""<line_sep>name="insert_get_performance"<def_stmt>__init__ self env metric<block_start>super(InsertGetRunner self).__init__(env metric)<block_end><def_stmt>prepare self **case_param<block_start>collection_name=case_param["collection_name"]<line_sep>dimension=case_param["dimension"]<line_sep>vector_type=case_param["vector_type"]<line_sep>other_fields=case_param["other_fields"]<line_sep>self.milvus.set_collection(collection_name)<if_stmt>self.milvus.exists_collection()<block_start>logger.debug("Start drop 
collection")<line_sep>self.milvus.drop()<line_sep>time.sleep(utils.DELETE_INTERVAL_TIME)<block_end>self.milvus.create_collection(dimension data_type=vector_type other_fields=other_fields)<line_sep>self.insert(self.milvus collection_name case_param["data_type"] dimension case_param["collection_size"] case_param["ni_per"])<line_sep>start_time=time.time()<line_sep>self.milvus.flush()<line_sep>flush_time=round(time.time()-start_time 2)<line_sep>logger.debug({"collection count":self.milvus.count()})<line_sep>logger.debug({"flush_time":flush_time})<line_sep>logger.debug("Start load collection")<line_sep>self.milvus.load_collection(timeout=1200)<line_sep>logger.debug("Load collection end")<block_end><block_end> |
""" A config only for reproducing the ScanNet evaluation results.
We remove border matches by default, but the originally implemented
`remove_border()` has a bug, leading to only two sides of
all borders are actually removed. However, the [bug fix](https://github.com/zju3dv/LoFTR/commit/e9146c8144dea5f3cbdd98b225f3e147a171c216)
makes the scannet evaluation results worse (auc@10=40.8 => 39.5), which should be
caused by tiny result fluctuation of few image pairs. This config set `BORDER_RM` to 0
to be consistent with the results in our paper.
Update: This config is for testing the re-trained model with the pos-enc bug fixed.
"""<import_from_stmt>src.config.default _CN<as>cfg<line_sep>cfg.LOFTR.COARSE.TEMP_BUG_FIX=<true><line_sep>cfg.LOFTR.MATCH_COARSE.MATCH_TYPE='dual_softmax'<line_sep>cfg.LOFTR.MATCH_COARSE.BORDER_RM=0<line_sep> |
<import_from_stmt>textbox.data.dataloader.abstract_dataloader AbstractDataLoader<import_from_stmt>textbox.data.dataloader.single_sent_dataloader SingleSentenceDataLoader<import_from_stmt>textbox.data.dataloader.paired_sent_dataloader PairedSentenceDataLoader<import_from_stmt>textbox.data.dataloader.attr_sent_dataloader AttributedSentenceDataLoader<import_from_stmt>textbox.data.dataloader.kg_sent_dataloader KGSentenceDataLoader<import_from_stmt>textbox.data.dataloader.wikibio_sent_dataloader WikiBioSentenceDataLoader<import_from_stmt>textbox.data.dataloader.rotowire_sent_dataloader RotoWireSentenceDataLoader<line_sep> |
<import_from_stmt>.assetmapper AssetMapper<import_from_stmt>.assetfactory AssetFactory<import_from_stmt>.sqlserver SqlServerTableMapper<line_sep> |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
<import_from_stmt>dataclasses dataclass<import_from_stmt>operator floordiv<import_from_stmt>pathlib Path<import_from_stmt>re search<import_from_stmt>textwrap indent<import_from_stmt>typing Iterable List Mapping Optional Sequence Tuple<import_from_stmt>.bench_file change_path_machine get_this_machine Machine MACHINE_DOC parse_machines_arg <import_from_stmt>.get_built get_built Built<import_from_stmt>.collection_util empty_mapping is_empty<import_from_stmt>.command Command CommandKind CommandsMapping<import_from_stmt>.config HOST_INFO_PATH<import_from_stmt>.option map_option map_option_2<import_from_stmt>.parse_and_serialize load_yaml parse_yaml to_yaml write_yaml_file<import_from_stmt>.type_utils argument check_cast with_slots<import_from_stmt>.util ensure_dir ExecArgs exec_and_get_output gb_to_kb get_hostname get_os kb_to_bytes kb_to_mb mb_to_kb mhz_to_ghz OS remove_str_end try_remove_str_start <line_sep>@with_slots@dataclass(frozen=<true>)<class_stmt>CacheInfoForLevel# Values are None if unknown
<block_start>n_caches:Optional[int]=<none><line_sep>total_bytes:Optional[int]=<none><line_sep>@property<def_stmt>average_bytes self<arrow>Optional[int]<block_start><return>map_option_2(self.total_bytes self.n_caches floordiv)<block_end><block_end>@with_slots@dataclass(frozen=<true>)<class_stmt>CacheInfo<block_start>l1:CacheInfoForLevel<line_sep>l2:CacheInfoForLevel<line_sep>l3:CacheInfoForLevel<block_end>@with_slots@dataclass(frozen=<true>)<class_stmt>Range# Both inclusive
<block_start>lo:int<line_sep>hi:int<def_stmt>with_hi self new_hi:int<arrow>"Range"<block_start><return>Range(self.lo new_hi)<block_end><block_end>@with_slots@dataclass(frozen=<true>)<class_stmt>NumaNodeInfo<block_start>numa_node_number:int<line_sep>ranges:Sequence[Range]<line_sep># None on non-Windows
cpu_group_number:Optional[int]=<none><block_end>@with_slots@dataclass(frozen=<true>)<class_stmt>HostInfo# All values are None if unknown
<block_start>hostname:str<line_sep>n_physical_processors:int<line_sep>n_logical_processors:int<line_sep>numa_nodes:Sequence[NumaNodeInfo]<line_sep>cache_info:CacheInfo<line_sep>clock_ghz:Optional[float]=<none><line_sep>total_physical_memory_mb:Optional[int]=<none><block_end>@with_slots@dataclass(frozen=<true>)<class_stmt>_NumaNodesAndCacheInfo<block_start>numa_nodes:Sequence[NumaNodeInfo]<line_sep>n_physical_processors:int<line_sep>n_logical_processors:int<line_sep>caches:CacheInfo<block_end><def_stmt>_get_total_physical_memory_mb_windows <arrow>int<block_start>output=exec_and_get_output(ExecArgs(("systeminfo" ) quiet_print=<true>))<for_stmt>line output.splitlines()<block_start>tot=try_remove_str_start(line "Total Physical Memory:")<if_stmt>tot<is><not><none><block_start>mem=remove_str_end(tot "MB")<line_sep><return>int(mem.replace("," ""))<block_end><block_end><raise>Exception("Didn't find total physical memory")<block_end><def_stmt>_get_host_info built:Built<arrow>HostInfo<block_start><return>{OS.posix:<lambda>_:_get_host_info_posix() OS.windows:_get_host_info_windows}[get_os()](built)<block_end>_UNKNOWN_MSG:str="unknown"<def_stmt>_get_host_info_posix <arrow>HostInfo# lscpu output is a bunch of lines all of the form key: value. Make a dict from that.
<block_start>dct=_parse_keys_values_lines(exec_and_get_output(ExecArgs(("lscpu" ) quiet_print=<true>)))<def_stmt>get_opt name:str<arrow>Optional[str]<block_start><return>dct.get(name <none>)<block_end><def_stmt>get_int name:str<arrow>int<block_start><return>int(dct[name])<block_end><def_stmt>get_opt_float name:str<arrow>Optional[float]<block_start><return>map_option(get_opt(name) float)<block_end><def_stmt>get_opt_kb name:str<arrow>Optional[int]<block_start>opt=get_opt(name)<if_stmt>opt<is><not><none><and>_UNKNOWN_MSG<in>opt.lower()<block_start><return><none><block_end># Retrieve the size of the given memory option string, and return it
# in KB. Nowadays, it is not uncommon for machines to have MB, or in
# more powerful cases, GB of cache, so we need to do the conversion
# as necessary since the infra expects to receive KB.
#
# First we extract the size units name to determine how to convert them
# if necessary. Since we don't know about every single machine's rules
# for capitalization, we'll convert to lowercase just to be safe.
<assert_stmt>opt<is><not><none><line_sep>size_units=opt.rsplit(" " 1)[-1].lower()<line_sep>converted_kb=0.0<if_stmt>size_units<in>["k" "kib"]<block_start>converted_kb=int(remove_str_end(opt.lower() size_units))<line_sep><return>map_option(converted_kb <lambda>n:n)<block_end><elif_stmt>size_units<in>["m" "mib"]<block_start>converted_kb=mb_to_kb(float(remove_str_end(opt.lower() size_units)))<line_sep><return>map_option(converted_kb <lambda>n:int(n))# pylint: disable=W0108
<block_end><elif_stmt>size_units<in>["g" "gib"]<block_start>converted_kb=gb_to_kb(float(remove_str_end(opt.lower() size_units)))<line_sep><return>map_option(converted_kb <lambda>n:int(n))# pylint: disable=W0108
<block_end><else_stmt><block_start><raise>Exception(f"Unrecognized size units '{size_units}'")<block_end><block_end># Note: "CPU MHz" is the *current* cpu rate which varies. Going with max here.
# TODO: Max is probably wrong, we want a typical value.
clock_ghz=map_option(get_opt_float("CPU max MHz") mhz_to_ghz)<line_sep>sockets=get_int("Socket(s)")<line_sep>cores=get_int("Core(s) per socket")<line_sep>threads=get_int("Thread(s) per core")<line_sep>n_physical_processors=sockets<times>cores<line_sep>n_logical_processors=n_physical_processors<times>threads<line_sep>l1d_cache_kb=get_opt_kb("L1d cache")<line_sep>l1i_cache_kb=get_opt_kb("L1i cache")<line_sep>l2_cache_kb=get_opt_kb("L2 cache")<line_sep>l3_cache_kb=get_opt_kb("L3 cache")<line_sep>x=_parse_keys_values_lines((Path("/proc")/"meminfo").read_text())<line_sep>total_physical_memory_mb=round(kb_to_mb(float(remove_str_end(x["MemTotal"] " kB"))))<line_sep>numa_nodes=_get_numa_nodes_posix()<line_sep><return>HostInfo(hostname=get_hostname() n_physical_processors=n_physical_processors n_logical_processors=n_logical_processors numa_nodes=numa_nodes cache_info=CacheInfo(# TODO: figure out how to determine number of caches on posix
l1=CacheInfoForLevel(n_caches=<none> total_bytes=map_option_2(l1d_cache_kb l1i_cache_kb <lambda>a b:kb_to_bytes(a+b)) ) l2=CacheInfoForLevel(n_caches=<none> total_bytes=map_option(l2_cache_kb kb_to_bytes)) l3=CacheInfoForLevel(n_caches=<none> total_bytes=map_option(l3_cache_kb kb_to_bytes)) ) clock_ghz=clock_ghz total_physical_memory_mb=total_physical_memory_mb )<block_end><def_stmt>_get_numa_nodes_posix <arrow>Sequence[NumaNodeInfo]<block_start><return>tuple(_parse_numa_nodes_posix(exec_and_get_output(ExecArgs(("numactl" "--hardware") quiet_print=<true>))))<block_end><def_stmt>_parse_numa_nodes_posix s:str<arrow>Iterable[NumaNodeInfo]<block_start><for_stmt>line s.splitlines()<block_start>res=search(r"^node (\d+) cpus: " line)<if_stmt>res<is><not><none><block_start>node_number=int(res.group(1))<line_sep><yield>NumaNodeInfo(numa_node_number=node_number cpu_group_number=<none> ranges=_ranges_from_numbers([int(x)<for>x line[res.span()[1]:].split()]) )<block_end><block_end><block_end><def_stmt>_ranges_from_numbers ns:Iterable[int]<arrow>Sequence[Range]<block_start>ranges:List[Range]=[]<for_stmt>n ns<block_start><if_stmt>is_empty(ranges)<or>n<ne>ranges[-1].hi+1<block_start>ranges.append(Range(n n))<block_end><else_stmt><block_start>ranges.append(ranges.pop().with_hi(n))<block_end><block_end><return>ranges<block_end><def_stmt>_parse_keys_values_lines s:str<arrow>Mapping[str str]<block_start><return>{k:v<for>line s.split("\n")<if>line<ne>""<for>k,v (_split_line(line) )}<block_end><def_stmt>_split_line line:str<arrow>Tuple[str str]<block_start>parts=line.split(":")<assert_stmt>len(parts)<eq>2<line_sep>l,r=parts<line_sep><return>l.strip() r.strip()<block_end><def_stmt>_get_host_info_windows built:Built<arrow>HostInfo<block_start>total_physical_memory_mb=_get_total_physical_memory_mb_windows()<line_sep>info_from_c=parse_yaml(_NumaNodesAndCacheInfo exec_and_get_output(ExecArgs((str(built.win.get_host_info_exe) ) quiet_print=<true>)) 
)<line_sep><return>HostInfo(hostname=get_hostname() clock_ghz=_get_clock_ghz_windows() total_physical_memory_mb=total_physical_memory_mb n_physical_processors=info_from_c.n_physical_processors n_logical_processors=info_from_c.n_logical_processors numa_nodes=info_from_c.numa_nodes cache_info=info_from_c.caches )<block_end><def_stmt>_get_clock_ghz_windows <arrow>float# Import lazily as this is only available on Windows
# pylint:disable=import-outside-toplevel
<block_start><import_from_stmt>winreg ConnectRegistry HKEY_LOCAL_MACHINE OpenKey QueryValueEx<line_sep>registry=ConnectRegistry(<none> HKEY_LOCAL_MACHINE)<line_sep>key=OpenKey(registry "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0")<line_sep>mhz,_=QueryValueEx(key "~MHz")<line_sep>ghz=mhz_to_ghz(check_cast(float mhz))<assert_stmt>0<l>ghz<l>10<line_sep><return>ghz<block_end><def_stmt>read_host_info_for_machine machine:Machine<arrow>HostInfo<block_start><return>_read_host_info_at_path(change_path_machine(HOST_INFO_PATH machine))<block_end><def_stmt>_read_host_info_at_path path:Path<arrow>HostInfo<block_start><try_stmt><block_start><return>load_yaml(HostInfo path)<block_end><except_stmt>FileNotFoundError<block_start><raise>Exception(f"Host info not found at {path}. Did you forget to run 'write-host-info'?")<from><none><block_end><block_end><def_stmt>read_this_machines_host_info <arrow>HostInfo<block_start><return>read_host_info_for_machine(get_this_machine())<block_end><def_stmt>write_host_info <arrow><none><block_start>built=get_built(coreclrs=empty_mapping())<line_sep>ensure_dir(HOST_INFO_PATH.parent)<line_sep>write_yaml_file(HOST_INFO_PATH _get_host_info(built) overwrite=<true>)<line_sep>print(f"Wrote to {HOST_INFO_PATH}")<block_end><def_stmt>print_host_info <arrow><none><block_start>print(to_yaml(read_this_machines_host_info()))<block_end>@with_slots@dataclass(frozen=<true>)<class_stmt>PrintAllHostInfosArgs<block_start>machines:Sequence[str]=argument(doc=MACHINE_DOC)<block_end><def_stmt>print_all_host_infos args:PrintAllHostInfosArgs<arrow><none><block_start><for_stmt>machine parse_machines_arg(args.machines)<block_start>print(machine)<line_sep>hi=read_host_info_for_machine(machine)<line_sep>print(indent(to_yaml(hi) " "))<block_end><block_end>HOST_INFO_COMMANDS:CommandsMapping={"print-host-info":Command(hidden=<true> kind=CommandKind.infra fn=print_host_info doc="""Print info about this machine generated from write-host-info""" ) # Hidden because 'setup' 
does this already.
# Though it's useful to run again if the code for getting host-info is modified.
"write-host-info":Command(hidden=<true> kind=CommandKind.infra fn=write_host_info doc=f"Write host info to {HOST_INFO_PATH}." ) "print-all-host-infos":Command(kind=CommandKind.infra fn=print_all_host_infos doc="Fetch and print host info for all machines." priority=1 ) }<line_sep> |
#
# This file is part of LiteX (Adapted from Migen for LiteX usage).
#
# This file is Copyright (c) 2013-2014 <NAME> <<EMAIL>>
# This file is Copyright (c) 2013-2021 <NAME> <<EMAIL>>
# This file is Copyright (c) 2013-2017 <NAME> <<EMAIL>>
# This file is Copyright (c) 2016-2018 whitequark <<EMAIL>>
# This file is Copyright (c) 2017 <NAME> <<EMAIL>>
# This file is Copyright (c) 2016 <NAME> <<EMAIL>>
# This file is Copyright (c) 2018 <NAME> <<EMAIL>>
# This file is Copyright (c) 2015 <NAME> <<EMAIL>>
# This file is Copyright (c) 2013 <NAME> <<EMAIL>>
# This file is Copyright (c) 2018 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
<import_from_stmt>functools partial<import_from_stmt>operator itemgetter<import_stmt>collections<import_from_stmt>migen.fhdl.structure *<import_from_stmt>migen.fhdl.structure _Operator _Slice _Assign _Fragment<import_from_stmt>migen.fhdl.tools *<import_from_stmt>migen.fhdl.namer build_namespace<import_from_stmt>migen.fhdl.conv_output ConvOutput<import_from_stmt>migen.fhdl.specials Memory<import_from_stmt>litex.gen.fhdl.memory memory_emit_verilog<import_from_stmt>litex.build.tools generated_banner<line_sep># ------------------------------------------------------------------------------------------------ #
# RESERVED KEYWORDS #
# ------------------------------------------------------------------------------------------------ #
_ieee_1800_2017_verilog_reserved_keywords={"accept_on" "alias" "always" "always_comb" "always_ff" "always_latch" "and" "assert" "assign" "assume" "automatic" "before" "begin" "bind" "bins" "binsof" "bit" "break" "buf" "bufif0" "bufif1" "byte" "case" "casex" "casez" "cell" "chandle" "checker" "class" "clocking" "cmos" "config" "const" "constraint" "context" "continue" "cover" "covergroup" "coverpoint" "cross" "deassign" "default" "defparam" "design" "disable" "dist" "do" "edge" "else" "end" "endcase" "endchecker" "endclass" "endclocking" "endconfig" "endfunction" "endgenerate" "endgroup" "endinterface" "endmodule" "endpackage" "endprimitive" "endprogram" "endproperty" "endsequence" "endspecify" "endtable" "endtask" "enum" "event" "eventually" "expect" "export" "extends" "extern" "final" "first_match" "for" "force" "foreach" "forever" "fork" "forkjoin" "function" "generate" "genvar" "global" "highz0" "highz1" "if" "iff" "ifnone" "ignore_bins" "illegal_bins" "implements" "implies" "import" "incdir" "include" "initial" "inout" "input" "inside" "instance" "int" "integer" "interconnect" "interface" "intersect" "join" "join_any" "join_none" "large" "let" "liblist" "library" "local" "localparam" "logic" "longint" "macromodule" "matches" "medium" "modport" "module" "nand" "negedge" "nettype" "new" "nexttime" "nmos" "nor" "noshowcancelled" "not" "notif0" "notif1" "null" "or" "output" "package" "packed" "parameter" "pmos" "posedge" "primitive" "priority" "program" "property" "protected" "pull0" "pull1" "pulldown" "pullup" "pulsestyle_ondetect" "pulsestyle_onevent" "pure" "rand" "randc" "randcase" "randsequence" "rcmos" "real" "realtime" "ref" "reg" "reject_on" "release" " repeat" "restrict" "return" "rnmos" "rpmos" "rtran" "rtranif0" "rtranif1" "s_always" "s_eventually" "s_nexttime" "s_until" "s_until_with" "scalared" "sequence" "shortint" "shortreal" "showcancelled" "signed" "small" "soft" "solve" "specify" "specparam" "static" "string" "strong" "strong0" "strong1" "struct" 
"super" "supply0" "supply1" "sync_accept_on" "sync_reject_on" "table" "tagged" "task" "this" "throughout" "time" "timeprecision" "timeunit" "tran" "tranif0" "tranif1" "tri" "tri0" "tri1" "triand" "trior" "trireg" "type" "typedef" " union" "unique" "unique0" "unsigned" "until" "until_with" "untyped" "use" " uwire" "var" "vectored" "virtual" "void" "wait" "wait_order" "wand" "weak" "weak0" "weak1" "while" "wildcard" "wire" "with" "within" "wor" "xnor" "xor" }<line_sep># ------------------------------------------------------------------------------------------------ #
# EXPRESSIONS #
# ------------------------------------------------------------------------------------------------ #
# Print Constant -----------------------------------------------------------------------------------
<def_stmt>_print_constant node<block_start><return>"{sign}{bits}'d{value}".format(sign=""<if>node.value<ge>0<else>"-" bits=str(node.nbits) value=abs(node.value) ) node.signed<block_end># Print Signal -------------------------------------------------------------------------------------
<def_stmt>_print_signal ns s<block_start><return>"{signed}{vector}{name}".format(signed=""<if>(<not>s.signed)<else>"signed " vector=""<if>(len(s)<le>1)<else>f"[{str(len(s)-1)}:0] " name=ns.get_name(s))<block_end># Print Operator -----------------------------------------------------------------------------------
(UNARY BINARY TERNARY)=(1 2 3)<def_stmt>_print_operator ns node<block_start>operator=node.op<line_sep>operands=node.operands<line_sep>arity=len(operands)<assert_stmt>arity<in>[UNARY BINARY TERNARY]<def_stmt>to_signed r<block_start><return>f"$signed({{1'd0, {r}}})"<block_end># Unary Operator.
<if_stmt>arity<eq>UNARY<block_start>r1,s1=_print_expression(ns operands[0])<line_sep># Negation Operator.
<if_stmt>operator<eq>"-"# Negate and convert to signed if not already.
<block_start>r="-"+(r1<if>s1<else>to_signed(r1))<line_sep>s=<true><block_end># Other Operators.
<else_stmt><block_start>r=operator+r1<line_sep>s=s1<block_end><block_end># Binary Operator.
<if_stmt>arity<eq>BINARY<block_start>r1,s1=_print_expression(ns operands[0])<line_sep>r2,s2=_print_expression(ns operands[1])<line_sep># Convert all expressions to signed when at least one is signed.
<if_stmt>operator<not><in>["<<<" ">>>"]<block_start><if_stmt>s2<and><not>s1<block_start>r1=to_signed(r1)<block_end><if_stmt>s1<and><not>s2<block_start>r2=to_signed(r2)<block_end><block_end>r=f"{r1} {operator} {r2}"<line_sep>s=s1<or>s2<block_end># Ternary Operator.
<if_stmt>arity<eq>TERNARY<block_start><assert_stmt>operator<eq>"m"<line_sep>r1,s1=_print_expression(ns operands[0])<line_sep>r2,s2=_print_expression(ns operands[1])<line_sep>r3,s3=_print_expression(ns operands[2])<line_sep># Convert all expressions to signed when at least one is signed.
<if_stmt>s2<and><not>s3<block_start>r3=to_signed(r3)<block_end><if_stmt>s3<and><not>s2<block_start>r2=to_signed(r2)<block_end>r=f"{r1} ? {r2} : {r3}"<line_sep>s=s2<or>s3<block_end><return>f"({r})" s<block_end># Print Slice --------------------------------------------------------------------------------------
<def_stmt>_print_slice ns node<block_start><assert_stmt>(node.stop-node.start)<ge>1<if_stmt>(isinstance(node.value Signal)<and>len(node.value)<eq>1)<block_start><assert_stmt>node.start<eq>0<line_sep>sr=""# Avoid slicing 1-bit Signals.
<block_end><else_stmt><block_start>sr=f"[{node.stop-1}:{node.start}]"<if>(node.stop-node.start)<g>1<else>f"[{node.start}]"<block_end>r,s=_print_expression(ns node.value)<line_sep><return>r+sr s<block_end># Print Cat ----------------------------------------------------------------------------------------
<def_stmt>_print_cat ns node<block_start>l=[_print_expression(ns v)[0]<for>v reversed(node.l)]<line_sep><return>"{"+", ".join(l)+"}" <false><block_end># Print Replicate ----------------------------------------------------------------------------------
<def_stmt>_print_replicate ns node<block_start><return>"{"+str(node.n)+"{"+_print_expression(ns node.v)[0]+"}}" <false><block_end># Print Expression ---------------------------------------------------------------------------------
<def_stmt>_print_expression ns node# Constant.
<block_start><if_stmt>isinstance(node Constant)<block_start><return>_print_constant(node)<block_end># Signal.
<elif_stmt>isinstance(node Signal)<block_start><return>ns.get_name(node) node.signed<block_end># Operator.
<elif_stmt>isinstance(node _Operator)<block_start><return>_print_operator(ns node)<block_end># Slice.
<elif_stmt>isinstance(node _Slice)<block_start><return>_print_slice(ns node)<block_end># Cat.
<elif_stmt>isinstance(node Cat)<block_start><return>_print_cat(ns node)<block_end># Replicate.
<elif_stmt>isinstance(node Replicate)<block_start><return>_print_replicate(ns node)<block_end># Unknown.
<else_stmt><block_start><raise>TypeError(f"Expression of unrecognized type: '{type(node).__name__}'")<block_end><block_end># ------------------------------------------------------------------------------------------------ #
# NODES #
# ------------------------------------------------------------------------------------------------ #
(_AT_BLOCKING _AT_NONBLOCKING _AT_SIGNAL)=range(3)<def_stmt>_print_node ns at level node target_filter=<none><block_start><if_stmt>target_filter<is><not><none><and>target_filter<not><in>list_targets(node)<block_start><return>""<block_end># Assignment.
<elif_stmt>isinstance(node _Assign)<block_start><if_stmt>at<eq>_AT_BLOCKING<block_start>assignment=" = "<block_end><elif_stmt>at<eq>_AT_NONBLOCKING<block_start>assignment=" <= "<block_end><elif_stmt>is_variable(node.l)<block_start>assignment=" = "<block_end><else_stmt><block_start>assignment=" <= "<block_end><return>"\t"<times>level+_print_expression(ns node.l)[0]+assignment+_print_expression(ns node.r)[0]+";\n"<block_end># Iterable.
<elif_stmt>isinstance(node collections.abc.Iterable)<block_start><return>"".join(_print_node(ns at level n target_filter)<for>n node)<block_end># If.
<elif_stmt>isinstance(node If)<block_start>r="\t"<times>level+"if ("+_print_expression(ns node.cond)[0]+") begin\n"<line_sep>r<augadd>_print_node(ns at level+1 node.t target_filter)<if_stmt>node.f<block_start>r<augadd>"\t"<times>level+"end else begin\n"<line_sep>r<augadd>_print_node(ns at level+1 node.f target_filter)<block_end>r<augadd>"\t"<times>level+"end\n"<line_sep><return>r<block_end># Case.
<elif_stmt>isinstance(node Case)<block_start><if_stmt>node.cases<block_start>r="\t"<times>level+"case ("+_print_expression(ns node.test)[0]+")\n"<line_sep>css=[(k v)<for>k,v node.cases.items()<if>isinstance(k Constant)]<line_sep>css=sorted(css key=<lambda>x:x[0].value)<for_stmt>choice,statements css<block_start>r<augadd>"\t"<times>(level+1)+_print_expression(ns choice)[0]+": begin\n"<line_sep>r<augadd>_print_node(ns at level+2 statements target_filter)<line_sep>r<augadd>"\t"<times>(level+1)+"end\n"<block_end><if_stmt>"default"<in>node.cases<block_start>r<augadd>"\t"<times>(level+1)+"default: begin\n"<line_sep>r<augadd>_print_node(ns at level+2 node.cases["default"] target_filter)<line_sep>r<augadd>"\t"<times>(level+1)+"end\n"<block_end>r<augadd>"\t"<times>level+"endcase\n"<line_sep><return>r<block_end><else_stmt><block_start><return>""<block_end><block_end># Display.
<elif_stmt>isinstance(node Display)<block_start>s="\""+node.s+"\""<for_stmt>arg node.args<block_start>s<augadd>", "<if_stmt>isinstance(arg Signal)<block_start>s<augadd>ns.get_name(arg)<block_end><else_stmt><block_start>s<augadd>str(arg)<block_end><block_end><return>"\t"<times>level+"$display("+s+");\n"<block_end># Finish.
<elif_stmt>isinstance(node Finish)<block_start><return>"\t"<times>level+"$finish;\n"<block_end># Unknown.
<else_stmt><block_start><raise>TypeError(f"Node of unrecognized type: {str(type(node))}")<block_end><block_end># ------------------------------------------------------------------------------------------------ #
# ATTRIBUTES #
# ------------------------------------------------------------------------------------------------ #
<def_stmt>_print_attribute attr attr_translate<block_start>r=""<line_sep>firsta=<true><for_stmt>attr sorted(attr key=<lambda>x:("" x)<if>isinstance(x str)<else>x)<block_start><if_stmt>isinstance(attr tuple)# platform-dependent attribute
<block_start>attr_name,attr_value=attr<block_end><else_stmt># translated attribute
<block_start>at=attr_translate.get(attr <none>)<if_stmt>at<is><none><block_start><continue><block_end>attr_name,attr_value=at<block_end><if_stmt><not>firsta<block_start>r<augadd>", "<block_end>firsta=<false><line_sep>const_expr="\""+attr_value+"\""<if><not>isinstance(attr_value int)<else>str(attr_value)<line_sep>r<augadd>attr_name+" = "+const_expr<block_end><if_stmt>r<block_start>r="(* "+r+" *)"<block_end><return>r<block_end># ------------------------------------------------------------------------------------------------ #
# MODULE #
# ------------------------------------------------------------------------------------------------ #
<def_stmt>_list_comb_wires f<block_start>r=set()<line_sep>groups=group_by_targets(f.comb)<for_stmt>g groups<block_start><if_stmt>len(g[1])<eq>1<and>isinstance(g[1][0] _Assign)<block_start>r<augor>g[0]<block_end><block_end><return>r<block_end><def_stmt>_print_module f ios name ns attr_translate<block_start>sigs=list_signals(f)|list_special_ios(f ins=<true> outs=<true> inouts=<true>)<line_sep>special_outs=list_special_ios(f ins=<false> outs=<true> inouts=<true>)<line_sep>inouts=list_special_ios(f ins=<false> outs=<false> inouts=<true>)<line_sep>targets=list_targets(f)|special_outs<line_sep>wires=_list_comb_wires(f)|special_outs<line_sep>r="module "+name+"(\n"<line_sep>firstp=<true><for_stmt>sig sorted(ios key=<lambda>x:x.duid)<block_start><if_stmt><not>firstp<block_start>r<augadd>",\n"<block_end>firstp=<false><line_sep>attr=_print_attribute(sig.attr attr_translate)<if_stmt>attr<block_start>r<augadd>"\t"+attr<block_end>sig.type="wire"<line_sep>sig.name=ns.get_name(sig)<if_stmt>sig<in>inouts<block_start>sig.direction="inout"<line_sep>r<augadd>"\tinout wire "+_print_signal(ns sig)<block_end><elif_stmt>sig<in>targets<block_start>sig.direction="output"<if_stmt>sig<in>wires<block_start>r<augadd>"\toutput wire "+_print_signal(ns sig)<block_end><else_stmt><block_start>sig.type="reg"<line_sep>r<augadd>"\toutput reg "+_print_signal(ns sig)<block_end><block_end><else_stmt><block_start>sig.direction="input"<line_sep>r<augadd>"\tinput wire "+_print_signal(ns sig)<block_end><block_end>r<augadd>"\n);\n\n"<for_stmt>sig sorted(sigs-ios key=<lambda>x:x.duid)<block_start>attr=_print_attribute(sig.attr attr_translate)<if_stmt>attr<block_start>r<augadd>attr+" "<block_end><if_stmt>sig<in>wires<block_start>r<augadd>"wire "+_print_signal(ns sig)+";\n"<block_end><else_stmt><block_start>r<augadd>"reg "+_print_signal(ns sig)+" = "+_print_expression(ns sig.reset)[0]+";\n"<block_end><block_end>r<augadd>"\n"<line_sep><return>r<block_end># 
------------------------------------------------------------------------------------------------ #
# COMBINATORIAL LOGIC #
# ------------------------------------------------------------------------------------------------ #
<def_stmt>_print_combinatorial_logic_sim f ns<block_start>r=""<if_stmt>f.comb<block_start><import_from_stmt>collections defaultdict<line_sep>target_stmt_map=defaultdict(list)<for_stmt>statement flat_iteration(f.comb)<block_start>targets=list_targets(statement)<for_stmt>t targets<block_start>target_stmt_map[t].append(statement)<block_end><block_end>groups=group_by_targets(f.comb)<for_stmt>n,(t stmts) enumerate(target_stmt_map.items())<block_start><assert_stmt>isinstance(t Signal)<if_stmt>len(stmts)<eq>1<and>isinstance(stmts[0] _Assign)<block_start>r<augadd>"assign "+_print_node(ns _AT_BLOCKING 0 stmts[0])<block_end><else_stmt><block_start>r<augadd>"always @(*) begin\n"<line_sep>r<augadd>"\t"+ns.get_name(t)+" <= "+_print_expression(ns t.reset)[0]+";\n"<line_sep>r<augadd>_print_node(ns _AT_NONBLOCKING 1 stmts t)<line_sep>r<augadd>"end\n"<block_end><block_end><block_end>r<augadd>"\n"<line_sep><return>r<block_end><def_stmt>_print_combinatorial_logic_synth f ns<block_start>r=""<if_stmt>f.comb<block_start>groups=group_by_targets(f.comb)<for_stmt>n,g enumerate(groups)<block_start><if_stmt>len(g[1])<eq>1<and>isinstance(g[1][0] _Assign)<block_start>r<augadd>"assign "+_print_node(ns _AT_BLOCKING 0 g[1][0])<block_end><else_stmt><block_start>r<augadd>"always @(*) begin\n"<for_stmt>t g[0]<block_start>r<augadd>"\t"+ns.get_name(t)+" <= "+_print_expression(ns t.reset)[0]+";\n"<block_end>r<augadd>_print_node(ns _AT_NONBLOCKING 1 g[1])<line_sep>r<augadd>"end\n"<block_end><block_end><block_end>r<augadd>"\n"<line_sep><return>r<block_end># ------------------------------------------------------------------------------------------------ #
# SYNCHRONOUS LOGIC #
# ------------------------------------------------------------------------------------------------ #
<def_stmt>_print_synchronous_logic f ns<block_start>r=""<for_stmt>k,v sorted(f.sync.items() key=itemgetter(0))<block_start>r<augadd>"always @(posedge "+ns.get_name(f.clock_domains[k].clk)+") begin\n"<line_sep>r<augadd>_print_node(ns _AT_SIGNAL 1 v)<line_sep>r<augadd>"end\n\n"<block_end><return>r<block_end># ------------------------------------------------------------------------------------------------ #
# SPECIALS #
# ------------------------------------------------------------------------------------------------ #
<def_stmt>_print_specials overrides specials ns add_data_file attr_translate<block_start>r=""<for_stmt>special sorted(specials key=<lambda>x:x.duid)<block_start><if_stmt>hasattr(special "attr")<block_start>attr=_print_attribute(special.attr attr_translate)<if_stmt>attr<block_start>r<augadd>attr+" "<block_end><block_end># Replace Migen Memory's emit_verilog with our implementation.
<if_stmt>isinstance(special Memory)<block_start>pr=memory_emit_verilog(special ns add_data_file)<block_end><else_stmt><block_start>pr=call_special_classmethod(overrides special "emit_verilog" ns add_data_file)<block_end><if_stmt>pr<is><none><block_start><raise>NotImplementedError("Special "+str(special)+" failed to implement emit_verilog")<block_end>r<augadd>pr<block_end><return>r<block_end># ------------------------------------------------------------------------------------------------ #
# FHDL --> VERILOG #
# ------------------------------------------------------------------------------------------------ #
<class_stmt>DummyAttrTranslate(dict)<block_start><def_stmt>__getitem__ self k<block_start><return>(k "true")<block_end><block_end><def_stmt>convert f ios=set() name="top" special_overrides=dict() attr_translate=DummyAttrTranslate() regular_comb=<true># Create ConvOutput.
<block_start>r=ConvOutput()<line_sep># Convert to FHDL's fragments is not already done.
<if_stmt><not>isinstance(f _Fragment)<block_start>f=f.get_fragment()<block_end># Verify/Create Clock Domains.
<for_stmt>cd_name sorted(list_clock_domains(f))# Try to get Clock Domain.
<block_start><try_stmt><block_start>f.clock_domains[cd_name]<block_end># If not found, raise Error.
<except_stmt><block_start>msg=f"""Unresolved clock domain {cd_name}, availables:\n"""<for_stmt>f f.clock_domains<block_start>msg<augadd>f"- {f.name}\n"<block_end><raise>Exception(msg)<block_end><block_end># Lower complex slices.
f=lower_complex_slices(f)<line_sep># Insert resets.
insert_resets(f)<line_sep># Lower basics.
f=lower_basics(f)<line_sep># Lower specials.
f,lowered_specials=lower_specials(special_overrides f)<line_sep># Lower basics (for basics included in specials).
f=lower_basics(f)<line_sep># IOs backtrace/naming.
<for_stmt>io sorted(ios key=<lambda>x:x.duid)<block_start><if_stmt>io.name_override<is><none><block_start>io_name=io.backtrace[-1][0]<if_stmt>io_name<block_start>io.name_override=io_name<block_end><block_end><block_end># Build NameSpace.
# ----------------
ns=build_namespace(signals=(list_signals(f)|list_special_ios(f ins=<true> outs=<true> inouts=<true>)|ios) reserved_keywords=_ieee_1800_2017_verilog_reserved_keywords)<line_sep>ns.clock_domains=f.clock_domains<line_sep># Build Verilog.
# --------------
verilog=generated_banner("//")<line_sep># Module Top.
verilog<augadd>_print_module(f ios name ns attr_translate)<line_sep># Combinatorial Logic.
<if_stmt>regular_comb<block_start>verilog<augadd>_print_combinatorial_logic_synth(f ns)<block_end><else_stmt><block_start>verilog<augadd>_print_combinatorial_logic_sim(f ns)<block_end># Synchronous Logic.
verilog<augadd>_print_synchronous_logic(f ns)<line_sep># Specials
verilog<augadd>_print_specials(special_overrides f.specials-lowered_specials ns r.add_data_file attr_translate)<line_sep># Module End.
verilog<augadd>"endmodule\n"<line_sep>r.set_main_source(verilog)<line_sep>r.ns=ns<line_sep><return>r<block_end> |
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
<import_from_stmt>abc ABC abstractmethod<import_from_stmt>concurrent futures<import_from_stmt>functools partial<import_from_stmt>operator is_not<import_from_stmt>typing List Union<import_from_stmt>clai.server.agent Agent<import_from_stmt>clai.server.command_message Action State<line_sep># pylint: disable=too-few-public-methods
<class_stmt>AgentExecutor(ABC)<block_start>@abstractmethod<def_stmt>execute_agents self command:State agents:List[Agent]<arrow>List[Action]<block_start>"""execute all agents in parallel and return the actions"""<block_end><block_end><class_stmt>ThreadExecutor(AgentExecutor)<block_start>MAX_TIME_PLUGIN_EXECUTION=4<line_sep>NUM_WORKERS=4<def_stmt>execute_agents self command:State agents:List[Agent]<arrow>List[Union[Action List[Action]]]<block_start><with_stmt>futures.ThreadPoolExecutor(max_workers=self.NUM_WORKERS)<as>executor<block_start>done,_=futures.wait([executor.submit(plugin_instance.execute command)<for>plugin_instance agents] timeout=self.MAX_TIME_PLUGIN_EXECUTION)<if_stmt><not>done<block_start><return>[]<block_end>results=map(<lambda>future:future.result() done)<line_sep>candidate_actions=list(filter(partial(is_not <none>) results))<block_end><return>candidate_actions<block_end><block_end># pylint: disable= invalid-name
thread_executor=ThreadExecutor()<line_sep> |
<import_stmt>urllib.parse<assert_stmt>urllib.parse.unquote("foo%20bar")<eq>"foo bar"<import_stmt>urllib.request<with_stmt>urllib.request.urlopen('https://httpbin.org/headers')<as>f<block_start>f.read()<block_end># issue 1424
text="""Hello
World"""<assert_stmt>urllib.parse.urlencode({"text":text})<eq>"text=Hello%0AWorld"<line_sep>print('passed all tests')<line_sep> |
<import_stmt>lz4.block<import_stmt>pytest<import_stmt>sys<import_stmt>os<def_stmt>test_decompress_ui32_overflow <block_start>data=lz4.block.compress(b'A'<times>64)<with_stmt>pytest.raises(OverflowError)<block_start>lz4.block.decompress(data[4:] uncompressed_size=((1<lshift>32)+64))<block_end><block_end><def_stmt>test_decompress_without_leak # Verify that hand-crafted packet does not leak uninitialized(?) memory.
<block_start>data=lz4.block.compress(b'A'<times>64)<line_sep>message=r'^Decompressor wrote 64 bytes, but 79 bytes expected from header$'<with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(b'\x4f'+data[1:])<block_end><block_end><def_stmt>test_decompress_with_small_buffer <block_start>data=lz4.block.compress(b'A'<times>64 store_size=<false>)<line_sep>message=r'^Decompression failed: corrupt input or insufficient space in destination buffer. Error code: \d+$'<with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(data[4:] uncompressed_size=64)<block_end><with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(data uncompressed_size=60)<block_end><block_end><def_stmt>test_decompress_truncated <block_start>input_data=b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"<times>24<line_sep>compressed=lz4.block.compress(input_data)<line_sep># for i in range(len(compressed)):
# try:
# lz4.block.decompress(compressed[:i])
# except:
# print(i, sys.exc_info()[0], sys.exc_info()[1])
<with_stmt>pytest.raises(ValueError match='Input source data size too small')<block_start>lz4.block.decompress(compressed[:0])<block_end><for_stmt>n [0 1]<block_start><with_stmt>pytest.raises(ValueError match='Input source data size too small')<block_start>lz4.block.decompress(compressed[:n])<block_end><block_end><for_stmt>n [24 25 -2 27 67 85]<block_start><with_stmt>pytest.raises(lz4.block.LZ4BlockError)<block_start>lz4.block.decompress(compressed[:n])<block_end><block_end><block_end><def_stmt>test_decompress_with_trailer <block_start>data=b'A'<times>64<line_sep>comp=lz4.block.compress(data)<line_sep>message=r'^Decompression failed: corrupt input or insufficient space in destination buffer. Error code: \d+$'<with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(comp+b'A')<block_end><with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(comp+comp)<block_end><with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(comp+comp[4:])<block_end><block_end><def_stmt>test_unicode <block_start><if_stmt>sys.version_info<l>(3 )<block_start><return># skip
<block_end>DATA=b'x'<with_stmt>pytest.raises(TypeError)<block_start>lz4.block.compress(DATA.decode('latin1'))<line_sep>lz4.block.decompress(lz4.block.compress(DATA).decode('latin1'))<block_end><block_end># These next two are probably redundant given test_1 above but we'll keep them
# for now
<def_stmt>test_return_bytearray <block_start><if_stmt>sys.version_info<l>(3 )<block_start><return># skip
<block_end>data=os.urandom(128<times>1024)# Read 128kb
compressed=lz4.block.compress(data)<line_sep>b=lz4.block.compress(data return_bytearray=<true>)<assert_stmt>isinstance(b bytearray)<assert_stmt>bytes(b)<eq>compressed<line_sep>b=lz4.block.decompress(compressed return_bytearray=<true>)<assert_stmt>isinstance(b bytearray)<assert_stmt>bytes(b)<eq>data<block_end><def_stmt>test_memoryview <block_start><if_stmt>sys.version_info<l>(2 7)<block_start><return># skip
<block_end>data=os.urandom(128<times>1024)# Read 128kb
compressed=lz4.block.compress(data)<assert_stmt>lz4.block.compress(memoryview(data))<eq>compressed<assert_stmt>lz4.block.decompress(memoryview(compressed))<eq>data<block_end><def_stmt>test_with_dict_none <block_start>input_data=b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"<times>24<for_stmt>mode ['default' 'high_compression']<block_start><assert_stmt>lz4.block.decompress(lz4.block.compress(input_data mode=mode dict=<none>))<eq>input_data<assert_stmt>lz4.block.decompress(lz4.block.compress(input_data mode=mode) dict=<none>)<eq>input_data<assert_stmt>lz4.block.decompress(lz4.block.compress(input_data mode=mode dict=b''))<eq>input_data<assert_stmt>lz4.block.decompress(lz4.block.compress(input_data mode=mode) dict=b'')<eq>input_data<assert_stmt>lz4.block.decompress(lz4.block.compress(input_data mode=mode dict=''))<eq>input_data<assert_stmt>lz4.block.decompress(lz4.block.compress(input_data mode=mode) dict='')<eq>input_data<block_end><block_end><def_stmt>test_with_dict <block_start>input_data=b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"<times>24<line_sep>dict1=input_data[10:30]<line_sep>dict2=input_data[20:40]<line_sep>message=r'^Decompression failed: corrupt input or insufficient space in destination buffer. 
Error code: \d+$'<for_stmt>mode ['default' 'high_compression']<block_start>compressed=lz4.block.compress(input_data mode=mode dict=dict1)<with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(compressed)<block_end><with_stmt>pytest.raises(lz4.block.LZ4BlockError match=message)<block_start>lz4.block.decompress(compressed dict=dict1[:2])<block_end><assert_stmt>lz4.block.decompress(compressed dict=dict2)<ne>input_data<assert_stmt>lz4.block.decompress(compressed dict=dict1)<eq>input_data<block_end><assert_stmt>lz4.block.decompress(lz4.block.compress(input_data) dict=dict1)<eq>input_data<block_end><def_stmt>test_known_decompress_1 <block_start>input=b'\x00\x00\x00\x00\x00'<line_sep>output=b''<assert_stmt>lz4.block.decompress(input)<eq>output<block_end><def_stmt>test_known_decompress_2 <block_start>input=b'\x01\x00\x00\x00\x10 '<line_sep>output=b' '<assert_stmt>lz4.block.decompress(input)<eq>output<block_end><def_stmt>test_known_decompress_3 <block_start>input=b'h\x00\x00\x00\xff\x0bLorem ipsum dolor sit amet\x1a\x006P amet'<line_sep>output=b'Lorem ipsum dolor sit amet'<times>4<assert_stmt>lz4.block.decompress(input)<eq>output<block_end><def_stmt>test_known_decompress_4 <block_start>input=b'\xb0\xb3\x00\x00\xff\x1fExcepteur sint occaecat cupidatat non proident.\x00'+(b'\xff'<times>180)+b'\x1ePident'<line_sep>output=b'Excepteur sint occaecat cupidatat non proident'<times>1000<assert_stmt>lz4.block.decompress(input)<eq>output<block_end> |
getObject={'id':37401 'memoryCapacity':242 'modifyDate':'' 'name':'test-dedicated' 'diskCapacity':1200 'createDate':'2017-10-16T12:50:23-05:00' 'cpuCount':56 'accountId':1199911}<line_sep>getAvailableRouters=[{'hostname':'bcr01a.dal05' 'id':12345} {'hostname':'bcr02a.dal05' 'id':12346} {'hostname':'bcr03a.dal05' 'id':12347} {'hostname':'bcr04a.dal05' 'id':12348}]<line_sep>getObjectById={'datacenter':{'id':12345 'name':'dal05' 'longName':'Dallas 5'} 'memoryCapacity':242 'modifyDate':'2017-11-06T11:38:20-06:00' 'name':'test-dedicated' 'diskCapacity':1200 'backendRouter':{'domain':'test.com' 'hostname':'bcr01a.dal05' 'id':12345} 'guestCount':1 'cpuCount':56 'guests':[{'domain':'test.com' 'hostname':'test-dedicated' 'id':12345 'uuid':'F9329795-4220-4B0A-B970-C86B950667FA'}] 'billingItem':{'nextInvoiceTotalRecurringAmount':1515.556 'orderItem':{'id':12345 'order':{'status':'APPROVED' 'privateCloudOrderFlag':<false> 'modifyDate':'2017-11-02T11:42:50-07:00' 'orderQuoteId':'' 'userRecordId':12345 'createDate':'2017-11-02T11:40:56-07:00' 'impersonatingUserRecordId':'' 'orderTypeId':7 'presaleEventId':'' 'userRecord':{'username':'test-dedicated'} 'id':12345 'accountId':12345}} 'id':12345 'children':[{'nextInvoiceTotalRecurringAmount':0.0 'categoryCode':'dedicated_host_ram'} {'nextInvoiceTotalRecurringAmount':0.0 'categoryCode':'dedicated_host_disk'}]} 'id':12345 'createDate':'2017-11-02T11:40:56-07:00'}<line_sep>deleteObject=<true><line_sep>getGuests=[{'id':200 'hostname':'vs-test1' 'domain':'test.sftlyr.ws' 'fullyQualifiedDomainName':'vs-test1.test.sftlyr.ws' 'status':{'keyName':'ACTIVE' 'name':'Active'} 'datacenter':{'id':50 'name':'TEST00' 'description':'Test Data Center'} 'powerState':{'keyName':'RUNNING' 'name':'Running'} 'maxCpu':2 'maxMemory':1024 'primaryIpAddress':'172.16.240.2' 'globalIdentifier':'1a2b3c-1701' 'primaryBackendIpAddress':'10.45.19.37' 'hourlyBillingFlag':<false> 'billingItem':{'id':6327 'recurringFee':1.54 
'orderItem':{'order':{'userRecord':{'username':'chechu' }}}} } {'id':202 'hostname':'vs-test2' 'domain':'test.sftlyr.ws' 'fullyQualifiedDomainName':'vs-test2.test.sftlyr.ws' 'status':{'keyName':'ACTIVE' 'name':'Active'} 'datacenter':{'id':50 'name':'TEST00' 'description':'Test Data Center'} 'powerState':{'keyName':'RUNNING' 'name':'Running'} 'maxCpu':4 'maxMemory':4096 'primaryIpAddress':'172.16.240.7' 'globalIdentifier':'05a8ac-6abf0' 'primaryBackendIpAddress':'10.45.19.35' 'hourlyBillingFlag':<true> 'billingItem':{'id':6327 'recurringFee':1.54 'orderItem':{'order':{'userRecord':{'username':'chechu' }}}}}]<line_sep> |
<class_stmt>FileSystemAuditRule(AuditRule)<block_start>"""
Represents an abstraction of an access control entry (ACE) that defines an audit rule for a file or directory. This class cannot be inherited.
FileSystemAuditRule(identity: IdentityReference,fileSystemRights: FileSystemRights,flags: AuditFlags)
FileSystemAuditRule(identity: IdentityReference,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
FileSystemAuditRule(identity: str,fileSystemRights: FileSystemRights,flags: AuditFlags)
FileSystemAuditRule(identity: str,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
"""<line_sep>@staticmethod<def_stmt>__new__ self identity fileSystemRights *__args<block_start>"""
__new__(cls: type,identity: IdentityReference,fileSystemRights: FileSystemRights,flags: AuditFlags)
__new__(cls: type,identity: IdentityReference,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
__new__(cls: type,identity: str,fileSystemRights: FileSystemRights,flags: AuditFlags)
__new__(cls: type,identity: str,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
"""<line_sep><pass><block_end>AccessMask=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the access mask for this rule.
"""<line_sep>FileSystemRights=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the System.Security.AccessControl.FileSystemRights flags associated with the current System.Security.AccessControl.FileSystemAuditRule object.
Get: FileSystemRights(self: FileSystemAuditRule) -> FileSystemRights
"""<block_end> |
<import_from_stmt>django.conf.urls patterns<import_from_stmt>package apiv2<as>package_api<import_from_stmt>grid views<as>grid_views<import_from_stmt>searchv2 views<as>search_views<import_from_stmt>django.urls path<line_sep>urlpatterns=patterns("" # {% url "apiv2:category" %}
path("categories/" view=package_api.CategoryListAPIView.as_view() name="categories") # {% url "apiv2:packages" %}
path("packages/" view=package_api.PackageListAPIView.as_view() name="packages") # {% url "apiv2:packages" slug %}
path("packages/<slug:slug>/" view=package_api.PackageDetailAPIView.as_view() name="packages" ) # {% url "apiv2:grids" %}
path("grids/" view=grid_views.GridListAPIView.as_view() name="grids") # {% url "apiv2:grids" slug %}
path("grids/<slug:slug>/" view=grid_views.GridDetailAPIView.as_view() name="grids") # {% url "apiv2:search" %}
path("search/" view=search_views.SearchListAPIView.as_view() name="search") # {% url "apiv2:search" slug %}
path("search/<slug:slug>/" view=search_views.SearchDetailAPIView.as_view() name="search" ) # {% url "apiv2:python3" slug %}
path("python3/" view=package_api.Python3ListAPIView.as_view() name="python3") )<line_sep> |
<import_stmt>binascii<import_stmt>bitcoin<import_from_stmt>ethereum utils<import_from_stmt>secp256k1 PrivateKey<import_from_stmt>eth_utils encode_hex<import_from_stmt>utils.utils sol_sha3<line_sep>eth_prefix="\x19Ethereum Signed Message:\n"<def_stmt>eth_privtoaddr priv<arrow>str<block_start>pub=bitcoin.encode_pubkey(bitcoin.privtopub(priv) 'bin_electrum')<line_sep><return>"0x"+binascii.hexlify(sol_sha3(pub)[12:]).decode("ascii")<block_end><def_stmt>eth_message_prefixed msg:str<arrow>bytes<block_start><return>eth_prefix+str(len(msg))+msg<block_end><def_stmt>eth_message_hex msg:str<arrow>bytes<block_start>msg=eth_message_prefixed(msg)<line_sep>msg_hex=encode_hex(msg)<line_sep><return>sol_sha3(msg_hex)<block_end><def_stmt>eth_signed_typed_data_message types names data<arrow>bytes<block_start>"""
types e.g. ('address', 'uint', ('uint', 32))
names e.g. ('receiver', 'block_created', 'balance')
data e.g. ('0x5601ea8445a5d96eeebf89a67c4199fbb7a43fbb', 3000, 1000)
"""<assert_stmt>len(types)<eq>len(data)<eq>len(names) 'Argument length mismatch.'<line_sep>sign_types=[]<line_sep>sign_values=[]<for_stmt>i,type enumerate(types)<block_start><if_stmt>isinstance(type tuple)<block_start>sign_types.append(type[0]+str(type[1]))<line_sep>sign_values.append((data[i] type[1]))<block_end><else_stmt><block_start>sign_types.append(type)<line_sep>sign_values.append(data[i])<block_end>sign_types[i]<augadd>' '+names[i]<block_end><return>sol_sha3(sol_sha3(*sign_types) sol_sha3(*sign_values))<block_end><def_stmt>sign data:bytes private_key_seed_ascii:str<block_start>priv=private_key_seed_ascii<line_sep>pk=PrivateKey(priv raw=<true>)<line_sep>signature=pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(data raw=<true>))<line_sep>signature=signature[0]+utils.bytearray_to_bytestr([signature[1]])<line_sep><return>signature eth_privtoaddr(priv)<block_end><def_stmt>check data:bytes pk:bytes<block_start><return>sign(data pk)<block_end> |
<import_from_stmt>aiogram.dispatcher.filters.state StatesGroup State<import_from_stmt>aiogram.types Message<import_from_stmt>aiogram_dialog Dialog Window DialogManager<import_from_stmt>aiogram_dialog.tools render_transitions<import_from_stmt>aiogram_dialog.widgets.input MessageInput<import_from_stmt>aiogram_dialog.widgets.kbd Next Back<import_from_stmt>aiogram_dialog.widgets.text Const<class_stmt>RenderSG(StatesGroup)<block_start>first=State()<line_sep>second=State()<line_sep>last=State()<block_end><async_keyword><def_stmt>on_input m:Message dialog:Dialog manager:DialogManager<block_start>manager.current_context().dialog_data["name"]=m.text<line_sep><await>dialog.next()<block_end>dialog=Dialog(Window(Const("1. First") Next() state=RenderSG.first ) Window(Const("2. Second") Back() MessageInput(on_input) state=RenderSG.second ) Window(Const("3. Last") Back() state=RenderSG.last ) )<line_sep># this is diagram rendering
render_transitions([dialog])<line_sep> |
# Copyright (c) Facebook, Inc. and its affiliates.
<import_stmt>numpy<as>np<import_from_stmt>fairmotion.ops conversions<def_stmt>euler_diff predictions targets<block_start>"""
Computes the Euler angle error as in previous work, following
https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/translate.py#L207
Args:
predictions: np array of predicted joint angles represented as rotation matrices, i.e. in shape
(..., n_joints, 3, 3)
targets: np array of same shape as `predictions`
Returns:
The Euler angle error an np array of shape (..., )
"""<assert_stmt>predictions.shape[-1]<eq>3<and>predictions.shape[-2]<eq>3<assert_stmt>targets.shape[-1]<eq>3<and>targets.shape[-2]<eq>3<line_sep>n_joints=predictions.shape[-3]<line_sep>ori_shape=predictions.shape[:-3]<line_sep>preds=np.reshape(predictions [-1 3 3])<line_sep>targs=np.reshape(targets [-1 3 3])<line_sep>euler_preds=conversions.R2E(preds)# (N, 3)
euler_targs=conversions.R2E(targs)# (N, 3)
# reshape to (-1, n_joints*3) to be consistent with previous work
euler_preds=np.reshape(euler_preds [-1 n_joints<times>3])<line_sep>euler_targs=np.reshape(euler_targs [-1 n_joints<times>3])<line_sep># l2 error on euler angles
idx_to_use=np.where(np.std(euler_targs 0)<g>1e-4)[0]<line_sep>euc_error=np.power(euler_targs[: idx_to_use]-euler_preds[: idx_to_use] 2 )<line_sep>euc_error=np.sqrt(np.sum(euc_error axis=1))# (-1, ...)
# reshape to original
<return>np.reshape(euc_error ori_shape)<block_end> |
<import_stmt>asyncio<import_stmt>random<import_stmt>pytest<import_stmt>uuid<import_from_stmt>collections defaultdict<import_stmt>aiotask_context<as>context<line_sep>@asyncio.coroutine<def_stmt>dummy3 <block_start><yield><from>asyncio.sleep(random.uniform(0 2))<line_sep><return>context.get("key")<block_end>@asyncio.coroutine<def_stmt>dummy2 a b<block_start><yield><from>asyncio.sleep(random.uniform(0 2))<line_sep>res=context.get("key")<line_sep><yield><from>asyncio.sleep(random.uniform(0 2))<line_sep>res1=<yield><from>dummy3()<assert_stmt>res<eq>res1<line_sep><return>a b res<block_end>@asyncio.coroutine<def_stmt>dummy1 n_tasks<block_start>context.set("key" str(uuid.uuid4()))<line_sep>tasks=[asyncio.ensure_future(dummy2(id(context.asyncio_current_task()) n))<for>n range(n_tasks)]<line_sep>results=<yield><from>asyncio.gather(*tasks)<line_sep>info=defaultdict(list)<for_stmt>taskid,n,key results<block_start>info[key].append([taskid n])<block_end><return>info<block_end>@pytest.mark.asyncio@asyncio.coroutine<def_stmt>test_ensure_future_concurrent <block_start>n_tasks=10<line_sep>results=<yield><from>asyncio.gather(*[dummy1(n_tasks=n_tasks)<for>x range(1000)])<for_stmt>r results<block_start><assert_stmt>len(r)<eq>1<for_stmt>key,value r.items()<block_start><assert_stmt>len(value)<eq>n_tasks<block_end><block_end><block_end>@pytest.mark.asyncio@asyncio.coroutine<def_stmt>test_ensurefuture_context_propagation <block_start>context.set("key" "value")<line_sep>@asyncio.coroutine<def_stmt>change_context <block_start><assert_stmt>context.get("key")<eq>"value"<line_sep>context.set("key" "what")<line_sep>context.set("other" "data")<block_end><yield><from>asyncio.ensure_future(change_context())<assert_stmt>context.get("key")<eq>"what"<assert_stmt>context.get("other")<eq>"data"<block_end>@pytest.mark.asyncio@asyncio.coroutine<def_stmt>test_waitfor_context_propagation <block_start>context.set("key" "value")<line_sep>@asyncio.coroutine<def_stmt>change_context 
<block_start><assert_stmt>context.get("key")<eq>"value"<line_sep>context.set("key" "what")<line_sep>context.set("other" "data")<block_end><yield><from>asyncio.wait_for(change_context() 1)<assert_stmt>context.get("key")<eq>"what"<assert_stmt>context.get("other")<eq>"data"<block_end>@pytest.mark.asyncio@asyncio.coroutine<def_stmt>test_gather_context_propagation <block_start>context.set("key" "value")<line_sep>@asyncio.coroutine<def_stmt>change_context <block_start><assert_stmt>context.get("key")<eq>"value"<line_sep>context.set("key" "what")<line_sep>context.set("other" "data")<block_end><yield><from>asyncio.gather(change_context())<assert_stmt>context.get("key")<eq>"what"<assert_stmt>context.get("other")<eq>"data"<block_end> |
<import_from_stmt>dataclasses InitVar dataclass field<import_from_stmt>pytest raises<import_from_stmt>apischema ValidationError deserialize validator<import_from_stmt>apischema.metadata init_var<line_sep>@dataclass<class_stmt>Foo<block_start>bar:InitVar[int]=field(metadata=init_var(int))<line_sep>@validator(bar)<def_stmt>validate self bar:int<block_start><if_stmt>bar<l>0<block_start><yield>"negative"<block_end><block_end><block_end><with_stmt>raises(ValidationError)<as>err<block_start>deserialize(Foo {"bar":-1})<block_end><assert_stmt>err.value.errors<eq>[{"loc":["bar"] "msg":"negative"}]<line_sep> |
# -*- coding: utf-8 -*
<import_from_stmt>expects *<import_from_stmt>expects.testing failure<with_stmt>describe('be')<block_start><with_stmt>it('should pass if object is expected')<block_start>value=1<line_sep>expect(value).to(be(value))<block_end><with_stmt>it('should fail if object is not expected')<block_start><with_stmt>failure('expected: 1 to be 2')<block_start>expect(1).to(be(2))<block_end><block_end><with_stmt>context('#negated')<block_start><with_stmt>it('should pass if object is not expected')<block_start>expect(1).not_to(be(2))<block_end><with_stmt>it('should fail if object is expected')<block_start>value=1<with_stmt>failure('expected: 1 not to be 1')<block_start>expect(value).not_to(be(value))<block_end><block_end><block_end><block_end> |
################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2019-2021 NVIDIA CORPORATION
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
<import_stmt>argparse<import_stmt>os<import_stmt>xml.etree.ElementTree<as>ET<def_stmt>parse_args args=<none><block_start>parser=argparse.ArgumentParser('Converting xml labels to KITTI format.')<line_sep>parser.add_argument('-i' '--input_label_dir' type=str required=<true> help='directory of the input xml labels')<line_sep>parser.add_argument('-o' '--output_label_dir' type=str required=<true> help='directory of the output KITTI labels')<line_sep>parser.add_argument('-d' '--encode_difficult' action="store_true" required=<false> help='Whether or not to encode the difficult object into KITTI labels')<line_sep>args,_=parser.parse_known_args(args)<line_sep><return>args<block_end><def_stmt>xml_to_kitti input_dir output_dir encode_difficult classes<block_start><if_stmt><not>os.path.exists(input_dir)<block_start><raise>ValueError('input_dir not found.')<block_end><if_stmt><not>os.path.exists(output_dir)<block_start><raise>ValueError('output_dir not found.')<block_end><for_stmt>annot os.listdir(input_dir)<block_start>et=ET.parse(os.path.join(input_dir annot))<line_sep>element=et.getroot()<line_sep>element_objs=element.findall('object')<line_sep>element_width=int(element.find('size').find('width').text)<line_sep>element_height=int(element.find('size').find('height').text)<line_sep>element_depth=int(element.find('size').find('depth').text)<assert_stmt>element_depth<eq>3<assert_stmt>len(element_objs)<g>0 'No objects in {}.'.format(os.path.join(input_dir annot))<line_sep>lines=[]<for_stmt>element_obj element_objs<block_start>difficulty=int(element_obj.find('difficult').text)<eq>1<if_stmt>difficulty<and>encode_difficult<block_start>dif='1'<block_end><else_stmt><block_start>dif='0'<block_end>line=''<line_sep>class_name=element_obj.find('name').text<assert_stmt>class_name<in>classes<line_sep>line<augadd>class_name<line_sep>line<augadd>' '<line_sep>line<augadd>'0 {} 0 
'.format(dif)<line_sep>obj_bbox=element_obj.find('bndbox')<line_sep>x1=int(round(float(obj_bbox.find('xmin').text)))<line_sep>y1=int(round(float(obj_bbox.find('ymin').text)))<line_sep>x2=int(round(float(obj_bbox.find('xmax').text)))<line_sep>y2=int(round(float(obj_bbox.find('ymax').text)))<line_sep>line<augadd>str(x1)<line_sep>line<augadd>' '<line_sep>line<augadd>str(y1)<line_sep>line<augadd>' '<line_sep>line<augadd>str(x2)<line_sep>line<augadd>' '<line_sep>line<augadd>str(y2)<line_sep>line<augadd>' '<line_sep>line<augadd>'0 0 0 0 0 0 0\n'<line_sep>lines.append(line)<block_end><with_stmt>open(os.path.join(output_dir os.path.basename(annot).split('.')[0]+'.txt') 'w')<as>f<block_start>f.writelines(lines)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>classes=['horse' "pottedplant" "train" "person" "bird" "car" "chair" "tvmonitor" "bus" "sofa" "dog" "motorbike" "bicycle" "sheep" "boat" "cat" "bottle" "diningtable" "cow" "aeroplane" "background" ]<line_sep>args=parse_args()<line_sep>xml_to_kitti(args.input_label_dir args.output_label_dir args.encode_difficult classes)<block_end> |
<import_from_stmt>starlette status<import_from_stmt>starlette.applications Starlette<import_from_stmt>starlette.middleware Middleware<import_from_stmt>starlette.requests Request<import_from_stmt>starlette.responses JSONResponse<import_from_stmt>starlette.testclient TestClient<import_from_stmt>starlette_context context plugins<import_from_stmt>starlette_context.header_keys HeaderKeys<import_from_stmt>starlette_context.middleware RawContextMiddleware<line_sep>plugins_to_use=(plugins.CorrelationIdPlugin() plugins.RequestIdPlugin() plugins.UserAgentPlugin() plugins.ForwardedForPlugin() plugins.DateHeaderPlugin() )<line_sep>middleware=[Middleware(RawContextMiddleware plugins=plugins_to_use )]<line_sep>app=Starlette(middleware=middleware)<line_sep>client=TestClient(app)<line_sep>@app.route("/")<async_keyword><def_stmt>index request:Request<arrow>JSONResponse<block_start><return>JSONResponse(content=context.data)<block_end><def_stmt>test_valid_request <block_start>resp=client.get("/")<assert_stmt>resp.status_code<eq>status.HTTP_200_OK<for_stmt>plugin plugins_to_use<block_start><assert_stmt>plugin.key<in>resp.text<block_end><assert_stmt>HeaderKeys.correlation_id<in>resp.headers<assert_stmt>HeaderKeys.request_id<in>resp.headers<block_end> |
Experiment(description='Testing the pure linear kernel' data_dir='../data/tsdlr/' max_depth=10 random_order=<false> k=1 debug=<false> local_computation=<false> n_rand=9 sd=2 jitter_sd=0.1 max_jobs=500 verbose=<false> make_predictions=<false> skip_complete=<true> results_dir='../results/2013-10-01-pure-lin/' iters=250 base_kernels='SE,PureLin,Const,Exp,Fourier,Noise' zero_mean=<true> random_seed=1 period_heuristic=5 subset=<true> subset_size=250 full_iters=10 bundle_size=5 additive_form=<true> model_noise=<true> no_noise=<true>)<line_sep> |
# Copyright 2018, <NAME> LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# logger.py - basic wrapper around Python standard logging just so in
# case we need to change this behavior it is all in one place
# --------------------------------------------------------------------------
<import_stmt>logging<class_stmt>Logger(object)<block_start>__instance=<none><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>debug self msg<block_start>self.logger.debug(msg)<block_end><def_stmt>info self msg<block_start>self.logger.info(msg)<block_end><def_stmt>warn self msg<block_start>self.logger.warn(msg)<block_end><def_stmt>error self msg<block_start>self.logger.error(msg)<block_end><def_stmt>__new__ cls<block_start><if_stmt>Logger.__instance<is><none><block_start>Logger.__instance=object.__new__(cls)<line_sep>logger=logging.getLogger('vespene')<line_sep>logger.setLevel(logging.DEBUG)<line_sep>ch=logging.StreamHandler()<line_sep>formatter=logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')<line_sep>ch.setFormatter(formatter)<line_sep>logger.addHandler(ch)<line_sep>Logger.__instance.logger=logger<block_end><return>Logger.__instance<block_end><block_end> |
__version__='0.4.0.dev1'<import_from_stmt>.samplers sample sample_model predict_model sample_split_model Sampler Integrator Metric<import_from_stmt>.util set_random_seed<line_sep> |
# GitHub URL: https://github.com/giswqs/qgis-earthengine-examples/tree/master/Visualization/random_color_visualizer.py
<import_stmt>ee<import_from_stmt>ee_plugin Map<line_sep>dataset=ee.Image('USGS/NLCD/NLCD2016')<line_sep>landcover=ee.Image(dataset.select('landcover'))<line_sep>Map.setCenter(-95 38 5)<line_sep>Map.addLayer(landcover.randomVisualizer() {} 'Landcover')<line_sep> |
<import_from_stmt>conans ConanFile CMake tools<import_stmt>os<class_stmt>LibmikmodConan(ConanFile)<block_start>name="libmikmod"<line_sep>description="Module player and library supporting many formats, including mod, s3m, it, and xm."<line_sep>topics=("libmikmod" "audio")<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>homepage="http://mikmod.sourceforge.net"<line_sep>license="LGPL-2.1-or-later"<line_sep>exports_sources=["patches/*" "CMakeLists.txt"]<line_sep>generators="cmake"<line_sep>settings="os" "arch" "compiler" "build_type"<line_sep>options={"shared":[<true> <false>] "fPIC":[<true> <false>] "with_dsound":[<true> <false>] "with_mmsound":[<true> <false>] "with_alsa":[<true> <false>] "with_oss":[<true> <false>] "with_pulse":[<true> <false>] "with_coreaudio":[<true> <false>]}<line_sep>default_options={"shared":<false> "fPIC":<true> "with_dsound":<true> "with_mmsound":<true> "with_alsa":<true> "with_oss":<true> "with_pulse":<true> "with_coreaudio":<true>}<line_sep>_source_subfolder="source_subfolder"<line_sep>_build_subfolder="build_subfolder"<line_sep>_cmake=<none><def_stmt>config_options self<block_start><if_stmt>self.settings.os<eq>"Windows"<block_start><del_stmt>self.options.fPIC<block_end><else_stmt><block_start><del_stmt>self.options.with_dsound<del_stmt>self.options.with_mmsound<block_end><if_stmt>self.settings.os<ne>"Linux"<block_start><del_stmt>self.options.with_alsa<block_end># Non-Apple Unices
<if_stmt>self.settings.os<not><in>["Linux" "FreeBSD"]<block_start><del_stmt>self.options.with_oss<del_stmt>self.options.with_pulse<block_end># Apple
<if_stmt>tools.is_apple_os(self.settings.os)<block_start><del_stmt>self.options.with_coreaudio<block_end><block_end><def_stmt>configure self<block_start><if_stmt>self.options.shared<block_start><del_stmt>self.options.fPIC<block_end><del_stmt>self.settings.compiler.libcxx<del_stmt>self.settings.compiler.cppstd<block_end><def_stmt>requirements self<block_start><if_stmt>self.settings.os<eq>"Linux"<block_start><if_stmt>self.options.with_alsa<block_start>self.requires("libalsa/1.2.4")<block_end><if_stmt>self.options.with_pulse<block_start>self.requires("pulseaudio/13.0")<block_end><block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version])<line_sep>extracted_dir=self.name+"-"+self.version<line_sep>os.rename(extracted_dir self._source_subfolder)<block_end><def_stmt>_configure_cmake self<block_start><if_stmt>self._cmake<block_start><return>self._cmake<block_end>self._cmake=CMake(self set_cmake_flags=<true>)<line_sep>self._cmake.definitions["ENABLE_STATIC"]=<not>self.options.shared<line_sep>self._cmake.definitions["ENABLE_DOC"]=<false><line_sep>self._cmake.definitions["ENABLE_DSOUND"]=self.options.get_safe("with_dsound" <false>)<line_sep>self._cmake.definitions["ENABLE_MMSOUND"]=self.options.get_safe("with_mmsound" <false>)<line_sep>self._cmake.definitions["ENABLE_ALSA"]=self.options.get_safe("with_alsa" <false>)<line_sep>self._cmake.definitions["ENABLE_OSS"]=self.options.get_safe("with_oss" <false>)<line_sep>self._cmake.definitions["ENABLE_PULSE"]=self.options.get_safe("with_pulse" <false>)<line_sep>self._cmake.definitions["ENABLE_COREAUDIO"]=self.options.get_safe("with_coreaudio" <false>)<line_sep>self._cmake.configure(build_folder=self._build_subfolder)<line_sep><return>self._cmake<block_end><def_stmt>build self<block_start><for_stmt>patch self.conan_data.get("patches" {}).get(self.version [])<block_start>tools.patch(**patch)<block_end>tools.replace_in_file(os.path.join(self._source_subfolder "CMakeLists.txt") 
"CMAKE_SOURCE_DIR" "PROJECT_SOURCE_DIR")<line_sep># Ensure missing dependencies yields errors
tools.replace_in_file(os.path.join(self._source_subfolder "CMakeLists.txt") "MESSAGE(WARNING" "MESSAGE(FATAL_ERROR")<line_sep>tools.replace_in_file(os.path.join(self._source_subfolder "drivers" "drv_alsa.c") "alsa_pcm_close(pcm_h);" "if (pcm_h) alsa_pcm_close(pcm_h);")<line_sep>cmake=self._configure_cmake()<line_sep>cmake.build()<block_end><def_stmt>package self<block_start>self.copy(pattern="COPYING.LESSER" dst="licenses" src=self._source_subfolder)<line_sep>cmake=self._configure_cmake()<line_sep>cmake.install()<line_sep>os.remove(os.path.join(self.package_folder "bin" "libmikmod-config"))<if_stmt><not>self.options.shared<block_start>tools.rmdir(os.path.join(self.package_folder "bin"))<block_end>tools.rmdir(os.path.join(self.package_folder "lib" "pkgconfig"))<block_end><def_stmt>package_info self<block_start>self.cpp_info.libs=tools.collect_libs(self)<if_stmt><not>self.options.shared<block_start>self.cpp_info.defines=["MIKMOD_STATIC"]<block_end>self.cpp_info.filenames["pkg_config"]="libmikmod"<if_stmt>self.options.get_safe("with_dsound")<block_start>self.cpp_info.system_libs.append("dsound")<block_end><if_stmt>self.options.get_safe("with_mmsound")<block_start>self.cpp_info.system_libs.append("winmm")<block_end><if_stmt>self.options.get_safe("with_coreaudio")<block_start>self.cpp_info.frameworks.append("CoreAudio")<block_end><block_end><block_end> |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the sphinx extension
"""<import_from_future_stmt> unicode_literals<import_from_stmt>stevedore extension<import_from_stmt>stevedore sphinxext<import_from_stmt>stevedore.tests utils<import_stmt>mock<import_stmt>pkg_resources<def_stmt>_make_ext name docstring<block_start><def_stmt>inner <block_start><pass><block_end>inner.__doc__=docstring<line_sep>m1=mock.Mock(spec=pkg_resources.EntryPoint)<line_sep>m1.module_name='%s_module'%name<line_sep>s=mock.Mock(return_value='ENTRY_POINT(%s)'%name)<line_sep>m1.__str__=s<line_sep><return>extension.Extension(name m1 inner <none>)<block_end><class_stmt>TestSphinxExt(utils.TestCase)<block_start><def_stmt>setUp self<block_start>super(TestSphinxExt self).setUp()<line_sep>self.exts=[_make_ext('test1' 'One-line docstring') _make_ext('test2' 'Multi-line docstring\n\nAnother para') ]<line_sep>self.em=extension.ExtensionManager.make_test_instance(self.exts)<block_end><def_stmt>test_simple_list self<block_start>results=list(sphinxext._simple_list(self.em))<line_sep>self.assertEqual([('* test1 -- One-line docstring' 'test1_module') ('* test2 -- Multi-line docstring' 'test2_module') ] results )<block_end><def_stmt>test_simple_list_no_docstring self<block_start>ext=[_make_ext('nodoc' <none>)]<line_sep>em=extension.ExtensionManager.make_test_instance(ext)<line_sep>results=list(sphinxext._simple_list(em))<line_sep>self.assertEqual([('* nodoc -- ' 'nodoc_module') ] results )<block_end><def_stmt>test_detailed_list self<block_start>results=list(sphinxext._detailed_list(self.em))<line_sep>self.assertEqual([('test1' 'test1_module') ('-----' 'test1_module') ('\n' 'test1_module') ('One-line docstring' 'test1_module') ('\n' 'test1_module') ('test2' 'test2_module') ('-----' 'test2_module') ('\n' 'test2_module') ('Multi-line docstring\n\nAnother para' 'test2_module') ('\n' 'test2_module') ] results )<block_end><def_stmt>test_detailed_list_format self<block_start>results=list(sphinxext._detailed_list(self.em over='+' under='+'))<line_sep>self.assertEqual([('+++++' 
'test1_module') ('test1' 'test1_module') ('+++++' 'test1_module') ('\n' 'test1_module') ('One-line docstring' 'test1_module') ('\n' 'test1_module') ('+++++' 'test2_module') ('test2' 'test2_module') ('+++++' 'test2_module') ('\n' 'test2_module') ('Multi-line docstring\n\nAnother para' 'test2_module') ('\n' 'test2_module') ] results )<block_end><def_stmt>test_detailed_list_no_docstring self<block_start>ext=[_make_ext('nodoc' <none>)]<line_sep>em=extension.ExtensionManager.make_test_instance(ext)<line_sep>results=list(sphinxext._detailed_list(em))<line_sep>self.assertEqual([('nodoc' 'nodoc_module') ('-----' 'nodoc_module') ('\n' 'nodoc_module') ('.. warning:: No documentation found in ENTRY_POINT(nodoc)' 'nodoc_module') ('\n' 'nodoc_module') ] results )<block_end><block_end> |
<import_stmt>asyncio<import_stmt>json<import_stmt>logging<import_from_stmt>asyncio Queue<import_from_stmt>asyncio.futures Future<import_from_stmt>logging Logger<import_from_stmt>typing Dict Union Any Optional List Callable Awaitable<import_from_stmt>slack_sdk.errors SlackApiError<import_from_stmt>slack_sdk.socket_mode.async_listeners AsyncWebSocketMessageListener AsyncSocketModeRequestListener <import_from_stmt>slack_sdk.socket_mode.request SocketModeRequest<import_from_stmt>slack_sdk.socket_mode.response SocketModeResponse<import_from_stmt>slack_sdk.web.async_client AsyncWebClient<class_stmt>AsyncBaseSocketModeClient<block_start>logger:Logger<line_sep>web_client:AsyncWebClient<line_sep>app_token:str<line_sep>wss_uri:str<line_sep>auto_reconnect_enabled:bool<line_sep>closed:bool<line_sep>message_queue:Queue<line_sep>message_listeners:List[Union[AsyncWebSocketMessageListener Callable[["AsyncBaseSocketModeClient" dict Optional[str]] Awaitable[<none>]] ]]<line_sep>socket_mode_request_listeners:List[Union[AsyncSocketModeRequestListener Callable[["AsyncBaseSocketModeClient" SocketModeRequest] Awaitable[<none>]] ]]<async_keyword><def_stmt>issue_new_wss_url self<arrow>str<block_start><try_stmt><block_start>response=<await>self.web_client.apps_connections_open(app_token=self.app_token)<line_sep><return>response["url"]<block_end><except_stmt>SlackApiError<as>e<block_start><if_stmt>e.response["error"]<eq>"ratelimited"# NOTE: ratelimited errors rarely occur with this endpoint
<block_start>delay=int(e.response.headers.get("Retry-After" "30"))# Tier1
self.logger.info(f"Rate limited. Retrying in {delay} seconds...")<line_sep><await>asyncio.sleep(delay)<line_sep># Retry to issue a new WSS URL
<return><await>self.issue_new_wss_url()<block_end><else_stmt># other errors
<block_start>self.logger.error(f"Failed to retrieve WSS URL: {e}")<line_sep><raise>e<block_end><block_end><block_end><async_keyword><def_stmt>connect self<block_start><raise>NotImplementedError()<block_end><async_keyword><def_stmt>disconnect self<block_start><raise>NotImplementedError()<block_end><async_keyword><def_stmt>connect_to_new_endpoint self<block_start>self.wss_uri=<await>self.issue_new_wss_url()<line_sep><await>self.connect()<block_end><async_keyword><def_stmt>close self<block_start>self.closed=<true><line_sep><await>self.disconnect()<block_end><async_keyword><def_stmt>send_message self message:str<block_start><raise>NotImplementedError()<block_end><async_keyword><def_stmt>send_socket_mode_response self response:Union[Dict[str Any] SocketModeResponse]<block_start><if_stmt>isinstance(response SocketModeResponse)<block_start><await>self.send_message(json.dumps(response.to_dict()))<block_end><else_stmt><block_start><await>self.send_message(json.dumps(response))<block_end><block_end><async_keyword><def_stmt>enqueue_message self message:str<block_start><await>self.message_queue.put(message)<if_stmt>self.logger.level<le>logging.DEBUG<block_start>queue_size=self.message_queue.qsize()<line_sep>self.logger.debug(f"A new message enqueued (current queue size: {queue_size})")<block_end><block_end><async_keyword><def_stmt>process_messages self<block_start><while_stmt><not>self.closed<block_start><try_stmt><block_start><await>self.process_message()<block_end><except_stmt>Exception<as>e<block_start>self.logger.exception(f"Failed to process a message: {e}")<block_end><block_end><block_end><async_keyword><def_stmt>process_message self<block_start>raw_message=<await>self.message_queue.get()<if_stmt>raw_message<is><not><none><block_start>message:dict={}<if_stmt>raw_message.startswith("{")<block_start>message=json.loads(raw_message)<block_end>_:Future[<none>]=asyncio.ensure_future(self.run_message_listeners(message 
raw_message))<block_end><block_end><async_keyword><def_stmt>run_message_listeners self message:dict raw_message:str<arrow><none><block_start>type,envelope_id=message.get("type") message.get("envelope_id")<if_stmt>self.logger.level<le>logging.DEBUG<block_start>self.logger.debug(f"Message processing started (type: {type}, envelope_id: {envelope_id})")<block_end><try_stmt><block_start><if_stmt>message.get("type")<eq>"disconnect"<block_start><await>self.connect_to_new_endpoint()<line_sep><return><block_end><for_stmt>listener self.message_listeners<block_start><try_stmt><block_start><await>listener(self message raw_message)<block_end><except_stmt>Exception<as>e<block_start>self.logger.exception(f"Failed to run a message listener: {e}")<block_end><block_end><if_stmt>len(self.socket_mode_request_listeners)<g>0<block_start>request=SocketModeRequest.from_dict(message)<if_stmt>request<is><not><none><block_start><for_stmt>listener self.socket_mode_request_listeners<block_start><try_stmt><block_start><await>listener(self request)<block_end><except_stmt>Exception<as>e<block_start>self.logger.exception(f"Failed to run a request listener: {e}")<block_end><block_end><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>self.logger.exception(f"Failed to run message listeners: {e}")<block_end><finally_stmt><block_start><if_stmt>self.logger.level<le>logging.DEBUG<block_start>self.logger.debug(f"Message processing completed (type: {type}, envelope_id: {envelope_id})")<block_end><block_end><block_end><block_end> |
# coding=utf-8
<import_stmt>os<import_stmt>shutil<import_stmt>textwrap<import_stmt>unittest<import_stmt>six<import_from_stmt>conans.errors ConanException<import_from_stmt>conans.model.editable_layout EditableLayout<import_from_stmt>conans.test.utils.test_files temp_folder<import_from_stmt>conans.util.files save<class_stmt>ParseTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.test_folder=temp_folder()<line_sep>self.layout_filepath=os.path.join(self.test_folder "layout")<line_sep>self.editable_cpp_info=EditableLayout(self.layout_filepath)<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.test_folder)<block_end><def_stmt>test_field_error self<block_start>content=textwrap.dedent("""
[includedrs]
something
""")<line_sep>save(self.layout_filepath content)<with_stmt>six.assertRaisesRegex(self ConanException "Wrong cpp_info field 'includedrs' in layout")<block_start>_=self.editable_cpp_info._load_data(ref=<none> settings=<none> options=<none>)<block_end>content=textwrap.dedent("""
[*:includedrs]
something
""")<line_sep>save(self.layout_filepath content)<with_stmt>six.assertRaisesRegex(self ConanException "Wrong cpp_info field 'includedrs' in layout")<block_start>_=self.editable_cpp_info._load_data(ref=<none> settings=<none> options=<none>)<block_end>content=textwrap.dedent("""
[*:includedirs]
something
""")<line_sep>save(self.layout_filepath content)<with_stmt>six.assertRaisesRegex(self ConanException "Wrong package reference '\*' in layout file")<block_start>_=self.editable_cpp_info._load_data(ref=<none> settings=<none> options=<none>)<block_end>content=textwrap.dedent("""
[pkg/version@user/channel:revision:includedirs]
something
""")<line_sep>save(self.layout_filepath content)<with_stmt>six.assertRaisesRegex(self ConanException "Wrong package reference "<concat>"'pkg/version@user/channel:revision' in layout file")<block_start>_=self.editable_cpp_info._load_data(ref=<none> settings=<none> options=<none>)<block_end><block_end><block_end> |
<import_from_stmt>collections OrderedDict<class_stmt>MyObj(object)<block_start>b=1<line_sep>a=2<def_stmt>__init__ self<block_start>object.__setattr__(self '_attrs' OrderedDict())<line_sep>self.c=1<line_sep>self.d=2<block_end><def_stmt>__setattr__ self key value<block_start><assert_stmt>key<ne>'_attrs'<line_sep>self._attrs[key]=value<block_end><def_stmt>__getattr__ self item<block_start><try_stmt><block_start><return>self._attrs[item]<block_end><except_stmt>KeyError<block_start><return>self.__class__.__dict__[item]<block_end><block_end>@property<def_stmt>__dict__ self<block_start><return>self._attrs<block_end><block_end>a=MyObj()<line_sep>a.e=3<line_sep>print(a.__dict__)<line_sep>print(MyObj.__dict__)<line_sep>print(a.a)<line_sep> |
<import_stmt>pytest<import_from_stmt>plenum.common.exceptions MissingNodeOp InvalidNodeOp<import_from_stmt>plenum.common.messages.fields NonNegativeNumberField AnyValueField HexField BooleanField Base58Field<import_from_stmt>plenum.common.messages.message_base MessageBase<import_from_stmt>plenum.common.messages.node_message_factory MessageFactory NodeMessageFactory<import_from_stmt>plenum.test.input_validation.stub_messages Message1 Message2 Message3 Message4<line_sep>@pytest.fixture<def_stmt>factory <block_start><return>MessageFactory('plenum.test.input_validation.stub_messages')<block_end><def_stmt>test_message_factory_module_is_not_found_fails <block_start><with_stmt>pytest.raises(ImportError)<block_start>MessageFactory('foo.bar')<block_end><block_end><def_stmt>test_message_factory_classes_not_found_fails <block_start><with_stmt>pytest.raises(ValueError)<as>excinfo# TODO assumes that __init__ won't import any
# MessageBase child classes
<block_start>MessageFactory('plenum.test.input_validation.__init__')<block_end><assert_stmt>"no messages classes found"<in>str(excinfo.value)<block_end><def_stmt>test_message_factory_missed_op_fails factory<block_start>msg={'a':0 'b':'bar'}<with_stmt>pytest.raises(MissingNodeOp)<block_start>factory.get_instance(**msg)<block_end><block_end><def_stmt>test_message_factory_invalid_op_fails factory<block_start>msg={'op':'unknown_op' 'a':0 'b':'bar'}<with_stmt>pytest.raises(InvalidNodeOp)<block_start>factory.get_instance(**msg)<block_end><block_end><def_stmt>test_message_factory_stub_module_is_loaded factory<block_start>msg={'op':'Message1' 'a':0 'b':'bar'}<assert_stmt>isinstance(factory.get_instance(**msg) Message1)<block_end><def_stmt>test_message_factory_set_non_message_class_fails factory<block_start><class_stmt>NonMessageClass<block_start><pass><block_end><with_stmt>pytest.raises(ValueError)<block_start>factory.set_message_class(NonMessageClass)<block_end><block_end><def_stmt>test_message_factory_set_message_class_can_add_message_class factory<block_start><class_stmt>ANewMessageClass(MessageBase)<block_start>typename='NewMessage'<line_sep>schema=(('a' NonNegativeNumberField()) )<block_end>factory.set_message_class(ANewMessageClass)<line_sep>msg={'op':'NewMessage' 'a':0}<assert_stmt>isinstance(factory.get_instance(**msg) ANewMessageClass)<block_end><def_stmt>test_node_message_factory_module_is_loaded <block_start>NodeMessageFactory()<block_end><def_stmt>test_message_factory_can_replace_field factory# check precondition
<block_start>msg={'op':'Message2' 'a':0 'b':'foo'}<assert_stmt>isinstance(factory.get_instance(**msg) Message2)<line_sep>factory.update_schemas_by_field_type(AnyValueField NonNegativeNumberField)<with_stmt>pytest.raises(TypeError)<as>exc_info<block_start>factory.get_instance(**msg)<block_end>exc_info.match("expected types 'int', got 'str'")<block_end><def_stmt>test_message_factory_can_replace_iterable_field factory# check precondition
<block_start>msg={'op':'Message3' 'a':0 'b':[<true> <false>]}<assert_stmt>isinstance(factory.get_instance(**msg) Message3)<line_sep>factory.update_schemas_by_field_type(BooleanField Base58Field)<with_stmt>pytest.raises(TypeError)<as>exc_info<block_start>factory.get_instance(**msg)<block_end>exc_info.match("expected types 'str', got 'bool'")<block_end><def_stmt>test_message_factory_can_replace_map_field factory# check precondition
<block_start>msg={'op':'Message4' 'a':0 'b':{'123':'abc'}}<assert_stmt>isinstance(factory.get_instance(**msg) Message4)<line_sep>factory.update_schemas_by_field_type(HexField NonNegativeNumberField)<with_stmt>pytest.raises(TypeError)<as>exc_info<block_start>factory.get_instance(**msg)<block_end>exc_info.match("expected types 'int', got 'str'")<block_end> |
<import_from_stmt>...accounts Account account_factory<import_stmt>tempfile<import_stmt>os<import_stmt>pickle<import_from_stmt>os listdir<import_from_stmt>os.path isfile join<class_stmt>LocalFileSystemAccountAdapter()<block_start><def_stmt>__init__ self root=<none><block_start><if_stmt>root<is><none><block_start>root=tempfile.gettempdir()<block_end><if_stmt><not>os.path.exists(root+"/accounts/")<block_start>os.makedirs(root+"/accounts/")<block_end>self.root=root<block_end><def_stmt>get_account self account_id:str current_date=<none><block_start><with_stmt>open(self.root+"/accounts/"+account_id+".pickle" 'rb')<as>f<block_start><return>pickle.load(file=f)<block_end><block_end><def_stmt>has_account self account_id:str current_date=<none><block_start><try_stmt><block_start>pickle.load(file=self.root+"/accounts/"+account_id+".pickle")<line_sep><return><true><block_end><except_stmt><block_start><return><false><block_end><block_end><def_stmt>put_account self account:Account current_date=<none><block_start><with_stmt>open(self.root+"/accounts/"+account.account_id+".pickle" 'wb')<as>f<block_start>pickle.dump(account file=f)<block_end><block_end><def_stmt>get_account_ids self current_date=<none><block_start>mypath=self.root+"/accounts/"<line_sep><return>[f.split(".")[0]<for>f listdir(mypath)<if>isfile(join(mypath f))]<block_end><def_stmt>delete_account self account current_date=<none><block_start><try_stmt><block_start>os.remove(self.root+"/accounts/"+account_factory(account).account_id+".pickle")<block_end><except_stmt><block_start><pass><block_end><block_end><def_stmt>delete_accounts self accounts current_date=<none><block_start>[self.delete_account(account)<for>account accounts]<block_end><block_end> |
<import_from_stmt>setuptools setup find_packages<import_from_stmt>dexy.version DEXY_VERSION<import_stmt>platform<line_sep>is_windows=platform.system()<eq>'Windows'<if_stmt>is_windows<block_start>os_specific_requires=[]<block_end><else_stmt><block_start>os_specific_requires=['pexpect']<block_end>setup(author='<NAME>' author_email='<EMAIL>' classifiers=["Development Status :: 5 - Production/Stable" "Environment :: Console" "Intended Audience :: Developers" "Intended Audience :: Education" "Intended Audience :: Financial and Insurance Industry" "Intended Audience :: Science/Research" "License :: OSI Approved :: MIT License" "Topic :: Documentation" "Topic :: Software Development :: Build Tools" "Topic :: Software Development :: Code Generators" "Topic :: Software Development :: Documentation" "Topic :: Text Processing" "Topic :: Text Processing :: Markup :: HTML" "Topic :: Text Processing :: Markup :: LaTeX"] description='Document Automation' ### "entry-points"
entry_points={'console_scripts':['dexy = dexy.commands:run'] 'pygments.lexers':['rst+django = dexy.filters.utils:RstDjangoLexer']} ### @end
include_package_data=<true> install_requires=os_specific_requires+[# for internal dexy use or used in many common plugins
'BeautifulSoup4' 'PyYAML' 'cashew>=0.4.1' 'chardet' 'inflection>=0.2.0' 'jinja2' 'ply>=3.4' 'pygments' 'python3-modargs' 'requests>=0.10.6' # for convenience of running additional filters
'Markdown' 'docutils'] name='dexy' packages=find_packages() url='http://dexy.it' version=DEXY_VERSION)<line_sep> |
<import_from_stmt>.lib _empty STR_TYPES<class_stmt>DataError(ValueError)<block_start>"""
Error with data preserve
error can be a message or None if error raised in childs
data can be anything
"""<line_sep>__slots__=['error' 'name' 'value' 'trafaret' 'code']<line_sep>error_code='unknown'<def_stmt>__init__ self error=<none> name=<none> value=_empty trafaret=<none> code=<none><block_start>"""
:attribute error: can be a string or a dict[string, dataerror]
:attribute name:
:attribute value: validated value that leads to this error
:attribute trafaret: trafaret raised error
:attribute code: code for error, like `value_is_too_big`
"""<if_stmt><not>isinstance(error STR_TYPES+(dict ))<block_start><raise>RuntimeError('Only str or dict is supported, got %r'%error)<block_end>self.error=error<line_sep>self.name=name<line_sep>self.value=value<line_sep>self.trafaret=trafaret<line_sep>self.code=code<or>self.__class__.error_code<line_sep># if self.code == 'unknown':
# raise RuntimeError()
<block_end><def_stmt>__str__ self value=<false><block_start><if_stmt>value<and>self.value<ne>_empty<block_start><return>'%s, got %r'%(str(self.error) self.value)<block_end><else_stmt><block_start><return>str(self.error)<block_end><block_end><def_stmt>__repr__ self<block_start><return>'DataError(%r)'%str(self)<block_end><def_stmt>to_struct self value=<false><block_start><if_stmt>isinstance(self.error dict)<block_start><return>{'code':self.code 'nested':dict((k v.to_struct(value=value)<if>isinstance(v DataError)<else>v)<for>k,v self.error.items()) }<block_end><return>{'code':self.code 'message':self.__str__(value=value) }<block_end><def_stmt>as_dict self value=<false><block_start>"""Use `to_struct` if need consistency"""<if_stmt><not>isinstance(self.error dict)<block_start><return>self.__str__(value=value)<block_end><return>dict((k v.as_dict(value=value)<if>isinstance(v DataError)<else>v)<for>k,v self.error.items())<block_end><block_end> |
# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by <NAME> (<EMAIL>)
# -----------------------------------------------------
<import_stmt>os<import_stmt>h5py<import_stmt>torch<import_stmt>torch.utils.data<as>data<import_from_stmt>utils.img load_image cropBox <import_from_stmt>opt opt<class_stmt>Mscoco_minival(data.Dataset)<block_start><def_stmt>__init__ self annoSet='coco-minival-images-newnms/test-dev'<block_start>self.img_folder='../data/coco/images'# root image folders
self.annot=dict()<line_sep># Read in annotation information from hdf5 file
tags=['xmin' 'ymin' 'xmax' 'ymax']<with_stmt>h5py.File('./predict/annot/'+annoSet+'.h5' 'r')<as>a<block_start><for_stmt>tag tags<block_start>self.annot[tag]=a[tag][:]<block_end><block_end># Load in image file names
<with_stmt>open('./predict/annot/'+annoSet+'_images.txt' 'r')<as>f<block_start>self.images=f.readlines()<block_end>self.images=list(map(<lambda>x:x.strip('\n') self.images))<assert_stmt>len(self.images)<eq>self.annot['xmin'].shape[0]<line_sep>self.size=len(self.images)<line_sep>self.flipRef=((2 3) (4 5) (6 7) (8 9) (10 11) (12 13) (14 15) (16 17))<line_sep>self.year=2017<block_end><def_stmt>__getitem__ self index<block_start><if_stmt>self.year<eq>2014<block_start>imgname=self.images[index]<block_end><else_stmt><block_start>imgname=self.images[index].split('_')[2]<block_end>img_path=os.path.join(self.img_folder imgname)<line_sep>img=load_image(img_path)<line_sep>ori_img=img.clone()<line_sep>img[0].add_(-0.406)<line_sep>img[1].add_(-0.457)<line_sep>img[2].add_(-0.480)<line_sep>imght=img.size(1)<line_sep>imgwidth=img.size(2)<line_sep>upLeft=torch.Tensor((float(self.annot['xmin'][index]) float(self.annot['ymin'][index])))<line_sep>bottomRight=torch.Tensor((float(self.annot['xmax'][index]) float(self.annot['ymax'][index])))<line_sep>ht=bottomRight[1]-upLeft[1]<line_sep>width=bottomRight[0]-upLeft[0]<if_stmt>width<g>100<block_start>scaleRate=0.2<block_end><else_stmt><block_start>scaleRate=0.3<block_end>upLeft[0]=max(0 upLeft[0]-width<times>scaleRate/2)<line_sep>upLeft[1]=max(0 upLeft[1]-ht<times>scaleRate/2)<line_sep>bottomRight[0]=max(min(imgwidth-1 bottomRight[0]+width<times>scaleRate/2) upLeft[0]+5)<line_sep>bottomRight[1]=max(min(imght-1 bottomRight[1]+ht<times>scaleRate/2) upLeft[1]+5)<line_sep>inp=cropBox(img upLeft bottomRight opt.inputResH opt.inputResW)<line_sep>ori_inp=cropBox(ori_img upLeft bottomRight opt.inputResH opt.inputResW)<line_sep>metaData=(upLeft bottomRight ori_inp)<line_sep>box=torch.zeros(4)<line_sep>box[0]=upLeft[0]<line_sep>box[1]=upLeft[1]<line_sep>box[2]=bottomRight[0]<line_sep>box[3]=bottomRight[1]<line_sep><return>inp box imgname metaData<block_end><def_stmt>__len__ self<block_start><return>self.size<block_end><block_end> |
# Copyright (c) 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types

import ovs.daemon
import ovs.dirs
import ovs.util

# Registry of every command exposed by the UnixctlServer, keyed by name.
commands = {}

# types.StringTypes only exists on Python 2; fall back to (str,) so the
# isinstance() checks below keep working on Python 3.
try:
    strtypes = types.StringTypes
except AttributeError:
    strtypes = (str,)


class _UnixctlCommand(object):
    """Record describing a single registered unixctl command."""

    def __init__(self, usage, min_args, max_args, callback, aux):
        self.usage = usage          # one-line argument description for "help"
        self.min_args = min_args    # minimum number of accepted arguments
        self.max_args = max_args    # maximum number of accepted arguments
        self.callback = callback    # called as callback(conn, argv, aux)
        self.aux = aux              # opaque value handed back to callback


def _unixctl_help(conn, unused_argv, unused_aux):
    """Built-in "help" command: reply with a sorted listing of commands."""
    reply = "The available commands are:\n"
    command_names = sorted(commands.keys())
    for name in command_names:
        reply += "  "
        usage = commands[name].usage
        if usage:
            # Pad the name so usage strings line up in a column.
            reply += "%-23s %s" % (name, usage)
        else:
            reply += name
        reply += "\n"
    conn.reply(reply)


def command_register(name, usage, min_args, max_args, callback, aux):
    """ Registers a command with the given 'name' to be exposed by the
    UnixctlServer. 'usage' describes the arguments to the command; it is used
    only for presentation to the user in "help" output.

    'callback' is called when the command is received. It is passed a
    UnixctlConnection object, the list of arguments as unicode strings, and
    'aux'. Normally 'callback' should reply by calling
    UnixctlConnection.reply() or UnixctlConnection.reply_error() before it
    returns, but if the command cannot be handled immediately, then it can
    defer the reply until later. A given connection can only process a single
    request at a time, so a reply must be made eventually to avoid blocking
    that connection."""
    assert isinstance(name, strtypes)
    assert isinstance(usage, strtypes)
    assert isinstance(min_args, int)
    assert isinstance(max_args, int)
    # Accept any callable (functions, bound methods, functools.partial, ...);
    # the previous types.FunctionType check rejected valid callbacks.
    assert callable(callback)

    # First registration wins; duplicate names are silently ignored so that
    # built-ins such as "help" cannot be replaced by later callers.
    if name not in commands:
        commands[name] = _UnixctlCommand(usage, min_args, max_args, callback,
                                         aux)


def socket_name_from_target(target):
    """Resolve 'target' (an absolute control-socket path or a daemon name)
    to a control socket path.

    Returns (0, path) on success, or (errno, error_message) when the
    daemon's pidfile cannot be read.
    """
    assert isinstance(target, strtypes)

    # An absolute path is already a socket name.
    if target.startswith("/"):
        return 0, target

    pidfile_name = "%s/%s.pid" % (ovs.dirs.RUNDIR, target)
    pid = ovs.daemon.read_pidfile(pidfile_name)
    if pid < 0:
        return -pid, "cannot read pidfile \"%s\"" % pidfile_name

    return 0, "%s/%s.%d.ctl" % (ovs.dirs.RUNDIR, target, pid)


command_register("help", "", 0, 0, _unixctl_help, None)
# Generated by Django 3.0.4 on 2020-07-12 15:59

from django.db import migrations, models


class Migration(migrations.Migration):
    """Make user emails unique and backfill 'secret_hash' with random values.

    A temporary SQL function generates the random strings, is applied to every
    row, and is then dropped again so no helper is left in the schema.
    """

    dependencies = [
        ('users', '0016_auto_20200712_1557'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
        # PostgreSQL-only helper: returns a random string of length $1 by
        # picking one character at a time from the alphabet below.
        # NOTE(review): the alphabet has 81 characters but the index is drawn
        # from (random() * 72)::int, i.e. 0..72 after rounding — the last few
        # characters are never chosen, and index 0 yields an empty substring,
        # so the result can be shorter than $1. Confirm whether that was
        # intended before reusing this function elsewhere.
        migrations.RunSQL("""
        CREATE OR REPLACE FUNCTION generate_random_hash(int)
        RETURNS text
        AS $$
        SELECT array_to_string(
        ARRAY (
        SELECT substring(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#*+./:<=>?@[]()^_~'
        FROM (random() * 72)::int FOR 1)
        FROM generate_series(1, $1) ), '' )
        $$ LANGUAGE sql;
        """),
        # Backfill every existing row with a fresh 16-character hash.
        # NOTE(review): raw SQL targets table "users" — Django's default table
        # for a User model in app "users" would be "users_user"; confirm the
        # model sets Meta.db_table = 'users'.
        migrations.RunSQL("""
        update users set secret_hash = generate_random_hash(16);
        """),
        # Drop the helper again; it is only needed during this migration.
        migrations.RunSQL("""
        drop function generate_random_hash(int);
        """),
    ]
# Fibonacci Sequence: 0 1 1 2 3 5 8 13 ...


def fibonacci(num):
    """Return the num-th Fibonacci number (1-indexed: fibonacci(1) == 0,
    fibonacci(2) == 1, fibonacci(3) == 1, ...).

    Iterative O(num) implementation; the previous naive double recursion was
    exponential and recursed forever for num < 1.

    Raises:
        ValueError: if num is less than 1.
    """
    if num < 1:
        raise ValueError("num must be a positive integer (1-indexed)")
    a, b = 0, 1
    # After k steps, a holds the k-th value of the 0-started sequence.
    for _ in range(num - 1):
        a, b = b, a + b
    return a


print(fibonacci(1))
print(fibonacci(2))
print(fibonacci(3))
print(fibonacci(4))
print(fibonacci(5))
<import_from_stmt>.model *<line_sep> |
#-*- coding:UTF-8 -*-
from setuptools import setup, find_packages

from autoremovetorrents.version import __version__
from autoremovetorrents.compatibility.disk_usage_ import SUPPORT_SHUTIL
from autoremovetorrents.compatibility.open_ import open_

# Base runtime dependencies. psutil is only a fallback for disk-usage
# queries when shutil.disk_usage is unavailable, so it is appended
# conditionally instead of embedding an empty-string requirement
# ('' if SUPPORT_SHUTIL else 'psutil'), which some packaging tools
# reject as an invalid requirement specifier.
install_requires = [
    'deluge-client',
    'enum34',
    'ply',
    'pyyaml',
    'requests',
]
if not SUPPORT_SHUTIL:
    install_requires.append('psutil')

setup(
    name='autoremove-torrents',
    version=__version__,
    description='Automatically remove torrents according to your strategies.',
    long_description=open_('README.rst', 'r', encoding='utf-8').read(),
    # Get classifiers from https://pypi.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Utilities',
    ],
    keywords='python autoremove torrent',
    author='jerrymakesjelly',
    author_email='<EMAIL>',
    url='https://github.com/jerrymakesjelly/autoremove-torrents',
    license='MIT',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=True,
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'autoremove-torrents = autoremovetorrents.main:main',
        ],
    },
)
import time
import machine  # NOTE(review): unused in this script — presumably kept for board side effects; confirm

# On-device sanity check of the MicroPython time API on a Kendryte K210 board:
# round-trip localtime()/mktime() and then set the RTC with set_time().
print(time.time())
# Decompose a fixed epoch value into a time tuple...
t1 = time.localtime(546450051)
print('t1', t1)
# ...and recompose it; t2 should equal the original epoch value.
t2 = time.mktime(t1)
print('t2', t2)
print(time.time())
# set_time() is a MicroPython port extension (not part of CPython's time
# module); it sets the system clock from a time tuple.
time.set_time(t1)
print(time.time())
time.sleep(1)
print(time.localtime(time.time()))
'''
raw REPL; CTRL-B to exit
>OK
74
t1 (2017, 4, 25, 15, 40, 51, 1, 115)
t2 546450051
546450065
546450051
(2017, 4, 25, 15, 40, 52, 1, 115)
>
MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210
Type "help()" for more information.
>>>
>>>
'''
<import_from_stmt>.ast_compat ast<import_from_stmt>.extension_register *<import_from_stmt>.extensions template_python<import_from_stmt>.extensions lazy_import<import_stmt>warnings<with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" category=SyntaxWarning)<import_from_stmt>.extensions pattern_matching<block_end><import_from_stmt>.extensions scoped_operators<import_from_stmt>.extensions pipelines<import_from_stmt>.extensions quick_lambdas<line_sep> |
<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>.activations sigmoid HardSwish Swish<import_from_stmt>.utils_i2rnet relu_fn round_filters round_repeats drop_connect get_same_padding_conv2d Conv2dDynamicSamePadding get_model_params efficientnet_params load_pretrained_weights <def_stmt>_make_divisible v divisor min_value=<none><block_start>"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""<if_stmt>min_value<is><none><block_start>min_value=divisor<block_end>new_v=max(min_value int(v+divisor/2)<floordiv>divisor<times>divisor)<line_sep># Make sure that round down does not go down by more than 10%.
<if_stmt>new_v<l>0.9<times>v<block_start>new_v<augadd>divisor<block_end><return>new_v<block_end><class_stmt>MBConvBlock(nn.Module)<block_start>"""
    Mobile Inverted Residual Bottleneck Block (EfficientNet-style MBConv:
    1x1 expand -> depthwise -> squeeze-and-excitation -> 1x1 project).

    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above

    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        # PyTorch's BatchNorm momentum convention is inverted relative to TF.
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Conv2d = nn.Conv2d
        # NOTE(review): an explicit padding= is passed to a same-padding conv
        # below — confirm the factory's conv class accepts/ignores it.
        padding = self._block_args.kernel_size // 2
        # Conv2d = Conv2dDynamicSamePadding

        # Expansion phase: 1x1 conv widens channels by expand_ratio.
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Depthwise convolution phase (groups == channels makes it depthwise).
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise
            kernel_size=k, stride=s, bias=False, padding=padding)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Squeeze and Excitation layer, if desired; squeezed width is derived
        # from the block's *input* filters, not the expanded width.
        if self.has_se:
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Output phase: 1x1 projection back down to output_filters (no
        # activation afterwards — linear bottleneck).
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = relu_fn(self._bn0(self._expand_conv(inputs)))
        x = relu_fn(self._bn1(self._depthwise_conv(x)))

        # Squeeze and Excitation: global-pool, two 1x1 convs, sigmoid gate.
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        x = self._bn2(self._project_conv(x))

        # Skip connection and drop connect: only when shape is preserved.
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x


class I2RConvBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block, "inverted-inverted" (I2R)
    variant: channels are *reduced* by expand_ratio in the middle
    (input_filters // expand_ratio) instead of expanded.

    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above

    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        # PyTorch BatchNorm momentum convention is inverted relative to TF.
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Conv2d = nn.Conv2d
        padding = self._block_args.kernel_size // 2  # NOTE(review): computed but unused in this class

        # Expansion phase (here actually a reduction: // expand_ratio).
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters // self._block_args.expand_ratio  # number of bottleneck channels
        final_oup = self._block_args.output_filters
        self.inp, self.final_oup = inp, final_oup
        self.identity = False
        # NOTE(review): `oup < oup / 6.` is never true for positive oup, so
        # this branch is dead; if it ever ran it would also NameError because
        # `math` is not imported in this module. Possibly meant `inp / 6.`.
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = _make_divisible(oup, 16)
        k = self._block_args.kernel_size
        s = self._block_args.stride[0] if isinstance(self._block_args.stride, list) else self._block_args.stride

        if self._block_args.expand_ratio == 2:
            # Full I2R: dwise -> 1x1 reduce -> 1x1 expand -> strided dwise.
            self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
            self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
            self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
            self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
                                       stride=s, groups=final_oup)
            self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        elif inp != final_oup and s == 1:
            # Channel change, no spatial change: just the two 1x1 convs.
            self._project_conv = None
            self._expand_conv = None
            self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        elif inp != final_oup and s == 2:
            # Channel change plus downsampling: 1x1 convs + strided dwise.
            self._project_conv = None
            self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom,
                                       eps=self._bn_eps)
            self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
            self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
                                       stride=s, groups=final_oup)
            self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        else:  # if inp == final_oup:
            # Identity-shaped block: full stack, eligible for residual skip.
            self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
            self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
            self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
                                       groups=final_oup)
            self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
            # if not (self._block_args.expand_ratio == 2):
            self.identity = True
            self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        x = inputs
        # NOTE: remove the first 3x3 conv to reduce running mem, need to
        # verify the performance (original author's comment).
        if self._project_conv is not None:
            x = relu_fn(self._bn0(self._project_conv(inputs)))
        x = self._bn1(self._linear1(x))          # linear 1x1 (no activation)
        x = relu_fn(self._bn2(self._linear2(x)))
        if self._expand_conv is not None:
            x = self._bn3(self._expand_conv(x))

        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        # Skip connection and drop connect
        # NOTE(review): stride may be a list here while it was normalized to
        # an int (s) in __init__ — confirm `stride == 1` compares as intended.
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x


class MBConvBlockV1(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block, I2R variant packaged as a
    single nn.Sequential (`self.features`) instead of individual layers.

    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above

    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        # PyTorch BatchNorm momentum convention is inverted relative to TF.
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Conv2d = nn.Conv2d

        # Expansion phase (here a reduction: // expand_ratio).
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters // self._block_args.expand_ratio  # bottleneck channels
        final_oup = self._block_args.output_filters
        self.inp, self.final_oup = inp, final_oup
        group_1x1 = 1  # group count for the 1x1 convs (1 == plain conv)
        self.identity = False
        # NOTE(review): `oup < oup / 6.` is never true for positive oup (dead
        # branch); `math` is also not imported at module level.
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = _make_divisible(oup, 16)
        oup = _make_divisible(oup, 2)  # keep bottleneck width even
        k = self._block_args.kernel_size
        s = self._block_args.stride[0] if isinstance(self._block_args.stride, list) else self._block_args.stride
        # if self._block_args.expand_ratio != 1:
        #     self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
        #     self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        if self._block_args.expand_ratio == 2:
            # Full stack: dwise -> 1x1 reduce -> 1x1 expand -> strided dwise.
            self.features = nn.Sequential(
                Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp),
                nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps),
                Swish(),
                # first linear layer
                Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1),
                nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
                # sec linear layer
                Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups=group_1x1),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
                Swish(),
                # expand layer
                Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups=final_oup,
                       stride=s),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
            )
        elif inp != final_oup and s == 1:
            # Channel change, no downsampling.
            self.features = nn.Sequential(
                Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False),
                nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
                # only two linear layers are needed
                Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
                Swish(),
            )
        elif inp != final_oup and s == 2:
            # Channel change plus downsampling.
            self.features = nn.Sequential(
                Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False),
                nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
                Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
                Swish(),
                Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups=final_oup,
                       stride=s),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
            )
        else:
            # Identity-shaped block: eligible for the residual skip.
            self.identity = True
            self.features = nn.Sequential(
                Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp),
                nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps),
                Swish(),
                Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1),
                nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
                Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups=group_1x1),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
                Swish(),
                Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups=final_oup),
                nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
            )

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            se_expand_ratio = 1
            # num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
            num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio
                                               * se_expand_ratio))
            self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution (whole stack in one Sequential).
        x = self.features(inputs)

        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x


class GhostI2RBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block, I2R variant with a Ghost-style
    channel split: after the first depthwise conv, half the channels
    ("ghost" channels) bypass the 1x1 convs and are concatenated back before
    the second depthwise conv.

    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above

    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        # PyTorch BatchNorm momentum convention is inverted relative to TF.
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect
        group_1x1 = 1  # group count for the 1x1 convs (1 == plain conv)

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Conv2d = nn.Conv2d

        # Expansion phase (here a reduction: // expand_ratio).
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters // self._block_args.expand_ratio  # bottleneck channels
        final_oup = self._block_args.output_filters
        self.inp, self.final_oup = inp, final_oup
        self.identity = False
        # NOTE(review): `oup < oup / 6.` is never true for positive oup (dead
        # branch); `math` is also not imported at module level.
        if oup < oup / 6.:
            oup = math.ceil(oup / 6.)
            oup = _make_divisible(oup, 16)
        oup = _make_divisible(oup, 2)  # keep bottleneck width even
        k = self._block_args.kernel_size
        s = self._block_args.stride[0] if isinstance(self._block_args.stride, list) else self._block_args.stride

        # apply repeat scheme: half the input channels go through the 1x1
        # convs, the other half ("ghost" channels) are carried over as-is.
        self.split_ratio = 2
        self.ghost_idx_inp = inp // self.split_ratio
        self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
        self.inp, self.final_oup, self.s = inp, final_oup, s
        # if self._block_args.expand_ratio != 1:
        #     self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
        #     self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        if self._block_args.expand_ratio == 2:
            # Full ghost stack: dwise -> split -> 1x1 reduce/expand -> concat
            # -> strided dwise.
            self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
            self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
            self.act = Swish()
            # first linear layer (only the non-ghost half goes through it)
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            # sec linear layer
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
            self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
            # expand layer
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
                                      groups=final_oup, stride=s)
            self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        elif inp != final_oup and s == 1:
            # Channel change, no downsampling, no ghost split.
            self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            # only two linear layers are needed
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
            self.act = Swish()
        elif inp != final_oup and s == 2:
            # Channel change plus downsampling, no ghost split.
            self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
            self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
            self.act = Swish()
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
                                      groups=final_oup, stride=s)
            self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        else:
            # Identity-shaped ghost block: eligible for the residual skip.
            self.identity = True
            self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
            self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
            self.act = Swish()
            self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False,
                                        groups=group_1x1)
            self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False,
                                       groups=group_1x1)
            self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
            self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
                                      groups=final_oup)
            self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            se_mode = 'small'  # hard-coded: 'large' path kept for experiments
            if se_mode == 'large':
                se_expand_ratio = 0.5
                num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio
                                                   * se_expand_ratio))
            else:
                se_expand_ratio = 1
                num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio
                                                   * self._block_args.se_ratio * se_expand_ratio))
            self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Branch structure mirrors __init__ exactly; the ghost split only
        # exists in the expand_ratio == 2 and identity branches.
        if self._block_args.expand_ratio == 2:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv (ghost half bypasses it)
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # generate more features
            x = torch.cat([x, ghost_id], dim=1)
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        elif self.inp != self.final_oup and self.s == 1:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
        elif self.inp != self.final_oup and self.s == 2:
            # first 1x1 conv
            x = self.bn2(self.project_layer(inputs))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = self.bn4(self.dwise_conv2(x))
        else:
            # first dwise conv
            x = self.act(self.bn1(self.dwise_conv1(inputs)))
            # first 1x1 conv (ghost half bypasses it)
            ghost_id = x[:, self.ghost_idx_inp:, :, :]
            x = self.bn2(self.project_layer(x[:, :self.ghost_idx_inp, :, :]))
            # second 1x1 conv
            x = self.act(self.bn3(self.expand_layer(x)))
            # second dwise conv
            x = torch.cat([x, ghost_id], dim=1)
            x = self.bn4(self.dwise_conv2(x))

        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x


class GhostI2RBlock_change_droppath_pos(nn.Module):
    """
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""<def_stmt>__init__ self block_args global_params<block_start>super().__init__()<line_sep>self._block_args=block_args<line_sep>self._bn_mom=1-global_params.batch_norm_momentum<line_sep>self._bn_eps=global_params.batch_norm_epsilon<line_sep>self.has_se=(self._block_args.se_ratio<is><not><none>)<and>(0<l>self._block_args.se_ratio<le>1)<line_sep>self.id_skip=block_args.id_skip# skip connection and drop connect
group_1x1=1<line_sep>apply_ghost=<true><line_sep># Get static or dynamic convolution depending on image size
Conv2d=get_same_padding_conv2d(image_size=global_params.image_size)<line_sep># Conv2d = nn.Conv2d
# Expansion phase
inp=self._block_args.input_filters# number of input channels
oup=self._block_args.input_filters<floordiv>self._block_args.expand_ratio# number of output channels
final_oup=self._block_args.output_filters<line_sep>self.inp,self.final_oup=inp final_oup<line_sep>self.identity=<false><if_stmt>oup<l>oup/6.<block_start>oup=math.ceil(oup/6.)<line_sep>oup=_make_divisible(oup 16)<block_end>oup=_make_divisible(oup 2)<line_sep>k=self._block_args.kernel_size<line_sep>s=self._block_args.stride[0]<if>isinstance(self._block_args.stride list)<else>self._block_args.stride<if_stmt>apply_ghost# apply repeat scheme
<block_start>self.split_ratio=2<line_sep>self.ghost_idx_inp=inp<floordiv>self.split_ratio<line_sep>self.ghost_idx_oup=int(final_oup-self.ghost_idx_inp)<block_end><else_stmt><block_start>self.ghost_idx_inp=inp<line_sep>self.ghost_idx_oup=final_oup<block_end>self.inp,self.final_oup,self.s=inp final_oup s<line_sep># if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
<if_stmt>self._block_args.expand_ratio<eq>2# self.features = nn.Sequential(
<block_start>self.dwise_conv1=Conv2d(in_channels=inp out_channels=inp kernel_size=k bias=<false> groups=inp)<line_sep>self.bn1=nn.BatchNorm2d(num_features=inp momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep>#first linear layer
self.project_layer=Conv2d(in_channels=self.ghost_idx_inp out_channels=oup kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># sec linear layer
self.expand_layer=Conv2d(in_channels=oup out_channels=self.ghost_idx_oup kernel_size=1 bias=<false>)<line_sep>self.bn3=nn.BatchNorm2d(num_features=self.ghost_idx_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># Swish(),
# expand layer
self.dwise_conv2=Conv2d(in_channels=final_oup out_channels=final_oup kernel_size=k bias=<false> groups=final_oup stride=s)<line_sep>self.bn4=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># )
<block_end><elif_stmt>inp<ne>final_oup<and>s<eq>1# self.features=nn.Sequential(
<block_start>self.project_layer=Conv2d(in_channels=inp out_channels=oup kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># only two linear layers are needed
self.expand_layer=Conv2d(in_channels=oup out_channels=final_oup kernel_size=1 bias=<false> groups=group_1x1)<line_sep>self.bn3=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep># )
<block_end><elif_stmt>inp<ne>final_oup<and>s<eq>2# self.features = nn.Sequential(
<block_start>self.project_layer=Conv2d(in_channels=inp out_channels=oup kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.expand_layer=Conv2d(in_channels=oup out_channels=final_oup kernel_size=1 bias=<false>)<line_sep>self.bn3=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep>self.dwise_conv2=Conv2d(in_channels=final_oup out_channels=final_oup kernel_size=k bias=<false> groups=final_oup stride=s)<line_sep>self.bn4=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># )
<block_end><else_stmt><block_start>self.identity=<true><line_sep># self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp out_channels=inp kernel_size=k bias=<false> groups=inp)<line_sep>self.bn1=nn.BatchNorm2d(num_features=inp momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep>self.project_layer=Conv2d(in_channels=self.ghost_idx_inp out_channels=oup kernel_size=1 bias=<false> groups=group_1x1)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.expand_layer=Conv2d(in_channels=oup out_channels=self.ghost_idx_oup kernel_size=1 bias=<false> groups=group_1x1)<line_sep>self.bn3=nn.BatchNorm2d(num_features=self.ghost_idx_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># Swish(),
self.dwise_conv2=Conv2d(in_channels=final_oup out_channels=final_oup kernel_size=k bias=<false> groups=final_oup)<line_sep>self.bn4=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># )
<block_end># Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
<if_stmt>self.has_se<block_start>se_expand_ratio=0.5<line_sep>num_squeezed_channels=max(1 int(self._block_args.input_filters<times>self._block_args.se_ratio<times>se_expand_ratio))<line_sep># num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce=Conv2d(in_channels=final_oup out_channels=num_squeezed_channels kernel_size=1)<line_sep>self._se_expand=Conv2d(in_channels=num_squeezed_channels out_channels=final_oup kernel_size=1)<block_end># # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
<block_end><def_stmt>forward self inputs drop_connect_rate=<none><block_start>"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""<line_sep># Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
input_filters,output_filters=self._block_args.input_filters self._block_args.output_filters<if_stmt>self._block_args.expand_ratio<eq>2# first dwise conv
<block_start>x=self.act(self.bn1(self.dwise_conv1(inputs)))<line_sep># first 1x1 conv
ghost_id=x[: self.ghost_idx_inp: : :]<line_sep>x=self.bn2(self.project_layer(x[: :self.ghost_idx_inp : :]))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<line_sep># generate more features
x=torch.cat([x ghost_id] dim=1)<if_stmt>self.identity<and>self._block_args.stride<eq>1<and>input_filters<eq>output_filters<block_start><if_stmt>drop_connect_rate<block_start>x=drop_connect(x p=drop_connect_rate training=self.training)<block_end><block_end># second dwise conv
x=self.bn4(self.dwise_conv2(x))<block_end><elif_stmt>self.inp<ne>self.final_oup<and>self.s<eq>1# first 1x1 conv
<block_start>x=self.bn2(self.project_layer(inputs))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<if_stmt>self.identity<and>self._block_args.stride<eq>1<and>input_filters<eq>output_filters<block_start><if_stmt>drop_connect_rate<block_start>x=drop_connect(x p=drop_connect_rate training=self.training)<block_end><block_end><block_end><elif_stmt>self.inp<ne>self.final_oup<and>self.s<eq>2# first 1x1 conv
<block_start>x=self.bn2(self.project_layer(inputs))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<line_sep># second dwise conv
x=self.bn4(self.dwise_conv2(x))<block_end><else_stmt># first dwise conv
<block_start>x=self.act(self.bn1(self.dwise_conv1(inputs)))<line_sep># first 1x1 conv
ghost_id=x[: self.ghost_idx_inp: : :]<line_sep>x=self.bn2(self.project_layer(x[: :self.ghost_idx_inp : :]))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<line_sep># second dwise conv
x=torch.cat([x ghost_id] dim=1)<if_stmt>self.identity<and>self._block_args.stride<eq>1<and>input_filters<eq>output_filters<block_start><if_stmt>drop_connect_rate<block_start>x=drop_connect(x p=drop_connect_rate training=self.training)<block_end><block_end>x=self.bn4(self.dwise_conv2(x))<block_end># Squeeze and Excitation
<if_stmt>self.has_se<block_start>x_squeezed=F.adaptive_avg_pool2d(x 1)<line_sep>x_squeezed=self._se_expand(relu_fn(self._se_reduce(x_squeezed)))<line_sep>x=torch.sigmoid(x_squeezed)<times>x<block_end># Skip connection and drop connect
input_filters,output_filters=self._block_args.input_filters self._block_args.output_filters<if_stmt>self.identity<and>self._block_args.stride<eq>1<and>input_filters<eq>output_filters# if drop_connect_rate:
# x = drop_connect(x, p=drop_connect_rate, training=self.training)
<block_start>x=x+inputs# skip connection
<block_end><return>x<block_end><block_end><class_stmt>NESI2RBlock(nn.Module)<block_start>"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""<def_stmt>__init__ self block_args global_params<block_start>super().__init__()<line_sep>self._block_args=block_args<line_sep>self._bn_mom=1-global_params.batch_norm_momentum<line_sep>self._bn_eps=global_params.batch_norm_epsilon<line_sep>self.has_se=(self._block_args.se_ratio<is><not><none>)<and>(0<l>self._block_args.se_ratio<le>1)<line_sep>self.id_skip=block_args.id_skip# skip connection and drop connect
group_1x1=1<line_sep># Get static or dynamic convolution depending on image size
Conv2d=get_same_padding_conv2d(image_size=global_params.image_size)<line_sep># Conv2d = nn.Conv2d
# Expansion phase
inp=self._block_args.input_filters# number of input channels
oup=self._block_args.input_filters<floordiv>self._block_args.expand_ratio# number of output channels
final_oup=self._block_args.output_filters<line_sep>self.inp,self.final_oup=inp final_oup<line_sep>self.identity=<false><if_stmt>oup<l>oup/6.<block_start>oup=math.ceil(oup/6.)<line_sep>oup=_make_divisible(oup 16)<block_end>oup=_make_divisible(oup 2)<line_sep>k=self._block_args.kernel_size<line_sep>s=self._block_args.stride[0]<if>isinstance(self._block_args.stride list)<else>self._block_args.stride<line_sep># apply repeat scheme
self.split_ratio=2<line_sep>self.nes_idx_inp=inp<floordiv>self.split_ratio<line_sep>self.nes_idx_oup=final_oup<floordiv>self.split_ratio<line_sep>self.inp,self.final_oup,self.s=inp final_oup s<line_sep># if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
<if_stmt>self._block_args.expand_ratio<eq>2# self.features = nn.Sequential(
<block_start>self.dwise_conv1=Conv2d(in_channels=inp out_channels=inp kernel_size=k bias=<false> groups=inp)<line_sep>self.bn1=nn.BatchNorm2d(num_features=inp momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep>#first linear layer
self.project_layer=Conv2d(in_channels=self.nes_idx_inp out_channels=oup kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># sec linear layer
self.expand_layer=Conv2d(in_channels=oup out_channels=self.nes_idx_oup kernel_size=1 bias=<false>)<line_sep>self.bn3=nn.BatchNorm2d(num_features=self.nes_idx_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># Swish(),
# expand layer
self.dwise_conv2=Conv2d(in_channels=final_oup out_channels=final_oup kernel_size=k bias=<false> groups=final_oup stride=s)<line_sep>self.bn4=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># )
<block_end><elif_stmt>inp<ne>final_oup<and>s<eq>1# self.features=nn.Sequential(
<block_start>self.project_layer=Conv2d(in_channels=inp out_channels=oup kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># only two linear layers are needed
self.expand_layer=Conv2d(in_channels=oup out_channels=final_oup kernel_size=1 bias=<false> groups=group_1x1)<line_sep>self.bn3=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep># )
<block_end><elif_stmt>inp<ne>final_oup<and>s<eq>2# self.features = nn.Sequential(
<block_start>self.project_layer=Conv2d(in_channels=inp out_channels=oup kernel_size=1 bias=<false>)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.expand_layer=Conv2d(in_channels=oup out_channels=final_oup kernel_size=1 bias=<false>)<line_sep>self.bn3=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep>self.dwise_conv2=Conv2d(in_channels=final_oup out_channels=final_oup kernel_size=k bias=<false> groups=final_oup stride=s)<line_sep>self.bn4=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># )
<block_end><else_stmt><block_start>self.identity=<true><line_sep># self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp out_channels=inp kernel_size=k bias=<false> groups=inp)<line_sep>self.bn1=nn.BatchNorm2d(num_features=inp momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.act=Swish()<line_sep>self.project_layer=Conv2d(in_channels=self.nes_idx_inp out_channels=oup kernel_size=1 bias=<false> groups=group_1x1)<line_sep>self.bn2=nn.BatchNorm2d(num_features=oup momentum=self._bn_mom eps=self._bn_eps)<line_sep>self.expand_layer=Conv2d(in_channels=oup out_channels=self.nes_idx_oup kernel_size=1 bias=<false> groups=group_1x1)<line_sep>self.bn3=nn.BatchNorm2d(num_features=self.nes_idx_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># Swish(),
self.dwise_conv2=Conv2d(in_channels=final_oup out_channels=final_oup kernel_size=k bias=<false> groups=final_oup)<line_sep>self.bn4=nn.BatchNorm2d(num_features=final_oup momentum=self._bn_mom eps=self._bn_eps)<line_sep># )
<block_end># Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
<if_stmt>self.has_se<block_start>se_expand_ratio=0.5<line_sep>num_squeezed_channels=max(1 int(self._block_args.input_filters<times>self._block_args.se_ratio<times>se_expand_ratio))<line_sep># num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce=Conv2d(in_channels=final_oup out_channels=num_squeezed_channels kernel_size=1)<line_sep>self._se_expand=Conv2d(in_channels=num_squeezed_channels out_channels=final_oup kernel_size=1)<block_end># # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
<block_end><def_stmt>forward self inputs drop_connect_rate=<none><block_start>"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""<line_sep># Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
<if_stmt>self._block_args.expand_ratio<eq>2# first dwise conv
<block_start>x=self.act(self.bn1(self.dwise_conv1(inputs)))<line_sep># first 1x1 conv
nes_x=x[: :self.nes_idx_inp : :]+x[: self.nes_idx_inp: : :]<line_sep>x=self.bn2(self.project_layer(nes_x))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<line_sep># generate more features
x=torch.cat([x x] dim=1)<line_sep># second dwise conv
x=self.bn4(self.dwise_conv2(x))<block_end><elif_stmt>self.inp<ne>self.final_oup<and>self.s<eq>1# first 1x1 conv
<block_start>x=self.bn2(self.project_layer(inputs))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<block_end><elif_stmt>self.inp<ne>self.final_oup<and>self.s<eq>2# first 1x1 conv
<block_start>x=self.bn2(self.project_layer(inputs))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<line_sep># second dwise conv
x=self.bn4(self.dwise_conv2(x))<block_end><else_stmt># first dwise conv
<block_start>x=self.act(self.bn1(self.dwise_conv1(inputs)))<line_sep># first 1x1 conv
nes_x=x[: :self.nes_idx_inp : :]+x[: self.nes_idx_inp: : :]<line_sep>x=self.bn2(self.project_layer(nes_x))<line_sep># second 1x1 conv
x=self.act(self.bn3(self.expand_layer(x)))<line_sep># second dwise conv
x=torch.cat([x x] dim=1)<line_sep>x=self.bn4(self.dwise_conv2(x))<block_end># Squeeze and Excitation
<if_stmt>self.has_se<block_start>x_squeezed=F.adaptive_avg_pool2d(x 1)<line_sep>x_squeezed=self._se_expand(relu_fn(self._se_reduce(x_squeezed)))<line_sep>x=torch.sigmoid(x_squeezed)<times>x<block_end># Skip connection and drop connect
input_filters,output_filters=self._block_args.input_filters self._block_args.output_filters<if_stmt>self.identity<and>self._block_args.stride<eq>1<and>input_filters<eq>output_filters<block_start><if_stmt>drop_connect_rate<block_start>x=drop_connect(x p=drop_connect_rate training=self.training)<block_end>x=x+inputs# skip connection
<block_end><return>x<block_end><block_end><class_stmt>EfficientNet(nn.Module)<block_start>"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""<def_stmt>__init__ self blocks_args=<none> global_params=<none><block_start>super().__init__()<assert_stmt>isinstance(blocks_args list) 'blocks_args should be a list'<assert_stmt>len(blocks_args)<g>0 'block args must be greater than 0'<line_sep>self._global_params=global_params<line_sep>self._blocks_args=blocks_args<line_sep># Get static or dynamic convolution depending on image size
Conv2d=get_same_padding_conv2d(image_size=global_params.image_size)<line_sep># Conv2d = nn.Conv2d
# Batch norm parameters
bn_mom=1-self._global_params.batch_norm_momentum<line_sep>bn_eps=self._global_params.batch_norm_epsilon<line_sep># Stem
in_channels=3# rgb
# NOTE change first filter to be 16 to follow MOBILENETV3
# NOTE change back to 32 for efficientnet series
out_channels=round_filters(32 self._global_params)# number of output channels
self._conv_stem=Conv2d(in_channels out_channels kernel_size=3 stride=2 bias=<false>)<line_sep>self._bn0=nn.BatchNorm2d(num_features=out_channels momentum=bn_mom eps=bn_eps)<line_sep># build_block = NESI2RBlock
build_block=GhostI2RBlock<line_sep># build_block = GhostI2RBlock_change_droppath_pos
# build_block = MBConvBlockV1
# build_block = I2RConvBlock
# Build blocks
self._blocks=nn.ModuleList([])<for_stmt>block_args self._blocks_args# Update block input and output filters based on depth multiplier.
<block_start>block_args=block_args._replace(input_filters=round_filters(block_args.input_filters self._global_params) output_filters=round_filters(block_args.output_filters self._global_params) num_repeat=round_repeats(block_args.num_repeat self._global_params))<line_sep># The first block needs to take care of stride and filter size increase.
self._blocks.append(build_block(block_args self._global_params))<if_stmt>block_args.num_repeat<g>1<block_start>block_args=block_args._replace(input_filters=block_args.output_filters stride=1)<block_end><for_stmt>_ range(block_args.num_repeat-1)<block_start>self._blocks.append(build_block(block_args self._global_params))<block_end><block_end># Head
in_channels=block_args.output_filters# output of final block
out_channels=round_filters(1280 self._global_params)<line_sep># self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1=nn.BatchNorm2d(num_features=out_channels momentum=bn_mom eps=bn_eps)<line_sep># Final linear layer
self._dropout=self._global_params.dropout_rate<line_sep>self._fc=nn.Linear(out_channels self._global_params.num_classes)<block_end><def_stmt>extract_features self inputs<block_start>""" Returns output of the final convolution layer """<line_sep># Stem
x=relu_fn(self._bn0(self._conv_stem(inputs)))<line_sep># Blocks
<for_stmt>idx,block enumerate(self._blocks)<block_start>drop_connect_rate=self._global_params.drop_connect_rate<if_stmt>drop_connect_rate<block_start>drop_connect_rate<augmul>float(idx)/len(self._blocks)<block_end>x=block(x drop_connect_rate=drop_connect_rate)<block_end># Head
# x = relu_fn(self._bn1(self._conv_head(x)))
<return>x<block_end><def_stmt>forward self inputs<block_start>""" Calls extract_features to extract features, applies final linear layer, and returns logits. """<line_sep># Convolution layers
x=self.extract_features(inputs)<line_sep># Pooling and final linear layer
x=F.adaptive_avg_pool2d(x 1).squeeze(-1).squeeze(-1)<if_stmt>self._dropout<block_start>x=F.dropout(x p=self._dropout training=self.training)<block_end>x=self._fc(x)<line_sep><return>x<block_end>@classmethod<def_stmt>from_name cls model_name override_params=<none><block_start>cls._check_model_name_is_valid(model_name)<line_sep>blocks_args,global_params=get_model_params(model_name override_params)<line_sep><return>EfficientNet(blocks_args global_params)<block_end>@classmethod<def_stmt>from_pretrained cls model_name num_classes=1000<block_start>model=EfficientNet.from_name(model_name override_params={'num_classes':num_classes})<line_sep>load_pretrained_weights(model model_name load_fc=(num_classes<eq>1000))<line_sep><return>model<block_end>@classmethod<def_stmt>get_image_size cls model_name<block_start>cls._check_model_name_is_valid(model_name)<line_sep>_,_,res,_=efficientnet_params(model_name)<line_sep><return>res<block_end>@classmethod<def_stmt>_check_model_name_is_valid cls model_name also_need_pretrained_weights=<false><block_start>""" Validates model name. None that pretrained weights are only available for
the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. """<line_sep>num_models=4<if>also_need_pretrained_weights<else>8<line_sep>valid_models=['efficientnet_b'+str(i)<for>i range(num_models)]+['i2rnet_b'+str(i)<for>i range(num_models)]+['mnext_l' 'mnext_s' ' mnext_mbv2_cfg']<if_stmt>model_name.replace('-' '_')<not><in>valid_models<block_start><raise>ValueError('model_name should be one of: '+', '.join(valid_models))<block_end><block_end><block_end><def_stmt>efficient_i2rnet progress=<none> width_mult=1 rm_1x1=<none> interpolation=<none> group_1x1=<none><block_start><return>EfficientNet.from_name('efficientnet-b0')<block_end># class I2RConvBlock_half_id(nn.Module):
# """
# Mobile Inverted Residual Bottleneck Block
# Args:
# block_args (namedtuple): BlockArgs, see above
# global_params (namedtuple): GlobalParam, see above
# Attributes:
# has_se (bool): Whether the block contains a Squeeze and Excitation layer.
# """
# def __init__(self, block_args, global_params):
# super().__init__()
# self._block_args = block_args
# self._bn_mom = 1 - global_params.batch_norm_momentum
# self._bn_eps = global_params.batch_norm_epsilon
# self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
# self.id_skip = block_args.id_skip # skip connection and drop connect
# # Get static or dynamic convolution depending on image size
# Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# # Conv2d = nn.Conv2d
# padding = self._block_args.kernel_size //2
# # Expansion phase
# inp = self._block_args.input_filters # number of input channels
# oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
# final_oup = self._block_args.output_filters
# self.inp, self.final_oup = inp, final_oup
# self.identity = False
# if oup < oup / 6.:
# oup = math.ceil(oup / 6.)
# oup = _make_divisible(oup,16)
# k = self._block_args.kernel_size
# s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# if self._block_args.expand_ratio == 2:
# self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
# self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
# stride = s, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# elif inp != final_oup and s == 1:
# self._project_conv = None
# self._expand_conv = None
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# elif inp != final_oup and s == 2:
# self._project_conv = None
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
# stride = s, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# else:
# # if inp == final_oup:
# self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
# self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# # if not (self._block_args.expand_ratio == 2):
# self.identity = True
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) # Depthwise convolution phase
# # self._depthwise_conv = Conv2d(
# # in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# # kernel_size=k, stride=s, bias=False)
# # self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# # Squeeze and Excitation layer, if desired
# if self.has_se:
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio))
# self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
# self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # # Output phase
# # self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# # self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# def forward(self, inputs, drop_connect_rate=None):
# """
# :param inputs: input tensor
# :param drop_connect_rate: drop connect rate (float, between 0 and 1)
# :return: output of block
# """
# # Expansion and Depthwise Convolution
# # import pdb;pdb.set_trace()
# x = inputs
# # NOTE:remove the first 3x3 conv to reduce running mem, need to verfy the performance
# if self._project_conv is not None:
# x = relu_fn(self._bn0(self._project_conv(inputs)))
# x = self._bn1(self._linear1(x))
# x = relu_fn(self._bn2(self._linear2(x)))
# if self._expand_conv is not None:
# x = self._bn3(self._expand_conv(x))
# # Squeeze and Excitation
# if self.has_se:
# x_squeezed = F.adaptive_avg_pool2d(x, 1)
# x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
# x = torch.sigmoid(x_squeezed) * x
# # Skip connection and drop connect
# input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
# if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# if drop_connect_rate:
# x = drop_connect(x, p=drop_connect_rate, training=self.training)
# shape = inputs.shape
# # shape[1] = shape[1]//2
# id_tensor = torch.cat([inputs[:,:shape[1]//2,:,:],torch.zeros(shape)[:,shape[1]//2:,:,:].cuda()],dim=1)
# x = x + id_tensor
# # import pdb;pdb.set_trace()
# # x = x + inputs # skip connection
# return x
|
<import_from_stmt>minos.cqrs CommandService <import_from_stmt>minos.networks Request Response ResponseException enroute <import_from_stmt>..aggregates PaymentAggregate <class_stmt>PaymentCommandService(CommandService)<block_start>"""PaymentCommandService class."""<def_stmt>validate_card self card_number:str<arrow>bool<block_start><def_stmt>digits_of n<block_start><return>[int(d)<for>d str(n)]<block_end>digits=digits_of(card_number)<line_sep>odd_digits=digits[-1::-2]<line_sep>even_digits=digits[-2::-2]<line_sep>checksum=0<line_sep>checksum<augadd>sum(odd_digits)<for_stmt>d even_digits<block_start>checksum<augadd>sum(digits_of(d<times>2))<block_end>return_value=checksum%10<if_stmt>return_value<eq>0<block_start><return><true><block_end><return><false><block_end>@enroute.broker.command("CreatePayment")<async_keyword><def_stmt>create_payment self request:Request<arrow>Response<block_start>"""Create a new ``Payment`` instance.
:param request: The ``Request`` instance.
:return: A ``Response`` instance.
"""<try_stmt><block_start>content=<await>request.content()<if_stmt>self.validate_card(content["card_number"])<block_start>payment=<await>PaymentAggregate.create(content["card_number"] content["validity"] content["security_code"] content["name"] content["surname"] )<block_end><return>Response({"status":"payment accepted"})<block_end><except_stmt>Exception<as>exc<block_start><raise>ResponseException(f"An error occurred during Payment creation: {exc}")<block_end><block_end><block_end> |
# train.py
# Source: https://github.com/DrGFreeman/rps-cv
#
# MIT License
#
# Copyright (c) 2017-2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This script reads the pre-processed image data and trains the image
# classifier. The trained classifier is stored in a .pkl (pickle) file.
<import_stmt>sys<import_stmt>numpy<as>np<line_sep># Settings:
# Random State
rs=42<line_sep># Classifier output .pkl filename
pklFilename='clf.pkl'<line_sep># Number of folds of Stratified KFold cross-validation
n_splits=5<line_sep># Grid Search parameters
pca__n_components=[40]# Number of components of Principal Component Analysis
clf__gamma=np.logspace(-4 -2 3)# [.0001, .001, .01]
clf__C=np.logspace(0 2 3)# [1, 10, 100]
scoring='f1_micro'<line_sep># The n_jobs parameter controls the number of CPU cores to use in parallel for
# training the machine learning model. Training with a higher number of cores
# will result in faster training time but uses more memory.
#
# If training on a Raspberry Pi 3B or 3B+, due to the limited available memory,
# the following values are recommended as function of the total number of images
# available.
#
# Less than ~700 images: n_jobs=1 (training time* ~35 minutes)
# Less than ~450 images: n_jobs=2 (training time* ~10 minutes)
# Less than ~350 images: n_jobs=3 (training time* ~7 minutes)
# Less than ~280 images: n_jobs=4 (-1) (training time* ~5 minutes)
# * Training times estimates are based on a total of 9 grid-search combinations
# performed on a Raspberry Pi 3 model B+.
#
# NOTE: Ensure the Raspberry Pi has adequate cooling if running on multiple
# CPU cores for extended periods.
#
# If training on a PC with 8+Gb of memory, the n_jobs parameter can be set to
# -1 which will use all available CPU cores. If you run out of memory due to a
# large number of images, reduce the number of CPU cores by ajusting n_jobs.
n_jobs=-1<def_stmt>train nbImg=0 cvScore=<true><block_start><import_stmt>time<line_sep>t0=time.time()<def_stmt>dt <block_start><return>round(time.time()-t0 2)<block_end>print('+{}s: Importing libraries'.format(dt()))<import_stmt>pickle<import_from_stmt>sklearn.pipeline Pipeline<import_from_stmt>sklearn.decomposition PCA<import_from_stmt>sklearn.model_selection StratifiedShuffleSplit<import_from_stmt>sklearn.model_selection StratifiedKFold<import_from_stmt>sklearn.model_selection GridSearchCV<import_from_stmt>sklearn.svm SVC<import_from_stmt>sklearn.metrics f1_score<import_from_stmt>sklearn.metrics confusion_matrix<import_from_stmt>sklearn.metrics classification_report<import_from_stmt>rpscv imgproc<as>imp<import_from_stmt>rpscv utils<line_sep># Generate image data from stored images
print('+{}s: Generating image data'.format(dt()))<line_sep>features,labels=imp.generateGrayFeatures(nbImg=nbImg verbose=<false> rs=rs)<line_sep>unique,count=np.unique(labels return_counts=<true>)<line_sep># Print the number of traning images for each label
<for_stmt>i,label enumerate(unique)<block_start>print(' {}: {} images'.format(utils.gestureTxt[label] count[i]))<block_end># Generate test set
print('+{}s: Generating test set'.format(dt()))<line_sep>sssplit=StratifiedShuffleSplit(n_splits=1 test_size=.15 random_state=rs)<for_stmt>train_index,test_index sssplit.split(features labels)<block_start>features_train=features[train_index]<line_sep>features_test=features[test_index]<line_sep>labels_train=labels[train_index]<line_sep>labels_test=labels[test_index]<block_end># Define pipeline parameters
print('+{}s: Defining pipeline'.format(dt()))<line_sep>steps=[('pca' PCA()) ('clf' SVC(kernel='rbf'))]<line_sep>pipe=Pipeline(steps)<line_sep># Define cross-validation parameters
print('+{}s: Defining cross-validation'.format(dt()))<line_sep>cv=StratifiedKFold(n_splits=n_splits shuffle=<true> random_state=rs)<line_sep># Define grid-search parameters
print('+{}s: Defining grid search'.format(dt()))<line_sep>grid_params=dict(pca__n_components=pca__n_components clf__gamma=clf__gamma clf__C=clf__C)<line_sep>grid=GridSearchCV(pipe grid_params scoring=scoring n_jobs=n_jobs refit=<true> cv=cv verbose=1)<line_sep>print('Grid search parameters:')<line_sep>print(grid)<line_sep># Fit the classifier
t0_train=time.time()<line_sep>print('+{}s: Fitting classifier'.format(dt()))<line_sep>grid.fit(features_train labels_train)<line_sep>dt_train=time.time()-t0_train<if_stmt>cvScore# Print the results of the grid search cross-validation
<block_start>cvres=grid.cv_results_<line_sep>print('Cross-validation results:')<for_stmt>score,std,params zip(cvres['mean_test_score'] cvres['std_test_score'] cvres['params'])<block_start>print(' {}, {}, {}'.format(round(score 4) round(std 5) params))<block_end><block_end># Print the best score and best parameters from the grid-search
print('Grid search best score: {}'.format(grid.best_score_))<line_sep>print('Grid search best parameters:')<for_stmt>key,value grid.best_params_.items()<block_start>print(' {}: {}'.format(key value))<block_end># Validate classifier on test set
print('+{}s: Validating classifier on test set'.format(dt()))<line_sep>pred=grid.predict(features_test)<line_sep>score=f1_score(labels_test pred average='micro')<line_sep>print('Classifier f1-score on test set: {}'.format(score))<line_sep>print('Confusion matrix:')<line_sep>print(confusion_matrix(labels_test pred))<line_sep>print('Classification report:')<line_sep>tn=[utils.gestureTxt[i]<for>i range(3)]<line_sep>print(classification_report(labels_test pred target_names=tn))<line_sep># Write classifier to a .pkl file
print('+{}s: Writing classifier to {}'.format(dt() pklFilename))<with_stmt>open(pklFilename 'wb')<as>f<block_start>f.flush()<line_sep>pickle.dump(grid f)<block_end>print('+{}s: Done!'.format(dt()))<line_sep><return>grid.best_score_ score dt_train<block_end><if_stmt>__name__<eq>'__main__'# Read command line arguments
<block_start>argv=sys.argv<line_sep>cvScore=<true><if_stmt>len(sys.argv)<g>1<block_start><for_stmt>arg argv[1:]<block_start><if_stmt>arg<eq>'--no-cv-score'<block_start>cvScore=<false><block_end><block_end><block_end>train(cvScore=cvScore)<block_end> |
"""Can message."""<import_from_future_stmt> annotations<import_from_stmt>dataclasses dataclass<import_from_stmt>.arbitration_id ArbitrationId<line_sep>@dataclass(frozen=<true>)<class_stmt>CanMessage<block_start>"""A can message."""<line_sep>arbitration_id:ArbitrationId<line_sep>data:bytes<block_end> |
<import_from_stmt>django.db models<class_stmt>Mod(models.Model)<block_start>fld=models.IntegerField()<block_end><class_stmt>SubMod(Mod)<block_start>cnt=models.IntegerField(unique=<true>)<block_end><class_stmt>M2mA(models.Model)<block_start>others=models.ManyToManyField('M2mB')<block_end><class_stmt>M2mB(models.Model)<block_start>fld=models.IntegerField()<block_end> |
<class_stmt>ResultSet(list)<block_start>"""A list like object that holds results from a Unsplash API query."""<block_end><class_stmt>Model(object)<block_start><def_stmt>__init__ self **kwargs<block_start>self._repr_values=["id"]<block_end>@classmethod<def_stmt>parse cls data<block_start>"""Parse a JSON object into a model instance."""<line_sep><raise>NotImplementedError<block_end>@classmethod<def_stmt>parse_list cls data<block_start>"""Parse a list of JSON objects into a result set of model instances."""<line_sep>results=ResultSet()<line_sep>data=data<or>[]<for_stmt>obj data<block_start><if_stmt>obj<block_start>results.append(cls.parse(obj))<block_end><block_end><return>results<block_end><def_stmt>__repr__ self<block_start>items=filter(<lambda>x:x[0]<in>self._repr_values vars(self).items())<line_sep>state=['%s=%s'%(k repr(v))<for>(k v) items]<line_sep><return>'%s(%s)'%(self.__class__.__name__ ', '.join(state))<block_end><block_end><class_stmt>Photo(Model)<block_start>@classmethod<def_stmt>parse cls data<block_start>data=data<or>{}<line_sep>photo=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start><if_stmt><not>value<block_start>setattr(photo key value)<line_sep><continue><block_end><if_stmt>key<eq>"user"<block_start>user=User.parse(value)<line_sep>setattr(photo key user)<block_end><elif_stmt>key<eq>"exif"<block_start>exif=Exif.parse(value)<line_sep>setattr(photo key exif)<block_end><elif_stmt>key<in>["urls" "links"]<block_start>link=Link.parse(value)<line_sep>setattr(photo key link)<block_end><elif_stmt>key<eq>"location"<block_start>location=Location.parse(value)<line_sep>setattr(photo key location)<block_end><else_stmt><block_start>setattr(photo key value)<block_end><block_end><return>photo<block_end><block_end><class_stmt>Exif(Model)<block_start><def_stmt>__init__ self **kwargs<block_start>super(Exif self).__init__(**kwargs)<line_sep>self._repr_values=["make" "model"]<block_end>@classmethod<def_stmt>parse cls 
data<block_start>data=data<or>{}<line_sep>exif=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start>setattr(exif key value)<block_end><return>exif<block_end><block_end><class_stmt>Link(Model)<block_start><def_stmt>__init__ self **kwargs<block_start>super(Link self).__init__(**kwargs)<line_sep>self._repr_values=["html" "raw" "url"]<block_end>@classmethod<def_stmt>parse cls data<block_start>data=data<or>{}<line_sep>link=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start>setattr(link key value)<block_end><return>link<block_end><block_end><class_stmt>Location(Model)<block_start><def_stmt>__init__ self **kwargs<block_start>super(Location self).__init__(**kwargs)<line_sep>self._repr_values=["title"]<block_end>@classmethod<def_stmt>parse cls data<block_start>data=data<or>{}<line_sep>location=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start>setattr(location key value)<block_end><return>location<block_end><block_end><class_stmt>User(Model)<block_start><def_stmt>__init__ self **kwargs<block_start>super(User self).__init__(**kwargs)<line_sep>self._repr_values=["id" "name" "username"]<block_end>@classmethod<def_stmt>parse cls data<block_start>data=data<or>{}<line_sep>user=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start><if_stmt><not>value<block_start>setattr(user key value)<line_sep><continue><block_end><if_stmt>key<in>["links" "profile_image"]<block_start>link=Link.parse(value)<line_sep>setattr(user key link)<block_end><elif_stmt>key<eq>"photos"<block_start>photo=Photo.parse_list(value)<line_sep>setattr(user key photo)<block_end><else_stmt><block_start>setattr(user key value)<block_end><block_end><return>user<block_end><block_end><class_stmt>Stat(Model)<block_start><def_stmt>__init__ self **kwargs<block_start>super(Stat self).__init__(**kwargs)<line_sep>self._repr_values=["total_photos" "photo_downloads"]<block_end>@classmethod<def_stmt>parse cls 
data<block_start>data=data<or>{}<line_sep>stat=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start><if_stmt><not>value<block_start>setattr(stat key value)<line_sep><continue><block_end><if_stmt>key<eq>"links"<block_start>link=Link.parse(value)<line_sep>setattr(stat key link)<block_end><else_stmt><block_start>setattr(stat key value)<block_end><block_end><return>stat<block_end><block_end><class_stmt>Collection(Model)<block_start><def_stmt>__init__ self **kwargs<block_start>super(Collection self).__init__(**kwargs)<line_sep>self._repr_values=["id" "title"]<block_end>@classmethod<def_stmt>parse cls data<block_start>data=data<or>{}<line_sep>collection=cls()<if>data<else><none><for_stmt>key,value data.items()<block_start><if_stmt><not>value<block_start>setattr(collection key value)<line_sep><continue><block_end><if_stmt>key<eq>"cover_photo"<block_start>photo=Photo.parse(value)<line_sep>setattr(collection key photo)<block_end><elif_stmt>key<eq>"user"<block_start>user=User.parse(value)<line_sep>setattr(collection key user)<block_end><elif_stmt>key<eq>"links"<block_start>link=Link.parse(value)<line_sep>setattr(collection key link)<block_end><else_stmt><block_start>setattr(collection key value)<block_end><block_end><return>collection<block_end><block_end> |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the arm_allinea_studio module"""<import_from_future_stmt> unicode_literals<import_from_future_stmt> print_function<import_stmt>logging# pylint: disable=unused-import
<import_stmt>unittest<import_from_stmt>helpers aarch64 centos centos8 docker thunderx2 ubuntu20 ubuntu<import_from_stmt>hpccm.building_blocks.arm_allinea_studio arm_allinea_studio<class_stmt>Test_arm_allinea_studio(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>"""Disable logging output messages"""<line_sep>logging.disable(logging.ERROR)<block_end>@aarch64@ubuntu20@docker<def_stmt>test_defaults_ubuntu self<block_start>"""Default arm_allinea_studio building block"""<line_sep>a=arm_allinea_studio(eula=<true>)<line_sep>self.assertEqual(str(a) r'''# Arm Allinea Studio version 21.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libc6-dev \
lmod \
python \
tar \
tcl \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio/21-1/ACfL/arm-compiler-for-linux_21.1_Ubuntu-20.04_aarch64.tar && \
mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04 && ./arm-compiler-for-linux_21.1_Ubuntu-20.04.sh --install-to /opt/arm --accept && \
rm -rf /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04_aarch64.tar /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')<block_end>@aarch64@centos@docker<def_stmt>test_defaults_centos self<block_start>"""Default arm_allinea_studio building block"""<line_sep>a=arm_allinea_studio(eula=<true>)<line_sep>self.assertEqual(str(a) r'''# Arm Allinea Studio version 21.1
RUN yum install -y epel-release && \
yum install -y \
Lmod \
glibc-devel \
tar \
wget && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio/21-1/ACfL/arm-compiler-for-linux_21.1_RHEL-7_aarch64.tar && \
mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_21.1_RHEL-7_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_21.1_RHEL-7 && ./arm-compiler-for-linux_21.1_RHEL-7.sh --install-to /opt/arm --accept && \
rm -rf /var/tmp/arm-compiler-for-linux_21.1_RHEL-7_aarch64.tar /var/tmp/arm-compiler-for-linux_21.1_RHEL-7
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')<block_end>@aarch64@centos8@docker<def_stmt>test_thunderx2_centos8 self<block_start>"""Default arm_allinea_studio building block"""<line_sep>a=arm_allinea_studio(eula=<true> version='20.3' microarchitectures=['generic' 'thunderx2t99'])<line_sep>self.assertEqual(str(a) r'''# Arm Allinea Studio version 20.3
RUN yum install -y epel-release && \
yum install -y \
Lmod \
glibc-devel \
tar \
wget && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio/20-3/RHEL8/arm-compiler-for-linux_20.3_RHEL-8_aarch64.tar && \
mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64 && ./arm-compiler-for-linux_20.3_RHEL-8.sh --install-to /opt/arm --accept --only-install-microarchitectures=generic,thunderx2t99 && \
rm -rf /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64.tar /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')<block_end>@aarch64@ubuntu@docker<def_stmt>test_eula self<block_start>"""Decline EULA"""<with_stmt>self.assertRaises(RuntimeError)<block_start>a=arm_allinea_studio(eula=<false>)<line_sep>str(a)<block_end><block_end>@aarch64@ubuntu@docker<def_stmt>test_tarball self<block_start>"""tarball"""<line_sep>a=arm_allinea_studio(eula=<true> tarball='arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar')<line_sep>self.assertEqual(str(a) r'''# Arm Allinea Studio version 21.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libc6-dev \
lmod \
python \
tar \
tcl \
wget && \
rm -rf /var/lib/apt/lists/*
COPY arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar /var/tmp
RUN mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04 && ./arm-compiler-for-linux_21.1_Ubuntu-18.04.sh --install-to /opt/arm --accept && \
rm -rf /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')<block_end>@aarch64@centos@docker<def_stmt>test_runtime_centos self<block_start>"""Runtime"""<line_sep>a=arm_allinea_studio(eula=<true>)<line_sep>r=a.runtime()<line_sep>self.assertEqual(r r'''# Arm Allinea Studio
COPY --from=0 /opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libgomp.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libiomp5.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libomp.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libflang.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libflangrti.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/
COPY --from=0 /opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/libamath.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/libamath_dummy.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/libastring.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/
COPY --from=0 /opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/libamath.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/libamath_dummy.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/libastring.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/
ENV LD_LIBRARY_PATH=/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib:/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib:/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib:$LD_LIBRARY_PATH''')<block_end><def_stmt>test_toolchain self<block_start>"""Toolchain"""<line_sep>a=arm_allinea_studio(eula=<true>)<line_sep>tc=a.toolchain<line_sep>self.assertEqual(tc.CC 'armclang')<line_sep>self.assertEqual(tc.CXX 'armclang++')<line_sep>self.assertEqual(tc.FC 'armflang')<line_sep>self.assertEqual(tc.F77 'armflang')<line_sep>self.assertEqual(tc.F90 'armflang')<block_end>@thunderx2<def_stmt>test_toolchain_thunderx2 self<block_start>"""CPU target optimization flags"""<line_sep>a=arm_allinea_studio(eula=<true>)<line_sep>tc=a.toolchain<line_sep>self.assertEqual(tc.CFLAGS '-mcpu=thunderx2t99')<line_sep>self.assertEqual(tc.CXXFLAGS '-mcpu=thunderx2t99')<block_end><block_end> |
<class_stmt>Rect(object)<block_start><def_stmt>__init__ self cx cy width height confidence<block_start>self.cx=cx<line_sep>self.cy=cy<line_sep>self.width=width<line_sep>self.height=height<line_sep>self.confidence=confidence<line_sep>self.true_confidence=confidence<block_end><def_stmt>overlaps self other<block_start><if_stmt>abs(self.cx-other.cx)<g>(self.width+other.width)/1.5<block_start><return><false><block_end><elif_stmt>abs(self.cy-other.cy)<g>(self.height+other.height)/2.0<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>distance self other<block_start><return>sum(map(abs [self.cx-other.cx self.cy-other.cy self.width-other.width self.height-other.height]))<block_end><def_stmt>intersection self other<block_start>left=max(self.cx-self.width/2. other.cx-other.width/2.)<line_sep>right=min(self.cx+self.width/2. other.cx+other.width/2.)<line_sep>width=max(right-left 0)<line_sep>top=max(self.cy-self.height/2. other.cy-other.height/2.)<line_sep>bottom=min(self.cy+self.height/2. other.cy+other.height/2.)<line_sep>height=max(bottom-top 0)<line_sep><return>width<times>height<block_end><def_stmt>area self<block_start><return>self.height<times>self.width<block_end><def_stmt>union self other<block_start><return>self.area()+other.area()-self.intersection(other)<block_end><def_stmt>iou self other<block_start><return>self.intersection(other)/self.union(other)<block_end><def_stmt>__eq__ self other<block_start><return>(self.cx<eq>other.cx<and>self.cy<eq>other.cy<and>self.width<eq>other.width<and>self.height<eq>other.height<and>self.confidence<eq>other.confidence)<block_end><block_end> |
<import_from_stmt>.ReportDaily *<line_sep># Find personal repositories that nonowners are pushing to.
# These repositories should be moved into organizations.
# Only look at active users (not suspended!) and only look at pushes
# of the last 4 weeks.
<class_stmt>ReportReposPersonalNonOwnerPushes(ReportDaily)<block_start><def_stmt>name self<block_start><return>"repositories-personal-nonowner-pushes"<block_end><def_stmt>updateDailyData self<block_start>self.detailedHeader,self.detailedData=self.parseData(self.executeQuery(self.query()))<line_sep>self.header=["date" "personal repositories with nonowner pushes"]<line_sep>self.data.append([str(self.yesterday()) len(self.detailedData)])<line_sep>self.truncateData(self.timeRangeTotal())<line_sep>self.sortDataByDate()<block_end><def_stmt>query self<block_start>fourWeeksAgo=self.daysAgo(28)<line_sep><return>'''
SELECT
CONCAT(users.login, "/", repositories.name) as "repository",
COUNT(DISTINCT(pushes.pusher_id)) as "nonowner pushers"
FROM
repositories
JOIN users ON repositories.owner_id = users.id
JOIN pushes ON pushes.repository_id = repositories.id
WHERE
users.type = "user"
AND users.suspended_at IS NULL
AND CAST(pushes.created_at AS DATE) BETWEEN
"'''+str(fourWeeksAgo)+'''" AND "'''+str(self.yesterday())+'''"
AND pushes.pusher_id != users.id
GROUP BY
repositories.id
ORDER BY
2 DESC, 1'''<block_end><block_end> |
# Copyright 2019 Image Analysis Lab, German Center for Neurodegenerative Diseases (DZNE), Bonn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IMPORTS
<import_stmt>optparse<import_stmt>sys<import_stmt>nibabel.freesurfer.io<as>fs<import_stmt>numpy<as>np<import_stmt>math<import_from_stmt>lapy.DiffGeo tria_mean_curvature_flow<import_from_stmt>lapy.TriaMesh TriaMesh<import_from_stmt>lapy.read_geometry read_geometry<import_from_stmt>lapy.Solver Solver<line_sep>HELPTEXT="""
Script to compute ShapeDNA using linear FEM matrices.
After correcting sign flips, embeds a surface mesh into the spectral domain,
then projects it onto a unit sphere. This is scaled and rotated to match the
atlas used for FreeSurfer surface registion.
USAGE:
spherically_project -i <input_surface> -o <output_surface>
References:
<NAME> et al. Discrete Laplace-Beltrami Operators for Shape Analysis and
Segmentation. Computers & Graphics 33(3):381-390, 2009
Martin Reuter et al. Laplace-Beltrami spectra as "Shape-DNA" of surfaces and
solids Computer-Aided Design 38(4):342-366, 2006
<NAME> at al. High-resolution inter-subject averaging and a coordinate
system for the cortical surface. Human Brain Mapping 8:272-284, 1999
Dependencies:
Python 3.5
Scipy 0.10 or later to solve the generalized eigenvalue problem.
http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
Numpy
http://www.numpy.org
Nibabel to read and write FreeSurfer surface meshes
http://nipy.org/nibabel/
Original Author: <NAME>
Date: Jan-18-2016
"""<line_sep>h_input='path to input surface'<line_sep>h_output='path to ouput surface, spherically projected'<def_stmt>options_parse <block_start>"""
Command line option parser for spherically_project.py
"""<line_sep>parser=optparse.OptionParser(version='$Id: spherically_project,v 1.1 2017/01/30 20:42:08 ltirrell Exp $' usage=HELPTEXT)<line_sep>parser.add_option('--input' '-i' dest='input_surf' help=h_input)<line_sep>parser.add_option('--output' '-o' dest='output_surf' help=h_output)<line_sep>(options args)=parser.parse_args()<if_stmt>options.input_surf<is><none><or>options.output_surf<is><none><block_start>sys.exit('ERROR: Please specify input and output surfaces')<block_end><return>options<block_end><def_stmt>tria_spherical_project tria flow_iter=3 debug=<false><block_start>"""
spherical(tria) computes the first three non-constant eigenfunctions
and then projects the spectral embedding onto a sphere. This works
when the first functions have a single closed zero level set,
splitting the mesh into two domains each. Depending on the original
shape triangles could get inverted. We also flip the functions
according to the axes that they are aligned with for the special
case of brain surfaces in FreeSurfer coordinates.
Inputs: tria : TriaMesh
flow_iter : mean curv flow iterations (3 should be enough)
Outputs: tria : TriaMesh
"""<if_stmt><not>tria.is_closed()<block_start><raise>ValueError('Error: Can only project closed meshes!')<block_end># sub-function to compute flipped area of trias where normal
# points towards origin, meaningful for the sphere, centered at zero
<def_stmt>get_flipped_area tria<block_start>v1=tria.v[tria.t[: 0] :]<line_sep>v2=tria.v[tria.t[: 1] :]<line_sep>v3=tria.v[tria.t[: 2] :]<line_sep>v2mv1=v2-v1<line_sep>v3mv1=v3-v1<line_sep>cr=np.cross(v2mv1 v3mv1)<line_sep>spatvol=np.sum(v1<times>cr axis=1)<line_sep>areas=0.5<times>np.sqrt(np.sum(cr<times>cr axis=1))<line_sep>area=np.sum(areas[np.where(spatvol<l>0)])<line_sep><return>area<block_end>fem=Solver(tria lump=<false>)<line_sep>evals,evecs=fem.eigs(k=4)<if_stmt>debug<block_start>data=dict()<line_sep>data['Eigenvalues']=evals<line_sep>data['Eigenvectors']=evecs<line_sep>data['Creator']='spherically_project.py'<line_sep>data['Refine']=0<line_sep>data['Degree']=1<line_sep>data['Dimension']=2<line_sep>data['Elements']=tria.t.shape[0]<line_sep>data['DoF']=evecs.shape[0]<line_sep>data['NumEW']=4<import_from_stmt>lapy.FuncIO export_ev<line_sep>export_ev(data 'debug.ev')<block_end># flip efuncs to align to coordinates consistently
ev1=evecs[: 1]<line_sep># ev1maxi = np.argmax(ev1)
# ev1mini = np.argmin(ev1)
# cmax = v[ev1maxi,:]
# cmin = v[ev1mini,:]
cmax1=np.mean(tria.v[ev1<g>0.5<times>np.max(ev1) :] 0)<line_sep>cmin1=np.mean(tria.v[ev1<l>0.5<times>np.min(ev1) :] 0)<line_sep>ev2=evecs[: 2]<line_sep>cmax2=np.mean(tria.v[ev2<g>0.5<times>np.max(ev2) :] 0)<line_sep>cmin2=np.mean(tria.v[ev2<l>0.5<times>np.min(ev2) :] 0)<line_sep>ev3=evecs[: 3]<line_sep>cmax3=np.mean(tria.v[ev3<g>0.5<times>np.max(ev3) :] 0)<line_sep>cmin3=np.mean(tria.v[ev3<l>0.5<times>np.min(ev3) :] 0)<line_sep># we trust ev 1 goes from front to back
l11=abs(cmax1[1]-cmin1[1])<line_sep>l21=abs(cmax2[1]-cmin2[1])<line_sep>l31=abs(cmax3[1]-cmin3[1])<if_stmt>l11<l>l21<or>l11<l>l31<block_start>print("ERROR: direction 1 should be (anterior -posterior) but is not!")<line_sep>print(" debug info: {} {} {} ".format(l11 l21 l31))<line_sep># sys.exit(1)
<raise>ValueError('Direction 1 should be anterior - posterior')<block_end># only flip direction if necessary
print("ev1 min: {} max {} ".format(cmin1 cmax1))<line_sep># axis 1 = y is aligned with this function (for brains in FS space)
v1=cmax1-cmin1<if_stmt>cmax1[1]<l>cmin1[1]<block_start>ev1=-1<times>ev1<line_sep>print("inverting direction 1 (anterior - posterior)")<block_end>l1=abs(cmax1[1]-cmin1[1])<line_sep># for ev2 and ev3 there could be also a swap of the two
l22=abs(cmax2[2]-cmin2[2])<line_sep>l32=abs(cmax3[2]-cmin3[2])<line_sep># usually ev2 should be superior inferior, if ev3 is better in that direction, swap
<if_stmt>l22<l>l32<block_start>print("swapping direction 2 and 3")<line_sep>ev2,ev3=ev3 ev2<line_sep>cmax2,cmax3=cmax3 cmax2<line_sep>cmin2,cmin3=cmin3 cmin2<block_end>l23=abs(cmax2[0]-cmin2[0])<line_sep>l33=abs(cmax3[0]-cmin3[0])<if_stmt>l33<l>l23<block_start>print("WARNING: direction 3 wants to swap with 2, but cannot")<block_end>print("ev2 min: {} max {} ".format(cmin2 cmax2))<line_sep># axis 2 = z is aligned with this function (for brains in FS space)
v2=cmax2-cmin2<if_stmt>cmax2[2]<l>cmin2[2]<block_start>ev2=-1<times>ev2<line_sep>print("inverting direction 2 (superior - inferior)")<block_end>l2=abs(cmax2[2]-cmin2[2])<line_sep>print("ev3 min: {} max {} ".format(cmin3 cmax3))<line_sep># axis 0 = x is aligned with this function (for brains in FS space)
v3=cmax3-cmin3<if_stmt>cmax3[0]<l>cmin3[0]<block_start>ev3=-1<times>ev3<line_sep>print("inverting direction 3 (right - left)")<block_end>l3=abs(cmax3[0]-cmin3[0])<line_sep>v1=v1<times>(1.0/np.sqrt(np.sum(v1<times>v1)))<line_sep>v2=v2<times>(1.0/np.sqrt(np.sum(v2<times>v2)))<line_sep>v3=v3<times>(1.0/np.sqrt(np.sum(v3<times>v3)))<line_sep>spatvol=abs(np.dot(v1 np.cross(v2 v3)))<line_sep>print("spat vol: {}".format(spatvol))<line_sep>mvol=tria.volume()<line_sep>print("orig mesh vol {}".format(mvol))<line_sep>bvol=l1<times>l2<times>l3<line_sep>print("box {}, {}, {} volume: {} ".format(l1 l2 l3 bvol))<line_sep>print("box coverage: {}".format(bvol/mvol))<line_sep># we map evN to -1..0..+1 (keep zero level fixed)
# I have the feeling that this helps a little with the stretching
# at the poles, but who knows...
ev1min=np.amin(ev1)<line_sep>ev1max=np.amax(ev1)<line_sep>ev1[ev1<l>0]<augdiv>-ev1min<line_sep>ev1[ev1<g>0]<augdiv>ev1max<line_sep>ev2min=np.amin(ev2)<line_sep>ev2max=np.amax(ev2)<line_sep>ev2[ev2<l>0]<augdiv>-ev2min<line_sep>ev2[ev2<g>0]<augdiv>ev2max<line_sep>ev3min=np.amin(ev3)<line_sep>ev3max=np.amax(ev3)<line_sep>ev3[ev3<l>0]<augdiv>-ev3min<line_sep>ev3[ev3<g>0]<augdiv>ev3max<line_sep># set evec as new coordinates (spectral embedding)
vn=np.empty(tria.v.shape)<line_sep>vn[: 0]=ev3<line_sep>vn[: 1]=ev1<line_sep>vn[: 2]=ev2<line_sep># do a few mean curvature flow euler steps to make more convex
# three should be sufficient
<if_stmt>flow_iter<g>0<block_start>tflow=tria_mean_curvature_flow(TriaMesh(vn tria.t) max_iter=flow_iter)<line_sep>vn=tflow.v<block_end># project to sphere and scaled to have the same scale/origin as FS:
dist=np.sqrt(np.sum(vn<times>vn axis=1))<line_sep>vn=100<times>(vn/dist[: np.newaxis])<line_sep>trianew=TriaMesh(vn tria.t)<line_sep>svol=trianew.area()/(4.0<times>math.pi<times>10000)<line_sep>print("sphere area fraction: {} ".format(svol))<line_sep>flippedarea=get_flipped_area(trianew)/(4.0<times>math.pi<times>10000)<if_stmt>flippedarea<g>0.95<block_start>print("ERROR: global normal flip, exiting ..")<line_sep><raise>ValueError('global normal flip')<block_end>print("flipped area fraction: {} ".format(flippedarea))<if_stmt>svol<l>0.99<block_start>print("ERROR: sphere area fraction should be above .99, exiting ..")<line_sep><raise>ValueError('sphere area fraction should be above .99')<block_end><if_stmt>flippedarea<g>0.0008<block_start>print("ERROR: flipped area fraction should be below .0008, exiting ..")<line_sep><raise>ValueError('flipped area fraction should be below .0008')<block_end># here we finally check also the spat vol (orthogonality of direction vectors)
# we could stop earlier, but most failure cases will be covered by the svol and
# flipped area which can be better interpreted than spatvol
<if_stmt>spatvol<l>0.6<block_start>print("ERROR: spat vol (orthogonality) should be above .6, exiting ..")<line_sep><raise>ValueError('spat vol (orthogonality) should be above .6')<block_end><return>trianew<block_end><def_stmt>spherically_project_surface insurf outsurf<block_start>""" (string) -> None
takes path to insurf, spherically projects it, outputs it to outsurf
"""<line_sep>surf=read_geometry(insurf read_metadata=<true>)<line_sep>projected=tria_spherical_project(TriaMesh(surf[0] surf[1]) flow_iter=3)<line_sep>fs.write_geometry(outsurf projected.v projected.t volume_info=surf[2])<block_end><if_stmt>__name__<eq>"__main__"# Command Line options are error checking done here
<block_start>options=options_parse()<line_sep>surf_to_project=options.input_surf<line_sep>projected_surf=options.output_surf<line_sep>print("Reading in surface: {} ...".format(surf_to_project))<line_sep>spherically_project_surface(surf_to_project projected_surf)<line_sep>print("Outputing spherically projected surface: {}".format(projected_surf))<line_sep>sys.exit(0)<block_end> |
<import_stmt>torch<import_stmt>numpy<import_stmt>cv2<import_stmt>copy<def_stmt>get_img_array imgtensor mean=[0.485 0.456 0.406] std=[0.229 0.224 0.225]<block_start>"""imgtensor: ([C,H,W],device=cuda)
"""<line_sep>denormimg=imgtensor.cpu().permute(1 2 0).mul_(torch.tensor(std)).add_(torch.tensor(mean))<line_sep>imgarray=denormimg.numpy()<line_sep>imgarray=imgarray<times>255<line_sep>imgarray=imgarray.astype('uint8')<line_sep>imgarray=cv2.cvtColor(imgarray cv2.COLOR_RGB2BGR)<line_sep><return>imgarray<block_end><def_stmt>draw_rec_in_img img target<block_start>tl=3# thickness line
tf=max(tl-1 1)# font thickness
color=[0 0 255]# color
tempimg=copy.deepcopy(img)<line_sep>h,w=target['size']<line_sep>labels=target['labels'].cpu()<line_sep>xyxyboxes=target['xyxyboxes'].cpu()<line_sep>denorm_xyxyboxes=xyxyboxes<times>torch.tensor([w h w h])<for_stmt>box,label zip(denorm_xyxyboxes labels)<block_start>c1,c2=(int(box[0]) int(box[1])) (int(box[2]) int(box[3]))<line_sep>cv2.rectangle(tempimg c1 c2 color thickness=tl lineType=cv2.LINE_AA)<line_sep>label=str(int(label))<line_sep>t_size=cv2.getTextSize(label 0 fontScale=tl/3 thickness=tf)[0]<line_sep>c2=c1[0]+t_size[0] c1[1]-t_size[1]-3<line_sep>cv2.rectangle(tempimg c1 c2 color -1 cv2.LINE_AA)# filled
cv2.putText(tempimg label (c1[0] c1[1]-2) 0 tl/3 [225 255 255] thickness=tf lineType=cv2.LINE_AA)<block_end><return>tempimg<block_end><def_stmt>draw_patch_in_img img tgt_patch inputs_size<block_start>tl=1# thickness line
tf=max(tl-1 1)# font thickness
color=[0 255 0]# color
point_size=4<line_sep>point_color=(255 0 0)# BGR
point_thickness=4# 可以为 0 、4、8
tempimg=copy.deepcopy(img)<line_sep>h,w=inputs_size<line_sep>labels=tgt_patch['labels'].cpu()<line_sep>patch_indexs=tgt_patch['patch_index'].cpu()<line_sep>centers=tgt_patch['centers'].cpu()<line_sep>w_num=w<floordiv>16<for_stmt>patch_index,label,center zip(patch_indexs labels centers)<block_start>point=(int(center[0]) int(center[1]))<line_sep>cv2.circle(tempimg point point_size point_color point_thickness)<line_sep>y_start_index=patch_index<floordiv>w_num<line_sep>x_start_index=patch_index-y_start_index<times>w_num<line_sep>x_start=x_start_index<times>16<line_sep>y_start=y_start_index<times>16<line_sep>x_end=x_start+16<line_sep>y_end=y_start+16<line_sep>c1,c2=(int(x_start) int(y_start)) (int(x_end) int(y_end))<line_sep>cv2.rectangle(tempimg c1 c2 color thickness=tl lineType=cv2.LINE_AA)<line_sep>label=str(int(label))<line_sep>t_size=cv2.getTextSize(label 0 fontScale=tl/3 thickness=tf)[0]<line_sep>c2=c1[0]+t_size[0] c1[1]-t_size[1]-3<line_sep>cv2.rectangle(tempimg c1 c2 color -1 cv2.LINE_AA)# filled
cv2.putText(tempimg label (c1[0] c1[1]-2) 0 tl/3 [225 255 255] thickness=tf lineType=cv2.LINE_AA)<block_end><return>tempimg<block_end> |
<import_stmt>tensorflow<as>tf<import_from_stmt>deepsleep.nn *<class_stmt>DeepFeatureNet(object)<block_start><def_stmt>__init__ self batch_size input_dims n_classes is_train reuse_params use_dropout name="deepfeaturenet"<block_start>self.batch_size=batch_size<line_sep>self.input_dims=input_dims<line_sep>self.n_classes=n_classes<line_sep>self.is_train=is_train<line_sep>self.reuse_params=reuse_params<line_sep>self.use_dropout=use_dropout<line_sep>self.name=name<line_sep>self.activations=[]<line_sep>self.layer_idx=1<line_sep>self.monitor_vars=[]<block_end><def_stmt>_build_placeholder self# Input
<block_start>name="x_train"<if>self.is_train<else>"x_valid"<line_sep>self.input_var=tf.compat.v1.placeholder(tf.float32 shape=[self.batch_size self.input_dims 1 1] name=name+"_inputs")<line_sep># Target
self.target_var=tf.compat.v1.placeholder(tf.int32 shape=[self.batch_size ] name=name+"_targets")<block_end><def_stmt>_conv1d_layer self input_var filter_size n_filters stride wd=0<block_start>input_shape=input_var.get_shape()<line_sep>n_batches=input_shape[0].value<line_sep>input_dims=input_shape[1].value<line_sep>n_in_filters=input_shape[3].value<line_sep>name="l{}_conv".format(self.layer_idx)<with_stmt>tf.compat.v1.variable_scope(name)<as>scope<block_start>output=conv_1d(name="conv1d" input_var=input_var filter_shape=[filter_size 1 n_in_filters n_filters] stride=stride bias=<none> wd=wd)<line_sep># # MONITORING
# self.monitor_vars.append(("{}_before_bn".format(name), output))
output=batch_norm_new(name="bn" input_var=output is_train=self.is_train)<line_sep># # MONITORING
# self.monitor_vars.append(("{}_after_bn".format(name), output))
# output = leaky_relu(name="leaky_relu", input_var=output)
output=tf.nn.relu(output name="relu")<block_end>self.activations.append((name output))<line_sep>self.layer_idx<augadd>1<line_sep><return>output<block_end><def_stmt>build_model self input_var# List to store the output of each CNNs
<block_start>output_conns=[]<line_sep>######### CNNs with small filter size at the first layer #########
# Convolution
# network = self._conv1d_layer(input_var=input_var, filter_size=128, n_filters=64, stride=16, wd=1e-3)
network=self._conv1d_layer(input_var=input_var filter_size=50 n_filters=64 stride=6 wd=1e-3)<line_sep># Max pooling
name="l{}_pool".format(self.layer_idx)<line_sep>network=max_pool_1d(name=name input_var=network pool_size=8 stride=8)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Dropout
<if_stmt>self.use_dropout<block_start>name="l{}_dropout".format(self.layer_idx)<if_stmt>self.is_train<block_start>network=tf.nn.dropout(network keep_prob=0.5 name=name)<block_end><else_stmt><block_start>network=tf.nn.dropout(network keep_prob=1.0 name=name)<block_end>self.activations.append((name network))<block_end>self.layer_idx<augadd>1<line_sep># Convolution
network=self._conv1d_layer(input_var=network filter_size=8 n_filters=128 stride=1)<line_sep>network=self._conv1d_layer(input_var=network filter_size=8 n_filters=128 stride=1)<line_sep>network=self._conv1d_layer(input_var=network filter_size=8 n_filters=128 stride=1)<line_sep># Max pooling
name="l{}_pool".format(self.layer_idx)<line_sep>network=max_pool_1d(name=name input_var=network pool_size=4 stride=4)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Flatten
name="l{}_flat".format(self.layer_idx)<line_sep>network=flatten(name=name input_var=network)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep>output_conns.append(network)<line_sep>######### CNNs with large filter size at the first layer #########
# Convolution
# network = self._conv1d_layer(input_var=input_var, filter_size=1024, n_filters=64, stride=128)
network=self._conv1d_layer(input_var=input_var filter_size=400 n_filters=64 stride=50)<line_sep># Max pooling
name="l{}_pool".format(self.layer_idx)<line_sep>network=max_pool_1d(name=name input_var=network pool_size=4 stride=4)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Dropout
<if_stmt>self.use_dropout<block_start>name="l{}_dropout".format(self.layer_idx)<if_stmt>self.is_train<block_start>network=tf.nn.dropout(network keep_prob=0.5 name=name)<block_end><else_stmt><block_start>network=tf.nn.dropout(network keep_prob=1.0 name=name)<block_end>self.activations.append((name network))<block_end>self.layer_idx<augadd>1<line_sep># Convolution
network=self._conv1d_layer(input_var=network filter_size=6 n_filters=128 stride=1)<line_sep>network=self._conv1d_layer(input_var=network filter_size=6 n_filters=128 stride=1)<line_sep>network=self._conv1d_layer(input_var=network filter_size=6 n_filters=128 stride=1)<line_sep># Max pooling
name="l{}_pool".format(self.layer_idx)<line_sep>network=max_pool_1d(name=name input_var=network pool_size=2 stride=2)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Flatten
name="l{}_flat".format(self.layer_idx)<line_sep>network=flatten(name=name input_var=network)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep>output_conns.append(network)<line_sep>######### Aggregate and link two CNNs #########
# Concat
name="l{}_concat".format(self.layer_idx)<line_sep>network=tf.concat(axis=1 values=output_conns name=name)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Dropout
<if_stmt>self.use_dropout<block_start>name="l{}_dropout".format(self.layer_idx)<if_stmt>self.is_train<block_start>network=tf.nn.dropout(network keep_prob=0.5 name=name)<block_end><else_stmt><block_start>network=tf.nn.dropout(network keep_prob=1.0 name=name)<block_end>self.activations.append((name network))<block_end>self.layer_idx<augadd>1<line_sep><return>network<block_end><def_stmt>init_ops self<block_start>self._build_placeholder()<line_sep># Get loss and prediction operations
<with_stmt>tf.compat.v1.variable_scope(self.name)<as>scope# Reuse variables for validation
<block_start><if_stmt>self.reuse_params<block_start>scope.reuse_variables()<block_end># Build model
network=self.build_model(input_var=self.input_var)<line_sep># Softmax linear
name="l{}_softmax_linear".format(self.layer_idx)<line_sep>network=fc(name=name input_var=network n_hiddens=self.n_classes bias=0.0 wd=0)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Outputs of softmax linear are logits
self.logits=network<line_sep>######### Compute loss #########
# Cross-entropy loss
loss=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits labels=self.target_var name="sparse_softmax_cross_entropy_with_logits")<line_sep>loss=tf.reduce_mean(loss name="cross_entropy")<line_sep># Regularization loss
regular_loss=tf.add_n(tf.compat.v1.get_collection("losses" scope=scope.name+"\/") name="regular_loss")<line_sep># print " "
# print "Params to compute regularization loss:"
# for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\/"):
# print p.name
# print " "
# Total loss
self.loss_op=tf.add(loss regular_loss)<line_sep># Predictions
self.pred_op=tf.argmax(self.logits 1)<block_end><block_end><block_end><class_stmt>DeepSleepNet(DeepFeatureNet)<block_start><def_stmt>__init__ self batch_size input_dims n_classes seq_length n_rnn_layers return_last is_train reuse_params use_dropout_feature use_dropout_sequence name="deepsleepnet"<block_start>super(self.__class__ self).__init__(batch_size=batch_size input_dims=input_dims n_classes=n_classes is_train=is_train reuse_params=reuse_params use_dropout=use_dropout_feature name=name)<line_sep>self.seq_length=seq_length<line_sep>self.n_rnn_layers=n_rnn_layers<line_sep>self.return_last=return_last<line_sep>self.use_dropout_sequence=use_dropout_sequence<block_end><def_stmt>_build_placeholder self# Input
<block_start>name="x_train"<if>self.is_train<else>"x_valid"<line_sep>self.input_var=tf.compat.v1.placeholder(tf.float32 shape=[self.batch_size<times>self.seq_length self.input_dims 1 1] name=name+"_inputs")<line_sep># Target
self.target_var=tf.compat.v1.placeholder(tf.int32 shape=[self.batch_size<times>self.seq_length ] name=name+"_targets")<block_end><def_stmt>build_model self input_var# Create a network with superclass method
<block_start>network=super(self.__class__ self).build_model(input_var=self.input_var)<line_sep># Residual (or shortcut) connection
output_conns=[]<line_sep># Fully-connected to select some part of the output to add with the output from bi-directional LSTM
name="l{}_fc".format(self.layer_idx)<with_stmt>tf.compat.v1.variable_scope(name)<as>scope<block_start>output_tmp=fc(name="fc" input_var=network n_hiddens=1024 bias=<none> wd=0)<line_sep>output_tmp=batch_norm_new(name="bn" input_var=output_tmp is_train=self.is_train)<line_sep># output_tmp = leaky_relu(name="leaky_relu", input_var=output_tmp)
output_tmp=tf.nn.relu(output_tmp name="relu")<block_end>self.activations.append((name output_tmp))<line_sep>self.layer_idx<augadd>1<line_sep>output_conns.append(output_tmp)<line_sep>######################################################################
# Reshape the input from (batch_size * seq_length, input_dim) to
# (batch_size, seq_length, input_dim)
name="l{}_reshape_seq".format(self.layer_idx)<line_sep>input_dim=network.get_shape()[-1].value<line_sep>seq_input=tf.reshape(network shape=[-1 self.seq_length input_dim] name=name)<assert_stmt>self.batch_size<eq>seq_input.get_shape()[0].value<line_sep>self.activations.append((name seq_input))<line_sep>self.layer_idx<augadd>1<line_sep># Bidirectional LSTM network
name="l{}_bi_lstm".format(self.layer_idx)<line_sep>hidden_size=512# will output 1024 (512 forward, 512 backward)
<with_stmt>tf.compat.v1.variable_scope(name)<as>scope<block_start><def_stmt>lstm_cell <block_start>cell=tf.compat.v1.nn.rnn_cell.LSTMCell(hidden_size use_peepholes=<true> state_is_tuple=<true> reuse=tf.compat.v1.get_variable_scope().reuse)<if_stmt>self.use_dropout_sequence<block_start>keep_prob=0.5<if>self.is_train<else>1.0<line_sep>cell=tf.compat.v1.nn.rnn_cell.DropoutWrapper(cell output_keep_prob=keep_prob)<block_end><return>cell<block_end>fw_cell=tf.compat.v1.nn.rnn_cell.MultiRNNCell([lstm_cell()<for>_ range(self.n_rnn_layers)] state_is_tuple=<true>)<line_sep>bw_cell=tf.compat.v1.nn.rnn_cell.MultiRNNCell([lstm_cell()<for>_ range(self.n_rnn_layers)] state_is_tuple=<true>)<line_sep># Initial state of RNN
self.fw_initial_state=fw_cell.zero_state(self.batch_size tf.float32)<line_sep>self.bw_initial_state=bw_cell.zero_state(self.batch_size tf.float32)<line_sep># Feedforward to MultiRNNCell
list_rnn_inputs=tf.unstack(seq_input axis=1)<line_sep>#outputs, fw_state, bw_state = tf.nn.bidirectional_rnn(
outputs,fw_state,bw_state=tf.compat.v1.nn.static_bidirectional_rnn(cell_fw=fw_cell cell_bw=bw_cell inputs=list_rnn_inputs initial_state_fw=self.fw_initial_state initial_state_bw=self.bw_initial_state)<if_stmt>self.return_last<block_start>network=outputs[-1]<block_end><else_stmt><block_start>network=tf.reshape(tf.concat(axis=1 values=outputs) [-1 hidden_size<times>2] name=name)<block_end>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep>self.fw_final_state=fw_state<line_sep>self.bw_final_state=bw_state<block_end># Append output
output_conns.append(network)<line_sep>######################################################################
# Add
name="l{}_add".format(self.layer_idx)<line_sep>network=tf.add_n(output_conns name=name)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Dropout
<if_stmt>self.use_dropout_sequence<block_start>name="l{}_dropout".format(self.layer_idx)<if_stmt>self.is_train<block_start>network=tf.nn.dropout(network keep_prob=0.5 name=name)<block_end><else_stmt><block_start>network=tf.nn.dropout(network keep_prob=1.0 name=name)<block_end>self.activations.append((name network))<block_end>self.layer_idx<augadd>1<line_sep><return>network<block_end><def_stmt>init_ops self<block_start>self._build_placeholder()<line_sep># Get loss and prediction operations
<with_stmt>tf.compat.v1.variable_scope(self.name)<as>scope# Reuse variables for validation
<block_start><if_stmt>self.reuse_params<block_start>scope.reuse_variables()<block_end># Build model
network=self.build_model(input_var=self.input_var)<line_sep># Softmax linear
name="l{}_softmax_linear".format(self.layer_idx)<line_sep>network=fc(name=name input_var=network n_hiddens=self.n_classes bias=0.0 wd=0)<line_sep>self.activations.append((name network))<line_sep>self.layer_idx<augadd>1<line_sep># Outputs of softmax linear are logits
self.logits=network<line_sep>######### Compute loss #########
# Weighted cross-entropy loss for a sequence of logits (per example)
loss=tf.contrib.legacy_seq2seq.sequence_loss_by_example([self.logits] [self.target_var] [tf.ones([self.batch_size<times>self.seq_length])] name="sequence_loss_by_example")<line_sep>loss=tf.reduce_sum(loss)/self.batch_size<line_sep># Regularization loss
regular_loss=tf.add_n(tf.compat.v1.get_collection("losses" scope=scope.name+"\/") name="regular_loss")<line_sep># print " "
# print "Params to compute regularization loss:"
# for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\/"):
# print p.name
# print " "
# Total loss
self.loss_op=tf.add(loss regular_loss)<line_sep># Predictions
self.pred_op=tf.argmax(self.logits 1)<block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
<import_stmt>copy<import_from_stmt>gitlint.tests.base BaseTestCase<import_from_stmt>gitlint.config LintConfig LintConfigBuilder LintConfigError<import_from_stmt>gitlint rules<class_stmt>LintConfigBuilderTests(BaseTestCase)<block_start><def_stmt>test_set_option self<block_start>config_builder=LintConfigBuilder()<line_sep>config=config_builder.build()<line_sep># assert some defaults
self.assertEqual(config.get_rule_option('title-max-length' 'line-length') 72)<line_sep>self.assertEqual(config.get_rule_option('body-max-line-length' 'line-length') 80)<line_sep>self.assertListEqual(config.get_rule_option('title-must-not-contain-word' 'words') ["WIP"])<line_sep>self.assertEqual(config.verbosity 3)<line_sep># Make some changes and check blueprint
config_builder.set_option('title-max-length' 'line-length' 100)<line_sep>config_builder.set_option('general' 'verbosity' 2)<line_sep>config_builder.set_option('title-must-not-contain-word' 'words' ["foo" "bar"])<line_sep>expected_blueprint={'title-must-not-contain-word':{'words':['foo' 'bar']} 'title-max-length':{'line-length':100} 'general':{'verbosity':2}}<line_sep>self.assertDictEqual(config_builder._config_blueprint expected_blueprint)<line_sep># Build config and verify that the changes have occurred and no other changes
config=config_builder.build()<line_sep>self.assertEqual(config.get_rule_option('title-max-length' 'line-length') 100)<line_sep>self.assertEqual(config.get_rule_option('body-max-line-length' 'line-length') 80)# should be unchanged
self.assertListEqual(config.get_rule_option('title-must-not-contain-word' 'words') ["foo" "bar"])<line_sep>self.assertEqual(config.verbosity 2)<block_end><def_stmt>test_set_from_commit_ignore_all self<block_start>config=LintConfig()<line_sep>original_rules=config.rules<line_sep>original_rule_ids=[rule.id<for>rule original_rules]<line_sep>config_builder=LintConfigBuilder()<line_sep># nothing gitlint
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint\nfoo"))<line_sep>config=config_builder.build()<line_sep>self.assertSequenceEqual(config.rules original_rules)<line_sep>self.assertListEqual(config.ignore [])<line_sep># ignore all rules
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore: all\nfoo"))<line_sep>config=config_builder.build()<line_sep>self.assertEqual(config.ignore original_rule_ids)<line_sep># ignore all rules, no space
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore:all\nfoo"))<line_sep>config=config_builder.build()<line_sep>self.assertEqual(config.ignore original_rule_ids)<line_sep># ignore all rules, more spacing
config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore: \t all\nfoo"))<line_sep>config=config_builder.build()<line_sep>self.assertEqual(config.ignore original_rule_ids)<block_end><def_stmt>test_set_from_commit_ignore_specific self# ignore specific rules
<block_start>config_builder=LintConfigBuilder()<line_sep>config_builder.set_config_from_commit(self.gitcommit("tëst\ngitlint-ignore: T1, body-hard-tab"))<line_sep>config=config_builder.build()<line_sep>self.assertEqual(config.ignore ["T1" "body-hard-tab"])<block_end><def_stmt>test_set_from_config_file self# regular config file load, no problems
<block_start>config_builder=LintConfigBuilder()<line_sep>config_builder.set_from_config_file(self.get_sample_path("config/gitlintconfig"))<line_sep>config=config_builder.build()<line_sep># Do some assertions on the config
self.assertEqual(config.verbosity 1)<line_sep>self.assertFalse(config.debug)<line_sep>self.assertFalse(config.ignore_merge_commits)<line_sep>self.assertIsNone(config.extra_path)<line_sep>self.assertEqual(config.ignore ["title-trailing-whitespace" "B2"])<line_sep>self.assertEqual(config.get_rule_option('title-max-length' 'line-length') 20)<line_sep>self.assertEqual(config.get_rule_option('body-max-line-length' 'line-length') 30)<block_end><def_stmt>test_set_from_config_file_negative self<block_start>config_builder=LintConfigBuilder()<line_sep># bad config file load
foo_path=self.get_sample_path("föo")<line_sep>expected_error_msg=f"Invalid file path: {foo_path}"<with_stmt>self.assertRaisesMessage(LintConfigError expected_error_msg)<block_start>config_builder.set_from_config_file(foo_path)<block_end># error during file parsing
path=self.get_sample_path("config/no-sections")<line_sep>expected_error_msg="File contains no section headers."<line_sep># We only match the start of the message here, since the exact message can vary depending on platform
<with_stmt>self.assertRaisesRegex(LintConfigError expected_error_msg)<block_start>config_builder.set_from_config_file(path)<block_end># non-existing rule
path=self.get_sample_path("config/nonexisting-rule")<line_sep>config_builder=LintConfigBuilder()<line_sep>config_builder.set_from_config_file(path)<line_sep>expected_error_msg="No such rule 'föobar'"<with_stmt>self.assertRaisesMessage(LintConfigError expected_error_msg)<block_start>config_builder.build()<block_end># non-existing general option
path=self.get_sample_path("config/nonexisting-general-option")<line_sep>config_builder=LintConfigBuilder()<line_sep>config_builder.set_from_config_file(path)<line_sep>expected_error_msg="'foo' is not a valid gitlint option"<with_stmt>self.assertRaisesMessage(LintConfigError expected_error_msg)<block_start>config_builder.build()<block_end># non-existing option
path=self.get_sample_path("config/nonexisting-option")<line_sep>config_builder=LintConfigBuilder()<line_sep>config_builder.set_from_config_file(path)<line_sep>expected_error_msg="Rule 'title-max-length' has no option 'föobar'"<with_stmt>self.assertRaisesMessage(LintConfigError expected_error_msg)<block_start>config_builder.build()<block_end># invalid option value
path=self.get_sample_path("config/invalid-option-value")<line_sep>config_builder=LintConfigBuilder()<line_sep>config_builder.set_from_config_file(path)<line_sep>expected_error_msg="'föo' is not a valid value for option 'title-max-length.line-length'. "+"Option 'line-length' must be a positive integer (current value: 'föo')."<with_stmt>self.assertRaisesMessage(LintConfigError expected_error_msg)<block_start>config_builder.build()<block_end><block_end><def_stmt>test_set_config_from_string_list self<block_start>config=LintConfig()<line_sep># change and assert changes
config_builder=LintConfigBuilder()<line_sep>config_builder.set_config_from_string_list(['general.verbosity=1' 'title-max-length.line-length=60' 'body-max-line-length.line-length=120' "title-must-not-contain-word.words=håha"])<line_sep>config=config_builder.build()<line_sep>self.assertEqual(config.get_rule_option('title-max-length' 'line-length') 60)<line_sep>self.assertEqual(config.get_rule_option('body-max-line-length' 'line-length') 120)<line_sep>self.assertListEqual(config.get_rule_option('title-must-not-contain-word' 'words') ["håha"])<line_sep>self.assertEqual(config.verbosity 1)<block_end><def_stmt>test_set_config_from_string_list_negative self<block_start>config_builder=LintConfigBuilder()<line_sep># assert error on incorrect rule - this happens at build time
config_builder.set_config_from_string_list(["föo.bar=1"])<with_stmt>self.assertRaisesMessage(LintConfigError "No such rule 'föo'")<block_start>config_builder.build()<block_end># no equal sign
expected_msg="'föo.bar' is an invalid configuration option. Use '<rule>.<option>=<value>'"<with_stmt>self.assertRaisesMessage(LintConfigError expected_msg)<block_start>config_builder.set_config_from_string_list(["föo.bar"])<block_end># missing value
expected_msg="'föo.bar=' is an invalid configuration option. Use '<rule>.<option>=<value>'"<with_stmt>self.assertRaisesMessage(LintConfigError expected_msg)<block_start>config_builder.set_config_from_string_list(["föo.bar="])<block_end># space instead of equal sign
expected_msg="'föo.bar 1' is an invalid configuration option. Use '<rule>.<option>=<value>'"<with_stmt>self.assertRaisesMessage(LintConfigError expected_msg)<block_start>config_builder.set_config_from_string_list(["föo.bar 1"])<block_end># no period between rule and option names
expected_msg="'föobar=1' is an invalid configuration option. Use '<rule>.<option>=<value>'"<with_stmt>self.assertRaisesMessage(LintConfigError expected_msg)<block_start>config_builder.set_config_from_string_list([u'föobar=1'])<block_end><block_end><def_stmt>test_rebuild_config self# normal config build
<block_start>config_builder=LintConfigBuilder()<line_sep>config_builder.set_option('general' 'verbosity' 3)<line_sep>lint_config=config_builder.build()<line_sep>self.assertEqual(lint_config.verbosity 3)<line_sep># check that existing config gets overwritten when we pass it to a configbuilder with different options
existing_lintconfig=LintConfig()<line_sep>existing_lintconfig.verbosity=2<line_sep>lint_config=config_builder.build(existing_lintconfig)<line_sep>self.assertEqual(lint_config.verbosity 3)<line_sep>self.assertEqual(existing_lintconfig.verbosity 3)<block_end><def_stmt>test_clone self<block_start>config_builder=LintConfigBuilder()<line_sep>config_builder.set_option('general' 'verbosity' 2)<line_sep>config_builder.set_option('title-max-length' 'line-length' 100)<line_sep>expected={'title-max-length':{'line-length':100} 'general':{'verbosity':2}}<line_sep>self.assertDictEqual(config_builder._config_blueprint expected)<line_sep># Clone and verify that the blueprint is the same as the original
cloned_builder=config_builder.clone()<line_sep>self.assertDictEqual(cloned_builder._config_blueprint expected)<line_sep># Modify the original and make sure we're not modifying the clone (i.e. check that the copy is a deep copy)
config_builder.set_option('title-max-length' 'line-length' 120)<line_sep>self.assertDictEqual(cloned_builder._config_blueprint expected)<block_end><def_stmt>test_named_rules self# Store a copy of the default rules from the config, so we can reference it later
<block_start>config_builder=LintConfigBuilder()<line_sep>config=config_builder.build()<line_sep>default_rules=copy.deepcopy(config.rules)<line_sep>self.assertEqual(default_rules config.rules)# deepcopy should be equal
# Add a named rule by setting an option in the config builder that follows the named rule pattern
# Assert that whitespace in the rule name is stripped
rule_qualifiers=[u'T7:my-extra-rüle' u' T7 : my-extra-rüle ' u'\tT7:\tmy-extra-rüle\t' u'T7:\t\n \tmy-extra-rüle\t\n\n' "title-match-regex:my-extra-rüle"]<for_stmt>rule_qualifier rule_qualifiers<block_start>config_builder=LintConfigBuilder()<line_sep>config_builder.set_option(rule_qualifier 'regex' "föo")<line_sep>expected_rules=copy.deepcopy(default_rules)<line_sep>my_rule=rules.TitleRegexMatches({'regex':"föo"})<line_sep>my_rule.id=rules.TitleRegexMatches.id+":my-extra-rüle"<line_sep>my_rule.name=rules.TitleRegexMatches.name+":my-extra-rüle"<line_sep>expected_rules._rules[u'T7:my-extra-rüle']=my_rule<line_sep>self.assertEqual(config_builder.build().rules expected_rules)<line_sep># assert that changing an option on the newly added rule is passed correctly to the RuleCollection
# we try this with all different rule qualifiers to ensure they all are normalized and map
# to the same rule
<for_stmt>other_rule_qualifier rule_qualifiers<block_start>cb=config_builder.clone()<line_sep>cb.set_option(other_rule_qualifier 'regex' other_rule_qualifier+"bōr")<line_sep># before setting the expected rule option value correctly, the RuleCollection should be different
self.assertNotEqual(cb.build().rules expected_rules)<line_sep># after setting the option on the expected rule, it should be equal
my_rule.options['regex'].set(other_rule_qualifier+"bōr")<line_sep>self.assertEqual(cb.build().rules expected_rules)<line_sep>my_rule.options['regex'].set("wrong")<block_end><block_end><block_end><def_stmt>test_named_rules_negative self# T7 = title-match-regex
# Invalid rule name
<block_start><for_stmt>invalid_name ["" " " " " "\t" "\n" "å b" "å:b" "åb:" ":åb"]<block_start>config_builder=LintConfigBuilder()<line_sep>config_builder.set_option(f"T7:{invalid_name}" 'regex' "tëst")<line_sep>expected_msg=f"The rule-name part in 'T7:{invalid_name}' cannot contain whitespace, colons or be empty"<with_stmt>self.assertRaisesMessage(LintConfigError expected_msg)<block_start>config_builder.build()<block_end><block_end># Invalid parent rule name
config_builder=LintConfigBuilder()<line_sep>config_builder.set_option("Ž123:foöbar" "fåke-option" "fåke-value")<with_stmt>self.assertRaisesMessage(LintConfigError "No such rule 'Ž123' (named rule: 'Ž123:foöbar')")<block_start>config_builder.build()<block_end># Invalid option name (this is the same as with regular rules)
config_builder=LintConfigBuilder()<line_sep>config_builder.set_option("T7:foöbar" "blå" "my-rëgex")<with_stmt>self.assertRaisesMessage(LintConfigError "Rule 'T7:foöbar' has no option 'blå'")<block_start>config_builder.build()<block_end><block_end><block_end> |
"""This module implements normal imputation with constant unit variance single imputation
via the NormUnitVarianceImputer.
The NormUnitVarianceImputer imputes missing data assuming that the
single column is normally distributed with a-priori known constant unit
variance. Use SingleImputer or MultipleImputer with strategy=`norm_const_variance`
to broadcast the strategy across all the columns in a dataframe,
or specify this strategy for a given column.
"""<import_from_stmt>scipy stats<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>sklearn.utils.validation check_is_fitted<import_from_stmt>autoimpute.imputations method_names<import_from_stmt>autoimpute.imputations.errors _not_num_series<import_from_stmt>.base ISeriesImputer<line_sep>methods=method_names<line_sep># pylint:disable=attribute-defined-outside-init
# pylint:disable=unnecessary-pass
<class_stmt>NormUnitVarianceImputer(ISeriesImputer)<block_start>"""Impute missing values assuming normally distributed
data with unknown mean and *known* variance.
"""<line_sep># class variables
strategy=methods.NORM_UNIT_VARIANCE<def_stmt>__init__ self<block_start>"""Create an instance of the NormUnitVarianceImputer class."""<line_sep><pass><block_end><def_stmt>fit self X y<block_start>"""Fit the Imputer to the dataset and calculate the mean.
Args:
X (pd.Series): Dataset to fit the imputer.
y (None): ignored, None to meet requirements of base class
Returns:
self. Instance of the class.
"""<line_sep>_not_num_series(self.strategy X)<line_sep>mu=X.mean()# mean of observed data
self.statistics_={"param":mu "strategy":self.strategy}<line_sep><return>self<block_end><def_stmt>impute self X<block_start>"""Perform imputations using the statistics generated from fit.
The impute method handles the actual imputation. Missing values
in a given dataset are replaced with the respective mean from fit.
Args:
X (pd.Series): Dataset to impute missing data from fit.
Returns:
np.array -- imputed dataset.
"""<line_sep># check if fitted then impute with mean
check_is_fitted(self "statistics_")<line_sep>_not_num_series(self.strategy X)<line_sep>omu=self.statistics_["param"]# mean of observed data
idx=X.isnull()# missing data
nO=sum(~idx)# number of observed
m=sum(idx)# number to impute
muhatk=stats.norm(omu np.sqrt(1/nO))<line_sep># imputation cross-terms *NOT* uncorrelated
Ymi=stats.multivariate_normal(np.ones(m)<times>muhatk.rvs() np.ones((m m))/nO+np.eye(m)).rvs()<line_sep>out=X.copy()<line_sep>out[idx]=Ymi<line_sep><return>out<block_end><def_stmt>fit_impute self X y=<none><block_start>"""Convenience method to perform fit and imputation in one go."""<line_sep><return>self.fit(X y).impute(X)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>autoimpute.imputations SingleImputer<line_sep>si=SingleImputer('normal unit variance')<line_sep>Yo=stats.norm(0 1).rvs(100)<line_sep>df=pd.DataFrame(columns=['Yo'] index=range(200) dtype=float)<line_sep>df.loc[range(100) 'Yo']=Yo<line_sep>si.fit_transform(df)<block_end> |
<import_from_stmt>carbon.conf Settings<class_stmt>TestSettings(Settings)<block_start><def_stmt>readFrom *args **kwargs<block_start><pass><block_end><block_end> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.