# Leo colorizer control file for bbj mode.
# This file is in the public domain.
# Properties for bbj mode.
properties={"commentEnd":"*/" "commentStart":"/*" "wordBreakChars":",+-=<>/?^&*" }<line_sep># Attributes dict for bbj_main ruleset.
bbj_main_attributes_dict={"default":"null" "digit_re":"" "escape":"\\" "highlight_digits":"true" "ignore_case":"true" "no_word_sep":"" }<line_sep># Dictionary of attributes dictionaries for bbj mode.
attributesDictDict={"bbj_main":bbj_main_attributes_dict }<line_sep># Keywords dict for bbj_main ruleset.
bbj_main_keywords_dict={"abs":"keyword1" "addr":"keyword3" "adjn":"keyword1" "all":"keyword3" "argc":"keyword1" "argv":"keyword1" "asc":"keyword1" "ath":"keyword1" "atn":"keyword1" "auto":"keyword3" "background":"keyword1" "begin":"keyword3" "bin":"keyword1" "break":"keyword3" "bsz":"keyword1" "call":"keyword3" "callback":"keyword1" "case":"keyword3" "chanopt":"keyword1" "chdir":"keyword2" "chn":"keyword3" "chr":"keyword1" "cisam":"keyword2" "clear":"keyword3" "clipclear":"keyword1" "clipfromfile":"keyword1" "clipfromstr":"keyword1" "clipisformat":"keyword1" "cliplock":"keyword1" "clipregformat":"keyword1" "cliptofile":"keyword1" "cliptostr":"keyword1" "clipunlock":"keyword1" "close":"keyword2" "continue":"keyword2" "cos":"keyword1" "cpl":"keyword1" "crc":"keyword1" "crc16":"keyword1" "ctl":"keyword3" "ctrl":"keyword1" "cvs":"keyword1" "cvt":"keyword1" "data":"keyword3" "date":"keyword1" "day":"keyword3" "dec":"keyword1" "def":"keyword3" "default":"keyword3" "defend":"keyword3" "delete":"keyword3" "dim":"keyword3" "dims":"keyword1" "dir":"keyword2" "direct":"keyword2" "disable":"keyword2" "dom":"keyword2" "dread":"keyword3" "drop":"keyword3" "dsk":"keyword1" "dsz":"keyword1" "dump":"keyword2" "edit":"keyword3" "else":"keyword3" "enable":"keyword2" "end":"keyword2" "endif":"keyword3" "endtrace":"keyword2" "enter":"keyword3" "ept":"keyword1" "erase":"keyword2" "err":"keyword3" "errmes":"keyword1" "escape":"keyword3" "escoff":"keyword3" "escon":"keyword3" "execute":"keyword3" "exit":"keyword3" "exitto":"keyword3" "extract":"keyword2" "fattr":"keyword1" "fbin":"keyword1" "fdec":"keyword1" "fi":"keyword3" "fid":"keyword2" "field":"keyword1" "file":"keyword2" "fileopt":"keyword1" "fill":"keyword1" "fin":"keyword2" "find":"keyword2" "floatingpoint":"keyword1" "for":"keyword3" "fpt":"keyword1" "from":"keyword2" "gap":"keyword1" "gosub":"keyword3" "goto":"keyword3" "hsa":"keyword1" "hsh":"keyword1" "hta":"keyword1" "if":"keyword3" "iff":"keyword3" "imp":"keyword1" "ind":"keyword2" "indexed":"keyword2" "info":"keyword1" "initfile":"keyword3" "input":"keyword2" "inpute":"keyword2" "inputn":"keyword2" "int":"keyword1" "iol":"keyword2" "iolist":"keyword2" "ior":"keyword3" "jul":"keyword1" "key":"keyword2" "keyf":"keyword2" "keyl":"keyword2" "keyn":"keyword2" "keyp":"keyword2" "kgen":"keyword2" "knum":"keyword2" "lcheckin":"keyword1" "lcheckout":"keyword1" "len":"keyword1" "let":"keyword3" "linfo":"keyword1" "list":"keyword2" "load":"keyword2" "lock":"keyword2" "log":"keyword1" "lrc":"keyword1" "lst":"keyword1" "mask":"keyword1" "max":"keyword1" "menuinfo":"keyword1" "merge":"keyword2" "min":"keyword1" "mkdir":"keyword2" "mkeyed":"keyword2" "mod":"keyword1" "msgbox":"keyword1" "neval":"keyword1" "next":"keyword3" "nfield":"keyword1" "not":"keyword3" "notice":"keyword1" "noticetpl":"keyword1" "num":"keyword1" "on":"keyword3" "open":"keyword2" "opts":"keyword3" "or":"keyword3" "pad":"keyword1" "pck":"keyword1" "pfx":"keyword3" "pgm":"keyword1" "pos":"keyword1" "precision":"keyword3" "prefix":"keyword2" "print":"keyword2" "process_events":"keyword1" "program":"keyword1" "psz":"keyword1" "pub":"keyword1" "read":"keyword2" "read_resource":"keyword2" "record":"keyword2" "release":"keyword3" "remove":"keyword2" "remove_callback":"keyword1" "rename":"keyword2" "renum":"keyword3" "repeat":"keyword3" "resclose":"keyword2" "reserve":"keyword1" "reset":"keyword3" "resfirst":"keyword2" "resget":"keyword2" "resinfo":"keyword2" "resnext":"keyword2" "resopen":"keyword2" "restore":"keyword3" "retry":"keyword3" 
"return":"keyword3" "rev":"keyword2" "rmdir":"keyword2" "rnd":"keyword1" "round":"keyword1" "run":"keyword3" "save":"keyword2" "scall":"keyword1" "select":"keyword2" "sendmsg":"keyword1" "serial":"keyword2" "set_case_sensitive_off":"keyword3" "set_case_sensitive_on":"keyword3" "setday":"keyword2" "setdrive":"keyword2" "seterr":"keyword3" "setesc":"keyword3" "setopts":"keyword3" "settime":"keyword3" "settrace":"keyword2" "seval":"keyword1" "sgn":"keyword1" "sin":"keyword1" "siz":"keyword2" "sort":"keyword2" "sqlchn":"keyword2" "sqlclose":"keyword2" "sqlerr":"keyword2" "sqlexec":"keyword2" "sqlfetch":"keyword2" "sqllist":"keyword2" "sqlopen":"keyword2" "sqlprep":"keyword2" "sqlset":"keyword2" "sqltables":"keyword2" "sqltmpl":"keyword2" "sqlunt":"keyword2" "sqr":"keyword1" "ssn":"keyword3" "ssort":"keyword1" "ssz":"keyword1" "start":"keyword3" "stbl":"keyword1" "step":"keyword3" "stop":"keyword3" "str":"keyword1" "string":"keyword2" "swap":"keyword1" "swend":"keyword3" "switch":"keyword3" "sys":"keyword1" "table":"keyword2" "tbl":"keyword2" "tcb":"keyword1" "then":"keyword3" "tim":"keyword2" "tmpl":"keyword1" "to":"keyword3" "tsk":"keyword1" "unlock":"keyword2" "unt":"keyword3" "until":"keyword3" "upk":"keyword1" "wait":"keyword3" "wend":"keyword3" "where":"keyword2" "while":"keyword3" "winfirst":"keyword1" "wininfo":"keyword1" "winnext":"keyword1" "write":"keyword2" "xfid":"keyword2" "xfile":"keyword2" "xfin":"keyword2" "xor":"keyword3" }<line_sep># Dictionary of keywords dictionaries for bbj mode.
keywordsDictDict={"bbj_main":bbj_main_keywords_dict }<line_sep># Rules for bbj_main ruleset.
<def_stmt>bbj_rule0 colorer s i<block_start><return>colorer.match_span(s i kind="comment1" begin="/*" end="*/" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false> no_escape=<false> no_line_break=<false> no_word_break=<false>)<block_end><def_stmt>bbj_rule1 colorer s i<block_start><return>colorer.match_span(s i kind="literal1" begin="\"" end="\"" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false> no_escape=<false> no_line_break=<true> no_word_break=<false>)<block_end><def_stmt>bbj_rule2 colorer s i<block_start><return>colorer.match_eol_span(s i kind="comment2" seq="//" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false>)<block_end><def_stmt>bbj_rule3 colorer s i<block_start><return>colorer.match_eol_span(s i kind="comment2" seq="REM" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="" exclude_match=<false>)<block_end><def_stmt>bbj_rule4 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="=" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule5 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq=">=" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule6 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="<=" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule7 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="+" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule8 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="-" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule9 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="/" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule10 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="*" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule11 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq=">" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule12 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="<" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule13 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="<>" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule14 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="^" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule15 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="and" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule16 colorer s i<block_start><return>colorer.match_seq(s i kind="operator" seq="or" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> delegate="")<block_end><def_stmt>bbj_rule17 colorer s 
i<block_start><return>colorer.match_mark_previous(s i kind="label" pattern=":" at_line_start=<true> at_whitespace_end=<false> at_word_start=<false> exclude_match=<true>)<block_end><def_stmt>bbj_rule18 colorer s i<block_start><return>colorer.match_mark_previous(s i kind="function" pattern="(" at_line_start=<false> at_whitespace_end=<false> at_word_start=<false> exclude_match=<true>)<block_end><def_stmt>bbj_rule19 colorer s i<block_start><return>colorer.match_keywords(s i)<block_end># Rules dict for bbj_main ruleset.
rulesDict1={"\"":[bbj_rule1 ] "(":[bbj_rule18 ] "*":[bbj_rule10 ] "+":[bbj_rule7 ] "-":[bbj_rule8 ] "/":[bbj_rule0 bbj_rule2 bbj_rule9 ] "0":[bbj_rule19 ] "1":[bbj_rule19 ] "2":[bbj_rule19 ] "3":[bbj_rule19 ] "4":[bbj_rule19 ] "5":[bbj_rule19 ] "6":[bbj_rule19 ] "7":[bbj_rule19 ] "8":[bbj_rule19 ] "9":[bbj_rule19 ] ":":[bbj_rule17 ] "<":[bbj_rule6 bbj_rule12 bbj_rule13 ] "=":[bbj_rule4 ] ">":[bbj_rule5 bbj_rule11 ] "@":[bbj_rule19 ] "A":[bbj_rule19 ] "B":[bbj_rule19 ] "C":[bbj_rule19 ] "D":[bbj_rule19 ] "E":[bbj_rule19 ] "F":[bbj_rule19 ] "G":[bbj_rule19 ] "H":[bbj_rule19 ] "I":[bbj_rule19 ] "J":[bbj_rule19 ] "K":[bbj_rule19 ] "L":[bbj_rule19 ] "M":[bbj_rule19 ] "N":[bbj_rule19 ] "O":[bbj_rule19 ] "P":[bbj_rule19 ] "Q":[bbj_rule19 ] "R":[bbj_rule3 bbj_rule19 ] "S":[bbj_rule19 ] "T":[bbj_rule19 ] "U":[bbj_rule19 ] "V":[bbj_rule19 ] "W":[bbj_rule19 ] "X":[bbj_rule19 ] "Y":[bbj_rule19 ] "Z":[bbj_rule19 ] "^":[bbj_rule14 ] "_":[bbj_rule19 ] "a":[bbj_rule15 bbj_rule19 ] "b":[bbj_rule19 ] "c":[bbj_rule19 ] "d":[bbj_rule19 ] "e":[bbj_rule19 ] "f":[bbj_rule19 ] "g":[bbj_rule19 ] "h":[bbj_rule19 ] "i":[bbj_rule19 ] "j":[bbj_rule19 ] "k":[bbj_rule19 ] "l":[bbj_rule19 ] "m":[bbj_rule19 ] "n":[bbj_rule19 ] "o":[bbj_rule16 bbj_rule19 ] "p":[bbj_rule19 ] "q":[bbj_rule19 ] "r":[bbj_rule19 ] "s":[bbj_rule19 ] "t":[bbj_rule19 ] "u":[bbj_rule19 ] "v":[bbj_rule19 ] "w":[bbj_rule19 ] "x":[bbj_rule19 ] "y":[bbj_rule19 ] "z":[bbj_rule19 ] }<line_sep># x.rulesDictDict for bbj mode.
rulesDictDict={"bbj_main":rulesDict1 }<line_sep># Import dict for bbj mode.
importDict={}<line_sep> |
<import_from_stmt>aiogram types<import_from_stmt>.dataset PHOTO<line_sep>photo=types.PhotoSize(**PHOTO)<def_stmt>test_export <block_start>exported=photo.to_python()<assert_stmt>isinstance(exported dict)<assert_stmt>exported<eq>PHOTO<block_end><def_stmt>test_file_id <block_start><assert_stmt>isinstance(photo.file_id str)<assert_stmt>photo.file_id<eq>PHOTO['file_id']<block_end><def_stmt>test_file_size <block_start><assert_stmt>isinstance(photo.file_size int)<assert_stmt>photo.file_size<eq>PHOTO['file_size']<block_end><def_stmt>test_size <block_start><assert_stmt>isinstance(photo.width int)<assert_stmt>isinstance(photo.height int)<assert_stmt>photo.width<eq>PHOTO['width']<assert_stmt>photo.height<eq>PHOTO['height']<block_end> |
<def_stmt>__residuumSign self<block_start><if_stmt>self.outcome<eq>0<block_start><return>-1<block_end><else_stmt><block_start><return>1<block_end><block_end> |
# Generated by Django 3.1 on 2020-08-13 02:38
<import_from_stmt>django.db migrations models<import_stmt>django.utils.timezone<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("core" "0011_auto_20200214_1939") ]<line_sep>operations=[migrations.AlterField(model_name="note" name="time" field=models.DateTimeField(default=django.utils.timezone.now verbose_name="Time") ) ]<block_end> |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<import_stmt>json<import_stmt>unittest<import_from_stmt>blinkpy.web_tests.layout_package json_results_generator<class_stmt>JSONGeneratorTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.builder_name='DUMMY_BUILDER_NAME'<line_sep>self.build_number='DUMMY_BUILDER_NUMBER'<line_sep># For archived results.
self._json=<none><line_sep>self._num_runs=0<line_sep>self._tests_set=set([])<line_sep>self._test_timings={}<line_sep>self._failed_count_map={}<line_sep>self._PASS_count=0<line_sep>self._DISABLED_count=0<line_sep>self._FLAKY_count=0<line_sep>self._FAILS_count=0<line_sep>self._fixable_count=0<block_end><def_stmt>test_strip_json_wrapper self<block_start>json="['contents']"<line_sep>self.assertEqual(json_results_generator.strip_json_wrapper(json_results_generator._JSON_PREFIX+json+json_results_generator._JSON_SUFFIX) json)<line_sep>self.assertEqual(json_results_generator.strip_json_wrapper(json) json)<block_end><def_stmt>test_test_timings_trie self<block_start>individual_test_timings=[]<line_sep>individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html' elapsed_time=1.2))<line_sep>individual_test_timings.append(json_results_generator.TestResult('bar.html' elapsed_time=0.0001))<line_sep>trie=json_results_generator.test_timings_trie(individual_test_timings)<line_sep>expected_trie={'bar.html':0 'foo':{'bar':{'baz.html':1200 }}}<line_sep>self.assertEqual(json.dumps(trie) json.dumps(expected_trie))<block_end><block_end> |
<import_from_stmt>abc abstractmethod<import_stmt>numpy<as>np<import_from_stmt>pymoo.core.population Population<line_sep># ---------------------------------------------------------------------------------------------------------
# Survival
# ---------------------------------------------------------------------------------------------------------
<class_stmt>Survival<block_start><def_stmt>__init__ self filter_infeasible=<true><block_start>super().__init__()<line_sep>self.filter_infeasible=filter_infeasible<block_end><def_stmt>do self problem pop *args n_survive=<none> return_indices=<false> **kwargs# make sure the population has at least one individual
<block_start><if_stmt>len(pop)<eq>0<block_start><return>pop<block_end><if_stmt>n_survive<is><none><block_start>n_survive=len(pop)<block_end>n_survive=min(n_survive len(pop))<line_sep># if the split should be done beforehand
<if_stmt>self.filter_infeasible<and>problem.n_constr<g>0# split feasible and infeasible solutions
<block_start>feas,infeas=split_by_feasibility(pop eps=0.0 sort_infeasbible_by_cv=<true>)<if_stmt>len(feas)<eq>0<block_start>survivors=Population()<block_end><else_stmt><block_start>survivors=self._do(problem pop[feas] *args n_survive=min(len(feas) n_survive) **kwargs)<block_end># calculate how many individuals are still remaining to be filled up with infeasible ones
n_remaining=n_survive-len(survivors)<line_sep># if infeasible solutions needs to be added
<if_stmt>n_remaining<g>0<block_start>survivors=Population.merge(survivors pop[infeas[:n_remaining]])<block_end><block_end><else_stmt><block_start>survivors=self._do(problem pop *args n_survive=n_survive **kwargs)<block_end><if_stmt>return_indices<block_start>H={}<for_stmt>k,ind enumerate(pop)<block_start>H[ind]=k<block_end><return>[H[survivor]<for>survivor survivors]<block_end><else_stmt><block_start><return>survivors<block_end><block_end>@abstractmethod<def_stmt>_do self problem pop *args n_survive=<none> **kwargs<block_start><pass><block_end><block_end><def_stmt>split_by_feasibility pop eps=0.0 sort_infeasbible_by_cv=<true><block_start>CV=pop.get("CV")<line_sep>b=(CV<le>eps)<line_sep>feasible=np.where(b)[0]<line_sep>infeasible=np.where(~b)[0]<if_stmt>sort_infeasbible_by_cv<block_start>infeasible=infeasible[np.argsort(CV[infeasible 0])]<block_end><return>feasible infeasible<block_end><def_stmt>calc_adapt_eps pop<block_start>cv=pop.get("CV")[: 0]<line_sep>cv_mean=np.median(cv)<line_sep>fr=(cv<le>0).sum()/len(cv)<line_sep><return>cv_mean<times>fr<block_end> |
"""Create, modify, and delete columns
See source https://github.com/tidyverse/dplyr/blob/master/R/mutate.R
"""<import_from_stmt>typing Any Tuple List Union<import_from_stmt>pandas DataFrame Series<import_from_stmt>pipda register_verb evaluate_expr ContextBase<import_from_stmt>pipda.utils CallingEnvs<import_from_stmt>..core.contexts Context ContextEval<import_from_stmt>..core.utils dedup_name recycle_value arg_match df_setitem name_mutatable_args reconstruct_tibble <import_from_stmt>..core.defaults DEFAULT_COLUMN_PREFIX<import_from_stmt>..core.grouped DataFrameGroupBy<import_from_stmt>..base setdiff union intersect c NA<import_from_stmt>.group_by group_by_drop_default<import_from_stmt>.group_data group_vars group_data<import_from_stmt>.relocate relocate<line_sep>@register_verb(DataFrame context=Context.PENDING extra_contexts={"_before":Context.SELECT "_after":Context.SELECT} )<def_stmt>mutate _data:DataFrame *args:Any _keep:str="all" _before:Union[int str]=<none> _after:Union[int str]=<none> base0_:bool=<none> **kwargs:Any <arrow>DataFrame<block_start>"""Adds new variables and preserves existing ones
The original API:
https://dplyr.tidyverse.org/reference/mutate.html
Args:
_data: A data frame
_keep: allows you to control which columns from _data are retained
in the output:
- "all", the default, retains all variables.
- "used" keeps any variables used to make new variables;
it's useful for checking your work as it displays inputs and
outputs side-by-side.
- "unused" keeps only existing variables not used to make new
variables.
- "none", only keeps grouping keys (like transmute()).
_before: and
_after: Optionally, control where new columns should appear
(the default is to add to the right hand side).
See relocate() for more details.
base0_: Whether `_before` and `_after` are 0-based if given by indexes.
If not provided, will use `datar.base.get_option('index.base.0')`
*args: and
**kwargs: Name-value pairs. The name gives the name of the column
in the output. The value can be:
- A vector of length 1, which will be recycled to the correct
length.
- A vector the same length as the current group (or the whole
data frame if ungrouped).
- None to remove the column
Returns:
An object of the same type as _data. The output has the following
properties:
- Rows are not affected.
- Existing columns will be preserved according to the _keep
argument. New columns will be placed according to the
_before and _after arguments. If _keep = "none"
(as in transmute()), the output order is determined only
by ..., not the order of existing columns.
- Columns given value None will be removed
- Groups will be recomputed if a grouping variable is mutated.
- Data frame attributes are preserved.
"""<line_sep>keep=arg_match(_keep "_keep" ["all" "unused" "used" "none"])<line_sep>context=ContextEval()<line_sep>cols,removed=_mutate_cols(_data context *args **kwargs)<if_stmt>cols<is><none><block_start>cols=DataFrame(index=_data.index)<block_end>out=_data.copy()<line_sep># order is the same as _data
out[cols.columns.tolist()]=cols<line_sep># out.columns.difference(removed)
# changes column order when removed == []
out=out[setdiff(out.columns removed __calling_env=CallingEnvs.REGULAR)]<if_stmt>_before<is><not><none><or>_after<is><not><none><block_start>new=setdiff(cols.columns _data.columns __calling_env=CallingEnvs.REGULAR )<line_sep>out=relocate(out *new _before=_before _after=_after base0_=base0_ __calling_env=CallingEnvs.REGULAR )<block_end><if_stmt>keep<eq>"all"<block_start>keep=out.columns<block_end><elif_stmt>keep<eq>"unused"<block_start>used=context.used_refs.keys()<line_sep>unused=setdiff(_data.columns used __calling_env=CallingEnvs.REGULAR )<line_sep>keep=intersect(out.columns c(group_vars(_data __calling_env=CallingEnvs.REGULAR) unused cols.columns ) __calling_env=CallingEnvs.REGULAR )<block_end><elif_stmt>keep<eq>"used"<block_start>used=context.used_refs.keys()<line_sep>keep=intersect(out.columns c(group_vars(_data __calling_env=CallingEnvs.REGULAR) used cols.columns ) __calling_env=CallingEnvs.REGULAR )<block_end><else_stmt># keep == 'none':
<block_start>keep=union(setdiff(group_vars(_data __calling_env=CallingEnvs.REGULAR) cols.columns __calling_env=CallingEnvs.REGULAR ) intersect(cols.columns out.columns __calling_env=CallingEnvs.REGULAR ) __calling_env=CallingEnvs.REGULAR )<block_end>out=out[keep]<line_sep><return>out.loc[[] :]<if>len(_data)<eq>0<else>out<block_end>@mutate.register(DataFrameGroupBy context=Context.PENDING)<def_stmt>_ _data:DataFrameGroupBy *args:Any _keep:str="all" _before:str=<none> _after:str=<none> base0_:bool=<none> **kwargs:Any <arrow>DataFrameGroupBy<block_start>"""Mutate on DataFrameGroupBy object"""<def_stmt>apply_func df<block_start><if_stmt>isinstance(df Series)<block_start>df=df.to_frame().T<line_sep>index=df.attrs["_group_index"]=df.index[0]<line_sep>df.attrs["_group_data"]=_data._group_data<line_sep>rows=[index]<block_end><else_stmt><block_start>index=df.attrs["_group_index"]<line_sep>rows=df.attrs["_group_data"].loc[index "_rows"]<block_end>ret=mutate(df.reset_index(drop=<true>) *args _keep=_keep _before=_before _after=_after base0_=base0_ __calling_env=CallingEnvs.REGULAR **kwargs )<line_sep>ret.index=rows<line_sep><return>ret<block_end>out=_data._datar_apply(apply_func _drop_index=<false>).sort_index()<if_stmt>out.shape[0]<g>0# keep the original row order
# out.sort_index(inplace=True)
# not only DataFrameGroupBy but also DataFrameRowwise
<block_start><return>reconstruct_tibble(_data out keep_rowwise=<true>)<block_end># 0-row
named=name_mutatable_args(*args **kwargs)<line_sep>df=DataFrame({key:[]<for>key named})<line_sep>out=_data.copy()<line_sep>out[df.columns.tolist()]=df<line_sep><return>_data.__class__(out _group_vars=group_vars(_data __calling_env=CallingEnvs.REGULAR) _group_drop=group_by_drop_default(_data) _group_data=group_data(_data __calling_env=CallingEnvs.REGULAR) )<block_end>@register_verb(DataFrame context=Context.PENDING)<def_stmt>transmute _data:DataFrame *args:Any _before:Union[int str]=<none> _after:Union[int str]=<none> base0_:bool=<none> **kwargs:Any <arrow>DataFrame<block_start>"""Mutate with _keep='none'
See Also:
[`mutate()`](datar.dplyr.mutate.mutate).
"""<line_sep><return>mutate(_data *args _keep="none" _before=_before _after=_after base0_=base0_ __calling_env=CallingEnvs.REGULAR **kwargs )<block_end><def_stmt>_mutate_cols _data:DataFrame _context:ContextBase *args:Any **kwargs:Any <arrow>Tuple[DataFrame List[str]]<block_start>"""Mutate columns"""<if_stmt><not>args<and><not>kwargs<block_start><return><none> []<block_end>_data=_data.copy()<line_sep>named_mutatables=name_mutatable_args(*args **kwargs)<line_sep>new_columns=[]<line_sep>removed=[]<line_sep>add_new_name=<true><for_stmt>name,mutatable named_mutatables.items()<block_start>ddp_name=dedup_name(name list(named_mutatables))<line_sep># if not a dedup name, it's a new name
add_new_name=ddp_name<eq>name<line_sep>mutatable=evaluate_expr(mutatable _data _context)<if_stmt>mutatable<is><none><block_start><if_stmt>ddp_name<in>_data<block_start>removed.append(ddp_name)<line_sep>_data.drop(columns=[ddp_name] inplace=<true>)<block_end># be silent if name doesn't exist
<continue><block_end><if_stmt>isinstance(mutatable DataFrame)<block_start><if_stmt>mutatable.shape[1]<eq>0<and><not>ddp_name.startswith(DEFAULT_COLUMN_PREFIX)<block_start>_data=df_setitem(_data ddp_name [NA]<times>max(mutatable.shape[0] 1))<if_stmt>add_new_name<block_start>new_columns.append(ddp_name)<block_end><block_end><else_stmt><block_start><for_stmt>col mutatable.columns<block_start>new_name=(col<if>ddp_name.startswith(DEFAULT_COLUMN_PREFIX)<else>f"{ddp_name}${col}")<line_sep>coldata=recycle_value(mutatable[col] _data.shape[0] ddp_name)<line_sep>_data=df_setitem(_data new_name coldata)<if_stmt>add_new_name<block_start>new_columns.append(new_name)<block_end><block_end><block_end><block_end><else_stmt><block_start>mutatable=recycle_value(mutatable _data.shape[0] ddp_name)<line_sep>_data=df_setitem(_data ddp_name mutatable)<if_stmt>add_new_name<block_start>new_columns.append(ddp_name)<block_end><block_end><block_end># keep column order
<return>_data[new_columns] removed<block_end> |
"""
Copyright (c) 2016, <NAME>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
<NAME>. April, 2018.
email: <EMAIL>
LIVIA Department, ETS, Montreal.
"""<import_stmt>numpy<as>np<import_stmt>pdb<line_sep># If you are not using nifti files you can comment this line
<import_stmt>nibabel<as>nib<import_stmt>scipy.io<as>sio<import_from_stmt>ImgOperations.imgOp applyPadding<line_sep># ----- Loader for nifti files ------ #
<def_stmt>load_nii imageFileName printFileNames<block_start><if_stmt>printFileNames<eq><true><block_start>print(" ... Loading file: {}".format(imageFileName))<block_end>img_proxy=nib.load(imageFileName)<line_sep>imageData=img_proxy.get_data()<line_sep><return>(imageData img_proxy)<block_end><def_stmt>release_nii_proxy img_proxy<block_start>img_proxy.uncache()<block_end># ----- Loader for matlab format ------- #
# Very important: All the volumes should have been saved as 'vol'.
# Otherwise, change its name here
<def_stmt>load_matlab imageFileName printFileNames<block_start><if_stmt>printFileNames<eq><true><block_start>print(" ... Loading file: {}".format(imageFileName))<block_end>mat_contents=sio.loadmat(imageFileName)<line_sep>imageData=mat_contents['vol']<line_sep><return>(imageData)<block_end>""" It loads the images (CT/MRI + Ground Truth + ROI) for the patient image Idx"""<def_stmt>load_imagesSinglePatient imageIdx imageNames imageNames_Bottom groundTruthNames roiNames applyPaddingBool receptiveField sampleSizes imageType<block_start><if_stmt>imageIdx<ge>len(imageNames)<block_start>print(" ERROR!!!!! : The image index specified is greater than images array size....)")<line_sep>exit(1)<block_end># --- Load image data (CT/MRI/...) ---
printFileNames=<false># Get this from config.ini
imageFileName=imageNames[imageIdx]<if_stmt>imageType<eq>0<block_start>[imageData img_proxy]=load_nii(imageFileName printFileNames)<block_end><else_stmt><block_start>imageData=load_matlab(imageFileName printFileNames)<block_end><if_stmt>applyPaddingBool<eq><true><block_start>[imageData paddingValues]=applyPadding(imageData sampleSizes receptiveField)<block_end><else_stmt><block_start>paddingValues=((0 0) (0 0) (0 0))<block_end><if_stmt>len(imageData.shape)<g>3<block_start>imageData=imageData[: : : 0]<block_end><if_stmt>imageType<eq>0<block_start>release_nii_proxy(img_proxy)<block_end># --- Load image data for bottom path (CT/MRI/...) ---
printFileNames=<false># Get this from config.ini
imageFileName=imageNames_Bottom[imageIdx]<if_stmt>imageType<eq>0<block_start>[imageData_Bottom img_proxy]=load_nii(imageFileName printFileNames)<block_end><else_stmt><block_start>imageData_Bottom=load_matlab(imageFileName printFileNames)<block_end><if_stmt>applyPaddingBool<eq><true><block_start>[imageData_Bottom paddingValues]=applyPadding(imageData_Bottom sampleSizes receptiveField)<block_end><else_stmt><block_start>paddingValues=((0 0) (0 0) (0 0))<block_end><if_stmt>len(imageData_Bottom.shape)<g>3<block_start>imageData_Bottom=imageData_Bottom[: : : 0]<block_end><if_stmt>imageType<eq>0<block_start>release_nii_proxy(img_proxy)<block_end># --- Load ground truth (i.e. labels) ---
<if_stmt>len(groundTruthNames)<g>0<block_start>GTFileName=groundTruthNames[imageIdx]<if_stmt>imageType<eq>0<block_start>[gtLabelsData gt_proxy]=load_nii(GTFileName printFileNames)<block_end><else_stmt><block_start>gtLabelsData=load_matlab(GTFileName printFileNames)<block_end># Convert ground truth to int type
<if_stmt>np.issubdtype(gtLabelsData.dtype np.int)<block_start>gtLabelsData=gtLabelsData<block_end><else_stmt><block_start>np.rint(gtLabelsData).astype("int32")<block_end>imageGtLabels=gtLabelsData<if_stmt>imageType<eq>0# Release data
<block_start>release_nii_proxy(gt_proxy)<block_end><if_stmt>applyPaddingBool<eq><true><block_start>[imageGtLabels paddingValues]=applyPadding(imageGtLabels sampleSizes receptiveField)<block_end><block_end><else_stmt><block_start>imageGtLabels=np.empty(0)<block_end># --- Load roi ---
<if_stmt>len(roiNames)<g>0<block_start>roiFileName=roiNames[imageIdx]<if_stmt>imageType<eq>0<block_start>[roiMaskData roi_proxy]=load_nii(roiFileName printFileNames)<block_end><else_stmt><block_start>roiMaskData=load_matlab(roiFileName printFileNames)<block_end>roiMask=roiMaskData<if_stmt>imageType<eq>0# Release data
<block_start>release_nii_proxy(roi_proxy)<block_end><if_stmt>applyPaddingBool<eq><true><block_start>[roiMask paddingValues]=applyPadding(roiMask sampleSizes receptiveField)<block_end><block_end><else_stmt><block_start>roiMask=np.ones(imageGtLabels.shape)<block_end><return>[imageData imageData_Bottom imageGtLabels roiMask paddingValues]<block_end># -------------------------------------------------------- #
<def_stmt>getRandIndexes total maxNumberIdx# Generate a shuffle array of a vector containing "total" elements
<block_start>idxs=range(total)<line_sep>np.random.shuffle(idxs)<line_sep>rand_idxs=idxs[0:maxNumberIdx]<line_sep><return>rand_idxs<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_stmt>flask_restplus<as>restplus<class_stmt>Foo(restplus.Resource)<block_start><def_stmt>get self<block_start><return>"data"<block_end><block_end><class_stmt>ErrorsTest(object)<block_start><def_stmt>test_accept_default_application_json self app client<block_start>api=restplus.Api(app)<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers={'Accept':<none>})<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_application_json_by_default self app client<block_start>api=restplus.Api(app)<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'application/json')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_no_default_match_acceptable self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'application/json')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_default_override_accept self app client<block_start>api=restplus.Api(app)<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'text/plain')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_default_any_pick_first self app client<block_start>api=restplus.Api(app)<line_sep>@api.representation('text/plain')<def_stmt>text_rep data status_code headers=<none><block_start>resp=app.make_response((str(data) status_code headers))<line_sep><return>resp<block_end>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' '*/*')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_no_default_no_match_not_acceptable self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'text/plain')])<assert_stmt>res.status_code<eq>406<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_no_default_custom_repr_match self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>api.representations={}<line_sep>@api.representation('text/plain')<def_stmt>text_rep data status_code headers=<none><block_start>resp=app.make_response((str(data) status_code headers))<line_sep><return>resp<block_end>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'text/plain')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'text/plain'<block_end><def_stmt>test_accept_no_default_custom_repr_not_acceptable self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>api.representations={}<line_sep>@api.representation('text/plain')<def_stmt>text_rep data status_code headers=<none><block_start>resp=app.make_response((str(data) status_code headers))<line_sep><return>resp<block_end>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'application/json')])<assert_stmt>res.status_code<eq>406<assert_stmt>res.content_type<eq>'text/plain'<block_end><def_stmt>test_accept_no_default_match_q0_not_acceptable self app client<block_start>"""
q=0 should be considered NotAcceptable,
but this depends on werkzeug >= 1.0 which is not yet released
so this test is expected to fail until we depend on werkzeug >= 1.0
"""<line_sep>api=restplus.Api(app default_mediatype=<none>)<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'application/json; q=0')])<assert_stmt>res.status_code<eq>406<assert_stmt>res.content_type<eq>'application/json'<block_end><def_stmt>test_accept_no_default_accept_highest_quality_of_two self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>@api.representation('text/plain')<def_stmt>text_rep data status_code headers=<none><block_start>resp=app.make_response((str(data) status_code headers))<line_sep><return>resp<block_end>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'application/json; q=0.1, text/plain; q=1.0')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'text/plain'<block_end><def_stmt>test_accept_no_default_accept_highest_quality_of_three self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>@api.representation('text/html')@api.representation('text/plain')<def_stmt>text_rep data status_code headers=<none><block_start>resp=app.make_response((str(data) status_code headers))<line_sep><return>resp<block_end>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'application/json; q=0.1, text/plain; q=0.3, text/html; q=0.2')])<assert_stmt>res.status_code<eq>200<assert_stmt>res.content_type<eq>'text/plain'<block_end><def_stmt>test_accept_no_default_no_representations self app client<block_start>api=restplus.Api(app default_mediatype=<none>)<line_sep>api.representations={}<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'text/plain')])<assert_stmt>res.status_code<eq>406<assert_stmt>res.content_type<eq>'text/plain'<block_end><def_stmt>test_accept_invalid_default_no_representations self app client<block_start>api=restplus.Api(app default_mediatype='nonexistant/mediatype')<line_sep>api.representations={}<line_sep>api.add_resource(Foo '/test/')<line_sep>res=client.get('/test/' headers=[('Accept' 'text/plain')])<assert_stmt>res.status_code<eq>500<block_end><block_end> |
# -*- coding: utf-8 -*-
# OPEN AI LAB is pleased to support the open source community by supporting Tengine available.
#
# Copyright (C) 2021 OPEN AI LAB. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
This tool is mainly for optimizing the network structure of nanodet_m.ckpt from
https://github.com/RangiLyu/nanodet .
Preparation:
1. Export pytorch model to onnx by official tools/export_onnx.py, e.g.
$ python tools/export_onnx.py --cfg_path config/nanodet-m.yml --model_path nanodet_m.ckpt
Optimization:
1. Load onnx model and just simplify it:
$ python3 nanodet_m-opt.py --input nanodet_m.onnx --output nanodet_m-opt.onnx
Optimization (not recommended):
1. Update the output shape in all distance prediction branches from [1, *, 32] to [1, *, 4, 8];
2. Add an additional "Softmax" node at the end of all distance prediction branches with axis=-1;
3. Update the output tensor name (from "dis_pred_stride_*" to "dis_sm_stride_*", in which "sm" is
short for "softmax") and shape (from [1, *, 32] to [1, *, 4, 8] for later integral);
4. Check and simplify new onnx model.
$ python3 nanodet_m-opt.py --input nanodet_m.onnx --output nanodet_m-opt.onnx --softmax --const 893,915,937
This tool is based on ONNX Framework.
Author:
<EMAIL>
History:
2021/06/26 create
"""<import_stmt>argparse<import_stmt>onnx<import_from_stmt>onnxsim simplify<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='NanoDet-m Optimize Tool Parameters')<line_sep>parser.add_argument('--input' type=str default='nanodet_m.onnx' help='input model path')<line_sep>parser.add_argument('--output' type=str default='nanodet_m-opt.onnx' help='output model path')<line_sep>parser.add_argument('--const' type=str default='893,915,937' help='constant(nodes) for final reshape node in distance prediction branch')<line_sep>parser.add_argument("--softmax" action='store_true' default=<false> help="add additional softmax node to distance prediction branch")<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>optimize_node_shape nodes names<block_start>"""
optimize input constant nodes of final reshape nodes in distance prediction branch
Args:
nodes: the graph.node of ONNX model
names: target constant node name list
Returns:
optimized graph nodes(inplace)
"""<line_sep>## new shape value for "Constant" nodes
t=onnx.helper.make_tensor('' onnx.TensorProto.INT64 [4] [1 4 8 -1])<line_sep>t=[onnx.helper.make_attribute(key value)<for>key,value {"value":t}.items()]<line_sep>## new attribute for "Transpose" nodes
a=[onnx.helper.make_attribute(key value)<for>key,value {"perm":(0 3 1 2)}.items()]<line_sep>reshape_output=[]<for_stmt>i,n enumerate(nodes)<block_start><if_stmt>'Constant'<eq>n.op_type<and>n.output[0]<in>names## replace attr with new one
<block_start>n.attribute.pop()<line_sep>n.attribute.extend(t)<line_sep>#print(n)
<continue><block_end><if_stmt>'Reshape'<eq>n.op_type<and>set(names).intersection(n.input)## cache output tensor name of reshape node
<block_start>reshape_output.extend(n.output)<line_sep>#print(n)
<continue><block_end><if_stmt>'Transpose'<eq>n.op_type<and>n.input[0]<in>reshape_output## replace attr with new one
<block_start>n.attribute.pop()<line_sep>n.attribute.extend(a)<line_sep>#print(n)
<continue><block_end><block_end><return>nodes<block_end><def_stmt>optimize_output_tensor output<block_start>"""
optimize output tensor name and shape
Args:
output: the graph.output of ONNX model
Returns:
optimized graph output(inplace)
"""<for_stmt>o output<block_start><if_stmt>"dis_pred_stride_"<in>o.name<block_start>_d=o.type.tensor_type.shape.dim<line_sep>## kick out the last dim: 32
d2=_d.pop(2)<line_sep>## add two new dims: 4, 8
d2.dim_value=4<line_sep>_d.append(d2)<line_sep>d2.dim_value=8<line_sep>_d.append(d2)<line_sep>## update output name
o.name=o.name.replace("dis_pred_stride_" "dis_sm_stride_")<block_end><block_end><return>output<block_end><def_stmt>optimize_add_softmax nodes<block_start>"""
add additional softmax node in the end of all distance prediction branches
Args:
nodes: the graph.node of ONNX model
Returns:
optimized graph nodes(inplace)
"""<for_stmt>n nodes<block_start><if_stmt>'Transpose'<eq>n.op_type<and>"dis_pred_stride_"<in>n.output[0]## add additional softmax node
<block_start>_input=n.output[0]<line_sep>_output=_input.replace("dis_pred_stride_" "dis_sm_stride_")<line_sep>n_sm=onnx.helper.make_node('Softmax' inputs=[_input] outputs=[_output] axis=-1)<line_sep>nodes.append(n_sm)<block_end><block_end><return>nodes<block_end><def_stmt>usage_info <block_start>"""
usage info
"""<line_sep>print("Input params is illegal...╮(╯3╰)╭")<line_sep>print("try it again:\n python nanodet_m-opt.py -h")<block_end><def_stmt>main <block_start>"""
main function
"""<line_sep>print("---- Tengine NanoDet-m Optimize Tool ----\n")<line_sep>args=parse_args()<if_stmt>args<eq><none><or>args.input<eq><none><block_start>usage_info()<line_sep><return><none><block_end>print(" Input model path : %s"%(args.input))<line_sep>print("Output model path : %s"%(args.output))<line_sep># load original onnx model, graph, nodes
print("[Opt Tools Info]: Step 0, load original onnx model from %s."%(args.input))<line_sep>onnx_model=onnx.load(args.input)<if_stmt>args.softmax<block_start>constant_shape_list=args.const.split(',')<line_sep># update input constant nodes
print("[Opt Tools Info]: Step 1, update the output shape in all dis_pred branches.")<line_sep>optimize_node_shape(onnx_model.graph.node constant_shape_list)<line_sep># add additional softmax nodes
print("[Opt Tools Info]: Step 2, add Softmax node in the end of all dis_pred branche.")<line_sep>optimize_add_softmax(onnx_model.graph.node)<line_sep># update output tensor name and shape
print("[Opt Tools Info]: Step 3, update output tensor name and shape.")<line_sep>optimize_output_tensor(onnx_model.graph.output)<block_end># do check and simplify the onnx model
print("[Opt Tools Info]: Step 4, check and simplify the new onnx model.")<line_sep>onnx_model,check=simplify(onnx_model)<line_sep># save the new optimize onnx model
print("[Opt Tools Info]: Step 5, save the new onnx model to %s"%(args.output))<line_sep>onnx.save(onnx_model args.output)<line_sep>print("\n---- Tengine NanoDet-m Optimize onnx create success, best wish for your inference has a high accuracy ...\\(^0^)/ ----")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_stmt>json<import_stmt>re<import_stmt>six<import_from_stmt>six.moves xrange<import_stmt>actions<import_stmt>identifiers<import_stmt>schema<import_stmt>summary<import_stmt>table_data_set<import_stmt>logger<line_sep>log=logger.Logger(__name__ logger.INFO)<line_sep># PHILOSOPHY OF MIGRATIONS.
#
# We should probably never remove, modify, or rename metadata tables or columns.
# Instead, we should only add.
#
# We can mark old columns/tables as deprecated, which should be ignored except to prevent us from
# adding same-named entities in the future.
#
# If we change the meaning of a column, we have to create a new column with a new name.
#
# This should make it at least barely possible to share documents by people who are not all on the
# same Grist version (even so, it will require more work). It should also make it somewhat safe to
# upgrade and then open the document with a previous version.
all_migrations = {}


def noop_migration(_all_tables):
    return []

# Each migration function includes a .need_all_tables attribute. See migration() decorator.
noop_migration.need_all_tables = False


def create_migrations(all_tables, metadata_only=False):
    """
Creates and returns a list of DocActions needed to bring this document to
schema.SCHEMA_VERSION.
all_tables: all tables or just the metadata tables (those named with _grist_ prefix) as a
dictionary mapping table name to TableData.
metadata_only: should be set if only metadata tables are passed in. If ALL tables are
required to process migrations, this method will raise a "need all tables..." exception.
"""<try_stmt><block_start>doc_version=all_tables['_grist_DocInfo'].columns["schemaVersion"][0]<block_end><except_stmt>Exception<block_start>doc_version=0<block_end># We create a TableDataSet, and populate it with the subset of the current schema that matches
# all_tables. For missing items, we make up tables and incomplete columns, which should be OK
# since we would not be adding new records to deprecated columns.
# Note that this approach makes it NOT OK to change column types.
    tdset = table_data_set.TableDataSet()

    # For each table in the provided metadata tables, create an AddTable action.
    user_schema = schema.build_schema(all_tables['_grist_Tables'],
                                      all_tables['_grist_Tables_column'],
                                      include_builtin=False)
    for t in six.itervalues(user_schema):
        tdset.apply_doc_action(actions.AddTable(t.tableId, schema.cols_to_dict_list(t.columns)))

    # For each old table/column, construct an AddTable action using the current schema.
    new_schema = {a.table_id: a for a in schema.schema_create_actions()}
    for table_id, data in sorted(six.iteritems(all_tables)):
        # User tables should already be in tdset; the rest must be metadata tables.
        # (If metadata_only is true, there is simply nothing to skip here.)
        if table_id not in tdset.all_tables:
            new_col_info = {}
            if table_id in new_schema:
                new_col_info = {c['id']: c for c in new_schema[table_id].columns}
            # Use an incomplete default for unknown (i.e. deprecated) columns; some uses of the column
            # would be invalid, such as adding a new record with missing values.
            col_info = sorted([new_col_info.get(col_id, {'id': col_id}) for col_id in data.columns],
                              key=lambda c: list(six.iteritems(c)))
            tdset.apply_doc_action(actions.AddTable(table_id, col_info))

        # And load in the original data, interpreting the TableData object as BulkAddRecord action.
        tdset.apply_doc_action(actions.BulkAddRecord(*data))

    migration_actions = []
    for version in xrange(doc_version + 1, schema.SCHEMA_VERSION + 1):
        migration_func = all_migrations.get(version, noop_migration)
        if migration_func.need_all_tables and metadata_only:
            raise Exception("need all tables for migration to %s" % version)
        migration_actions.extend(all_migrations.get(version, noop_migration)(tdset))

    # Note that if we are downgrading versions (i.e. doc_version is higher), then the following is
    # the only action we include into the migration.
    migration_actions.append(actions.UpdateRecord('_grist_DocInfo', 1, {
        'schemaVersion': schema.SCHEMA_VERSION
    }))
    return migration_actions


def get_last_migration_version():
    """
Returns the last schema version number for which we have a migration defined.
"""<line_sep><return>max(all_migrations)<block_end><def_stmt>migration schema_version need_all_tables=<false><block_start>"""
Decorator for migrations that associates the decorated migration function with the given
schema_version. This decorated function will be run to migrate forward to schema_version.
Migrations are first attempted with only metadata tables, but if any required migration function
is marked with need_all_tables=True, then the migration will be retried with all tables.
NOTE: new migrations should NOT set need_all_tables=True; it would require more work to process
very large documents safely (including those containing on-demand tables).
"""<def_stmt>add_migration migration_func<block_start>migration_func.need_all_tables=need_all_tables<line_sep>all_migrations[schema_version]=migration_func<line_sep><return>migration_func<block_end><return>add_migration<block_end># A little shorthand to make AddColumn actions more concise.
<def_stmt>add_column table_id col_id col_type *args **kwargs<block_start><return>actions.AddColumn(table_id col_id schema.make_column(col_id col_type *args **kwargs))<block_end># Another shorthand to only add a column if it isn't already there.
<def_stmt>maybe_add_column tdset table_id col_id col_type *args **kwargs<block_start><if_stmt>col_id<not><in>tdset.all_tables[table_id].columns<block_start><return>add_column(table_id col_id col_type *args **kwargs)<block_end><return><none><block_end># Returns the next unused row id for the records of the table given by table_id.
<def_stmt>next_id tdset table_id<block_start>row_ids=tdset.all_tables[table_id].row_ids<line_sep><return>max(row_ids)+1<if>row_ids<else>1<block_end># Parses a json string, but returns an empty object for invalid json.
<def_stmt>safe_parse json_str<block_start><try_stmt><block_start><return>json.loads(json_str)<block_end><except_stmt>ValueError<block_start><return>{}<block_end><block_end>@migration(schema_version=1)<def_stmt>migration1 tdset<block_start>"""
Add TabItems table, and populate based on existing sections.
"""<line_sep>doc_actions=[]<line_sep># The very first migration is extra-lax, and creates some tables that are missing in some test
# docs. That's only because we did not distinguish schema version before migrations were
# implemented. Other migrations should not need such conditionals.
<if_stmt>'_grist_Attachments'<not><in>tdset.all_tables<block_start>doc_actions.append(actions.AddTable("_grist_Attachments" [schema.make_column("fileIdent" "Text") schema.make_column("fileName" "Text") schema.make_column("fileType" "Text") schema.make_column("fileSize" "Int") schema.make_column("timeUploaded" "DateTime")]))<block_end><if_stmt>'_grist_TabItems'<not><in>tdset.all_tables<block_start>doc_actions.append(actions.AddTable("_grist_TabItems" [schema.make_column("tableRef" "Ref:_grist_Tables") schema.make_column("viewRef" "Ref:_grist_Views") ]))<block_end><if_stmt>'schemaVersion'<not><in>tdset.all_tables['_grist_DocInfo'].columns<block_start>doc_actions.append(add_column('_grist_DocInfo' 'schemaVersion' 'Int'))<block_end>doc_actions.extend([add_column('_grist_Attachments' 'imageHeight' 'Int') add_column('_grist_Attachments' 'imageWidth' 'Int') ])<line_sep>view_sections=actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section'])<line_sep>rows=sorted({(s.tableRef s.parentId)<for>s view_sections})<if_stmt>rows<block_start>values={'tableRef':[r[0]<for>r rows] 'viewRef':[r[1]<for>r rows]}<line_sep>row_ids=list(xrange(1 len(rows)+1))<line_sep>doc_actions.append(actions.ReplaceTableData('_grist_TabItems' row_ids values))<block_end><return>tdset.apply_doc_actions(doc_actions)<block_end>@migration(schema_version=2)<def_stmt>migration2 tdset<block_start>"""
Add TableViews table, and populate based on existing sections.
Add TabBar table, and populate based on existing views.
Add primaryViewId to Tables, populated using relatedViews
"""<line_sep># Maps tableRef to viewRef
primary_views={}<line_sep># Associate each view with a single table; this dict includes primary views.
views_to_table={}<line_sep># For each table, find a view to serve as the primary view.
view_sections=actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section'])<for_stmt>s view_sections<block_start><if_stmt>s.tableRef<not><in>primary_views<and>s.parentKey<eq>"record"# The view containing this section is a good candidate for primary view.
<block_start>primary_views[s.tableRef]=s.parentId<block_end><if_stmt>s.parentId<not><in>views_to_table# The first time we see a (view, table) combination, associate the view with that table.
<block_start>views_to_table[s.parentId]=s.tableRef<block_end><block_end><def_stmt>create_primary_views_action primary_views<block_start>row_ids=sorted(primary_views.keys())<line_sep>values={'primaryViewId':[primary_views[r]<for>r row_ids]}<line_sep><return>actions.BulkUpdateRecord('_grist_Tables' row_ids values)<block_end><def_stmt>create_tab_bar_action views_to_table<block_start>row_ids=list(xrange(1 len(views_to_table)+1))<line_sep><return>actions.ReplaceTableData('_grist_TabBar' row_ids {'viewRef':sorted(views_to_table.keys())})<block_end><def_stmt>create_table_views_action views_to_table primary_views<block_start>related_views=sorted(set(views_to_table.keys())-set(primary_views.values()))<line_sep>row_ids=list(xrange(1 len(related_views)+1))<line_sep><return>actions.ReplaceTableData('_grist_TableViews' row_ids {'tableRef':[views_to_table[v]<for>v related_views] 'viewRef':related_views })<block_end><return>tdset.apply_doc_actions([actions.AddTable('_grist_TabBar' [schema.make_column('viewRef' 'Ref:_grist_Views') ]) actions.AddTable('_grist_TableViews' [schema.make_column('tableRef' 'Ref:_grist_Tables') schema.make_column('viewRef' 'Ref:_grist_Views') ]) add_column('_grist_Tables' 'primaryViewId' 'Ref:_grist_Views') create_primary_views_action(primary_views) create_tab_bar_action(views_to_table) create_table_views_action(views_to_table primary_views)])<block_end>@migration(schema_version=3)<def_stmt>migration3 tdset<block_start>"""
There is no longer a "Derived" type for columns, and summary tables use the type suitable for
the column being summarized. For old documents, convert "Derived" type to "Any", and adjust the
usage of "lookupOrAddDerived()" function.
"""<line_sep># Note that this is a complicated migration, and mainly acceptable because it is before our very
# first release. For a released product, a change like this should be done in a backwards
# compatible way: keep but deprecate 'Derived'; introduce a lookupOrAddDerived2() to use for new
# summary tables, but keep the old interface as well for existing ones. The reason is that such
# migrations are error-prone and may mess up customers' data.
doc_actions=[]<line_sep>tables=list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))<line_sep>tables_map={t.id:t<for>t tables}<line_sep>columns=list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))<line_sep># Convert columns from type 'Derived' to type 'Any'
affected_cols=[c<for>c columns<if>c.type<eq>'Derived']<if_stmt>affected_cols<block_start>doc_actions.extend(actions.ModifyColumn(tables_map[c.parentId].tableId c.colId {'type':'Any'})<for>c affected_cols)<line_sep>doc_actions.append(actions.BulkUpdateRecord('_grist_Tables_column' [c.id<for>c affected_cols] {'type':['Any'<for>c affected_cols]}))<block_end># Convert formulas of the form '.lookupOrAddDerived($x,$y)' to '.lookupOrAddDerived(x=$x,y=$y)'
formula_re=re.compile(r'(\w+).lookupOrAddDerived\((.*?)\)')<line_sep>arg_re=re.compile(r'^\$(\w+)$')<def_stmt>replace match<block_start>args=", ".join(arg_re.sub(r'\1=$\1' arg.strip())<for>arg match.group(2).split(","))<line_sep><return>'%s.lookupOrAddDerived(%s)'%(match.group(1) args)<block_end>formula_updates=[]<for_stmt>c columns<block_start>new_formula=c.formula<and>formula_re.sub(replace c.formula)<if_stmt>new_formula<ne>c.formula<block_start>formula_updates.append((c new_formula))<block_end><block_end><if_stmt>formula_updates<block_start>doc_actions.extend(actions.ModifyColumn(tables_map[c.parentId].tableId c.colId {'formula':f})<for>c,f formula_updates)<line_sep>doc_actions.append(actions.BulkUpdateRecord('_grist_Tables_column' [c.id<for>c,f formula_updates] {'formula':[f<for>c,f formula_updates]}))<block_end><return>tdset.apply_doc_actions(doc_actions)<block_end>@migration(schema_version=4)<def_stmt>migration4 tdset<block_start>"""

@migration(schema_version=4)
def migration4(tdset):
  """
  Add TabPos column to TabBar table
  """
  doc_actions = []
  row_ids = tdset.all_tables['_grist_TabBar'].row_ids
  doc_actions.append(add_column('_grist_TabBar', 'tabPos', 'PositionNumber'))
  doc_actions.append(actions.BulkUpdateRecord('_grist_TabBar', row_ids, {'tabPos': row_ids}))
  return tdset.apply_doc_actions(doc_actions)


@migration(schema_version=5)
def migration5(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Views', 'primaryViewTable', 'Ref:_grist_Tables',
               formula='_grist_Tables.lookupOne(primaryViewId=$id)', isFormula=True),
  ])


@migration(schema_version=6)
def migration6(tdset):
  # This undoes the previous migration, since primaryViewTable is now a formula private to the
  # sandbox rather than part of the document schema.
  return tdset.apply_doc_actions([
    actions.RemoveColumn('_grist_Views', 'primaryViewTable'),
  ])

@migration(schema_version=7)
def migration7(tdset):
  """
  Add summarySourceTable/summarySourceCol fields to metadata, and adjust existing summary tables
  to correspond to the new style.
  """
  # Note: this migration has some faults.
  # - It doesn't delete viewSectionFields for columns it removes (if a user added some special
  #   columns manually).
  # - It doesn't fix types of Reference columns that refer to old-style summary tables
  #   (if the user created some such columns manually).
  doc_actions = [action for action in [
    maybe_add_column(tdset, '_grist_Tables', 'summarySourceTable', 'Ref:_grist_Tables'),
    maybe_add_column(tdset, '_grist_Tables_column', 'summarySourceCol',
                     'Ref:_grist_Tables_column')
  ] if action]

  # Maps tableRef to Table object.
  tables_map = {t.id: t for t in actions.transpose_bulk_action(tdset.all_tables['_grist_Tables'])}

  # Maps tableName to tableRef.
  table_name_to_ref = {t.tableId: t.id for t in six.itervalues(tables_map)}

  # List of Column objects.
  columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))

  # Maps columnRef to Column object.
  columns_map_by_ref = {c.id: c for c in columns}

  # Maps (tableRef, colName) to Column object.
  columns_map_by_table_colid = {(c.parentId, c.colId): c for c in columns}

  # Set of all tableNames.
  table_name_set = set(table_name_to_ref.keys())

  remove_cols = []      # List of columns to remove.
  formula_updates = []  # List of (column, new_table_name, new_formula) pairs.
  table_renames = []    # List of (table, new_name) pairs.
  source_tables = []    # List of (table, summarySourceTable) pairs.
  source_cols = []      # List of (column, summarySourceColumn) pairs.

  # Summary tables used to be named as "Summary_<SourceName>_<ColRef1>_<ColRef2>". This regular
  # expression parses that.
  summary_re = re.compile(r'^Summary_(\w+?)((?:_\d+)*)$')
  for t in six.itervalues(tables_map):
    m = summary_re.match(t.tableId)
    if not m or m.group(1) not in table_name_to_ref:
      continue
    # We have a valid summary table.
    source_table_name = m.group(1)
    source_table_ref = table_name_to_ref[source_table_name]
    groupby_colrefs = [int(x) for x in m.group(2).strip("_").split("_")]
    # Prepare a new-style name for the summary table. Be sure not to conflict with existing
    # tables or with each other (i.e. don't rename multiple tables to the same name).
    new_name = summary.encode_summary_table_name(source_table_name)
    new_name = identifiers.pick_table_ident(new_name, avoid=table_name_set)
    table_name_set.add(new_name)
    log.warn("Upgrading summary table %s for %s(%s) to %s" %
             (t.tableId, source_table_name, groupby_colrefs, new_name))

    # Remove the "lookupOrAddDerived" column from the source table (which is named using the
    # summary table name for its colId).
    remove_cols.extend(c for c in columns
                       if c.parentId == source_table_ref and c.colId == t.tableId)

    # Upgrade the "group" formula in the summary table.
    expected_group_formula = "%s.lookupRecords(%s=$id)" % (source_table_name, t.tableId)
    new_formula = "table.getSummarySourceGroup(rec)"
    formula_updates.extend(
      (c, new_name, new_formula) for c in columns
      if (c.parentId == t.id and c.colId == "group" and c.formula == expected_group_formula))

    # Schedule a rename of the summary table.
    table_renames.append((t, new_name))

    # Set summarySourceTable fields on the metadata.
    source_tables.append((t, source_table_ref))

    # Set summarySourceCol fields in the metadata. We need to find the right summary column.
    groupby_cols = set()
    for col_ref in groupby_colrefs:
      src_col = columns_map_by_ref.get(col_ref)
      sum_col = columns_map_by_table_colid.get((t.id, src_col.colId)) if src_col else None
      if sum_col:
        groupby_cols.add(sum_col)
        source_cols.append((sum_col, src_col.id))
      else:
        log.warn("Upgrading summary table %s: couldn't find column %s" % (t.tableId, col_ref))

    # Finally, we have to remove all non-formula columns that are not groupby-columns (e.g.
    # 'manualSort'), because the new approach assumes ALL non-formula columns are for groupby.
    remove_cols.extend(c for c in columns
                       if c.parentId == t.id and c not in groupby_cols and not c.isFormula)

  # Create all the doc actions from the arrays we prepared.

  # Process remove_cols.
  doc_actions.extend(
    actions.RemoveColumn(tables_map[c.parentId].tableId, c.colId) for c in remove_cols)
  doc_actions.append(actions.BulkRemoveRecord(
    '_grist_Tables_column', [c.id for c in remove_cols]))

  # Process table_renames.
  doc_actions.extend(
    actions.RenameTable(t.tableId, new) for (t, new) in table_renames)
  doc_actions.append(actions.BulkUpdateRecord(
    '_grist_Tables', [t.id for t, new in table_renames],
    {'tableId': [new for t, new in table_renames]}))

  # Process source_tables and source_cols.
  doc_actions.append(actions.BulkUpdateRecord(
    '_grist_Tables', [t.id for t, ref in source_tables],
    {'summarySourceTable': [ref for t, ref in source_tables]}))
  doc_actions.append(actions.BulkUpdateRecord(
    '_grist_Tables_column', [t.id for t, ref in source_cols],
    {'summarySourceCol': [ref for t, ref in source_cols]}))

  # Process formula_updates. Do this last since recalculation of these may cause new records to
  # be added to summary tables, so we should have all the tables correctly set up by this time.
  doc_actions.extend(
    actions.ModifyColumn(table_id, c.colId, {'formula': f})
    for c, table_id, f in formula_updates)
  doc_actions.append(actions.BulkUpdateRecord(
    '_grist_Tables_column', [c.id for c, t, f in formula_updates],
    {'formula': [f for c, t, f in formula_updates]}))

  return tdset.apply_doc_actions(doc_actions)


@migration(schema_version=8)
def migration8(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Tables_column', 'untieColIdFromLabel', 'Bool'),
  ])


@migration(schema_version=9)
def migration9(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Tables_column', 'displayCol', 'Ref:_grist_Tables_column'),
    add_column('_grist_Views_section_field', 'displayCol', 'Ref:_grist_Tables_column'),
  ])
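
# Illustration (not part of any migration): how an old-style summary table name is parsed by
# the summary_re used in migration7 above. Hypothetical table name:
#
#   >>> m = summary_re.match('Summary_Orders_2_5')
#   >>> m.group(1), [int(x) for x in m.group(2).strip('_').split('_')]
#   ('Orders', [2, 5])
#
# group(1) is the source table name and group(2) holds the groupby column refs; a name that
# doesn't match (or whose source table no longer exists) is skipped by the migration.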

@migration(schema_version=10)
def migration10(tdset):
  """
  Add displayCol to all reference cols, with formula $<ref_col_id>.<visible_col_id>
  (Note that displayCol field was added in the previous migration.)
  """
  doc_actions = []
  tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
  columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))

  # Maps tableRef to tableId.
  tables_map = {t.id: t.tableId for t in tables}

  # Maps tableRef to sets of colIds in the tables. Used to prevent repeated colIds.
  table_col_ids = {t.id: set(tdset.all_tables[t.tableId].columns.keys()) for t in tables}

  # Get the next sequential column row id.
  row_id = next_id(tdset, '_grist_Tables_column')

  for c in columns:
    # If a column is a reference with an unset display column, add a display column.
    if c.type.startswith('Ref:') and not c.displayCol:
      # Get visible_col_id. If not found, row id is used and no display col is necessary.
      visible_col_id = ""
      try:
        visible_col_id = json.loads(c.widgetOptions).get('visibleCol')
        if not visible_col_id:
          continue
      except Exception:
        continue  # If invalid widgetOptions, skip this column.

      # Set formula to use the current visibleCol in widgetOptions.
      formula = ("$%s.%s" % (c.colId, visible_col_id))

      # Get a unique colId for the display column, and add it to the set of used ids.
      used_col_ids = table_col_ids[c.parentId]
      display_col_id = identifiers.pick_col_ident('gristHelper_Display', avoid=used_col_ids)
      used_col_ids.add(display_col_id)

      # Add all actions to the list.
      doc_actions.append(add_column(tables_map[c.parentId], 'gristHelper_Display', 'Any',
                                    formula=formula, isFormula=True))
      doc_actions.append(actions.AddRecord('_grist_Tables_column', row_id, {
        'parentPos': 1.0,
        'label': 'gristHelper_Display',
        'isFormula': True,
        'parentId': c.parentId,
        'colId': 'gristHelper_Display',
        'formula': formula,
        'widgetOptions': '',
        'type': 'Any'
      }))
      doc_actions.append(actions.UpdateRecord('_grist_Tables_column', c.id,
                                              {'displayCol': row_id}))

      # Increment row id to the next unused.
      row_id += 1

  return tdset.apply_doc_actions(doc_actions)


@migration(schema_version=11)
def migration11(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Views_section', 'embedId', 'Text'),
  ])


@migration(schema_version=12)
def migration12(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Views_section', 'options', 'Text')
  ])

@migration(schema_version=13)
def migration13(tdset):
  # Adds a basketId to the entire document to take advantage of basket functionality.
  # From this version on, embedId is deprecated.
  return tdset.apply_doc_actions([
    add_column('_grist_DocInfo', 'basketId', 'Text')
  ])


@migration(schema_version=14)
def migration14(tdset):
  # Create the ACL table AND also the default ACL groups, default resource, and the default rule.
  # These match the actions applied to a new document by the 'InitNewDoc' useraction (as of v14).
  return tdset.apply_doc_actions([
    actions.AddTable('_grist_ACLMemberships', [
      schema.make_column('parent', 'Ref:_grist_ACLPrincipals'),
      schema.make_column('child', 'Ref:_grist_ACLPrincipals'),
    ]),
    actions.AddTable('_grist_ACLPrincipals', [
      schema.make_column('userName', 'Text'),
      schema.make_column('groupName', 'Text'),
      schema.make_column('userEmail', 'Text'),
      schema.make_column('instanceId', 'Text'),
      schema.make_column('type', 'Text'),
    ]),
    actions.AddTable('_grist_ACLResources', [
      schema.make_column('colIds', 'Text'),
      schema.make_column('tableId', 'Text'),
    ]),
    actions.AddTable('_grist_ACLRules', [
      schema.make_column('aclFormula', 'Text'),
      schema.make_column('principals', 'Text'),
      schema.make_column('resource', 'Ref:_grist_ACLResources'),
      schema.make_column('aclColumn', 'Ref:_grist_Tables_column'),
      schema.make_column('permissions', 'Int'),
    ]),

    # Set up initial ACL data.
    actions.BulkAddRecord('_grist_ACLPrincipals', [1, 2, 3, 4], {
      'type': ['group', 'group', 'group', 'group'],
      'groupName': ['Owners', 'Admins', 'Editors', 'Viewers'],
    }),
    actions.AddRecord('_grist_ACLResources', 1, {'tableId': '', 'colIds': ''}),
    actions.AddRecord('_grist_ACLRules', 1,
                      {'resource': 1, 'permissions': 0x3F, 'principals': '[1]'}),
  ])

@migration(schema_version=15)
def migration15(tdset):
  # Adds a filter JSON property to each field.
  # From this version on, filterSpec in _grist_Views_section is deprecated.
  doc_actions = [
    add_column('_grist_Views_section_field', 'filter', 'Text')
  ]

  # Get all section and field data to move section filter data to the fields.
  sections = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section']))
  fields = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section_field']))

  specs = {s.id: safe_parse(s.filterSpec) for s in sections}

  # Move filter data from sections to fields.
  for f in fields:
    # If the field belongs to the section and the field's colRef is in its filterSpec,
    # pull the filter setting from the section.
    filter_spec = specs.get(f.parentId)
    if filter_spec and str(f.colRef) in filter_spec:
      doc_actions.append(actions.UpdateRecord('_grist_Views_section_field', f.id, {
        'filter': json.dumps(filter_spec[str(f.colRef)])
      }))

  return tdset.apply_doc_actions(doc_actions)

@migration(schema_version=16)
def migration16(tdset):
  # Add visibleCol to columns and view fields, and set it from columns' and fields'
  # widgetOptions.
  doc_actions = [
    add_column('_grist_Tables_column', 'visibleCol', 'Ref:_grist_Tables_column'),
    add_column('_grist_Views_section_field', 'visibleCol', 'Ref:_grist_Tables_column'),
  ]

  # Maps tableId to table, for looking up target table as listed in "Ref:*" types.
  tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
  tables_by_id = {t.tableId: t for t in tables}

  # Allow looking up columns by ref or by (tableRef, colId).
  columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))
  columns_by_ref = {c.id: c for c in columns}
  columns_by_id = {(c.parentId, c.colId): c.id for c in columns}

  # Helper which returns the {'visibleCol', 'widgetOptions'} update if visibleCol should be set,
  # or None otherwise.
  def convert_visible_col(col, widget_options):
    if not col.type.startswith('Ref:'):
      return None

    # To set visibleCol, we need to know the target table. Skip if we can't find it.
    target_table = tables_by_id.get(col.type[len('Ref:'):])
    if not target_table:
      return None

    try:
      parsed_options = json.loads(widget_options)
    except Exception:
      return None  # If invalid widgetOptions, skip this column.

    visible_col_id = parsed_options.pop('visibleCol', None)
    if not visible_col_id:
      return None

    # Find visible_col_id as the column name in the appropriate table.
    target_col_ref = (0 if visible_col_id == 'id' else
                      columns_by_id.get((target_table.id, visible_col_id), None))
    if target_col_ref is None:
      return None

    # Use compact separators without whitespace, to match how JS encodes JSON.
    return {'visibleCol': target_col_ref,
            'widgetOptions': json.dumps(parsed_options, separators=(',', ':'))}

  for c in columns:
    new_values = convert_visible_col(c, c.widgetOptions)
    if new_values:
      doc_actions.append(actions.UpdateRecord('_grist_Tables_column', c.id, new_values))

  fields = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views_section_field']))
  for f in fields:
    c = columns_by_ref.get(f.colRef)
    if c:
      new_values = convert_visible_col(c, f.widgetOptions)
      if new_values:
        doc_actions.append(actions.UpdateRecord('_grist_Views_section_field', f.id, new_values))

  return tdset.apply_doc_actions(doc_actions)
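
# Illustration (not part of the migration): the convert_visible_col() helper above moves the
# 'visibleCol' key out of a column's widgetOptions JSON and re-encodes the remainder
# compactly. With hypothetical values:
#
#   >>> parsed = json.loads('{"visibleCol": "Name", "alignment": "left"}')
#   >>> parsed.pop('visibleCol', None)
#   'Name'
#   >>> json.dumps(parsed, separators=(',', ':'))
#   '{"alignment":"left"}'
#
# The popped column name is then resolved to a row ref via the (tableRef, colId) lookup map.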

# This is actually the only migration that requires all tables, because it modifies user data
# (specifically, any columns of the deprecated "Image" type).
@migration(schema_version=17, need_all_tables=True)
def migration17(tdset):
  """
  There is no longer an "Image" type for columns, as "Attachments" now serves as a
  display type for arbitrary files including images. Convert "Image" columns to "Attachments"
  columns.
  """
  doc_actions = []
  tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
  tables_map = {t.id: t for t in tables}
  columns = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables_column']))

  # Convert columns from type 'Image' to type 'Attachments'.
  affected_cols = [c for c in columns if c.type == 'Image']
  conv = lambda val: [val] if isinstance(val, int) and val > 0 else []
  if affected_cols:
    # Update the types in the data tables.
    doc_actions.extend(
      actions.ModifyColumn(tables_map[c.parentId].tableId, c.colId, {'type': 'Attachments'})
      for c in affected_cols)

    # Update the values to lists.
    for c in affected_cols:
      if c.isFormula:
        # Formula columns don't have data stored in the DB, and should not have data changes.
        continue
      table_id = tables_map[c.parentId].tableId
      table = tdset.all_tables[table_id]
      doc_actions.append(actions.BulkUpdateRecord(
        table_id, table.row_ids,
        {c.colId: [conv(val) for val in table.columns[c.colId]]}))

    # Update the types in the metadata tables.
    doc_actions.append(actions.BulkUpdateRecord(
      '_grist_Tables_column', [c.id for c in affected_cols],
      {'type': ['Attachments' for c in affected_cols]}))

  return tdset.apply_doc_actions(doc_actions)
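
# Illustration (not part of the migration): the conv() helper above turns a single "Image"
# cell value into the list form used by "Attachments" columns:
#
#   >>> conv = lambda val: [val] if isinstance(val, int) and val > 0 else []
#   >>> conv(5), conv(0), conv(None), conv('x')
#   ([5], [], [], [])
#
# Only positive integer attachment row ids survive; anything else becomes an empty list.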

@migration(schema_version=18)
def migration18(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_DocInfo', 'timezone', 'Text'),
    # All documents prior to this migration have been created in New York.
    actions.UpdateRecord('_grist_DocInfo', 1, {'timezone': 'America/New_York'})
  ])


@migration(schema_version=19)
def migration19(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Tables', 'onDemand', 'Bool'),
  ])


@migration(schema_version=20)
def migration20(tdset):
  """
  Add the _grist_Pages table and populate it based on existing TableViews entries, i.e. tables
  are sorted alphabetically by their `tableId` and views are gathered within their corresponding
  table and sorted by their id.
  """
  tables = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Tables']))
  table_map = {t.id: t for t in tables}
  table_views = list(actions.transpose_bulk_action(tdset.all_tables['_grist_TableViews']))

  # Old docs may include "Other views", not associated with any table. Don't include those in
  # table_views_map: they'll get included but not sorted or grouped by tableId.
  table_views_map = {tv.viewRef: table_map[tv.tableRef].tableId
                     for tv in table_views if tv.tableRef in table_map}
  views = list(actions.transpose_bulk_action(tdset.all_tables['_grist_Views']))

  def view_key(view):
    """
    Returns ("Table1", 2) where "Table1" is the view's tableId and 2 the view id. For a
    primary view (i.e. not referenced in _grist_TableViews) returns ("Table1", -1). Useful
    to get the list of views sorted in the same way as in the Table side pane. We use -1
    for primary views to make sure they come first among all the views of the same table.
    """
    if view.id in table_views_map:
      return (table_views_map[view.id], view.id)
    # The name of a primary view is the same as its tableId.
    return (view.name, -1)

  views.sort(key=view_key)
  row_ids = list(xrange(1, len(views) + 1))
  return tdset.apply_doc_actions([
    actions.AddTable('_grist_Pages', [
      schema.make_column('viewRef', 'Ref:_grist_Views'),
      schema.make_column('pagePos', 'PositionNumber'),
      schema.make_column('indentation', 'Int'),
    ]),
    actions.ReplaceTableData('_grist_Pages', row_ids, {
      'viewRef': [v.id for v in views],
      'pagePos': row_ids,
      'indentation': [1 if v.id in table_views_map else 0 for v in views]
    })
  ])


@migration(schema_version=21)
def migration21(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_ACLRules', 'aclFormulaParsed', 'Text'),
    add_column('_grist_ACLRules', 'permissionsText', 'Text'),
    add_column('_grist_ACLRules', 'rulePos', 'PositionNumber'),
    add_column('_grist_ACLRules', 'userAttributes', 'Text'),
  ])


@migration(schema_version=22)
def migration22(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_Tables_column', 'recalcWhen', 'Int'),
    add_column('_grist_Tables_column', 'recalcDeps', 'RefList:_grist_Tables_column'),
  ])


@migration(schema_version=23)
def migration23(tdset):
  return tdset.apply_doc_actions([
    add_column('_grist_DocInfo', 'documentSettings', 'Text'),
    actions.UpdateRecord('_grist_DocInfo', 1, {'documentSettings': '{"locale":"en-US"}'})
  ])


@migration(schema_version=24)
def migration24(tdset):
  return tdset.apply_doc_actions([
    actions.AddTable('_grist_Triggers', [
      schema.make_column("tableRef", "Ref:_grist_Tables"),
      schema.make_column("eventTypes", "ChoiceList"),
      schema.make_column("isReadyColRef", "Ref:_grist_Tables_column"),
      schema.make_column("actions", "Text"),  # JSON
    ]),
  ])
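
# A minimal sketch (not from this file) of how a schema-migration registry like the
# @migration decorator used above is commonly implemented. The real decorator, along with
# add_column(), maybe_add_column() and next_id(), is defined earlier in this module; the
# names below are hypothetical stand-ins for illustration only.
#
#   _sketch_migrations = {}
#
#   def _sketch_migration(schema_version, need_all_tables=False):
#     def wrapper(fn):
#       fn.schema_version = schema_version
#       fn.need_all_tables = need_all_tables
#       _sketch_migrations[schema_version] = fn
#       return fn
#     return wrapper
#
# To upgrade a document, one would then apply _sketch_migrations[v] for each version v from
# the document's current schema version + 1 up to the latest, in order.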
from keras import backend as K
from keras.layers import LSTM, time_distributed_dense
from keras import initializations, activations, regularizers
from keras.engine import InputSpec


# LSTM with Layer Normalization as described in:
# https://arxiv.org/pdf/1607.06450v1.pdf
# page 13, equation (20), (21), and (22)
class LSTM_LN(LSTM):

    def __init__(self, output_dim, **kwargs):
        super(LSTM_LN, self).__init__(output_dim, **kwargs)

    def norm(self, xs, norm_id):
        # Layer-normalize xs over the feature axis, then apply the learned
        # gain (gs) and bias (bs) for this normalization site.
        mu = K.mean(xs, axis=-1, keepdims=True)
        sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
        xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
        return xs

    def build(self, input_shape):
        super(LSTM_LN, self).build(input_shape)
        self.gs, self.bs = [], []
        for i in range(3):
            # Sites 0 and 1 normalize the 4 stacked gate pre-activations;
            # site 2 normalizes the cell state, hence a single output_dim.
            f = 1 if i == 2 else 4
            self.gs += [K.ones((f * self.output_dim,), name='{}_g{}'.format(self.name, i))]
            self.bs += [K.zeros((f * self.output_dim,), name='{}_b{}'.format(self.name, i))]
        self.trainable_weights += self.gs + self.bs

    def step(self, x, states):
        h_tm1 = states[0]
        c_tm1 = states[1]
        B_U = states[2]
        B_W = states[3]

        if self.consume_less == 'gpu':
            # Normalize the input and recurrent projections separately, as in the paper.
            z = self.norm(K.dot(x * B_W[0], self.W), 0) + \
                self.norm(K.dot(h_tm1 * B_U[0], self.U), 1) + self.b

            z0 = z[:, :self.output_dim]
            z1 = z[:, self.output_dim: 2 * self.output_dim]
            z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
            z3 = z[:, 3 * self.output_dim:]

            i = self.inner_activation(z0)
            f = self.inner_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.inner_activation(z3)
        else:
            # Only the 'gpu' mode is supported with layer normalization.
            assert False

            if self.consume_less == 'cpu':
                x_i = x[:, :self.output_dim]
                x_f = x[:, self.output_dim: 2 * self.output_dim]
                x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
                x_o = x[:, 3 * self.output_dim:]
            elif self.consume_less == 'mem':
                x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
                x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
                x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
                x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
            else:
                raise Exception('Unknown `consume_less` mode.')

            i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
            f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
            o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))

        h = o * self.activation(self.norm(c, 2))
        return h, [h, c]
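
# A minimal usage sketch (not part of the original file), assuming the Keras 1.x API that
# this layer targets (keras.initializations and time_distributed_dense only exist in 1.x).
# LSTM_LN is the class above; everything else is standard Keras 1.x.
#
#   from keras.models import Sequential
#   from keras.layers import Dense
#
#   model = Sequential()
#   model.add(LSTM_LN(64, consume_less='gpu', input_shape=(100, 16)))
#   model.add(Dense(1, activation='sigmoid'))
#   model.compile(optimizer='adam', loss='binary_crossentropy')
#
# Note that consume_less='gpu' is required: step() above asserts on the other modes.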
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>pytest<line_sep>pytest.importorskip('mxnet')<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>accuracy_checker.launcher.launcher create_launcher<import_from_stmt>accuracy_checker.config ConfigError<import_from_stmt>accuracy_checker.data_readers DataRepresentation<def_stmt>get_mx_test_model models_dir config_override=<none><block_start>config={"framework":'mxnet' "model":models_dir/'samplenet-0000.params' "adapter":'classification' "device":'cpu' 'inputs':[{'name':'data' 'type':'INPUT' 'shape':'3,32,32'}]}<if_stmt>config_override<block_start>config.update(config_override)<block_end><return>create_launcher(config)<block_end><class_stmt>TestMxNetLauncher<block_start><def_stmt>test_launcher_creates self models_dir<block_start>launcher=get_mx_test_model(models_dir)<assert_stmt>launcher.inputs['data']<eq>(1 3 32 32)<assert_stmt>launcher.output_blob<eq>'fc3'<block_end><def_stmt>test_infer self data_dir models_dir<block_start>mx_test_model=get_mx_test_model(models_dir)<line_sep>_,_,h,w=mx_test_model.inputs['data']<line_sep>img_raw=cv2.imread(str(data_dir/'1.jpg'))<line_sep>img_rgb=cv2.cvtColor(img_raw cv2.COLOR_BGR2RGB)<line_sep>img_resized=cv2.resize(img_rgb (w h))<line_sep>input_blob=np.transpose([img_resized] (0 3 1 2))<line_sep>res=mx_test_model.predict([{'data':input_blob.astype(np.float32)}] [{}])<assert_stmt>np.argmax(res[0]['fc3'])<eq>7<block_end><def_stmt>test_mxnet_launcher_provide_input_shape_to_adapter self mocker models_dir<block_start>mocker.patch('mxnet.mod.Module.forward' return_value={'fc3':0})<line_sep>launcher=get_mx_test_model(models_dir)<line_sep>zeros=DataRepresentation(np.zeros((1 3 32 32)))<line_sep>launcher.predict([{'data':zeros.data}] [zeros.metadata])<assert_stmt>zeros.metadata['input_shape']<eq>{'data':(1 3 32 32)}<block_end><def_stmt>test_mxnet_launcher_auto_model_search self models_dir<block_start>launcher=get_mx_test_model(models_dir {'model':models_dir})<assert_stmt>launcher.model<eq>models_dir/'samplenet-0000.params'<block_end><block_end>@pytest.mark.usefixtures('mock_path_exists')<class_stmt>TestMxNetLauncherConfig<block_start><def_stmt>test_missed_model_in_create_mxnet_launcher_raises_config_error_exception self<block_start>config={'framework':'mxnet'}<with_stmt>pytest.raises(ConfigError)<block_start>create_launcher(config)<block_end><block_end><def_stmt>test_missed_inputs_in_create_mxnet_launcher_raises_config_error_exception self<block_start>config={'framework':'mxnet' 'model':'model-0000.params'}<with_stmt>pytest.raises(ConfigError)<block_start>create_launcher(config)<block_end><block_end><def_stmt>test_missed_shape_in_inputs_in_create_mxnet_launcher_raises_config_error_exception self<block_start>config={'framework':'mxnet' 'model':'model-0000.params' 'inputs':[{'name':'data' 'type':'INPUT'}]}<with_stmt>pytest.raises(ConfigError)<block_start>create_launcher(config)<block_end><block_end><block_end> |
"""
Manipulate HTML or XHTML documents.
Version 1.1.1. This source code has been placed in the
public domain by <NAME>.
Features:
- Translate HTML back and forth to data structures.
This allows you to read and write HTML documents
programmably, with much flexibility.
- Extract and modify URLs in an HTML document.
- Compatible with Python 3+
See the L{examples} for a quick start.
Moved to Python3 by <NAME> May 2020
"""<line_sep>__version__='1.1.2'<line_sep>__all__=['examples' 'tagextract' 'tagjoin' 'urlextract' 'urljoin' 'URLMatch']<line_sep># -------------------------------------------------------------------
# Globals
# -------------------------------------------------------------------
<import_stmt>re<import_stmt>shlex<import_stmt>string<import_stmt>urllib.request urllib.parse urllib.error<import_stmt>urllib.parse<import_stmt>types<line_sep># Translate text between these strings as plain text (not HTML).
_IGNORE_TAGS=[('script' '/script') ('style' '/style')]<line_sep># Special tags where we have to look for _END_X as part of the
# HTML/XHTML parsing rules.
_BEGIN_COMMENT='<!--'<line_sep>_END_COMMENT='-->'<line_sep>_BEGIN_CDATA='<![CDATA['<line_sep>_END_CDATA=']]>'<line_sep># Mime types that can be parsed as HTML or HTML-like.
_HTML_MIMETYPES=['text/html' 'application/xhtml' 'application/xhtml+xml' 'text/xml' 'application/xml']<line_sep># Mime types that can be parsed as CSS.
_CSS_MIMETYPES=['text/css']<line_sep># -------------------------------------------------------------------
# HTML <-> Data structure
# -------------------------------------------------------------------
<def_stmt>tagextract doc<block_start>"""
  Convert HTML to data structure.

  Returns a list. HTML tags become C{(name, keyword_dict)} tuples
  within the list, while plain text becomes strings within the
  list. All tag names are lowercased and stripped of whitespace.
  Tags which end with forward slashes have a single forward slash
  placed at the end of their name, to indicate that they are XML
  unclosed tags.

  Example:

  >>> tagextract('<img src=hi.gif alt="hi">foo<br><br/></body>')
  [('img', {'src': 'hi.gif', 'alt': 'hi'}), 'foo',
   ('br', {}), ('br/', {}), ('/body', {})]

  Text between C{'<script>'} and C{'<style>'} is rendered directly to
  plain text. This prevents rogue C{'<'} or C{'>'} characters from
  interfering with parsing.

  >>> tagextract('<script type="a"><blah>var x; </script>')
  [('script', {'type': 'a'}), '<blah>var x; ', ('/script', {})]

  Comment strings and XML directives are rendered as a single long
  tag with no attributes. The case of the tag "name" is not changed:

  >>> tagextract('<!-- blah -->')
  [('!-- blah --', {})]
  >>> tagextract('<?xml version="1.0" encoding="utf-8" ?>')
  [('?xml version="1.0" encoding="utf-8" ?', {})]
  >>> tagextract('<!DOCTYPE html PUBLIC etc...>')
  [('!DOCTYPE html PUBLIC etc...', {})]

  Greater-than and less-than characters occurring inside comments or
  CDATA blocks are correctly kept as part of the block:

  >>> tagextract('<!-- <><><><>>..> -->')
  [('!-- <><><><>>..> --', {})]
  >>> tagextract('<!CDATA[[><>><>]<> ]]>')
  [('!CDATA[[><>><>]<> ]]', {})]

  Note that if one modifies these tags, it is important to retain the
  C{"--"} (for comments) or C{"]]"} (for C{CDATA}) at the end of the
  tag name, so that output from L{tagjoin} will be correct HTML/XHTML.
  """
  L = _full_tag_extract(doc)
  for i in range(len(L)):
    if isinstance(L[i], _TextTag):
      # _TextTag object.
      L[i] = L[i].text
    else:
      # _HTMLTag object.
      L[i] = (L[i].name, L[i].attrs)
  return L


def _is_str(s):
  """
  True iff s is a string (checks via duck typing).
  """
  return hasattr(s, 'capitalize')


def tagjoin(L):
  """
  Convert data structure back to HTML.

  This reverses the L{tagextract} function.

  More precisely, if an HTML string is turned into a data structure,
  then back into HTML, the resulting string will be functionally
  equivalent to the original HTML.

  >>> tagjoin(tagextract(s))
  (string that is functionally equivalent to s)

  Three changes are made to the HTML by L{tagjoin}: tags are
  lowercased, C{key=value} pairs are sorted, and values are placed in
  double-quotes (or single-quotes, if the value itself contains a
  double-quote).
  """
  if _is_str(L):
    raise ValueError('got string arg, expected non-string iterable')

  ans = []
  for item in L:
    # Check for string using duck typing.
    if _is_str(item):
      # Handle plain text.
      ans.append(item)
    elif item[0] == '--':
      # Handle closing comment.
      ans.append('-->')
    elif item[0] == '!--':
      # Handle opening comment.
      ans.append('<!--')
    else:
      # Handle regular HTML tag.
      (name, d) = item
      if name[-1:] == '/':
        rslash = ' /'
        name = name[:-1]
      else:
        rslash = ''
      tag_items = []
      items = list(d.items())
      items.sort()
      for (key, value) in items:
        if value is not None:
          if '"' in value and "'" in value:
            raise ValueError('attribute value contains both single' +
                             ' and double quotes')
          elif '"' in value:
            tag_items.append(key + "='" + value + "'")
          else:
            tag_items.append(key + '="' + value + '"')
        else:
          tag_items.append(key)
      tag_items = ' '.join(tag_items)
      if tag_items != '':
        tag_items = ' ' + tag_items
      ans.append('<' + name + tag_items + rslash + '>')
  return ''.join(ans)


def _enumerate(L):
  """
  Like C{enumerate}, provided for compatibility with Python < 2.3.

  Returns a list instead of an iterator.
  """
  return list(zip(list(range(len(L))), L))


def _ignore_tag_index(s, i):
  """
  Helper routine: Find index within C{_IGNORE_TAGS}, or C{-1}.

  If C{s[i:]} begins with an opening tag from C{_IGNORE_TAGS}, return
  the index. Otherwise, return C{-1}.
  """
  for (j, (a, b)) in _enumerate(_IGNORE_TAGS):
    if s[i:i + len(a) + 1].lower() == '<' + a:
      return j
  return -1


def _html_split(s):
  """
  Helper routine: Split string into a list of tags and non-tags.

  >>> _html_split(' blah <tag text> more </tag stuff> ')
  [' blah ', '<tag text>', ' more ', '</tag stuff>', ' ']

  Tags begin with C{'<'} and end with C{'>'}.

  The identity C{''.join(L) == s} is always satisfied.

  Exceptions to the normal parsing of HTML tags:

  C{'<script>'}, C{'<style>'}, and HTML comment tags ignore all HTML
  until the closing pair, and are added as three elements:

  >>> _html_split(' blah<style><<<><></style><!-- hi -->' +
  ...             ' <script language="Javascript"></>a</script>end')
  [' blah', '<style>', '<<<><>', '</style>', '<!--', ' hi ', '-->',
   ' ', '<script language="Javascript">', '</>a', '</script>', 'end']
  """
  s_lower = s.lower()
  L = []

  i = 0               # Index of char being processed.
  while i < len(s):
    c = s[i]
    if c == '<':
      # Left bracket, handle various cases.
      if s[i:i + len(_BEGIN_COMMENT)].startswith(_BEGIN_COMMENT):
        # HTML begin comment tag, '<!--'. Scan for '-->'.
        i2 = s.find(_END_COMMENT, i)
        if i2 < 0:
          # No '-->'. Append the remaining malformed content and stop.
          L.append(s[i:])
          break
        else:
          # Append the comment.
          L.append(s[i:i2 + len(_END_COMMENT)])
          i = i2 + len(_END_COMMENT)
      elif s[i:i + len(_BEGIN_CDATA)].startswith(_BEGIN_CDATA):
        # XHTML begin CDATA tag. Scan for ']]>'.
        i2 = s.find(_END_CDATA, i)
        if i2 < 0:
          # No ']]>'. Append the remaining malformed content and stop.
          L.append(s[i:])
          break
        else:
          # Append the CDATA.
          L.append(s[i:i2 + len(_END_CDATA)])
          i = i2 + len(_END_CDATA)
      else:
        # Regular HTML tag. Scan for '>'.
        orig_i = i
        found = False
        in_quot1 = False
        in_quot2 = False
        for i2 in range(i + 1, len(s)):
          c2 = s[i2]
          if c2 == '"' and not in_quot1:
            in_quot2 = not in_quot2
            # Only turn on double quote if it's in a realistic place.
            if in_quot2 and not in_quot1:
              if i2 > 0 and s[i2 - 1] not in [' ', '\t', '=']:
                in_quot2 = False
          elif c2 == "'" and not in_quot2:
            in_quot1 = not in_quot1
            # Only turn on single quote if it's in a realistic place.
            if in_quot1 and not in_quot2:
              if i2 > 0 and s[i2 - 1] not in [' ', '\t', '=']:
                in_quot1 = False
          elif c2 == '>' and (not in_quot2 and not in_quot1):
            found = True
            break

        if not found:
          # No end '>'. Append the rest as text.
          L.append(s[i:])
          break
        else:
          # Append the tag.
          L.append(s[i:i2 + 1])
          i = i2 + 1

        # Check whether we found a special ignore tag, eg '<script>'.
        tagi = _ignore_tag_index(s, orig_i)
        if tagi >= 0:
          # It's an ignore tag. Scan for the end tag.
          i2 = s_lower.find('<' + _IGNORE_TAGS[tagi][1], i)
          if i2 < 0:
            # No end tag. Append the rest as text.
            L.append(s[i:])
            break
          else:
            # Append the text sandwiched between the tags.
            L.append(s[i:i2])
            # Catch the closing tag with the next loop iteration.
            i = i2
    else:
      # Not a left bracket, append text up to next left bracket.
      i2 = s.find('<', i)
      if i2 < 0:
        # No left brackets, append the rest as text.
        L.append(s[i:])
        break
      else:
        L.append(s[i:i2])
      i = i2

  return L


def _shlex_split(s):
  """
  Like C{shlex.split}, but reversible, and for HTML.

  Splits a string into a list C{L} of strings. List elements
  contain either an HTML tag C{name=value} pair, an HTML name
  singleton (eg C{"checked"}), or whitespace.

  The identity C{''.join(L) == s} is always satisfied.

  >>> _shlex_split('a=5 b="15" name="<NAME>"')
  ['a=5', ' ', 'b="15"', ' ', 'name="<NAME>"']

  >>> _shlex_split('a = a5 b=#b19 name="foo bar" q="hi"')
  ['a = a5', ' ', 'b=#b19', ' ', 'name="foo bar"', ' ', 'q="hi"']

  >>> _shlex_split('a="9"b="15"')
  ['a="9"', 'b="15"']
  """
  ans = []
  i = 0
  while i < len(s):
    c = s[i]
    if c in string.whitespace:
      # Whitespace. Add whitespace while found.
      for i2 in range(i, len(s)):
        if s[i2] not in string.whitespace:
          break
      # Include the entire string if the last char is whitespace.
      if s[i2] in string.whitespace:
        i2 += 1
      ans.append(s[i:i2])
      i = i2
    else:
      # Match 'name = "value"'
      c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*"[^"]*"')
      m = c.match(s, i)
      if m:
        ans.append(s[i:m.end()])
        i = m.end()
        continue

      # Match "name = 'value'"
      c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*\'[^\']*\'')
      m = c.match(s, i)
      if m:
        ans.append(s[i:m.end()])
        i = m.end()
        continue

      # Match 'name = value'
      c = re.compile(r'[^ \t\n\r\f\v"\']+\s*\=\s*[^ \t\n\r\f\v"\']*')
      m = c.match(s, i)
      if m:
        ans.append(s[i:m.end()])
        i = m.end()
        continue

      # Match 'name'
      c = re.compile(r'[^ \t\n\r\f\v"\']+')
      m = c.match(s, i)
      if m:
        ans.append(s[i:m.end()])
        i = m.end()
        continue

      # Couldn't match anything so far, so it's likely that the page
      # has malformed quotes inside a tag. Add leading quotes
      # and spaces to the previous field until we see something.
      subadd = []
      while i < len(s) and s[i] in ['"', "'", ' ', '\t']:
        subadd.append(s[i])
        i += 1

      # Add whatever we could salvage from the situation and move on.
      if len(subadd) > 0:
        ans.append(''.join(subadd))
      else:
        # We totally failed at matching this character, so add it
        # as a separate item and move on.
        ans.append(s[i])
        i += 1
  return ans


def _test_shlex_split():
  """
  Unit test for L{_shlex_split}.
  """
  assert _shlex_split('') == []
  assert _shlex_split(' ') == [' ']
  assert _shlex_split('a=5 b="15" name="<NAME>"') == \
         ['a=5', ' ', 'b="15"', ' ', 'name="<NAME>"']
  assert _shlex_split('a=cvn b=32vsd c= 234jk\te d \t="hi"') == \
         ['a=cvn', ' ', 'b=32vsd', ' ', 'c= 234jk', '\t', 'e', ' ', 'd \t="hi"']
  assert _shlex_split(' a b c d=e f g h i="jk" l mno = p ' +
                      'qr = "st"') == \
         [' ', 'a', ' ', 'b', ' ', 'c', ' ', 'd=e', ' ', 'f', ' ', 'g', ' ',
          'h', ' ', 'i="jk"', ' ', 'l', ' ', 'mno = p', ' ', 'qr = "st"']
  assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25"') == \
         ['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"']
  assert _shlex_split('a=5 b="9"c="15 dfkdfkj "d="25" e=4') == \
         ['a=5', ' ', 'b="9"', 'c="15 dfkdfkj "', 'd="25"', ' ', 'e=4']
  assert _shlex_split('a=5 b=\'9\'c=\'15 dfkdfkj \'d=\'25\' e=4') == \
         ['a=5', ' ', 'b=\'9\'', 'c=\'15 dfkdfkj \'', 'd=\'25\'', ' ', 'e=4']


def _tag_dict(s):
  """
  Helper routine: Extracts a dict from an HTML tag string.

  >>> _tag_dict('bgcolor=#ffffff text="#000000" blink')
  ({'bgcolor':'#ffffff', 'text':'#000000', 'blink': None},
   {'bgcolor':(0,7), 'text':(16,20), 'blink':(31,36)},
   {'bgcolor':(8,15), 'text':(22,29), 'blink':(36,36)})

  Returns a 3-tuple. First element is a dict of
  C{(key, value)} pairs from the HTML tag. Second element
  is a dict mapping keys to C{(start, end)} indices of the
  key in the text. Third element maps keys to C{(start, end)}
  indices of the value in the text.

  Names are lowercased.

  Raises C{ValueError} for unmatched quotes and other errors.
  """
  d = _shlex_split(s)
  attrs = {}
  key_pos = {}
  value_pos = {}
  start = 0
  for item in d:
    end = start + len(item)
    equals = item.find('=')
    if equals >= 0:
      # Contains an equals sign.
      (k1, k2) = (start, start + equals)
      (v1, v2) = (start + equals + 1, start + len(item))

      # Strip spaces.
      while k1 < k2 and s[k1] in string.whitespace:
        k1 += 1
      while k1 < k2 and s[k2 - 1] in string.whitespace:
        k2 -= 1
      while v1 < v2 and s[v1] in string.whitespace:
        v1 += 1
      while v1 < v2 and s[v2 - 1] in string.whitespace:
        v2 -= 1

      # Strip one pair of double quotes around value.
      if v1 < v2 - 1 and s[v1] == '"' and s[v2 - 1] == '"':
        v1 += 1
        v2 -= 1

      # Strip one pair of single quotes around value.
      if v1 < v2 - 1 and s[v1] == "'" and s[v2 - 1] == "'":
        v1 += 1
        v2 -= 1

      (key, value) = (s[k1:k2].lower(), s[v1:v2])

      # Drop bad keys and values.
      if '"' in key or "'" in key:
        continue
      if '"' in value and "'" in value:
        continue

      attrs[key] = value
      key_pos[key] = (k1, k2)
      value_pos[key] = (v1, v2)
    elif item.split() == []:
      # Whitespace. Ignore it.
      pass
    else:
      # A single token, like 'blink'.
      key = item.lower()

      # Drop bad keys.
      if '"' in key or "'" in key:
        continue

      attrs[key] = None
      key_pos[key] = (start, end)
      value_pos[key] = (end, end)
    start = end

  return (attrs, key_pos, value_pos)


def _test_tag_dict():
  """
  Unit test for L{_tag_dict}.
  """
  assert _tag_dict('') == ({}, {}, {})
  assert _tag_dict(' \t\r \n\n \r\n ') == ({}, {}, {})
  assert _tag_dict('bgcolor=#ffffff text="#000000" blink') == \
         ({'bgcolor': '#ffffff', 'text': '#000000', 'blink': None},
          {'bgcolor': (0, 7), 'text': (16, 20), 'blink': (31, 36)},
          {'bgcolor': (8, 15), 'text': (22, 29), 'blink': (36, 36)})
  assert _tag_dict("bgcolor='#ffffff'text='#000000' blink") == \
         ({'bgcolor': '#ffffff', 'text': '#000000', 'blink': None},
          {'bgcolor': (0, 7), 'text': (17, 21), 'blink': (32, 37)},
          {'bgcolor': (9, 16), 'text': (23, 30), 'blink': (37, 37)})
  s = ' \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n'
  (a, b, c) = _tag_dict(s)
  assert a == {'text': 'hi you', 'bg': 'val', 'e': '5', 'name': None}
  for key in list(a.keys()):
    assert s[b[key][0]:b[key][1]] == key
    if a[key] is not None:
      assert s[c[key][0]:c[key][1]] == a[key]


def _full_tag_extract(s):
  """
  Like L{tagextract}, but different return format.

  Returns a list of L{_HTMLTag} and L{_TextTag} instances.

  The return format is very inconvenient for manipulating HTML, and
  only will be useful if you want to find the exact locations where
  tags occur in the original HTML document.
  """
  L = _html_split(s)

  # Starting position of each L[i] in s.
  Lstart = [0] * len(L)
  for i in range(1, len(L)):
    Lstart[i] = Lstart[i - 1] + len(L[i - 1])

  class NotTagError(Exception):
    pass

  for (i, text) in _enumerate(L):
    try:
      # Is it an HTML tag?
      is_tag = False
      if len(text) >= 2 and text[0] == '<' and text[-1] == '>':
        # Turn HTML tag text into (name, keyword_dict) tuple.
        is_tag = True

      is_special = False
      if len(text) >= 2 and (text[1] == '!' or text[1] == '?'):
        is_special = True

      if is_special:
        # A special tag such as an XML directive or '<!-- comment -->'.
        pos = (Lstart[i], Lstart[i] + len(L[i]))

        # Wrap inside an _HTMLTag object.
        L[i] = _HTMLTag(pos, text[1:-1].strip(), {}, {}, {})
      elif is_tag:
        # If an HTML tag, strip brackets and handle what's left.

        # Strip off '<>' and update offset.
        orig_offset = 0
        if len(text) >= 1 and text[0] == '<':
          text = text[1:]
          orig_offset = 1
        if len(text) >= 1 and text[-1] == '>':
          text = text[:-1]

        if len(text) > 0 and text[-1] == '/':
          rslash = True
          text = text[:-1]
        else:
          rslash = False

        m = re.search(r'\s', text)
        first_space = -1
        if m:
          first_space = m.start()

        if first_space < 0:
          (name, dtext) = (text, '')
        else:
          name = text[:first_space]
          dtext = text[first_space + 1:len(text)]

        # Position of dtext relative to original text.
        dtext_offset = len(name) + 1 + orig_offset   # +1 for space.

        # Lowercase everything except XML directives and comments.
        if not name.startswith('!') and not name.startswith('?'):
          name = name.strip().lower()
        if rslash:
          name += '/'

        # Strip off spaces, and update dtext_offset as appropriate.
        orig_dtext = dtext
        dtext = dtext.strip()
        dtext_offset += orig_dtext.index(dtext)

        (attrs, key_pos, value_pos) = _tag_dict(dtext)

        # Correct offsets in key_pos and value_pos.
        for key in list(attrs.keys()):
          key_pos[key] = (key_pos[key][0] + Lstart[i] + dtext_offset,
                          key_pos[key][1] + Lstart[i] + dtext_offset)
          value_pos[key] = (value_pos[key][0] + Lstart[i] + dtext_offset,
                            value_pos[key][1] + Lstart[i] + dtext_offset)

        pos = (Lstart[i], Lstart[i] + len(L[i]))

        # Wrap inside an _HTMLTag object.
        L[i] = _HTMLTag(pos, name, attrs, key_pos, value_pos)
      else:
        # Not an HTML tag.
        raise NotTagError
    except NotTagError:
      # Wrap non-HTML strings inside a _TextTag object.
      pos = (Lstart[i], Lstart[i] + len(L[i]))
      L[i] = _TextTag(pos, L[i])

  return L


class _HTMLTag:
  """
  HTML tag extracted by L{_full_tag_extract}.

  @ivar pos: C{(start, end)} indices of the entire tag in the
             HTML document.

  @ivar name: Name of tag. For example, C{'img'}.

  @ivar attrs: Dictionary mapping tag attributes to corresponding
               tag values.

               Example:

               >>> tag = _full_tag_extract('<a href="d.com">')[0]
               >>> tag.attrs
               {'href': 'd.com'}

               Surrounding quotes are stripped from the values.

  @ivar key_pos: Key position dict.

                 Maps the name of a tag attribute to C{(start, end)}
                 indices for the key string in the C{"key=value"}
                 HTML pair. Indices are absolute, where 0 is the
                 start of the HTML document.

                 Example:

                 >>> tag = _full_tag_extract('<a href="d.com">')[0]
                 >>> tag.key_pos['href']
                 (3, 7)
                 >>> '<a href="d.com">'[3:7]
                 'href'

  @ivar value_pos: Value position dict.

                   Maps the name of a tag attribute to C{(start, end)}
                   indices for the value in the HTML document string.
                   Surrounding quotes are excluded from this range.
                   Indices are absolute, where 0 is the start of the
                   HTML document.

                   Example:

                   >>> tag = _full_tag_extract('<a href="d.com">')[0]
                   >>> tag.value_pos['href']
                   (9, 14)
                   >>> '<a href="d.com">'[9:14]
                   'd.com'
  """
  def __init__(self, pos, name, attrs, key_pos, value_pos):
    """
    Create an _HTMLTag object.
    """
    self.pos = pos
    self.name = name
    self.attrs = attrs
    self.key_pos = key_pos
    self.value_pos = value_pos


class _TextTag:
  """
  Text extracted from an HTML document by L{_full_tag_extract}.

  @ivar text: Extracted text.

  @ivar pos: C{(start, end)} indices of the text.
  """
  def __init__(self, pos, text):
    """
    Create a _TextTag object.
    """
    self.pos = pos
    self.text = text


# -------------------------------------------------------------------
# URL Editing
# -------------------------------------------------------------------

# Tags within which URLs may be found.
_URL_TAGS = ['a href', 'applet archive', 'applet code',
             'applet codebase', 'area href', 'base href',
             'blockquote cite', 'body background', 'del cite',
             'form action', 'frame longdesc', 'frame src',
             'head profile', 'iframe src', 'iframe longdesc',
             'img src', 'img ismap', 'img longdesc', 'img usemap',
             'input src', 'ins cite', 'link href', 'object archive',
             'object codebase', 'object data', 'object usemap',
             'script src', 'table background', 'tbody background',
             'td background', 'tfoot background', 'th background',
             'thead background', 'tr background']
_URL_TAGS = [tuple(s.split()) for s in _URL_TAGS]


def _finditer(pattern, string):
  """
  Like C{re.finditer}, provided for compatibility with Python < 2.3.

  Returns a list instead of an iterator. Otherwise the return format
  is identical to C{re.finditer} (except possibly in the details of
  empty matches).
  """
  compiled = re.compile(pattern)

  ans = []
  start = 0
  while True:
    m = compiled.search(string, start)
    if m:
      ans.append(m)
    else:
      return ans

    m_start = m.start(m.lastindex)
    m_end = m.end(m.lastindex)

    if m_end > m_start:
      start = m_end
    else:
      start += 1


def _remove_comments(doc):
  """
  Replaces commented out characters with spaces in a CSS document.
  """
  ans = []
  i = 0
  while True:
    i2 = doc.find('/*', i)
    if i2 < 0:
      ans += [doc[i:]]
      break
    ans += [doc[i:i2]]
    i3 = doc.find('*/', i2 + 1)
    if i3 < 0:
      i3 = len(doc) - 2
    ans += [' ' * (i3 - i2 + 2)]
    i = i3 + 2
  return ''.join(ans)


def _test_remove_comments():
  """
  Unit test for L{_remove_comments}.
  """
  s = '/*d s kjlsdf */*//*/*//**/**/*//**/a' * 50
  assert len(_remove_comments(s)) == len(s)
  s = '/**/' * 50 + '/*5845*/*/*//*/**/dfd' + '/*//**//'
  assert len(_remove_comments(s)) == len(s)
  s = 'a/**/' * 50 + '/**//**/////***/****/*//**//*/' * 5
  assert len(_remove_comments(s)) == len(s)
  # Comments are replaced by runs of spaces of the same length, so the
  # document length is preserved and only the words survive.
  s = 'hi /* foo */ hello /* bar!!!!! \n\n */ there!'
  assert len(_remove_comments(s)) == len(s)
  assert _remove_comments(s).split() == ['hi', 'hello', 'there!']


def urlextract(doc, siteurl=None, mimetype='text/html'):
  """
  Extract URLs from HTML or stylesheet.

  Extracts only URLs that are linked to or embedded in the document.
  Ignores plain text URLs that occur in the non-HTML part of the
  document.

  Returns a list of L{URLMatch} objects.

  >>> L = urlextract('<img src="a.gif"><a href="www.google.com">')
  >>> L[0].url
  'a.gif'
  >>> L[1].url
  'www.google.com'

  If C{siteurl} is specified, all URLs are made into absolute URLs
  by assuming that C{doc} is located at the URL C{siteurl}.

  >>> doc = '<img src="a.gif"><a href="/b.html">'
  >>> L = urlextract(doc, 'http://www.python.org/~guido/')
  >>> L[0].url
  'http://www.python.org/~guido/a.gif'
  >>> L[1].url
  'http://www.python.org/b.html'

  If C{mimetype} is C{"text/css"}, the document will be parsed
  as a stylesheet.

  If a stylesheet is embedded inside an HTML document, then
  C{urlextract} will extract the URLs from both the HTML and the
  stylesheet.
  """
  mimetype = mimetype.lower()
  if mimetype.split()[0] in _CSS_MIMETYPES:
    doc = _remove_comments(doc)

    # Match URLs within the CSS stylesheet:
    # url(blah), url('blah'), url("blah"), and @import variants.
    L = _finditer(
      r'''url\s*\(([^\r\n\("']*?)\)|''' +
      r'''url\s*\(\s*"([^\r\n]*?)"\s*\)|''' +
      r'''url\s*\(\s*'([^\r\n]*?)'\s*\)|''' +
      r'''@import\s+([^ \t\r\n"';@\(\)]+)[^\r\n;@\(\)]*[\r\n;]|''' +
      r'''@import\s+'([^ \t\r\n"';@\(\)]+)'[^\r\n;@\(\)]*[\r\n;]|''' +
      r'''@import\s+"([^ \t\r\n"';\(\)']+)"[^\r\n;@\(\)]*[\r\n;]''',
      doc + ';\n')
    L = [(x.start(x.lastindex), x.end(x.lastindex)) for x in L]

    ans = []
    for (s, e) in L:
      e = min(e, len(doc))
      if e > s:
        ans.append(URLMatch(doc, s, e, siteurl, False, True))
  elif mimetype.split()[0] in _HTML_MIMETYPES:
    # Match URLs within the HTML document.
    ans = []
    L = _full_tag_extract(doc)
    item = None
    for i in range(len(L)):
      prev_item = item
      item = L[i]

      # Handle string item (text) or tuple item (tag).
      if isinstance(item, _TextTag):
        # Current item is text.
        if isinstance(prev_item, _HTMLTag) and prev_item.name == 'style':
          # And previous item is <style>. Process a stylesheet.
          temp = urlextract(item.text, siteurl, 'text/css')

          # Offset indices and add to ans.
          for j in range(len(temp)):
            temp[j].start += item.pos[0]
            temp[j].end += item.pos[0]
          ans += temp
        else:
          # Regular text. Ignore.
          pass
      else:
        # Current item is a tag.
        if 'style' in item.attrs:
          # Process a stylesheet embedded in the 'style' attribute.
          temp = urlextract(item.attrs['style'], siteurl, 'text/css')

          # Offset indices and add to ans.
          for j in range(len(temp)):
            temp[j].start += item.value_pos['style'][0]
            temp[j].end += item.value_pos['style'][0]
          ans += temp

        for (a, b) in _URL_TAGS:
          if item.name.startswith(a) and b in list(item.attrs.keys()):
            # Got one URL.
            url = item.attrs[b]
            # FIXME: Some HTML tag wants a URL list, look up which
            # tag and make it a special case.
            (start, end) = item.value_pos[b]
            tag_name = a
            tag_attr = b
            tag_attrs = item.attrs
            tag_index = i
            tag = URLMatch(doc, start, end, siteurl, True, False,
                           tag_attr, tag_attrs, tag_index, tag_name)
            ans.append(tag)
    # End of 'text/html' mimetype case.
  else:
    raise ValueError('unknown MIME type: ' + repr(mimetype))

  # Filter the answer, removing duplicate matches.
  start_end_map = {}
  filtered_ans = []
  for item in ans:
    if (item.start, item.end) not in start_end_map:
      start_end_map[(item.start, item.end)] = None
      filtered_ans.append(item)
  return filtered_ans
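
# A small illustration (not part of the module) of the 'text/css' branch above, using only
# the public urlextract() API defined in this file:
#
#   >>> css = "body { background: url('bg.png'); }\n@import \"extra.css\";"
#   >>> [m.url for m in urlextract(css, mimetype='text/css')]
#   ['bg.png', 'extra.css']
#
# As in the HTML case, siteurl can be passed to absolutize the matched URLs.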

def _tuple_replace(s, Lindices, Lreplace):
  """
  Replace slices of a string with new substrings.

  Given a list of slice tuples in C{Lindices}, replace each slice
  in C{s} with the corresponding replacement substring from
  C{Lreplace}.

  Example:

  >>> _tuple_replace('0123456789', [(4,5), (6,9)], ['abc', 'def'])
  '0123abc5def9'
  """
  ans = []
  Lindices = Lindices[:]
  Lindices.sort()

  if len(Lindices) != len(Lreplace):
    raise ValueError('lists differ in length')

  for i in range(len(Lindices) - 1):
    if Lindices[i][1] > Lindices[i + 1][0]:
      raise ValueError('tuples overlap')
    if Lindices[i][1] < Lindices[i][0]:
      raise ValueError('invalid tuple')
    if min(Lindices[i][0], Lindices[i][1]) < 0 or \
       max(Lindices[i][0], Lindices[i][1]) >= len(s):
      raise ValueError('bad index')

  j = 0
  for i in range(len(Lindices)):
    ans.append(s[j:Lindices[i][0]])
    ans.append(Lreplace[i])
    j = Lindices[i][1]
  ans.append(s[j:])

  return ''.join(ans)


def _test_tuple_replace():
  """
  Unit test for L{_tuple_replace}.
  """
  assert _tuple_replace('', [], []) == ''
  assert _tuple_replace('0123456789', [], []) == '0123456789'
  assert _tuple_replace('0123456789', [(4, 5), (6, 9)], ['abc', 'def']) == \
         '0123abc5def9'
  assert _tuple_replace('01234567890123456789',
                        [(1, 9), (13, 14), (16, 18)],
                        ['abcd', 'efg', 'hijk']) == '0abcd9012efg45hijk89'


def urljoin(s, L):
  """
  Write back document with modified URLs (reverses L{urlextract}).

  Given a list C{L} of L{URLMatch} objects obtained from
  L{urlextract}, substitutes changed URLs into the original
  document C{s}, and returns the modified document.

  One should only modify the C{.url} attribute of the L{URLMatch}
  objects. The ordering of the URLs in the list is not important.

  >>> doc = '<img src="a.png"><a href="b.png">'
  >>> L = urlextract(doc)
  >>> L[0].url = 'foo'
  >>> L[1].url = 'bar'
  >>> urljoin(doc, L)
  '<img src="foo"><a href="bar">'
  """
  return _tuple_replace(s, [(x.start, x.end) for x in L],
                        [x.url for x in L])


def examples():
  """
Examples of the C{htmldata} module.
Example 1:
Print all absolutized URLs from Google.
Here we use L{urlextract} to obtain all URLs in the document.
>>> import urllib.request, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> for u in htmldata.urlextract(contents, url):
... print(u.url)
...
http://www.google.com/images/logo.gif
http://www.google.com/search
(More output)
Note that the second argument to L{urlextract} causes the
URLs to be made absolute with respect to that base URL.
Example 2:
Print all image URLs from Google in relative form.
>>> import urllib.request, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> for u in htmldata.urlextract(contents):
... if u.tag_name == 'img':
... print(u.url)
...
/images/logo.gif
Equivalently, one can use L{tagextract}, and look for occurrences
of C{<img>} tags. The L{urlextract} function is mostly a convenience
function for when one wants to extract and/or modify all URLs in a
document.
Example 3:
Replace all C{<a href>} links on Google with the Microsoft web page.
Here we use L{tagextract} to turn the HTML into a data structure,
and then loop over the in-order list of tags (items which are not
tuples are plain text, which is ignored).
>>> import urllib.request, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> L = htmldata.tagextract(contents)
>>> for item in L:
... if isinstance(item, tuple) and item[0] == 'a':
... # It's an HTML <a> tag! Give it an href=.
... item[1]['href'] = 'http://www.microsoft.com/'
...
>>> htmldata.tagjoin(L)
(Microsoftized version of Google)
Example 4:
Make all URLs on an HTML document be absolute.
>>> import urllib.request, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> htmldata.urljoin(contents, htmldata.urlextract(contents, url))
(Google HTML page with absolute URLs)
Example 5:
Properly quote all HTML tag values for pedants.
>>> import urllib.request, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> htmldata.tagjoin(htmldata.tagextract(contents))
(Properly quoted version of the original HTML)
Example 6:
Modify all URLs in a document so that they are appended
to our proxy CGI script C{http://mysite.com/proxy.cgi}.
>>> import urllib.request, htmldata
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> proxy_url = 'http://mysite.com/proxy.cgi?url='
>>> L = htmldata.urlextract(contents)
>>> for u in L:
... u.url = proxy_url + u.url
...
>>> htmldata.urljoin(contents, L)
(Document with all URLs wrapped in our proxy script)
Example 7:
Download all images from a website.
>>> import urllib.request, urllib.parse, htmldata, time
>>> url = 'http://www.google.com/'
>>> contents = urllib.request.urlopen(url).read().decode()
>>> for u in htmldata.urlextract(contents, url):
... if u.tag_name == 'img':
... filename = urllib.parse.quote_plus(u.url)
... urllib.request.urlretrieve(u.url, filename)
... time.sleep(0.5)
...
(Images are downloaded to the current directory)
Many sites will protect against bandwidth-draining robots by
checking the HTTP C{Referer} [sic] and C{User-Agent} fields.
To circumvent this, one can create a C{urllib.request.Request} object
with a legitimate C{Referer} and a C{User-Agent} such as
C{"Mozilla/4.0 (compatible; MSIE 5.5)"}. Then use
C{urllib.request.urlopen} to download the content. Be warned that some
website operators will respond to rapid robot requests by banning
the offending IP address.
"""<line_sep>print(examples.__doc__)<block_end><class_stmt>URLMatch<block_start>"""
A matched URL inside an HTML document or stylesheet.
A list of C{URLMatch} objects is returned by L{urlextract}.
@ivar url: URL extracted.
@ivar start: Starting character index.
@ivar end: End character index.
@ivar in_html: C{True} if URL occurs within an HTML tag.
@ivar in_css: C{True} if URL occurs within a stylesheet.
@ivar tag_attr: Specific tag attribute in which URL occurs.
Example: C{'href'}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_attrs: Dictionary of all tag attributes and values.
Example: C{{'src':'http://X','alt':'Img'}}.
C{None} if the URL does not occur within an HTML
tag.
@ivar tag_index: Index of the tag in the list that would be
generated by a call to L{tagextract}.
@ivar tag_name: HTML tag name in which URL occurs.
Example: C{'img'}.
C{None} if the URL does not occur within an HTML
tag.
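For instance (a minimal hypothetical one-tag document):
>>> m = urlextract('<img src="a.png">')[0]
>>> (m.url, m.tag_name, m.tag_attr, m.in_html, m.in_css)
('a.png', 'img', 'src', True, False)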
"""<def_stmt>__init__ self doc start end siteurl in_html in_css tag_attr=<none> tag_attrs=<none> tag_index=<none> tag_name=<none><block_start>"""
Create a URLMatch object.
"""<line_sep>self.doc=doc<line_sep>self.start=start<line_sep>self.end=end<line_sep>self.url=doc[start:end]<line_sep>self.in_html=in_html<line_sep>self.in_css=in_css<if_stmt>siteurl<ne><none><block_start>self.url=urllib.parse.urljoin(siteurl self.url)<block_end>self.tag_attr=tag_attr<line_sep>self.tag_attrs=tag_attrs<line_sep>self.tag_index=tag_index<line_sep>self.tag_name=tag_name<block_end><block_end><def_stmt>_cast_to_str arg str_class<block_start>"""
Casts string components of several data structures to str_class.
Casts string, list of strings, or list of tuples (as returned by
L{tagextract}) such that all strings are made to type str_class.
"""<if_stmt>_is_str(arg)<block_start><return>str_class(arg)<block_end><elif_stmt>isinstance(arg list)<block_start>ans=[]<for_stmt>item arg<block_start><if_stmt>_is_str(item)<block_start>ans.append(str_class(item))<block_end><elif_stmt>isinstance(item tuple)<and>len(item)<eq>2<block_start>(a b)=item<line_sep>b_prime={}<for_stmt>(b_key b_value) list(b.items())<block_start><if_stmt>b_value<is><none><block_start>b_prime[str_class(b_key)]=<none><block_end><else_stmt><block_start>b_prime[str_class(b_key)]=str_class(b_value)<block_end><block_end>ans.append((str_class(a) b_prime))<block_end><else_stmt><block_start><raise>ValueError('unknown argument type')<block_end><block_end><return>ans<block_end><else_stmt><block_start><raise>ValueError('unknown argument type')<block_end><block_end># -------------------------------------------------------------------
# Unit Tests: HTML <-> Data structure
# -------------------------------------------------------------------
<def_stmt>_test_tagextract str_class=str<block_start>"""
Unit tests for L{tagextract} and L{tagjoin}.
Strings are cast to the string class argument str_class.
"""<line_sep># Work around lack of nested scopes in Python <= 2.1.
<def_stmt>f obj str_class2=str_class<block_start><return>_cast_to_str(obj str_class2)<block_end># Simple HTML document to test.
doc1=f('\n\n<Html><BODY bgcolor=#ffffff>Hi<h1>Ho</h1><br>'+'<br /><img SRc="text%5f.gif"><TAG NOshow>'+'<img test="5%ff" /></body></html>\nBye!\n')<line_sep>doc2=f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>'+'<test tag="5" content=6><is broken=False><yay>'+'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> '+'<script language="JavaScript"><>!><!_!_!-->!_-></script>')<line_sep>doc3=f('\r\t< html >< tag> <!--comment--> <tag a = 5> '+'<foo \r\nbg = val text \t= "hi you" name\t e="5"\t\t\t\n>')<line_sep>doc4=f('<?xml ??><foo><!-- <img> --><!DOCTYPE blah""/>'+'<![CDATA[ more and weirder<bar> ] ][]]><![C[DATA[[>'+'<abc key=value><![CDATA[to eof')<line_sep>doc5=f('<a href="foobar/ \t="base="10" x="15"><a x="9"t="20">')<line_sep># -----------------------------------------------------------------
# Test _html_split()
# -----------------------------------------------------------------
s=doc1<assert_stmt>s<eq>f('').join(_html_split(s))<assert_stmt>_html_split(s)<eq>f(['\n\n' '<Html>' '<BODY bgcolor=#ffffff>' 'Hi' '<h1>' 'Ho' '</h1>' '<br>' '<br />' '<img SRc="text%5f.gif">' '<TAG NOshow>' '<img test="5%ff" />' '</body>' '</html>' '\nBye!\n'])<line_sep>s=doc2<assert_stmt>s<eq>f('').join(_html_split(s))<line_sep># Test single quotes
s=doc2.replace(f('"') f("'"))<assert_stmt>s<eq>f('').join(_html_split(s))<line_sep>s=f('<!-- test weird comment <body> <html> --> <h1>Header'+'</h1 value=10 a=11>')<assert_stmt>s<eq>f('').join(_html_split(s))<assert_stmt>_html_split(s)<eq>f(['<!-- test weird comment <body> <html> -->' ' ' '<h1>' 'Header' '</h1 value=10 a=11>'])<line_sep>s=f('<!-- <!-- nested messed up --> blah ok <now> what<style>hi'+'<><>></style><script language="Java"><aL><>><>></script>a')<assert_stmt>s<eq>f('').join(_html_split(s))<assert_stmt>_html_split(s)<eq>f(['<!-- <!-- nested messed up -->' ' blah ok ' '<now>' ' what' '<style>' 'hi<><>>' '</style>' '<script language="Java">' '<aL><>><>>' '</script>' 'a'])<line_sep>s=f('<!-- ><# -->!<!-!._-><!-- aa--> <style><tag//</style> <tag '+'<tag <! <! -> <!-- </who< <who> tag> <huh-->-</style>'+'</style<style>')<assert_stmt>s<eq>f('').join(_html_split(s))<assert_stmt>_html_split(s)<eq>f(['<!-- ><# -->' '!' '<!-!._->' '<!-- aa-->' ' ' '<style>' '<tag//' '</style>' ' ' '<tag <tag <! <! ->' ' ' '<!-- </who< <who> tag> <huh-->' '-' '</style>' '</style<style>'])<line_sep>s=doc4<assert_stmt>s<eq>f('').join(_html_split(s))<assert_stmt>_html_split(s)<eq>f(['<?xml ??>' '<foo>' '<!-- <img> -->' '<!DOCTYPE blah""/>' '<![CDATA[ more and weirder<bar> ] ][]]>' '<![C[DATA[[>' '<abc key=value>' '<![CDATA[to eof'])<line_sep># -----------------------------------------------------------------
# Test tagextract() and tagjoin()
# -----------------------------------------------------------------
# Test for whitespace handling in tags.
<assert_stmt>(tagextract('<a\n\t\t\t\v\rhref="a.png"\tsize=10>')<eq>[('a' {'href':'a.png' 'size':'10'})])<line_sep>s=doc1<line_sep>s2=doc1.replace(f('"') f("'"))# Test single quotes, too.
<assert_stmt>tagextract(f(''))<eq>[]<assert_stmt>tagextract(s)<eq>tagextract(s2)<eq>f(['\n\n' ('html' {}) ('body' {'bgcolor':'#ffffff'}) 'Hi' ('h1' {}) 'Ho' ('/h1' {}) ('br' {}) ('br/' {}) ('img' {'src':'text%5f.gif'}) ('tag' {'noshow':<none>}) ('img/' {'test':'5%ff'}) ('/body' {}) ('/html' {}) '\nBye!\n'])<line_sep>s2=f('\n\n<html><body bgcolor="#ffffff">Hi<h1>Ho</h1><br>'+'<br /><img src="text%5f.gif"><tag noshow>'+'<img test="5%ff" /></body></html>\nBye!\n')<assert_stmt>tagjoin(tagextract(s))<eq>s2<line_sep>doc2old=doc2<line_sep>doc2=f('\r<HTML><!-- Comment<a href="blah"> --><hiYa><foo>'+'<test tag="5" content=6><is broken=False><yay>'+'<style><><>><</style><foo bar=5>end<!-- <!-- nested --> '+'<script language="JavaScript"><>!><!_!_!-->!_-></script>')<assert_stmt>doc2old<eq>doc2<line_sep>s=doc2<assert_stmt>tagextract(s)<eq>f(['\r' ('html' {}) ('!-- Comment<a href="blah"> --' {}) ('hiya' {}) ('foo' {}) ('test' {'content':'6' 'tag':'5'}) ('is' {'broken':'False'}) ('yay' {}) ('style' {}) '<><>><' ('/style' {}) ('foo' {'bar':'5'}) 'end' ('!-- <!-- nested --' {}) ' ' ('script' {'language':'JavaScript'}) ('>!><!_!_!-->!_-' {}) ('/script' {})])<assert_stmt>tagjoin(tagextract(s))<eq>f('\r<html><!-- Comment<a href="blah"> --><hiya><foo><test '+'content="6" tag="5"><is broken="False"><yay><style><><>><'+'</style><foo bar="5">end<!-- <!-- nested --> '+'<script language="JavaScript"><>!><!_!_!-->!_-></script>')<line_sep>s=doc5<assert_stmt>tagextract(s)<eq>f([('a' {'href':'foobar/ \t=' 'base':'10' 'x':'15'}) ('a' {'x':'9' 't':'20'})])<assert_stmt>tagjoin(tagextract(s))<eq>f('<a base="10" href="foobar/ \t=" x="15"><a t="20" x="9">')<line_sep># -----------------------------------------------------------------
# Test _full_tag_extract()
# -----------------------------------------------------------------
<for_stmt>s [doc1 doc2 doc3 doc1.replace(f('"') f("'")) doc2.replace(f('"') f("'")) doc3.replace(f('"') f("'"))]<block_start>L=_full_tag_extract(s)<for_stmt>(i item) _enumerate(L)<block_start><if_stmt>isinstance(item _HTMLTag)<block_start><for_stmt>key list(item.attrs.keys())<block_start><assert_stmt>s[item.key_pos[key][0]:item.key_pos[key][1]].lower()<eq>key<if_stmt>item.attrs[key]<ne><none><block_start><assert_stmt>s[item.value_pos[key][0]:item.value_pos[key][1]]<eq>item.attrs[key]<block_end><block_end><block_end><block_end><block_end>n=1000<line_sep>doc4=f('<tag name = "5" value ="6afdjherknc4 cdk j" a="7" b=8/>')<line_sep>doc4<augmul>n<line_sep>L=tagextract(doc4)<assert_stmt>len(L)<eq>n<for_stmt>i range(n)<block_start><assert_stmt>L[i]<eq>f([('tag/' {'name':'5' 'value':'6afdjherknc4 cdk j' 'a':'7' 'b':'8'})])[0]<block_end># -----------------------------------------------------------------
# Test tagextract() and tagjoin() with XML directives.
# -----------------------------------------------------------------
doc1=f('a<?xml version="1.0"?>'+'b<!DOCTYPE html'+'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'+'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" >c'+'<html a=b><!-- Comment <><> hi! -->'+'z<![CDATA[ some content ]]>rx'+'<![C[DATA[ more and weirder ] ][]]>tt')<line_sep>doc1join=f('a<?xml version="1.0"?>b<!DOCTYPE htmlPUBLIC "-//W3C//DTD '+'XHTML 1.0 Transitional//EN""http://www.w3.org/TR/xhtml1/DTD/'+'xhtml1-transitional.dtd">c<html a="b"><!-- Comment <><> hi! '+'-->z<![CDATA[ some content ]]>rx<![C[DATA[ more and weirder ]'+' ][]]>tt')<line_sep>ans1=f(['a' ('?xml version="1.0"?' {}) 'b' ('!DOCTYPE html'+'PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'+'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' {}) 'c' ('html' {'a':'b'}) ('!-- Comment <><> hi! --' {}) 'z' ('![CDATA[ some content ]]' {}) 'rx' ('![C[DATA[ more and weirder ] ][]]' {}) 'tt'])<assert_stmt>(tagextract(f('<?xml version="1.0" encoding="utf-8" ?>'))<eq>f([('?xml version="1.0" encoding="utf-8" ?' {})]))<assert_stmt>(tagextract(f('<!DOCTYPE html PUBLIC etc...>'))<eq>f([('!DOCTYPE html PUBLIC etc...' {})]))<assert_stmt>tagextract(doc1)<eq>ans1<assert_stmt>tagjoin(tagextract(doc1))<eq>doc1join<block_end># -------------------------------------------------------------------
# Unit Tests: URL Parsing
# -------------------------------------------------------------------
<def_stmt>_test_urlextract str_class=str<block_start>"""
Unit tests for L{urlextract} and L{urljoin}.
Strings are cast to the string class argument str_class.
"""<line_sep># Work around lack of nested scopes in Python <= 2.1.
<def_stmt>f obj str_class2=str_class<block_start><return>_cast_to_str(obj str_class2)<block_end>doc1=f('urlblah, url ( blah2, url( blah3) url(blah4) '+'url("blah5") hum("blah6") url)"blah7"( url ( " blah8 " );;')<line_sep>doc2=f('<html><img src="a.gif" alt="b"><a href = b.html name='+'"c"><td background = ./c.png width=100%><a value=/f.jpg>'+'<img src="http://www.abc.edu/d.tga">http://www.ignore.us/'+'\nhttp://www.nowhere.com <style>url(h.gif) '+'url(http://www.testdomain.com/) http://ignore.com/a'+'</style><img alt="c" src = "a.gif"><img src=/i.png>')<line_sep>doc3=f('@import foo;\n@import bar\n@import url(\'foo2\');'+'@import url(\'http://bar2\')\n@import\turl("foo!");'+'@import \'foo3\'\n@import "bar3";\n@importfails;'+'@import;@import\n;url(\'howdy!\')\n@import foo5 ;'+'@import \'foo6\' \n@import "foo7";')<line_sep>doc4=f('@import foo handheld;\n@import \'bar\' handheld\n'+'@import url(\'foo2\') handheld; @import url(bar2) ha\n'+'@import url("foo3") handheld\n')<line_sep>doc5=f('<html><img src="a.gif" alt="b" style="url(\'foo\')">'+'<a href = b.html name="c" style="@import \'bar.css\'">')<line_sep>doc6=doc2.replace(f('"') f("'"))# Test single quotes, too.
# Test CSS.
s=doc1<line_sep>L=urlextract(s mimetype='text/css')<line_sep>L2=[x.url<for>x L]<assert_stmt>L2<eq>f([' blah3' 'blah4' 'blah5' ' blah8 '])<assert_stmt>[s[x.start:x.end]<eq>x.url<for>x L].count(<false>)<eq>0<line_sep># Test CSS more.
s=doc3<line_sep>L=urlextract(s mimetype='text/css')<line_sep>L2=[x.url<for>x L]<assert_stmt>L2<eq>f(['foo' 'bar' 'foo2' 'http://bar2' 'foo!' 'foo3' 'bar3' 'howdy!' 'foo5' 'foo6' 'foo7'])<assert_stmt>[s[x.start:x.end]<eq>x.url<for>x L].count(<false>)<eq>0<line_sep># Test CSS even more.
s=doc4<line_sep>L=urlextract(s mimetype='text/css')<line_sep>L2=[x.url<for>x L]<assert_stmt>L2<eq>f(['foo' 'bar' 'foo2' 'bar2' 'foo3'])<assert_stmt>[s[x.start:x.end]<eq>x.url<for>x L].count(<false>)<eq>0<line_sep># Test HTML.
s=doc2<line_sep>L=urlextract(s)<line_sep>L2=[x.url<for>x L]<line_sep>L3=[x.url<for>x urlextract(doc6)]<line_sep>ans=f(['a.gif' 'b.html' './c.png' 'http://www.abc.edu/d.tga' 'h.gif' 'http://www.testdomain.com/' 'a.gif' '/i.png'])<assert_stmt>L2<eq>L3<eq>ans<for_stmt>i range(len(L))<block_start><assert_stmt>s[L[i].start:L[i].end]<eq>L[i].url<block_end># Test HTML more.
n=100<line_sep>s2=s<times>n<line_sep>L3=urlextract(s2)<line_sep>L4=[x.url<for>x L3]<assert_stmt>L4<eq>L2<times>n<for_stmt>i range(len(L3))<block_start><assert_stmt>s2[L3[i].start:L3[i].end]<eq>L3[i].url<block_end># Test HTML w/ siteurl.
base=f('http://www.python.org/~guido/')<line_sep>L=urlextract(s base)<line_sep>L2=[x.url<for>x L]<assert_stmt>L2<eq>[urllib.parse.urljoin(base x)<for>x ans]<line_sep># Test urljoin().
<assert_stmt>urljoin(doc1 urlextract(doc1 mimetype='text/css'))<eq>doc1<assert_stmt>urljoin(doc2 urlextract(doc2))<eq>doc2<line_sep>s=doc2<line_sep>L=urlextract(s)<line_sep>L[3].url=f('FOO')<line_sep>L[5].url=f('BAR')<line_sep>L[7].url=f('F00!')<assert_stmt>urljoin(s L)<eq>f('<html><img src="a.gif" alt="b"><a href = b.html name="c">'+'<td background = ./c.png width=100%><a value=/f.jpg>'+'<img src="FOO">http://www.ignore.us/\nhttp://www.nowhere.com '+'<style>url(h.gif) url(BAR) http://ignore.com/a</style>'+'<img alt="c" src = "a.gif"><img src=F00!>')<line_sep># Test HTML yet more.
s=doc5<line_sep>L=urlextract(s)<line_sep>L2=[x.url<for>x L]<assert_stmt>L2<eq>f(['foo' 'a.gif' 'bar.css' 'b.html'])<assert_stmt>[s[x.start:x.end]<eq>x.url<for>x L].count(<false>)<eq>0<block_end># -------------------------------------------------------------------
# Unit Test Main Routine
# -------------------------------------------------------------------
<def_stmt>_test <block_start>"""
Unit test main routine.
"""<line_sep>print('Unit tests:')<line_sep>_test_remove_comments()<line_sep>print(' _remove_comments: OK')<line_sep>_test_shlex_split()<line_sep>print(' _shlex_split: OK')<line_sep>_test_tag_dict()<line_sep>print(' _tag_dict: OK')<line_sep>_test_tuple_replace()<line_sep>print(' _tuple_replace: OK')<line_sep>_test_tagextract()<line_sep>print(' tagextract*: OK')<line_sep>_test_tagextract(str)<line_sep>print(' tagextract (unicode)*: OK')<line_sep>_test_urlextract()<line_sep>print(' urlextract*: OK')<line_sep>_test_urlextract(str)<line_sep>print(' urlextract (unicode)*: OK')<line_sep>print()<line_sep>print('* The corresponding join method has been tested as well.')<block_end><if_stmt>__name__<eq>'__main__'<block_start>_test()<block_end> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
<import_stmt>os<import_stmt>re<import_stmt>shlex<import_from_stmt>knack.util CLIError<import_stmt>azext_alias<import_from_stmt>azext_alias.argument get_placeholders<import_from_stmt>azext_alias.util get_config_parser is_url reduce_alias_table filter_alias_create_namespace retrieve_file_from_url <import_from_stmt>azext_alias._const COLLISION_CHECK_LEVEL_DEPTH INVALID_ALIAS_COMMAND_ERROR EMPTY_ALIAS_ERROR INVALID_STARTING_CHAR_ERROR INCONSISTENT_ARG_ERROR COMMAND_LVL_ERROR CONFIG_PARSING_ERROR ALIAS_FILE_NOT_FOUND_ERROR ALIAS_FILE_DIR_ERROR FILE_ALREADY_EXISTS_ERROR ALIAS_FILE_NAME <import_from_stmt>azext_alias.alias AliasManager<def_stmt>process_alias_create_namespace namespace<block_start>"""
Validate input arguments when the user invokes 'az alias create'.
Args:
namespace: argparse namespace object.
"""<line_sep>namespace=filter_alias_create_namespace(namespace)<line_sep>_validate_alias_name(namespace.alias_name)<line_sep>_validate_alias_command(namespace.alias_command)<line_sep>_validate_alias_command_level(namespace.alias_name namespace.alias_command)<line_sep>_validate_pos_args_syntax(namespace.alias_name namespace.alias_command)<block_end><def_stmt>process_alias_import_namespace namespace<block_start>"""
Validate input arguments when the user invokes 'az alias import'.
Args:
namespace: argparse namespace object.
"""<if_stmt>is_url(namespace.alias_source)<block_start>alias_source=retrieve_file_from_url(namespace.alias_source)<line_sep>_validate_alias_file_content(alias_source url=namespace.alias_source)<block_end><else_stmt><block_start>namespace.alias_source=os.path.abspath(namespace.alias_source)<line_sep>_validate_alias_file_path(namespace.alias_source)<line_sep>_validate_alias_file_content(namespace.alias_source)<block_end><block_end><def_stmt>process_alias_export_namespace namespace<block_start>"""
Validate input arguments when the user invokes 'az alias export'.
Args:
namespace: argparse namespace object.
"""<line_sep>namespace.export_path=os.path.abspath(namespace.export_path)<if_stmt>os.path.isfile(namespace.export_path)<block_start><raise>CLIError(FILE_ALREADY_EXISTS_ERROR.format(namespace.export_path))<block_end>export_path_dir=os.path.dirname(namespace.export_path)<if_stmt><not>os.path.isdir(export_path_dir)<block_start>os.makedirs(export_path_dir)<block_end><if_stmt>os.path.isdir(namespace.export_path)<block_start>namespace.export_path=os.path.join(namespace.export_path ALIAS_FILE_NAME)<block_end><block_end><def_stmt>_validate_alias_name alias_name<block_start>"""
Check if the alias name is valid.
Args:
alias_name: The name of the alias to validate.
"""<if_stmt><not>alias_name<block_start><raise>CLIError(EMPTY_ALIAS_ERROR)<block_end><if_stmt><not>re.match('^[a-zA-Z]' alias_name)<block_start><raise>CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))<block_end><block_end><def_stmt>_validate_alias_command alias_command<block_start>"""
Check if the alias command is valid.
Args:
alias_command: The command to validate.
"""<if_stmt><not>alias_command<block_start><raise>CLIError(EMPTY_ALIAS_ERROR)<block_end>split_command=shlex.split(alias_command)<line_sep>boundary_index=len(split_command)<for_stmt>i,subcommand enumerate(split_command)<block_start><if_stmt><not>re.match('^[a-z]' subcommand.lower())<or>i<g>COLLISION_CHECK_LEVEL_DEPTH<block_start>boundary_index=i<line_sep><break><block_end><block_end># Extract possible CLI commands and validate
command_to_validate=' '.join(split_command[:boundary_index]).lower()<for_stmt>command azext_alias.cached_reserved_commands<block_start><if_stmt>re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate) command)<block_start><return><block_end><block_end>_validate_positional_arguments(shlex.split(alias_command))<block_end><def_stmt>_validate_pos_args_syntax alias_name alias_command<block_start>"""
Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate.
"""<line_sep>pos_args_from_alias=get_placeholders(alias_name)<line_sep># Split by '|' to extract positional argument name from Jinja filter (e.g. {{ arg_name | upper }})
# Split by '.' to extract positional argument name from function call (e.g. {{ arg_name.split()[0] }})
pos_args_from_command=[x.split('|')[0].split('.')[0].strip()<for>x get_placeholders(alias_command)]<if_stmt>set(pos_args_from_alias)<ne>set(pos_args_from_command)<block_start>arg_diff=set(pos_args_from_alias)^set(pos_args_from_command)<line_sep><raise>CLIError(INCONSISTENT_ARG_ERROR.format(''<if>len(arg_diff)<eq>1<else>'s' arg_diff 'is'<if>len(arg_diff)<eq>1<else>'are'))<block_end><block_end><def_stmt>_validate_alias_command_level alias command<block_start>"""
Make sure that if the alias is a reserved command, the command that the alias points to
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to.
"""<line_sep>alias_collision_table=AliasManager.build_collision_table([alias])<line_sep># Alias is not a reserved command, so it can point to any command
<if_stmt><not>alias_collision_table<block_start><return><block_end>command_collision_table=AliasManager.build_collision_table([command])<line_sep>alias_collision_levels=alias_collision_table.get(alias.split()[0] [])<line_sep>command_collision_levels=command_collision_table.get(command.split()[0] [])<line_sep># Check if there is a command level conflict
<if_stmt>set(alias_collision_levels)&set(command_collision_levels)<block_start><raise>CLIError(COMMAND_LVL_ERROR.format(alias command))<block_end><block_end><def_stmt>_validate_alias_file_path alias_file_path<block_start>"""
Make sure the alias file path is neither non-existent nor a directory.
Args:
alias_file_path: The alias file path to import aliases from.
"""<if_stmt><not>os.path.exists(alias_file_path)<block_start><raise>CLIError(ALIAS_FILE_NOT_FOUND_ERROR)<block_end><if_stmt>os.path.isdir(alias_file_path)<block_start><raise>CLIError(ALIAS_FILE_DIR_ERROR.format(alias_file_path))<block_end><block_end><def_stmt>_validate_alias_file_content alias_file_path url=''<block_start>"""
Make sure the alias names and alias commands in the alias file are in a valid format.
Args:
alias_file_path: The alias file path to import aliases from.
"""<line_sep>alias_table=get_config_parser()<try_stmt><block_start>alias_table.read(alias_file_path)<for_stmt>alias_name,alias_command reduce_alias_table(alias_table)<block_start>_validate_alias_name(alias_name)<line_sep>_validate_alias_command(alias_command)<line_sep>_validate_alias_command_level(alias_name alias_command)<line_sep>_validate_pos_args_syntax(alias_name alias_command)<block_end><block_end><except_stmt>Exception<as>exception# pylint: disable=broad-except
<block_start>error_msg=CONFIG_PARSING_ERROR%AliasManager.process_exception_message(exception)<line_sep>error_msg=error_msg.replace(alias_file_path url<or>alias_file_path)<line_sep><raise>CLIError(error_msg)<block_end><block_end><def_stmt>_validate_positional_arguments args<block_start>"""
To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055.
Assume that unknown commands are positional arguments immediately
preceded by words that only appear at the end of the commands.
Slight modification of
https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373
Args:
args: The arguments that the user inputs in the terminal.
Returns:
Rudimentary parsed arguments.
"""<line_sep>nouns=[]<for_stmt>arg args<block_start><if_stmt><not>arg.startswith('-')<or><not>arg.startswith('{{')<block_start>nouns.append(arg)<block_end><else_stmt><block_start><break><block_end><block_end><while_stmt>nouns<block_start>search=' '.join(nouns)<line_sep># Since the command name may be immediately followed by a positional arg, strip those off
<if_stmt><not>next((x<for>x azext_alias.cached_reserved_commands<if>x.endswith(search)) <false>)<block_start><del_stmt>nouns[-1]<block_end><else_stmt><block_start><return><block_end><block_end><raise>CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args)))<block_end> |
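# A minimal sketch of how the placeholder-consistency check above behaves (hypothetical alias values):
#   _validate_pos_args_syntax('grp {{ name }}', 'group show -n {{ name | upper }}')  # passes:
#       both sides reduce to the placeholder set {'name'} once Jinja filters are stripped
#   _validate_pos_args_syntax('grp {{ name }}', 'group show -n {{ nam }}')  # raises CLIError:
#       the symmetric difference {'nam', 'name'} is reported via INCONSISTENT_ARG_ERROR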
"""JSON encoding functions."""<import_stmt>datetime<import_stmt>decimal<import_stmt>types<import_from_stmt>json JSONEncoder<as>_JSONEncoder<import_from_stmt>tg.support.converters asbool<import_from_stmt>webob.multidict MultiDict<import_from_stmt>tg._compat string_type<import_from_stmt>tg.configuration.utils GlobalConfigurable<import_from_stmt>tg.util.sqlalchemy dictify<as>dictify_sqla is_saobject is_query_result is_query_row<import_from_stmt>tg.util.ming dictify<as>dictify_ming is_mingobject is_objectid<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<class_stmt>JsonEncodeError(Exception)<block_start>"""JSON Encode error"""<block_end><class_stmt>JSONEncoder(_JSONEncoder GlobalConfigurable)<block_start>"""TurboGears custom JSONEncoder.
Provides support for encoding objects commonly used in TurboGears apps, like:
- SQLAlchemy queries
- Ming queries
- Dates
- Decimals
- Generators
Support for additional types is provided through the ``__json__`` method
that will be called on the object by the JSONEncoder when provided and through
the ability to register custom encoders for specific types using
:meth:`.JSONEncoder.register_custom_encoder`.
"""<line_sep>CONFIG_NAMESPACE='json.'<line_sep>CONFIG_OPTIONS={'isodates':asbool 'allow_lists':asbool}<def_stmt>__init__ self **kwargs<block_start>self._registered_types_map={}<line_sep>self._registered_types_list=tuple()<line_sep>kwargs=self.configure(**kwargs)<line_sep>super(JSONEncoder self).__init__(**kwargs)<block_end><def_stmt>configure self isodates=<false> custom_encoders=<none> allow_lists=<false> **kwargs<block_start>"""JSON encoder can be configured through :class:`.ApplicationConfigurator`
(``app_cfg.base_config``) using the following options:
- ``json.isodates`` -> encode dates using ISO8601 format
- ``json.custom_encoders`` -> List of tuples ``(type, encode_func)`` to register
custom encoders for specific types.
- ``json.allow_lists`` -> Allows lists to be encoded; this is usually disabled for
security reasons due to JSON hijacking. See http://stackoverflow.com/questions/16289894
for additional details.
"""<line_sep>self._isodates=isodates<line_sep>self._allow_lists=allow_lists<if_stmt>custom_encoders<is><not><none><block_start><for_stmt>type_,encoder custom_encoders.items()<block_start>self.register_custom_encoder(type_ encoder)<block_end><block_end><return>kwargs<block_end><def_stmt>register_custom_encoder self objtype encoder<block_start>"""Register a custom encoder for the given type.
Instead of using standard behavior for encoding the given type to JSON, the
``encoder`` will be used instead. ``encoder`` must be a callable that takes
the object as argument and returns an object that can be encoded in JSON (usually a dict).
"""<if_stmt>objtype<in>self._registered_types_map<block_start>log.warning('%s type already registered for a custom encoder, replacing it' objtype)<block_end>self._registered_types_map[objtype]=encoder<line_sep># Append to head, so we find first the last registered types
self._registered_types_list=(objtype )+self._registered_types_list<block_end><def_stmt>default self obj<block_start><if_stmt>isinstance(obj self._registered_types_list)# Minor optimization, enter loop only when we are instance of a supported type.
<block_start><for_stmt>type_,encoder self._registered_types_map.items()<block_start><if_stmt>isinstance(obj type_)<block_start><return>encoder(obj)<block_end><block_end><block_end><elif_stmt>hasattr(obj '__json__')<and>callable(obj.__json__)<block_start><return>obj.__json__()<block_end><elif_stmt>isinstance(obj (datetime.date datetime.datetime datetime.time))<block_start><if_stmt>self._isodates<block_start><if_stmt>isinstance(obj (datetime.datetime datetime.time))<block_start>obj=obj.replace(microsecond=0)<block_end><return>obj.isoformat()<block_end><else_stmt><block_start><return>str(obj)<block_end><block_end><elif_stmt>isinstance(obj decimal.Decimal)<block_start><return>float(obj)<block_end><elif_stmt>is_saobject(obj)<block_start><return>dictify_sqla(obj)<block_end><elif_stmt>is_mingobject(obj)<block_start><return>dictify_ming(obj)<block_end><elif_stmt>is_query_result(obj)<block_start><return>dict(rows=list(obj) count=obj.rowcount)<block_end><elif_stmt>is_query_row(obj)<block_start><return>dict(rows=dict(obj) count=1)<block_end><elif_stmt>is_objectid(obj)<block_start><return>str(obj)<block_end><elif_stmt>isinstance(obj MultiDict)<block_start><return>obj.mixed()<block_end><elif_stmt>isinstance(obj types.GeneratorType)<block_start><return>list(obj)<block_end><else_stmt><block_start><return>_JSONEncoder.default(self obj)<block_end><block_end><block_end>_default_encoder=JSONEncoder.create_global()<def_stmt>encode obj encoder=<none> iterencode=<false><block_start>"""Return a JSON string representation of a Python object."""<if_stmt>encoder<is><none><block_start>encoder=_default_encoder<block_end>encode_func=encoder.encode<if_stmt>iterencode<block_start>encode_func=encoder.iterencode<block_end><if_stmt>isinstance(obj string_type)<block_start><return>encode_func(obj)<block_end><if_stmt>encoder._allow_lists<is><false><block_start><try_stmt><block_start>value=obj['test']<block_end><except_stmt>TypeError<block_start><if_stmt><not>hasattr(obj '__json__')<and><not>is_saobject(obj)<and><not>is_mingobject(obj)<block_start><raise>JsonEncodeError('Your Encoded object must be dict-like.')<block_end><block_end><except_stmt><block_start><pass><block_end><block_end><return>encode_func(obj)<block_end><def_stmt>encode_iter obj encoder=<none><block_start>"""Encode object, yielding each string representation as available."""<line_sep><return>encode(obj encoder=encoder iterencode=<true>)<block_end> |
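# A minimal usage sketch of the encoder above (hypothetical Point type, not part of this module):
#   class Point(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#   enc = JSONEncoder(isodates=True)
#   enc.register_custom_encoder(Point, lambda p: {'x': p.x, 'y': p.y})
#   encode({'p': Point(1, 2)}, encoder=enc)  # -> '{"p": {"x": 1, "y": 2}}'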
<import_stmt>numpy<as>np<import_from_stmt>supervised.algorithms.knn KNeighborsAlgorithm KNeighborsRegressorAlgorithm<import_stmt>optuna<import_from_stmt>supervised.utils.metric Metric<import_from_stmt>supervised.algorithms.registry BINARY_CLASSIFICATION<import_from_stmt>supervised.algorithms.registry MULTICLASS_CLASSIFICATION<import_from_stmt>supervised.algorithms.registry REGRESSION<class_stmt>KNNObjective<block_start><def_stmt>__init__ self ml_task X_train y_train sample_weight X_validation y_validation sample_weight_validation eval_metric n_jobs random_state <block_start>self.ml_task=ml_task<line_sep>self.X_train=X_train<line_sep>self.y_train=y_train<line_sep>self.sample_weight=sample_weight<line_sep>self.X_validation=X_validation<line_sep>self.y_validation=y_validation<line_sep>self.eval_metric=eval_metric<line_sep>self.n_jobs=n_jobs<line_sep>self.seed=random_state<block_end><def_stmt>__call__ self trial<block_start><try_stmt><block_start>params={"n_neighbors":trial.suggest_int("n_neighbors" 1 128) "weights":trial.suggest_categorical("weights" ["uniform" "distance"]) "n_jobs":self.n_jobs "rows_limit":100000 "ml_task":self.ml_task }<line_sep>Algorithm=(KNeighborsRegressorAlgorithm<if>self.ml_task<eq>REGRESSION<else>KNeighborsAlgorithm)<line_sep>model=Algorithm(params)<line_sep>model.fit(self.X_train self.y_train sample_weight=self.sample_weight)<line_sep>preds=model.predict(self.X_validation)<line_sep>score=self.eval_metric(self.y_validation preds)<if_stmt>Metric.optimize_negative(self.eval_metric.name)<block_start>score<augmul>-1.0<block_end><block_end><except_stmt>optuna.exceptions.TrialPruned<as>e<block_start><raise>e<block_end><except_stmt>Exception<as>e<block_start>print("Exception in KNNObjective" str(e))<line_sep><return><none><block_end><return>score<block_end><block_end> |
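# A minimal sketch of how this objective is typically driven (X_train/y_train etc. and eval_metric
# are hypothetical; assumes optuna and the supervised package are installed):
#   objective = KNNObjective(BINARY_CLASSIFICATION, X_train, y_train, None,
#                            X_validation, y_validation, None, eval_metric,
#                            n_jobs=-1, random_state=42)
#   study = optuna.create_study(direction="maximize")
#   study.optimize(objective, n_trials=25)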
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>#---------------------------------------------------------------------------------------------------------
# This describes the full TMTT track reconstruction chain with 3 GeV threshold, where:
# the GP divides the tracker into 18 eta sectors (each sub-divided into 2 virtual eta subsectors);
# the HT uses a 32x18 array followed by 2x2 mini-HT array, with transverse HT readout & multiplexing,
# followed by the KF (or optionally SF+SLR) track fit; duplicate track removal (Algo50) is run.
#---------------------------------------------------------------------------------------------------------
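# A minimal usage sketch (hypothetical cfg fragment; the producer type name and 'process' object are assumed):
#   import FWCore.ParameterSet.Config as cms
#   params = TMTrackProducer_params.clone()   # the PSet defined below
#   params.EnableHistos = cms.bool(True)      # e.g. switch on the performance histograms
#   process.TMTrackProducer = cms.EDProducer("TMTrackProducer", params)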
TMTrackProducer_params=cms.PSet(# Tags for ES products
magneticFieldInputTag=cms.ESInputTag("VolumeBasedMagneticFieldESProducer" "") trackerGeometryInputTag=cms.ESInputTag("trackerGeometry" "") trackerTopologyInputTag=cms.ESInputTag("trackerTopology" "") ttStubAlgoInputTag=cms.ESInputTag("TTStubAlgorithm_official_Phase2TrackerDigi_" "") # Tags for ED products
tpInputTag=cms.InputTag("mix" "MergedTrackTruth") stubInputTag=cms.InputTag("TTStubsFromPhase2TrackerDigis" "StubAccepted") stubTruthInputTag=cms.InputTag("TTStubAssociatorFromPixelDigis" "StubAccepted") clusterTruthInputTag=cms.InputTag("TTClusterAssociatorFromPixelDigis" "ClusterAccepted") genJetInputTag=cms.InputTag("ak4GenJets" "") # Enable output of TTTracks from part-way through tracking chain (after HT & RZ).
EnableOutputIntermediateTTTracks=cms.bool(<false>) # Enable all use of MC truth info (disable to save CPU)
EnableMCtruth=cms.bool(<false>) # Enable output histograms & job tracking performance summary (disable to save CPU)
EnableHistos=cms.bool(<false>) #=== Cuts on MC truth particles (i.e., tracking particles) used for tracking efficiency measurements.
GenCuts=cms.PSet(GenMinPt=cms.double(3.0) GenMaxAbsEta=cms.double(2.4) GenMaxVertR=cms.double(1.0) # Max distance of particle production vertex from centre of CMS.
GenMaxVertZ=cms.double(30.0) GenMaxD0=cms.double(5.0) # Max transverse impact parameter.
GenMaxZ0=cms.double(999.0) # Max longitudinal impact parameter.
GenPdgIds=cms.vuint32() # Only particles with these PDG codes used for efficiency measurement.
# Cut on MC truth tracks used for algorithmic tracking efficiency measurements.
GenMinStubLayers=cms.uint32(4)) #=== Cuts applied to stubs before arriving in L1 track finding board.
StubCuts=cms.PSet(# Reduce number of bits used by front-end chips to store stub bend info?
# = 0 (no); = 1 (yes using official recipe); = 2 (yes using TMTT method)
DegradeBendRes=cms.uint32(2) # Don't use stubs with eta beyond this cut, since the tracker geometry makes it impossible to reconstruct tracks with them.
MaxStubEta=cms.double(2.4) # Don't use stubs whose measured Pt from bend info is significantly below HTArraySpec.HoughMinPt, where "significantly" means allowing for resolution in q/Pt derived from stub bend resolution specified below.
KillLowPtStubs=cms.bool(<true>) # Print FE stub window sizes recommended by this code (in python cfg format used by CMSSW).
PrintStubWindows=cms.bool(<false>) # Bend resolution assumed by bend filter in units of strip pitch. Also used when assigning stubs to sectors if EtaPhiSectors.CalcPhiTrkRes=True. And by the bend filter if HTFillingRphi.UseBendFilter=True.
# Suggested value: 1.19 if DegradeBendRes = 0, or 1.249 if it > 0.
# N.B. Avoid 1/4-integer values due to rounding error issues.
BendCut=cms.double(1.249) # Additional contribution to bend resolution from its encoding into a reduced number of bits.
# This number is the assumed resolution relative to the naive guess of its value.
# It is ignored in DegradeBendRes = 0.
BendCutExtra=cms.double(0.0) # Order stubs by bend in DTC, such that highest Pt stubs are transmitted first.
OrderStubsByBend=cms.bool(<true>)) #=== Optional Stub digitization.
StubDigitize=cms.PSet(EnableDigitize=cms.bool(<true>) # Digitize stub coords? If not, use floating point coords.
#
#--- Parameters available in MP board. (And in case of Hybrid used internally in KF)
#
PhiSectorBits=cms.uint32(6) # Bits used to store phi sector number -- NOT USED
PhiSBits=cms.uint32(14) # Bits used to store phiS coord. (13 enough?)
PhiSRange=cms.double(0.698131700) # Range phiS coord. covers in radians.
RtBits=cms.uint32(12) # Bits used to store Rt coord.
RtRange=cms.double(91.652837) # Range Rt coord. covers in units of cm.
ZBits=cms.uint32(14) # Bits used to store z coord.
ZRange=cms.double(733.2227) # Range z coord. covers in units of cm.
#
#--- Parameters available in GP board (excluding any in common with MP specified above).
#
PhiNBits=cms.uint32(15) # Bits used to store PhiO parameter.
PhiNRange=cms.double(1.3962634) # Range PhiO parameter covers.
BendBits=cms.uint32(6)# Bits used to store stub bend.
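# (Sanity check applicable to any coord above: the LSB is range/2**bits,
# e.g. the phiS step is PhiSRange/2**PhiSBits = 0.698131700/16384 ~ 4.3e-5 rad.)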
) #=== Configuration of tracker module type. Only provides test data for firmware.
TrackerModuleType=cms.PSet(# Modules matching these criteria are type 0, 1, 2, 3 ...
PitchVsType=cms.vdouble(0.0099 0.0099 0.0099 0.0099 0.0089 0.0099 0.0089 0.0089) SpaceVsType=cms.vdouble(0.26 0.26 0.16 0.4 0.18 0.4 0.18 0.4) # (Type vbool not implemented, so use vuint32 instead ...)
BarrelVsType=cms.vuint32(1 1 1 1 1 0 0 0) PSVsType=cms.vuint32(1 1 1 1 0 1 0 0) TiltedVsType=cms.vuint32(0 1 0 1 0 0 0 0)) #=== Configuration of Geometric Processor.
GeometricProc=cms.PSet(# Use an FPGA-friendly approximation to determine track angle dphi from bend in GP?
UseApproxB=cms.bool(<true>) # Use approximation for B
# Params of approximation if used.
BApprox_gradient=cms.double(0.886454) # Gradient term of linear equation for approximating B
BApprox_intercept=cms.double(0.504148)# Intercept term of linear equation for approximating B
) #=== Division of Tracker into phi sectors.
PhiSectors=cms.PSet(NumPhiNonants=cms.uint32(9) # Divisions of Tracker at DTC
NumPhiSectors=cms.uint32(18) # Divisions of Tracker at GP.
ChosenRofPhi=cms.double(67.240) # Use phi of track at this radius for assignment of stubs to phi sectors & also for one of the axes of the r-phi HT. If ChosenRofPhi=0, then use track phi0. - Should be an integer multiple of the stub r digitisation granularity.
#--- You can set one or both the following parameters to True.
UseStubPhi=cms.bool(<true>) # Require stub phi to be consistent with track of Pt > HTArraySpec.HoughMinPt that crosses HT phi axis?
UseStubPhiTrk=cms.bool(<true>) # Require stub phi0 (or phi65 etc.) as estimated from stub bend, to lie within HT phi axis, allowing tolerance(s) specified below?
AssumedPhiTrkRes=cms.double(0.5) # Tolerance in stub phi0 (or phi65) assumed to be this fraction of phi sector width. (N.B. If > 0.5, then stubs can be shared by more than 2 phi sectors).
CalcPhiTrkRes=cms.bool(<true>)# If true, tolerance in stub phi0 (or phi65 etc.) will be reduced below AssumedPhiTrkRes if stub bend resolution specified in StubCuts.BendCut suggests it is safe to do so.
) #=== Division of Tracker into eta sectors
EtaSectors=cms.PSet(# Eta boundaries for 18 eta regions
# EtaRegions = cms.vdouble(-2.4,-2.16,-1.95,-1.7,-1.43,-1.16,-0.89,-0.61,-0.31,0.0,0.31,0.61,0.89,1.16,1.43,1.7,1.95,2.16,2.4),
# Eta boundaries for 16 eta regions
EtaRegions=cms.vdouble(-2.4 -2.08 -1.68 -1.26 -0.90 -0.62 -0.41 -0.20 0.0 0.20 0.41 0.62 0.90 1.26 1.68 2.08 2.4) ChosenRofZ=cms.double(50.) # Use z of track at this radius for assignment of tracks to eta sectors & also for one of the axes of the r-z HT. Do not set to zero!
BeamWindowZ=cms.double(15) # Half-width of window assumed to contain beam-spot in z.
AllowOver2EtaSecs=cms.bool(<true>)# If True, the code will not throw an error if a stub is assigned to 3 or more eta sectors.
) #=== r-phi Hough transform array specifications.
HTArraySpecRphi=cms.PSet(HoughMinPt=cms.double(3.0) # Min track Pt that Hough Transform must find. Also used by StubCuts.KillLowPtStubs and by EtaPhiSectors.UseStubPhi.
# If MiniHTstage = True, these refers to mini cells in whole HT array.
HoughNbinsPt=cms.uint32(32) # HT array dimension in track q/Pt. (If MiniHTstage = True, this refers to mini cells in whole HT array).
HoughNbinsPhi=cms.uint32(64) # HT array dimension in track phi0 (or phi65 or any other track phi angle. (If MiniHTstage = True, this refers to mini cells in whole HT array).
EnableMerge2x2=cms.bool(<false>) # Groups of neighbouring 2x2 cells in HT will be treated as if they are a single large cell? N.B. You can only enable this option if your HT array has even numbers of bins in both dimensions. And this cfg param ignored if MiniHTstage = True. HISTORIC OPTION. SUGGEST NOT USING!
MaxPtToMerge2x2=cms.double(3.5) # but only cells with pt < MaxPtToMerge2x2 will be merged in this way (irrelevant if EnableMerge2x2 = false).
NumSubSecsEta=cms.uint32(2) # Subdivide each sector into this number of subsectors in eta within r-phi HT.
Shape=cms.uint32(0) # cell shape: 0 for square, 1 for diamond, 2 hexagon (with vertical sides), 3 square with alternate rows shifted by 0.5*cell_width.
MiniHTstage=cms.bool(<true>) # Run 2nd stage HT with mini cells inside each 1st stage normal HT cell.
MiniHoughNbinsPt=cms.uint32(2) # Number of mini cells along q/Pt axis inside each normal HT cell.
MiniHoughNbinsPhi=cms.uint32(2) # Number of mini cells along phi axis inside each normal HT cell.
MiniHoughMinPt=cms.double(3.0) # Below this Pt threshold, the mini HT will not be used, to reduce sensitivity to scattering, with instead tracks found by 1st stage coarse HT sent to output. (HT cell numbering remains as if mini HT were in use everywhere).
MiniHoughDontKill=cms.bool(<false>) # If true, allows tracks found by 1st stage coarse HT to be output if 2nd stage mini HT finds no tracks.
MiniHoughDontKillMinPt=cms.double(8.0) # If MiniHoughDontKill=True, this option restricts it to keep 1st stage HT tracks only if their Pt is exceeds this cut. (Used to improve electron tracking above this threshold).
MiniHoughLoadBalance=cms.uint32(2)# Load balancing disabled = 0; static load balancing of output links = 1; dynamic load balancing of output links = 2.
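# (Worked example of the cell counts above: HoughNbinsPt=32 and HoughNbinsPhi=64 count mini cells,
# so with 2x2 mini cells per coarse cell this corresponds to the 16x32 coarse array noted below.)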
) #=== Rules governing how stubs are filled into the r-phi Hough Transform array.
HTFillingRphi=cms.PSet(# Take all cells in r-phi HT array crossed by line corresponding to each stub (= 0) or take only some to reduce rate at cost
# of efficiency ( > 0). If this option is > 0, it can be 1 or 2, corresponding to different algorithms for rejecting
# some of the cells. "1" is an algorithm invented by Ian, whereas "2" corresponds to Thomas' 1st firmware implementation which only handled 1 cell per HT column.
# Suggest setting KillSomeHTCellsRphi=1 (=0) if HTArraySpec.ChosenRofPhi=0 (>0)
KillSomeHTCellsRphi=cms.uint32(0) # Use filter in each r-phi HT cell, filling it only with stubs that have consistent bend information?
# The assumed bend resolution is specified in StubCuts.BendCut.
UseBendFilter=cms.bool(<true>) # Use filter in each HT cell, preventing more than the specified number of stubs being stored in the cell. (Reflecting memory limit of hardware). N.B. Results depend on assumed order of stubs.
# N.B. If mini-HT is in use, then this cut applies to coarse-HT.
#MaxStubsInCell = cms.uint32(99999), # Setting this to anything more than 999 disables this option
MaxStubsInCell=cms.uint32(32) # set it equal to value used in hardware.
MaxStubsInCellMiniHough=cms.uint32(16) # Same type of cut for mini-HT (if in use)
# If BusySectorKill = True, and more than BusySectorNumStubs stubs are assigned to tracks by an r-phi HT array, then the excess tracks are killed, with lowest Pt ones killed first. This is because HT hardware has finite readout time.
BusySectorKill=cms.bool(<true>) BusySectorNumStubs=cms.uint32(162) # Or 144 if only 320 MHz FW.
# If BusySectorMbinRanges is not empty, then the BusySectorNumStubs cut is instead applied to the subset of tracks appearing in the following m bin (q/Pt) ranges of the HT array. The sum of the entries in the vector should equal the number of m bins in the HT. (N.B. If EnableMerge2x2 or MiniHTstage = True, then the m bin ranges here correspond to the bins before merging. Also in these cases, the odd m-bin numbers don't correspond to HT outputs, so should be all grouped together on a single imaginary link).
# If BusySectorMbinOrder is not empty, then the m-bins are grouped in the specified order, instead of sequentially.
# (Histos NumStubsPerLink, NumStubsVsLink & MeanStubsPerLink useful for optimising this option).
#
# Choice for 16x32 coarse HT array followed by 2x2 mini-HT array with 3 GeV Pt threshold.
BusySectorMbinRanges=cms.vuint32(1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 16) BusySectorMbinOrder=cms.vuint32(0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31) # Choice for 24x32 coarse HT array followed by 2x2 mini-HT array with 2 GeV Pt threshold.
#BusySectorMbinRanges = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 24),
#BusySectorMbinOrder = cms.vuint32(0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47),
#
# If BusyInputSectorKill = True, and more than BusyInputSectorNumStubs are input to the HT array from the GP, then
# the excess stubs are killed. This is because HT hardware has finite readin time.
# Results unreliable as depend on assumed order of stubs.
BusyInputSectorKill=cms.bool(<true>) BusyInputSectorNumStubs=cms.uint32(162) # Or 144 if only 320 MHz FW
# Multiplex the outputs from several HTs onto a single pair of output optical links?
# Options: 0 = disable Mux; 1 = Sept 2019 Mux (transverse HT readout by m-bin), with single m bin in entire nonant going to each link.
MuxOutputsHT=cms.uint32(1) # If this is non-empty, then only the specified eta sectors are enabled, to study them individually.
EtaRegWhitelist=cms.vuint32()) #=== Options controlling r-z track filters (or any other track filters run after the Hough transform, as opposed to inside it).
#=== (Irrelevant for track fitters that don't require any r-z filter run before them).
RZfilterOpts=cms.PSet(# Specify preferred r-z filter (from those available inside TrkRZfilter.cc) - currently only "SeedFilter".
RZFilterName=cms.string("SeedFilter") #--- Options relevant for Seed filter, (so only relevant if rzFilterName="SeedFilter").
# Cut at this many standard deviations on seed resolution.
SeedResCut=cms.double(1.732) # Store stubs compatible with all possible good seeds.
KeepAllSeed=cms.bool(<false>) # Maximum number of seed combinations to bother checking per track candidate.
#MaxSeedCombinations = cms.uint32(999),
MaxSeedCombinations=cms.uint32(15) # Maximum number of seed combinations consistent with (z0,eta) sector constraints to bother checking per track candidate.
#MaxGoodSeedCombinations = cms.uint32(13),
MaxGoodSeedCombinations=cms.uint32(10) # Maximum number of seeds that a single stub can be included in.
MaxSeedsPerStub=cms.uint32(4) # Reject tracks whose estimated rapidity from seed filter is inconsistent range of with eta sector. (Kills some duplicate tracks).
zTrkSectorCheck=cms.bool(<true>) # Min. number of layers in rz track that must have stubs for track to be declared found by seed filter.
MinFilterLayers=cms.uint32(4)) #=== Rules for deciding when the (HT) track finding has found an L1 track candidate
L1TrackDef=cms.PSet(# Min. number of layers the track must have stubs in.
MinStubLayers=cms.uint32(5) # Change min. number of layers cut to (MinStubLayers - 1) for tracks with Pt exceeding this cut.
# If this is set to a -ve number, this option is disabled.
MinPtToReduceLayers=cms.double(-99999.) # Change min. number of layers cut to (MinStubLayers - 1) for tracks in these rapidity sectors.
# (Histogram "AlgEffVsEtaSec" will help you identify which sectors to declare).
#EtaSecsReduceLayers = cms.vuint32(),
EtaSecsReduceLayers=cms.vuint32(5 12) # Reduce this layer ID, so that it takes no more than 8 different values in any eta region (simplifies firmware).
ReducedLayerID=cms.bool(<true>)) #=== Specification of algorithm to eliminate duplicate tracks.
DupTrkRemoval=cms.PSet(# Algorithm run on tracks after the track helix fit has been done.
# (Disable dup removal = 0; two alternative algos = 1 or 2).
DupTrkAlgFit=cms.uint32(1)) #=== Rules for deciding when a reconstructed L1 track matches a MC truth particle (i.e. tracking particle).
TrackMatchDef=cms.PSet(#--- Three different ways to define if a tracking particle matches a reco track candidate. (Usually, set two of them to ultra loose).
# Min. fraction of matched stubs relative to number of stubs on reco track.
MinFracMatchStubsOnReco=cms.double(-99.) # Min. fraction of matched stubs relative to number of stubs on tracking particle.
MinFracMatchStubsOnTP=cms.double(-99.) # Min. number of matched layers.
MinNumMatchLayers=cms.uint32(4) # Min. number of matched PS layers.
MinNumMatchPSLayers=cms.uint32(0) # Associate stub to TP only if the TP contributed to both its clusters? (If False, then associate even if only one cluster was made by TP).
StubMatchStrict=cms.bool(<false>)) #=== Track Fitting Algorithm Settings.
TrackFitSettings=cms.PSet(#
#--- Options applicable to all track fitters ---
#
# Track Fitting algorithms to use. You can run several in parallel.
# TrackFitLinearAlgo & ChiSquared* are chi2 fits, KF* is a Kalman filter fit,
# & SimpleLR4 is a linear regression fit that neglects the hit uncertainties.
# The number 4 or 5 in the name indicates if 4 or 5 helix parameters are fitted.
# Options KF4ParamsComb, KF5ParamsComb or SimpleLR4 are the best ones.
# KF*ParamsCombHLS is the HLS version of the code, which only works if linked with Vivado libraries.
TrackFitters=cms.vstring(# "ChiSquaredFit4",
# "SimpleLR4",
# "KF4ParamsCombHLS",
# "KF5ParamsCombHLS",
"KF5ParamsComb" "KF4ParamsComb") # Indicate subset of fitters wanting r-z track filter to be run before them. (Irrelevant for those not specified in "TrackFitters").
# Typically, Chi2 & LR fits work best with r-z filter & KF works best without it.
UseRZfilter=cms.vstring("ChiSquaredFit4" "SimpleLR4") # Print detailed summary of track fit performance at end of job (as opposed to a brief one).
DetailedFitOutput=cms.bool(<false>) #
# Use MC truth to eliminate all fake tracks & all incorrect stubs assigned to tracks before doing fit.
TrackFitCheat=cms.bool(<false>) #
#--- Options for chi2 track fitter ---
#
# Number of fitting iterations to undertake. (15 is not realistic in hardware, but is necessary to kill bad hits)
NumTrackFitIterations=cms.uint32(15) # Optionally kill hit with biggest residuals in track fit (occurs after the first fit, so three iterations would have two killings).
KillTrackFitWorstHit=cms.bool(<true>) # Cuts in standard deviations used to kill hits with big residuals during fit. If the residual exceeds the "General" cut, the hit is killed providing it leaves the track with enough hits to survive. If the residual exceeds the "Killing" cut, the hit is killed even if that kills the track.
GeneralResidualCut=cms.double(3.0) KillingResidualCut=cms.double(20.0) #
#--- Additional options for <NAME>'s Linear Regression track fitter ---
#
# Maximum allowed number of iterations of LR fitter.
MaxIterationsLR=cms.uint32(8) # If False: residual of a stub is the max of its r-phi & r-z residuals.
# If True: the residual is the mean of these residuals.
CombineResiduals=cms.bool(<true>) # Correct stub phi coordinate for higher orders in circle expansion, so that a trajectory is straight in r-phi.
LineariseStubPosition=cms.bool(<true>) # Checks if the fitted track is consistent with the sector, if not it will be not accepted.
CheckSectorConsistency=cms.bool(<false>) # Checks if the fitted track r phi parameter are consistent with the HT candidate parameter within in range of +- 2 cells.
CheckHTCellConsistency=cms.bool(<false>) # Tracks must have stubs in at least this number of PS layers.
MinPSLayers=cms.uint32(2) # Digitization
DigitizeLR=cms.bool(<false>) PhiPrecision=cms.double(0.009/108.) RPrecision=cms.double(0.14) ZPrecision=cms.double(0.28) ZSlopeWidth=cms.uint32(11) ZInterceptWidth=cms.uint32(11) #
#--- Additional options for <NAME>'s Simple Linear Regression track fitter ---
#
# Digitize Simple Linear Regression variables and calculation. (Disabled if EnableDigitize=False).
DigitizeSLR=cms.bool(<false>) # Disable, as was never retuned for nonants
# Number of bits to be used in hardware to compute the division needed to calculate the helix params
DividerBitsHelix=cms.uint32(23) DividerBitsHelixZ=cms.uint32(23) # Number of bits to reduce the rphi helix parameter calculation weight
ShiftingBitsDenRPhi=cms.uint32(14) # Number of bits to reduce the rphi helix parameter calculation weight
ShiftingBitsDenRZ=cms.uint32(14) # Number of bits to reduce the phi0 parameter calculation weight
ShiftingBitsPhi=cms.uint32(10) # Number of bits to reduce the qOverPt parameter calculation weight
ShiftingBitsPt=cms.uint32(3) # Number of bits to reduce the tanLambda parameter calculation weight
ShiftingBitsLambda=cms.uint32(1) # Number of bits to reduce the z0 parameter calculation weight
ShiftingBitsZ0=cms.uint32(16) # Fit ChiSquare Cut (tightening reduces fake track rate at cost of efficiency)
SLR_chi2cut=cms.double(300.) # Cut on Rphi Residuals (radians) - stubs killed until only 4 left or all have residuals below this cut.
ResidualCut=cms.double(0.0) #ResidualCut = cms.double(0.0005), # This allows more than 4 stubs per track.
#
#--- Options for Kalman filter track fitters ---
#
# Larger number has more debug printout. "1" is useful for understanding why tracks are lost, best combined with TrackFitCheat=True.
KalmanDebugLevel=cms.uint32(0) # Fit will reject fitted tracks unless it can assign at least this number of stubs to them.
KalmanMinNumStubs=cms.uint32(4) # Fit will attempt to add up to this number of stubs to each fitted track, but won't bother adding more.
KalmanMaxNumStubs=cms.uint32(4) # For 5-param helix fits, calculate also beam-constrained helix params after fit is complete, & use them for duplicate removal if DupTrkAlgFit=1.
KalmanAddBeamConstr=cms.bool(<true>) # Remove requirement of at least 2 PS layers per track.
KalmanRemove2PScut=cms.bool(<false>) # Allow the KF to skip this many layers in total per track.
KalmanMaxSkipLayersHard=cms.uint32(1) # For HT tracks with many stubs
KalmanMaxSkipLayersEasy=cms.uint32(2) # For HT tracks with few stubs
KalmanMaxStubsEasy=cms.uint32(10) # Max stubs an HT track can have to be "easy".
KFUseMaybeLayers=cms.bool(<false>) # Disable "maybe layer" to match with firmware
#--- Cuts applied to KF states as a function of the last KF tracker layer they had a stub in.
# (If "4" or "5" in name, cut only applies to 4 or 5 param helix fit).
KFLayerVsPtToler=cms.vdouble(999. 999. 0.1 0.1 0.05 0.05 0.05) # d0 cut only applied to 5 param helix fit.
KFLayerVsD0Cut5=cms.vdouble(999. 999. 999. 10. 10. 10. 10.) KFLayerVsZ0Cut5=cms.vdouble(999. 999. 25.5 25.5 25.5 25.5 25.5) KFLayerVsZ0Cut4=cms.vdouble(999. 999. 15. 15. 15. 15. 15.) # Chi2 cuts should be retuned if KalmanMultiScattTerm value changed.
KFLayerVsChiSq5=cms.vdouble(999. 999. 10. 30. 80. 120. 160.) KFLayerVsChiSq4=cms.vdouble(999. 999. 10. 30. 80. 120. 160.) # KF will consider at most this #stubs per layer to save time.
KalmanMaxStubsPerLayer=cms.uint32(4) # Multiple scattering term - inflate hit phi errors by this divided by Pt
# (0.00075 gives best helix resolution & 0.00450 gives best chi2 distribution).
KalmanMultiScattTerm=cms.double(0.00075) # Scale down chi2 in r-phi plane by this factor to improve electron performance (should be power of 2)
KalmanChi2RphiScale=cms.uint32(8) # N.B. KF track fit chi2 cut is not cfg param, but instead is hard-wired in KF4ParamsComb::isGoodState(...).
#--- Enable Higher order corrections
# Treat z uncertainty in tilted barrel modules correctly.
KalmanHOtilted=cms.bool(<false>) # Higher order circle expansion terms for low Pt.
KalmanHOhelixExp=cms.bool(<false>) # Alpha correction for non-radial 2S endcap strips. (0=disable correction, 1=correct with offset, 2=correct with non-diagonal stub covariance matrix). -- Option 1 is easier in FPGA, but only works if fit adds PS stubs before 2S ones.
KalmanHOalpha=cms.uint32(0) # Projection from (r,phi) to (z,phi) for endcap 2S modules. (0=disable correction, 1=correct with offset, 2=correct with non-diagonal stub covariance matrix). -- Option 1 is easier in FPGA, but only works if fit adds PS stubs before 2S ones.
KalmanHOprojZcorr=cms.uint32(0) # Use approx calc to account for non-radial endcap 2S modules corresponding to current FW, with no special treatment for tilted modules.
KalmanHOfw=cms.bool(<true>)) #=== Treatment of dead modules.
DeadModuleOpts=cms.PSet(# Emulate dead/inefficient modules using the StubKiller code, with stubs killed according to the scenarios of the Stress Test group.
# (0=Don't kill any stubs; 1-5 = Scenarios described in StubKiller.cc)
KillScenario=cms.uint32(0) # Modify TMTT tracking to try to recover tracking efficiency in presence of dead modules. (Does nothing if KillScenario = 0).
KillRecover=cms.bool(<true>)) #=== Fitted track digitisation.
TrackDigi=cms.PSet(# For firmware reasons, can't use common digitisation cfg for all fitters.
#======= SimpleLR4 digi parameters ========
SLR_skipTrackDigi=cms.bool(<false>) # Optionally skip track digitisation if done internally inside fitting code.
SLR_oneOver2rBits=cms.uint32(13) SLR_oneOver2rRange=cms.double(0.01354135) SLR_d0Bits=cms.uint32(12) # Made up by Ian as never yet discussed.
SLR_d0Range=cms.double(10.) SLR_phi0Bits=cms.uint32(18) SLR_phi0Range=cms.double(1.3962636) # phi0 is actually only digitised relative to centre of sector.
SLR_z0Bits=cms.uint32(12) SLR_z0Range=cms.double(51.555509) SLR_tanlambdaBits=cms.uint32(15) SLR_tanlambdaRange=cms.double(32.0) SLR_chisquaredBits=cms.uint32(10) SLR_chisquaredRange=cms.double(512.) #====== Kalman Filter digi parameters ========
KF_skipTrackDigi=cms.bool(<false>) # Optionally skip track digitisation if done internally inside fitting code.
KF_oneOver2rBits=cms.uint32(15) KF_oneOver2rRange=cms.double(0.0076171313) # pT > 1.5 GeV
KF_d0Bits=cms.uint32(12) KF_d0Range=cms.double(31.992876) KF_phi0Bits=cms.uint32(12) KF_phi0Range=cms.double(0.6981317) # phi0 digitised relative to centre of sector. (Required range 2pi/18 + 2*overlap; overlap = 0.19206rads*(2GeV/ptCut)*(chosenR/67.24). MUST DOUBLE TO GO TO 2 GEV.
KF_z0Bits=cms.uint32(12) KF_z0Range=cms.double(45.826419) KF_tanlambdaBits=cms.uint32(16) KF_tanlambdaRange=cms.double(16.) KF_chisquaredBits=cms.uint32(15) # N.B. 17 bits are used internally inside KF.
KF_chisquaredRange=cms.double(1024.) KF_chisquaredBinEdges=cms.vdouble(0 0.5 1 2 3 5 7 10 20 40 100 200 500 1000 3000) # Additional bin for >3000
KF_bendchisquaredBinEdges=cms.vdouble(0 0.5 1 2 3 5 10 50) # Additional bin for >50
#====== Other track fitter Digi params.
# Currently equal to those for KF, although you can skip track digitisation for them with following.
Other_skipTrackDigi=cms.bool(<true>)) #===== Use HYBRID TRACKING (Tracklet pattern reco + TMTT KF -- requires tracklet C++ too) =====
Hybrid=cms.bool(<false>) #===== Debug plot options
# When making helix parameter resolution plots, only use particles from the physics event (True)
# or also use particles from pileup (False) ?
ResPlotOpt=cms.bool(<true>))<line_sep> |
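A minimal sketch of overriding one of the Kalman filter parameters above from a top-level CMSSW config; the cfi module path and the PSet name TMTT_params are assumptions, not from the source. Only FWCore.ParameterSet.Config is standard CMSSW.
import FWCore.ParameterSet.Config as cms
# Hypothetical stand-in for the real cfi import, e.g.
# from L1Trigger.TrackFindingTMTT.TMTrackProducer_Defaults_cfi import TMTT_params
TMTT_params = cms.PSet(KalmanMultiScattTerm=cms.double(0.00075))
# Inflate the multiple-scattering term; per the comments above, the
# KFLayerVsChiSq* cuts should be retuned whenever this value changes.
TMTT_params.KalmanMultiScattTerm = cms.double(0.00450)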
"""A collection of modules containing dialog-style widgets and popups.
"""<import_stmt>py_cui.dialogs.form<import_stmt>py_cui.dialogs.filedialog<line_sep> |
<import_from_future_stmt> annotations<import_stmt>pytest<import_from_stmt>testing.runner and_exit<line_sep>@pytest.mark.parametrize('key' ('^C' 'Enter'))<def_stmt>test_replace_cancel run key<block_start><with_stmt>run()<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press(key)<line_sep>h.await_text('cancelled')<block_end><block_end><def_stmt>test_replace_invalid_regex run<block_start><with_stmt>run()<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('(')<line_sep>h.await_text("invalid regex: '('")<block_end><block_end><def_stmt>test_replace_invalid_replacement run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line_0')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('\\')<line_sep>h.await_text('invalid replacement string')<block_end><block_end><def_stmt>test_replace_cancel_at_replace_string run<block_start><with_stmt>run()<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('hello')<line_sep>h.await_text('replace with:')<line_sep>h.press('^C')<line_sep>h.await_text('cancelled')<block_end><block_end>@pytest.mark.parametrize('key' ('y' 'Y'))<def_stmt>test_replace_actual_contents run ten_lines key<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line_0')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('ohai')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press(key)<line_sep>h.await_text_missing('line_0')<line_sep>h.await_text('ohai')<line_sep>h.await_text(' *')<line_sep>h.await_text('replaced 1 occurrence')<block_end><block_end><def_stmt>test_replace_sets_x_hint_properly run tmpdir<block_start>f=tmpdir.join('f')<line_sep>contents='''\
beginning_line
match me!
'''<line_sep>f.write(contents)<with_stmt>run(str(f))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('me!')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('youuuu')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('y')<line_sep>h.await_cursor_position(x=6 y=3)<line_sep>h.press('Up')<line_sep>h.press('Up')<line_sep>h.await_cursor_position(x=6 y=1)<block_end><block_end><def_stmt>test_replace_cancel_at_individual_replace run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter(r'line_\d')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('ohai')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('^C')<line_sep>h.await_text('cancelled')<block_end><block_end><def_stmt>test_replace_unknown_characters_at_individual_replace run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter(r'line_\d')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('ohai')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('?')<line_sep>h.press('^C')<line_sep>h.await_text('cancelled')<block_end><block_end><def_stmt>test_replace_say_no_to_individual_replace run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line_[135]')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('ohai')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_1')<line_sep>h.press('n')<line_sep>h.await_text('line_3')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_5')<line_sep>h.await_text('replaced 2 occurrences')<block_end><block_end><def_stmt>test_replace_all run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter(r'line_(\d)')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter(r'ohai+\1')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('a')<line_sep>h.await_text_missing('line')<line_sep>h.await_text('ohai+1')<line_sep>h.await_text('replaced 10 occurrences')<block_end><block_end><def_stmt>test_replace_with_empty_string run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line_1')<line_sep>h.await_text('replace with:')<line_sep>h.press('Enter')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_1')<block_end><block_end><def_stmt>test_replace_search_not_found run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('wat')<line_sep># TODO: would be nice to not prompt for a replace string in this case
h.await_text('replace with:')<line_sep>h.press('Enter')<line_sep>h.await_text('no matches')<block_end><block_end><def_stmt>test_replace_small_window_size run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('wat')<line_sep>h.await_text('replace [yes, no, all]?')<with_stmt>h.resize(width=8 height=24)<block_start>h.await_text('replace…')<block_end>h.press('^C')<block_end><block_end><def_stmt>test_replace_height_1_highlight run tmpdir<block_start>f=tmpdir.join('f')<line_sep>f.write('x'<times>90)<with_stmt>run(str(f))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('^x+$')<line_sep>h.await_text('replace with:')<line_sep>h.press('Enter')<line_sep>h.await_text('replace [yes, no, all]?')<with_stmt>h.resize(width=80 height=1)<block_start>h.await_text_missing('xxxxx')<block_end>h.await_text('xxxxx')<line_sep>h.press('^C')<block_end><block_end><def_stmt>test_replace_line_goes_off_screen run<block_start><with_stmt>run()<as>h and_exit(h)<block_start>h.press(f'{"a"<times>20}{"b"<times>90}')<line_sep>h.press('^A')<line_sep>h.await_text(f'{"a"<times>20}{"b"<times>59}»')<line_sep>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('b+')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('wat')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.await_text(f'{"a"<times>20}{"b"<times>59}»')<line_sep>h.press('y')<line_sep>h.await_text(f'{"a"<times>20}wat')<line_sep>h.await_text('replaced 1 occurrence')<block_end><block_end><def_stmt>test_replace_undo_undoes_only_one run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('wat')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_0')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_1')<line_sep>h.press('^C')<line_sep>h.press('M-u')<line_sep>h.await_text('line_1')<line_sep>h.await_text_missing('line_0')<block_end><block_end><def_stmt>test_replace_multiple_occurrences_in_line run<block_start><with_stmt>run()<as>h and_exit(h)<block_start>h.press('baaaaabaaaaa')<line_sep>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('a+')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('q')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('a')<line_sep>h.await_text('bqbq')<block_end><block_end><def_stmt>test_replace_after_wrapping run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('Down')<line_sep>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line_[02]')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('ohai')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_2')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_0')<line_sep>h.await_text('replaced 2 occurrences')<block_end><block_end><def_stmt>test_replace_after_cursor_after_wrapping run<block_start><with_stmt>run()<as>h and_exit(h)<block_start>h.press('baaab')<line_sep>h.press('Left')<line_sep>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('b')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('q')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('n')<line_sep>h.press('y')<line_sep>h.await_text('replaced 1 occurrence')<line_sep>h.await_text('qaaab')<block_end><block_end><def_stmt>test_replace_separate_line_after_wrapping run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('Down')<line_sep>h.press('Down')<line_sep>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('line_[01]')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter('_')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_0')<line_sep>h.press('y')<line_sep>h.await_text_missing('line_1')<block_end><block_end><def_stmt>test_replace_with_newline_characters run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('(line)_([01])')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter(r'\1\n\2')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('a')<line_sep>h.await_text_missing('line_0')<line_sep>h.await_text_missing('line_1')<line_sep>h.await_text('line\n0\nline\n1\n')<block_end><block_end><def_stmt>test_replace_with_multiple_newline_characters run ten_lines<block_start><with_stmt>run(str(ten_lines))<as>h and_exit(h)<block_start>h.press('^\\')<line_sep>h.await_text('search (to replace):')<line_sep>h.press_and_enter('(li)(ne)_(1)')<line_sep>h.await_text('replace with:')<line_sep>h.press_and_enter(r'\1\n\2\n\3\n')<line_sep>h.await_text('replace [yes, no, all]?')<line_sep>h.press('a')<line_sep>h.await_text_missing('line_1')<line_sep>h.await_text('li\nne\n1\n\nline_2')<block_end><block_end> |
<import_stmt>csv<import_stmt>os.path<import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>numpy<as>np<line_sep>plt.switch_backend('agg')<class_stmt>CsvLogger<block_start><def_stmt>__init__ self filepath='./' filename='results.csv' data=<none><block_start>self.log_path=filepath<line_sep>self.log_name=filename<line_sep>self.csv_path=os.path.join(self.log_path self.log_name)<line_sep>self.fieldsnames=['epoch' 'val_error1' 'val_error5' 'val_loss' 'train_error1' 'train_error5' 'train_loss']<with_stmt>open(self.csv_path 'w')<as>f<block_start>writer=csv.DictWriter(f fieldnames=self.fieldsnames)<line_sep>writer.writeheader()<block_end>self.data={}<for_stmt>field self.fieldsnames<block_start>self.data[field]=[]<block_end><if_stmt>data<is><not><none><block_start><for_stmt>d data<block_start>d_num={}<for_stmt>key d<block_start>d_num[key]=float(d[key])<if>key<ne>'epoch'<else>int(d[key])<block_end>self.write(d_num)<block_end><block_end><block_end><def_stmt>write self data<block_start><for_stmt>k self.data<block_start>self.data[k].append(data[k])<block_end><with_stmt>open(self.csv_path 'a')<as>f<block_start>writer=csv.DictWriter(f fieldnames=self.fieldsnames)<line_sep>writer.writerow(data)<block_end><block_end><def_stmt>save_params self args params<block_start><with_stmt>open(os.path.join(self.log_path 'params.txt') 'w')<as>f<block_start>f.write('{}\n'.format(' '.join(args)))<line_sep>f.write('{}\n'.format(params))<block_end><block_end><def_stmt>write_text self text print_t=<true><block_start><with_stmt>open(os.path.join(self.log_path 'params.txt') 'a')<as>f<block_start>f.write('{}\n'.format(text))<block_end><if_stmt>print_t<block_start>print(text)<block_end><block_end><def_stmt>plot_progress_errk self claimed_acc=<none> title='ShuffleNetv2' k=1<block_start>tr_str='train_error{}'.format(k)<line_sep>val_str='val_error{}'.format(k)<line_sep>plt.figure(figsize=(9 8) dpi=300)<line_sep>plt.plot(self.data[tr_str] label='Training error')<line_sep>plt.plot(self.data[val_str] label='Validation error')<if_stmt>claimed_acc<is><not><none><block_start>plt.plot((0 len(self.data[tr_str])) (1-claimed_acc 1-claimed_acc) 'k--' label='Claimed validation error ({:.2f}%)'.format(100.<times>(1-claimed_acc)))<block_end>plt.plot((0 len(self.data[tr_str])) (np.min(self.data[val_str]) np.min(self.data[val_str])) 'r--' label='Best validation error ({:.2f}%)'.format(100.<times>np.min(self.data[val_str])))<line_sep>plt.title('Top-{} error for {}'.format(k title))<line_sep>plt.xlabel('Epoch')<line_sep>plt.ylabel('Error')<line_sep>plt.legend()<line_sep>plt.xlim(0 len(self.data[tr_str])+1)<line_sep>plt.savefig(os.path.join(self.log_path 'top{}.png'.format(k)))<block_end><def_stmt>plot_progress_loss self title='ShuffleNetv2'<block_start>plt.figure(figsize=(9 8) dpi=300)<line_sep>plt.plot(self.data['train_loss'] label='Training')<line_sep>plt.plot(self.data['val_loss'] label='Validation')<line_sep>plt.title(title)<line_sep>plt.xlabel('Epoch')<line_sep>plt.ylabel('Loss')<line_sep>plt.legend()<line_sep>plt.xlim(0 len(self.data['train_loss'])+1)<line_sep>plt.savefig(os.path.join(self.log_path 'loss.png'))<block_end><def_stmt>plot_progress self claimed_acc1=<none> claimed_acc5=<none> title='ShuffleNetv2'<block_start>self.plot_progress_errk(claimed_acc1 title 1)<line_sep>self.plot_progress_errk(claimed_acc5 title 5)<line_sep>self.plot_progress_loss(title)<line_sep>plt.close('all')<block_end><block_end> |
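A hedged usage sketch for the CsvLogger above; the metric values are made up, and the class is assumed importable from its module.
logger = CsvLogger(filepath='.', filename='results.csv')
logger.write({'epoch': 1, 'val_error1': 0.42, 'val_error5': 0.18, 'val_loss': 1.9,
              'train_error1': 0.47, 'train_error5': 0.21, 'train_loss': 2.1})
# Plots top-1/top-5 error and loss curves to PNG files in filepath.
logger.plot_progress(claimed_acc1=0.694, claimed_acc5=0.883, title='ShuffleNetv2')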
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for open_spiel.python.algorithms.jpsro."""<import_stmt>itertools<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_from_stmt>open_spiel.python.algorithms jpsro<import_stmt>pyspiel<line_sep>GAMES=("sheriff_2p_gabriele" )<line_sep>SWEEP_KWARGS=[dict(# pylint: disable=g-complex-comprehension
game_name=game iterations=iterations policy_init=policy_init update_players_strategy=update_players_strategy target_equilibrium=target_equilibrium br_selection=br_selection train_meta_solver=train_meta_solver eval_meta_solver=eval_meta_solver ignore_repeats=ignore_repeats )<for>(iterations game policy_init update_players_strategy target_equilibrium br_selection train_meta_solver eval_meta_solver ignore_repeats) itertools.product([2] GAMES jpsro.INIT_POLICIES jpsro.UPDATE_PLAYERS_STRATEGY jpsro.BRS jpsro.BR_SELECTIONS jpsro.META_SOLVERS ["mwcce"] [<true> <false>])]<line_sep>TEST_COUNT_LIMIT=100<line_sep>interval=len(SWEEP_KWARGS)<floordiv>TEST_COUNT_LIMIT<line_sep>interval=interval<if>interval%2<ne>0<else>interval+1# Odd interval.
SWEEP_KWARGS=SWEEP_KWARGS[::interval]<def_stmt>get_game game_name<block_start>"""Returns the game."""<if_stmt>game_name<eq>"kuhn_poker_3p"<block_start>game_name="kuhn_poker"<line_sep>game_kwargs={"players":int(3)}<block_end><elif_stmt>game_name<eq>"trade_comm_2p_2i"<block_start>game_name="trade_comm"<line_sep>game_kwargs={"num_items":int(2)}<block_end><elif_stmt>game_name<eq>"sheriff_2p_gabriele"<block_start>game_name="sheriff"<line_sep>game_kwargs={"item_penalty":float(1.0) "item_value":float(5.0) "max_bribe":int(2) "max_items":int(10) "num_rounds":int(2) "sheriff_penalty":float(1.0) }<block_end><else_stmt><block_start><raise>ValueError("Unrecognised game: %s"%game_name)<block_end><return>pyspiel.load_game_as_turn_based(game_name game_kwargs)<block_end><class_stmt>JPSROTest(parameterized.TestCase absltest.TestCase)<block_start>@parameterized.parameters(*SWEEP_KWARGS)<def_stmt>test_jpsro_cce self **kwargs<block_start>game=get_game(kwargs["game_name"])<line_sep>jpsro.run_loop(game=game **kwargs)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>absltest.main()<block_end> |
"""
Function to convert a MAC address into EUI-style format.
Credit to the https://stackoverflow.com/a/29446103 answer on Stack Overflow
and the NAPALM base helpers module.
"""<import_from_stmt>re sub<def_stmt>mac_eui data<block_start>mac=str(data)<line_sep># remove delimiters and convert to lower case
mac=sub("[.:-]" "" mac).lower()<line_sep># mac should only contain letters and numbers, also
# if the length is not 12 (eg. 008041aefd7e), pad up to
# 12 with "0" - can happen with some vendors
<if_stmt>mac.isalnum()<block_start><if_stmt><not>len(mac)<eq>12<block_start>mac<augadd>"0"<times>(12-len(mac))<block_end><block_end><else_stmt><block_start><return>data <none><block_end># convert mac in canonical form (eg. 00:80:41:ae:fd:7e)
mac=":".join([mac[i:i+2]<for>i,j enumerate(mac)<if><not>(i%2)])<line_sep><return>mac <none><block_end> |
RANDOM_SEED=42<line_sep>TARGET_COL='target'<line_sep> |
<import_stmt>asyncio<import_from_stmt>abc ABC abstractmethod<import_from_stmt>.colors BLACK_ON_BLACK<import_from_stmt>.io KeyPressEvent MouseEvent PasteEvent io<import_from_stmt>.widgets._root _Root<line_sep>RESIZE_POLL_INTERVAL=0.5# Seconds between polling for resize events.
RENDER_INTERVAL=0# Seconds between screen renders.
<class_stmt>App(ABC)<block_start>"""
Base for creating terminal applications.
Parameters
----------
exit_key : KeyPressEvent | None, default: KeyPressEvent.ESCAPE
Quit the app when this key is pressed.
default_char : str, default: " "
Default background character for root widget.
default_color_pair : ColorPair, default: BLACK_ON_BLACK
Default background color pair for root widget.
title : str | None, default: None
Set terminal title (if supported).
"""<def_stmt>__init__ self * exit_key=KeyPressEvent.ESCAPE default_char=" " default_color_pair=BLACK_ON_BLACK title=<none><block_start>self.exit_key=exit_key<line_sep>self.default_char=default_char<line_sep>self.default_color_pair=default_color_pair<line_sep>self.title=title<block_end>@abstractmethod<async_keyword><def_stmt>on_start self<block_start>"""
Coroutine scheduled when app is run.
"""<block_end><def_stmt>run self<block_start>"""
Run the app.
"""<try_stmt><block_start>asyncio.run(self._run_async())<block_end><except_stmt>asyncio.CancelledError<block_start><pass><block_end><block_end><def_stmt>exit self<block_start><for_stmt>task asyncio.all_tasks()<block_start>task.cancel()<block_end><block_end><async_keyword><def_stmt>_run_async self<block_start>"""
Build environment, create root, and schedule app-specific tasks.
"""<with_stmt>io()<as>(env_in env_out)<block_start>self.root=root=_Root(app=self env_out=env_out default_char=self.default_char default_color_pair=self.default_color_pair )<if_stmt>self.title<block_start>env_out.set_title(self.title)<block_end>dispatch_press=root.dispatch_press<line_sep>dispatch_click=root.dispatch_click<line_sep>dispatch_paste=root.dispatch_paste<def_stmt>read_from_input <block_start>"""
Read and process input.
"""<for_stmt>key env_in.read_keys()<block_start><match_stmt>key<block_start><case_stmt>self.exit_key<block_start><return>self.exit()<block_end><case_stmt>MouseEvent()<block_start>dispatch_click(key)<block_end><case_stmt>KeyPressEvent()<block_start>dispatch_press(key)<block_end><case_stmt>PasteEvent()<block_start>dispatch_paste(key)<block_end><block_end><block_end><block_end><async_keyword><def_stmt>poll_size <block_start>"""
Poll terminal size every `RESIZE_POLL_INTERVAL` seconds.
"""<line_sep>size=env_out.get_size()<line_sep>resize=root.resize<while_stmt><true><block_start><await>asyncio.sleep(RESIZE_POLL_INTERVAL)<line_sep>new_size=env_out.get_size()<if_stmt>size<ne>new_size<block_start>resize(new_size)<line_sep>size=new_size<block_end><block_end><block_end><async_keyword><def_stmt>auto_render <block_start>"""
Render screen every `RENDER_INTERVAL` seconds.
"""<line_sep>render=root.render<while_stmt><true><block_start><await>asyncio.sleep(RENDER_INTERVAL)<line_sep>render()<block_end><block_end><with_stmt>env_in.raw_mode() env_in.attach(read_from_input)<block_start><await>asyncio.gather(poll_size() auto_render() self.on_start() )<block_end><block_end><block_end><def_stmt>add_widget self widget<block_start>self.root.add_widget(widget)<block_end><def_stmt>add_widgets self *widgets<block_start>self.root.add_widgets(*widgets)<block_end>@property<def_stmt>children self<block_start><return>self.root.children<block_end><block_end> |
<class_stmt>Destiny2APIError(Exception)<block_start><pass><block_end><class_stmt>Destiny2InvalidParameters(Destiny2APIError)<block_start><pass><block_end><class_stmt>Destiny2APICooldown(Destiny2APIError)<block_start><pass><block_end><class_stmt>Destiny2RefreshTokenError(Destiny2APIError)<block_start><pass><block_end><class_stmt>Destiny2MissingAPITokens(Destiny2APIError)<block_start><pass><block_end><class_stmt>Destiny2MissingManifest(Destiny2APIError)<block_start><pass><block_end> |
'''Gradient penalty functions.
'''<import_stmt>torch<import_from_stmt>torch autograd<def_stmt>contrastive_gradient_penalty network input penalty_amount=1.<block_start>"""Contrastive gradient penalty.
This is essentially the loss introduced by Mescheder et al. (2018).
Args:
network: Network to apply penalty through.
input: Input or list of inputs for network.
penalty_amount: Amount of penalty.
Returns:
torch.Tensor: gradient penalty loss.
"""<def_stmt>_get_gradient inp output<block_start>gradient=autograd.grad(outputs=output inputs=inp grad_outputs=torch.ones_like(output) create_graph=<true> retain_graph=<true> only_inputs=<true> allow_unused=<true>)[0]<line_sep><return>gradient<block_end><if_stmt><not>isinstance(input (list tuple))<block_start>input=[input]<block_end>input=[inp.detach()<for>inp input]<line_sep>input=[inp.requires_grad_()<for>inp input]<with_stmt>torch.set_grad_enabled(<true>)<block_start>output=network(*input)[-1]<block_end>gradient=_get_gradient(input output)<line_sep>gradient=gradient.view(gradient.size()[0] -1)<line_sep>penalty=(gradient<power>2).sum(1).mean()<line_sep><return>penalty<times>penalty_amount<block_end> |
<import_stmt>argparse<import_from_stmt>typing List<import_from_stmt>rasa data<import_from_stmt>rasa.cli.default_arguments add_nlu_data_param<import_from_stmt>rasa.cli.utils get_validated_path<import_from_stmt>rasa.constants DEFAULT_DATA_PATH<line_sep># noinspection PyProtectedMember
<def_stmt>add_subparser subparsers:argparse._SubParsersAction parents:List[argparse.ArgumentParser]<block_start><import_stmt>rasa_nlu.convert<as>convert<line_sep>data_parser=subparsers.add_parser("data" conflict_handler="resolve" formatter_class=argparse.ArgumentDefaultsHelpFormatter parents=parents help="Utils for the Rasa training files")<line_sep>data_parser.set_defaults(func=<lambda>_:data_parser.print_help(<none>))<line_sep>data_subparsers=data_parser.add_subparsers()<line_sep>convert_parser=data_subparsers.add_parser("convert" formatter_class=argparse.ArgumentDefaultsHelpFormatter parents=parents help="Convert Rasa data between different formats")<line_sep>convert_parser.set_defaults(func=<lambda>_:convert_parser.print_help(<none>))<line_sep>convert_subparsers=convert_parser.add_subparsers()<line_sep>convert_nlu_parser=convert_subparsers.add_parser("nlu" formatter_class=argparse.ArgumentDefaultsHelpFormatter parents=parents help="Convert NLU training data between markdown and json")<line_sep>convert.add_arguments(convert_nlu_parser)<line_sep>convert_nlu_parser.set_defaults(func=convert.main)<line_sep>split_parser=data_subparsers.add_parser("split" formatter_class=argparse.ArgumentDefaultsHelpFormatter parents=parents help="Split Rasa data in training and test data")<line_sep>split_parser.set_defaults(func=<lambda>_:split_parser.print_help(<none>))<line_sep>split_subparsers=split_parser.add_subparsers()<line_sep>nlu_split_parser=split_subparsers.add_parser("nlu" formatter_class=argparse.ArgumentDefaultsHelpFormatter help="Perform a split of your NLU data according to the specified "<concat>"percentages")<line_sep>nlu_split_parser.set_defaults(func=split_nlu_data)<line_sep>_add_split_args(nlu_split_parser)<block_end><def_stmt>_add_split_args parser:argparse.ArgumentParser<arrow><none><block_start>add_nlu_data_param(parser)<line_sep>parser.add_argument("--training_fraction" type=float default=0.8 help="Percentage of the data which should be the "<concat>"training data")<line_sep>parser.add_argument("-o" "--out" type=str default="train_test_split" help="Directory where the split files should be "<concat>"stored")<block_end><def_stmt>split_nlu_data args<block_start><import_from_stmt>rasa_nlu.training_data.loading load_data<line_sep>data_path=get_validated_path(args.nlu "nlu" DEFAULT_DATA_PATH)<line_sep>data_path=data.get_nlu_directory(data_path)<line_sep>nlu_data=load_data(data_path)<line_sep>train,test=nlu_data.train_test_split(args.training_fraction)<line_sep>train.persist(args.out filename="training_data.json")<line_sep>test.persist(args.out filename="test_data.json")<block_end> |
# Copyright 2017-2018 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Test for the EpisodeTimeMs callback."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>six<import_stmt>deepmind_lab<class_stmt>EpisodeTimeTest(unittest.TestCase)<block_start><def_stmt>run_at_frame_rate self fps<block_start>env=deepmind_lab.Lab('tests/episode_time_test' ['EPISODE_TIME_SECONDS'] config={'fps':str(fps) 'width':'32' 'height':'32'})<line_sep>env.reset()<line_sep>nop=np.zeros((7 ) dtype=np.intc)<for_stmt>_ six.moves.range(0 fps)<block_start>env.step(nop 1)<block_end>obs=env.observations()<line_sep>self.assertEqual(obs['EPISODE_TIME_SECONDS'][0] 1.0)<block_end><def_stmt>test_at_60 self<block_start>self.run_at_frame_rate(60)<block_end><def_stmt>test_at_30 self<block_start>self.run_at_frame_rate(30)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><if_stmt>os.environ.get('TEST_SRCDIR')<block_start>deepmind_lab.set_runfiles_path(os.path.join(os.environ['TEST_SRCDIR'] 'org_deepmind_lab'))<block_end>unittest.main()<block_end> |
<def_stmt>extractMichilunWordpressCom item<block_start>'''
Parser for 'michilun.wordpress.com'
'''<line_sep>bad=['Recommendations and Reviews' ]<if_stmt>any([tmp<in>item['tags']<for>tmp bad])<block_start><return><none><block_end>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol)<or>"preview"<in>item['title'].lower()<block_start><return><none><block_end>tagmap=[('Side Projects - Scheme of the Official Descendant' 'Scheme of the Official Descendant' 'translated') ('Song in the Peach Blossoms' 'Song in the Peach Blossoms' 'translated') ('Onrain (Online - The Novel)' 'Onrain (Online - The Novel)' 'translated') ('At the End of the Wish' 'At the End of the Wish' 'translated') ('Bringing Calamity to the Nation' 'Bringing Calamity to the Nation' 'translated') ('Side Projects - The Flame\'s Daughter' 'The Flame\'s Daughter' 'translated') ('PRC' 'PRC' 'translated') ('Loiterous' 'Loiterous' 'oel') ]<for_stmt>tagname,name,tl_type tagmap<block_start><if_stmt>tagname<in>item['tags']<block_start><return>buildReleaseMessageWithType(item name vol chp frag=frag postfix=postfix tl_type=tl_type)<block_end><block_end><return><false><block_end> |
<import_from_stmt>kaffe.tensorflow Network<class_stmt>GoogleNet(Network)<block_start><def_stmt>setup self<block_start>(self.feed('data').conv(7 7 64 2 2 name='conv1_7x7_s2').max_pool(3 3 2 2 name='pool1_3x3_s2').lrn(2 2e-05 0.75 name='pool1_norm1').conv(1 1 64 1 1 name='conv2_3x3_reduce').conv(3 3 192 1 1 name='conv2_3x3').lrn(2 2e-05 0.75 name='conv2_norm2').max_pool(3 3 2 2 name='pool2_3x3_s2').conv(1 1 64 1 1 name='inception_3a_1x1'))<line_sep>(self.feed('pool2_3x3_s2').conv(1 1 96 1 1 name='inception_3a_3x3_reduce').conv(3 3 128 1 1 name='inception_3a_3x3'))<line_sep>(self.feed('pool2_3x3_s2').conv(1 1 16 1 1 name='inception_3a_5x5_reduce').conv(5 5 32 1 1 name='inception_3a_5x5'))<line_sep>(self.feed('pool2_3x3_s2').max_pool(3 3 1 1 name='inception_3a_pool').conv(1 1 32 1 1 name='inception_3a_pool_proj'))<line_sep>(self.feed('inception_3a_1x1' 'inception_3a_3x3' 'inception_3a_5x5' 'inception_3a_pool_proj').concat(3 name='inception_3a_output').conv(1 1 128 1 1 name='inception_3b_1x1'))<line_sep>(self.feed('inception_3a_output').conv(1 1 128 1 1 name='inception_3b_3x3_reduce').conv(3 3 192 1 1 name='inception_3b_3x3'))<line_sep>(self.feed('inception_3a_output').conv(1 1 32 1 1 name='inception_3b_5x5_reduce').conv(5 5 96 1 1 name='inception_3b_5x5'))<line_sep>(self.feed('inception_3a_output').max_pool(3 3 1 1 name='inception_3b_pool').conv(1 1 64 1 1 name='inception_3b_pool_proj'))<line_sep>(self.feed('inception_3b_1x1' 'inception_3b_3x3' 'inception_3b_5x5' 'inception_3b_pool_proj').concat(3 name='inception_3b_output').max_pool(3 3 2 2 name='pool3_3x3_s2').conv(1 1 192 1 1 name='inception_4a_1x1'))<line_sep>(self.feed('pool3_3x3_s2').conv(1 1 96 1 1 name='inception_4a_3x3_reduce').conv(3 3 208 1 1 name='inception_4a_3x3'))<line_sep>(self.feed('pool3_3x3_s2').conv(1 1 16 1 1 name='inception_4a_5x5_reduce').conv(5 5 48 1 1 name='inception_4a_5x5'))<line_sep>(self.feed('pool3_3x3_s2').max_pool(3 3 1 1 name='inception_4a_pool').conv(1 1 64 1 1 name='inception_4a_pool_proj'))<line_sep>(self.feed('inception_4a_1x1' 'inception_4a_3x3' 'inception_4a_5x5' 'inception_4a_pool_proj').concat(3 name='inception_4a_output').conv(1 1 160 1 1 name='inception_4b_1x1'))<line_sep>(self.feed('inception_4a_output').conv(1 1 112 1 1 name='inception_4b_3x3_reduce').conv(3 3 224 1 1 name='inception_4b_3x3'))<line_sep>(self.feed('inception_4a_output').conv(1 1 24 1 1 name='inception_4b_5x5_reduce').conv(5 5 64 1 1 name='inception_4b_5x5'))<line_sep>(self.feed('inception_4a_output').max_pool(3 3 1 1 name='inception_4b_pool').conv(1 1 64 1 1 name='inception_4b_pool_proj'))<line_sep>(self.feed('inception_4b_1x1' 'inception_4b_3x3' 'inception_4b_5x5' 'inception_4b_pool_proj').concat(3 name='inception_4b_output').conv(1 1 128 1 1 name='inception_4c_1x1'))<line_sep>(self.feed('inception_4b_output').conv(1 1 128 1 1 name='inception_4c_3x3_reduce').conv(3 3 256 1 1 name='inception_4c_3x3'))<line_sep>(self.feed('inception_4b_output').conv(1 1 24 1 1 name='inception_4c_5x5_reduce').conv(5 5 64 1 1 name='inception_4c_5x5'))<line_sep>(self.feed('inception_4b_output').max_pool(3 3 1 1 name='inception_4c_pool').conv(1 1 64 1 1 name='inception_4c_pool_proj'))<line_sep>(self.feed('inception_4c_1x1' 'inception_4c_3x3' 'inception_4c_5x5' 'inception_4c_pool_proj').concat(3 name='inception_4c_output').conv(1 1 112 1 1 name='inception_4d_1x1'))<line_sep>(self.feed('inception_4c_output').conv(1 1 144 1 1 name='inception_4d_3x3_reduce').conv(3 3 288 1 1 name='inception_4d_3x3'))<line_sep>(self.feed('inception_4c_output').conv(1 1 32 1 1 name='inception_4d_5x5_reduce').conv(5 5 64 1 1 name='inception_4d_5x5'))<line_sep>(self.feed('inception_4c_output').max_pool(3 3 1 1 name='inception_4d_pool').conv(1 1 64 1 1 name='inception_4d_pool_proj'))<line_sep>(self.feed('inception_4d_1x1' 'inception_4d_3x3' 'inception_4d_5x5' 'inception_4d_pool_proj').concat(3 name='inception_4d_output').conv(1 1 256 1 1 name='inception_4e_1x1'))<line_sep>(self.feed('inception_4d_output').conv(1 1 160 1 1 name='inception_4e_3x3_reduce').conv(3 3 320 1 1 name='inception_4e_3x3'))<line_sep>(self.feed('inception_4d_output').conv(1 1 32 1 1 name='inception_4e_5x5_reduce').conv(5 5 128 1 1 name='inception_4e_5x5'))<line_sep>(self.feed('inception_4d_output').max_pool(3 3 1 1 name='inception_4e_pool').conv(1 1 128 1 1 name='inception_4e_pool_proj'))<line_sep>(self.feed('inception_4e_1x1' 'inception_4e_3x3' 'inception_4e_5x5' 'inception_4e_pool_proj').concat(3 name='inception_4e_output').max_pool(3 3 2 2 name='pool4_3x3_s2').conv(1 1 256 1 1 name='inception_5a_1x1'))<line_sep>(self.feed('pool4_3x3_s2').conv(1 1 160 1 1 name='inception_5a_3x3_reduce').conv(3 3 320 1 1 name='inception_5a_3x3'))<line_sep>(self.feed('pool4_3x3_s2').conv(1 1 32 1 1 name='inception_5a_5x5_reduce').conv(5 5 128 1 1 name='inception_5a_5x5'))<line_sep>(self.feed('pool4_3x3_s2').max_pool(3 3 1 1 name='inception_5a_pool').conv(1 1 128 1 1 name='inception_5a_pool_proj'))<line_sep>(self.feed('inception_5a_1x1' 'inception_5a_3x3' 'inception_5a_5x5' 'inception_5a_pool_proj').concat(3 name='inception_5a_output').conv(1 1 384 1 1 name='inception_5b_1x1'))<line_sep>(self.feed('inception_5a_output').conv(1 1 192 1 1 name='inception_5b_3x3_reduce').conv(3 3 384 1 1 name='inception_5b_3x3'))<line_sep>(self.feed('inception_5a_output').conv(1 1 48 1 1 name='inception_5b_5x5_reduce').conv(5 5 128 1 1 name='inception_5b_5x5'))<line_sep>(self.feed('inception_5a_output').max_pool(3 3 1 1 name='inception_5b_pool').conv(1 1 128 1 1 name='inception_5b_pool_proj'))<line_sep>(self.feed('inception_5b_1x1' 'inception_5b_3x3' 'inception_5b_5x5' 'inception_5b_pool_proj').concat(3 name='inception_5b_output').avg_pool(7 7 1 1 padding='VALID' name='pool5_7x7_s1').fc(1000 relu=<false> name='loss3_classifier').softmax(name='prob'))<block_end><block_end> |
<import_from_stmt>typing Optional<import_from_stmt>ansys.mapdl.core.mapdl_types MapdlInt<class_stmt>AnalysisOptions<block_start><def_stmt>abextract self mode1="" mode2="" **kwargs<block_start>"""Extracts the alpha-beta damping multipliers for Rayleigh damping.
APDL Command: ABEXTRACT
Parameters
----------
mode1
First mode number.
mode2
Second mode number.
Notes
-----
ABEXTRACT calls the command macro DMPEXT to extract the damping ratio
of MODE1 and MODE2 and then computes the Alpha and Beta damping
multipliers for use in a subsequent structural harmonic or transient
analysis. See Damping in the Structural Analysis Guide for more
information on the alpha and beta damping multipliers. The damping
multipliers are stored in parameters ALPHADMP and BETADMP and can be
applied using the ALPHAD and BETAD commands. Before calling ABEXTRACT,
you must issue RMFLVEC to extract the modal displacements. In addition,
a node component FLUN must exist from all FLUID136 nodes. See
Introduction for more information on thin film analyses.
This command is also valid in PREP7.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
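Examples
--------
A hedged sketch, not from the source; the mode numbers are
illustrative and mapdl is an assumed active PyMAPDL session. The
extracted multipliers satisfy zeta_i = ALPHADMP/(2*omega_i) +
BETADMP*omega_i/2 at the two chosen modes.
>>> mapdl.run("RMFLVEC")   # modal displacements must be extracted first
>>> mapdl.abextract(1, 2)  # alpha-beta multipliers from modes 1 and 2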
"""<line_sep>command=f"ABEXTRACT,{mode1},{mode2}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>accoption self activate="" **kwargs<block_start>"""Specifies GPU accelerator capability options.
APDL Command: ACCOPTION
Parameters
----------
activate
Activates the GPU accelerator capability within the equation
solvers.
OFF - Do not use GPU accelerator (default). ON - Use GPU accelerator.
Notes
-----
The GPU accelerator capability requires specific hardware to be
installed on the machine. See the appropriate ANSYS, Inc. Installation
Guide (Windows or Linux) for a list of supported GPU hardware. Use of
this capability also requires HPC licensing. For more information, see
GPU Accelerator Capability in the Parallel Processing Guide.
The GPU accelerator capability is available for the sparse direct
solver and the PCG and JCG iterative solvers. Static, buckling, modal,
full harmonic, and full transient analyses are supported. For buckling
analyses, the Block Lanczos and Subspace eigensolvers are supported.
For modal analyses, only the Block Lanczos, PCG Lanczos, Subspace,
Unsymmetric, and Damped eigensolvers are supported. Activating this
capability when using other equation solvers or other analysis types
has no effect.
The GPU accelerator capability is supported only on the Windows 64-bit
and Linux 64-bit platforms.
"""<line_sep>command=f"ACCOPTION,{activate}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>adams self nmodes="" kstress="" kshell="" **kwargs<block_start>"""Performs solutions and writes flexible body information to a modal
APDL Command: ADAMS
neutral file (Jobname.MNF) for use in an ADAMS analysis.
Parameters
----------
nmodes
Number of normal modes to be written to Jobname.MNF file (no
default).
kstress
Specifies whether to write stress or strain results:
0 - Do not write stress or strain results (default).
1 - Write stress results.
2 - Write strain results.
3 - Write both stress and strain results.
kshell
Shell element output location. This option is valid only for shell
elements.
0, 1 - Shell top surface (default).
2 - Shell middle surface.
3 - Shell bottom surface.
Notes
-----
ADAMS invokes a predefined ANSYS macro that solves a series of analyses
and then writes the modal neutral file, Jobname.MNF. This file can be
imported into the ADAMS program in order to perform a rigid body
dynamics simulation. For detailed information on how to use the ADAMS
command macro to create a modal neutral file, see Rigid Body Dynamics
and the ANSYS-ADAMS Interface in the Substructuring Analysis Guide.
Before running the ADAMS command macro, you must specify the units with
the /UNITS command. The interface points should be the only selected
nodes when the command macro is initiated. (Interface points are nodes
where constraints may be applied in ADAMS.) Only selected elements will
be considered in the calculations.
By default, stress and strain data is transferred to the ADAMS program
for all nodes, as specified by the KSTRESS value. If you want to
transfer stress/strain data for only a subset of nodes, select the
desired subset and create a node component named "STRESS" before
running the ADAMS command macro. For example, you may want to select
exterior nodes for the purpose of visualization in the ADAMS program.
The default filename for the modal neutral file is Jobname.MNF. In
interactive (GUI) mode, you can specify a filename other than
Jobname.MNF. In batch mode, there is no option to change the filename,
and the modal neutral file is always written to Jobname.MNF.
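Examples
--------
A hedged sketch, not from the source; assumes an active PyMAPDL
session mapdl, with units set and only interface nodes selected,
per the prerequisites above.
>>> mapdl.run("/UNITS,SI")             # must precede the macro
>>> mapdl.adams(nmodes=20, kstress=1)  # 20 modes, write stress results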
"""<line_sep>command=f"ADAMS,{nmodes},{kstress},{kshell}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>antype self antype="" status="" ldstep="" substep="" action="" **kwargs<block_start>"""Specifies the analysis type and restart status.
APDL Command: ANTYPE
Parameters
----------
antype
Analysis type (defaults to the previously specified analysis type,
or to STATIC if none specified):
STATIC or 0 - Perform a static analysis. Valid for all degrees of freedom.
BUCKLE or 1 - Perform a buckling analysis. Implies that a previous static solution was
performed with prestress effects calculated
(PSTRES,ON). Valid for structural degrees of freedom
only.
MODAL or 2 - Perform a modal analysis. Valid for structural and fluid degrees of freedom.
HARMIC or 3 - Perform a harmonic analysis. Valid for structural, fluid, magnetic, and
electrical degrees of freedom.
TRANS or 4 - Perform a transient analysis. Valid for all degrees of freedom.
SUBSTR or 7 - Perform a substructure analysis. Valid for all degrees of freedom.
SPECTR or 8 - Perform a spectrum analysis. Implies that a previous modal analysis was
performed. Valid for structural degrees of freedom
only.
status
Specifies the status of the analysis (new or restart):
NEW - Specifies a new analysis (default). If NEW, the remaining fields on this
command are ignored.
RESTART - Specifies a restart of a previous analysis. Valid for static, modal, and
transient (full or mode-superposition method) analyses.
For more information about restarting static and
transient analyses, see Multiframe Restart in the Basic
Analysis Guide. For more information on restarting a
modal analysis, see Modal Analysis Restart in the Basic
Analysis Guide.
Multiframe restart is also valid for harmonic analysis, but is limited to 2-D magnetic analysis only. - A substructure analysis (backsubstitution method only) can be restarted for the
purpose of generating additional load vectors.
For more information, see the SEOPT command and
Applying Loads and Creating the Superelement
Matrices in the Substructuring Analysis Guide.
VTREST - Specifies the restart of a previous VT Accelerator analysis. Valid only with
Antype = STATIC, HARMIC, or TRANS. For more information,
see VT Accelerator Re-run in the Basic Analysis Guide.
ldstep
Specifies the load step at which a multiframe restart begins.
substep
Specifies the substep at which a multiframe restart begins.
action
Specifies the manner of a multiframe restart.
CONTINUE - The program continues the analysis based on the specified LDSTEP and SUBSTEP
(default). The current load step is continued. If the
end of the load step is encountered in the .Rnnn file, a
new load step is started. The program deletes all .Rnnn
files, or .Mnnn files for mode-superposition transient
analyses, beyond the point of restart and updates the
.LDHI file if a new load step is encountered.
ENDSTEP - At restart, force the specified load step (LDSTEP) to end at the specified
substep (SUBSTEP), even though the end of the current
load step has not been reached. At the end of the
specified substep, all loadings are scaled to the level
of the current ending and stored in the .LDHI file. A run
following this ENDSTEP starts a new load step. This
capability allows you to change the load level in the
middle of a load step. The program updates the .LDHI file
and deletes all .Rnnn files, or .Mnnn files for mode-
superposition transient analyses, beyond the point of
ENDSTEP. The .Rnnn or .Mnnn file at the point of ENDSTEP
are rewritten to record the rescaled load level.
RSTCREATE - At restart, retrieve information to be written to the results file for the
specified load step (LDSTEP) and substep (SUBSTEP). Be
sure to use OUTRES to write the results to the
results file. This action does not affect the .LDHI or
.Rnnn files. Previous items stored in the results file
at and beyond the point of RSTCREATE are deleted. This
option cannot be used to restart a mode-superposition
transient analysis.
PERTURB - At restart, a linear perturbation analysis (static, modal, buckling, or full
harmonic) is performed for the specified load step
(LDSTEP) and substep (SUBSTEP). This action does not
affect the .LDHI, .Rnnn, or .RST files.
Notes
-----
If using the ANTYPE command to change the analysis type in the same
SOLVE session, the program issues the following message: "Some analysis
options have been reset to their defaults. Please verify current
settings or respecify as required." Typically, the program resets
commands such as NLGEOM and EQSLV to their default values.
The analysis type (Antype) cannot be changed if a restart is specified.
Always save parameters before doing a restart. For more information on
the different types of restart, see Restarting an Analysis in the Basic
Analysis Guide.
This command is also valid in PREP7.
The ANSYS Professional - Nonlinear Structural (PRN) product supports
the Antype = TRANS option for mode-superposition (TRNOPT,MSUP) analyses
only.
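Examples
--------
Hedged sketches, not from the source (load step and substep numbers
are illustrative); mapdl is an assumed active PyMAPDL session.
>>> mapdl.antype("STATIC", "NEW")  # begin a new static analysis
>>> mapdl.antype("TRANS", "RESTART", ldstep=2, substep=10, action="CONTINUE")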
"""<line_sep>command=f"ANTYPE,{antype},{status},{ldstep},{substep},{action}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>ascres self opt="" **kwargs<block_start>"""Specifies the output type for an acoustic scattering analysis.
APDL Command: ASCRES
Parameters
----------
opt
Output option:
TOTAL - Output the total pressure field (default).
SCAT - Output the scattered pressure field.
Notes
-----
Use the ASCRES command to specify the output type for an acoustic
scattering analysis.
The scattered option (Opt = SCAT) provides a scattered pressure output,
psc, required for calculating target strength (TS).
The default behavior (Opt = TOTAL) provides a sum of the incident and
scattering fields, ptotal = pinc + psc.
Issue the AWAVE command to define the incident pressure pinc. If the
AWAVE command is defined with Opt2 = INT, only the total pressure field
is output regardless of the ASCRES,Opt command.
"""<line_sep>command=f"ASCRES,{opt}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>asol self lab="" opt="" **kwargs<block_start>"""Specifies the output type of an acoustic scattering analysis.
APDL Command: ASOL
Parameters
----------
lab
Acoustic solver specification (no default):
SCAT - Set acoustic solver to the scattered field formulation.
opt
Option identifying an acoustic solver status:
OFF - Deactivate the specified acoustic solver (default).
ON - Activate the specified acoustic solver.
Notes
-----
Use the ASOL command to activate the specified acoustic solution
process.
The scattered option (Lab = SCAT) sets the acoustic solver to the
scattered-pressure field formulation.
Issue the AWAVE command to define the incident pressure pinc. If the
AWAVE command is defined with Opt2 = INT, the acoustic solver is set to
the scattered field formulation regardless of the ASOL command issued.
"""<line_sep>command=f"ASOL,{lab},{opt}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>bcsoption self memory_option="" memory_size="" solve_info="" **kwargs<block_start>"""Sets memory option for the sparse solver.
APDL Command: BCSOPTION
Parameters
----------
memory_option
Memory allocation option:
DEFAULT - Use the default memory allocation strategy for
the sparse solver. The default strategy attempts
to run in the INCORE memory mode. If there is
not enough available physical memory when the
solver starts to run in the INCORE memory mode,
the solver will then attempt to run in the
OUTOFCORE memory mode.
INCORE - Use a memory allocation strategy in the sparse
solver that will attempt to obtain enough memory
to run with the entire factorized matrix in
memory. This option uses the most amount of
memory and should avoid doing any I/O. By
avoiding I/O, this option achieves optimal solver
performance. However, a significant amount of
memory is required to run in this mode, and it is
only recommended on machines with a large amount
of memory. If the allocation for in-core memory
fails, the solver will automatically revert to
out-of-core memory mode.
OUTOFCORE - Use a memory allocation strategy in the sparse
solver that will attempt to allocate only
enough work space to factor each individual
frontal matrix in memory, but will store the
entire factorized matrix on disk. Typically,
this memory mode results in poor performance
due to the potential bottleneck caused by the
I/O to the various files written by the
solver.
FORCE - This option, when used in conjunction with the
Memory_Size option, allows you to force the sparse
solver to run with a specific amount of
memory. This option is only recommended for the
advanced user who understands sparse solver memory
requirements for the problem being solved,
understands the physical memory on the system, and
wants to control the sparse solver memory usage.
memory_size
Initial memory size allocation for the sparse solver in
MB. This argument allows you to tune the sparse solver
memory and is not generally required. Although there is no
upper limit for Memory_Size, the Memory_Size setting
should always be well within the physical memory
available, but not so small as to cause the sparse solver
to run out of memory. Warnings and/or errors from the
sparse solver will appear if this value is set too low. If
the FORCE memory option is used, this value is the amount
of memory allocated for the entire duration of the sparse
solver solution.
solve_info
Solver output option:
OFF - Turns off additional output printing from the sparse
solver (default).
PERFORMANCE - Turns on additional output printing from the
sparse solver, including a performance
summary and a summary of file I/O for the
sparse solver. Information on memory usage
during assembly of the global matrix (that
is, creation of the Jobname.FULL file) is
also printed with this option.
Notes
-----
This command controls options related to the sparse solver in
all analysis types where the sparse solver can be used. It
also controls the Block Lanczos eigensolver in a modal or
buckling analysis.
The sparse solver runs from one large work space (that is, one
large memory allocation). The amount of memory required for
the sparse solver is unknown until the matrix structure is
preprocessed, including equation reordering. The amount of
memory allocated for the sparse solver is then dynamically
adjusted to supply the solver what it needs to compute the
solution.
If you have a very large memory system, you may want to try
selecting the INCORE memory mode for larger jobs to improve
performance. When running the sparse solver on a machine with
very slow I/O performance (for example, slow hard drive
speed), you may want to try using the INCORE memory mode to
achieve better performance. However, doing so may require much
more memory compared to running in the OUTOFCORE memory mode.
Running with the INCORE memory mode is best for jobs which
comfortably fit within the limits of the physical memory on a
given system. If the sparse solver work space exceeds physical
memory size, the system will be forced to use virtual memory
(or the system page/swap file). In this case, it is typically
more efficient to run with the OUTOFCORE memory mode. Assuming
the job fits comfortably within the limits of the machine,
running with the INCORE memory mode is often ideal for jobs
where repeated solves are performed for a single matrix
factorization. This occurs in a modal or buckling analysis or
when doing multiple load steps in a linear, static analysis.
For repeated runs with the sparse solver, you may set the
initial sparse solver memory allocation to the amount required
for factorization. This strategy reduces the frequency of
allocation and reallocation in the run to make the INCORE
option fully effective. If you have a very large memory
system, you may use the Memory_Size argument to increase the
maximum size attempted for in-core runs.
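Examples
--------
A hedged sketch, not from the source; mapdl is an assumed active
PyMAPDL session. Request the in-core mode and print the solver's
performance summary.
>>> mapdl.bcsoption(memory_option="INCORE", solve_info="PERFORMANCE")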
"""<line_sep>command=f"BCSOPTION,,{memory_option},{memory_size},,,{solve_info}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cgrow self action="" par1="" par2="" **kwargs<block_start>"""Defines crack-growth information
APDL Command: CGROW
Parameters
----------
action
Specifies the action for defining or manipulating crack-growth
data:
NEW - Initiate a new set of crack-growth simulation data (default).
CID - Specify the crack-calculation (CINT) ID for energy-release rates to be used in
the fracture criterion calculation.
FCOPTION - Specify the fracture criterion for crack-growth/delamination.
CPATH - Specify the element component for crack growth.
DTIME - Specify the initial time step for crack growth.
DTMIN - Specify the minimum time step for crack growth.
DTMAX - Specify the maximum time step for crack growth.
FCRAT - Fracture criterion ratio (fc).
STOP - Stops the analysis when the specified maximum crack extension is reached.
METHOD - Define the method of crack propagation.
Notes
-----
When Action = NEW, the CGROW command initializes a crack-growth
simulation set. Subsequent CGROW commands define the parameters
necessary for the simulation.
For multiple cracks, issue multiple CGROW,NEW commands (and any
subsequent CGROW commands necessary to define the parameters) for each
crack.
If the analysis is restarted (ANTYPE,,RESTART), the CGROW command must
be re-issued.
For additional details on this command, see
https://www.mm.bme.hu/~gyebro/files/ans_help_v182/ans_cmd/Hlp_C_CGROW.html
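Examples
--------
A minimal sketch (hypothetical CINT ID, assuming an active
``mapdl`` session); start a new crack-growth set and attach a
crack-calculation ID to it:

>>> mapdl.cgrow('NEW')
>>> mapdl.cgrow('CID', 1)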
"""<line_sep>command=f"CGROW,{action},{par1},{par2}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cmatrix self symfac="" condname="" numcond="" grndkey="" capname="" **kwargs<block_start>"""Performs electrostatic field solutions and calculates the
self and mutual capacitances between multiple conductors.
APDL Command: CMATRIX
Parameters
----------
symfac
Geometric symmetry factor. Capacitance values are scaled by this
factor which represents the fraction of the total device modeled.
Defaults to 1.
condname
Alphanumeric prefix identifier used in defining named conductor
components.
numcond
Total Number of Components. If a ground is modeled, it is to be
included as a component. If a ground is not modeled, but infinite
elements are used to model the far-field ground, a named component
for the far-field ground is not required.
grndkey
Ground key:
0 - Ground is one of the components, which is not at infinity.
1 - Ground is at infinity (modeled by infinite elements).
capname
Array name for computed capacitance matrix. Defaults to CMATRIX.
Notes
-----
To invoke the CMATRIX macro, the exterior nodes of each conductor must
be grouped into individual components using the CM command. Each set
of independent components is assigned a component name with a common
prefix followed by the conductor number. A conductor system with a
ground must also include the ground nodes as a component. The ground
component is numbered last in the component name sequence.
A ground capacitance matrix relates charge to a voltage vector. A
ground matrix cannot be applied to a circuit modeler. The lumped
capacitance matrix is a combination of lumped "arrangements" of
voltage differences between conductors. Use the lumped capacitance
terms in a circuit modeler to represent capacitances between
conductors.
Enclose all name-strings in single quotes in the CMATRIX command line.
See the Mechanical APDL Theory Reference and HMAGSOLV in the Low-
Frequency Electromagnetic Analysis Guide for details.
This command does not support multiframe restarts.
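Examples
--------
A minimal sketch (assuming an active ``mapdl`` session and
hypothetical conductor components COND1, COND2, and COND3 already
defined via CM, with the ground component numbered last):

>>> mapdl.cmatrix(1, 'COND', 3, 0, 'CAPMAT')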
"""<line_sep>command=f"CMATRIX,{symfac},'{condname}',{numcond},{grndkey},'{capname}'"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cmsopt self cmsmeth="" nmode="" freqb="" freqe="" fbddef="" fbdval="" iokey="" **kwargs <block_start>"""Specifies component mode synthesis (CMS) analysis options.
APDL Command: CMSOPT
Parameters
----------
cmsmeth
The component mode synthesis method to use. This value is required.
FIX - Fixed-interface method.
FREE - Free-interface method.
RFFB - Residual-flexible free-interface method.
nmode
The number of normal modes extracted and used in the superelement
generation. This value is required; the minimum is 1.
freqb
Beginning, or lower end, of frequency range of interest. This value
is optional.
freqe
Ending, or upper end, of frequency range of interest. This value is
optional.
fbddef
In a free-interface (CMSMETH = FREE) or residual-flexible free-
interface (CMSMETH = RFFB) CMS analysis, the method to use for
defining free body modes:
FNUM - The number (FBDVAL) of rigid body modes in the calculation.
FTOL - Employ a specified tolerance (FBDVAL) to determine rigid body modes in the
calculation.
FAUTO - Automatically determine rigid body modes in the calculation. This method is the
default.
RIGID - If no rigid body modes exist, define your own via the RIGID command.
fbdval
In a free-interface CMS analysis (CMSMETH = FREE), the number of
rigid body modes if FBDDEF = fnum (where the value is an integer
from 0 through 6), or the tolerance to employ if FBDDEF = ftol
(where the value is a positive real number representing rad/sec).
This value is required only when FBDDEF = fnum or FBDDEF = ftol;
otherwise, any specified value is ignored.
iokey
Output key to control writing of the transformation matrix to the
.TCMS file (FIX or FREE methods) or body properties to the .EXB
file (FIX method).
TCMS - Write the transformation matrix of the nodal component defined by the OUTPR
command to a .TCMS file. Refer to TCMS File Format in the
Programmer's Reference for more information on this
file.
EXB - Write a body property input file (.EXB file) containing the condensed
substructure matrices and other body properties for use with
AVL EXCITE. Refer to ANSYS Interface to AVL EXCITE in the
Substructuring Analysis Guide for more information.
Notes
-----
CMS employs the Block Lanczos eigensolution method in the generation
pass.
CMS supports damping matrix reduction when a damping matrix exists. Set
the matrix generation key to 3 (SEOPT,Sename,SEMATR) to generate and
then reduce stiffness, mass, and damping matrices.
CMS does not support the SEOPT,,,,,RESOLVE command. Instead, ANSYS sets
the expansion method for the expansion pass (EXPMTH) to BACKSUB.
For more information about performing a CMS analysis, see Component
Mode Synthesis in the Substructuring Analysis Guide.
If IOKEY = TCMS is used to output the transformation matrix, then only
ITEM = NSOL is valid in the OUTPR command. In the interactive
sessions, the transformation matrix will not be output if the model has
more than 10 elements.
This command is also valid in /PREP7.
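Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); request
the fixed-interface method with 10 normal modes:

>>> mapdl.cmsopt('FIX', 10)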
"""<line_sep>command=f"CMSOPT,{cmsmeth},{nmode},{freqb},{freqe},{fbddef},{fbdval},{iokey}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cncheck self option="" rid1="" rid2="" rinc="" intertype="" trlevel="" cgap="" cpen="" ioff="" **kwargs <block_start>"""Provides and/or adjusts the initial status of contact pairs.
APDL Command: CNCHECK
Parameters
----------
option
Option to be performed:
* ``"DETAIL"`` : List all contact pair properties (default).
* ``"SUMMARY"`` : List only the open/closed status for each
contact pair.
* ``"POST"`` : Execute a partial solution to write the initial
contact configuration to the Jobname.RCN file.
* ``"ADJUST"`` : Physically move contact nodes to the target
in order to close a gap or reduce penetration. The initial
adjustment is converted to structural displacement values
(UX, UY, UZ) and stored in the Jobname.RCN file.
* ``"MORPH"`` : Physically move contact nodes to the target in
order to close a gap or reduce penetration, and also morph
the underlying solid mesh. The initial adjustment of contact
nodes and repositioning of solid element nodes due to mesh
morphing are converted to structural displacement values
(UX, UY, UZ) and stored in the Jobname.RCN file.
* ``"RESET"`` : Reset target element and contact element key
options and real constants to their default values. This
option is not valid for general contact.
* ``"AUTO"`` : Automatically sets certain real constants and
key options to recommended values or settings in order to
achieve better convergence based on overall contact pair
behaviors. This option is not valid for general contact.
* ``"TRIM"`` : Trim contact pair (remove certain contact and
target elements).
* ``"UNSE"`` : Unselect certain contact and target elements.
rid1, rid2, rinc
For pair-based contact, the range of real constant pair IDs
for which Option will be performed. If RID2 is not specified,
it defaults to RID1. If no value is specified, all contact
pairs in the selected set of elements are considered.
For general contact (InterType = GCN), RID1 and RID2 are
section IDs associated with general contact surfaces instead
of real constant IDs. If RINC = 0, the Option is performed
between the two sections, RID1 and RID2. If RINC > 0, the
Option is performed among all specified sections (RID1 to RID2
with increment of RINC).
intertype
The type of contact interface (pair-based versus general
contact) to be considered; or the type of contact pair to be
trimmed/unselected/auto-set.
The following labels specify the type of contact interface:
* ``""`` : (blank) Include all contact definitions (pair-based
and general contact).
* ``"GCN"`` : Include general contact definitions only (not valid when Option = RESET or AUTO).
The following labels specify the type of contact pairs to be
trimmed/unselected/auto-set (used only when Option = TRIM,
UNSE, or AUTO, and only for pair-based contact definitions):
* ``"ANY"`` : All types (default).
* ``"MPC"`` : MPC-based contact pairs (KEYOPT(2) = 2).
* ``"BOND"`` : Bonded contact pairs (KEYOPT(12) = 3, 5, 6).
* ``"NOSP"`` : No separation contact pairs (KEYOPT(12) = 2, 4).
* ``"INAC"`` : Inactive contact pairs (symmetric contact pairs for MPC contact or KEYOPT(8) = 2).
* ``"TRlevel"`` : mming level (used only when Option = TRIM, UNSE, or MORPH):
* ``"(blank)"`` : Normal trimming (default): remove/unselect contact and target elements which are in far-field.
* ``"AGGRE"`` : Aggressive trimming: remove/unselect contact and target elements which are in far-field, and certain elements in near-field.
cgap
Control parameter for the opening gap. Valid only when
Option = ADJUST or MORPH. Close the opening gap if the absolute
value of the gap is smaller than the CGAP value. CGAP defaults to
``0.25*PINB`` (where PINB is the pinball radius) for bonded and
no-separation contact; otherwise it defaults to the value of real
constant ICONT.
cpen
Control parameter for initial penetration. Valid only when
Option = ADJUST or MORPH. Close the initial penetration if the
absolute value of the penetration is smaller than the CPEN value.
CPEN defaults to ``0.25*PINB`` (where PINB is the pinball radius)
for any type of interface behavior (either bonded or standard
contact).
ioff
Control parameter for initial adjustment. Valid only when
Option = ADJUST or MORPH. Input a positive value to adjust the
contact nodes towards the target surface with a constant
interference distance equal to IOFF. Input a negative value to
adjust the contact node towards the target surface with a uniform
gap distance equal to the absolute value of IOFF.
Notes
-----
The CNCHECK command provides information for surface-to-surface,
node-to-surface, and line-to-line contact pairs (element types
TARGE169, TARGE170, CONTA171, CONTA172, CONTA173, CONTA174,
CONTA175, CONTA176, CONTA177). All contact and target elements of
interest, along with the solid elements and nodes attached to
them, must be selected for the command to function properly. For
performance reasons, the program uses a subset of nodes and
elements based on the specified contact regions (RID1, RID2, RINC)
when executing the CNCHECK command.
For additional details, see the notes section at:
https://www.mm.bme.hu/~gyebro/files/ans_help_v182/ans_cmd/Hlp_C_CNCHECK.html
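Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); list the
open/closed status of all selected contact pairs:

>>> mapdl.cncheck('SUMMARY')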
"""<line_sep>command=f"CNCHECK,{option},{rid1},{rid2},{rinc},{intertype},{trlevel},{cgap},{cpen},{ioff}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cnkmod self itype="" knum="" value="" **kwargs<block_start>"""Modifies contact element key options.
APDL Command: CNKMOD
Parameters
----------
itype
Contact element type number as defined on the ET command.
knum
Number of the KEYOPT to be modified (KEYOPT(KNUM)).
value
Value to be assigned to the KEYOPT.
Notes
-----
The CNKMOD command has the same syntax as the KEYOPT command. However,
it is valid only in the SOLUTION processor. This command is intended
only for use in a linear perturbation analysis, and can only be used to
modify certain contact element KEYOPT values as described below.
Modifying KEYOPT(12)
One use for this command is to modify contact interface behavior
between load steps in a linear perturbation analysis; it allows the
user to control the contact status locally per contact pair. For this
application, this command is limited to changing the contact interface
behavior key option: KEYOPT(12) of CONTA171, CONTA172, CONTA173,
CONTA174, CONTA175, CONTA176, and CONTA177; and KEYOPT(10) of CONTA178.
When used for this purpose, the command adjusts the contact status from
the linear perturbation base analysis (at the point of restart) as
described in the table below. Note that CNKMOD allows you to take
points in the base analysis that are near contact (within the pinball
region) and modify them to be treated as "in contact" in the
perturbation analysis; see the "1 - near-field" row with KEYOPT(12)
values set to 4 or 5. CNKMOD also allows you to take points that are
sliding in the base analysis and treat them as sticking in the
perturbation analysis, irrespective of the MU value; see the "2 -
sliding" row with KEYOPT(12) values set to 1,3, 5, or 6.
Table 128: Adjusted Contact Status when CNKMOD is Issued. (The
full table appears in the ANSYS documentation; the adjusted status
depends on whether the point lies inside or outside of the
adjusted pinball region.)
If an open gap exists at the end of the previous load step and the
contact status is adjusted as sliding or sticking due to a "bonded" or
"no separation" contact behavior definition, then the program will
treat it as near-field contact when executing CNKMOD in the subsequent
load steps.
In the linear perturbation analysis procedure, contact status can also
be controlled or modified by the PERTURB command. The contact status
always follows local controls defined by the CNKMOD command first, and
is then adjusted by the global sticking or bonded setting (ContKey =
STICKING or BONDED) on the PERTURB command (see the PERTURB command for
details).
Modifying KEYOPT(3)
Another use for this command is to change the units of normal contact
stiffness (contact element real constant FKN) in a linear perturbation
modal analysis that is used to model brake squeal. For contact elements
CONTA171, CONTA172, CONTA173, and CONTA174, KEYOPT(3) controls the
units of normal contact stiffness. You can issue the command
CNKMOD,ITYPE,3,1 during the first phase of the linear perturbation
analysis in order to change the units of normal contact stiffness from
FORCE/LENGTH3 (in the base analysis) to FORCE/LENGTH. Note that
KEYOPT(3) = 1 is valid only when a penalty-based algorithm is used
(KEYOPT(2) = 0 or 1) and the absolute normal contact stiffness value is
explicitly specified (that is, a negative value input for real constant
FKN).
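Examples
--------
A minimal sketch (hypothetical element type number, assuming an
active ``mapdl`` session); set KEYOPT(12) = 5 for contact element
type 3 in a linear perturbation analysis:

>>> mapdl.cnkmod(3, 12, 5)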
"""<line_sep>command=f"CNKMOD,{itype},{knum},{value}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cntr self option="" key="" **kwargs<block_start>"""Redirects contact pair output quantities to a text file.
APDL Command: CNTR
Parameters
----------
option
Output option:
OUT - Contact output control.
key
Control key:
NO - Write contact information to the output file or to the screen (default).
YES - Write contact information to the Jobname.CNM file.
Notes
-----
Issue the command CNTR,OUT,YES to redirect contact pair output
quantities to the Jobname.CNM file.
To ensure that the contact information is written to Jobname.CNM,
reissue CNTR,OUT,YES each time you reenter the solution processor
(/SOLU).
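Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); redirect
contact pair output to the Jobname.CNM file:

>>> mapdl.cntr('OUT', 'YES')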
"""<line_sep>command=f"CNTR,{option},{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>cutcontrol self lab="" value="" option="" **kwargs<block_start>"""Controls time-step cutback during a nonlinear solution.
APDL Command: CUTCONTROL
Parameters
----------
lab
Specifies the criteria for causing a cutback. Valid labels are:
PLSLIMIT - Maximum equivalent plastic strain allowed within a time-step (substep). If the
calculated value exceeds the VALUE, the program
performs a cutback (bisection). VALUE defaults to 0.15
(15%).
CRPLIMIT - Set values for calculating the maximum equivalent creep ratio allowed within a
time step. If the calculated maximum creep ratio
exceeds the defined creep ratio limit, the program
performs a cutback.
DSPLIMIT - Maximum incremental displacement within the solution field in a time step
(substep). If the maximum calculated value exceeds
VALUE, the program performs a cutback (bisection).
VALUE defaults to 1.0E7.
NPOINT - Number of points in a cycle for a second order dynamic equation, used to
control automatic time stepping. If the number of
solution points per cycle is less than VALUE, the program
performs a cutback in time step size. VALUE defaults to
13 for linear analysis, 5 for nonlinear analysis. A
larger number of points yields a more accurate solution
but also increases the solution run time.
This option works well for linear problems. For nonlinear
analyses, other factors such as contact status changes and
solution convergence rate can overwrite NPOINT. See Automatic
Time Stepping in the Mechanical APDL Theory Reference for more
information on automatic time stepping.
NOITERPREDICT - If VALUE is 0 (default), an internal auto time step
scheme will predict the number of iterations for
nonlinear convergence and perform a cutback earlier
than the number of iterations specified by the NEQIT
command. This is the recommended option. If VALUE
is 1, the solution will iterate (if nonconvergent)
to NEQIT number of iterations before a cutback is
invoked. It is sometimes useful for poorly-convergent
problems, but rarely needed in general.
Bisection is also controlled by contact status change, plasticity
or creep strain limit, and other factors. If any of these factors
occur, bisection will still take place, regardless of the
NOITERPREDICT setting.
CUTBACKFACTOR - Changes the cutback value for bisection. Default is 0.5. VALUE must be greater
than 0.0 and less than 1.0. This option is active
only if AUTOTS,ON is set.
value
Numeric value for the specified cutback criterion. For Lab =
CRPLIMIT, VALUE is the creep criteria for the creep ratio limit.
option
Type of creep analysis. Valid for Lab = CRPLIMIT only.
IMPRATIO - Set the maximum creep ratio value for implicit creep. The default is 0.0 (i.e.,
no creep limit control) and any positive value is
valid. (See Implicit Creep Procedure in the Structural
Analysis Guide for information on how to define
implicit creep.)
EXPRATIO - Set the maximum creep ratio value for explicit creep. The default value is 0.1
and any positive value up to 0.25 is allowed. (See
Explicit Creep Procedure in the Structural Analysis
Guide for information on how to define explicit
creep.)
STSLIMIT - Stress threshold for calculating the creep ratio. For integration points with
effective stress below this threshold, the creep ratio
does not cause cutback. The default value is 0.0 and
any positive value is valid.
STNLIMIT - Elastic strain threshold for calculating the creep ratio. For integration
points with effective elastic strain below this
threshold, the creep ratio does not cause cutback. The
default value is 0.0 and any positive value is valid.
Notes
-----
A cutback is a method for automatically reducing the step size when
either the solution error is too large or the solution encounters
convergence difficulties during a nonlinear analysis.
Should a convergence failure occur, the program reduces the time step
interval to a fraction of its previous size and automatically continues
the solution from the last successfully converged time step. If the
reduced time step again fails to converge, the program again reduces
the time step size and proceeds with the solution. This process
continues until convergence is achieved or the minimum specified time
step value is reached.
For creep analysis, the cutback procedure is similar; the process
continues until the minimum specified time step size is reached.
However, if the creep ratio limit is exceeded, the program issues a
warning but continues the substep until the analysis is complete. In
this case, convergence is achieved but the creep ratio criteria is not
satisfied.
The CRPLIM command is functionally equivalent to Lab = CRPLIMIT
with options IMPRATIO and EXPRATIO.
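Examples
--------
A minimal sketch (hypothetical limit, assuming an active ``mapdl``
session); tighten the plastic strain cutback limit to 10%:

>>> mapdl.cutcontrol('PLSLIMIT', 0.10)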
"""<line_sep>command=f"CUTCONTROL,{lab},{value},{option}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>ddoption self decomp="" **kwargs<block_start>"""Sets domain decomposer option for Distributed ANSYS.
APDL Command: DDOPTION
Parameters
----------
decomp
Controls which domain decomposition algorithm to use.
AUTO - Use the default domain decomposition algorithm when splitting the model into
domains for Distributed ANSYS (default).
GREEDY - Use the "greedy" domain decomposition algorithm.
METIS - Use the METIS graph partitioning domain decomposition algorithm.
Notes
-----
This command controls options relating to the domain decomposition
algorithm used by Distributed ANSYS to split the model into pieces (or
domains), with each piece being solved on a different processor.
The greedy domain decomposition algorithm starts from a single element
at a corner of the model. The domain grows by taking the properly
connected neighboring elements and stops after reaching the optimal
size.
The METIS domain decomposition algorithm starts by creating a graph
from the finite element mesh. It then uses a multilevel graph
partitioning scheme which reduces the size of the original graph,
creates domains using the reduced graph, and then creates the final CPU
domains by expanding the smaller domains from the reduced graph back to
the original mesh.
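Examples
--------
A minimal sketch (assuming an active ``mapdl`` session running
Distributed ANSYS); select the METIS decomposition algorithm:

>>> mapdl.ddoption('METIS')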
"""<line_sep>command=f"DDOPTION,{decomp}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>dmpext self smode="" tmode="" dmpname="" freqb="" freqe="" nsteps="" **kwargs<block_start>"""Extracts modal damping coefficients in a specified frequency range.
APDL Command: DMPEXT
Parameters
----------
smode
Source mode number. There is no default for this field; you must
enter an integer greater than zero.
tmode
Target mode. Defaults to SMODE.
dmpname
Array parameter name containing the damping results. Defaults to
d_damp.
freqb
Beginning frequency range (real number greater than zero) or 'EIG'
at eigenfrequency of source mode. 'EIG' is valid only if SMODE =
TMODE. Note that EIG must be enclosed in single quotes when this
command is used on the command line or in an input file. There is
no default for this field; you must enter a value.
freqe
End of frequency range. Must be blank for Freqb = EIG. Default is
Freqb.
nsteps
Number of substeps. Defaults to 1.
Notes
-----
DMPEXT invokes an ANSYS macro that uses modal projection techniques to
compute the damping force by the modal velocity of the source mode onto
the target mode. From the damping force, damping parameters are
extracted. DMPEXT creates an array parameter Dmpname, with the
following entries in each row:
response frequency
modal damping coefficient
modal squeeze stiffness coefficient
damping ratio
squeeze-to-structural stiffness ratio
The macro requires the modal displacements from the file Jobname.EFL
obtained from the RMFLVEC command. In addition, a node component FLUN
must exist from all FLUID136 nodes. The computed damping ratio may be
used to specify constant or modal damping by means of the DMPRAT or
MDAMP commands. For Rayleigh damping, use the ABEXTRACT command to
compute ALPHAD and BETAD damping parameters. See Thin Film Analysis for
more information on thin film analyses.
The macro uses the LSSOLVE command to perform two load steps for each
frequency. The first load case contains the solution of the source
mode excitation and can be used for further postprocessing. Solid model
boundary conditions are deleted from the model. In addition,
prescribed nodal boundary conditions are applied to the model. You
should carefully check the boundary conditions of your model prior to
executing a subsequent analysis.
This command is also valid in PREP7.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
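Examples
--------
A minimal sketch (hypothetical frequency range, assuming an active
``mapdl`` session and that RMFLVEC has produced Jobname.EFL):

>>> mapdl.dmpext(1, 1, 'd_damp', 100, 1000, 10)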
"""<line_sep>command=f"DMPEXT,{smode},{tmode},{dmpname},{freqb},{freqe},{nsteps}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>dmpoption self filetype="" combine="" **kwargs<block_start>"""Specifies distributed memory parallel (Distributed ANSYS) file
combination options.
APDL Command: DMPOPTION
Parameters
----------
filetype
Type of solution file to combine after a distributed memory
parallel solution. There is no default; if (blank), the command is
ignored.
RST - Results files (.RST, .RTH, .RMG, .RSTP)
EMAT - Element matrix files (.EMAT).
ESAV - Element saved data files (.ESAVE)
MODE - Modal results files (.MODE)
MLV - Modal load vector file (.MLV)
IST - Initial state file (.IST)
FULL - Full matrix file (.FULL)
RFRQ - Reduced complex displacement file (.RFRQ)
RDSP - Reduced displacement file (.RDSP)
combine
Option to combine solution files.
Yes - Combine solution files (default).
No - Do not combine solution files.
Notes
-----
The DMPOPTION command controls how solution files are written during a
distributed memory parallel (Distributed ANSYS) solution. This command
is most useful for controlling how results files (.RST,.RTH, etc.) are
written.
In a distributed memory parallel solution, a local results file is
written by each process (JobnameN.ext, where N is the process number).
By default, the program automatically combines the local results files
(for example, JobnameN.RST) upon leaving the SOLUTION processor (for
example, upon the FINISH command) into a single global results file
(Jobname.RST) which can be used in ANSYS postprocessing. To reduce the
amount of communication and I/O performed by this operation, you can
issue the command DMPOPTION,RST,NO to bypass this step of combining the
local results files; the local files will remain on the local disks in
the current working directory. You can then use the RESCOMBINE command
macro in the POST1 general postprocessor (/POST1) to read all results
into the database for postprocessing.
The RESCOMBINE command macro is intended for use with POST1. If you
want to postprocess distributed parallel solution results using the
POST26 time-history postprocessor (/POST26), it is recommended that you
combine your local results files into one global results file
(DMPOPTION,RST,YES or COMBINE).
Local .EMAT, .ESAV, .MODE, .MLV, .IST, .RFRQ, .RDSP, and .FULL files
are also written (when applicable) by each process in a distributed
memory parallel solution. If these files are not needed for a
downstream solution or operation, you can issue the command
DMPOPTION,FileType,NO for each file type to bypass the file combination
step and thereby improve performance. You should not bypass the file
combination step if a downstream PSD analysis or modal expansion pass
will be performed.
If DMPOPTION,MODE,NO or DMPOPTION,RST,NO is specified in a modal
analysis, element results cannot be written to the combined mode file
(Jobname.MODE). In this case, if Distributed ANSYS is used in a
downstream harmonic or transient analysis that uses the mode-
superposition method, the MSUPkey on the MXPAND command can retain its
value. However, if shared memory parallel processing is used in the
downstream harmonic or transient analysis, the MSUPkey is effectively
set to NO.
The DMPOPTION command can be changed between load steps; however, doing
so will not affect which set of solution files are combined. Only the
last values of FileType and Combine upon leaving the solution processor
will be used to determine whether the solution files are combined. For
example, given a two load step solution and FileType = RST, setting
Combine = NO for the first load step and YES for the second load step
will cause all sets on the local results files to be combined. If the
opposite is true (Combine = YES for the first load step and NO for the
second load step), no results will be combined.
After using DMPOPTION to suppress file combination, you may find it
necessary to combine the local files for a specific FileType for use in
a subsequent analysis. In this case, use the COMBINE command to combine
local solution files into a single, global file.
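Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); bypass
combining the local results files:

>>> mapdl.dmpoption('RST', 'NO')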
"""<line_sep>command=f"DMPOPTION,{filetype},{combine}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>dspoption self reord_option="" memory_option="" memory_size="" solve_info="" **kwargs<block_start>"""Sets memory option for the distributed sparse solver.
APDL Command: DSPOPTION
Parameters
----------
reord_option
Reordering option:
DEFAULT - Use the default reordering scheme.
SEQORDER - Use a sequential equation reordering scheme
within the distributed sparse solver. Relative
to PARORDER, this option typically results in
longer equation ordering times and therefore
longer overall solver times. Occasionally,
however, this option will produce better
quality orderings which decrease the matrix
factorization times and improve overall solver
performance.
PARORDER - Use a parallel equation reordering scheme
within the distributed sparse solver. Relative
to SEQORDER, this option typically results in
shorter equation ordering times and therefore
shorter overall solver times. Occasionally,
however, this option will produce lower quality
orderings which increase the matrix
factorization times and degrade overall solver
performance.
memory_option
Memory allocation option:
DEFAULT - Use the default memory allocation strategy for
the distributed sparse solver. The default
strategy attempts to run in the INCORE memory
mode. If there is not enough physical memory
available when the solver starts to run in the
INCORE memory mode, the solver will then attempt
to run in the OUTOFCORE memory mode.
INCORE - Use a memory allocation strategy in the
distributed sparse solver that will attempt to
obtain enough memory to run with the entire
factorized matrix in memory. This option uses the
most amount of memory and should avoid doing any
I/O. By avoiding I/O, this option achieves
optimal solver performance. However, a
significant amount of memory is required to run
in this mode, and it is only recommended on
machines with a large amount of memory. If the
allocation for in-core memory fails, the solver
will automatically revert to out-of-core memory
mode.
OUTOFCORE - Use a memory allocation strategy in the
distributed sparse solver that will attempt to
allocate only enough work space to factor each
individual frontal matrix in memory, but will
share the entire factorized matrix on
disk. Typically, this memory mode results in
poor performance due to the potential
bottleneck caused by the I/O to the various
files written by the solver.
FORCE - This option, when used in conjunction with the
Memory_Size option, allows you to force the
distributed sparse solver to run with a specific
amount of memory. This option is only recommended
for the advanced user who understands distributed
sparse solver memory requirements for the problem
being solved, understands the physical memory on
the system, and wants to control the distributed
sparse solver memory usage.
memory_size
Initial memory size allocation for the sparse solver in
MB. The Memory_Size setting should always be well within
the physical memory available, but not so small as to
cause the distributed sparse solver to run out of
memory. Warnings and/or errors from the distributed sparse
solver will appear if this value is set too low. If the
FORCE memory option is used, this value is the amount of
memory allocated for the entire duration of the
distributed sparse solver solution.
solve_info
Solver output option:
OFF - Turns off additional output printing from the
distributed sparse solver (default).
PERFORMANCE - Turns on additional output printing from the
distributed sparse solver, including a
performance summary and a summary of file
I/O for the distributed sparse
solver. Information on memory usage during
assembly of the global matrix (that is,
creation of the Jobname.FULL file) is also
printed with this option.
Notes
-----
This command controls options related to the distributed sparse solver
in all analysis types where the distributed sparse solver can be used.
The amount of memory required for the distributed sparse solver is
unknown until the matrix structure is preprocessed, including equation
reordering. The amount of memory allocated for the distributed sparse
solver is then dynamically adjusted to supply the solver what it needs
to compute the solution.
If you have a large memory system, you may want to try selecting the
INCORE memory mode for larger jobs to improve performance. Also, when
running the distributed sparse solver with many processors on the same
machine or on a machine with very slow I/O performance (e.g., slow hard
drive speed), you may want to try using the INCORE memory mode to
achieve better performance. However, doing so may require much more
memory compared to running in the OUTOFCORE memory mode.
Running with the INCORE memory mode is best for jobs which comfortably
fit within the limits of the physical memory on a given system. If the
distributed sparse solver workspace exceeds physical memory size, the
system will be forced to use virtual memory (or the system page/swap
file). In this case, it is typically more efficient to run with the
OUTOFCORE memory mode.
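Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); keep the
default reordering but force the in-core memory mode:

>>> mapdl.dspoption('DEFAULT', 'INCORE')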
"""<line_sep>command=(f"DSPOPTION,{reord_option},{memory_option},{memory_size},,,{solve_info}")<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>exbopt self outinv2="" outtcms="" outsub="" outcms="" outcomp="" outrm="" noinv="" outele="" **kwargs <block_start>"""Specifies .EXB file output options in a CMS generation pass.
APDL Command: EXBOPT
Parameters
----------
outinv2
Output control for 2nd order invariant:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the second order invariant.
outtcms
Output control for .TCMS file:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the .TCMS file.
outsub
Output control for .SUB file:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the .SUB file.
OUTCMS
Output control for .CMS file:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the .CMS file.
outcomp
Output control for node and element component information:
* ``"0"`` : Do not output any component information.
* ``"1"`` : Output node component information only.
* ``"2"`` : Output element component information only.
* ``"3"`` : Output both node and element component information (default).
outrm
Output control for the recovery matrix:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the recovery matrix to file.EXB.
* ``"2"`` : Output the recovery matrix to a separate file, file_RECOVER.EXB.
noinv
Invariant calculation:
* ``"0"`` : Calculate all invariants (default).
* ``"1"`` : Suppress calculation of the 1st and 2nd order
invariants. NOINV = 1 suppresses OUTINV2 = 1.
OUTELE
Output control for the element data:
* ``"0"`` : Do not output (default).
* ``"1"`` : Output the element data.
Notes
-----
When the body property file (file.EXB) is requested in a CMS
generation pass (CMSOPT,,,,,,,EXB command), the .TCMS, .SUB, and
.CMS files are not output by default. Use the EXBOPT command to
request these files, as needed.
EXBOPT can also be used to manage some content in the .EXB file
for improving performance and storage (see the OUTINV2, OUTCOMP,
OUTRM, NOINV, and OUTELE arguments described above).
If both recovery matrix output (OUTRM = 1 or 2) and the .TCMS file
(OUTTCMS = 1) are requested, the .TCMS file writing is turned off
due to potentially large in-core memory use.
For more information on how to generate file.EXB, see ANSYS
Interface to AVL EXCITE in the Mechanical APDL Substructuring
Analysis Guide.
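Examples
--------
A minimal sketch (assuming an active ``mapdl`` session in a CMS
generation pass); also request the .SUB and .CMS files:

>>> mapdl.exbopt(outsub=1, outcms=1)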
"""<line_sep>command=f"EXBOPT,{outinv2},{outtcms},{outsub},{outcms},{outcomp},{outrm},{noinv},{outele}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>ematwrite self key:str="" **kwargs<arrow>Optional[str]<block_start>"""Forces the writing of all the element matrices to File.EMAT.
APDL Command: EMATWRITE
Parameters
----------
key
Write key:
YES - Forces the writing of the element matrices to
File.EMAT even if not normally
done.
NO - Element matrices are written only if required. This
value is the default.
Notes
-----
The EMATWRITE command forces ANSYS to write the File.EMAT
file. The file is necessary if you intend to follow the
initial load step with a subsequent inertia relief
calculation (IRLF). If used in the solution
processor (/SOLU), this command is only valid within the
first load step.
This command is also valid in PREP7.
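Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); force
writing of the element matrices before the first load step:

>>> mapdl.ematwrite('YES')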
"""<line_sep>command=f"EMATWRITE,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>eqslv self lab="" toler="" mult="" keepfile="" **kwargs<block_start>"""Specifies the type of equation solver.
APDL Command: EQSLV
Parameters
----------
lab
Equation solver type:
SPARSE - Sparse direct equation solver. Applicable to
real-value or complex-value symmetric and
unsymmetric matrices. Available only for STATIC,
HARMIC (full method only), TRANS (full method
only), SUBSTR, and PSD spectrum analysis types
[ANTYPE]. Can be used for nonlinear and linear
analyses, especially nonlinear analysis where
indefinite matrices are frequently
encountered. Well suited for contact analysis
where contact status alters the mesh
topology. Other typical well-suited applications
are: (a) models consisting of shell/beam or
shell/beam and solid elements (b) models with a
multi-branch structure, such as an automobile
exhaust or a turbine fan. This is an alternative
to iterative solvers since it combines both speed
and robustness. Generally, it requires
considerably more memory (~10x) than the PCG
solver to obtain optimal performance (running
totally in-core). When memory is limited, the
solver works partly in-core and out-of-core,
which can noticeably slow down the performance of
the solver. See the BCSOPTION command for more
details on the various modes of operation for
this solver.
This solver can be run in shared memory parallel or
distributed memory parallel (Distributed ANSYS) mode. When
used in Distributed ANSYS, this solver preserves all of
the merits of the classic or shared memory sparse
solver. The total sum of memory (summed for all processes)
is usually higher than the shared memory sparse
solver. System configuration also affects the performance
of the distributed memory parallel solver. If enough
physical memory is available, running this solver in the
in-core memory mode achieves optimal performance. The
ideal configuration when using the out-of-core memory mode
is to use one processor per machine on multiple machines
(a cluster), spreading the I/O across the hard drives of
each machine, assuming that you are using a high-speed
network such as Infiniband to efficiently support all
communication across the multiple machines.
This solver supports use of the GPU accelerator capability.
JCG - Jacobi Conjugate Gradient iterative equation
solver. Available only for STATIC, HARMIC (full
method only), and TRANS (full method only) analysis
types [ANTYPE]. Can be used for structural, thermal,
and multiphysics applications. Applicable for
symmetric, unsymmetric, complex, definite, and
indefinite matrices. Recommended for 3-D harmonic
analyses in structural and multiphysics
applications. Efficient for heat transfer,
electromagnetics, piezoelectrics, and acoustic field
problems.
This solver can be run in shared memory parallel or
distributed memory parallel (Distributed ANSYS) mode. When
used in Distributed ANSYS, in addition to the limitations
listed above, this solver only runs in a distributed
parallel fashion for STATIC and TRANS (full method)
analyses in which the stiffness is symmetric and only when
not using the fast thermal option (THOPT). Otherwise, this
solver runs in shared memory parallel mode inside
Distributed ANSYS.
This solver supports use of the GPU accelerator capability.
When using the GPU accelerator
capability, in addition to the limitations listed above,
this solver is available only for STATIC and TRANS (full
method) analyses where the stiffness is symmetric and does
not support the fast thermal option (THOPT).
ICCG - Incomplete Cholesky Conjugate Gradient iterative
equation solver. Available for STATIC, HARMIC (full
method only), and TRANS (full method only) analysis
types [ANTYPE]. Can be used for structural,
thermal, and multiphysics applications, and for
symmetric, unsymmetric, complex, definite, and
indefinite matrices. The ICCG solver requires more
memory than the JCG solver, but is more robust than
the JCG solver for ill-conditioned matrices.
This solver can only be run in shared memory parallel
mode. This is also true when the solver is used inside
Distributed ANSYS.
This solver does not support use of the GPU accelerator
capability.
QMR - Quasi-Minimal Residual iterative equation
solver. Available for the HARMIC (full method only)
analysis type [ANTYPE]. Can be used for
high-frequency electromagnetic applications, and for
symmetric, complex, definite, and indefinite
matrices. The QMR solver is more stable than the
ICCG solver.
This solver can only be run in shared memory parallel
mode. This is also true when the solver is used inside
Distributed ANSYS.
This solver does not support use of the GPU accelerator
capability.
PCG - Preconditioned Conjugate Gradient iterative equation
solver (licensed from Computational Applications and
Systems Integration, Inc.). Requires less disk file
space than SPARSE and is faster for large
models. Useful for plates, shells, 3-D models, large
2-D models, and other problems having symmetric,
sparse, definite or indefinite matrices for
nonlinear analysis. Requires twice as much memory
as JCG. Available only for analysis types [ANTYPE]
STATIC, TRANS (full method only), or MODAL (with PCG
Lanczos option only). Also available for the use
pass of substructure analyses (MATRIX50). The PCG
solver can robustly solve equations with constraint
equations (CE, CEINTF, CPINTF, and CERIG). With
this solver, you can use the MSAVE command to obtain
a considerable memory savings.
The PCG solver can handle ill-conditioned problems by
using a higher level of difficulty (see
PCGOPT). Ill-conditioning arises from elements with high
aspect ratios, contact, and plasticity.
This solver can
be run in shared memory parallel or distributed memory
parallel (Distributed ANSYS) mode. When used in
Distributed ANSYS, this solver preserves all of the merits
of the classic or shared memory PCG solver. The total sum
of memory (summed for all processes) is about 30% more
than the shared memory PCG solver.
toler
Iterative solver tolerance value. Used only with the
Jacobi Conjugate Gradient, Incomplete Cholesky Conjugate
Gradient, Preconditioned Conjugate Gradient, and
Quasi-Minimal Residual equation solvers. For the PCG
solver, the default is 1.0E-8. The value 1.0E-5 may be
acceptable in many situations. When using the PCG Lanczos
mode extraction method, the default solver tolerance value
is 1.0E-4. For the JCG and ICCG solvers with symmetric
matrices, the default is 1.0E-8. For the JCG and ICCG
solvers with unsymmetric matrices, and for the QMR solver,
the default is 1.0E-6. Iterations continue until the SRSS
norm of the residual is less than TOLER times the norm of
the applied load vector. For the PCG solver in the linear
static analysis case, 3 error norms are used. If one of
the error norms is smaller than TOLER, and the SRSS norm
of the residual is smaller than 1.0E-2, convergence is
assumed to have been reached. See Iterative Solver in the
Mechanical APDL Theory Reference for details.
mult
Multiplier (defaults to 2.5 for nonlinear analyses; 1.0
for linear analyses) used to control the maximum number of
iterations performed during convergence calculations. Used
only with the Preconditioned Conjugate Gradient equation
solver (PCG). The maximum number of iterations is equal to
the multiplier (MULT) times the number of degrees of
freedom (DOF). If MULT is input as a negative value, then
the maximum number of iterations is equal to abs(MULT).
Iterations continue until either the maximum number of
iterations or solution convergence has been reached. In
general, the default value for MULT is adequate for
reaching convergence. However, for ill-conditioned
matrices (that is, models containing elements with high
aspect ratios or material type discontinuities) the
multiplier may be used to increase the maximum number of
iterations used to achieve convergence. The recommended
range for the multiplier is 1.0 <= MULT <= 3.0. Normally, a
value greater than 3.0 adds no further benefit toward
convergence, and merely increases time requirements. If
the solution does not converge with 1.0 <= MULT <= 3.0, or in
less than 10,000 iterations, then convergence is highly
unlikely and further examination of the model is
recommended. Rather than increasing the default value of
MULT, consider increasing the level of difficulty
(Lev_Diff) on the PCGOPT command.
keepfile
Determines whether files from a SPARSE solver run should be deleted
or retained. Applies only to Lab = SPARSE for static and full
transient analyses.
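Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); select
the PCG solver with a tightened tolerance:

>>> mapdl.eqslv('PCG', 1e-8)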
"""<line_sep><return>self.run(f"EQSLV,{lab},{toler},{mult},,{keepfile}" **kwargs)<block_end><def_stmt>eresx self key="" **kwargs<block_start>"""Specifies extrapolation of integration point results.
APDL Command: ERESX
Parameters
----------
key
Extrapolation key:
DEFA - If element is fully elastic (no active plasticity, creep, or swelling
nonlinearities), extrapolate the integration point results
to the nodes. If any portion of the element is plastic (or
other active material nonlinearity), copy the integration
point results to the nodes (default).
YES - Extrapolate the linear portion of the integration point results to the nodes
and copy the nonlinear portion (for example, plastic
strains).
NO - Copy the integration point results to the nodes.
Notes
-----
Specifies whether the solution results at the element integration
points are extrapolated or copied to the nodes for element and nodal
postprocessing. The structural stresses, elastic and thermal strains,
field gradients, and fluxes are affected. Nonlinear data (plastic,
creep, and swelling strains) are always copied to the nodes, never
extrapolated. For shell elements, ERESX applies only to integration
point results in the in-plane directions.
This command is also valid in PREP7.
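Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); copy the
integration point results to the nodes without extrapolation:

>>> mapdl.eresx('NO')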
"""<line_sep>command=f"ERESX,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>escheck self sele:str="" levl:str="" defkey:MapdlInt="" **kwargs<arrow>Optional[str]<block_start>"""Perform element shape checking for a selected element set.
APDL Command: ESCHECK
Parameters
----------
sele
Specifies whether to select elements for checking:
(blank) - List all warnings/errors from element shape
checking.
ESEL - Select the elements based on the Levl criteria
specified below.
levl
WARN - Select elements producing warning and error messages.
ERR - Select only elements producing error messages
(default).
defkey
Specifies whether the check should be performed on deformed
element shapes.
0 - Do not update node coordinates before performing
shape checks (default).
1 - Update node coordinates using the current set of
deformations in the database.
Notes
-----
Shape checking will occur according to the current SHPP
settings. Although ESCHECK is valid in all processors,
Defkey uses the current results in the database. If no
results are available a warning will be issued.
This command is also valid in PREP7, SOLUTION and POST1.
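Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); select
only the elements that produce error messages:

>>> mapdl.escheck('ESEL', 'ERR')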
"""<line_sep>command=f"ESCHECK,{sele},{levl},{defkey}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>essolv self electit="" strutit="" dimn="" morphopt="" mcomp="" xcomp="" electol="" strutol="" mxloop="" ruseky="" restky="" eiscomp="" **kwargs <block_start>"""Performs a coupled electrostatic-structural analysis.
APDL Command: ESSOLV
Parameters
----------
electit
Title of the electrostatics physics file as assigned by the PHYSICS
command.
strutit
Title of the structural physics file as assigned by the PHYSICS
command.
dimn
Model dimensionality (a default is not allowed):
2 - 2-D model.
3 - 3-D model.
morphopt
Morphing option:
<0 - Do not perform any mesh morphing or remeshing.
0 - Remesh the non-structural regions for each recursive loop only if mesh morphing
fails (default).
1 - Remesh the non-structural regions each recursive loop and bypass mesh morphing.
2 - Perform mesh morphing only, do not remesh any non-structural regions.
mcomp
Component name of the region to be morphed. For 2-D models, the
component may be elements or areas. For 3-D models, the component
may be elements or volumes. A component must be specified. You
must enclose name-strings in single quotes in the ESSOLV command
line.
xcomp
Component name of entities excluded from morphing. In the 2-D
case, it is the component name for the lines excluded from
morphing. In the 3-D case, it is component name for the areas
excluded from morphing. Defaults to exterior non-shared entities
(see the DAMORPH, DVMORPH, and DEMORPH commands). You must enclose
name-strings in single quotes in the ESSOLV command line.
electol
Electrostatic energy convergence tolerance. Defaults to .005 (.5%)
of the value computed from the previous iteration. If less than
zero, the convergence criteria based on electrostatics results is
turned off.
strutol
Structural maximum displacement convergence tolerance. Defaults to
.005 (.5%) of the value computed from the previous iteration. If
less than zero, the convergence criteria based on structural results
is turned off.
mxloop
Maximum number of allowable solution recursive loops. A single
pass through both an electrostatics and structural analysis
constitutes one loop. Defaults to 100.
ruseky
Reuse flag option:
1 - Assumes initial run of ESSOLV using base geometry for
the first electrostatics solution.
>1 - Assumes ESSOLV run is a continuation of a previous
ESSOLV run, whereby the morphed geometry is used for
the initial electrostatic simulation.
restky
Structural restart key.
0 - Use static solution option for structural solution.
1 - Use static restart solution option for structural solution.
eiscomp
Element component name for elements containing initial stress data
residing in file jobname.ist. The initial stress data must be
defined prior to issuing ESSOLV (see INISTATE command).
Notes
-----
ESSOLV invokes an ANSYS macro which automatically performs a coupled
electrostatic-structural analysis.
The macro displays periodic updates of the convergence.
If non-structural regions are remeshed during the analysis, boundary
conditions and loads applied to nodes and elements will be lost.
Accordingly, it is better to assign boundary conditions and loads to
the solid model.
Use RUSEKY > 1 for solving multiple ESSOLV simulations for different
excitation levels (i.e., for running a voltage sweep). Do not issue the
SAVE command to save the database between ESSOLV calls.
For nonlinear structural solutions, the structural restart option
(RESTKY = 1) may improve solution time by starting from the previous
converged structural solution.
For solid elements, ESSOLV automatically detects the air-structure
interface and applies a Maxwell surface flag on the electrostatic
elements. This flag is used to initiate the transfer of forces from
the electrostatic region to the structure. When using the ESSOLV
command with structural shell elements (for example, SHELL181), you
must manually apply the Maxwell surface flag on all air elements
surrounding the shells before writing the final electrostatic physics
file. Use the SFA command to apply the Maxwell surface flag to the
areas representing the shell elements; doing so ensures that the air
elements next to both sides of the shells receive the Maxwell surface
flag.
If lower-order structural solids or shells are used, set KEYOPT(7) = 1
for the electrostatic element types to ensure the correct transfer of
forces.
Information on creating the initial stress file is documented in the
Loading chapter in the Basic Analysis Guide.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
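Examples
--------
A minimal sketch (hypothetical physics file titles and component
names, assuming an active ``mapdl`` session). Note the embedded
single quotes required for the name-strings, as stated above:

>>> mapdl.essolv("'ELEC'", "'STRU'", 2, mcomp="'MCOMP'")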
"""<line_sep>command=f"ESSOLV,{electit},{strutit},{dimn},{morphopt},{mcomp},{xcomp},{electol},{strutol},{mxloop},,{ruseky},{restky},{eiscomp}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>expass self key="" **kwargs<block_start>"""Specifies an expansion pass of an analysis.
APDL Command: EXPASS
Parameters
----------
key
Expansion pass key:
OFF - No expansion pass will be performed (default).
ON - An expansion pass will be performed.
Notes
-----
Specifies that an expansion pass of a modal, substructure, buckling,
transient, or harmonic analysis is to be performed.
Note: This separate solution pass requires an explicit FINISH of
the preceding analysis and reentry into SOLUTION.
This command is also valid in PREP7.
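Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); turn on
the expansion pass:

>>> mapdl.expass('ON')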
"""<line_sep>command=f"EXPASS,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>gauge self opt="" freq="" **kwargs<block_start>"""Gauges the problem domain for a magnetic edge-element formulation.
APDL Command: GAUGE
Parameters
----------
opt
Type of gauging to be performed:
ON - Perform tree gauging of the edge values (default).
OFF - Gauging is off. (You must specify custom gauging via APDL specifications.)
STAT - Gauging status (returns the current Opt and FREQ values)
freq
The following options are valid when Opt = ON:
0 - Generate tree-gauging information once, at the first load step. Gauging data is
retained for subsequent load steps. (This behavior is the
default.)
1 - Repeat gauging for each load step. Rewrites the gauging information at each
load step to accommodate changing boundary conditions on the AZ
degree of freedom (for example, adding or deleting AZ
constraints via the D or CE commands).
Notes
-----
The GAUGE command controls the tree-gauging procedure required for
electromagnetic analyses using an edge-based magnetic formulation
(elements SOLID236 and SOLID237).
Gauging occurs at the solver level for each solution (SOLVE). It sets
additional zero constraints on the edge-flux degrees of freedom AZ to
produce a unique solution; the additional constraints are removed after
solution.
Use the FREQ option to specify how the command generates gauging
information for multiple load steps.
Access the gauging information via the _TGAUGE component of gauged
nodes. The program creates and uses this component internally to remove
and reapply the AZ constraints required by gauging. If FREQ = 0, the
_TGAUGE component is created at the first load step and is used to
reapply the tree gauge constraints at subsequent load steps. If FREQ =
1, the tree-gauging information and the _TGAUGE component are generated
at every load step.
If gauging is turned off (GAUGE,OFF), you must specify your own gauging
at the APDL level.
This command is also valid in PREP7.
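Examples
--------
A minimal sketch (assuming an active ``mapdl`` session); repeat
tree gauging at every load step:

>>> mapdl.gauge('ON', 1)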
"""<line_sep>command=f"GAUGE,{opt},{freq}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>gmatrix self symfac="" condname="" numcond="" matrixname="" **kwargs<block_start>"""Performs electric field solutions and calculates the self and mutual
conductance between multiple conductors.
APDL Command: GMATRIX
Parameters
----------
symfac
Geometric symmetry factor. Conductance values are scaled by this
factor which represents the fraction of the total device modeled.
Defaults to 1.
condname
Alphanumeric prefix identifier used in defining named conductor
components.
numcond
Total number of components. If a ground is modeled, it is to be
included as a component.
matrixname
Array name for computed conductance matrix. Defaults to GMATRIX.
Notes
-----
To invoke the GMATRIX macro, the exterior nodes of each conductor must
be grouped into individual components using the CM command. Each set
of independent components is assigned a component name with a common
prefix followed by the conductor number. A conductor system with a
ground must also include the ground nodes as a component. The ground
component is numbered last in the component name sequence.
A ground conductance matrix relates current to a voltage vector. A
ground matrix cannot be applied to a circuit modeler. The lumped
conductance matrix is a combination of lumped "arrangements" of
voltage differences between conductors. Use the lumped conductance
terms in a circuit modeler to represent conductances between
conductors.
Enclose all name-strings in single quotes in the GMATRIX command line.
GMATRIX works with the following elements:
SOLID5 (KEYOPT(1) = 9)
SOLID98 (KEYOPT(1) = 9)
LINK68
PLANE230
SOLID231
SOLID232
This command is available from the menu path shown below only if
existing results are available.
This command does not support multiframe restarts.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
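Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session and
that the conductor nodes were already grouped, via CM, into
hypothetical components named COND1, COND2, and COND3, with the
ground component numbered last):

>>> mapdl.gmatrix(symfac=1, condname="COND", numcond=3,
...               matrixname="GCOND")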
"""<line_sep>command=f"GMATRIX,{symfac},{condname},{numcond},,{matrixname}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>lanboption self strmck="" **kwargs<block_start>"""Specifies Block Lanczos eigensolver options.
APDL Command: LANBOPTION
strmck
Controls whether the Block Lanczos eigensolver will perform a
Sturm sequence check:
* ``"OFF"`` : Do not perform the Sturm sequence check
(default).
* ``"ON"`` : Perform a Sturm sequence check. This requires
additional matrix factorization (which can be expensive),
but does help ensure that no modes are missed in the
specified range.
Notes
-----
LANBOPTION specifies options to be used with the Block Lanczos
eigensolver during an eigenvalue buckling analysis (BUCOPT,LANB)
or a modal analysis (MODOPT,LANB).
By default, the Sturm sequence check is off for the Block Lanczos
eigensolver when it is used in a modal analysis, and on when it is
used in a buckling analysis.
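Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Force a Sturm sequence check in a Block Lanczos modal analysis:

>>> mapdl.lanboption("ON")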
"""<line_sep><return>self.run(f"LANBOPTION,{strmck}" **kwargs)<block_end><def_stmt>lumpm self key="" **kwargs<block_start>"""Specifies a lumped mass matrix formulation.
APDL Command: LUMPM
Parameters
----------
key
Formulation key:
OFF - Use the element-dependent default mass matrix formulation (default).
ON - Use a lumped mass approximation.
Notes
-----
This command is also valid in PREP7. If used in SOLUTION, this command
is valid only within the first load step.
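Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Request the lumped mass approximation before solving:

>>> mapdl.lumpm("ON")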
"""<line_sep>command=f"LUMPM,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>moddir self key="" directory="" fname="" **kwargs<block_start>"""Activates the remote read-only modal files usage.
APDL Command: MODDIR
Parameters
----------
key
Key to activate the remote modal files usage
* ``"1 (ON or YES)"`` : The program performs the analysis
using remote modal files. The files are read-only.
* ``"0 (OFF or NO)"`` : The program performs the analysis
using modal files located in the working directory
(default).
directory
Directory path (248 characters maximum). The directory
contains the modal analysis files. The directory path
defaults to the current working directory.
fname
File name (no extension or directory path) for the modal
analysis files. The file name defaults to the current
Jobname.
Notes
-----
Only applies to spectrum analyses (ANTYPE,SPECTR).
Using the default for both the directory path (Directory) and the
file name (Fname) is not valid. At least one of these values must
be specified.
The MODDIR command must be issued during the first solution and at
the beginning of the solution phase (before LVSCALE in
particular).
Remote modal files usage is not supported when mode file reuse is
activated (modeReuseKey = YES on SPOPT).
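Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session; the
directory and job name below are hypothetical):

>>> mapdl.moddir(1, "/tmp/modal_files", "modaljob")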
"""<line_sep><return>self.run(f"MODDIR,{key},{directory},{fname}" **kwargs)<block_end><def_stmt>monitor self var="" node="" lab="" **kwargs<block_start>"""Controls contents of three variable fields in nonlinear solution
APDL Command: MONITOR
monitor file.
Parameters
----------
var
One of three variable field numbers in the monitor file whose
contents can be specified by the Lab field. Valid arguments are
integers 1, 2, or 3. See Notes section for default values.
node
The node number for which information is monitored in the specified
VAR field. In the GUI, if Node = P, graphical picking is enabled.
If blank, the monitor file lists the maximum value of the specified
quantity (Lab field) for the entire structure.
lab
The solution quantity to be monitored in the specified VAR field.
Valid labels for solution quantities are UX, UY, and UZ
(displacements); ROTX, ROTY, and ROTZ (rotations); and TEMP
(temperature). Valid labels for reaction force are FX, FY, and FZ
(structural force) and MX, MY, and MZ (structural moment). Valid
label for heat flow rate is HEAT. For defaults see the Notes
section.
Notes
-----
The monitor file always has an extension of .mntr, and takes its file
name from the specified Jobname. If no Jobname is specified, the file
name defaults to file.
You must issue this command once for each solution quantity you want to
monitor at a specified node at each load step. You cannot monitor a
reaction force during a linear analysis. The variable field contents
can be redefined at each load step by reissuing the command. The
monitored quantities are appended to the file for each load step.
Reaction forces reported in the monitor file may be incorrect if the
degree of freedom of the specified node is involved in externally
defined coupling (CP command) or constraint equations (CE command), or
if the program has applied constraint equations internally to the node.
The following example shows the format of a monitor file. Note that
the file only records the solution substep history when a substep is
convergent.
The following details the contents of the various fields in the monitor
file:
The current load step number.
The current substep (time step) number.
The number of attempts made in solving the current substep. This
number is equal to the number of failed attempts (bisections) plus one
(the successful attempt).
The number of iterations used by the last successful attempt.
Total cumulative number of iterations (including each iteration used by
a bisection).
Time or load factor increments for the current substep.
Total time (or load factor) for the last successful attempt in the
current substep.
Variable field 1. In this example, the field is reporting the UZ
value. By default, this field lists the CPU time used up to (but not
including) the current substep.
Variable field 2. In this example, the field is reporting the MZ
value. By default, this field lists the maximum displacement in the
entire structure.
Variable field 3. By default (and in the example), this field reports
the maximum equivalent plastic strain increment in the entire
structure.
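Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session; node
167 is hypothetical). Report the UZ displacement of node 167 in
variable field 1:

>>> mapdl.monitor(1, 167, "UZ")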
"""<line_sep>command=f"MONITOR,{var},{node},{lab}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>msave self key="" **kwargs<block_start>"""Sets the solver memory saving option. This option only applies to the
APDL Command: MSAVE
PCG solver (including PCG Lanczos).
Parameters
----------
key
Activation key:
0 or OFF - Use global assembly for the stiffness matrix (and mass matrix, when using PCG
Lanczos) of the entire model.
1 or ON - Use an element-by-element approach when possible to save memory during the
solution. In this case, the global stiffness (and mass)
matrix is not assembled; element stiffness (and mass) is
regenerated during PCG or PCG Lanczos iterations.
Notes
-----
MSAVE,ON only applies to and is the default for parts of the model
using the following element types with linear material properties that
meet the conditions listed below.
SOLID186 (Structural Solid only)
SOLID187
The following conditions must also be true:
The PCG solver has been specified.
Small strains are assumed (NLGEOM,OFF).
No prestress effects (PSTRES) are included.
All nodes on the supported element types must be defined (i.e., the
midside nodes cannot be removed using the EMID command).
For elements with thermally dependent material properties, MSAVE,ON
applies only to elements with uniform temperatures prescribed.
The default element coordinate system must be used.
If you manually force MSAVE,ON by including it in the input file, the
model can include the following additional conditions:
The analysis can be a modal analysis using the PCG Lanczos method
(MODOPT,LANPCG).
Large deflection effects (NLGEOM,ON) are included.
SOLID185 (brick shapes and KEYOPT(2) = 3 only) elements can be
included.
All other element types or other parts of the model that don't meet the
above criteria will be solved using global assembly (MSAVE,OFF). This
command can result in memory savings of up to 70 percent over the
global assembly approach for the part of the model that meets the
criteria. Depending on the hardware (e.g., processor speed, memory
bandwidth, etc.), the solution time may increase or decrease when this
feature is used.
This memory-saving feature runs in parallel when multiple processors
are used with the /CONFIG command or with Distributed ANSYS. The gain
in performance with using multiple processors with this feature turned
on should be similar to the default case when this feature is turned
off. Performance also improves when using the uniform reduced
integration option for SOLID186 elements.
This command does not support the layered option of the SOLID185 and
SOLID186 elements.
When using MSAVE,ON with the PCGOPT command, note the following
restrictions:
For static and modal analyses, MSAVE,ON is not valid when using a
Lev_Diff value of 5 on the PCGOPT command; Lev_Diff will automatically
be reset to 2.
For modal analyses, MSAVE,ON is not valid with the StrmCk option of the
PCGOPT command; Strmck will be set to OFF.
For all analysis types, MSAVE,ON is not valid when the Lagrange
multiplier option (LM_Key) of the PCGOPT command is set to ON; the
MSAVE activation key will be set to OFF.
For linear perturbation static and modal analyses, MSAVE,ON is not
valid; the MSAVE activation key will be set to OFF.
When using MSAVE,ON for modal analyses, no .FULL file will be created.
The .FULL file may be necessary for subsequent analyses (e.g.,
harmonic, transient mode-superposition, or spectrum analyses). To
generate the .FULL file, rerun the modal analysis using the WRFULL
command.
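Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Activate the element-by-element memory-saving approach:

>>> mapdl.msave("ON")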
"""<line_sep>command=f"MSAVE,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>msolve self numslv="" nrmtol="" nrmchkinc="" **kwargs<block_start>"""Starts multiple solutions for random acoustics analysis with diffuse
APDL Command: MSOLVE
sound field.
Parameters
----------
numslv
Number of multiple solutions (load steps) corresponding to the
number of samplings. Default = 1.
nrmtol
Norm convergence tolerance on the averaged radiated sound powers
(see Notes). The solution sequence stops once convergence within
this tolerance is reached.
nrmchkinc
Interval, in number of solutions, at which the norm convergence
check is performed (see Notes).
Notes
-----
The MSOLVE command starts multiple solutions (load steps) for random
acoustics analysis with multiple samplings.
The process is controlled by the norm convergence tolerance NRMTOL or
the number of multiple solutions NUMSLV (if the solution steps reach
the defined number).
The program checks the norm convergence by comparing two averaged sets
of radiated sound powers with the interval NRMCHKINC over the frequency
range. For example, if NRMCHKINC = 5, the averaged values from 5
solutions are compared with the averaged values from 10 solutions, then
the averaged values from 10 solutions are compared with the averaged
values from 15 solutions, and so on.
The incident diffuse sound field is defined via the DFSWAVE command.
The average result of multiple solutions with different samplings is
calculated via the PLST command.
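Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session; the
values below are hypothetical). Run up to 50 samplings, checking
norm convergence to a 0.05 tolerance every 5 solutions:

>>> mapdl.msolve(50, 0.05, 5)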
"""<line_sep>command=f"MSOLVE,{numslv},{nrmtol},{nrmchkinc}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>opncontrol self lab="" value="" numstep="" **kwargs<block_start>"""Sets decision parameter for automatically increasing the time step
APDL Command: OPNCONTROL
interval.
Parameters
----------
lab
DOF - Degree-of-freedom label used to base a decision for increasing the time step
(substep) interval in a nonlinear or transient analysis.
The only DOF label currently supported is TEMP.
OPENUPFACTOR - Factor for increasing the time step interval. Specify when AUTOTS,ON is issued
and specify a VALUE > 1.0 (up to 10.0). The default
VALUE = 1.5 (except for thermal analysis, where it
is 3.0). Generally, VALUE > 3.0 is not recommended.
value, numstep
Two values used in the algorithm for determining if the time step
interval can be increased. Valid only when Lab = DOF.
Notes
-----
This command is available only for nonlinear or full transient
analysis.
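Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Increase the time step interval by a factor of 2.0 when automatic
time stepping (AUTOTS,ON) allows it:

>>> mapdl.opncontrol("OPENUPFACTOR", 2.0)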
"""<line_sep>command=f"OPNCONTROL,{lab},{value},{numstep}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>outaero self sename="" timeb="" dtime="" **kwargs<block_start>"""Outputs the superelement matrices and load vectors to formatted files
APDL Command: OUTAERO
for aeroelastic analysis.
Parameters
----------
sename
Name of the superelement that models the wind turbine supporting
structure. Defaults to the current Jobname.
timeb
First time at which the load vector is formed (defaults to be read
from SENAME.sub).
dtime
Time step size of the load vectors (defaults to be read from
SENAME.sub).
Notes
-----
Both TIMEB and DTIME must be blank if the time data is to be read from
the SENAME.sub file.
The matrix file (SENAME.SUB) must be available from the substructure
generation run before issuing this command. This superelement that
models the wind turbine supporting structure must contain only one
master node with six freedoms per node: UX, UY, UZ, ROTX, ROTY, ROTZ.
The master node represents the connection point between the turbine and
the supporting structure.
This command will generate four files that are exported to the
aeroelastic code for integrated wind turbine analysis. The four files
are Jobname.GNK for the generalized stiffness matrix, Jobname.GNC for
the generalized damping matrix, Jobname.GNM for the generalized mass
matrix and Jobname.GNF for the generalized load vectors.
For detailed information on how to perform a wind coupling analysis,
see Coupling to External Aeroelastic Analysis of Wind Turbines in the
Mechanical APDL Advanced Analysis Guide.
"""<line_sep>command=f"OUTAERO,{sename},{timeb},{dtime}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>ovcheck self method="" frequency="" set_="" **kwargs<block_start>"""Checks for overconstraint among constraint equations and Lagrange
APDL Command: OVCHECK
multipliers.
Parameters
----------
method
Method used to determine which slave DOFs will be eliminated:
TOPO - Topological approach (default). This method only works with constraint
equations; it does not work with Lagrange multipliers.
ALGE - Algebraic approach.
NONE - Do not use overconstraint detection logic.
frequency
Frequency of overconstraint detection for static or full transient
analyses:
ITERATION - For all equilibrium iterations (default).
SUBSTEP - At the beginning of each substep.
LOADSTEP - At the beginning of each load step.
set\_
Set of equations:
All - Check for overconstraint between all constraint equations (default).
LAG - Check for overconstraint only on the set of equations that involves Lagrange
multipliers. This is faster than checking all sets,
especially when the model contains large MPC bonded contact
pairs.
Notes
-----
The OVCHECK command checks for overconstraint among the constraint
equations (CE/CP) and the Lagrange multipliers for the globally
assembled stiffness matrix. If overconstrained constraint equations or
Lagrange multipliers are detected, they are automatically removed from
the system of equations.
The constraint equations that are identified as redundant are removed
from the system and printed to the output file. It is very important
that you check the removed equations—they may lead to convergence
issues, especially for nonlinear analyses.
The Frequency and Set arguments are active only for the topological
method (Method = TOPO). If you do not issue the OVCHECK command,
overconstraint detection is performed topologically, and the slave DOFs
are also determined topologically.
Overconstraint detection slows down the run. We recommend using it to
validate that your model does not contain any overconstraints. Then,
you can switch back to the default method (no OVCHECK command is
needed).
As an example, consider the redundant set of constraint equations
defined below:
Equation number 2 will be removed by the overconstraint detection
logic. However, this is an arbitrary decision since equation number 1
could be removed instead. This is an important choice as the constant
term is not the same in these two constraint equations. Therefore, you
must check the removed constraint equations carefully.
For detailed information on the topological and algebraic methods of
overconstraint detection, see Constraints: Automatic Selection of Slave
DOFs in the Mechanical APDL Theory Reference.
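Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Check only the Lagrange-multiplier equations at the beginning of
each substep using the topological method:

>>> mapdl.ovcheck("TOPO", "SUBSTEP", "LAG")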
"""<line_sep>command=f"OVCHECK,{method},{frequency},{set_}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>pcgopt self lev_diff="" reduceio="" strmck="" wrtfull="" memory="" lm_key="" **kwargs <block_start>"""Controls PCG solver options.
APDL Command: PCGOPT
Parameters
----------
lev_diff
Indicates the level of difficulty of the analysis. Valid
settings are AUTO or 0 (default), 1, 2, 3, 4, or 5. This
option applies to both the PCG solver when used in static
and full transient analyses and to the PCG Lanczos method
in modal analyses. Use AUTO to let ANSYS automatically
choose the proper level of difficulty for the model. Lower
values (1 or 2) generally provide the best performance for
well-conditioned problems. Values of 3 or 4 generally
provide the best performance for ill-conditioned problems;
however, higher values may increase the solution time for
well-conditioned problems. Higher level-of-difficulty
values typically require more memory. Using the highest
value of 5 essentially performs a factorization of the
global matrix (similar to the sparse solver) and may
require a very large amount of memory. If necessary, use
Memory to reduce the memory usage when using Lev_Diff = 5.
Lev_Diff = 5 is generally recommended for small- to
medium-sized problems when using the PCG Lanczos mode
extraction method.
reduceio
Controls whether the PCG solver will attempt to reduce I/O
performed during equation solution:
AUTO - Automatically chooses whether to reduce I/O or not
(default).
YES - Reduces I/O performed during equation solution in
order to reduce total solver time.
NO - Does NOT reduce I/O performed during equation solution.
strmck
Controls whether or not a Sturm sequence check is performed:
OFF - Does NOT perform Sturm sequence check (default).
ON - Performs Sturm sequence check.
wrtfull
Controls whether or not the .FULL file is written.
ON - Write .FULL file (default).
OFF - Do not write .FULL file.
memory
Controls whether to run using in-core or out-of-core mode
when using Lev_Diff = 5.
AUTO - Automatically chooses which mode to use (default).
INCORE - Run using in-core mode.
OOC - Run using out-of-core mode.
lm_key
Controls use of the PCG solver for MPC184 Lagrange
multiplier method elements. This option applies only to
the PCG solver when used in static and full transient
analyses.
OFF - Do not use the PCG solver for the MPC184 Lagrange
multiplier method (default).
ON - Allow use of the PCG solver for the MPC184 Lagrange
multiplier method.
Notes
-----
ReduceIO works independently of the MSAVE command in the PCG
solver. Setting ReduceIO to YES can significantly increase
the memory usage in the PCG solver.
To minimize the memory used by the PCG solver with respect to
the Lev_Diff option only, set Lev_Diff = 1 if you do not have
sufficient memory to run the PCG solver with Lev_Diff = AUTO.
The MSAVE,ON command is not valid when using Lev_Diff = 5. In
this case, the Lev_Diff value will automatically be reset to
2. The MSAVE,ON command is also not valid with the StrmCk
option. In this case, StrmCk will be set to OFF.
Distributed ANSYS Restriction: The Memory option and the
LM_Key option are not supported in Distributed ANSYS.
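Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Raise the level of difficulty for an ill-conditioned problem and
request a Sturm sequence check:

>>> mapdl.pcgopt(lev_diff=3, strmck="ON")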
"""<line_sep>command=f"PCGOPT,{lev_diff},,{reduceio},{strmck},{wrtfull},{memory},{lm_key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>perturb self type_="" matkey="" contkey="" loadcontrol="" **kwargs<block_start>"""Sets linear perturbation analysis options.
APDL Command: PERTURB
Parameters
----------
type\_
Type of linear perturbation analysis to be performed:
STATIC - Perform a linear perturbation static analysis.
MODAL - Perform a linear perturbation modal analysis.
BUCKLE - Perform a linear perturbation eigenvalue buckling analysis.
HARMONIC - Perform a linear perturbation full harmonic analysis.
SUBSTR - Perform a linear perturbation substructure generation pass.
OFF - Do not perform a linear perturbation analysis (default).
matkey
Key for specifying how the linear perturbation analysis uses
material properties, valid for all structural elements except
contact elements. For more information, see Linear Perturbation
Analysis in the Mechanical APDL Theory Reference.
AUTO - The program selects the material properties for the linear
perturbation analysis automatically (default). The materials are
handled in the following way:
* For pure linear elastic materials used in the base analysis, the
same properties are used in the linear perturbation analysis.
* For hyperelastic materials used in the base analysis, the material
properties are assumed to be linear elastic in the linear
perturbation analysis. The material property data (or material
Jacobian) is obtained based on the tangent of the hyperelastic
material's constitutive law at the point where restart occurs.
* For any nonlinear materials other than hyperelastic materials used
in the base analysis, the material properties are assumed to be
linear elastic in the linear perturbation analysis. The material
data is the same as the linear portion of the nonlinear materials
(that is, the parts defined by MP commands).
* For COMBIN39, the stiffness is that of the first segment of the
force-deflection curve.
TANGENT - Use the tangent (material Jacobian) on the material
constitutive curve as the material property. The material property
remains linear in the linear perturbation analysis and is obtained
at the point of the base analysis where restart occurs. The
materials are handled in the following way:
* For pure linear elastic materials used in the base analysis, the
same properties are used in the linear perturbation analysis.
Because the material constitutive curve is linear, the tangent is
the same as in the base analysis.
* For hyperelastic materials used in the base analysis, the program
uses the same tangent as that used for MatKey = AUTO, and the
results are therefore identical.
* For any nonlinear materials other than hyperelastic materials used
in the base analysis, the material properties are obtained via the
material tangent on the material constitutive curve at the restart
point of the base analysis. The materials and properties typically
differ from MatKey = AUTO, but the results could be identical or
very similar if a) the material is elasto-plastic rate-independent
and is unloading (or has neutral loading) at the restart point, or
b) the material is rate-dependent, depending on the material
properties and loading conditions.
* For COMBIN39, the stiffness is equal to the tangent of the current
segment of the force-deflection curve.
* In a modal restart solution that follows a linear perturbation
modal analysis, the TANGENT option is overridden by the AUTO option
and linear material properties are used for stress calculations in
the modal restart. See the discussion in the Notes for more
information.
contkey
Key that controls contact status for the linear perturbation
analysis. This key controls all contact elements (TARGE169,
TARGE170, and CONTA171 through CONTA178) globally for all contact
pairs. Alternatively, contact status can be controlled locally per
contact pair by using the CNKMOD command. Note that the contact
status from the base analysis solution is always adjusted by the
local contact controls specified by CNKMOD first and then modified
by the global sticking or bonded control (ContKey = STICKING or
BONDED). The tables in the Notes section show how the contact
status is adjusted by CNKMOD and/or the ContKey setting.
CURRENT - Use the current contact status from the restart
snapshot (default). If the previous run is
nonlinear, then the nonlinear contact status at
the point of restart is frozen and used
throughout the linear perturbation analysis.
STICKING - For frictional contact pairs (MU > 0), use
sticking contact (e.g., ``MU*KN`` for tangential
contact stiffness) everywhere the contact state
is closed (i.e., status is sticking or
sliding). This option only applies to contact
pairs that are in contact and have a frictional
coefficient MU greater than zero. Contact pairs
without friction (MU = 0) and in a sliding
state remain free to slide in the linear
perturbation analysis.
BONDED - Any contact pairs that are in the closed
(sticking or sliding) state are moved to bonded
(for example, KN for both normal and tangential
contact stiffness). Contact pairs that have a
status of far-field or near-field remain open.
loadcontrol
Key that controls how the load vector of {Fperturbed} is
calculated. This control is provided for convenience of load
generation for linear perturbation analysis. In general, a new set
of loads is required for a linear perturbation analysis. This key
controls all mechanical loads; it does not affect non-mechanical
loads. Non-mechanical loads (including thermal loads) are always
kept (i.e., not deleted).
ALLKEEP - Keep all the boundary conditions (loads and
constraints) from the end of the load step of
the current restart point. This option is
convenient for further load application and is
useful for a linear perturbation analysis
restarted from a previous linear analysis. For
this option, {Fend} is the total load vector at
the end of the load step at the restart point.
INERKEEP - Delete all loads and constraints from the
restart step, except for displacement
constraints and inertia loads (default). All
displacement constraints and inertia loads are
kept for convenience when performing the linear
perturbation analysis. Note that nonzero and
tabular displacement constraints can be
considered as external loads; however, they are
not deleted when using this option.
PARKEEP - Delete all loads and constraints from the
restart step, except for displacement
constraints. All displacement constraints are
kept for convenience when performing the linear
perturbation analysis. Note that nonzero and
tabular displacement constraints can be
considered as external loads; however, they are
not deleted when using this option.
DZEROKEEP - Behaves the same as the PARKEEP option, except
that all nonzero displacement constraints are
set to zero upon the onset of linear
perturbation.
NOKEEP - Delete all the loads and constraints, including
all displacement constraints. For this option,
{Fend} is zero unless non-mechanical loads (e.g.,
thermal loads) are present.
Notes
-----
This command controls options relating to linear perturbation analyses.
It must be issued in the first phase of a linear perturbation analysis.
This command is also valid in PREP7.
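Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Set up a linear perturbation modal analysis that keeps the current
contact status and retains displacement constraints and inertia
loads:

>>> mapdl.perturb("MODAL", "AUTO", "CURRENT", "INERKEEP")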
"""<line_sep>command=f"PERTURB,{type_},{matkey},{contkey},{loadcontrol}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>prscontrol self key="" **kwargs<block_start>"""Specifies whether to include pressure load stiffness in the element
APDL Command: PRSCONTROL
stiffness formation.
Parameters
----------
key
Pressure load stiffness key. In general, use the default setting.
Use a non-default setting only if you encounter convergence
difficulties. Pressure load stiffness is automatically included
when using eigenvalue buckling analyses (ANTYPE,BUCKLE), equivalent
to Key = INCP. For all other types of analyses, valid arguments for
Key are:
NOPL - Pressure load stiffness not included for any elements.
(blank) (default) - Include pressure load stiffness for elements SURF153, SURF154, SURF156,
SURF159, SHELL181, PLANE182, PLANE183, SOLID185,
SOLID186, SOLID187, SOLSH190, BEAM188, BEAM189,
FOLLW201, SHELL208, SHELL209, SOLID272, SOLID273,
SHELL281, SOLID285, PIPE288, PIPE289, and
ELBOW290. Do not include pressure load stiffness
for element SOLID65.
INCP - Pressure load stiffness included for all of the default elements listed above
and SOLID65.
Notes
-----
This command is rarely needed. The default settings are recommended for
most analyses.
"""<line_sep>command=f"PRSCONTROL,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>pscontrol self option="" key="" **kwargs<block_start>"""Enables or disables shared-memory parallel operations.
APDL Command: PSCONTROL
Parameters
----------
option
Specify the operations for which you intend to enable/disable
parallel behavior:
ALL - Enable/disable parallel for all areas (default).
PREP - Enable/disable parallel during preprocessing (/PREP7).
SOLU - Enable/disable parallel during solution (/SOLU).
FORM - Enable/disable parallel during element matrix generation.
SOLV - Enable/disable parallel during equation solver.
RESU - Enable/disable parallel during element results calculation.
POST - Enable/disable parallel during postprocessing (/POST1 and /POST26).
STAT - List parallel operations that are enabled/disabled.
key
Option control key. Used for all Option values except STAT.
ON - Enable parallel operation.
OFF - Disable parallel operation.
Notes
-----
Use this command in shared-memory parallel operations.
This command is useful when you encounter minor discrepancies in a
nonlinear solution when using different numbers of processors. A
parallel operation applied to the element matrix generation can produce
a different nonlinear solution with a different number of processors.
Although the nonlinear solution converges to the same nonlinear
tolerance, the minor discrepancy created may not be desirable for
consistency.
Enabling/disabling parallel behavior for the solution (Option = SOLU)
supersedes the activation/deactivation of parallel behavior for element
matrix generation (FORM), equation solver (SOLV), and element results
calculation (RESU).
The SOLV option supports only the sparse direct and PCG solvers
(EQSLV,SPARSE or PCG). No other solvers are supported.
This command applies only to shared-memory architecture. It does not
apply to the Distributed ANSYS product.
"""<line_sep>command=f"PSCONTROL,{option},{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>rate self option="" **kwargs<block_start>"""Specifies whether the effect of creep strain rate will be used in the
APDL Command: RATE
solution of a load step.
Parameters
----------
option
Activates implicit creep analysis.
0 or OFF - No implicit creep analysis. This option is the default.
1 or ON - Perform implicit creep analysis.
Notes
-----
Set Option = 1 (or ON) to perform an implicit creep analysis (TB,CREEP
with TBOPT ≥ 1). For viscoplasticity/creep analysis, Option specifies
whether or not to include the creep calculation in the solution of a
load step. If Option = 1 (or ON), ANSYS performs the creep calculation.
Set an appropriate time for solving the load step via a TIME,TIME
command.
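Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Include the implicit creep calculation in the next load step:

>>> mapdl.rate("ON")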
"""<line_sep>command=f"RATE,{option}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>resvec self key="" **kwargs<block_start>"""Calculates or includes residual vectors.
APDL Command: RESVEC
Parameters
----------
key
Residual vector key:
OFF - Do not calculate or include residual vectors. This option is the default.
ON - Calculate or include residual vectors.
Notes
-----
In a modal analysis, the RESVEC command calculates residual vectors. In
a mode-superposition transient dynamic, mode-superposition harmonic,
PSD or spectrum analysis, the command includes residual vectors.
In a component mode synthesis (CMS) generation pass, the RESVEC command
calculates one residual vector which is included in the normal modes
basis used in the transformation matrix. It is supported for the three
available CMS methods. RESVEC,ON can only be specified in the first
load step of a generation pass and is ignored if issued at another load
step.
If rigid-body modes exist, pseudo-constraints are required for the
calculation. Issue the D,,,SUPPORT command to specify only the minimum
number of pseudo-constraints necessary to prevent rigid-body motion.
For more information about residual vector formulation, see Residual
Vector Method in the Mechanical APDL Theory Reference.
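Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Request residual vector calculation in a modal analysis:

>>> mapdl.resvec("ON")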
"""<line_sep>command=f"RESVEC,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>rstoff self lab="" offset="" **kwargs<block_start>"""Offsets node or element IDs in the FE geometry record.
APDL Command: RSTOFF
Parameters
----------
lab
The offset type:
NODE - Offset the node IDs.
ELEM - Offset the element IDs.
offset
A positive integer value specifying the offset value to apply. The
value must be greater than the number of nodes or elements in the
existing superelement results file.
Notes
-----
The RSTOFF command offsets node or element IDs in the FE geometry
record saved in the .rst results file. Use the command when expanding
superelements in a bottom-up substructuring analysis (where each
superelement is generated individually in a generation pass, and all
superelements are assembled together in the use pass).
With appropriate offsets, you can write results files with unique node
or element IDs and thus display the entire model even if the original
superelements have overlapping element or node ID sets. (Such results
files are incompatible with the .db database file saved at the
generation pass.)
The offset that you specify is based on the original superelement node
or element numbering, rather than on any offset specified via a SESYMM
or SETRAN command. When issuing an RSTOFF command, avoid specifying an
offset that creates conflicting node or element numbers for a
superelement generated via a SESYMM or SETRAN command.
If you issue the command to set non-zero offsets for node or element
IDs, you must bring the geometry into the database via the SET command
so that ANSYS can display the results. You must specify appropriate
offsets to avoid overlapping node or element IDs with other
superelement results files.
The command is valid only in the first load step of a superelement
expansion pass.
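Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session; the
offset value is hypothetical and must exceed the node count of the
existing superelement results file):

>>> mapdl.rstoff("NODE", 100000)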
"""<line_sep>command=f"RSTOFF,{lab},{offset}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>scopt self tempdepkey="" **kwargs<block_start>"""Specifies System Coupling options.
APDL Command: SCOPT
Parameters
----------
tempdepkey
Temperature-dependent behavior key based on the convection
coefficient:
* ``"YES"`` : A negative convection coefficient, -N, is
assumed to be a function of temperature and is determined
from the HF property table for material N (MP command). This
is the default.
* ``"NO"`` : A negative convection coefficient, -N, is used as
is in the convection calculation.
Notes
-----
By default in the Mechanical APDL program, a negative convection
coefficient value triggers temperature-dependent behavior. In
System Coupling, and in some one-way CFD to Mechanical APDL
thermal simulations, it is desirable to allow convection
coefficients to be used as negative values. To do so, issue the
command ``scopt("NO")``.
"""<line_sep><return>self.run(f"SCOPT,{tempdepkey}" **kwargs)<block_end><def_stmt>seexp self sename="" usefil="" imagky="" expopt="" **kwargs<block_start>"""Specifies options for the substructure expansion pass.
APDL Command: SEEXP
Parameters
----------
sename
The name (case-sensitive) of the superelement matrix file created
by the substructure generation pass (Sename.SUB). Defaults to the
initial jobname File. If a number, it is the element number of the
superelement as used in the use pass.
usefil
The name of the file containing the superelement degree-of-freedom
(DOF) solution created by the substructure use pass (Usefil.DSUB).
imagky
Key to specify use of the imaginary component of the DOF solution.
Applicable only if the use pass is a harmonic (ANTYPE,HARMIC)
analysis:
OFF - Use real component of DOF solution (default).
ON - Use imaginary component of DOF solution.
expopt
Key to specify whether the superelement (ANTYPE,SUBSTR) expansion
pass (EXPASS,ON) should transform the geometry:
OFF - Do not transform node or element locations (default).
ON - Transform node or element locations in the FE geometry record of the .rst
results file.
Notes
-----
Specifies options for the expansion pass of the substructure analysis
(ANTYPE,SUBSTR). If used in SOLUTION, this command is valid only
within the first load step.
If you specify geometry transformation (Expopt = ON), you must retrieve
the transformation matrix (if it exists) from the specified .SUB file.
The command updates the nodal X, Y, and Z coordinates to represent the
transformed node locations. The Expopt option is useful when you want
to expand superelements created from other superelements (via SETRAN or
SESYMM commands). For more information, see Superelement Expansion in
Transformed Locations and Plotting or Printing Mode Shapes.
This command is also valid in /PREP7.
"""<line_sep>command=f"SEEXP,{sename},{usefil},{imagky},{expopt}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>seopt self sename="" sematr="" sepr="" sesst="" expmth="" seoclvl="" **kwargs<block_start>"""Specifies substructure analysis options.
APDL Command: SEOPT
Parameters
----------
sename
The name (case-sensitive, thirty-two character maximum) assigned to
the superelement matrix file. The matrix file will be named
Sename.SUB. This field defaults to Fname on the /FILNAME command.
sematr
Matrix generation key:
1 - Generate stiffness (or conductivity) matrix (default).
2 - Generate stiffness and mass (or conductivity and specific heat) matrices.
3 - Generate stiffness, mass and damping matrices.
sepr
Print key:
0 - Do not print superelement matrices or load vectors.
1 - Print both load vectors and superelement matrices.
2 - Print load vectors but not matrices.
sesst
Stress stiffening key:
0 - Do not save space for stress stiffening in a later run.
1 - Save space for the stress stiffening matrix (calculated in a subsequent
generation run after the expansion pass).
expmth
Expansion method for expansion pass:
BACKSUB - Save necessary factorized matrix files for backsubstitution during subsequent
expansion passes (default). This normally results in a
large usage of disk space.
RESOLVE - Do not save factorized matrix files. Global stiffness matrix will be reformed
during expansion pass. This option provides an effective
way to save disk space usage. This option cannot be used
if the use pass uses large deflections (NLGEOM,ON).
seoclvl
For the added-mass calculation, the ocean level to use when ocean
waves (OCTYPE,,WAVE) are present:
ATP - The ocean level at this point in time (default).
MSL - The mean ocean level.
Notes
-----
The SEOPT command specifies substructure analysis options
(ANTYPE,SUBSTR). If used during solution, the command is valid only
within the first load step.
When ocean waves (OCTYPE,,WAVE) are present, the SeOcLvL argument
specifies the ocean height or level to use for the added-mass
calculation, as the use-run analysis type is unknown during the
generation run.
The expansion pass method RESOLVE is not supported with component mode
synthesis analysis (CMSOPT). ExpMth is automatically set to BACKSUB for
CMS analysis. The RESOLVE method invalidates the use of the NUMEXP
command. The RESOLVE method does not allow the computation of results
based on nodal velocity and nodal acceleration (damping force, inertial
force, kinetic energy, etc.) in the substructure expansion pass.
This command is also valid in PREP7.
"""<line_sep>command=f"SEOPT,{sename},{sematr},{sepr},{sesst},{expmth},{seoclvl}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>snoption self rangefact="" blocksize="" robustlev="" compute="" solve_info="" **kwargs <block_start>"""Specifies Supernode (SNODE) eigensolver options.
APDL Command: SNOPTION
Parameters
----------
rangefact
Factor used to control the range of eigenvalues computed for each
supernode. The value of RangeFact must be a number between 1.0 and
5.0. By default the RangeFact value is set to 2.0, which means that
all eigenvalues between 0 and ``2*FREQE`` are computed for each
supernode (where FREQE is the upper end of the frequency range of
interest as specified on the MODOPT command). As the RangeFact
value increases, the eigensolution for the SNODE solver becomes
more accurate and the computational time increases.
blocksize
BlockSize to be used when computing the final eigenvectors. The
value of Blocksize must be either MAX or a number between 1 and
NMODE, where NMODE is the number of modes to be computed as set on
the MODOPT command. Input a value of MAX to force the algorithm to
allocate enough memory to hold all of the final eigenvectors in
memory and, therefore, only read through the file containing the
supernode eigenvectors once. Note that this setting is ONLY
recommended when there is sufficient physical memory on the machine
to safely hold all of the final eigenvectors in memory.
robustlev
Parameter used to control the robustness of the SNODE eigensolver.
The value of RobustLev must be a number between 0 and 10. Lower
values of RobustLev allow the eigensolver to run in the most
efficient manner for optimal performance. Higher values of
RobustLev often slow down the performance of the eigensolver, but
can increase the robustness; this may be desirable if a problem is
detected with the eigensolver or its eigensolution.
compute
Key to control which computations are performed by the Supernode
eigensolver:
EVALUE - The eigensolver computes only the eigenvalues.
EVECTOR - The eigensolver computes only the eigenvectors
(must be preceded by a modal analysis where the
eigenvalues were computed using the Supernode
eigensolver).
BOTH - The eigensolver computes both the eigenvalues and
eigenvectors in the same pass (default).
solve_info
Solver output option:
OFF - Turns off additional output printing from the
Supernode eigensolver (default).
PERFORMANCE - Turns on additional output printing from the
Supernode eigensolver, including a
performance summary and a summary of file
I/O for the Supernode
eigensolver. Information on memory usage
during assembly of the global matrices (that
is, creation of the Jobname.FULL file) is
also printed with this option.
Notes
-----
This command specifies options for the Supernode (SNODE)
eigensolver.
Setting RangeFact to a value greater than 2.0 will improve the
accuracy of the computed eigenvalues and eigenvectors, but
will often increase the computing time of the SNODE
eigensolver. Conversely, setting RangeFact to a value less
than 2.0 will deteriorate the accuracy of the computed
eigenvalues and eigenvectors, but will often reduce the
computing time of the SNODE eigensolver. The default value of
2.0 has been set as a good blend of accuracy and performance.
The SNODE eigensolver reads the eigenvectors and related
information for each supernode from a file and uses that
information to compute the final eigenvectors. For each
eigenvalue/eigenvector requested by the user, the program must
do one pass through the entire file that contains the
supernode eigenvectors. By choosing a BlockSize value greater
than 1, the program can compute BlockSize number of final
eigenvectors for each pass through the file. Therefore,
smaller values of BlockSize result in more I/O, and larger
values of BlockSize result in less I/O. Larger values of
BlockSize also result in significant additional memory usage,
as BlockSize number of final eigenvectors must be stored in
memory. The default Blocksize of min(NMODE,40) is normally a
good choice to balance memory and I/O usage.
The RobustLev field should only be used when a problem is
detected with the accuracy of the final solution or if the
Supernode eigensolver fails while computing the
eigenvalues/eigenvectors. Setting RobustLev to a value greater
than 0 will cause the performance of the eigensolver to
deteriorate. If the performance deteriorates too much or if
the eigensolver continues to fail when setting the RobustLev
field to higher values, then switching to another eigensolver
such as Block Lanczos or PCG Lanczos is recommended.
Setting Compute = EVALUE causes the Supernode eigensolver to
compute only the requested eigenvalues. During this process a
Jobname.SNODE file is written; however, a Jobname.MODE file is
not written. Thus, errors will likely occur in any downstream
computations that require the Jobname.MODE file (for example,
participation factor computations, mode superposition
transient/harmonic analysis, PSD analysis). Setting Compute =
EVECTOR causes the Supernode eigensolver to compute only the
corresponding eigenvectors. The Jobname.SNODE file and the
associated Jobname.FULL file are required when requesting
these eigenvectors. In other words, the eigenvalues must have
already been computed for this model before computing the
eigenvectors. This field can be useful in order to separate
the two steps (computing eigenvalues and computing
eigenvectors).
"""<line_sep>command=(f"SNOPTION,{rangefact},{blocksize},{robustlev},{compute},,{solve_info}")<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>solve self action="" **kwargs<block_start>"""Starts a solution.
APDL Command: SOLVE
Parameters
----------
action
Action to be performed on solve (used only for linear perturbation
analyses).
ELFORM - Reform all appropriate element matrices in the first phase of a linear
perturbation analysis.
Notes
-----
Starts the solution of one load step of a solution sequence based on
the current analysis type and option settings. Use Action = ELFORM only
in the first phase of a linear perturbation analysis.
"""<line_sep>command=f"SOLVE,{action}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>stabilize self key="" method="" value="" substpopt="" forcelimit="" **kwargs<block_start>"""Activates stabilization for all elements that support nonlinear
APDL Command: STABILIZE
stabilization.
Parameters
----------
key
Key for controlling nonlinear stabilization:
OFF - Deactivate stabilization. This value is the default.
CONSTANT - Activate stabilization. The energy-dissipation ratio or damping factor remains
constant during the load step.
REDUCE - Activate stabilization. The energy-dissipation ratio or damping factor is
reduced linearly to zero at the end of the load step from
the specified or calculated value.
method
The stabilization-control method:
ENERGY - Use the energy-dissipation ratio as the control. This value is the default
when Key ≠ OFF.
DAMPING - Use the damping factor as the control.
value
The energy-dissipation ratio (Method = ENERGY) or damping factor
(Method = DAMPING). This value must be greater than 0 when Method =
ENERGY or Method = DAMPING. When Method = ENERGY, this value is
usually a number between 0 and 1.
substpopt
Option for the first substep of the load step:
NO - Stabilization is not activated for the first substep even when it does not
converge after the minimal allowed time increment is reached.
This value is the default when Key ≠ OFF.
MINTIME - Stabilization is activated for the first substep if it still does not converge
after the minimal allowed time increment is reached.
ANYTIME - Stabilization is activated for the first substep. Use this option if
stabilization was active for the previous load step via
Key = CONSTANT.
forcelimit
The stabilization force limit coefficient, such that 0 < FORCELIMIT
< 1. The default value is 0.2. To omit a stabilization force check,
set this value to 0.
Notes
-----
Once issued, a STABILIZE command remains in effect until you reissue
the command.
For the energy dissipation ratio, specify VALUE = 1.0e-4 if you have no
prior experience with the current model; if convergence problems are
still an issue, increase the value gradually. The damping factor is
mesh-, material-, and time-step-dependent; a previous run (such as a
run using the energy-dissipation ratio as input) can provide an
initial reference value.
Exercise caution when specifying SubStpOpt = MINTIME or ANYTIME for the
first load step; ANSYS, Inc. recommends this option only for
experienced users. If stabilization was active for the previous load
step via Key = CONSTANT and convergence is an issue for the first
substep, specify SubStpOpt = ANYTIME.
When the L2-norm of the stabilization force (CSRSS value) exceeds the
L2-norm of the internal force multiplied by the stabilization force
coefficient, ANSYS issues a message displaying both the stabilization
force norm and the internal force norm. The FORCELIMIT argument allows
you to change the default stabilization force coefficient (normally 20
percent).
This command stabilizes the degrees of freedom for current-technology
elements only. Other elements can be included in the FE model, but
their degrees of freedom are not stabilized.
For more information about nonlinear stabilization, see Unstable
Structures in the Structural Analysis Guide. For additional tips that
can help you to achieve a stable final model, see Simplify Your Model
in the Structural Analysis Guide.
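Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Activate constant-ratio stabilization with the recommended starting
energy-dissipation ratio:

>>> mapdl.stabilize("CONSTANT", "ENERGY", 1.0e-4)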
"""<line_sep>command=f"STABILIZE,{key},{method},{value},{substpopt},{forcelimit}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>thexpand self key="" **kwargs<block_start>"""Enables or disables thermal loading
APDL Command: THEXPAND
Parameters
----------
key
Activation key:
ON - Thermal loading is included in the load vector (default).
OFF - Thermal loading is not included in the load vector.
Notes
-----
Temperatures applied in the analysis are used by default to evaluate
material properties and contribute to the load vector if the
temperature does not equal the reference temperature and a coefficient
of thermal expansion is specified.
Use THEXPAND,OFF to evaluate the material properties but not contribute
to the load vector. This capability is particularly useful when
performing a harmonic analysis where you do not want to include
harmonically varying thermal loads. It is also useful in a modal
analysis when computing a modal load vector but excluding the thermal
load.
This command is valid for all analysis types except linear perturbation
modal and linear perturbation harmonic analyses. For these two linear
perturbation analysis types, the program internally sets THEXPAND,OFF,
and it cannot be set to ON by using this command (THEXPAND,ON is
ignored).
"""<line_sep>command=f"THEXPAND,{key}"<line_sep><return>self.run(command **kwargs)<block_end><def_stmt>thopt self refopt="" reformtol="" ntabpoints="" tempmin="" tempmax="" algo="" **kwargs <block_start>"""Specifies nonlinear transient thermal solution options.
APDL Command: THOPT
Parameters
----------
refopt
Matrix reform option.
FULL - Use the full Newton-Raphson solution option (default). All subsequent input
values are ignored.
QUASI - Use a selective reform solution option based on REFORMTOL.
reformtol
Property change tolerance for Matrix Reformation (.05 default). The
thermal matrices are reformed if the maximum material property
change in an element (from the previous reform time) is greater
than the reform tolerance. Valid only when Refopt = QUASI.
ntabpoints
Number of points in Fast Material Table (64 default). Valid only
when Refopt = QUASI.
tempmin
Minimum temperature for Fast Material Table. Defaults to the
minimum temperature defined by the MPTEMP command for any material
property defined. Valid only when Refopt = QUASI.
tempmax
Maximum temperature for Fast Material Table. Defaults to the
maximum temperature defined by the MPTEMP command for any material
property defined. Valid only when Refopt = QUASI.
--
Reserved field.
algo
Specifies which solution algorithm to apply:
0 - Multipass (default).
1 - Iterative.
Notes
-----
The QUASI matrix reform option is supported by the ICCG, JCG, and
sparse solvers only (EQSLV).
For Refopt = QUASI:
Results from a restart may be different than results from a single run
because the stiffness matrices are always recreated in a restart run,
but may or may not be in a single run (depending on the behavior
resulting from the REFORMTOL setting). Additionally, results may differ
between two single runs as well, if the matrices are reformed as a
result of the REFORMTOL setting.
Midside node temperatures are not calculated if 20-node thermal solid
elements (SOLID90 or SOLID279) are used.
For more information, see Solution Algorithms Used in Transient Thermal
Analysis in the Thermal Analysis Guide.
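Examples
--------
A minimal usage sketch (assuming an active ``mapdl`` session).
Select the QUASI reform option with a 0.05 property-change
tolerance and a 64-point fast material table:

>>> mapdl.thopt("QUASI", 0.05, 64)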
"""<line_sep>command=f"THOPT,{refopt},{reformtol},{ntabpoints},{tempmin},{tempmax},{algo}"<line_sep><return>self.run(command **kwargs)<block_end><block_end> |
<import_from_future_stmt> print_function<import_stmt>os<import_stmt>unittest<import_from_stmt>.test_base_column_profilers AbstractTestColumnProfiler<import_from_stmt>dataprofiler.profilers.column_profile_compilers ColumnPrimitiveTypeProfileCompiler<line_sep>test_root_path=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<class_stmt>TestColumnDataTypeProfiler(AbstractTestColumnProfiler unittest.TestCase)<block_start>column_profiler=ColumnPrimitiveTypeProfileCompiler<line_sep>profile_types=['data_type' 'statistics' 'data_type_representation']<def_stmt>setUp self<block_start>AbstractTestColumnProfiler.setUp(self)<block_end>@classmethod<def_stmt>setUpClass cls<block_start>super(TestColumnDataTypeProfiler cls).setUpClass()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_callback.ipynb (unless otherwise specified).
__all__=['GatherInputsCallback' 'SetInputsCallback' 'GeneratorCallback']<line_sep># Cell
<import_from_stmt>fastcore.basics store_attr<import_from_stmt>fastcore.meta delegates<import_from_stmt>fastai.callback.core Callback CancelBatchException<import_from_stmt>transformers PreTrainedModel<line_sep># Cell
<class_stmt>GatherInputsCallback(Callback)<block_start>"""
Prepares basic input dictionary for HuggingFace Transformers
This `Callback` generates a very basic dictionary consisting of `input_ids`,
`attention_mask`, and `token_type_ids`, and saves it to the attribute `self.learn.inputs`.
If further data is expected or needed from the batch, the additional Callback(s) should have
an order of -2
"""<line_sep>order=-3<def_stmt>before_validate self<block_start>"""
Sets the number of inputs in `self.dls`
"""<line_sep>x=self.dl.one_batch()<line_sep>self.learn.dls.n_inp=len(x)<block_end><def_stmt>before_batch self<block_start>"""
Turns `self.xb` from a tuple to a dictionary of either
`{"input_ids", "attention_masks", "token_type_ids"}`d
or
`{"input_ids", "attention_masks"}`
"""<line_sep>inputs={"input_ids":self.learn.xb[0] "attention_mask":self.learn.xb[1]}<if_stmt>len(self.learn.xb)<g>2<block_start>inputs["token_type_ids"]=self.learn.xb[2]<block_end>self.learn.inputs=inputs<block_end><block_end># Cell
<class_stmt>SetInputsCallback(Callback)<block_start>"""
Callback which runs after `GatherInputsCallback` that sets `self.learn.xb`
"""<line_sep>order=-1<def_stmt>__init__ self as_dict=<false># Whether to leave `self.xb` as a dictionary of values
<block_start>store_attr()<block_end><def_stmt>before_batch self<block_start>"""
Set `self.learn.xb` to `self.learn.inputs.values()`
"""<if_stmt><not>self.as_dict<block_start>self.learn.xb=list(self.learn.inputs.values())<block_end><else_stmt><block_start>self.learn.xb=self.learn.inputs<block_end><block_end><block_end># Cell
<class_stmt>GeneratorCallback(Callback)<block_start>"""
Callback used for models that utilize `self.model.generate`
"""<line_sep>@delegates(PreTrainedModel.generate)<def_stmt>__init__ self num_beams:int # Number of beams for beam search
min_length:int # Minimal length of the sequence generated
max_length:int # Maximum length of the sequence generated
early_stopping:bool # Whether to do early stopping
**kwargs<block_start>store_attr()<line_sep>self.kwargs=kwargs<block_end><def_stmt>before_batch self<block_start>"""
Run model-specific inference
"""<line_sep>pred=self.learn.model.generate(input_ids=self.xb['input_ids'] attention_mask=self.xb['attention_mask'] num_beams=self.num_beams min_length=self.min_length max_length=self.max_length early_stopping=self.early_stopping **self.kwargs)<line_sep>self.learn.pred=pred<line_sep><raise>CancelBatchException<block_end><block_end># skip original model inference
|
<import_stmt>unittest<import_from_stmt>unittest.mock MagicMock<import_stmt>pandas<as>pd<import_from_stmt>pandas.testing assert_frame_equal<import_from_stmt>data_export.pipeline.dataset Dataset<class_stmt>TestDataset(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>example=MagicMock()<line_sep>example.to_dict.return_value={"data":"example"}<line_sep>self.examples=MagicMock()<line_sep>self.examples.__iter__.return_value=[example]<line_sep>label=MagicMock()<line_sep>label.find_by.return_value={"labels":["label"]}<line_sep>self.labels=MagicMock()<line_sep>self.labels.__iter__.return_value=[label]<block_end><def_stmt>test_to_dataframe self<block_start>dataset=Dataset(self.examples self.labels)<line_sep>df=dataset.to_dataframe()<line_sep>expected=pd.DataFrame([{"data":"example" "labels":["label"]}])<line_sep>assert_frame_equal(df expected)<block_end><block_end> |
"""Factory classes for easily generating test objects."""<import_from_stmt>.activation Activation<import_from_stmt>.annotation Annotation<import_from_stmt>.annotation_moderation AnnotationModeration<import_from_stmt>.auth_client AuthClient ConfidentialAuthClient<import_from_stmt>.auth_ticket AuthTicket<import_from_stmt>.authz_code AuthzCode<import_from_stmt>.base set_session<import_from_stmt>.document Document DocumentMeta DocumentURI<import_from_stmt>.feature Feature<import_from_stmt>.flag Flag<import_from_stmt>.group Group OpenGroup RestrictedGroup<import_from_stmt>.group_scope GroupScope<import_from_stmt>.job Job SyncAnnotationJob<import_from_stmt>.organization Organization<import_from_stmt>.setting Setting<import_from_stmt>.token DeveloperToken OAuth2Token<import_from_stmt>.user User<import_from_stmt>.user_identity UserIdentity<line_sep>__all__=("Activation" "Annotation" "AnnotationModeration" "AuthClient" "AuthTicket" "AuthzCode" "ConfidentialAuthClient" "DeveloperToken" "Document" "DocumentMeta" "DocumentURI" "Feature" "Flag" "Group" "GroupScope" "Job" "OAuth2Token" "OpenGroup" "Organization" "RestrictedGroup" "Setting" "SyncAnnotationJob" "User" "UserIdentity" "set_session" )<line_sep> |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
<import_stmt>torch<import_stmt>torch.distributions<as>torchdist<import_from_stmt>torch.distributions constraints<import_stmt>pyro<import_stmt>pyro.distributions<as>dist<import_from_stmt>pyro.contrib.gp.models.model GPModel<import_from_stmt>pyro.contrib.gp.util conditional<import_from_stmt>pyro.nn.module PyroParam pyro_method<import_from_stmt>pyro.util warn_if_nan<class_stmt>GPRegression(GPModel)<block_start>r"""
Gaussian Process Regression model.
The core of a Gaussian Process is a covariance function :math:`k` which governs
the similarity between input points. Given :math:`k`, we can establish a
distribution over functions :math:`f` by a multivariate normal distribution
.. math:: p(f(X)) = \mathcal{N}(0, k(X, X)),
where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance
matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs
:math:`(x, z)`. This distribution is usually denoted by
.. math:: f \sim \mathcal{GP}(0, k).
.. note:: Generally, besides a covariance function :math:`k`, a Gaussian Process can
also be specified by a mean function :math:`m` (which is a zero-value function
by default). In that case, its distribution will be
.. math:: p(f(X)) = \mathcal{N}(m(X), k(X, X)).
Given inputs :math:`X` and their noisy observations :math:`y`, the Gaussian Process
Regression model takes the form
.. math::
f &\sim \mathcal{GP}(0, k(X, X)),\\
y & \sim f + \epsilon,
where :math:`\epsilon` is Gaussian noise.
.. note:: This model has :math:`\mathcal{O}(N^3)` complexity for training,
:math:`\mathcal{O}(N^3)` complexity for testing. Here, :math:`N` is the number
of train inputs.
Reference:
[1] `Gaussian Processes for Machine Learning`,
<NAME>, <NAME>
:param torch.Tensor X: Input data for training. Its first dimension is the number
of data points.
:param torch.Tensor y: Output data for training. Its last dimension is the
number of data points.
:param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which
is the covariance function :math:`k`.
:param torch.Tensor noise: Variance of Gaussian noise of this model.
:param callable mean_function: An optional mean function :math:`m` of this Gaussian
process. By default, we use zero mean.
:param float jitter: A small positive term which is added into the diagonal part of
a covariance matrix to help stabilize its Cholesky decomposition.
"""<def_stmt>__init__ self X y kernel noise=<none> mean_function=<none> jitter=1e-6<block_start><assert_stmt>isinstance(X torch.Tensor) "X needs to be a torch Tensor instead of a {}".format(type(X))<if_stmt>y<is><not><none><block_start><assert_stmt>isinstance(y torch.Tensor) "y needs to be a torch Tensor instead of a {}".format(type(y))<block_end>super().__init__(X y kernel mean_function jitter)<line_sep>noise=self.X.new_tensor(1.0)<if>noise<is><none><else>noise<line_sep>self.noise=PyroParam(noise constraints.positive)<block_end>@pyro_method<def_stmt>model self<block_start>self.set_mode("model")<line_sep>N=self.X.size(0)<line_sep>Kff=self.kernel(self.X)<line_sep>Kff.view(-1)[::N+1]<augadd>self.jitter+self.noise# add noise to diagonal
Lff=torch.linalg.cholesky(Kff)<line_sep>zero_loc=self.X.new_zeros(self.X.size(0))<line_sep>f_loc=zero_loc+self.mean_function(self.X)<if_stmt>self.y<is><none><block_start>f_var=Lff.pow(2).sum(dim=-1)<line_sep><return>f_loc f_var<block_end><else_stmt><block_start><return>pyro.sample(self._pyro_get_fullname("y") dist.MultivariateNormal(f_loc scale_tril=Lff).expand_by(self.y.shape[:-1]).to_event(self.y.dim()-1) obs=self.y )<block_end><block_end>@pyro_method<def_stmt>guide self<block_start>self.set_mode("guide")<line_sep>self._load_pyro_samples()<block_end><def_stmt>forward self Xnew full_cov=<false> noiseless=<true><block_start>r"""
Computes the mean and covariance matrix (or variance) of the Gaussian Process
posterior at test input data :math:`X_{new}`:
.. math:: p(f^* \mid X_{new}, X, y, k, \epsilon) = \mathcal{N}(loc, cov).
.. note:: The noise parameter ``noise`` (:math:`\epsilon`), together with the
kernel's parameters, is assumed to have been learned from a training procedure
(MCMC or SVI).
:param torch.Tensor Xnew: Input data for testing. Note that
``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.
:param bool full_cov: A flag to decide if we want to predict full covariance
matrix or just variance.
:param bool noiseless: A flag to decide if we want to include noise in the
prediction output or not.
:returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`
:rtype: tuple(torch.Tensor, torch.Tensor)
"""<line_sep>self._check_Xnew_shape(Xnew)<line_sep>self.set_mode("guide")<line_sep>N=self.X.size(0)<line_sep>Kff=self.kernel(self.X).contiguous()<line_sep>Kff.view(-1)[::N+1]<augadd>self.jitter+self.noise# add noise to the diagonal
Lff=torch.linalg.cholesky(Kff)<line_sep>y_residual=self.y-self.mean_function(self.X)<line_sep>loc,cov=conditional(Xnew self.X self.kernel y_residual <none> Lff full_cov jitter=self.jitter )<if_stmt>full_cov<and><not>noiseless<block_start>M=Xnew.size(0)<line_sep>cov=cov.contiguous()<line_sep>cov.view(-1 M<times>M)[: ::M+1]<augadd>self.noise# add noise to the diagonal
<block_end><if_stmt><not>full_cov<and><not>noiseless<block_start>cov=cov+self.noise<block_end><return>loc+self.mean_function(Xnew) cov<block_end><def_stmt>iter_sample self noiseless=<true><block_start>r"""
Iteratively constructs a sample from the Gaussian Process posterior.
Recall that at test input points :math:`X_{new}`, the posterior is
multivariate Gaussian distributed with mean and covariance matrix
given by :func:`forward`.
This method samples lazily from this multivariate Gaussian. The advantage
of this approach is that later query points can depend upon earlier ones.
This is particularly useful when the querying is to be done by an
optimisation routine.
.. note:: The noise parameter ``noise`` (:math:`\epsilon`), together with the
kernel's parameters, is assumed to have been learned from a training procedure
(MCMC or SVI).
:param bool noiseless: A flag to decide if we want to add sampling noise
to the samples beyond the noise inherent in the GP posterior.
:returns: sampler
:rtype: function
"""<line_sep>noise=self.noise.detach()<line_sep>X=self.X.clone().detach()<line_sep>y=self.y.clone().detach()<line_sep>N=X.size(0)<line_sep>Kff=self.kernel(X).contiguous()<line_sep>Kff.view(-1)[::N+1]<augadd>noise# add noise to the diagonal
outside_vars={"X":X "y":y "N":N "Kff":Kff}<def_stmt>sample_next xnew outside_vars<block_start>"""Repeatedly samples from the Gaussian process posterior,
conditioning on previously sampled values.
"""<line_sep>warn_if_nan(xnew)<line_sep># Variables from outer scope
X,y,Kff=outside_vars["X"] outside_vars["y"] outside_vars["Kff"]<line_sep># Compute Cholesky decomposition of kernel matrix
Lff=torch.linalg.cholesky(Kff)<line_sep>y_residual=y-self.mean_function(X)<line_sep># Compute conditional mean and variance
loc,cov=conditional(xnew X self.kernel y_residual <none> Lff <false> jitter=self.jitter)<if_stmt><not>noiseless<block_start>cov=cov+noise<block_end>ynew=torchdist.Normal(loc+self.mean_function(xnew) cov.sqrt()).rsample()<line_sep># Update kernel matrix
N=outside_vars["N"]<line_sep>Kffnew=Kff.new_empty(N+1 N+1)<line_sep>Kffnew[:N :N]=Kff<line_sep>cross=self.kernel(X xnew).squeeze()<line_sep>end=self.kernel(xnew xnew).squeeze()<line_sep>Kffnew[N :N]=cross<line_sep>Kffnew[:N N]=cross<line_sep># No noise, just jitter for numerical stability
Kffnew[N N]=end+self.jitter<line_sep># Heuristic to avoid adding degenerate points
<if_stmt>Kffnew.logdet()<g>-15.0<block_start>outside_vars["Kff"]=Kffnew<line_sep>outside_vars["N"]<augadd>1<line_sep>outside_vars["X"]=torch.cat((X xnew))<line_sep>outside_vars["y"]=torch.cat((y ynew))<block_end><return>ynew<block_end><return><lambda>xnew:sample_next(xnew outside_vars)<block_end><block_end> |
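An end-to-end sketch of the class above on synthetic data, following the standard Pyro GP tutorial pattern (hyperparameters are optimized by gradient descent on the `Trace_ELBO` loss, which for this model is the negative log marginal likelihood):

import torch
import pyro.contrib.gp as gp
from pyro.infer import Trace_ELBO

# Synthetic 1-D regression data
X = torch.linspace(0.0, 5.0, 20)
y = torch.sin(X) + 0.1 * torch.randn(20)

kernel = gp.kernels.RBF(input_dim=1)
gpr = gp.models.GPRegression(X, y, kernel, noise=torch.tensor(0.1))

# Optimize kernel hyperparameters and the noise variance
optimizer = torch.optim.Adam(gpr.parameters(), lr=0.01)
loss_fn = Trace_ELBO().differentiable_loss
for _ in range(500):
    optimizer.zero_grad()
    loss = loss_fn(gpr.model, gpr.guide)
    loss.backward()
    optimizer.step()

# Posterior mean and variance at new inputs (O(N^3) in the training set size)
Xnew = torch.linspace(0.0, 5.0, 50)
with torch.no_grad():
    loc, var = gpr(Xnew, full_cov=False, noiseless=False)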
# Image steganography using LSB (least-significant-bit) embedding
<import_stmt>cv2<def_stmt>encode input_image_name output_image_name file_name<block_start>input_image=cv2.imread(input_image_name)<line_sep>height,width,nbchannels=input_image.shape<line_sep>size=width<times>height<line_sep>current_width=0<line_sep>current_height=0<line_sep>current_channel=0<line_sep>maskonevalues=[1 2 4 8 16 32 64 128]<line_sep>maskone=maskonevalues.pop(0)<line_sep>maskzerovalues=[254 253 251 247 239 223 191 127]<line_sep>maskzero=maskzerovalues.pop(0)<line_sep>data=open(file_name "rb").read()<line_sep>length=len(data)<if_stmt>(width<times>height<times>nbchannels<l>length+64)<block_start><raise>Exception("Not enough space to hold all steganographic data")<block_end>binary_value=bin(length)[2:]<if_stmt>(len(binary_value)<g>64)<block_start><raise>Exception("Binary Value larger than expected")<block_end><else_stmt><block_start><while_stmt>(len(binary_value)<l>64)<block_start>binary_value="0"+binary_value<block_end><block_end><for_stmt>c binary_value<block_start>value=list(input_image[current_height current_width])<if_stmt>(int(c)<eq>1)<block_start>value[current_channel]=int(value[current_channel])|maskone<block_end><else_stmt><block_start>value[current_channel]=int(value[current_channel])&maskzero<block_end>input_image[current_height current_width]=tuple(value)<if_stmt>(current_channel<eq>nbchannels-1)<block_start>current_channel=0<if_stmt>(current_width<eq>width-1)<block_start>current_width=0<if_stmt>(current_height<eq>height-1)<block_start>current_height=0<if_stmt>maskone<eq>128<block_start><raise>Exception("No more space available in image")<block_end><else_stmt><block_start>maskone=maskonevalues.pop(0)<line_sep>maskzero=maskzerovalues.pop(0)<block_end><block_end><else_stmt><block_start>current_height<augadd>1<block_end><block_end><else_stmt><block_start>current_width<augadd>1<block_end><block_end><else_stmt><block_start>current_channel<augadd>1<block_end><block_end><for_stmt>byte data<block_start><if_stmt>(isinstance(byte int))<block_start><pass><block_end><else_stmt><block_start>byte=ord(byte)<block_end>binv=bin(byte)[2:]<if_stmt>(len(binv)<g>8)<block_start><raise>Exception("Binary Value larger than expected")<block_end><else_stmt><block_start><while_stmt>(len(binv)<l>8)<block_start>binv="0"+binv<block_end><block_end><for_stmt>c binv<block_start>val=list(input_image[current_height current_width])<if_stmt>(int(c)<eq>1)<block_start>val[current_channel]=int(val[current_channel])|maskone<block_end><else_stmt><block_start>val[current_channel]=int(val[current_channel])&maskzero<block_end>input_image[current_height current_width]=tuple(val)<if_stmt>(current_channel<eq>nbchannels-1)<block_start>current_channel=0<if_stmt>(current_width<eq>width-1)<block_start>current_width=0<if_stmt>(current_height<eq>height-1)<block_start>current_height=0<if_stmt>maskone<eq>128<block_start><raise>Exception("No more space available in image")<block_end><else_stmt><block_start>maskone=maskonevalues.pop(0)<line_sep>maskzero=maskzerovalues.pop(0)<block_end><block_end><else_stmt><block_start>current_height<augadd>1<block_end><block_end><else_stmt><block_start>current_width<augadd>1<block_end><block_end><else_stmt><block_start>current_channel<augadd>1<block_end><block_end>cv2.imwrite(output_image_name input_image)<block_end><block_end><def_stmt>decode encoded_image_name 
extracted_file_name<block_start>encoded_image=cv2.imread(encoded_image_name)<line_sep>height,width,nbchannels=encoded_image.shape<line_sep>size=width<times>height<line_sep>current_width=0<line_sep>current_height=0<line_sep>current_channel=0<line_sep>maskonevalues=[1 2 4 8 16 32 64 128]<line_sep>maskone=maskonevalues.pop(0)<line_sep>maskzerovalues=[254 253 251 247 239 223 191 127]<line_sep>maskzero=maskzerovalues.pop(0)<line_sep>bits=""<for_stmt>i range(64)<block_start>value=encoded_image[current_height current_width][current_channel]<line_sep>value=int(value)&maskone<if_stmt>(current_channel<eq>nbchannels-1)<block_start>current_channel=0<if_stmt>(current_width<eq>width-1)<block_start>current_width=0<if_stmt>(current_height<eq>height-1)<block_start>current_height=0<if_stmt>(maskone<eq>128)<block_start><raise>Exception("No more space available in image")<block_end><else_stmt><block_start>maskone=maskonevalues.pop(0)<line_sep>maskzero=maskzerovalues.pop(0)<block_end><block_end><else_stmt><block_start>current_height<augadd>1<block_end><block_end><else_stmt><block_start>current_width<augadd>1<block_end><block_end><else_stmt><block_start>current_channel<augadd>1<block_end><if_stmt>(value<g>0)<block_start>bits<augadd>"1"<block_end><else_stmt><block_start>bits<augadd>"0"<block_end><block_end>length=int(bits 2)<line_sep>output=b""<for_stmt>i range(length)<block_start>bits=""<for_stmt>i range(8)<block_start>value=encoded_image[current_height current_width][current_channel]<line_sep>value=int(value)&maskone<if_stmt>(current_channel<eq>nbchannels-1)<block_start>current_channel=0<if_stmt>(current_width<eq>width-1)<block_start>current_width=0<if_stmt>(current_height<eq>height-1)<block_start>current_height=0<if_stmt>(maskone<eq>128)<block_start><raise>Exception("No more space available in image")<block_end><else_stmt><block_start>maskone=maskonevalues.pop(0)<line_sep>maskzero=maskzerovalues.pop(0)<block_end><block_end><else_stmt><block_start>current_height<augadd>1<block_end><block_end><else_stmt><block_start>current_width<augadd>1<block_end><block_end><else_stmt><block_start>current_channel<augadd>1<block_end><if_stmt>(value<g>0)<block_start>bits<augadd>"1"<block_end><else_stmt><block_start>bits<augadd>"0"<block_end><block_end>output<augadd>bytearray([int(bits 2)])<block_end>f=open(extracted_file_name "wb")<line_sep>f.write(output)<line_sep>f.close()<block_end><if_stmt>__name__<eq>"__main__"<block_start>input_string=input()<line_sep>#encode input_image_name output_image_name file_name
#decode encoded_image_name extracted_file_name
input_list=input_string.split()<if_stmt>input_list[0]<eq>"encode"<block_start>encode(input_list[1] input_list[2] input_list[3])<line_sep>print(f"{input_list[2]}")<block_end><elif_stmt>input_list[0]<eq>"decode"<block_start>decode(input_list[1] input_list[2])<line_sep>print(f"{input_list[2]}")<block_end><else_stmt><block_start>print("Invalid Entry")<block_end><block_end> |
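The bit-plane masks above are the heart of the scheme, so here is a tiny self-contained worked example of the masking arithmetic (illustration only, independent of the image code):

# maskone sets a bit (value | maskone), maskzero clears it (value & maskzero).
# For the least-significant plane:
maskone, maskzero = 1, 254          # 0b00000001 and 0b11111110

pixel = 200                         # 0b11001000
assert pixel | maskone == 201       # embed a 1-bit -> 0b11001001
assert pixel & maskzero == 200      # embed a 0-bit -> LSB was already 0

# When a plane is exhausted the code moves to the next bit plane:
# maskone doubles (1, 2, 4, ..., 128) and maskzero is its complement.
for m1, m0 in zip([1, 2, 4, 8, 16, 32, 64, 128],
                  [254, 253, 251, 247, 239, 223, 191, 127]):
    assert m0 == 255 - m1

# Typical invocation of the functions above:
#   encode("cover.png", "stego.png", "secret.txt")
#   decode("stego.png", "recovered.txt")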
# (C) <NAME> and Carnegie Mellon University, 2017
<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>os<import_stmt>unittest<import_stmt>sys<import_stmt>collections<import_stmt>tempfile<import_from_stmt>tensorlog xctargets<if_stmt>xctargets.tf<block_start><import_stmt>tensorflow<as>tf<import_from_stmt>tensorlog tensorflowxcomp<block_end><else_stmt><block_start>tensorflowxcomp=<none><block_end><if_stmt>xctargets.theano<block_start><import_stmt>theano<import_from_stmt>tensorlog theanoxcomp<block_end><else_stmt><block_start>theanoxcomp=<none><block_end><import_from_stmt>tensorlog bpcompiler<import_from_stmt>tensorlog comline<import_from_stmt>tensorlog dataset<import_from_stmt>tensorlog declare<import_from_stmt>tensorlog matrixdb<import_from_stmt>tensorlog learn<import_from_stmt>tensorlog mutil<import_from_stmt>tensorlog parser<import_from_stmt>tensorlog program<import_from_stmt>tensorlog simple<import_from_stmt>tensorlog testtensorlog<import_from_stmt>tensorlog funs<import_from_stmt>tensorlog ops<import_from_stmt>tensorlog learnxcomp<as>learnxc<import_from_stmt>tensorlog.expt Expt<if_stmt>xctargets.tf<block_start>tf.logging.set_verbosity(tf.logging.WARN)<block_end>TESTED_COMPILERS=[]<line_sep>TESTED_LEARNERS={}<if_stmt>xctargets.theano<block_start><for_stmt>c [theanoxcomp.DenseMatDenseMsgCrossCompiler theanoxcomp.SparseMatDenseMsgCrossCompiler]<block_start>TESTED_COMPILERS.append(c)<line_sep>TESTED_LEARNERS[c]=theanoxcomp.FixedRateGDLearner<block_end><block_end><if_stmt>xctargets.tf<block_start><for_stmt>c [tensorflowxcomp.DenseMatDenseMsgCrossCompiler tensorflowxcomp.SparseMatDenseMsgCrossCompiler ]<block_start>TESTED_COMPILERS.append(c)<line_sep>TESTED_LEARNERS[c]=tensorflowxcomp.FixedRateGDLearner<block_end><block_end>RUN_OLD_INFERENCE_TESTS=<false><line_sep>SAVE_SUMMARIES=<false><def_stmt>close_cross_compiler xc<block_start>xc.close()<if_stmt>xctargets.tf<and>isinstance(xc tensorflowxcomp.TensorFlowCrossCompiler)<block_start>tf.reset_default_graph()<block_end><block_end><class_stmt>TestXCSmallProofs(testtensorlog.TestSmallProofs)<block_start><def_stmt>test_if self<block_start>self.xcomp_check(['p(X,Y):-spouse(X,Y).'] 'p(i,o)' 'william' {'susan':1.0})<block_end><def_stmt>test_failure self<block_start>self.xcomp_check(['p(X,Y):-spouse(X,Y).'] 'p(i,o)' 'lottie' {matrixdb.NULL_ENTITY_NAME:1.0})<block_end><def_stmt>test_reverse_if self<block_start>self.xcomp_check(['p(X,Y):-sister(Y,X).'] 'p(i,o)' 'rachel' {'william':1.0})<block_end><def_stmt>test_or self<block_start>self.xcomp_check(['p(X,Y):-spouse(X,Y).' 'p(X,Y):-sister(X,Y).'] 'p(i,o)' 'william' {'susan':1.0 'rachel':1.0 'lottie':1.0 'sarah':1.0})<block_end><def_stmt>test_chain self<block_start>self.xcomp_check(['p(X,Z):-spouse(X,Y),sister(Y,Z).'] 'p(i,o)' 'susan' {'rachel':1.0 'lottie':1.0 'sarah':1.0})<line_sep>self.xcomp_check(['p(X,Z):-sister(X,Y),child(Y,Z).'] 'p(i,o)' 'william' {'charlotte':1.0 'lucas':1.0 'poppy':1.0 'caroline':1.0 'elizabeth':1.0})<block_end><def_stmt>test_mid self<block_start>self.xcomp_check(['p(X,Y):-sister(X,Y),child(Y,Z).'] 'p(i,o)' 'william' {'sarah':1.0 'rachel':2.0 'lottie':2.0})<block_end><def_stmt>test_nest self<block_start>self.xcomp_check(['s(X,Y):-spouse(X,Y).' 't(X,Z):-spouse(X,Y),s(Y,Z).'] 't(i,o)' 'susan' {'susan':1.0})<block_end><def_stmt>test_back1 self# fails for tensorflowxcomp
<block_start>self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z).'] 'p(i,o)' 'william' {'susan':3.0})<block_end><def_stmt>test_back2 self<block_start>self.xcomp_check(['p(X,Y):-spouse(X,Y),sister(X,Z1),sister(X,Z2).'] 'p(i,o)' 'william' {'susan':9.0})<block_end><def_stmt>test_rec1 self<block_start>program.DEFAULT_MAXDEPTH=4<line_sep>self.xcomp_check(['p(X,Y):-spouse(X,Y).' 'p(X,Y):-p(Y,X).'] 'p(i,o)' 'william' {'susan':5.0})<line_sep>program.DEFAULT_MAXDEPTH=10<line_sep>self.xcomp_check(['p(X,Y):-spouse(X,Y).' 'p(X,Y):-p(Y,X).'] 'p(i,o)' 'william' {'susan':11.0})<block_end><def_stmt>test_const_output self<block_start>self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'] 'sis(i,o)' 'sarah' {'william':1.0})<line_sep>self.xcomp_check(['sis(X,W):-assign(W,william),child(X,Y).'] 'sis(i,o)' 'lottie' {'william':2.0})<block_end><def_stmt>test_const_chain1 self<block_start>self.xcomp_check(['p(X,S) :- assign(S,susan),sister(X,Y),child(Y,Z).'] 'p(i,o)' 'william' {'susan':5.0})<block_end><def_stmt>test_const_chain2 self<block_start>self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'] 'p(i,o)' 'sarah' {'pos':1.0})<line_sep>self.xcomp_check(['p(X,Pos) :- assign(Pos,pos),child(X,Y),young(Y).'] 'p(i,o)' 'lottie' {'pos':2.0})<block_end><def_stmt>test_alt_chain self<block_start>self.xcomp_check(['p(X,W) :- spouse(X,W),sister(X,Y),child(Y,Z).'] 'p(i,o)' 'william' {'susan':5.0})<line_sep><pass><block_end><def_stmt>test_proppr1 self<block_start>w=7<times>self.db.onehot('r1')+3<times>self.db.onehot('r2')<line_sep>self.proppr_xcomp_check(w ['p(X,Y):-sister(X,Y) {r1}.' 'p(X,Y):-spouse(X,Y) {r2}.'] 'p(i,o)' 'william' {'sarah':7.0 'rachel':7.0 'lottie':7.0 'susan':3.0})<block_end><def_stmt>test_proppr2 self<block_start>w=3<times>self.db.onehot('r2')<line_sep>self.proppr_xcomp_check(w ['p(X,Y):-spouse(Y,X) {r2}.'] 'p(i,o)' 'susan' {'william':3.0})<block_end><def_stmt>test_reuse1 self<block_start>self.xcomp_check(['p(X,Y) :- r(X,Z),r(Z,Y).' 'r(X,Y):-spouse(X,Y).'] 'p(i,o)' 'william' {'william':1.0})<block_end><def_stmt>_removeZeros self sdict<block_start><if_stmt><true><block_start><return>sdict<block_end>e=sdict[<none>]<line_sep>ret=dict([(k v-e)<for>(k v) list(sdict.items())<if>v<ne>e])<line_sep>z=sum(ret.values())<for_stmt>k ret<block_start>ret[k]=ret[k]/z<block_end><return>ret<block_end><def_stmt>xcomp_check self ruleStrings mode_string input_symbol expected_result_dict compare=<false><block_start>self._xcomp_check('vanilla' <none> ruleStrings mode_string input_symbol expected_result_dict compare)<block_end><def_stmt>proppr_xcomp_check self weightVec ruleStrings mode_string input_symbol expected_result_dict<block_start>self._xcomp_check('proppr' weightVec ruleStrings mode_string input_symbol expected_result_dict)<block_end><def_stmt>_xcomp_check self progType weightVec ruleStrings mode_string input_symbol expected_result_dict compare=<false># run the base class check to see that the inference is correct
<block_start><if_stmt>RUN_OLD_INFERENCE_TESTS<block_start><if_stmt>progType<eq>'proppr'<block_start>self.proppr_inference_check(weightVec ruleStrings mode_string input_symbol expected_result_dict)<block_end><else_stmt><block_start>self.inference_check(ruleStrings mode_string input_symbol expected_result_dict)<block_end><block_end># set up the next round of tests by compiling a tensorlog
# Program - this code is lifted from the testtensorlog
# inference routines
print('xcomp inference for mode' mode_string 'on input' input_symbol)<line_sep>testtensorlog.softmax_normalize(expected_result_dict)<line_sep>rules=parser.RuleCollection()<for_stmt>r ruleStrings<block_start>rules.add(parser.Parser().parseRule(r))<block_end><if_stmt>progType<eq>'proppr'<block_start>prog=program.ProPPRProgram(db=self.db rules=rules weights=weightVec)<block_end><else_stmt><block_start>prog=program.Program(db=self.db rules=rules)<block_end><for_stmt>compilerClass TESTED_COMPILERS#cross-compile the function
<block_start>xc=compilerClass(prog)<line_sep># evaluate the function and get the output y
#xc.show()
print('== performing eval with' compilerClass '==')<line_sep>inferenceFun=xc.inferenceFunction(mode_string)<line_sep>y=inferenceFun(prog.db.onehot(input_symbol))<line_sep># print 'input',xc.getInputName(mode_string),'args,fun
# =',xc.inference(mode_string). The theano output will be a (probably
# dense) message, so just compare and check that the maximal
# elements from these two dicts are the same
actual_result_dict=self.db.rowAsSymbolDict(y)<line_sep>self.check_maxes_in_dicts(actual_result_dict expected_result_dict)<line_sep># check it's normalized
l1_error=abs(sum(actual_result_dict.values())-1.0)<line_sep>#print 'l1_error',l1_error,'actual_result_dict',actual_result_dict,'expected_result_dict',expected_result_dict
self.assertTrue(l1_error<l>0.0001)<line_sep># also test proofCountFun
proofCountFun=xc.proofCountFunction(mode_string)<line_sep>pc=proofCountFun(prog.db.onehot(input_symbol))<line_sep># theano output will be a (probably dense) message, so
# just check that the maximal elements from these two dicts
# are the same
pc_result_dict=self.db.rowAsSymbolDict(pc)<if_stmt>len(pc_result_dict)<g>0<block_start>self.check_maxes_in_dicts(pc_result_dict expected_result_dict)<block_end>print('== eval checks passed ==')<line_sep>close_cross_compiler(xc)<block_end><block_end><def_stmt>check_maxes_in_dicts self actual expected<block_start><def_stmt>maximalElements d<block_start>m=max(d.values())<line_sep><return>set(k<for>k d<if>d[k]<eq>m)<block_end>actualMaxes=maximalElements(actual)<line_sep>expectedMaxes=maximalElements(expected)<line_sep>print('actual' actualMaxes 'expected' expectedMaxes)<for_stmt>a actualMaxes<block_start>self.assertTrue(a<in>expectedMaxes)<block_end><for_stmt>a expectedMaxes<block_start>self.assertTrue(a<in>actualMaxes)<block_end><block_end><block_end><class_stmt>TestXCGrad(testtensorlog.TestGrad)<block_start><def_stmt>setUp self<block_start>self.db=matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR 'fam.cfacts'))<block_end><def_stmt>test_if self<block_start>rules=['p(X,Y):-sister(X,Y).']<line_sep>mode='p(i,o)'<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'sarah'])] {'sister(william,rachel)':+1 'sister(william,sarah)':+1 'sister(william,lottie)':-1})<line_sep>self.xgrad_check(rules mode params [('william' ['lottie'])] {'sister(william,rachel)':-1 'sister(william,lottie)':+1})<block_end><def_stmt>test_if2 self<block_start>rules=['p(X,Y):-sister(X,Y).']<line_sep>mode='p(i,o)'<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'sarah']) ('william' ['rachel' 'sarah'])] {'sister(william,rachel)':+1 'sister(william,sarah)':+1 'sister(william,lottie)':-1})<line_sep>self.xgrad_check(rules mode params [('william' ['lottie']) ('william' ['lottie'])] {'sister(william,rachel)':-1 'sister(william,lottie)':+1})<block_end><def_stmt>test_reverse_if self<block_start>rules=['p(X,Y):-parent(Y,X).']<line_sep>mode='p(i,o)'<line_sep>params=[('parent' 2)]<line_sep>self.xgrad_check(rules mode params [('lottie' ['charlotte'])] {'parent(charlotte,lottie)':+1 'parent(lucas,lottie)':-1})<block_end><def_stmt>test_chain1 self<block_start>rules=['p(X,Z):-sister(X,Y),child(Y,Z).']<line_sep>mode='p(i,o)'<line_sep>self.xgrad_check(rules mode [('sister' 2)] [('william' ['caroline' 'elizabeth'])] {'sister(william,rachel)':+1 'sister(william,lottie)':-1})<line_sep>self.xgrad_check(rules mode [('child' 2)] [('william' ['caroline' 'elizabeth'])] {'child(rachel,elizabeth)':+1 'child(lottie,lucas)':-1})<line_sep>self.xgrad_check(rules mode [('child' 2) ('sister' 2)] [('william' ['caroline' 'elizabeth'])] {'child(rachel,elizabeth)':+1 'child(lottie,lucas)':-1 'sister(william,rachel)':+1 'sister(william,lottie)':-1})<block_end><def_stmt>test_chain2 self<block_start>rules=['p(X,Z):-spouse(X,Y),sister(Y,Z).']<line_sep>mode='p(i,o)'<line_sep>self.xgrad_check(rules mode [('sister' 2)] [('susan' ['rachel'])] {'sister(william,rachel)':+1 'sister(william,lottie)':-1})<block_end><def_stmt>test_call1 self<block_start>rules=['q(X,Y):-sister(X,Y).' 'p(Z,W):-q(Z,W).']<line_sep>mode='p(i,o)'<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'sarah'])] {'sister(william,rachel)':+1 'sister(william,sarah)':+1 'sister(william,lottie)':-1})<line_sep>self.xgrad_check(rules mode params [('william' ['lottie'])] {'sister(william,rachel)':-1 'sister(william,lottie)':+1})<block_end><def_stmt>test_call2 self<block_start>rules=['q(X,Y):-sister(X,Y).' 'p(Z,W):-r(Z,W).' 
'r(Z,W):-q(Z,W).']<line_sep>mode='p(i,o)'<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'sarah'])] {'sister(william,rachel)':+1 'sister(william,sarah)':+1 'sister(william,lottie)':-1})<line_sep>self.xgrad_check(rules mode params [('william' ['lottie'])] {'sister(william,rachel)':-1 'sister(william,lottie)':+1})<block_end><def_stmt>test_split self<block_start>rules=['p(X,Y):-sister(X,Y),child(Y,Z),young(Z).']<line_sep>mode='p(i,o)'<line_sep>params=[('child' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['lottie'])] {'child(lottie,lucas)':+1 'child(lottie,charlotte)':+1 'child(sarah,poppy)':-1})<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['lottie'])] {'sister(william,lottie)':+1 'sister(william,sarah)':-1})<block_end><def_stmt>test_or self<block_start>rules=['p(X,Y):-child(X,Y).' 'p(X,Y):-sister(X,Y).']<line_sep>mode='p(i,o)'<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['charlie' 'rachel'])] {'sister(william,rachel)':+1 'sister(william,sarah)':-1 'sister(william,lottie)':-1})<line_sep>params=[('child' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['charlie' 'rachel'])] {'child(william,charlie)':+1 'child(william,josh)':-1})<line_sep>params=[('child' 2) ('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['charlie' 'rachel'])] {'child(william,charlie)':+1 'child(william,josh)':-1 'sister(william,rachel)':+1 'sister(william,sarah)':-1})<block_end><def_stmt>test_weighted_vec self<block_start>rules=['p(X,Y):-sister(X,Y),assign(R,r1),feat(R).' 'p(X,Y):-child(X,Y),assign(R,r2),feat(R).']<line_sep>mode='p(i,o)'<line_sep>params=[('sister' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'charlie'])] {'sister(william,rachel)':+1 'sister(william,sarah)':-1})<line_sep>params=[('child' 2)]<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'charlie'])] {'child(william,charlie)':+1 'child(william,josh)':-1})<line_sep>params=[('feat' 1)]<line_sep>self.xgrad_check(rules mode params [('william' ['josh' 'charlie'])] {'feat(r1)':-1 'feat(r2)':+1})<line_sep>self.xgrad_check(rules mode params [('william' ['rachel' 'sarah' 'lottie'])] {'feat(r1)':+1 'feat(r2)':-1})<block_end><def_stmt>learnxc_check self rule_strings mode_string params xyPairs expected<block_start>print("XLearner loss/grad eval")<line_sep>rules=testtensorlog.rules_from_strings(rule_strings)<line_sep>prog=program.Program(db=self.db rules=rules)<line_sep>mode=declare.ModeDeclaration(mode_string)<line_sep>prog.db.clearParameterMarkings()<for_stmt>(functor arity) params<block_start>prog.db.markAsParameter(functor arity)<block_end># TODO: not working yet for mini-batches so check each example
# individually
<for_stmt>x,ys xyPairs<block_start>data=testtensorlog.DataBuffer(self.db)<line_sep>data.add_data_symbols(x ys)<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(prog)<line_sep>print('learner check for compiler' xc.__class__)<line_sep>learner=learnxc.XLearner(prog xc)<line_sep>paramsWithUpdates=learner.crossEntropyGrad(mode data.get_x() data.get_y())<line_sep>updates_with_string_keys={}<for_stmt>(functor arity),up paramsWithUpdates<block_start>print('testxcomp update for' functor arity 'is' up)<line_sep>upDict=prog.db.matrixAsPredicateFacts(functor arity up)<line_sep>print('upDict' upDict)<for_stmt>fact,grad_of_fact list(upDict.items())# need to flip for cross-compilers
<block_start>updates_with_string_keys[str(fact)]=-grad_of_fact<block_end><block_end>self.check_directions(updates_with_string_keys expected)<block_end><block_end><block_end><def_stmt>xgrad_check self rule_strings mode_string params xyPairs expected<block_start>print("direct loss/grad eval")<line_sep>rules=testtensorlog.rules_from_strings(rule_strings)<line_sep>prog=program.Program(db=self.db rules=rules)<line_sep>prog.db.clearParameterMarkings()<for_stmt>(functor arity) params<block_start>prog.db.markAsParameter(functor arity)<block_end><for_stmt>x,ys xyPairs<block_start>data=testtensorlog.DataBuffer(self.db)<line_sep>data.add_data_symbols(x ys)<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(prog)<line_sep>print('grad check for compiler' xc.__class__)<line_sep>gradFun=xc.dataLossGradFunction(mode_string)<line_sep>updates_with_string_keys={}<line_sep>paramsWithUpdates=gradFun(data.get_x() data.get_y())<for_stmt>(functor arity),up paramsWithUpdates<block_start>upDict=prog.db.matrixAsPredicateFacts(functor arity up)<for_stmt>fact,grad_of_fact list(upDict.items())# need to flip for cross-compilers
<block_start>updates_with_string_keys[str(fact)]=-grad_of_fact<block_end><block_end>self.check_directions(updates_with_string_keys expected)<block_end><block_end>self.learnxc_check(rule_strings mode_string params xyPairs expected)<line_sep>close_cross_compiler(xc)<block_end><block_end><class_stmt>TestXCProPPR(testtensorlog.TestProPPR)<block_start><def_stmt>setUp self<block_start>super(TestXCProPPR self).setUp()<block_end><def_stmt>debug self<block_start><return>self<block_end><def_stmt>evalxc self xc input<block_start>inferenceFun=xc.inferenceFunction('predict/io')<line_sep>print(inferenceFun)<line_sep>rawPred=inferenceFun(input)<line_sep># trim small numbers to zero
pred=mutil.mapData(<lambda>d:np.clip((d-1e-5) 0.00 9999.99) rawPred)<line_sep>pred.eliminate_zeros()<line_sep><return>pred<block_end><def_stmt>testNativeRow self<block_start><for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(self.prog)<for_stmt>i range(self.numExamples)<block_start>pred=self.evalxc(xc self.X.getrow(i))<line_sep>d=self.prog.db.rowAsSymbolDict(pred)<line_sep>uniform={'pos':0.5 'neg':0.5}<line_sep>self.check_dicts(d uniform)<block_end>close_cross_compiler(xc)<block_end><block_end><def_stmt>testNativeMatrix self<block_start><for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(self.prog)<line_sep>xc.ensureCompiled(self.mode inputs=<none>)<line_sep>pred=self.prog.eval(self.mode [self.X])<line_sep>d0=self.prog.db.matrixAsSymbolDict(pred)<for_stmt>i,d list(d0.items())<block_start>uniform={'pos':0.5 'neg':0.5 }<line_sep>self.check_dicts(d uniform)<block_end>close_cross_compiler(xc)<block_end><block_end><def_stmt>testGradVector self<block_start>data=testtensorlog.DataBuffer(self.prog.db)<line_sep>X,Y=testtensorlog.matrixAsTrainingData(self.labeledData 'train' 2)<line_sep>learner=learn.OnePredFixedRateGDLearner(self.prog)<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(self.prog)<line_sep>self.prog.db.markAsParameter('weighted' 1)<line_sep>#xc.compile(self.mode)
gradFun=xc.dataLossGradFunction('predict/io')<for_stmt>i range(X.shape[0])<block_start>print("example" i)<line_sep>updates=learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)') X[i] Y[i])<line_sep>w0=updates[('weighted' 1)].sum(axis=0)<line_sep>print(w0)<line_sep>updates=gradFun(X[i] Y[i])<line_sep>paramKey,w=updates[0]<line_sep>print(w)<line_sep># w is different from the w in the corresponding testtensorlog test,
# which is a crossEntropy gradient for each example, but it should point
# in the opposite direction
nrow,ncol=w.shape<for_stmt>i range(nrow)<block_start><for_stmt>j range(ncol)<block_start>self.assertTrue((w[i j]<eq>0)<eq>(w0[i j]<eq>0))<line_sep>self.assertTrue(w[i j]<times>w0[i j]<le>0)<block_end><block_end><block_end><block_end><block_end><def_stmt>testGradMatrix self<block_start>data=testtensorlog.DataBuffer(self.prog.db)<line_sep>X,Y=testtensorlog.matrixAsTrainingData(self.labeledData 'train' 2)<line_sep>learner=learn.OnePredFixedRateGDLearner(self.prog)<line_sep>updates=learner.crossEntropyGrad(declare.ModeDeclaration('predict(i,o)') X Y)<line_sep>w0=updates[('weighted' 1)].sum(axis=0)<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(self.prog)<line_sep>self.prog.db.markAsParameter('weighted' 1)<line_sep>#xc.compile(self.mode)
gradFun=xc.dataLossGradFunction('predict/io')<line_sep>updates=gradFun(X Y)<line_sep>paramKey,w=updates[0]<line_sep># w is different from the w in the corresponding testtensorlog test,
# which is a crossEntropy gradient for each example, but it should point
# in the opposite direction
nrow,ncol=w.shape<for_stmt>i range(nrow)<block_start><for_stmt>j range(ncol)<block_start>self.assertTrue((w[i j]<eq>0)<eq>(w0[i j]<eq>0) "i=%d,j=%d,w=%g,w0=%g"%(i j w[i j] w0[i j]))<line_sep>self.assertTrue(w[i j]<times>w0[i j]<le>0.0 "i=%d,j=%d,w=%g,w0=%g"%(i j w[i j] w0[i j]))<block_end><block_end>close_cross_compiler(xc)<block_end><block_end><def_stmt>testMultiLearn1 self<block_start><pass><block_end><def_stmt>testLearn self<block_start>mode=declare.ModeDeclaration('predict(i,o)')<line_sep>modestr='predict/io'<line_sep>X,Y=testtensorlog.matrixAsTrainingData(self.labeledData 'train' 2)<for_stmt>compilerClass TESTED_COMPILERS<block_start>self.prog.setRuleWeights()<line_sep>self.prog.setFeatureWeights()<if_stmt>SAVE_SUMMARIES<block_start>xc=compilerClass(self.prog compilerClass.__name__+".summary")<block_end><else_stmt><block_start>xc=compilerClass(self.prog)<block_end>self.prog.db.markAsParameter('weighted' 1)<line_sep>v=self.prog.db.getParameter('weighted' 1)<line_sep>d=self.prog.db.rowAsSymbolDict(v)<line_sep># sanity check a couple of values
self.assertTrue(d['little_pos']<eq>d['little_neg'])<line_sep>self.assertTrue(d['big_pos']<eq>d['big_neg'])<line_sep># optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
learner=TESTED_LEARNERS[compilerClass](self.prog xc=xc rate=0.1 epochs=20)<line_sep>lossFun=xc.dataLossFunction('predict/io')<line_sep>loss0=lossFun(X Y)<line_sep>print('initial train data loss' loss0)<line_sep>TX,TY=testtensorlog.matrixAsTrainingData(self.labeledData 'test' 2)<line_sep>loss1=lossFun(TX TY)<line_sep>print('initial test data loss' loss1)<line_sep>P=learner.predict('predict/io' X)<line_sep>#acc0 = xc.accuracy('predict/io',X,Y)
acc0=learner.accuracy(Y P)<line_sep>print('initial train accuracy' acc0)<line_sep>TP=learner.predict('predict/io' TX)<line_sep>#acc1 = xc.accuracy('predict/io',TX,TY)
acc1=learner.accuracy(TY TP)<line_sep>print('initial test accuracy' acc1)<line_sep>print('params to optimize' xc.prog.getParamList())<line_sep>print('vars to optimize' xc.getParamVariables('predict/io'))<line_sep># xc.optimizeDataLoss('predict/io', optimizer, X, Y, epochs=20)
learner.trainMode('predict/io' X Y)<line_sep>loss2=lossFun(X Y)<line_sep>print('final train data loss' loss2)<line_sep>loss3=lossFun(TX TY)<line_sep>print('final test data loss' loss3)<line_sep>P2=learner.predict('predict/io' X)<line_sep>#acc2 = xc.accuracy('predict/io',X,Y)
acc2=learner.accuracy(Y P2)<line_sep>print('final train accuracy' acc2)<line_sep>TP2=learner.predict('predict/io' TX)<line_sep>#acc3 = xc.accuracy('predict/io',TX,TY)
acc3=learner.accuracy(TY TP2)<line_sep>print('final test accuracy' acc3)<line_sep>xc.exportAllLearnedParams()<line_sep>v=self.prog.db.getParameter('weighted' 1)<line_sep>d=self.prog.db.rowAsSymbolDict(v)<line_sep># sanity check a couple of values
self.assertTrue(d['little_pos']<g>d['little_neg'])<line_sep>self.assertTrue(d['big_pos']<l>d['big_neg'])<line_sep>close_cross_compiler(xc)<line_sep>self.assertTrue(acc2<ge>acc0)<line_sep>self.assertTrue(acc3<ge>acc1)<line_sep>self.assertTrue(loss2<l>loss0)<line_sep>self.assertTrue(loss2<l>loss1)<line_sep>self.assertTrue(acc2<ge>0.9)<line_sep>self.assertTrue(acc2<eq>1.0)<block_end><block_end><def_stmt>testDatasetPredict self<block_start>mode=declare.ModeDeclaration('predict(i,o)')<line_sep>modestr='predict/io'<line_sep>X,Y=testtensorlog.matrixAsTrainingData(self.labeledData 'train' 2)<for_stmt>compilerClass TESTED_COMPILERS<block_start>self.prog.setRuleWeights()<line_sep>self.prog.setFeatureWeights()<if_stmt>SAVE_SUMMARIES<block_start>xc=compilerClass(self.prog compilerClass.__name__+".summary")<block_end><else_stmt><block_start>xc=compilerClass(self.prog)<block_end>self.prog.db.markAsParameter('weighted' 1)<line_sep>learner=TESTED_LEARNERS[compilerClass](self.prog xc=xc rate=0.1 epochs=20)<line_sep>P=learner.predict(mode X)<line_sep>print("X" X.shape)<line_sep>print("P" P.shape)<line_sep>self.assertTrue(X.shape<eq>P.shape)<line_sep>P=learner.datasetPredict(dataset.Dataset({mode:X} {mode:Y}))<line_sep>print("X" X.shape)<line_sep>print("P" P.getX(mode).shape)<line_sep>self.assertTrue(X.shape<eq>P.getX(mode).shape)<line_sep><return>xc learner X Y P<block_end><block_end><def_stmt>testExptScaffold self<block_start>mode=declare.ModeDeclaration('predict(i,o)')<line_sep>X,Y=testtensorlog.matrixAsTrainingData(self.labeledData 'train' 2)<line_sep>TX,TY=testtensorlog.matrixAsTrainingData(self.labeledData 'test' 2)<line_sep>self.prog.setAllWeights()<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(self.prog)<line_sep>learner=TESTED_LEARNERS[compilerClass](self.prog xc=xc rate=0.1 epochs=20)<line_sep>Expt({'prog':self.prog 'trainData':dataset.Dataset({mode:X} {mode:Y}) 'testData':dataset.Dataset({mode:TX} {mode:TY}) 'targetMode':mode 'learner':learner}).run()<block_end><block_end>@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>testExpt self<block_start>mode=declare.ModeDeclaration('predict(i,o)')<line_sep>X,Y=testtensorlog.matrixAsTrainingData(self.labeledData 'train' 2)<line_sep>TX,TY=testtensorlog.matrixAsTrainingData(self.labeledData 'test' 2)<for_stmt>compilerClass [tensorflowxcomp.DenseMatDenseMsgCrossCompiler tensorflowxcomp.SparseMatDenseMsgCrossCompiler]<block_start>xc=compilerClass(self.prog)<line_sep>xc.runExpt(prog=self.prog trainData=dataset.Dataset({mode:X} {mode:Y}) testData=dataset.Dataset({mode:TX} {mode:TY}) targetMode=mode)<line_sep>close_cross_compiler(xc)<block_end><block_end><block_end><class_stmt>TestXCOpGen(unittest.TestCase)# TODO tests for other xcompilers?
<block_start>@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>testTCToyTypes self<block_start>matrixdb.conf.ignore_types=<false><line_sep>tlog=simple.Compiler(db=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") prog=os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr"))<line_sep>trainData=tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam"))<line_sep>mode=list(trainData.keys())[0]<line_sep>docs,labels=trainData[mode]<line_sep>xc=tlog.get_cross_compiler()<line_sep>ops=xc.possibleOps(docs 'doc')<line_sep>print('doc ops' ops)<line_sep>self.assertTrue(len(ops)<eq>1)<line_sep>(words wordType)=ops[0]<line_sep>self.assertTrue(wordType<eq>'word')<line_sep>ops=xc.possibleOps(words 'word')<line_sep>self.assertTrue(len(ops)<eq>3)<line_sep>pairs=<none><for_stmt>(expr exprType) ops<block_start><if_stmt>exprType<eq>'labelWordPair'<block_start>pairs=expr<line_sep><break><block_end><block_end>self.assertTrue(pairs<is><not><none>)<line_sep>ops=xc.possibleOps(pairs 'labelWordPair')<line_sep>self.assertTrue(len(ops)<eq>2)<for_stmt>(expr exprType) ops<block_start>self.assertTrue(exprType<eq>'word')<block_end>close_cross_compiler(xc)<block_end>@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>testTCToyIgnoringTypes self<block_start>matrixdb.conf.ignore_types=<true><line_sep>tlog=simple.Compiler(db=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") prog=os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr"))<line_sep>trainData=tlog.load_small_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam"))<line_sep>mode=list(trainData.keys())[0]<line_sep>docs,labels=trainData[mode]<line_sep>xc=tlog.get_cross_compiler()<line_sep>ops=xc.possibleOps(docs)<line_sep>binary_predicates=[functor<for>(functor arity) tlog.db.matEncoding<if>arity<eq>2]<line_sep>self.assertTrue(len(ops)<eq>len(binary_predicates)<times>2)<for_stmt>x ops# ops should just be tensors
<block_start>self.assertFalse(isinstance(x tuple))<block_end>close_cross_compiler(xc)<block_end><block_end><class_stmt>TestXCExpt(unittest.TestCase)<block_start><def_stmt>testTCToyTypes_wscaffold self<block_start>matrixdb.conf.ignore_types=<false><line_sep>optdict,args=comline.parseCommandLine(["--db" os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") "--prog" os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr") "--trainData" os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam") "--testData" os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam") "--proppr"])<line_sep>optdict['prog'].setAllWeights()<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(optdict['prog'])<line_sep>learner=TESTED_LEARNERS[compilerClass](optdict['prog'] xc)<line_sep>Expt({'prog':optdict['prog'] 'trainData':optdict['trainData'] 'testData':optdict['testData'] 'learner':learner 'targetMode':declare.asMode("predict/io")}).run()<line_sep>pbDoc=xc.db.onehot('pb' 'doc')<line_sep>self.checkXC(xc 'predict/io' pbDoc {'negPair':115 'posPair':115 'hasWord':59 'weighted':115 'label':5})<line_sep># some checks on the output of pprint
lines=xc.pprint('predict/io')<line_sep>self.assertTrue(lines[0].find("SoftMaxFunction")<ge>0)<line_sep>self.assertTrue(lines[1].find("SumFunction")<ge>0)<line_sep>self.assertEqual(len(lines) 16)<line_sep># some checks on misc xcomp API
self.assertEqual(xc.inferenceOutputType('predict/io') 'label')<line_sep>pbId=xc.asSymbolId('pb' typeName='doc')<line_sep>pbSym=xc.asSymbol(pbId typeName='doc')<line_sep>self.assertEqual(pbSym 'pb')<line_sep>self.assertEqual(xc.asSymbolId('this does not appear in the data' typeName='doc') -1)<block_end><block_end>@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>testTCToyTypes self<block_start>matrixdb.conf.ignore_types=<false><line_sep>optdict,args=comline.parseCommandLine(["--db" os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") "--prog" os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr") "--trainData" os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam") "--testData" os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam") "--proppr"])<for_stmt>compilerClass [tensorflowxcomp.DenseMatDenseMsgCrossCompiler tensorflowxcomp.SparseMatDenseMsgCrossCompiler]<block_start>xc=compilerClass(optdict['prog'])<line_sep>xc.runExpt(prog=optdict['prog'] trainData=optdict['trainData'] testData=optdict['testData'] targetMode=declare.asMode("predict/io"))<line_sep># check trainability
<for_stmt>(functor arity) xc.db.matEncoding<block_start>v=xc.parameterFromDBToVariable(functor arity)<if_stmt>v<is><not><none><block_start>vIsTrainable=(v<in>tf.trainable_variables())<line_sep>vIsParameter=((functor arity)<in>xc.db.paramSet)<line_sep>self.assertEqual(vIsTrainable vIsParameter)<block_end><block_end>pbDoc=xc.db.onehot('pb' 'doc')<line_sep>self.checkXC(xc 'predict/io' pbDoc {'negPair':115 'posPair':115 'hasWord':59 'weighted':115 'label':5})<line_sep># some checks on the output of pprint
lines=xc.pprint('predict/io')<line_sep>self.assertTrue(lines[0].find("SoftMaxFunction")<ge>0)<line_sep>self.assertTrue(lines[1].find("SumFunction")<ge>0)<line_sep>self.assertEqual(len(lines) 16)<line_sep># some checks on misc xcomp API
self.assertEqual(xc.inferenceOutputType('predict/io') 'label')<line_sep>pbId=xc.asSymbolId('pb' typeName='doc')<line_sep>pbSym=xc.asSymbol(pbId typeName='doc')<line_sep>self.assertEqual(pbSym 'pb')<line_sep>self.assertEqual(xc.asSymbolId('this does not appear in the data' typeName='doc') -1)<line_sep>close_cross_compiler(xc)<block_end><block_end><def_stmt>testTCToyIgnoringTypes_wscaffold self<block_start>matrixdb.conf.ignore_types=<true><line_sep>optdict,args=comline.parseCommandLine(["--db" os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") "--prog" os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr") "--trainData" os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam") "--testData" os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam") "--proppr"])<line_sep>optdict['prog'].setAllWeights()<for_stmt>compilerClass TESTED_COMPILERS<block_start>xc=compilerClass(optdict['prog'])<line_sep>learner=TESTED_LEARNERS[compilerClass](optdict['prog'] xc)<line_sep>Expt({'prog':optdict['prog'] 'trainData':optdict['trainData'] 'testData':optdict['testData'] 'learner':learner 'targetMode':declare.asMode("predict/io")}).run()<line_sep>pbDoc=xc.db.onehot('pb')<line_sep>self.checkXC(xc 'predict/io' pbDoc collections.defaultdict(<lambda>:191))<block_end><block_end>@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>testTCToyIgnoringTypes self<block_start>matrixdb.conf.ignore_types=<true><line_sep>optdict,args=comline.parseCommandLine(["--db" os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") "--prog" os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr") "--trainData" os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam") "--testData" os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam") "--proppr"])<for_stmt>compilerClass [tensorflowxcomp.DenseMatDenseMsgCrossCompiler tensorflowxcomp.SparseMatDenseMsgCrossCompiler]<block_start>xc=compilerClass(optdict['prog'])<line_sep>xc.runExpt(prog=optdict['prog'] trainData=optdict['trainData'] testData=optdict['testData'] targetMode=declare.asMode("predict/io"))<line_sep>pbDoc=xc.db.onehot('pb')<line_sep>self.checkXC(xc 'predict/io' pbDoc collections.defaultdict(<lambda>:191))<line_sep>close_cross_compiler(xc)<block_end><block_end><def_stmt>checkXC self xc mode rawInput expectedCols<block_start>print('matrixdb.conf.ignore_types' matrixdb.conf.ignore_types)<line_sep>db=xc.db<for_stmt>(functor arity),mat list(db.matEncoding.items())<block_start>print(functor arity 'shape' mat.shape)<line_sep>r,c=mat.shape<line_sep>self.assertEqual(c expectedCols[functor])<block_end>inferenceFun=xc.inferenceFunction(mode)<line_sep>y=inferenceFun(rawInput)<line_sep>r,c=y.shape<line_sep>self.assertEqual(c expectedCols['label'])<block_end><block_end><class_stmt>TestMultiModeXC(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.db=matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR 'matchtoy.cfacts'))<line_sep>self.prog=program.ProPPRProgram.loadRules(os.path.join(testtensorlog.TEST_DATA_DIR "matchtoy.ppr") db=self.db)<line_sep>self.dset=dataset.Dataset.loadExamples(self.db os.path.join(testtensorlog.TEST_DATA_DIR 'matchtoy-train.exam') proppr=<false>)<line_sep>self.prog.setAllWeights()<block_end><def_stmt>testInScaffold self<block_start>print(TESTED_COMPILERS)<line_sep>self.assertTrue(self.dset.modesToLearn()<g>1)<line_sep>self.prog.setAllWeights()<for_stmt>compilerClass TESTED_COMPILERS<block_start>print(compilerClass)<line_sep>xc=compilerClass(self.prog)<line_sep># compile everything
<for_stmt>mode self.dset.modesToLearn()<block_start>xc.ensureCompiled(mode)<block_end>learner=TESTED_LEARNERS[compilerClass](self.prog xc)<line_sep>testAcc,testXent=Expt({'prog':self.prog 'trainData':self.dset 'testData':self.dset 'learner':learner 'savedTestPredictions':'TestMultiModeXC.testInScaffold.%s.solutions.txt'%compilerClass.__name__}).run()<line_sep>print(testAcc)<block_end><block_end>@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>testIt self<block_start>self.assertTrue(self.dset.modesToLearn()<g>1)<for_stmt>compilerClass [tensorflowxcomp.DenseMatDenseMsgCrossCompiler tensorflowxcomp.SparseMatDenseMsgCrossCompiler]<block_start>xc=compilerClass(self.prog)<line_sep># compile everything
<for_stmt>mode self.dset.modesToLearn()<block_start>xc.ensureCompiled(mode inputs=<none>)<block_end># check the variables
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1)<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep># set up for training
trainStep={}<for_stmt>mode self.dset.modesToLearn()<block_start>(dataLossArgs dataLossExpr)=xc.dataLoss(mode)<line_sep>trainStep[mode]=optimizer.minimize(dataLossExpr var_list=xc.getParamVariables(mode))<block_end># train
<for_stmt>i range(2)#epochs
<block_start><for_stmt>mode self.dset.modesToLearn()<block_start>X=self.dset.getX(mode)<line_sep>Y=self.dset.getY(mode)<line_sep>fd=xc.getFeedDict(mode X Y wrapped=<false>)<line_sep>session.run(trainStep[mode] feed_dict=fd)<block_end><block_end># test
<for_stmt>mode self.dset.modesToLearn()<block_start>X=self.dset.getX(mode)<line_sep>Y=self.dset.getY(mode)<line_sep>Y_=xc.inferenceFunction(mode)(X)<line_sep>acc=xc.accuracy(mode X Y)<line_sep>print('mode' mode 'acc' acc)<block_end>session.close()<line_sep>close_cross_compiler(xc)<block_end><block_end><block_end><class_stmt>TestMatParams(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.cacheDir=tempfile.mkdtemp()<block_end><def_stmt>cacheFile self fileName<block_start><return>os.path.join(self.cacheDir fileName)<block_end><def_stmt>testMToyMatParam self<block_start>tlog=simple.Compiler(db=os.path.join(testtensorlog.TEST_DATA_DIR "matchtoy.cfacts") prog=os.path.join(testtensorlog.TEST_DATA_DIR "matchtoy.ppr"))<line_sep>trainData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "matchtoy-train.exam"))<line_sep>tlog.db.markAsParameter('dabbrev' 2)<line_sep>factDict=tlog.db.matrixAsPredicateFacts('dabbrev' 2 tlog.db.matEncoding[('dabbrev' 2)])<line_sep>print('before learning' len(factDict) 'dabbrevs')<line_sep>self.assertTrue(len(factDict)<eq>5)<for_stmt>f sorted(factDict.keys())<block_start>print('>' str(f) factDict[f])<block_end># expt pipeline
mode=list(trainData.keys())[0]<line_sep>TX,TY=trainData[mode]<line_sep>inference=tlog.inference(mode)<line_sep>trueY=tf.placeholder(tf.float32 shape=TY.shape name='tensorlog/trueY')<line_sep>loss=tlog.loss(mode)<line_sep>optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1)<line_sep>train_step=optimizer.minimize(loss)<line_sep>train_batch_fd={tlog.input_placeholder_name(mode):TX tlog.target_output_placeholder_name(mode):TY}<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<for_stmt>i range(5)<block_start>print('epoch' i+1)<line_sep>session.run(train_step feed_dict=train_batch_fd)<block_end>tlog.set_all_db_params_to_learned_values(session)<block_end><block_end># params = {'prog':prog,'trainData':trainData, 'testData':testData}
# result = expt.Expt(params).run()
# factDict = db.matrixAsPredicateFacts('dabbrev',2,db.matEncoding[('dabbrev',2)])
# print 'after learning',len(factDict),'dabbrevs'
# for f in sorted(factDict.keys()):
# print '>',str(f),factDict[f]
# self.assertTrue(len(factDict)>5)
@unittest.skipUnless(xctargets.tf "Tensorflow not available")<class_stmt>TestSimple(unittest.TestCase)<block_start><def_stmt>testEmptyRules self# should not throw an error
<block_start>tlog=simple.Compiler(db=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts"))<block_end><def_stmt>testIncrementalDBLoad self<block_start>b=simple.Builder()<line_sep>predict,label,hasWord,posPair,negPair=b.predicates("predict,label,hasWord,posPair,negPair")<line_sep>doc_t,label_t,word_t,labelWordPair_t=b.types("doc_t,label_t,word_t,labelWordPair_t")<line_sep>b.schema<augadd>predict(doc_t label_t)&label(label_t)<line_sep>b.schema<augadd>hasWord(doc_t word_t)&posPair(word_t labelWordPair_t)&negPair(word_t labelWordPair_t)<for_stmt>basename "textcattoy_corpus.cfacts textcattoy_labels.cfacts textcattoy_pairs.cfacts".split(" ")<block_start>b.db<augadd>os.path.join(testtensorlog.TEST_DATA_DIR basename)<block_end>tlog=simple.Compiler(db=b.db)<for_stmt>(functor arity nnz) [('hasWord' 2 99) ('label' 1 2) ('negPair' 2 56)]<block_start>m=tlog.db.matEncoding[(functor arity)]<line_sep>self.assertTrue(m.nnz<eq>nnz)<block_end><block_end><def_stmt>testBatch self<block_start>tlog=simple.Compiler(db=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") prog=os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr"))<line_sep>trainData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam"))<line_sep>testData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam"))<line_sep>mode=list(trainData.keys())[0]<line_sep>TX,TY=trainData[mode]<line_sep>UX,UY=testData[mode]<line_sep>inference=tlog.inference(mode)<line_sep>trueY=tf.placeholder(tf.float32 shape=UY.shape name='tensorlog/trueY')<line_sep>correct=tf.equal(tf.argmax(trueY 1) tf.argmax(inference 1))<line_sep>accuracy=tf.reduce_mean(tf.cast(correct tf.float32))<line_sep>test_batch_fd={tlog.input_placeholder_name(mode):UX trueY.name:UY}<line_sep>loss=tlog.loss(mode)<line_sep>optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1)<line_sep>train_step=optimizer.minimize(loss)<line_sep>train_batch_fd={tlog.input_placeholder_name(mode):TX tlog.target_output_placeholder_name(mode):TY}<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep>acc0=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('initial accuracy' acc0)<line_sep>self.assertTrue(acc0<l>0.6)<for_stmt>i range(10)<block_start>print('epoch' i+1)<line_sep>session.run(train_step feed_dict=train_batch_fd)<block_end>acc1=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('final accuracy' acc1)<line_sep>self.assertTrue(acc1<ge>0.9)<line_sep># test a round-trip serialization
# saves the db
cacheDir=tempfile.mkdtemp()<line_sep>db_file=os.path.join(cacheDir 'simple.db')<line_sep>tlog.set_all_db_params_to_learned_values(session)<line_sep>tlog.serialize_db(db_file)<line_sep># load everything into a new graph and don't reset the learned params
new_graph=tf.Graph()<with_stmt>new_graph.as_default()<block_start>tlog2=simple.Compiler(db=db_file prog=os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr") autoset_db_params=<false>)<line_sep># reconstruct the accuracy measure
inference2=tlog2.inference(mode)<line_sep>trueY2=tf.placeholder(tf.float32 shape=UY.shape name='tensorlog/trueY2')<line_sep>correct2=tf.equal(tf.argmax(trueY2 1) tf.argmax(inference2 1))<line_sep>accuracy2=tf.reduce_mean(tf.cast(correct2 tf.float32))<line_sep># eval accuracy in a new session
session2=tf.Session()<line_sep>session2.run(tf.global_variables_initializer())<line_sep>test_batch_fd2={tlog2.input_placeholder_name(mode):UX trueY2.name:UY}<line_sep>acc3=session2.run(accuracy2 feed_dict=test_batch_fd2)<line_sep>print('accuracy after round-trip serialization' acc3)<line_sep>self.assertTrue(acc3<ge>0.9)<block_end>session.close()<block_end><def_stmt>testMinibatch self<block_start>tlog=simple.Compiler(db=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts") prog=os.path.join(testtensorlog.TEST_DATA_DIR "textcat3.ppr"))<line_sep>self.runTextCatLearner(tlog)<block_end><def_stmt>runTextCatLearner self tlog<block_start>trainData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam"))<line_sep>testData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam"))<line_sep>mode=list(trainData.keys())[0]<line_sep>UX,UY=testData[mode]<line_sep>inference=tlog.inference(mode)<line_sep>trueY=tf.placeholder(tf.float32 shape=UY.shape name='tensorlog/trueY')<line_sep>correct=tf.equal(tf.argmax(trueY 1) tf.argmax(inference 1))<line_sep>accuracy=tf.reduce_mean(tf.cast(correct tf.float32))<line_sep>test_batch_fd={tlog.input_placeholder_name(mode):UX trueY.name:UY}<line_sep>loss=tlog.loss(mode)<line_sep>optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1)<line_sep>train_step=optimizer.minimize(loss)<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep>acc0=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('initial accuracy' acc0)<line_sep>self.assertTrue(acc0<l>0.6)<for_stmt>i range(10)<block_start>print('epoch' i+1 end=' ')<for_stmt>mode,(TX TY) tlog.minibatches(trainData batch_size=2)<block_start>print('.' end=' ')<line_sep>train_minibatch_fd={tlog.input_placeholder_name(mode):TX tlog.target_output_placeholder_name(mode):TY}<line_sep>session.run(train_step feed_dict=train_minibatch_fd)<block_end>print('epoch' i+1 'finished')<block_end>acc1=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('final accuracy' acc1)<line_sep>self.assertTrue(acc1<ge>0.9)<line_sep>session.close()<block_end><def_stmt>testBuilder1 self<block_start>b=simple.Builder()<line_sep>X,Y,Z=b.variables("X Y Z")<line_sep>aunt,parent,sister,wife=b.predicates("aunt parent sister wife")<line_sep>uncle=b.predicate("uncle")<line_sep>b<augadd>aunt(X Y)<le>uncle(X Z)&wife(Z Y)<line_sep>b<augadd>aunt(X Y)<le>parent(X Z)&sister(Z Y)<line_sep>r1=b.rule_id("ruleid_t" "r1")<line_sep>r2=b.rule_id("ruleid_t" "r2")<line_sep>b<augadd>aunt(X Y)<le>uncle(X Z)&wife(Z Y)<floordiv>r1<line_sep>b<augadd>aunt(X Y)<le>parent(X Z)&sister(Z Y)<floordiv>r2<line_sep>feature,description=b.predicates("feature description")<line_sep>weight=b.predicate("weight")<line_sep>F=b.variable("F")<line_sep>D=b.variable("D")<line_sep>b<augadd>aunt(X Y)<le>uncle(X Z)&wife(Z Y)<floordiv>(weight(F)|description(X D)&feature(X F))<line_sep>b.rules.listing()<line_sep>rs=b.rules.rulesFor(parser.Goal('aunt' [X Y]))<line_sep>self.assertEqual(str(rs[0]) "aunt(X,Y) :- uncle(X,Z), wife(Z,Y).")<line_sep>self.assertEqual(str(rs[1]) "aunt(X,Y) :- parent(X,Z), sister(Z,Y).")<line_sep>self.assertEqual(str(rs[2]) "aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(R1) : assign(R1,r1,ruleid_t)}.")<line_sep>self.assertEqual(str(rs[3]) "aunt(X,Y) :- parent(X,Z), sister(Z,Y) {weight(R2) : assign(R2,r2,ruleid_t)}.")<line_sep>self.assertEqual(str(rs[4]) "aunt(X,Y) :- uncle(X,Z), wife(Z,Y) {weight(F) : description(X,D),feature(X,F)}.")<block_end><def_stmt>testBuilder2 
self<block_start>b=simple.Builder()<line_sep>predict,assign,weighted,hasWord,posPair,negPair=b.predicates("predict assign weighted hasWord posPair negPair")<line_sep>X,Pos,Neg,F,W=b.variables("X Pos Neg F W")<line_sep>b<augadd>predict(X Pos)<le>assign(Pos 'pos' 'label')<floordiv>(weighted(F)|hasWord(X W)&posPair(W F))<line_sep>b<augadd>predict(X Neg)<le>assign(Neg 'neg' 'label')<floordiv>(weighted(F)|hasWord(X W)&negPair(W F))<line_sep>dbSpec=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts")<line_sep>self.runTextCatLearner(simple.Compiler(db=dbSpec prog=b.rules))<block_end><def_stmt>testBuilder3 self<block_start>b=simple.Builder()<line_sep>predict,assign,weighted,hasWord,posPair,negPair,label=b.predicates("predict assign weighted hasWord posPair negPair label")<line_sep>doc_t,label_t,word_t,labelWordPair_t=b.types("doc_t label_t word_t labelWordPair_t")<line_sep>b.schema<augadd>predict(doc_t label_t)<line_sep>b.schema<augadd>hasWord(doc_t word_t)<line_sep>b.schema<augadd>posPair(word_t labelWordPair_t)<line_sep>b.schema<augadd>negPair(word_t labelWordPair_t)<line_sep>b.schema<augadd>label(label_t)<line_sep>X,Pos,Neg,F,W=b.variables("X Pos Neg F W")<line_sep>b.rules<augadd>predict(X Pos)<le>assign(Pos 'pos' 'label_t')<floordiv>(weighted(F)|hasWord(X W)&posPair(W F))<line_sep>b.rules<augadd>predict(X Neg)<le>assign(Neg 'neg' 'label_t')<floordiv>(weighted(F)|hasWord(X W)&negPair(W F))<line_sep># use the untyped version of the facts to make sure the schema works
b.db=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy.cfacts")<line_sep>self.runTextCatLearner(simple.Compiler(db=b.db prog=b.rules))<block_end><block_end><class_stmt>TestReparameterizationAndTypedLoading(unittest.TestCase)<block_start><def_stmt>testBugWasFixed self# use the untyped version of the facts to make sure the schema works
<block_start>db=matrixdb.MatrixDB()<line_sep>db.addLines(["# :- r(lo_or_hi_t)\n" "\t".join("r low 0.1".split())+"\n" "\t".join("r hi 0.9".split())+"\n"])<line_sep>db.markAsParameter('r' 1)<line_sep>prog=program.Program(db=db)<line_sep>typeName=db.schema.getArgType("r" 1 0)<line_sep>idLow=db.schema.getId(typeName "low")<line_sep>idHi=db.schema.getId(typeName "hi")<line_sep>db_r=db.matEncoding[('r' 1)]<line_sep>self.approxEqual(db_r[0 idLow] 0.1)<line_sep>self.approxEqual(db_r[0 idHi] 0.9)<line_sep>xc=tensorflowxcomp.SparseMatDenseMsgCrossCompiler(prog)<line_sep>v_r=xc._vector(declare.asMode("r(i)"))<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep>xc.exportAllLearnedParams()<line_sep>print('exported to xc' db.matEncoding[('r' 1)])<line_sep>db_r=db.matEncoding[('r' 1)]<line_sep>self.approxEqual(db_r[0 idLow] 0.1)<line_sep>self.approxEqual(db_r[0 idHi] 0.9)<block_end><def_stmt>approxEqual self a b<block_start>self.assertTrue(abs(float(a)-b)<l>0.0001)<block_end><block_end><class_stmt>TestPlugins(unittest.TestCase)<block_start><def_stmt>test_identity_io self<block_start>ruleStrings=['predict(X,Y) :- assign(Pos,pos,label),udp1(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.' 'predict(X,Y) :- assign(Neg,neg,label),udp1(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']<line_sep>plugins=program.Plugins()<line_sep>plugins.define('udp1/io' <lambda>x:x <lambda>inputType:'label')<line_sep>self.check_learning_with_udp(ruleStrings plugins)<block_end><def_stmt>test_identity_oi self<block_start>ruleStrings=['predict(X,Y) :- assign(Pos,pos,label),udp2(Y,Pos) {weighted(F): hasWord(X,W),posPair(W,F)}.' 'predict(X,Y) :- assign(Neg,neg,label),udp2(Y,Neg) {weighted(F): hasWord(X,W),negPair(W,F)}.']<line_sep>plugins=program.Plugins()<line_sep>plugins.define('udp2/oi' <lambda>x:x <lambda>inputType:'label')<line_sep>self.check_learning_with_udp(ruleStrings plugins)<block_end><def_stmt>test_double_io1 self<block_start>ruleStrings=['predict(X,Y) :- assign(Pos,pos,label),udp3(Pos,Y) {weighted(F): hasWord(X,W),posPair(W,F)}.' 'predict(X,Y) :- assign(Neg,neg,label),udp3(Neg,Y) {weighted(F): hasWord(X,W),negPair(W,F)}.']<line_sep>plugins=program.Plugins()<line_sep>plugins.define('udp3/io' <lambda>x:2<times>x <lambda>inputType:'label')<line_sep>self.check_learning_with_udp(ruleStrings plugins)<block_end><def_stmt>test_double_io2 self<block_start>ruleStrings=['predict(X,Pos) :- assign(Pos,pos,label) {weighted(F): hasWord(X,W),double(W,W2),posPair(W2,F)}.' 'predict(X,Neg) :- assign(Neg,neg,label) {weighted(F2): hasWord(X,W),negPair(W,F),double(F,F2)}.']<line_sep>plugins=program.Plugins()<line_sep>plugins.define('double/io' <lambda>x:2<times>x <lambda>inputType:inputType)<line_sep>self.check_learning_with_udp(ruleStrings plugins)<block_end><def_stmt>test_kw_i self<block_start>ruleStrings=['predict(X,Pos) :- assign(Pos,pos,label),hasWord(X,W),poskw(W).' 
'predict(X,Neg) :- assign(Neg,neg,label),hasWord(X,W),negkw(W).']<line_sep>plugins=program.Plugins()<line_sep>db=matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts"))<line_sep>poskw_v=(db.onehot('little' 'word')+db.onehot('red' 'word')).todense()<line_sep>negkw_v=(db.onehot('big' 'word')+db.onehot('job' 'word')+db.onehot('huge' 'word')).todense()<line_sep>plugins.define('poskw/i' <lambda>:poskw_v <lambda>:'word')<line_sep>plugins.define('negkw/i' <lambda>:negkw_v <lambda>:'word')<line_sep>self.check_udp(ruleStrings plugins)<block_end><def_stmt>check_udp self ruleStrings plugins<block_start>db=matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts"))<line_sep>rules=testtensorlog.rules_from_strings(ruleStrings)<line_sep>prog=program.ProPPRProgram(rules=rules db=db plugins=plugins)<line_sep>mode=declare.asMode("predict/io")<line_sep>prog.compile(mode)<line_sep>fun=prog.function[(mode 0)]<line_sep>print("\n".join(fun.pprint()))<line_sep>tlog=simple.Compiler(db=db prog=prog)<line_sep>testData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam"))<line_sep>mode=list(testData.keys())[0]<line_sep>UX,UY=testData[mode]<line_sep>inference=tlog.inference(mode)<line_sep>trueY=tf.placeholder(tf.float32 shape=UY.shape name='tensorlog/trueY')<line_sep>correct=tf.equal(tf.argmax(trueY 1) tf.argmax(inference 1))<line_sep>accuracy=tf.reduce_mean(tf.cast(correct tf.float32))<line_sep>test_batch_fd={tlog.input_placeholder_name(mode):UX trueY.name:UY}<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep>acc1=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('final accuracy' acc1)<line_sep>session.close()<block_end># TOFIX needs some work to pass
# - you can't do polytree BP with multiple inputs
# - so there's not a simple fix
# - probably do this: (1) treat inputs to leftmost userDef as outputs (2) run message-passing for those outputs
# (3) add the user def operator (4) repeat from (1) until (5) there are no more plugins
<def_stmt>notest_isect_iio self<block_start>bpcompiler.conf.trace=<true><line_sep>ruleStrings=['predict(X,Y) :- hasWord(X,W),posPair(W,P1),negPair(W,P2),isect(P1,P2,Y).']<line_sep>plugins=program.Plugins()<line_sep>plugins.define('isect/iio' <lambda>x1 x2:x1<times>x2 <lambda>t1 t2:t1)<line_sep>self.assertTrue(plugins.isDefined(declare.asMode('isect/iio')))<line_sep>self.check_learning_with_udp(ruleStrings plugins)<block_end><def_stmt>argmax self<block_start>bpcompiler.conf.trace=<true><line_sep>ruleStrings=['predict(X,Y):-olympics(X,Z),nations(Z),argmax(Z,Y).']<line_sep>plugins=program.Plugins()<line_sep>plugins.define('argmax/io' <lambda>x1:tf.nn.softmax(x1) <lambda>t1:t1)<line_sep>db=matrixdb.MatrixDB.loadFile(os.path.join(testtensorlog.TEST_DATA_DIR 'argmax.cfacts'))<line_sep>rules=testtensorlog.rules_from_strings(ruleStrings)<line_sep>prog=program.ProPPRProgram(rules=rules db=db plugins=plugins)<line_sep>prog.setAllWeights()<line_sep>mode=declare.asMode("predict/io")<line_sep>prog.compile(mode)<line_sep>fun=prog.function[(mode 0)]<line_sep>print("\n".join(fun.pprint()))<line_sep>tlog=simple.Compiler(db=db prog=prog)<line_sep>data=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "argmax.exam"))<line_sep>mode=list(data.keys())[0]<line_sep>UX,UY=data[mode]<line_sep>inference=tlog.inference(mode)<line_sep>trueY=tf.placeholder(tf.float32 shape=UY.shape name='tensorlog/trueY')<line_sep>correct=tf.equal(tf.argmax(trueY 1) tf.argmax(inference 1))<line_sep>accuracy=tf.reduce_mean(tf.cast(correct tf.float32))<line_sep>test_batch_fd={tlog.input_placeholder_name(mode):UX trueY.name:UY}<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep>acc0=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('initial accuracy' acc0)<line_sep>self.assertTrue(acc0<g>0.9)<line_sep>session.close()<block_end># acc0 = session.run(inference, feed_dict=test_batch_fd)
# print "inference results:"
# print acc0
# print np.argmax(acc0,1)
# print "trueY:"
# print UY
# print np.argmax(UY,1)
@unittest.skipUnless(xctargets.tf "Tensorflow not available")<def_stmt>check_learning_with_udp self ruleStrings plugins dbfile=os.path.join(testtensorlog.TEST_DATA_DIR "textcattoy3.cfacts")<block_start>db=matrixdb.MatrixDB.loadFile(dbfile)<line_sep>rules=testtensorlog.rules_from_strings(ruleStrings)<line_sep>prog=program.ProPPRProgram(rules=rules db=db plugins=plugins)<line_sep>prog.setAllWeights()<line_sep>mode=declare.asMode("predict/io")<line_sep>prog.compile(mode)<line_sep>fun=prog.function[(mode 0)]<line_sep>print("\n".join(fun.pprint()))<line_sep>tlog=simple.Compiler(db=db prog=prog)<line_sep>trainData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytrain.exam"))<line_sep>testData=tlog.load_dataset(os.path.join(testtensorlog.TEST_DATA_DIR "toytest.exam"))<line_sep>mode=list(trainData.keys())[0]<line_sep>TX,TY=trainData[mode]<line_sep>UX,UY=testData[mode]<line_sep>inference=tlog.inference(mode)<line_sep>trueY=tf.placeholder(tf.float32 shape=UY.shape name='tensorlog/trueY')<line_sep>correct=tf.equal(tf.argmax(trueY 1) tf.argmax(inference 1))<line_sep>accuracy=tf.reduce_mean(tf.cast(correct tf.float32))<line_sep>test_batch_fd={tlog.input_placeholder_name(mode):UX trueY.name:UY}<line_sep>loss=tlog.loss(mode)<line_sep>optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1)<line_sep>train_step=optimizer.minimize(loss)<line_sep>train_batch_fd={tlog.input_placeholder_name(mode):TX tlog.target_output_placeholder_name(mode):TY}<line_sep>session=tf.Session()<line_sep>session.run(tf.global_variables_initializer())<line_sep>acc0=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('initial accuracy' acc0)<line_sep>self.assertTrue(acc0<l>0.6)<for_stmt>i range(10)<block_start>print('epoch' i+1)<line_sep>session.run(train_step feed_dict=train_batch_fd)<block_end>acc1=session.run(accuracy feed_dict=test_batch_fd)<line_sep>print('final accuracy' acc1)<line_sep>self.assertTrue(acc1<ge>0.9)<line_sep>session.close()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>logging.basicConfig(level=logging.INFO)<line_sep># default is to test on everything adding command line arguments
# 'tensorflow' 'theano' 'sparse' 'dense' filters the list (so
# 'testxcomp.py tensorflow sparse' will run just
# tensorflowxcomp.SparseMatDenseMsgCrossCompiler)
<if_stmt>'theano'<in>sys.argv[1:]<block_start>TESTED_COMPILERS=[c<for>c TESTED_COMPILERS<if>c.__module__.endswith("theanoxcomp")]<block_end><if_stmt>'tensorflow'<in>sys.argv[1:]<block_start>TESTED_COMPILERS=[c<for>c TESTED_COMPILERS<if>c.__module__.endswith("tensorflowxcomp")]<block_end><if_stmt>'dense'<in>sys.argv[1:]<block_start>TESTED_COMPILERS=[c<for>c TESTED_COMPILERS<if>c.__name__.startswith("Dense")]<block_end><if_stmt>'sparse'<in>sys.argv[1:]<block_start>TESTED_COMPILERS=[c<for>c TESTED_COMPILERS<if>c.__name__.startswith("Sparse")]<block_end>sys.argv=[a<for>a sys.argv<if>a<not><in>"theano tensorflow dense sparse".split()]<line_sep>print('TESTED_COMPILERS' TESTED_COMPILERS)<line_sep>unittest.main()<block_end> |
<import_from_stmt>typing Dict Any Tuple<import_from_stmt>checkov.common.checks.base_check_registry BaseCheckRegistry<class_stmt>Registry(BaseCheckRegistry)<block_start><def_stmt>extract_entity_details self entity:Dict[str Any]<arrow>Tuple[str str Dict[str Any]]<block_start>provider_type=list(entity.keys())[0]<line_sep>provider_name=list(entity.keys())[0]<line_sep>provider_configuration=entity[provider_name]<line_sep><return>provider_type provider_name provider_configuration<block_end><block_end> |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by <NAME>
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: <NAME> and <NAME>
:paper: <NAME>., <NAME>., <NAME>. and <NAME>.:
SPOTting Model Parameters Using a Ready-Made Python Package,
PLoS ONE, 10(12), e0145180, doi:10.1371/journal.pone.0145180, 2015.
'''<import_from_stmt>numba jit<def_stmt>hymod Precip PET cmax bexp alpha Rs Rq<block_start>"""
See https://www.proc-iahs.net/368/180/2015/piahs-368-180-2015.pdf for a scientific paper:
<NAME>.; <NAME>.; <NAME>.; <NAME>. & <NAME>. (2015): Evaluation of the HYMOD model
for rainfall–runoff simulation using the GLUE method. Remote Sensing and GIS for Hydrology
and Water Resources, 180 - 185, IAHS Publ. 368. DOI: 10.5194/piahs-368-180-2015.
:param Precip: precipitation time series
:param PET: potential evapotranspiration time series
:param cmax: maximum storage capacity of the soil moisture store
:param bexp: degree of spatial variability of the soil moisture capacity
:param alpha: factor splitting effective rainfall between the quick and slow flow reservoirs
:param Rs: rate constant of the slow-flow reservoir
:param Rq: rate constant of the quick-flow reservoirs
:return: simulated discharge for each timestep (has to be calculated in litres)
:rtype: list
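Example (a minimal usage sketch; the forcing data and parameter values
below are illustrative assumptions, not calibrated values)::
    >>> import numpy as np
    >>> Precip = np.random.uniform(0, 20, 365)  # daily rainfall
    >>> PET = np.random.uniform(0, 5, 365)      # daily potential evapotranspiration
    >>> Q = hymod(Precip, PET, cmax=250.0, bexp=0.5, alpha=0.4, Rs=0.05, Rq=0.5)
    >>> len(Q) == len(Precip)
    True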
"""<line_sep># HYMOD PROGRAM IS SIMPLE RAINFALL RUNOFF MODEL
x_loss=0.0<line_sep># Initialize slow tank state
x_slow=2.3503/(Rs<times>22.5)<line_sep>x_slow=0# --> works ok if calibration data starts with low discharge
# Initialize state(s) of quick tank(s)
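# (three identical linear reservoirs in cascade: the outflow of each feeds the next)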
x_quick=[0 0 0]<line_sep>t=0<line_sep>output=[]<line_sep># START PROGRAMMING LOOP WITH DETERMINING RAINFALL - RUNOFF AMOUNTS
<while_stmt>t<le>len(Precip)-1<block_start>Pval=Precip[t]<line_sep>PETval=PET[t]<line_sep># Compute excess precipitation and evaporation
ER1,ER2,x_loss=excess(x_loss cmax bexp Pval PETval)<line_sep># Calculate total effective rainfall
ET=ER1+ER2<line_sep># Now partition ER between quick and slow flow reservoirs
UQ=alpha<times>ET<line_sep>US=(1-alpha)<times>ET<line_sep># Route slow flow component with single linear reservoir
x_slow,QS=linres(x_slow US Rs)<line_sep># Route quick flow component with linear reservoirs
inflow=UQ<for_stmt>i range(3)# Linear reservoir
<block_start>x_quick[i],outflow=linres(x_quick[i] inflow Rq)<line_sep>inflow=outflow<block_end># Compute total flow for timestep
output.append(QS+outflow)<line_sep>t=t+1<block_end><return>output<block_end>@jit<def_stmt>power X Y<block_start>X=abs(X)# Needed to avoid invalid overflow with negative values
<return>X<power>Y<block_end>@jit<def_stmt>linres x_slow inflow Rs# Linear reservoir
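# state update: x' = (1-R)*(x + inflow); outflow = (R/(1-R)) * x'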
<block_start>x_slow=(1-Rs)<times>x_slow+(1-Rs)<times>inflow<line_sep>outflow=(Rs/(1-Rs))<times>x_slow<line_sep><return>x_slow outflow<block_end>@jit<def_stmt>excess x_loss cmax bexp Pval PETval# this function calculates excess precipitation and evaporation
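# the soil store assumes a Pareto distribution of storage capacities (PDM-style):
# c(x) = cmax*(1 - (1 - (bexp+1)*x/cmax)**(1/(bexp+1)))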
<block_start>xn_prev=x_loss<line_sep>ct_prev=cmax<times>(1-power((1-((bexp+1)<times>(xn_prev)/cmax)) (1/(bexp+1))))<line_sep># Calculate Effective rainfall 1
ER1=max((Pval-cmax+ct_prev) 0.0)<line_sep>Pval=Pval-ER1<line_sep>dummy=min(((ct_prev+Pval)/cmax) 1)<line_sep>xn=(cmax/(bexp+1))<times>(1-power((1-dummy) (bexp+1)))<line_sep># Calculate Effective rainfall 2
ER2=max(Pval-(xn-xn_prev) 0)<line_sep># Alternative approach
evap=(1-(((cmax/(bexp+1))-xn)/(cmax/(bexp+1))))<times>PETval# actual ET is linearly related to the soil moisture state
xn=max(xn-evap 0)# update state
<return>ER1 ER2 xn<block_end> |
<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>mayan.apps.permissions PermissionNamespace<line_sep>namespace=PermissionNamespace(label=_('Announcements') name='announcements')<line_sep>permission_announcement_create=namespace.add_permission(label=_('Create announcements') name='announcement_create')<line_sep>permission_announcement_delete=namespace.add_permission(label=_('Delete announcements') name='announcement_delete')<line_sep>permission_announcement_edit=namespace.add_permission(label=_('Edit announcements') name='announcement_edit')<line_sep>permission_announcement_view=namespace.add_permission(label=_('View announcements') name='announcement_view')<line_sep> |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>tensorflow.keras.backend<as>K<import_from_stmt>tensorflow.keras.initializers Constant<import_from_stmt>tensorflow.keras.layers InputSpec Layer Lambda Dropout Multiply<line_sep>INFTY=-100<class_stmt>Highway(Layer)<block_start><def_stmt>__init__ self activation=<none> bias_initializer=-1 **kwargs<block_start>super().__init__(**kwargs)<line_sep>self.activation=tf.keras.activations.get(activation)<line_sep>self.bias_initializer=bias_initializer<if_stmt>isinstance(self.bias_initializer int)<block_start>self.bias_initializer=Constant(self.bias_initializer)<block_end>self.input_spec=[InputSpec(min_ndim=2)]<block_end><def_stmt>build self input_shape<block_start><assert_stmt>len(input_shape)<ge>2<line_sep>input_dim=input_shape[-1]<line_sep>self.gate_kernel=self.add_weight(shape=(input_dim input_dim) initializer='uniform' name='gate_kernel')<line_sep>self.gate_bias=self.add_weight(shape=(input_dim ) initializer=self.bias_initializer name='gate_bias')<line_sep>self.dense_kernel=self.add_weight(shape=(input_dim input_dim) initializer='uniform' name='dense_kernel')<line_sep>self.dense_bias=self.add_weight(shape=(input_dim ) initializer=self.bias_initializer name='dense_bias')<line_sep>self.input_spec=InputSpec(min_ndim=2 axes={-1:input_dim})<line_sep>self.built=<true><block_end><def_stmt>call self inputs **kwargs<block_start>gate=K.dot(inputs self.gate_kernel)<line_sep>gate=K.bias_add(gate self.gate_bias data_format="channels_last")<line_sep>gate=self.activation(gate)<line_sep>new_value=K.dot(inputs self.dense_kernel)<line_sep>new_value=K.bias_add(new_value self.dense_bias data_format="channels_last")<line_sep><return>gate<times>new_value+(1.0-gate)<times>inputs<block_end><def_stmt>compute_output_shape self input_shape<block_start><return>input_shape<block_end><block_end><def_stmt>weighted_sum first second sigma first_threshold=-np.inf second_threshold=np.inf<block_start>logit_probs=first<times>sigma+second<times>(1.0-sigma)<line_sep>infty_tensor=K.ones_like(logit_probs)<times>INFTY<line_sep>logit_probs=K.switch(K.greater(first first_threshold) logit_probs infty_tensor)<line_sep>logit_probs=K.switch(K.greater(second second_threshold) logit_probs infty_tensor)<line_sep><return>logit_probs<block_end><class_stmt>WeightedCombinationLayer(Layer)<block_start>"""
A class for weighted combination of probability distributions
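Given two distributions (first, second) and a feature vector, the call
computes softmax(sigma * log(first) + (1 - sigma) * log(second)), where
sigma = sigmoid(W . features + b) is a learned gate (see weighted_sum
and call below).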
"""<def_stmt>__init__ self first_threshold=<none> second_threshold=<none> use_dimension_bias=<false> use_intermediate_layer=<false> intermediate_dim=64 intermediate_activation=<none> from_logits=<false> return_logits=<false> bias_initializer=1.0 **kwargs# if 'input_shape' not in kwargs:
# kwargs['input_shape'] = [(None, input_dim,), (None, input_dim)]
<block_start>super(WeightedCombinationLayer self).__init__(**kwargs)<line_sep>self.first_threshold=first_threshold<if>first_threshold<is><not><none><else>INFTY<line_sep>self.second_threshold=second_threshold<if>second_threshold<is><not><none><else>INFTY<line_sep>self.use_dimension_bias=use_dimension_bias<line_sep>self.use_intermediate_layer=use_intermediate_layer<line_sep>self.intermediate_dim=intermediate_dim<line_sep>self.intermediate_activation=tf.keras.activations.get(intermediate_activation)<line_sep>self.from_logits=from_logits<line_sep>self.return_logits=return_logits<line_sep>self.bias_initializer=bias_initializer<line_sep>self.input_spec=[InputSpec() InputSpec() InputSpec()]<block_end><def_stmt>build self input_shape<block_start><assert_stmt>len(input_shape)<eq>3<assert_stmt>input_shape[0]<eq>input_shape[1]<assert_stmt>input_shape[0][:-1]<eq>input_shape[2][:-1]<line_sep>input_dim,features_dim=input_shape[0][-1] input_shape[2][-1]<if_stmt>self.use_intermediate_layer<block_start>self.first_kernel=self.add_weight(shape=(features_dim self.intermediate_dim) initializer="random_uniform" name='first_kernel')<line_sep>self.first_bias=self.add_weight(shape=(self.intermediate_dim ) initializer="random_uniform" name='first_bias')<block_end>self.features_kernel=self.add_weight(shape=(features_dim 1) initializer="random_uniform" name='kernel')<line_sep>self.features_bias=self.add_weight(shape=(1 ) initializer=Constant(self.bias_initializer) name='bias')<if_stmt>self.use_dimension_bias<block_start>self.dimensions_bias=self.add_weight(shape=(input_dim ) initializer="random_uniform" name='dimension_bias')<block_end>super(WeightedCombinationLayer self).build(input_shape)<block_end><def_stmt>call self inputs **kwargs<block_start><assert_stmt>isinstance(inputs list)<and>len(inputs)<eq>3<line_sep>first,second,features=inputs[0] inputs[1] inputs[2]<if_stmt><not>self.from_logits<block_start>first=K.clip(first 1e-10 1.0)<line_sep>second=K.clip(second 1e-10 1.0)<line_sep>first_,second_=K.log(first) K.log(second)<block_end><else_stmt><block_start>first_,second_=first second<block_end># embedded_features.shape = (M, T, 1)
<if_stmt>self.use_intermediate_layer<block_start>features=K.dot(features self.first_kernel)<line_sep>features=K.bias_add(features self.first_bias data_format="channels_last")<line_sep>features=self.intermediate_activation(features)<block_end>embedded_features=K.dot(features self.features_kernel)<line_sep>embedded_features=K.bias_add(embedded_features self.features_bias data_format="channels_last")<if_stmt>self.use_dimension_bias<block_start>tiling_shape=[1]<times>(K.ndim(first)-1)+[K.shape(first)[-1]]<line_sep>embedded_features=K.tile(embedded_features tiling_shape)<line_sep>embedded_features=K.bias_add(embedded_features self.dimensions_bias data_format="channels_last")<block_end>sigma=K.sigmoid(embedded_features)<line_sep>result=weighted_sum(first_ second_ sigma self.first_threshold self.second_threshold)<line_sep>probs=K.softmax(result)<if_stmt>self.return_logits<block_start><return>[probs result]<block_end><return>probs<block_end><def_stmt>compute_output_shape self input_shape<block_start>first_shape=input_shape[0]<if_stmt>self.return_logits<block_start><return>[first_shape first_shape]<block_end><return>first_shape<block_end><block_end><def_stmt>TemporalDropout inputs dropout=0.0<block_start>"""
Drops with :dropout probability temporal steps of input 3D tensor
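For an input of shape (batch, time, features) a (batch, time, 1)
Bernoulli mask is sampled, rescaled by Dropout and broadcast over the
feature axis, so entire timesteps are dropped rather than single units.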
"""<line_sep># TO DO: adapt for >3D tensors
<if_stmt>dropout<eq>0.0<block_start><return>inputs<block_end>inputs_func=<lambda>x:K.ones_like(inputs[: : 0:1])<line_sep>inputs_mask=Lambda(inputs_func)(inputs)<line_sep>inputs_mask=Dropout(dropout)(inputs_mask)<line_sep>tiling_shape=[1 1 K.shape(inputs)[2]]+[1]<times>(K.ndim(inputs)-3)<line_sep>inputs_mask=Lambda(K.tile arguments={"n":tiling_shape} output_shape=inputs._keras_shape[1:])(inputs_mask)<line_sep>answer=Multiply()([inputs inputs_mask])<line_sep><return>answer<block_end><def_stmt>positions_func inputs pad=0<block_start>"""
A layer filling the i-th column (1-based) of a 2D tensor with
ln(1+i) when it contains a meaningful symbol
and with 0 when it contains PAD
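For example (illustrative, with pad=0): inputs [[7, 4, 0, 0]] yield
[[ln(2), ln(3), 0, 0]].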
"""<line_sep>position_inputs=K.cumsum(K.ones_like(inputs dtype="float32") axis=1)<line_sep>position_inputs<augmul>K.cast(K.not_equal(inputs pad) "float32")<line_sep><return>K.log(1.0+position_inputs)<block_end> |
<import_stmt>contextlib<import_stmt>unittest<import_from_stmt>unittest.mock Mock patch<import_stmt>sys<import_from_stmt>frosch.type_hooks HookLoader<import_from_stmt>frosch.parser Variable<line_sep>@contextlib.contextmanager<def_stmt>mock_numpy_module <block_start>"""Inject a numpy mock to avoid import errors"""<line_sep>numpy_mock=Mock()<line_sep>numpy_mock.name="numpy"<line_sep>sys.modules["numpy"]=numpy_mock<line_sep><yield>numpy_mock<del_stmt>sys.modules["numpy"]<block_end><class_stmt>TestLoader(unittest.TestCase)<block_start><def_stmt>test_lazy_load_hooks self<block_start><with_stmt>mock_numpy_module()<as>numpy_mock<block_start>hook_loader=HookLoader()<line_sep>hook_loader._lazy_load_hooks("hook_numpy")<line_sep>self.assertEqual(len(hook_loader._hooks) 2)<block_end><block_end><def_stmt>test_lazy_load_hooks_from_variable self<block_start><class_stmt>ndarray<block_start><pass><block_end>nd_array=ndarray()<line_sep>var=Variable("nd_array" 2 nd_array)<with_stmt>patch("frosch.type_hooks.HookLoader._lazy_load_hooks")<as>lazy_hook_mock<block_start>hook_loader=HookLoader()<line_sep>hook_loader.lazy_load_hooks_from_variable(var)<line_sep>lazy_hook_mock.assert_called_once_with("hook_numpy")<block_end><block_end><block_end> |
<import_from_stmt>collections defaultdict<import_stmt>numpy<as>np<class_stmt>MetricsAccumulator<block_start><def_stmt>__init__ self<arrow><none><block_start>self.accumulator=defaultdict(<lambda>:[])<block_end><def_stmt>update_metric self metric_name metric_value<block_start>self.accumulator[metric_name].append(metric_value)<block_end><def_stmt>print_average_metric self<block_start><for_stmt>k,v self.accumulator.items()<block_start>average_v=np.array(v).mean()<line_sep>print(f"{k} - {average_v:.2f}")<block_end>self.__init__()<block_end><block_end> |
<def_stmt>main request response<block_start>response.headers.set(b"Content-Type" b"text/event-stream")<line_sep><return>u""<block_end> |
<import_from_stmt>twilio.twiml.voice_response Pay VoiceResponse<line_sep>response=VoiceResponse()<line_sep>response.pay()<line_sep>print(response)<line_sep> |
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cyclegan.train."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<import_stmt>tensorflow_gan<as>tfgan<import_from_stmt>tensorflow_gan.examples.dme_cyclegan train_lib<line_sep>mock=tf.test.mock<def_stmt>_test_generator input_images<block_start>"""Simple generator function."""<line_sep><return>input_images<times>tf.get_variable('dummy_g' initializer=2.0)<block_end><def_stmt>_test_discriminator image_batch unused_conditioning=<none><block_start>"""Simple discriminator function."""<line_sep><return>tf.layers.flatten(image_batch<times>tf.get_variable('dummy_d' initializer=2.0))<block_end><class_stmt>TrainTest(tf.test.TestCase)<block_start><def_stmt>setUp self<block_start>super(TrainTest self).setUp()<line_sep># Force the TF lazy loading to kick in before mocking these out below.
_=tf.train.get_or_create_global_step<line_sep>_=tf.train.AdamOptimizer<line_sep>self._original_generator=train_lib.networks.generator<line_sep>self._original_discriminator=train_lib.networks.discriminator<line_sep>train_lib.networks.generator=_test_generator<line_sep>train_lib.networks.discriminator=_test_discriminator<line_sep>self.hparams=train_lib.HParams(image_set_x_file_pattern=<none> image_set_y_file_pattern=<none> batch_size=1 patch_size=64 master='' train_log_dir='/tmp/tfgan_logdir/cyclegan/' generator_lr=0.0002 discriminator_lr=0.0001 max_number_of_steps=500000 ps_replicas=0 task=0 cycle_consistency_loss_weight=10.0)<block_end><def_stmt>tearDown self<block_start>super(TrainTest self).tearDown()<line_sep>train_lib.networks.generator=self._original_generator<line_sep>train_lib.networks.discriminator=self._original_discriminator<block_end>@mock.patch.object(tfgan 'eval' autospec=<true>)<def_stmt>test_define_model self mock_eval<block_start><if_stmt>tf.executing_eagerly()# `tfgan.cyclegan_model` doesn't work when executing eagerly.
<block_start><return><block_end>self.hparams=self.hparams._replace(batch_size=2)<line_sep>images_shape=[self.hparams.batch_size 4 4 3]<line_sep>images_x_np=np.zeros(shape=images_shape)<line_sep>images_y_np=np.zeros(shape=images_shape)<line_sep>images_x=tf.constant(images_x_np dtype=tf.float32)<line_sep>images_y=tf.constant(images_y_np dtype=tf.float32)<line_sep>cyclegan_model=train_lib._define_model(images_x images_y)<line_sep>self.assertIsInstance(cyclegan_model tfgan.CycleGANModel)<line_sep>self.assertShapeEqual(images_x_np cyclegan_model.reconstructed_x)<line_sep>self.assertShapeEqual(images_y_np cyclegan_model.reconstructed_y)<block_end>@mock.patch.object(train_lib.networks 'generator' autospec=<true>)@mock.patch.object(train_lib.networks 'discriminator' autospec=<true>)@mock.patch.object(tf.train 'get_or_create_global_step' autospec=<true>)<def_stmt>test_get_lr self mock_get_or_create_global_step unused_mock_discriminator unused_mock_generator<block_start><if_stmt>tf.executing_eagerly()<block_start><return><block_end>base_lr=0.01<line_sep>max_number_of_steps=10<with_stmt>self.cached_session(use_gpu=<true>)<as>sess<block_start>mock_get_or_create_global_step.return_value=tf.constant(2)<line_sep>lr_step2=sess.run(train_lib._get_lr(base_lr max_number_of_steps))<line_sep>mock_get_or_create_global_step.return_value=tf.constant(9)<line_sep>lr_step9=sess.run(train_lib._get_lr(base_lr max_number_of_steps))<block_end>self.assertAlmostEqual(base_lr lr_step2)<line_sep>self.assertAlmostEqual(base_lr<times>0.2 lr_step9)<block_end>@mock.patch.object(tf.train 'AdamOptimizer' autospec=<true>)<def_stmt>test_get_optimizer self mock_adam_optimizer<block_start>gen_lr,dis_lr=0.1 0.01<line_sep>train_lib._get_optimizer(gen_lr=gen_lr dis_lr=dis_lr)<line_sep>mock_adam_optimizer.assert_has_calls([mock.call(gen_lr beta1=mock.ANY use_locking=<true>) mock.call(dis_lr beta1=mock.ANY use_locking=<true>)])<block_end><def_stmt>test_define_train_ops self<block_start><if_stmt>tf.executing_eagerly()# `tfgan.cyclegan_model` doesn't work when executing eagerly.
<block_start><return><block_end>self.hparams=self.hparams._replace(batch_size=2 generator_lr=0.1 discriminator_lr=0.01)<line_sep>images_shape=[self.hparams.batch_size 4 4 3]<line_sep>images_x=tf.zeros(images_shape dtype=tf.float32)<line_sep>images_y=tf.zeros(images_shape dtype=tf.float32)<line_sep>cyclegan_model=train_lib._define_model(images_x images_y)<line_sep>cyclegan_loss=tfgan.cyclegan_loss(cyclegan_model cycle_consistency_loss_weight=10.0)<line_sep>train_ops=train_lib._define_train_ops(cyclegan_model cyclegan_loss self.hparams)<line_sep>self.assertIsInstance(train_ops tfgan.GANTrainOps)<block_end>@mock.patch.object(tf.io 'gfile' autospec=<true>)@mock.patch.object(train_lib 'data_provider' autospec=<true>)@mock.patch.object(train_lib '_define_model' autospec=<true>)@mock.patch.object(tfgan 'cyclegan_loss' autospec=<true>)@mock.patch.object(train_lib '_define_train_ops' autospec=<true>)@mock.patch.object(tfgan 'gan_train' autospec=<true>)<def_stmt>test_main self mock_gan_train mock_define_train_ops mock_cyclegan_loss mock_define_model mock_data_provider mock_gfile<block_start>self.hparams=self.hparams._replace(image_set_x_file_pattern='/tmp/x/*.jpg' image_set_y_file_pattern='/tmp/y/*.jpg' batch_size=3 patch_size=8 generator_lr=0.02 discriminator_lr=0.3 train_log_dir='/tmp/foo' master='master' task=0 cycle_consistency_loss_weight=2.0 max_number_of_steps=1)<line_sep>mock_data_provider.provide_custom_data.return_value=(tf.zeros([3 2 2 3] dtype=tf.float32) tf.zeros([3 2 2 3] dtype=tf.float32))<line_sep>train_lib.train(self.hparams)<line_sep>mock_data_provider.provide_custom_data.assert_called_once_with(batch_size=3 image_file_patterns=['/tmp/x/*.jpg' '/tmp/y/*.jpg'] patch_size=8)<line_sep>mock_define_model.assert_called_once_with(mock.ANY mock.ANY)<line_sep>mock_cyclegan_loss.assert_called_once_with(mock_define_model.return_value cycle_consistency_loss_weight=2.0 tensor_pool_fn=mock.ANY)<line_sep>mock_define_train_ops.assert_called_once_with(mock_define_model.return_value mock_cyclegan_loss.return_value self.hparams)<line_sep>mock_gan_train.assert_called_once_with(mock_define_train_ops.return_value '/tmp/foo' get_hooks_fn=mock.ANY hooks=mock.ANY master='master' is_chief=<true>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end> |
<import_stmt>scrapy<class_stmt>QuotesSpider(scrapy.Spider)<block_start>name="quotes2"<line_sep>start_urls=['http://quotes.toscrape.com/page/1/' 'http://quotes.toscrape.com/page/2/' ]<def_stmt>parse self response<block_start>self.log('I just visited {}'.format(response.url))<block_end><block_end> |
"""Prioritization scheme for identifying follow up variants in tumor-only samples.
Generalizes the filtering scheme used in VarDict post-processing:
https://github.com/AstraZeneca-NGS/VarDict/blob/9ffec9168e91534fac5fb74b3ec7bdd2badd3464/vcf2txt.pl#L190
The goal is to build up a standard set of prioritization filters based on known
data. Uses GEMINI to load a database of variants with associated third party
query information. Makes use of ExAC, dbSNP, 1000 genomes, clinvar, cosmic and
effects annotations. The general idea is to prioritize deleterious variants
missing or present at a low frequency in the population, or secondarily identified
in external databases like COSMIC and ClinVar.
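The outcome is written back to the VCF as an EPR INFO field (for example
"pass,cosmic" or "1000g,exac"); when the tumoronly_germline_filter tool
is enabled, non-passing records also receive a LowPriority filter.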
"""<import_stmt>collections<import_stmt>csv<import_stmt>re<import_from_stmt>bcbio utils<import_from_stmt>bcbio.distributed.transaction file_transaction<import_from_stmt>bcbio.pipeline datadict<as>dd<import_from_stmt>bcbio.provenance do<import_from_stmt>bcbio.variation population vcfutils<line_sep>geneimpacts=utils.LazyImport("geneimpacts")<line_sep>cyvcf2=utils.LazyImport("cyvcf2")<def_stmt>handle_vcf_calls vcf_file data orig_items<block_start>"""Prioritize VCF calls based on external annotations supplied through GEMINI.
"""<if_stmt><not>_do_prioritize(orig_items)<block_start><return>vcf_file<block_end><else_stmt><block_start>ann_vcf=population.run_vcfanno(vcf_file data)<if_stmt>ann_vcf<block_start>priority_file=_prep_priority_filter_vcfanno(ann_vcf data)<line_sep><return>_apply_priority_filter(ann_vcf priority_file data)<block_end># No data available for filtering, return original file
<else_stmt><block_start><return>vcf_file<block_end><block_end><block_end><def_stmt>_apply_priority_filter in_file priority_file data<block_start>"""Annotate variants with priority information and use to apply filters.
"""<line_sep>out_file="%s-priority%s"%utils.splitext_plus(in_file)<if_stmt><not>utils.file_exists(out_file)<block_start><with_stmt>file_transaction(data out_file)<as>tx_out_file<block_start>header=('##INFO=<ID=EPR,Number=.,Type=String,'<concat>'Description="Somatic prioritization based on external annotations, '<concat>'identify as likely germline">')<line_sep>header_file="%s-repeatheader.txt"%utils.splitext_plus(tx_out_file)[0]<with_stmt>open(header_file "w")<as>out_handle<block_start>out_handle.write(header)<block_end><if_stmt>"tumoronly_germline_filter"<in>dd.get_tools_on(data)<block_start>filter_cmd=("bcftools filter -m '+' -s 'LowPriority' "<concat>"""-e "EPR[0] != 'pass'" |""")<block_end><else_stmt><block_start>filter_cmd=""<block_end># bcftools 1.13+ requires to skip TO
cmd=("bcftools annotate -a {priority_file} -h {header_file} "<concat>"-c CHROM,FROM,-,REF,ALT,INFO/EPR {in_file} | "<concat>"{filter_cmd} bgzip -c > {tx_out_file}")<line_sep>do.run(cmd.format(**locals()) "Run external annotation based prioritization filtering")<block_end><block_end>vcfutils.bgzip_and_index(out_file data["config"])<line_sep><return>out_file<block_end><def_stmt>_prep_priority_filter_vcfanno in_vcf data<block_start>"""Prepare tabix file with priority filters based on vcfanno annotations.
"""<line_sep>pops=['af_adj_exac_afr' 'af_adj_exac_amr' 'af_adj_exac_eas' 'af_adj_exac_fin' 'af_adj_exac_nfe' 'af_adj_exac_oth' 'af_adj_exac_sas' 'af_exac_all' 'max_aaf_all' "af_esp_ea" "af_esp_aa" "af_esp_all" "af_1kg_amr" "af_1kg_eas" "af_1kg_sas" "af_1kg_afr" "af_1kg_eur" "af_1kg_all"]<line_sep>known=["cosmic_ids" "cosmic_id" "clinvar_sig"]<line_sep>out_file="%s-priority.tsv"%utils.splitext_plus(in_vcf)[0]<if_stmt><not>utils.file_exists(out_file)<and><not>utils.file_exists(out_file+".gz")<block_start><with_stmt>file_transaction(data out_file)<as>tx_out_file<block_start><with_stmt>open(tx_out_file "w")<as>out_handle<block_start>writer=csv.writer(out_handle dialect="excel-tab")<line_sep>header=["#chrom" "start" "end" "ref" "alt" "filter"]<line_sep>writer.writerow(header)<line_sep>vcf_reader=cyvcf2.VCF(in_vcf)<line_sep>impact_info=_get_impact_info(vcf_reader)<for_stmt>rec vcf_reader<block_start>row=_prepare_vcf_rec(rec pops known impact_info)<line_sep>cur_filter=_calc_priority_filter(row pops)<line_sep>writer.writerow([rec.CHROM rec.start rec.end rec.REF ",".join(rec.ALT) cur_filter])<block_end><block_end><block_end><block_end><return>vcfutils.bgzip_and_index(out_file data["config"] tabix_args="-0 -c '#' -s 1 -b 2 -e 3")<block_end><def_stmt>_get_impact_info vcf_reader<block_start>"""Retrieve impact parsing information from INFO header.
"""<line_sep>ImpactInfo=collections.namedtuple("ImpactInfo" "header, gclass, id")<line_sep>KEY_2_CLASS={'CSQ':geneimpacts.VEP 'ANN':geneimpacts.SnpEff 'BCSQ':geneimpacts.BCFT}<for_stmt>l (x.strip()<for>x _from_bytes(vcf_reader.raw_header).split("\n"))<block_start><if_stmt>l.startswith("##INFO")<block_start>patt=re.compile(r"(\w+)=(\"[^\"]+\"|[^,]+)")<line_sep>stub=l.split("=<")[1].rstrip(">")<line_sep>d=dict(patt.findall(_from_bytes(stub)))<if_stmt>d["ID"]<in>KEY_2_CLASS<block_start><return>ImpactInfo(_parse_impact_header(d) KEY_2_CLASS[d["ID"]] d["ID"])<block_end><block_end><block_end><block_end><def_stmt>_from_bytes s<block_start><if_stmt>isinstance(s bytes)<block_start><import_stmt>locale<line_sep>ENC=locale.getpreferredencoding()<try_stmt><block_start><return>s.decode(ENC)<block_end><except_stmt>UnicodeDecodeError<block_start><return>s.decode('utf8')<block_end><block_end><return>s<block_end><def_stmt>_parse_impact_header hdr_dict<block_start>"""Parse fields for impact, taken from vcf2db
"""<line_sep>desc=hdr_dict["Description"]<if_stmt>hdr_dict["ID"]<eq>"ANN"<block_start>parts=[x.strip("\"'")<for>x re.split("\s*\|\s*" desc.split(":" 1)[1].strip('" '))]<block_end><elif_stmt>hdr_dict["ID"]<eq>"EFF"<block_start>parts=[x.strip(" [])'(\"")<for>x re.split("\||\(" desc.split(":" 1)[1].strip())]<block_end><elif_stmt>hdr_dict["ID"]<eq>"CSQ"<block_start>parts=[x.strip(" [])'(\"")<for>x re.split("\||\(" desc.split(":" 1)[1].strip())]<block_end><elif_stmt>hdr_dict["ID"]<eq>"BCSQ"<block_start>parts=desc.split(']' 1)[1].split(']')[0].replace('[' '').split("|")<block_end><else_stmt><block_start><raise>Exception("don't know how to use %s as annotation"%hdr_dict["ID"])<block_end><return>parts<block_end><def_stmt>_prepare_vcf_rec rec pops known impact_info<block_start>"""Parse a vcfanno output into a dictionary of useful attributes.
"""<line_sep>out={}<for_stmt>k pops+known<block_start>out[k]=rec.INFO.get(k)<block_end><if_stmt>impact_info<block_start>cur_info=rec.INFO.get(impact_info.id)<if_stmt>cur_info<block_start>cur_impacts=[impact_info.gclass(e impact_info.header)<for>e _from_bytes(cur_info).split(",")]<line_sep>top=geneimpacts.Effect.top_severity(cur_impacts)<if_stmt>isinstance(top list)<block_start>top=top[0]<block_end>out["impact_severity"]=top.effect_severity<block_end><block_end><return>out<block_end><def_stmt>_calc_priority_filter row pops<block_start>"""Calculate the priority filter based on external associated data.
- Pass high/medium impact variants not found in population databases
- Pass variants found in COSMIC or Clinvar provided they don't have two
additional reasons to filter (found in multiple external populations)
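For example (illustrative): a COSMIC variant that is also common in two
population databases has one pass reason but two filter reasons, so it
is annotated as e.g. "cosmic,1000g,exac" without the leading "pass".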
"""<line_sep>filters=[]<line_sep>passes=[]<line_sep>passes.extend(_find_known(row))<line_sep>filters.extend(_known_populations(row pops))<if_stmt>len(filters)<eq>0<or>(len(passes)<g>0<and>len(filters)<l>2)<block_start>passes.insert(0 "pass")<block_end><return>",".join(passes+filters)<block_end><def_stmt>_known_populations row pops<block_start>"""Find variants present in substantial frequency in population databases.
"""<line_sep>cutoff=0.01<line_sep>out=set([])<for_stmt>pop,base [("esp" "af_esp_all") ("1000g" "af_1kg_all") ("exac" "af_exac_all") ("anypop" "max_aaf_all")]<block_start><for_stmt>key [x<for>x pops<if>x.startswith(base)]<block_start>val=row[key]<if_stmt>val<and>val<g>cutoff<block_start>out.add(pop)<block_end><block_end><block_end><return>sorted(list(out))<block_end><def_stmt>_find_known row<block_start>"""Find variant present in known pathogenic databases.
"""<line_sep>out=[]<line_sep>clinvar_no=set(["unknown" "untested" "non-pathogenic" "probable-non-pathogenic" "uncertain_significance" "uncertain_significance" "not_provided" "benign" "likely_benign"])<if_stmt>row["cosmic_ids"]<or>row["cosmic_id"]<block_start>out.append("cosmic")<block_end><if_stmt>row["clinvar_sig"]<and><not>row["clinvar_sig"].lower()<in>clinvar_no<block_start>out.append("clinvar")<block_end><return>out<block_end><def_stmt>_do_prioritize items<block_start>"""Determine if we should perform prioritization.
Currently done on tumor-only input samples and feeding into PureCN
which needs the germline annotations.
"""<if_stmt><not>any("tumoronly-prioritization"<in>dd.get_tools_off(d)<for>d items)<block_start><if_stmt>vcfutils.get_paired_phenotype(items[0])<block_start>has_tumor=<false><line_sep>has_normal=<false><for_stmt>sub_data items<block_start><if_stmt>vcfutils.get_paired_phenotype(sub_data)<eq>"tumor"<block_start>has_tumor=<true><block_end><elif_stmt>vcfutils.get_paired_phenotype(sub_data)<eq>"normal"<block_start>has_normal=<true><block_end><block_end><return>has_tumor<and><not>has_normal<block_end><block_end><block_end> |
<import_stmt>json<line_sep># import logging
<import_from_stmt>.utils is_invalid_params<import_from_stmt>.exceptions JSONRPCInvalidParams JSONRPCInvalidRequest JSONRPCInvalidRequestException JSONRPCMethodNotFound JSONRPCParseError JSONRPCServerError JSONRPCDispatchException <import_from_stmt>.jsonrpc1 JSONRPC10Response<import_from_stmt>.jsonrpc2 JSONRPC20BatchRequest JSONRPC20BatchResponse JSONRPC20Response <import_from_stmt>.jsonrpc JSONRPCRequest<line_sep># logger = logging.getLogger(__name__)
<class_stmt>JSONRPCResponseManager(object)<block_start>""" JSON-RPC response manager.
This method brings syntactic sugar into the library. Given a dispatcher
it handles requests (both single and batch) and handles errors.
Requests could be handled in parallel; that is the server's responsibility.
:param str request_str: json string. Will be converted into
JSONRPC20Request, JSONRPC20BatchRequest or JSONRPC10Request
:param dict dispatcher: dict<function_name:function>.
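Example (a minimal sketch; the "add" method and its params are
illustrative assumptions, not part of the library)::
    >>> req = '{"jsonrpc": "2.0", "method": "add", "params": [1, 2], "id": 1}'
    >>> resp = JSONRPCResponseManager.handle(req, {"add": lambda a, b: a + b})
    >>> resp.result
    3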
"""<line_sep>RESPONSE_CLASS_MAP={"1.0":JSONRPC10Response "2.0":JSONRPC20Response }<line_sep>@classmethod<def_stmt>handle cls request_str dispatcher<block_start><if_stmt>isinstance(request_str bytes)<block_start>request_str=request_str.decode("utf-8")<block_end><try_stmt><block_start>json.loads(request_str)<block_end><except_stmt>(TypeError ValueError)<block_start><return>JSONRPC20Response(error=JSONRPCParseError()._data)<block_end><try_stmt><block_start>request=JSONRPCRequest.from_json(request_str)<block_end><except_stmt>JSONRPCInvalidRequestException<block_start><return>JSONRPC20Response(error=JSONRPCInvalidRequest()._data)<block_end><return>cls.handle_request(request dispatcher)<block_end>@classmethod<def_stmt>handle_request cls request dispatcher<block_start>""" Handle request data.
At this moment the request has a correct JSON-RPC format.
:param dict request: data parsed from request_str.
:param jsonrpc.dispatcher.Dispatcher dispatcher:
.. versionadded:: 1.8.0
"""<line_sep>rs=request<if>isinstance(request JSONRPC20BatchRequest)<else>[request]<line_sep>responses=[r<for>r cls._get_responses(rs dispatcher)<if>r<is><not><none>]<line_sep># notifications
<if_stmt><not>responses<block_start><return><block_end><if_stmt>isinstance(request JSONRPC20BatchRequest)<block_start><return>JSONRPC20BatchResponse(*responses)<block_end><else_stmt><block_start><return>responses[0]<block_end><block_end>@classmethod<def_stmt>_get_responses cls requests dispatcher<block_start>""" Response to each single JSON-RPC Request.
:return iterator(JSONRPC20Response):
.. versionadded:: 1.9.0
TypeError inside the function is distinguished from Invalid Params.
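Concretely: a TypeError raised because the params do not match the
method signature is reported as InvalidParams, while a TypeError
raised inside the method body is reported as ServerError.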
"""<for_stmt>request requests<block_start><def_stmt>response **kwargs<block_start><return>cls.RESPONSE_CLASS_MAP[request.JSONRPC_VERSION](_id=request._id **kwargs)<block_end><try_stmt><block_start>method=dispatcher[request.method]<block_end><except_stmt>KeyError<block_start>output=response(error=JSONRPCMethodNotFound()._data)<block_end><else_stmt><block_start><try_stmt><block_start>result=method(*request.args **request.kwargs)<block_end><except_stmt>JSONRPCDispatchException<as>e<block_start>output=response(error=e.error._data)<block_end><except_stmt>Exception<as>e<block_start>data={"type":e.__class__.__name__ "args":e.args "message":str(e) }<if_stmt>isinstance(e TypeError)<and>is_invalid_params(method *request.args **request.kwargs)<block_start>output=response(error=JSONRPCInvalidParams(data=data)._data)<block_end><else_stmt># logger.exception("API Exception: {0}".format(data))
<block_start>print("API Exception: {0}".format(data))<line_sep>output=response(error=JSONRPCServerError(data=data)._data)<block_end><block_end><else_stmt><block_start>output=response(result=result)<block_end><block_end><finally_stmt><block_start><if_stmt><not>request.is_notification<block_start><yield>output<block_end><block_end><block_end><block_end><block_end> |
<import_stmt>types<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>kartothek.io_components.merge align_datasets<import_from_stmt>kartothek.io_components.metapartition MetaPartition<import_from_stmt>kartothek.io_components.write store_dataset_from_partitions<def_stmt>test_align_datasets_prefix dataset evaluation_dataset store_session<block_start>generator=align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=evaluation_dataset.uuid store=store_session match_how="prefix" )<assert_stmt>isinstance(generator types.GeneratorType)<line_sep>list_metapartitions=list(generator)<line_sep># Two separate cluster_groups (e.g. cluster_1*)
<assert_stmt>len(list_metapartitions)<eq>2<line_sep>mp_list=list_metapartitions[0]<assert_stmt>len(mp_list)<eq>3 [mp.label<for>mp mp_list]<line_sep>mp_list=list_metapartitions[1]<assert_stmt>len(mp_list)<eq>3 [mp.label<for>mp mp_list]<line_sep># Test sorting of datasets by length, i.e. order of dataframes is different
generator=align_datasets(left_dataset_uuid=evaluation_dataset.uuid right_dataset_uuid=dataset.uuid store=store_session match_how="prefix" )<line_sep>list_metapartitions=list(generator)<line_sep>mp_list=list_metapartitions[0]<block_end><def_stmt>test_align_datasets_prefix__equal_number_of_partitions dataset evaluation_dataset store_session<block_start>"""
Test a scenario where the simple prefix match algorithm didn't find any
matches because both datasets have an equal number of partitions.
"""<line_sep># Create a reference dataset which matches the problem (equal number of
# partitions and suitable for prefix matching)
mp=MetaPartition(label="cluster_1_1" metadata_version=dataset.metadata_version)<line_sep>mp2=MetaPartition(label="cluster_2_1" metadata_version=dataset.metadata_version)<line_sep>metapartitions=[mp mp2]<line_sep>store_dataset_from_partitions(partition_list=metapartitions dataset_uuid="reference_dataset_uuid" store=store_session )<line_sep>generator=align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid="reference_dataset_uuid" store=store_session match_how="prefix" )<assert_stmt>isinstance(generator types.GeneratorType)<line_sep>list_metapartitions=list(generator)<line_sep># Two separate cluster_groups (e.g. cluster_1*)
<assert_stmt>len(list_metapartitions)<eq>2<line_sep>mp_list=list_metapartitions[0]<assert_stmt>len(mp_list)<eq>2<line_sep>mp_list=list_metapartitions[1]<assert_stmt>len(mp_list)<eq>2<line_sep># Test sorting of datasets by length, i.e. order of dataframes is different
generator=align_datasets(left_dataset_uuid=evaluation_dataset.uuid right_dataset_uuid=dataset.uuid store=store_session match_how="prefix" )<line_sep>list_metapartitions=list(generator)<line_sep>mp_list=list_metapartitions[0]<block_end><def_stmt>test_align_datasets_exact dataset evaluation_dataset store_session<block_start><with_stmt>pytest.raises(RuntimeError)<block_start>list(align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=evaluation_dataset.uuid store=store_session match_how="exact" ))<block_end>generator=align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=dataset.uuid store=store_session match_how="exact" )<assert_stmt>isinstance(generator types.GeneratorType)<line_sep>list_metapartitions=list(generator)<line_sep># Two separate cluster_groups (e.g. cluster_1*)
<assert_stmt>len(list_metapartitions)<eq>2<line_sep>mp_list=list_metapartitions[0]<assert_stmt>len(mp_list)<eq>2 [mp.label<for>mp mp_list]<assert_stmt>[mp.label<for>mp mp_list]<eq>["cluster_1" "cluster_1"]<line_sep>mp_list=list_metapartitions[1]<assert_stmt>len(mp_list)<eq>2 [mp.label<for>mp mp_list]<assert_stmt>[mp.label<for>mp mp_list]<eq>["cluster_2" "cluster_2"]<block_end><def_stmt>test_align_datasets_left dataset evaluation_dataset store_session<block_start>generator=align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=evaluation_dataset.uuid store=store_session match_how="left" )<assert_stmt>isinstance(generator types.GeneratorType)<line_sep>list_metapartitions=list(generator)<assert_stmt>len(list_metapartitions)<eq>len(dataset.partitions)<line_sep>mp_list=list_metapartitions[0]<assert_stmt>len(mp_list)<eq>5 [mp.label<for>mp mp_list]<line_sep>expected=["cluster_1" "cluster_1_1" "cluster_1_2" "cluster_2_1" "cluster_2_2"]<assert_stmt>[mp.label<for>mp mp_list]<eq>expected<line_sep>mp_list=list_metapartitions[1]<assert_stmt>len(mp_list)<eq>5 [mp.label<for>mp mp_list]<line_sep>expected=["cluster_2" "cluster_1_1" "cluster_1_2" "cluster_2_1" "cluster_2_2"]<assert_stmt>[mp.label<for>mp mp_list]<eq>expected<block_end><def_stmt>test_align_datasets_right dataset evaluation_dataset store_session<block_start>generator=align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=evaluation_dataset.uuid store=store_session match_how="right" )<assert_stmt>isinstance(generator types.GeneratorType)<line_sep>list_metapartitions=list(generator)<assert_stmt>len(list_metapartitions)<eq>len(evaluation_dataset.partitions)<line_sep>mp_list=list_metapartitions[0]<assert_stmt>len(mp_list)<eq>3 [mp.label<for>mp mp_list]<line_sep>expected=["cluster_1_1" "cluster_1" "cluster_2"]<assert_stmt>[mp.label<for>mp mp_list]<eq>expected<line_sep>mp_list=list_metapartitions[1]<assert_stmt>len(mp_list)<eq>3 [mp.label<for>mp mp_list]<line_sep>expected=["cluster_1_2" "cluster_1" "cluster_2"]<assert_stmt>[mp.label<for>mp mp_list]<eq>expected<line_sep>mp_list=list_metapartitions[2]<assert_stmt>len(mp_list)<eq>3 [mp.label<for>mp mp_list]<line_sep>expected=["cluster_2_1" "cluster_1" "cluster_2"]<assert_stmt>[mp.label<for>mp mp_list]<eq>expected<line_sep>mp_list=list_metapartitions[3]<assert_stmt>len(mp_list)<eq>3 [mp.label<for>mp mp_list]<line_sep>expected=["cluster_2_2" "cluster_1" "cluster_2"]<assert_stmt>[mp.label<for>mp mp_list]<eq>expected<block_end><def_stmt>test_align_datasets_callable dataset evaluation_dataset store_session<block_start><def_stmt>comp left right<block_start><return>left<eq>right<block_end><with_stmt>pytest.raises(RuntimeError)<block_start>list(align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=evaluation_dataset.uuid store=store_session match_how=comp ))<block_end>generator=align_datasets(left_dataset_uuid=dataset.uuid right_dataset_uuid=dataset.uuid store=store_session match_how=comp )<assert_stmt>isinstance(generator types.GeneratorType)<line_sep>list_metapartitions=list(generator)<line_sep># Two separate cluster_groups (e.g. cluster_1*)
<assert_stmt>len(list_metapartitions)<eq>2<line_sep>mp_list=list_metapartitions[0]<assert_stmt>len(mp_list)<eq>2 [mp.label<for>mp mp_list]<assert_stmt>[mp.label<for>mp mp_list]<eq>["cluster_1" "cluster_1"]<line_sep>mp_list=list_metapartitions[1]<assert_stmt>len(mp_list)<eq>2 [mp.label<for>mp mp_list]<assert_stmt>[mp.label<for>mp mp_list]<eq>["cluster_2" "cluster_2"]<block_end><def_stmt>test_merge_metapartitions <block_start>df=pd.DataFrame({"P":[1 1] "L":[1 2] "TARGET":[1 2]})<line_sep>df_2=pd.DataFrame({"P":[1] "info":"a"})<line_sep>mp=MetaPartition(label="cluster_1" data={"core":df "helper":df_2})<line_sep>df_3=pd.DataFrame({"P":[1 1] "L":[1 2] "PRED":[0.1 0.2]})<line_sep>mp2=MetaPartition(label="cluster_1" data={"predictions":df_3})<line_sep>merged_mp=MetaPartition.merge_metapartitions(metapartitions=[mp mp2])<line_sep>df=pd.DataFrame({"P":[1 1] "L":[1 2] "TARGET":[1 2] "info":["a" "a"] "PRED":[0.1 0.2] })<assert_stmt>merged_mp.label<eq>"cluster_1"<assert_stmt>len(merged_mp.data)<eq>3<block_end> |
# encoding: utf-8
<import_stmt>logging<import_from_stmt>datetime datetime<import_from_stmt>sqlalchemy Column Integer String DateTime Text Boolean ForeignKey PickleType Index Float <import_from_stmt>sqlalchemy.ext.declarative declarative_base declared_attr<import_from_stmt>sqlalchemy.event listen<import_from_stmt>sqlalchemy.orm sessionmaker<import_from_stmt>airflow settings configuration<import_from_stmt>airflow.utils.db provide_session<import_from_stmt>airflow.models User<line_sep>Base=declarative_base()<class_stmt>DcmpUserProfile(Base)<block_start>__tablename__="dcmp_user_profile"<line_sep>id=Column(Integer primary_key=<true>)<line_sep>user_id=Column(Integer unique=<true> nullable=<false>)<line_sep>is_superuser=Column(Boolean index=<true> default=<false> nullable=<false>)<line_sep>is_data_profiler=Column(Boolean index=<true> default=<false> nullable=<false>)<line_sep>is_approver=Column(Boolean index=<true> default=<false> nullable=<false>)<line_sep>approval_notification_emails=Column(Text default="" nullable=<false>)<line_sep>updated_at=Column(DateTime index=<true> default=datetime.now onupdate=datetime.now)<line_sep>created_at=Column(DateTime index=<true> default=datetime.now)<def_stmt>__repr__ self<block_start><return>"<DcmpUserProfile: %s user#%s>"%(self.id self.user_id)<block_end>@property@provide_session<def_stmt>username self session=<none><block_start>user=session.query(User).filter(User.id<eq>self.user_id).first()<if_stmt>user<block_start><return>user.username<block_end><return>""<block_end>@property<def_stmt>approval_notification_emails_list self<block_start><return>[email.strip()<for>email self.approval_notification_emails.split(",")<if>email.strip()]<block_end><block_end><def_stmt>sync_profiles action=<none> target=<none><block_start>session=sessionmaker(autocommit=<false> autoflush=<false> bind=settings.engine)()<line_sep>user_ids={user.id<for>user session.query(User)}<if_stmt>action<eq>"insert"<block_start>user_ids.add(target.id)<block_end><elif_stmt>action<eq>"delete"<block_start>user_ids.remove(target.id)<block_end>profile_user_ids={profile.user_id<for>profile session.query(DcmpUserProfile)}<line_sep>no_profile_user_ids=user_ids-profile_user_ids<line_sep>no_user_user_ids=profile_user_ids-user_ids<for_stmt>user_id no_profile_user_ids<block_start>profile=DcmpUserProfile()<line_sep>profile.user_id=user_id<line_sep>session.add(profile)<block_end>session.query(DcmpUserProfile).filter(DcmpUserProfile.user_id.in_(no_user_user_ids)).delete(synchronize_session=<false>)<line_sep>session.commit()<line_sep>session.close()<block_end><if_stmt>__package__<block_start><try_stmt><block_start>sync_profiles()<block_end><except_stmt>Exception<as>e<block_start>logging.warn("Run python {AIRFLOW_HOME}/plugins/dcmp/tools/upgradedb.py first")<block_end>listen(User 'after_insert' <lambda>mapper connection target:sync_profiles(action="insert" target=target))<line_sep>listen(User 'after_delete' <lambda>mapper connection target:sync_profiles(action="delete" target=target))<if_stmt>configuration.get('webserver' 'auth_backend').endswith('dcmp.auth.backends.password_auth')<block_start><import_from_stmt>dcmp.auth.backends.password_auth PasswordUser<line_sep>listen(PasswordUser 'after_insert' <lambda>mapper connection target:sync_profiles(action="insert" target=target))<line_sep>listen(PasswordUser 'after_delete' <lambda>mapper connection target:sync_profiles(action="delete" target=target))<block_end><block_end> |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
<import_stmt>argparse<import_stmt>copy<import_stmt>unittest<import_stmt>torch<import_from_stmt>fairseq.criterions.cross_entropy CrossEntropyCriterion<import_from_stmt>fairseq.criterions.label_smoothed_cross_entropy LabelSmoothedCrossEntropyCriterion<import_stmt>tests.utils<as>test_utils<class_stmt>TestLabelSmoothing(unittest.TestCase)<block_start><def_stmt>setUp self# build dictionary
<block_start>self.d=test_utils.dummy_dictionary(3)<line_sep>vocab=len(self.d)<line_sep>self.assertEqual(vocab 4+3)# 4 special + 3 tokens
self.assertEqual(self.d.pad() 1)<line_sep>self.assertEqual(self.d.eos() 2)<line_sep>self.assertEqual(self.d.unk() 3)<line_sep>pad,eos,unk,w1,w2,w3=1 2 3 4 5 6# noqa: F841
# build dataset
self.data=[# the first batch item has padding
{'source':torch.LongTensor([w1 eos]) 'target':torch.LongTensor([w1 eos])} {'source':torch.LongTensor([w1 eos]) 'target':torch.LongTensor([w1 w1 eos])} ]<line_sep>self.sample=next(test_utils.dummy_dataloader(self.data))<line_sep># build model
self.args=argparse.Namespace()<line_sep>self.args.sentence_avg=<false><line_sep>self.args.probs=torch.FloatTensor([# bos pad eos unk w1 w2 w3
[0.05 0.05 0.1 0.05 0.3 0.4 0.05] [0.05 0.10 0.2 0.05 0.2 0.3 0.10] [0.05 0.15 0.3 0.05 0.1 0.2 0.15] ]).unsqueeze(0).expand(2 3 7)<line_sep># add batch dimension
self.task=test_utils.TestTranslationTask.setup_task(self.args self.d self.d)<line_sep>self.model=self.task.build_model(self.args)<block_end><def_stmt>test_nll_loss self<block_start>self.args.label_smoothing=0.1<line_sep>nll_crit=CrossEntropyCriterion(self.args self.task)<line_sep>smooth_crit=LabelSmoothedCrossEntropyCriterion(self.args self.task)<line_sep>nll_loss,nll_sample_size,nll_logging_output=nll_crit(self.model self.sample)<line_sep>smooth_loss,smooth_sample_size,smooth_logging_output=smooth_crit(self.model self.sample)<line_sep>self.assertLess(abs(nll_loss-nll_logging_output['loss']) 1e-6)<line_sep>self.assertLess(abs(nll_loss-smooth_logging_output['nll_loss']) 1e-6)<block_end><def_stmt>test_padding self<block_start>self.args.label_smoothing=0.1<line_sep>crit=LabelSmoothedCrossEntropyCriterion(self.args self.task)<line_sep>loss,_,logging_output=crit(self.model self.sample)<def_stmt>get_one_no_padding idx# create a new sample with just a single batch item so that there's
# no padding
<block_start>sample1=next(test_utils.dummy_dataloader([self.data[idx]]))<line_sep>args1=copy.copy(self.args)<line_sep>args1.probs=args1.probs[idx : :].unsqueeze(0)<line_sep>model1=self.task.build_model(args1)<line_sep>loss1,_,_=crit(model1 sample1)<line_sep><return>loss1<block_end>loss1=get_one_no_padding(0)<line_sep>loss2=get_one_no_padding(1)<line_sep>self.assertAlmostEqual(loss loss1+loss2)<block_end><def_stmt>test_reduction self<block_start>self.args.label_smoothing=0.1<line_sep>crit=LabelSmoothedCrossEntropyCriterion(self.args self.task)<line_sep>loss,_,logging_output=crit(self.model self.sample reduce=<true>)<line_sep>unreduced_loss,_,_=crit(self.model self.sample reduce=<false>)<line_sep>self.assertAlmostEqual(loss unreduced_loss.sum())<block_end><def_stmt>test_zero_eps self<block_start>self.args.label_smoothing=0.0<line_sep>nll_crit=CrossEntropyCriterion(self.args self.task)<line_sep>smooth_crit=LabelSmoothedCrossEntropyCriterion(self.args self.task)<line_sep>nll_loss,nll_sample_size,nll_logging_output=nll_crit(self.model self.sample)<line_sep>smooth_loss,smooth_sample_size,smooth_logging_output=smooth_crit(self.model self.sample)<line_sep>self.assertAlmostEqual(nll_loss smooth_loss)<block_end><def_stmt>assertAlmostEqual self t1 t2<block_start>self.assertEqual(t1.size() t2.size() "size mismatch")<line_sep>self.assertLess((t1-t2).abs().max() 1e-6)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
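The invariants these tests check (eps = 0 reduces exactly to cross entropy, and the smoothed criterion still logs the true nll_loss) follow from the shape of the label-smoothed objective. A hedged sketch of that loss; fairseq's exact reduction and padding handling may differ:

import torch

def label_smoothed_nll(lprobs, target, eps):
    # lprobs: (N, V) log-probabilities; target: (N,) gold indices.
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)  # uniform prior over the vocabulary
    # eps = 0 leaves only the NLL term, which is what test_zero_eps relies on.
    return ((1.0 - eps) * nll + eps * smooth).sum()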
<import_stmt>os<line_sep>os.environ["DJANGO_SETTINGS_MODULE"]="settings"<line_sep> |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>unittest<import_stmt>launch_testing<import_stmt>launch<import_stmt>launch.actions<import_stmt>launch_testing.actions<import_stmt>launch_testing.markers<import_from_stmt>ament_index_python.packages get_package_share_directory<import_from_stmt>launch_ros.actions Node<def_stmt>generate_test_description # Test fixture
<block_start>gazebo_test_fixture=Node(package='dolly_tests' executable='follow_ignition_TEST' output='screen')<line_sep># Spawn dolly
pkg_dolly_ignition=get_package_share_directory('dolly_ignition')<line_sep>spawn=Node(package='ros_ign_gazebo' executable='create' arguments=['-name' 'dolly' '-z' '0.225' '-file' os.path.join(pkg_dolly_ignition 'models' 'dolly_ignition' 'model.sdf')] output='screen')<line_sep># Bridge
bridge=Node(package='ros_ign_bridge' executable='parameter_bridge' arguments=['/dolly/cmd_vel@geometry_msgs/msg/Twist@ignition.msgs.Twist' '/dolly/laser_scan@sensor_msgs/msg/LaserScan@ignition.msgs.LaserScan'] output='screen')<line_sep># Follow node
follow=Node(package='dolly_follow' executable='dolly_follow' output='screen' remappings=[('cmd_vel' '/dolly/cmd_vel') ('laser_scan' '/dolly/laser_scan')])<line_sep><return>launch.LaunchDescription([gazebo_test_fixture spawn bridge follow launch_testing.util.KeepAliveProc() launch_testing.actions.ReadyToTest()]) locals()<block_end><class_stmt>DollyFollowTest(unittest.TestCase)<block_start><def_stmt>test_termination self gazebo_test_fixture proc_info<block_start>proc_info.assertWaitForShutdown(process=gazebo_test_fixture timeout=200)<block_end><block_end>@launch_testing.post_shutdown_test()<class_stmt>DollyFollowTestAfterShutdown(unittest.TestCase)<block_start><def_stmt>test_exit_code self gazebo_test_fixture proc_info<block_start>launch_testing.asserts.assertExitCodes(proc_info [launch_testing.asserts.EXIT_OK] gazebo_test_fixture)<block_end><block_end> |
<import_from_stmt>pipetools.utils foreach<line_sep>__version__=VERSION=1 0 1<line_sep>__versionstr__=VERSION<g>foreach(str)|'.'.join<import_from_stmt>pipetools.main pipe X maybe xpartial<import_from_stmt>pipetools.utils *<line_sep># prevent namespace pollution
<import_stmt>pipetools.compat<for_stmt>symbol dir(pipetools.compat)<block_start><if_stmt>globals().get(symbol)<is>getattr(pipetools.compat symbol)<block_start>globals().pop(symbol)<block_end><block_end> |
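The version-string pipe above (VERSION > foreach(str) | '.'.join) desugars to plain Python as follows:

VERSION = (1, 0, 1)
assert '.'.join(str(part) for part in VERSION) == '1.0.1'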
<import_stmt>tensorflow<as>tf<import_from_stmt>waymo_open_dataset dataset_pb2<as>open_dataset<import_from_stmt>waymo_open_dataset.utils frame_utils<def_stmt>get_data_from_seg segment<block_start>dataset=tf.data.TFRecordDataset(segment compression_type='')<for_stmt>data dataset<block_start>frame=open_dataset.Frame()<line_sep>frame.ParseFromString(bytearray(data.numpy()))<line_sep>(range_images camera_projections range_image_top_pose)=frame_utils.parse_range_image_and_camera_projection(frame)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>seg='./segment-967082162553397800_5102_900_5122_900_with_camera_labels.tfrecord'<line_sep>get_data_from_seg(seg)<block_end> |
<import_from_stmt>html4vision Col imagetable<line_sep>cols=[Col('img' 'Image' 'images/road_*_image.jpg') Col('img' 'Label' 'images/road_*_label.png' 1) # 1 is used to select only the first item
Col('img' 'Image + Label' 'images/road_*_image.jpg') Col('overlay' '' 'images/road_*_label.png' 1 'opacity: 0.4') ]<line_sep>imagetable(cols 'overlay.html' 'Image Overlay' imscale=1.5 overlay_toggle=<true>)<line_sep> |
###
### Read a geometry from a single xml file created from mfxmlwriter.py
### and write it into a db file.
###
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("MagneticFieldWriter")<line_sep>process.load("CondCore.DBCommon.CondDBCommon_cfi")<line_sep>#GEOMETRY_VERSION = '90322'
#GEOMETRY_VERSION = '120812'
#GEOMETRY_VERSION = '130503'
GEOMETRY_VERSION='160812'<line_sep>process.source=cms.Source("EmptyIOVSource" lastValue=cms.uint64(1) timetype=cms.string('runnumber') firstValue=cms.uint64(1) interval=cms.uint64(1))<line_sep># This reads the big XML file and the only way to fill the
# nonreco part of the database is to read this file. It
# somewhat duplicates the information read from the little
# XML files, but there is no way to directly build the
# DDCompactView from this.
process.XMLGeometryWriter=cms.EDAnalyzer("XMLGeometryBuilder" XMLFileName=cms.untracked.string("./mfGeometry_"+GEOMETRY_VERSION+".xml") ZIP=cms.untracked.bool(<true>) record=cms.untracked.string('MFGeometryFileRcd'))<line_sep>process.CondDBCommon.BlobStreamerName=cms.untracked.string('TBufferBlobStreamingService')<line_sep>process.CondDBCommon.timetype=cms.untracked.string('runnumber')<line_sep>process.CondDBCommon.connect=cms.string('sqlite_file:mfGeometry_'+GEOMETRY_VERSION+'.db')<line_sep>process.PoolDBOutputService=cms.Service("PoolDBOutputService" process.CondDBCommon toPut=cms.VPSet(cms.PSet(record=cms.string('MFGeometryFileRcd') tag=cms.string('MagneticFieldGeometry_'+str(GEOMETRY_VERSION)))))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.p1=cms.Path(process.XMLGeometryWriter)<line_sep># Create the corresponding metadata file
f=open('mfGeometry_'+GEOMETRY_VERSION+'.txt' 'w')<line_sep>f.write('{\n'+' \"destinationDatabase\": \"oracle://cms_orcon_prod/CMS_CONDITIONS\",\n'+' \"destinationTags\": {\n'+' \"MFGeometry_'+GEOMETRY_VERSION+'\": {}\n'+' },\n'+' \"inputTag\": "MagneticFieldGeometry_'+GEOMETRY_VERSION+'\",\n'+' \"since\": 1,\n'+' \"userText\": "Mag field geometry, version '+GEOMETRY_VERSION+'\"\n'+'}\n')<line_sep> |
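The metadata file above is assembled by string concatenation. An equivalent sketch with json.dump produces the same content modulo whitespace (field names copied from the handwritten block):

import json

metadata = {
    "destinationDatabase": "oracle://cms_orcon_prod/CMS_CONDITIONS",
    "destinationTags": {"MFGeometry_" + GEOMETRY_VERSION: {}},
    "inputTag": "MagneticFieldGeometry_" + GEOMETRY_VERSION,
    "since": 1,
    "userText": "Mag field geometry, version " + GEOMETRY_VERSION,
}
with open('mfGeometry_' + GEOMETRY_VERSION + '.txt', 'w') as f:
    json.dump(metadata, f, indent=2)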
<import_stmt>os<import_stmt>tensorflow<as>tf<import_from_stmt>string Template<import_from_stmt>PIL Image<import_from_stmt>logging DEBUG<import_from_stmt>pathlib Path<import_from_stmt>threading Thread Event<import_from_stmt>logging getLogger<import_from_stmt>logging.handlers QueueHandler<import_from_stmt>multiprocessing Queue<import_from_stmt>watsor.stream.log LogHandler<import_from_stmt>watsor.stream.work Work WorkPublish Payload<import_from_stmt>watsor.stream.share FrameBuffer<import_from_stmt>watsor.stream.sync CountDownLatch CountableQueue<import_from_stmt>watsor.test.detect_stream Artist ShapeDetector<line_sep>CLASSES={idx:shape<for>idx,shape enumerate(['unlabeled' 'triangle' 'ellipse' 'rectangle'])}<line_sep>CONFIG="""model {
ssd {
num_classes: 3
image_resizer {
fixed_shape_resizer {
height: $height
width: $width
}
}
feature_extractor {
type: "ssd_mobilenet_v1"
depth_multiplier: 1.0
min_depth: 16
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.99999989895e-05
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.0299999993294
}
}
activation: RELU_6
batch_norm {
decay: 0.999700009823
center: true
scale: true
epsilon: 0.0010000000475
train: true
}
}
}
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
}
}
similarity_calculator {
iou_similarity {
}
}
box_predictor {
convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.99999989895e-05
}
}
initializer {
truncated_normal_initializer {
mean: 0.0
stddev: 0.0299999993294
}
}
activation: RELU_6
batch_norm {
decay: 0.999700009823
center: true
scale: true
epsilon: 0.0010000000475
train: true
}
}
min_depth: 0
max_depth: 0
num_layers_before_predictor: 0
use_dropout: false
dropout_keep_probability: 0.800000011921
kernel_size: 1
box_code_size: 4
apply_sigmoid_to_scores: false
}
}
anchor_generator {
ssd_anchor_generator {
num_layers: 6
min_scale: 0.20000000298
max_scale: 0.949999988079
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.333299994469
}
}
post_processing {
batch_non_max_suppression {
score_threshold: 0.300000011921
iou_threshold: 0.600000023842
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SIGMOID
}
normalize_loss_by_num_matches: true
loss {
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_sigmoid {
}
}
hard_example_miner {
num_hard_examples: 3000
iou_threshold: 0.990000009537
loss_type: CLASSIFICATION
max_negatives_per_positive: 3
min_negatives_per_image: 0
}
classification_weight: 1.0
localization_weight: 1.0
}
}
}
train_config {
batch_size: 24
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
ssd_random_crop {
}
}
optimizer {
rms_prop_optimizer {
learning_rate {
exponential_decay_learning_rate {
initial_learning_rate: 0.00400000018999
decay_steps: 800720
decay_factor: 0.949999988079
}
}
momentum_optimizer_value: 0.899999976158
decay: 0.899999976158
epsilon: 1.0
}
}
fine_tune_checkpoint: "$path/training/model.ckpt-XXXX"
from_detection_checkpoint: true
num_steps: 200000
}
train_input_reader {
label_map_path: "$path/annotations/label_map.pbtxt"
tf_record_input_reader {
input_path: "$path/annotations/train.record"
}
}
eval_config {
num_examples: 8000
max_evals: 10
use_moving_averages: false
}
eval_input_reader {
label_map_path: "$path/annotations/label_map.pbtxt"
shuffle: false
num_readers: 1
tf_record_input_reader {
input_path: "$path/annotations/test.record"
}
}
"""<class_stmt>Classifier(WorkPublish)<block_start><def_stmt>__init__ self delegate_class name:str stop_event log_queue frame_queue frame_buffer path group latch kwargs=<none><block_start>super().__init__(delegate_class name stop_event log_queue frame_queue frame_buffer args=(latch path group) kwargs={}<if>kwargs<is><none><else>kwargs)<block_end><def_stmt>_run self stop_event log_queue *args **kwargs<block_start>super(Work self)._run(stop_event log_queue *args **kwargs)<try_stmt><block_start>path=args[-2]<line_sep>group=args[-1]<line_sep>output_path=os.path.join(path "annotations" "{}.record".format(group))<with_stmt>tf.io.TFRecordWriter(output_path)<as>writer<block_start>self._spin(self._process stop_event *args writer **kwargs)<block_end>self._gen_label_map(os.path.join(path "annotations" "label_map.pbtxt"))<line_sep>self._gen_config(os.path.join(path "ssd.config") CONFIG *args **kwargs)<block_end><except_stmt>FileNotFoundError<as>e<block_start>self._logger.error(e)<block_end><except_stmt>Exception<block_start>self._logger.exception('Classification failure')<block_end><block_end><def_stmt>_new_frame self frame payload:Payload stop_event frame_buffer:FrameBuffer latch path group writer *args **kwargs<block_start><try_stmt><block_start>detections=filter(<lambda>d:d.label<g>0 frame.header.detections)<with_stmt>Image.frombytes('RGB' (frame.header.width frame.header.height) frame.image.get_obj())<as>img<block_start>count=latch.count_down()<line_sep>filename=self._gen_filename(path group count+1 *args **kwargs)<line_sep>img.save(filename)<line_sep>self._logger.debug("Frame saved to {}".format(filename))<block_end>tf_example=self._gen_tf_record(frame detections filename *args **kwargs)<line_sep>writer.write(tf_example.SerializeToString())<block_end><finally_stmt><block_start>frame.latch.next()<block_end><block_end>@staticmethod<def_stmt>_gen_filename path group count *args **kwargs<block_start><return>os.path.abspath(os.path.join(path "images" group "{:03d}.jpg".format(count)))<block_end>@staticmethod<def_stmt>_gen_tf_record frame detections filename *args **kwargs<block_start>width=frame.header.width<line_sep>height=frame.header.height<line_sep>image_format=b'jpeg'<with_stmt>open(filename "rb")<as>file<block_start>encoded_jpg=file.read()<block_end>filename=os.path.basename(filename).encode('utf-8')<line_sep>xmins=[]<line_sep>xmaxs=[]<line_sep>ymins=[]<line_sep>ymaxs=[]<line_sep>label=[]<line_sep>label_text=[]<for_stmt>detection detections<block_start>xmins.append(detection.bounding_box.x_min/width)<line_sep>xmaxs.append(detection.bounding_box.x_max/width)<line_sep>ymins.append(detection.bounding_box.y_min/height)<line_sep>ymaxs.append(detection.bounding_box.y_max/height)<line_sep>label.append(detection.label)<line_sep>label_text.append(CLASSES.get(detection.label).encode('utf-8'))<block_end>tf_example=tf.train.Example(features=tf.train.Features(feature={'image/height':tf.train.Feature(int64_list=tf.train.Int64List(value=[height])) 'image/width':tf.train.Feature(int64_list=tf.train.Int64List(value=[width])) 'image/filename':tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename])) 'image/source_id':tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename])) 'image/format':tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_format])) 'image/encoded':tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded_jpg])) 'image/object/bbox/xmin':tf.train.Feature(float_list=tf.train.FloatList(value=xmins)) 
'image/object/bbox/xmax':tf.train.Feature(float_list=tf.train.FloatList(value=xmaxs)) 'image/object/bbox/ymin':tf.train.Feature(float_list=tf.train.FloatList(value=ymins)) 'image/object/bbox/ymax':tf.train.Feature(float_list=tf.train.FloatList(value=ymaxs)) 'image/object/class/text':tf.train.Feature(bytes_list=tf.train.BytesList(value=label_text)) 'image/object/class/label':tf.train.Feature(int64_list=tf.train.Int64List(value=label)) }))<line_sep><return>tf_example<block_end>@staticmethod<def_stmt>_gen_label_map path<block_start>contents=''<for_stmt>idx,shape CLASSES.items()<block_start><if_stmt>idx<eq>0<block_start><continue><block_end>contents=contents+"item {\n"<line_sep>contents=contents+" id: "+str(idx)+"\n"<line_sep>contents=contents+" name: '"+shape+"'\n}\n\n"<block_end><with_stmt>open(path 'w')<as>f<block_start>f.write(contents)<block_end><block_end>@staticmethod<def_stmt>_gen_config filename config frame_queue stop_event frame_buffer *args **kwargs<block_start>path=os.path.dirname(filename)<line_sep>config=Template(config).substitute(path=path width=frame_buffer.frames[0].header.width height=frame_buffer.frames[0].header.height)<line_sep>os.makedirs(path exist_ok=<true>)<with_stmt>open(filename 'w')<as>f<block_start>f.write(config)<block_end><block_end><block_end><def_stmt>prepare_shape_model groups<block_start>frame_buffer=FrameBuffer(10 300 300)<line_sep>frame_queue=Queue(1)<line_sep>subscriber_queue=Queue(1)<line_sep>log_queue=CountableQueue()<line_sep>getLogger().addHandler(QueueHandler(log_queue))<line_sep>stop_logging_event=Event()<line_sep>log_handler=LogHandler(Thread "logger" stop_logging_event log_queue filename=<none>)<line_sep>log_handler.start()<for_stmt>group,count groups.items()<block_start>path=os.path.abspath(os.path.join(Path(__file__).parent.parent.parent.parent 'build/test/model'))<line_sep>os.makedirs(os.path.join(path "images" group) exist_ok=<true>)<line_sep>os.makedirs(os.path.join(path "annotations") exist_ok=<true>)<line_sep>stop_process_event=Event()<line_sep>latch=CountDownLatch(count)<line_sep>artist=Artist("artist" stop_process_event log_queue frame_queue frame_buffer)<line_sep>processes=[artist ShapeDetector(Thread "detector" stop_process_event log_queue frame_queue frame_buffer) Classifier(Thread "classifier" stop_process_event log_queue subscriber_queue frame_buffer path group latch kwargs={'log_level':DEBUG})]<line_sep>artist.subscribe(subscriber_queue)<for_stmt>process processes<block_start>process.start()<block_end><try_stmt><block_start>latch.wait()<block_end><finally_stmt><block_start>stop_process_event.set()<for_stmt>process processes<block_start>process.join(30)<block_end><block_end><block_end>stop_logging_event.set()<line_sep>log_queue.join()<block_end><if_stmt>__name__<eq>'__main__'<block_start>prepare_shape_model({"train":900 "test":100})<block_end> |
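For reference, records written by _gen_tf_record above can be read back with a feature spec that mirrors its schema. A minimal sketch parsing a few of the features (the record path is illustrative):

import tensorflow as tf

feature_spec = {
    'image/height': tf.io.FixedLenFeature([], tf.int64),
    'image/width': tf.io.FixedLenFeature([], tf.int64),
    'image/encoded': tf.io.FixedLenFeature([], tf.string),
    # Box and label lists vary per frame, so they are variable-length.
    'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
    'image/object/class/label': tf.io.VarLenFeature(tf.int64),
}

dataset = tf.data.TFRecordDataset('annotations/train.record').map(
    lambda record: tf.io.parse_single_example(record, feature_spec))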
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
<import_from_future_stmt> absolute_import<import_from_stmt>..errors UnsupportedCommandError<def_stmt>add_subparser cmd name parent<block_start>"""Add a new subparser to the given parent and add args to it."""<line_sep>parser=parent.add_parser(name)<if_stmt>cmd<eq>'discover'# For now we don't have any tool-specific CLI options to add.
<block_start><pass><block_end><else_stmt><block_start><raise>UnsupportedCommandError(cmd)<block_end><return>parser<block_end> |
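A usage sketch for add_subparser; the parent wiring below is assumed rather than shown in the module:

import argparse

root = argparse.ArgumentParser()
tools = root.add_subparsers(dest='tool')
add_subparser('discover', 'pytest', tools)  # registers a bare 'pytest' subcommand
args = root.parse_args(['pytest'])
assert args.tool == 'pytest'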
<import_from_stmt>cleo.commands.command Command<import_from_stmt>cleo.io.io IO<class_stmt>FooCommand(Command)<block_start>name="foo bar"<line_sep>description="The foo bar command"<line_sep>aliases=["afoobar"]<def_stmt>interact self io:IO<arrow><none><block_start>io.write_line("interact called")<block_end><def_stmt>handle self<arrow>int<block_start>self._io.write_line("called")<line_sep><return>0<block_end><block_end> |
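A sketch of wiring FooCommand into a cleo application, assuming cleo's standard Application entry point:

from cleo.application import Application

app = Application()
app.add(FooCommand())

if __name__ == "__main__":
    app.run()  # invoking `foo bar` (or the alias `afoobar`) prints "called"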
<import_stmt>sys os<line_sep>sys.path.insert(0 os.path.join(os.path.dirname(__file__) '../'))<import_from_stmt>dcdownloader arg_parse version<line_sep># for unittest
cmd_args=<none><def_stmt>main <block_start>args=arg_parse.parser.parse_args(cmd_args)<line_sep>version.show_welcome()<import_from_stmt>dcdownloader.scheduler Scheduler<import_from_stmt>dcdownloader parser_selector<line_sep>s=Scheduler(url=args.url output_path=args.output_path parser=parser_selector.get_parser(args.url) fetch_only=args.fetch_only proxy=args.proxy verify_ssl=args.verify_ssl)<line_sep>s.run()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>argparse<import_stmt>os.path<as>osp<import_stmt>json<import_from_stmt>tqdm tqdm<import_from_stmt>mmcv mkdir_or_exist<def_stmt>getFlying3dMetas root Type data_type='clean'<block_start>Metas=[]<line_sep>imgDir='flyingthings3d/frames_'+data_type+'pass'<line_sep>dispDir='flyingthings3d/disparity'<line_sep>Parts=['A' 'B' 'C']<for_stmt>Part Parts<block_start>partDir=osp.join(root dispDir Type Part)<line_sep>idxDirs=os.listdir(partDir)<for_stmt>idxDir idxDirs<block_start>dispNames=os.listdir(osp.join(partDir idxDir 'left'))<line_sep>imgNames=["{}.png".format(name.split('.')[0])<for>name dispNames]<for_stmt>imgName,dispName zip(imgNames dispNames)<block_start>meta=dict(left_image_path=osp.join(imgDir Type Part idxDir 'left' imgName) right_image_path=osp.join(imgDir Type Part idxDir 'right' imgName) left_disp_map_path=osp.join(dispDir Type Part idxDir 'left' dispName) right_disp_map_path=osp.join(dispDir Type Part idxDir 'right' dispName) )<line_sep>Metas.append(meta)<block_end><block_end><block_end><return>Metas<block_end><def_stmt>getMonkaaMetas root data_type='clean'<block_start>Metas=[]<line_sep>imgDir='Monkaa/frames_'+data_type+'pass'<line_sep>dispDir='Monkaa/disparity'<line_sep>sceneDirs=os.listdir(osp.join(root dispDir))<for_stmt>sceneDir sceneDirs<block_start>dispNames=os.listdir(osp.join(root dispDir sceneDir 'left'))<line_sep>imgNames=["{}.png".format(name.split('.')[0])<for>name dispNames]<for_stmt>imgName,dispName zip(imgNames dispNames)<block_start>meta=dict(left_image_path=osp.join(imgDir sceneDir 'left' imgName) right_image_path=osp.join(imgDir sceneDir 'right' imgName) left_disp_map_path=osp.join(dispDir sceneDir 'left' dispName) right_disp_map_path=osp.join(dispDir sceneDir 'right' dispName) )<line_sep>Metas.append(meta)<block_end><block_end><return>Metas<block_end><def_stmt>getDrivingMetas root data_type='clean'<block_start>Metas=[]<line_sep>imgDir='driving/frames_'+data_type+'pass'<line_sep>dispDir='driving/disparity'<line_sep>focalLengthDirs=os.listdir(osp.join(root dispDir))<for_stmt>focalLengthDir focalLengthDirs<block_start>wardDirs=os.listdir(osp.join(root dispDir focalLengthDir))<for_stmt>wardDir wardDirs<block_start>speedDirs=os.listdir(osp.join(root dispDir focalLengthDir wardDir))<for_stmt>speedDir speedDirs<block_start>dispNames=os.listdir(osp.join(root dispDir focalLengthDir wardDir speedDir 'left'))<line_sep>imgNames=["{}.png".format(name.split('.')[0])<for>name dispNames]<for_stmt>imgName,dispName zip(imgNames dispNames)<block_start>meta=dict(left_image_path=osp.join(imgDir focalLengthDir wardDir speedDir 'left' imgName) right_image_path=osp.join(imgDir focalLengthDir wardDir speedDir 'right' imgName) left_disp_map_path=osp.join(dispDir focalLengthDir wardDir speedDir 'left' dispName) right_disp_map_path=osp.join(dispDir focalLengthDir wardDir speedDir 'right' dispName) )<line_sep>Metas.append(meta)<block_end><block_end><block_end><block_end><return>Metas<block_end><def_stmt>build_annoFile root save_annotation_root data_type='clean'<block_start>"""
Build annotation files for Scene Flow Dataset.
Args:
root: Root directory of the SceneFlow dataset; must exist.
save_annotation_root: Directory where the generated JSON annotation files are written.
data_type: Render pass to index, either 'clean' or 'final'.
"""<line_sep># check existence
<assert_stmt>osp.exists(root) 'Path: {} not exists!'.format(root)<line_sep>mkdir_or_exist(save_annotation_root)<line_sep>trainMetas=getFlying3dMetas(root 'TRAIN' data_type)<line_sep>testMetas=getFlying3dMetas(root 'TEST' data_type)<line_sep>trainMetas.extend(getMonkaaMetas(root data_type))<line_sep>trainMetas.extend(getDrivingMetas(root data_type))<for_stmt>meta tqdm(trainMetas)<block_start><for_stmt>k,v meta.items()<block_start><assert_stmt>osp.exists(osp.join(root v)) 'trainMetas:{} not exists'.format(v)<block_end><block_end><for_stmt>meta tqdm(testMetas)<block_start><for_stmt>k,v meta.items()<block_start><assert_stmt>osp.exists(osp.join(root v)) 'testMetas: {} not exists'.format(v)<block_end><block_end>info_str='SceneFlow Dataset contains:\n'<concat>' {:5d} training samples \n'<concat>' {:5d} validation samples'.format(len(trainMetas) len(testMetas))<line_sep>print(info_str)<def_stmt>make_json name metas<block_start>filepath=osp.join(save_annotation_root data_type+'pass_'+name+'.json')<line_sep>print('Save to {}'.format(filepath))<with_stmt>open(file=filepath mode='w')<as>fp<block_start>json.dump(metas fp=fp)<block_end><block_end>make_json(name='train' metas=trainMetas)<line_sep>make_json(name='test' metas=testMetas)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description="SceneFlow Data PreProcess.")<line_sep>parser.add_argument("--data-root" default=<none> help="root of data" type=str )<line_sep>parser.add_argument("--save-annotation-root" default='./' help="save root of generated annotation file" type=str )<line_sep>parser.add_argument("--data-type" default='clean' help="the type of data, (clean or final)pass" type=str )<line_sep>args=parser.parse_args()<line_sep>build_annoFile(args.data_root args.save_annotation_root args.data_type)<block_end> |
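Each meta dict serialized to the JSON annotation files carries four dataset-relative paths. One FlyingThings3D entry looks roughly like this (path components and the .pfm extension are illustrative):

{
    "left_image_path": "flyingthings3d/frames_cleanpass/TRAIN/A/0000/left/0006.png",
    "right_image_path": "flyingthings3d/frames_cleanpass/TRAIN/A/0000/right/0006.png",
    "left_disp_map_path": "flyingthings3d/disparity/TRAIN/A/0000/left/0006.pfm",
    "right_disp_map_path": "flyingthings3d/disparity/TRAIN/A/0000/right/0006.pfm"
}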
"""
:codeauthor: <NAME> <<EMAIL>>
"""<import_stmt>pytest<import_stmt>salt.states.layman<as>layman<import_from_stmt>tests.support.mock MagicMock patch<line_sep>@pytest.fixture<def_stmt>configure_loader_modules <block_start><return>{layman:{}}<block_end><def_stmt>test_present <block_start>"""
Test to verify that the overlay is present.
"""<line_sep>name="sunrise"<line_sep>ret={"name":name "result":<true> "comment":"" "changes":{}}<line_sep>mock=MagicMock(side_effect=[[name] []])<with_stmt>patch.dict(layman.__salt__ {"layman.list_local":mock})<block_start>comt="Overlay {} already present".format(name)<line_sep>ret.update({"comment":comt})<assert_stmt>layman.present(name)<eq>ret<with_stmt>patch.dict(layman.__opts__ {"test":<true>})<block_start>comt="Overlay {} is set to be added".format(name)<line_sep>ret.update({"comment":comt "result":<none>})<assert_stmt>layman.present(name)<eq>ret<block_end><block_end><block_end><def_stmt>test_absent <block_start>"""
Test to verify that the overlay is absent.
"""<line_sep>name="sunrise"<line_sep>ret={"name":name "result":<true> "comment":"" "changes":{}}<line_sep>mock=MagicMock(side_effect=[[] [name]])<with_stmt>patch.dict(layman.__salt__ {"layman.list_local":mock})<block_start>comt="Overlay {} already absent".format(name)<line_sep>ret.update({"comment":comt})<assert_stmt>layman.absent(name)<eq>ret<with_stmt>patch.dict(layman.__opts__ {"test":<true>})<block_start>comt="Overlay {} is set to be deleted".format(name)<line_sep>ret.update({"comment":comt "result":<none>})<assert_stmt>layman.absent(name)<eq>ret<block_end><block_end><block_end> |
"""
========
Circuits
========
Convert a Boolean circuit to an equivalent Boolean formula.
A Boolean circuit can be exponentially more expressive than an
equivalent formula in the worst case, since the circuit can reuse
subcircuits multiple times, whereas a formula cannot reuse subformulas
more than once. Thus creating a Boolean formula from a Boolean circuit
in this way may be infeasible if the circuit is large.
"""<import_stmt>matplotlib.pyplot<as>plt<import_stmt>networkx<as>nx<def_stmt>circuit_to_formula circuit# Convert the circuit to an equivalent formula.
<block_start>formula=nx.dag_to_branching(circuit)<line_sep># Transfer the operator or variable labels for each node from the
# circuit to the formula.
<for_stmt>v formula<block_start>source=formula.nodes[v]["source"]<line_sep>formula.nodes[v]["label"]=circuit.nodes[source]["label"]<block_end><return>formula<block_end><def_stmt>formula_to_string formula<block_start><def_stmt>_to_string formula root# If there are no children, this is a variable node.
<block_start>label=formula.nodes[root]["label"]<if_stmt><not>formula[root]<block_start><return>label<block_end># Otherwise, this is an operator.
children=formula[root]<line_sep># If one child, the label must be a NOT operator.
<if_stmt>len(children)<eq>1<block_start>child=nx.utils.arbitrary_element(children)<line_sep><return>f"{label}({_to_string(formula child)})"<block_end># NB "left" and "right" here are a little misleading: there is
# no order on the children of a node. That's okay because the
# Boolean AND and OR operators are symmetric. It just means that
# the order of the operands cannot be predicted and hence the
# function does not necessarily behave the same way on every
# invocation.
left,right=formula[root]<line_sep>left_subformula=_to_string(formula left)<line_sep>right_subformula=_to_string(formula right)<line_sep><return>f"({left_subformula} {label} {right_subformula})"<block_end>root=next(v<for>v,d formula.in_degree()<if>d<eq>0)<line_sep><return>_to_string(formula root)<block_end>###############################################################################
# Create an example Boolean circuit.
# ----------------------------------
#
# This circuit has a ∧ at the output and two ∨s at the next layer.
# The third layer has a variable x that appears in the left ∨, a
# variable y that appears in both the left and right ∨s, and a
# negation for the variable z that appears as the sole node in the
# fourth layer.
circuit=nx.DiGraph()<line_sep># Layer 0
circuit.add_node(0 label="∧" layer=0)<line_sep># Layer 1
circuit.add_node(1 label="∨" layer=1)<line_sep>circuit.add_node(2 label="∨" layer=1)<line_sep>circuit.add_edge(0 1)<line_sep>circuit.add_edge(0 2)<line_sep># Layer 2
circuit.add_node(3 label="x" layer=2)<line_sep>circuit.add_node(4 label="y" layer=2)<line_sep>circuit.add_node(5 label="¬" layer=2)<line_sep>circuit.add_edge(1 3)<line_sep>circuit.add_edge(1 4)<line_sep>circuit.add_edge(2 4)<line_sep>circuit.add_edge(2 5)<line_sep># Layer 3
circuit.add_node(6 label="z" layer=3)<line_sep>circuit.add_edge(5 6)<line_sep># Convert the circuit to an equivalent formula.
formula=circuit_to_formula(circuit)<line_sep>print(formula_to_string(formula))<line_sep>labels=nx.get_node_attributes(circuit "label")<line_sep>options={"node_size":600 "alpha":0.5 "node_color":"blue" "labels":labels "font_size":22 }<line_sep>plt.figure(figsize=(8 8))<line_sep>pos=nx.multipartite_layout(circuit subset_key="layer")<line_sep>nx.draw_networkx(circuit pos **options)<line_sep>plt.title(formula_to_string(formula))<line_sep>plt.axis("equal")<line_sep>plt.show()<line_sep> |
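A concrete check of the duplication the module docstring warns about: the shared variable y (node 4) feeds both ∨ gates, so dag_to_branching copies it once per root-to-leaf path and the 7-node circuit becomes an 8-node formula tree:

assert circuit.number_of_nodes() == 7
assert formula.number_of_nodes() == 8  # y appears twice in the branching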
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Class to do trained model inference in beam."""<import_stmt>importlib<import_stmt>os<import_stmt>struct<import_stmt>subprocess<as>sp<import_stmt>time<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.contrib framework<as>contrib_framework<line_sep># LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
<import_from_stmt>ldif.datasets preprocess<import_from_stmt>ldif.datasets shapenet<import_from_stmt>ldif.inference experiment<as>experiments<import_from_stmt>ldif.inference extract_mesh<import_from_stmt>ldif.inference metrics<import_from_stmt>ldif.model model<as>sdf_model<import_from_stmt>ldif.representation structured_implicit_function<import_from_stmt>ldif.util camera_util<import_from_stmt>ldif.util file_util<import_from_stmt>ldif.util gaps_util<import_from_stmt>ldif.util geom_util<import_from_stmt>ldif.util geom_util_np<import_from_stmt>ldif.util gpu_util<import_from_stmt>ldif.util path_util<import_from_stmt>ldif.util py_util<import_from_stmt>ldif.util sdf_util<import_from_stmt>ldif.util np_util<import_from_stmt>ldif.util.file_util log<line_sep># pylint: enable=g-bad-import-order
importlib.reload(extract_mesh)<line_sep>importlib.reload(structured_implicit_function)<line_sep>importlib.reload(sdf_model)<line_sep>importlib.reload(geom_util)<class_stmt>TrainedNetwork(object)<block_start>"""A base class for all networks trained in XManager."""<def_stmt>__init__ self job ckpt use_gpu **kwargs# pylint: disable=unused-argument
<block_start>self.job=job<line_sep>self.ckpt=ckpt<line_sep>self.graph=tf.Graph()<line_sep>self.use_gpu=use_gpu<block_end>@classmethod<def_stmt>from_experiment cls experiment xid ckpt_idx use_temp_ckpts=<none> overrides=<none> use_gpu=<true> **kwargs<block_start>"""Instantiates a TrainedNetwork from an experiment object."""<line_sep>job=experiment.job_from_xmanager_id(xid must_be_visible=<true>)<if_stmt>use_temp_ckpts<is><not><none><block_start>job.set_use_temp_ckpts(use_temp_ckpts)<block_end><if_stmt>overrides<is><not><none><block_start><for_stmt>k,v overrides.items()<block_start>setattr(job.model_config.hparams k v)<block_end><block_end><if_stmt>ckpt_idx<eq>0<block_start>log.error('Please select a checkpoint and rerun. Valid checkpoints:')<line_sep>log.error(str(job.all_checkpoint_indices))<line_sep><return><block_end>must_equal=ckpt_idx<ne>-1<line_sep>ckpt=job.latest_checkpoint_before(ckpt_idx must_equal=must_equal)<line_sep>log.info(f'Loading checkpoint {ckpt.abspath}')<line_sep><return>cls(job ckpt use_gpu **kwargs)<block_end>@classmethod<def_stmt>from_modeldir cls model_directory model_name experiment_name xid ckpt_idx overrides=<none> use_temp_ckpts=<true> use_gpu=<true> **kwargs<block_start>"""Creates a TrainedModel from a model directory root and name."""<line_sep>experiment=experiments.Experiment(model_directory model_name experiment_name)<line_sep><return>cls.from_experiment(experiment xid ckpt_idx use_temp_ckpts overrides use_gpu **kwargs)<block_end>@classmethod<def_stmt>from_identifiers cls user model_name experiment_name xid ckpt_idx overrides=<none> use_temp_ckpts=<none> charged_user='viscam' use_gpu=<true> **kwargs<block_start>"""Creates a trained network from experiment identifiers."""<line_sep><raise>ValueError('No longer supported.')<block_end><def_stmt>restore self<block_start>"""Creates a session with restored model variables."""<with_stmt>self.graph.as_default()<block_start><if_stmt>self.use_gpu# For now these are disabled since it is difficult to work on
# all GPUs.
#allowable_frac = gpu_util.get_allowable_fraction_without(
# mem_to_reserve=1024 + 512, cuda_device_index=0) # ~1GB
#gpu_options = tf.GPUOptions(
# per_process_gpu_memory_fraction=allowable_frac)
#config = tf.ConfigProto(gpu_options=gpu_options)
<block_start>config=tf.ConfigProto()<line_sep>config.gpu_options.allow_growth=<true><block_end><else_stmt><block_start>config=tf.ConfigProto(device_count={'GPU':0})<block_end>self.session=tf.Session(config=config)<line_sep>saver=tf.train.Saver()<line_sep>saver.restore(self.session self.ckpt.abspath)<block_end><block_end><block_end><def_stmt>conform_prediction vector<block_start>"""Forces an arbitrary vector to be a valid (D)SIF."""<line_sep>vector=vector.copy()<if_stmt>vector.shape[-1]<not><in>[10 42]<block_start><raise>ValueError('Unimplemented.')<block_end>consts,centers,radii_aa,radii_cov=np.split(vector[<ellipsis> :10] [1 4 7] axis=-1)<line_sep>consts=np.minimum(consts 0.0)<line_sep>radii_aa=np.maximum(radii_aa 1e-9)<line_sep>radii_cov=np.clip(radii_cov -np.pi/4. np.pi/4.)<line_sep>log.verbose(repr([x.shape<for>x [consts centers radii_aa radii_cov vector[<ellipsis> 10:]]]))<line_sep><return>np.concatenate([consts centers radii_aa radii_cov vector[<ellipsis> 10:]] axis=-1)<block_end><class_stmt>SingleViewDepthEncoder(TrainedNetwork)<block_start>"""Maps from a single depth image (max-0) to a shape representation."""<def_stmt>__init__ self job ckpt use_gpu **kwargs<block_start>super(SingleViewDepthEncoder self).__init__(job ckpt use_gpu **kwargs)<with_stmt>self.graph.as_default()<block_start>model_config=self.job.model_config<line_sep>model_config.inputs=shapenet.build_placeholder_interface(model_config proto='ShapeNetOneImXyzPC')<line_sep>training_example=preprocess.preprocess(model_config)<line_sep>self.depth_input=model_config.inputs['dataset'].depth_render<line_sep>self.xyz_input=model_config.inputs['dataset'].xyz_render<line_sep>self.points_input=model_config.inputs['dataset'].surface_point_samples<line_sep>training_example=preprocess.preprocess(model_config)<line_sep>observation=sdf_model.Observation(model_config training_example)<line_sep>imp_net=sdf_model.StructuredImplicitModel(model_config 'imp_net')<line_sep>prediction=imp_net.forward(observation)<line_sep>structured_implicit=prediction.structured_implicit<line_sep>self.packed_vector=structured_implicit.vector<line_sep>self.restore()<block_end><block_end><def_stmt>run self depth points xyz<block_start>"""Runs the network on the input data, returning a (D)SIF."""<line_sep>h,w=np.squeeze(depth).shape<line_sep>depth=np.reshape(depth [1 h w 1])<line_sep>points=np.reshape(points [1 10000 6])<line_sep>xyz=np.reshape(xyz [1 h w 3])<with_stmt>self.graph.as_default()<block_start>packed_vector=self.session.run(self.packed_vector feed_dict={self.depth_input:depth self.points_input:points self.xyz_input:xyz})<line_sep>packed_vector=np.reshape(packed_vector [self.job.model_config.hparams.sc -1])<block_end><return>packed_vector<block_end><def_stmt>run_example self ex<block_start><return>self.run(ex.max_depth_224[0 <ellipsis>]<times>1000.0 ex.get_max_world_pts_from_idx(0) ex.max_world_xyz_224[0 <ellipsis>])<block_end><def_stmt>run_example_bts self ex<block_start><return>self.run(ex.bts_depth_224[0 <ellipsis>]<times>1000.0 ex.get_bts_world_pts_from_idx(0) ex.bts_world_xyz_224[0 <ellipsis>])<block_end><block_end><class_stmt>DepthEncoder(TrainedNetwork)<block_start>"""Maps from a dodecahedron of depth images to shape elements."""<def_stmt>__init__ self job ckpt use_gpu **kwargs<block_start>super(DepthEncoder self).__init__(job ckpt use_gpu 
**kwargs)<with_stmt>self.graph.as_default()<block_start>model_config=self.job.model_config<line_sep>model_config.hparams.bs=1<line_sep>model_config.inputs=shapenet.build_placeholder_interface(model_config)<line_sep>training_example=preprocess.preprocess(model_config)<line_sep>self.depth_input=model_config.inputs['dataset'].depth_renders<line_sep>self.points_input=model_config.inputs['dataset'].surface_point_samples<line_sep>self.nss_input=model_config.inputs['dataset'].near_surface_samples<line_sep>training_example=preprocess.preprocess(model_config)<if_stmt>hasattr(training_example '_tx')<block_start>self.tx=training_example._tx<block_end><else_stmt><block_start>self.tx=<none><block_end>observation=sdf_model.Observation(model_config training_example)<line_sep>imp_net=sdf_model.StructuredImplicitModel(model_config 'imp_net')<line_sep>prediction=imp_net.forward(observation)<line_sep>structured_implicit=prediction.structured_implicit<line_sep>self.packed_vector=structured_implicit.vector<line_sep># *phew* we have set up the graph... now we need to pull the weights.
self.restore()<block_end><block_end><def_stmt>run self dodeca points nss=<none><block_start>"""Runs the network on the input data, returning a (D)SIF."""<line_sep>dodeca=np.reshape(dodeca [1 20 224 224 1])<line_sep>points=np.reshape(points [1 10000 6])<with_stmt>self.graph.as_default()<block_start>feed_dict={self.depth_input:dodeca self.points_input:points}<if_stmt>nss<is><not><none><block_start>feed_dict[self.nss_input]=np.reshape(nss [1 100000 4])<block_end><if_stmt>self.tx<is><not><none><block_start>packed_vector,tx=self.session.run([self.packed_vector self.tx] feed_dict=feed_dict)<block_end><else_stmt><block_start>packed_vector=self.session.run(self.packed_vector feed_dict=feed_dict)<block_end>packed_vector=np.reshape(packed_vector [self.job.model_config.hparams.sc -1])<block_end><if_stmt>self.tx<is><not><none><block_start><return>packed_vector np.reshape(tx [4 4])<block_end><return>packed_vector<block_end><def_stmt>run_example self ex<block_start><return>self.run(ex.depth_images ex.precomputed_surface_samples_from_dodeca)<block_end><block_end><class_stmt>Decoder(TrainedNetwork)<block_start>"""A SIF -> Mesh decoder."""<def_stmt>__init__ self job ckpt use_gpu **kwargs<block_start>super(Decoder self).__init__(job ckpt use_gpu **kwargs)<with_stmt>self.graph.as_default()<block_start>self.sif_input=tf.placeholder(tf.float32 self.batched_vector_shape)<line_sep># TODO(kgenova) Maybe the net should be handled entirely by the structured
# implicit function? Although there is a difference between the network
# that can give a result from a vector and a simple wrapper for models
# that don't need variables. Maybe it's just intelligent about creating
# the net only when really needed.
<if_stmt>'silence_implicits'<in>kwargs<and>kwargs['silence_implicits']<block_start>self.job.model_config.hparams.ipc='f'<line_sep>log.info('Silencing implicits.')<block_end>net=sdf_model.StructuredImplicitModel(self.job.model_config name='imp_net')<line_sep>structured_implicit=(structured_implicit_function.StructuredImplicit.from_packed_vector(self.job.model_config self.sif_input net))<line_sep>self.structured_implicit=structured_implicit<line_sep>self.block_res=32<line_sep>self.native_point_count=self.block_res<power>3<line_sep>self.sample_locations_ph=tf.placeholder(tf.float32 shape=[self.block_res self.block_res self.block_res 3])<line_sep>samples=tf.reshape(self.sample_locations_ph [1 self.block_res<power>3 3])<line_sep>predicted_alg,predicted_locals=structured_implicit.class_at_samples(samples apply_class_transfer=<false>)<line_sep>predicted_class=sdf_util.apply_class_transfer(predicted_alg self.job.model_config soft_transfer=<true> offset=self.job.model_config.hparams.lset)<line_sep>vol_shape=[self.block_res self.block_res self.block_res]<line_sep>self.predicted_alg_grid=tf.reshape(predicted_alg vol_shape)<line_sep>self.predicted_class_grid=tf.reshape(predicted_class vol_shape)<line_sep>effective_element_count=(structured_implicit_function.get_effective_element_count(self.job.model_config))<line_sep>self.local_decisions=tf.reshape(predicted_locals[0] [effective_element_count self.block_res self.block_res self.block_res])<line_sep>self.base_grid=np_util.make_coordinate_grid_3d(length=self.block_res height=self.block_res width=self.block_res is_screen_space=<false> is_homogeneous=<false>).astype(np.float32)<line_sep>self._world2local=structured_implicit.world2local<line_sep>self._use_inference_kernel=<true><line_sep># Influence samples
self.true_sample_count=10000<line_sep>self.generic_sample_ph=tf.placeholder(tf.float32 shape=[self.true_sample_count 3])<line_sep>self.predicted_influences=structured_implicit.rbf_influence_at_samples(tf.expand_dims(self.generic_sample_ph axis=0))<line_sep># Optimizer stuff
self.optimizer_pc=5000<line_sep>self.optimizer_samples=tf.placeholder(tf.float32 shape=[self.optimizer_pc 3])<line_sep>optimizer_samples=tf.reshape(self.optimizer_samples [1 self.optimizer_pc 3])<line_sep>self.predicted_class,_=structured_implicit.class_at_samples(optimizer_samples)<line_sep>self.predicted_class=tf.reshape(self.predicted_class [self.optimizer_pc 1])<line_sep>self.target_class_ph=tf.placeholder(tf.float32 [self.optimizer_pc 1])<line_sep>loss='crossentropy'<if_stmt>loss<eq>'crossentropy'<block_start>clipped_pred=tf.clip_by_value(self.predicted_class 1e-05 1-1e-05)<line_sep>self.optimizer_elt_loss=tf.where(self.target_class_ph<g>0.5 -tf.log(clipped_pred) -tf.log(1-clipped_pred))<block_end><elif_stmt>loss<eq>'l1'<block_start>self.optimizer_elt_loss=tf.abs(self.target_class_ph-self.predicted_class)<block_end><elif_stmt>loss<eq>'l2'<block_start>self.optimizer_elt_loss=tf.square(self.target_class_ph-self.predicted_class)<block_end>apply_where_agree=<true><if_stmt><not>apply_where_agree<block_start>gt_outside=self.target_class_ph<g>0.5<line_sep>pred_outside=self.predicted_class<g>0.5<line_sep>gt_inside=tf.logical_not(gt_outside)<line_sep>pred_inside=tf.logical_not(pred_outside)<line_sep>agree=tf.logical_or(tf.logical_and(gt_outside pred_outside) tf.logical_and(gt_inside pred_inside))<line_sep>self.optimizer_elt_loss=tf.where_v2(agree 0.0 self.optimizer_elt_loss)<block_end>self.optimizer_loss=tf.reduce_mean(self.optimizer_elt_loss)<line_sep>self.ldif_gradients=tf.gradients(self.optimizer_loss self.sif_input)<line_sep># TODO(kgenova) Currently disabled since it's in testing and hardcodes
# some values.
# self.coords_ph = tf.placeholder(tf.float32, shape=[3])
# self.am_image_ph = tf.placeholder(tf.int32, shape=[224, 224])
# pose_cam2world, pose_eye = self._spherical_to_4x4(self.coords_ph)
# self.pose_error = self._evaluate_pose_error(pose_cam2world, pose_eye,
# self.am_image_ph)
# self.pose3_gradients = tf.gradients(self.pose_error, self.coords_ph)
<try_stmt><block_start>self.restore()<block_end><except_stmt>ValueError<block_start>log.warning('No variables to restore or restoration otherwise failed.')<block_end><block_end><block_end>@property<def_stmt>unbatched_vector_shape self<block_start>shape_count=self.job.model_config.hparams.sc<line_sep>shape_size=structured_implicit_function.element_dof(self.job.model_config)<line_sep><return>[shape_count shape_size]<block_end>@property<def_stmt>batched_vector_shape self<block_start><return>[1]+self.unbatched_vector_shape<block_end>@property<def_stmt>use_inference_kernel self<block_start><return>self._use_inference_kernel<block_end>@use_inference_kernel.setter<def_stmt>use_inference_kernel self should_use<block_start>self._use_inference_kernel=bool(should_use)<block_end># TODO(kgenova) The intermediate vector should really be its own class...
<def_stmt>savetxt self sif_vector path=<none> version='v1'<block_start>"""Saves a (D)SIF as ASCII text in the SIF file format.
Args:
sif_vector: A numpy array containing the ldif to write to disk. Has shape
(element_count, element_length).
path: A string containing the path to the file to write to, if provided.
If None, no file is written.
version: A string with the version identifier. Must equal 'v1'.
Returns:
A string encoding of the (D)SIF.
"""<if_stmt>version<eq>'v0'<block_start><raise>ValueError('SIF v0 files are no longer supported.')<block_end><elif_stmt>version<eq>'v1'<block_start>s=self.encode_sif_v1(sif_vector)<block_end><else_stmt><block_start><raise>ValueError(f'Unrecognized SIF file format: {version}.')<block_end><if_stmt>path<is><not><none><block_start>file_util.writetxt(path s)<block_end><return>s<block_end><def_stmt>encode_sif_v1 self sif_vector<block_start>"""Encodes a ldif to a string, and optionally writes it to disk.
A description of the file format:
Line 1: SIF
Line 2: Three ints separated by spaces. In order:
1) The number of blobs.
2) The version ID for the blob types. This was added as a safeguard,
since a previous format change (adding rotation) broke all the old
txt files. For now it will always be zero, which means the following
eleven explicit parameters will be given per blob (in order):
1 constant. float.
3 centers (XYZ). float.
3 radii (XYZ diagonals). float.
3 radii (roll-pitch-yaw rotations). float.
1 symmetry ID type. int. For now it will be either 0 or 1:
Zero: Not symmetric.
One: Left-right (XY-plane) symmetry.
3) The number of implicit parameters per blob. So it will likely
be between 0 and 256.
After the first two lines, there is a line for each blob.
Each line will have the explicit parameters followed by the implicit
parameters. They are space separated.
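For example (illustrative values only), a one-blob file with no implicit
parameters and left-right symmetry would read:
SIF
1 0 0
-0.5 0 0.1 0 0.2 0.2 0.2 0 0 0 1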
Args:
sif_vector: The SIF vector to encode as a np array. Has shape
(element_count, element_length).
Returns:
A string encoding of sif_vector in the ldif v1 file format.
"""<line_sep>sif_vector=sif_vector.copy()<line_sep>shape_count=sif_vector.shape[-2]<line_sep>shape_len=sif_vector.shape[-1]<if_stmt>shape_len<eq>7<block_start>off_axis=np.zeros([shape_count 3])<line_sep>sif_vector=np.concatenate([sif_vector off_axis] axis=1)<line_sep>shape_len=10<block_end>explicit_len=10<line_sep>implicit_len=shape_len-explicit_len<line_sep>sif_vector=np.reshape(sif_vector [shape_count shape_len])<line_sep>has_implicits=implicit_len<g>0<if_stmt><not>has_implicits<block_start><assert_stmt>shape_len<eq>10<line_sep>implicit_len=0<block_end>sif_vector[: 4:7]=np.sqrt(np.maximum(sif_vector[: 4:7] 0))<line_sep>header='SIF\n%i %i %i\n'%(shape_count 0 implicit_len)<line_sep>out=header<for_stmt>row_idx range(shape_count)<block_start>row=' '.join(10<times>['%.9g'])%tuple(sif_vector[row_idx :10].tolist())<line_sep>symmetry=int(row_idx<l>self.job.model_config.hparams.lyr)<line_sep>row<augadd>' %i'%symmetry<if_stmt>has_implicits<block_start>implicit_params=' '.join(implicit_len<times>['%.9g'])%(tuple(sif_vector[row_idx 10:].tolist()))<line_sep>row<augadd>' '+implicit_params<block_end>row<augadd>'\n'<line_sep>out<augadd>row<block_end><return>out<block_end><def_stmt>render_ellipsoids self sif_vector<block_start>"""Renders an ellipsoid image visualizing the (D)SIF RBFs."""<with_stmt>py_util.py2_temporary_directory()<as>d<block_start>qpath=d+'/q.txt'<line_sep>self.savetxt(sif_vector qpath)<line_sep>impath=d+'/im.png'<line_sep>camera=('1.0451 1.17901 0.630437 '<concat>'-0.614259 -0.695319 -0.373119 '<concat>'-0.547037 0.715996 -0.433705')<with_stmt>py_util.x11_server()<block_start>cmd='%s/qview %s -camera %s -image %s'%(path_util.gaps_path() qpath camera impath)<line_sep>sp.check_output(cmd shell=<true>)<block_end>im=file_util.read_image(impath)<block_end><return>im<block_end><def_stmt>interactive_viewer self sif_vector mesh=<none><block_start>"""Opens a GAPS viewer that can display the SIF blobs alongside a mesh."""<with_stmt>py_util.py2_temporary_directory()<as>d<block_start>qpath=d+'/q.txt'<line_sep>self.savetxt(sif_vector qpath)<line_sep>init_camera=('1.0451 1.17901 0.630437 '<concat>'-0.614259 -0.695319 -0.373119 '<concat>'-0.547037 0.715996 -0.433705')<line_sep>mstr=''<if_stmt>mesh<is><not><none><block_start>mpath=d+'/m.ply'<line_sep>file_util.write_mesh(mpath mesh)<line_sep>mstr=f' -input_mesh {mpath}'<block_end>cmd=f'{path_util.gaps_path()}/qview {qpath} -camera {init_camera}{mstr}'<line_sep>sp.check_output(cmd shell=<true>)<block_end><block_end><def_stmt>world2local self sif_vector<block_start><if_stmt>sif_vector.shape[0]<ne>1<block_start>sif_vector=np.expand_dims(sif_vector axis=0)<block_end>m=self.session.run(self._world2local feed_dict={self.sif_input:sif_vector})<line_sep><return>m<block_end><def_stmt>interactive_mesh_viewer self sif_vector resolution<block_start>"""Opens up an OpenGL session viewing the mesh defined by the SIF/LDIF."""<with_stmt>py_util.py2_temporary_directory()<as>d<block_start>mpath=d+'/m.ply'<line_sep>m=self.extract_mesh(sif_vector resolution)<line_sep>file_util.write_mesh(mpath m)<line_sep>init_camera=('1.0451 1.17901 0.630437 '<concat>'-0.614259 -0.695319 -0.373119 '<concat>'-0.547037 0.715996 -0.433705')<line_sep>cmd='%s/mshview %s -camera %s'%(path_util.gaps_path() mpath init_camera)<line_sep>sp.check_output(cmd shell=<true>)<block_end><block_end><def_stmt>interactive_gridview self sif_vector resolution extent=0.75<block_start>volume=self._grid_eval(sif_vector resolution extent extract_parts=<false> 
world2local=<none>)<line_sep><return>gaps_util.grdview(volume)<block_end><def_stmt>_spherical_to_4x4 self coords<block_start>"""Turns spherical coords into a 4x4 affine transformation matrix."""<line_sep>r=coords[0]<line_sep>theta=coords[1]<line_sep>phi=coords[2]<line_sep>st=tf.sin(theta)<line_sep>x=r<times>st<times>tf.cos(phi)<line_sep>y=r<times>st<times>tf.sin(phi)<line_sep>z=r<times>tf.cos(theta)<line_sep>eye=tf.stack([x y z] axis=0)<line_sep>eye=tf.reshape(eye [1 3])<line_sep>center=tf.zeros([1 3] dtype=tf.float32)<line_sep>world_up=tf.constant([[0. 1. 0.]] dtype=tf.float32)<line_sep>world2cam=camera_util.look_at(eye center world_up)<line_sep>cam2world=tf.linalg.inv(world2cam)<line_sep>cam2world=tf.constant([[-9.9398971e-01 2.7342862e-03 -4.7837296e-03 1.4993416e-04] [1.6200442e-09 8.6298174e-01 4.9326313e-01 7.1943283e-01] [5.5100261e-03 4.9325553e-01 -8.6296844e-01 -1.2277470e+00] [0.0000000e+00 0.0000000e+00 0.0000000e+00 1.0000000e+00]] dtype=tf.float32)<line_sep><return>tf.reshape(cam2world [4 4]) eye<block_end><def_stmt>_evaluate_pose_error self cam2world eye am_image<block_start>"""Evaluates the error of an estimated 4x4 pose matrix."""<line_sep># TODO(kgenova) This is a hack that only works for 3d-r2n2
ray_directions=gaps_util.gaps_depth_image_to_cam_image(np.ones((224 224)) xfov=0.422204).astype(np.float32)<line_sep>tc=15<line_sep>t_vals=tf.constant(np.arange(0.75 2.25 .1) dtype=tf.float32)<line_sep>t_vals=tf.reshape(t_vals [1 tc 1])<line_sep>ray_count=int(np.prod(ray_directions.shape[:-1]))<line_sep>ray_directions=tf.reshape(ray_directions [ray_count 1 3])<line_sep>eye=tf.reshape(eye [1 1 3])<line_sep>cam_rays=ray_directions<times>t_vals+eye<line_sep>world_pts=geom_util.apply_4x4(cam_rays cam2world are_points=<true> batch_rank=0 sample_rank=2)<line_sep>world_pts=tf.reshape(world_pts [1 ray_count<times>tc 3])<line_sep>self.cam_3dof_pts=world_pts<line_sep>world_rbfs=self.structured_implicit.rbf_influence_at_samples(world_pts)<line_sep>eec=world_rbfs.get_shape().as_list()[-1]<assert_stmt>len(am_image.get_shape().as_list())<eq>2<line_sep>is_bg=tf.reshape(tf.logical_not(tf.equal(am_image eec)) [1 ray_count 1])<line_sep>am_image=tf.tile(tf.expand_dims(am_image axis=-1) [1 1 tc])<line_sep>flat_am=tf.reshape(am_image [ray_count<times>tc 1])<line_sep>flat_am=tf.where_v2(tf.equal(flat_am 45) 0 flat_am)<line_sep>world_rbfs=tf.reshape(world_rbfs [ray_count<times>tc 45])<line_sep>max_val=tf.gather(world_rbfs flat_am batch_dims=1)<line_sep>max_val=tf.reshape(max_val [1 ray_count tc])<line_sep>max_val=tf.reduce_max(max_val axis=-1)<line_sep>is_bg_mult=tf.cast(is_bg dtype=tf.float32)<line_sep>max_val=is_bg_mult<times>max_val<line_sep>error=-1.0<times>tf.reduce_sum(max_val)<line_sep><return>error<block_end><def_stmt>optimize_3dof_pose self sif_vector am_image e step_count=10 lr=1e-6<block_start>"""Tries to fit a pose given a SIF in 3D and a SIF segmentation image."""<if_stmt>len(sif_vector.shape)<eq>2<block_start>sif_vector=np.expand_dims(sif_vector axis=0)<block_end># Now rays is an array of shape [h, w, 3]. The origin is currently [0,0,0]
# because the rays are in camera space (for now).
lr=np.array([0.0 lr lr] dtype=np.float32)<line_sep># Just worry about a single step for now:
# The pose is 3-dof: distance, phi, theta.
coords=np.array([0.812717413913/1.75 0.0 0.0] dtype=np.float32)<line_sep># cam2world, eye = self._spherical_to_4x4(coords)
<for_stmt>i range(step_count)<block_start>log.verbose('Step %i: (%0.4f, %0.4f, %0.4f)'%(i coords[0] coords[1] coords[2]))<line_sep>grad,err,pts=self.session.run([self.pose3_gradients self.pose_error self.cam_3dof_pts] feed_dict={self.am_image_ph:am_image self.sif_input:sif_vector self.coords_ph:coords})<line_sep>grad=grad[0]<line_sep>log.verbose('Error: %0.2f'%err)<line_sep>log.verbose('grad: %s'%repr(grad))<line_sep>log.verbose('pts.shape: ' repr(pts.shape))<assert_stmt>len(grad.shape)<eq>1<assert_stmt>grad.shape[0]<eq>3<line_sep>update=lr<times>grad<line_sep>log.verbose('Update: ' str(update))<line_sep>gaps_util.ptsview(pts mesh=e.v1_gt_mesh)<line_sep>coords=coords-lr<times>grad<block_end><return>coords<block_end><def_stmt>optimize_to_gt self sif_vector example step_count=1 lr=0.01 vis=0 verbosity=0 target='all' samps='nss'<block_start>"""Iteratively optimizes a SIF or LDIF to fit ground truth in/out values."""<if_stmt>samps<eq>'nss'<block_start>all_samples=example.near_surface_samples.copy()<line_sep>np.random.shuffle(all_samples)<block_end><elif_stmt>samps<eq>'uni'<block_start>all_samples=example.uniform_samples.copy()<block_end><elif_stmt>samps<eq>'nssuni'<block_start>all_samples=np.concatenate([example.near_surface_samples example.uniform_samples] axis=0)<block_end><elif_stmt>samps<eq>'dodeca'<block_start>depth_ims=example.depth_images/1000.0<line_sep>all_samples=geom_util.depth_dodeca_to_samples(depth_ims)<block_end><elif_stmt>samps<eq>'depth'<block_start>depth_idx=1# TODO(kgenova) Make this the one in the observation.
depth_ims=example.depth_images/1000.0<line_sep>depth_im=depth_ims[0 depth_idx : : :]<line_sep>cam2world=geom_util.get_dodeca_camera_to_worlds()[depth_idx : :]<assert_stmt>depth_im.shape[0]<eq>224<assert_stmt>cam2world.shape[0]<eq>4<line_sep>log.verbose('Depth im shape: ' depth_im.shape)<line_sep>all_samples=geom_util.depth_image_to_samples(depth_im cam2world)<block_end><if_stmt>verbosity<ge>2<block_start>gaps_util.ptsview(all_samples[<ellipsis> :] self.extract_mesh(sif_vector 128))<block_end>np.random.shuffle(all_samples)<line_sep>cl=all_samples[: 3]<line_sep>all_samples[cl<l>0 3]=0<line_sep>all_samples[cl<g>0 3]=1<line_sep>samples,gt_class=np.split(all_samples [3] axis=-1)<line_sep>samples=samples[:self.optimizer_pc :]<line_sep>gt_class=gt_class[:self.optimizer_pc :]<def_stmt>print_sat_count vec<block_start>"""Prints the number of constraints that are satisfied and the total."""<line_sep>pred=self.class_at_samples(vec np.reshape(samples [-1 3]))<line_sep>pred_is_out=pred<g>0.5<line_sep>gt_is_out=gt_class<g>0.5<line_sep>log.verbose(pred_is_out.shape gt_is_out.shape)<line_sep>agree=np.logical_or(np.logical_and(pred_is_out gt_is_out) np.logical_and(np.logical_not(pred_is_out) np.logical_not(gt_is_out)))<line_sep>sat_count=np.count_nonzero(agree)<line_sep>log.info('%i/%i constraints are satisfied.'%(sat_count self.optimizer_pc))<block_end><if_stmt>verbosity<ge>1<block_start>log.info('Beginning optimization.')<line_sep>print_sat_count(sif_vector)<block_end><assert_stmt>gt_class.shape[-1]<eq>1<line_sep>sif_vector=sif_vector.copy()<line_sep>sif_vector=np.expand_dims(sif_vector axis=0)<line_sep>cur_vector=sif_vector.copy()<line_sep>ret_best=<false><if_stmt>ret_best<block_start>min_loss=np.inf<line_sep>best_vec=cur_vector.copy()<block_end>momentum=0.9<line_sep>velocity=np.zeros_like(cur_vector)<line_sep>cur_batch_idx=0<for_stmt>i range(step_count)<block_start>batch_start=cur_batch_idx<line_sep>batch_end=cur_batch_idx+self.optimizer_pc<if_stmt>batch_end<g>all_samples.shape[0]<block_start>np.random.shuffle(all_samples)<line_sep>batch_start=0<line_sep>batch_end=self.optimizer_pc<line_sep>cur_batch_idx=0<block_end>batch_all_samples=all_samples[batch_start:batch_end :]<line_sep>cur_batch_idx<augadd>self.optimizer_pc<line_sep>batch_samples,batch_gt_class=np.split(batch_all_samples [3] axis=-1)<line_sep>grad=self.session.run(self.ldif_gradients feed_dict={self.target_class_ph:batch_gt_class self.sif_input:cur_vector self.optimizer_samples:batch_samples})[0]<line_sep>vis_this_time=vis<ge>2<or>(vis<ge>1<and>(i<eq>0<or>i<eq>step_count-1))<line_sep>print_this_time=verbosity<ge>2<or>(verbosity<ge>1<and><not>i%1000)<if_stmt>vis_this_time<or>print_this_time<block_start>loss=self.session.run(self.optimizer_elt_loss feed_dict={self.target_class_ph:batch_gt_class self.sif_input:cur_vector self.optimizer_samples:batch_samples})<if_stmt>ret_best<block_start>lsum=np.sum(loss)<if_stmt>lsum<l>min_loss<block_start>min_loss=lsum<line_sep>best_vec=cur_vector.copy()<block_end><block_end># Assuming the loss is zero if a constraint is satisfied:
is_sat=self.optimizer_pc-np.count_nonzero(loss)<if_stmt>print_this_time<block_start>log.info('Step %i: Total loss: %s. Constraints %i/%i'%(i repr(np.sum(loss)) is_sat self.optimizer_pc))<block_end><if_stmt>vis_this_time<block_start>self.vis_loss(cur_vector gt_at_loss=gt_class loss=loss loss_positions=samples)<block_end><block_end><if_stmt>target<eq>'all-eq'<block_start>mults=42<times>[1]<block_end><elif_stmt>target<eq>'all'<block_start>mults=[0.001]+3<times>[0.001]+6<times>[0.0000001]+32<times>[50]<block_end><elif_stmt>target<eq>'centers'<block_start>mults=[0.000]+3<times>[0.001]+6<times>[0.0000000]+32<times>[0]<block_end><elif_stmt>target<eq>'radii'<block_start>mults=[0.000]+3<times>[0.000]+6<times>[0.0000001]+32<times>[0]<block_end><elif_stmt>target<eq>'features'<block_start>mults=[0.000]+3<times>[0.000]+6<times>[0.0000000]+32<times>[50]<block_end><elif_stmt>target<eq>'constants'<block_start>mults=[0.001]+3<times>[0.000]+6<times>[0.0000000]+32<times>[0]<block_end><else_stmt><block_start><assert_stmt><false><block_end>mults=np.array(mults).reshape([1 1 42])<line_sep>velocity=momentum<times>velocity+mults<times>lr<times>grad<line_sep>cur_vector=cur_vector-velocity<block_end><if_stmt>verbosity<ge>1<block_start>log.info('Finished optimization.')<line_sep>print_sat_count(cur_vector)<block_end><if_stmt>ret_best<block_start>cur_vector=best_vec<block_end><return>np.reshape(cur_vector self.unbatched_vector_shape)<block_end><def_stmt>vis_loss self sif_vector gt_at_loss loss loss_positions<block_start>"""Visualizes the loss mid-optimization."""<line_sep>loss=np.reshape(loss [-1 1])<line_sep>gt_at_loss=np.reshape(gt_at_loss [-1 1])<assert_stmt>gt_at_loss.shape[0]<eq>loss.shape[0]<line_sep>loss[gt_at_loss<le>0.5]=-loss[gt_at_loss<le>0.5]<line_sep>loss_positions=np.reshape(loss_positions [-1 3])<line_sep>arr=np.concatenate([loss_positions loss] axis=1)<with_stmt>py_util.py2_temporary_directory()<as>d<block_start>sdf_path=f'{d}/a.sdf'<with_stmt>file_util.open_file(sdf_path 'wb')<as>f<block_start>arr=arr.astype(np.float32)<line_sep>arr.tofile(f)<block_end>m=self.extract_mesh(sif_vector resolution=128)<line_sep>m_path=f'{d}/m.ply'<line_sep>file_util.write_mesh(m_path m)<line_sep>init_camera=('1.0451 1.17901 0.630437 '<concat>'-0.614259 -0.695319 -0.373119 '<concat>'-0.547037 0.715996 -0.433705')<line_sep>cmd='%s/ptsview %s %s -camera %s'%(path_util.gaps_path() sdf_path m_path init_camera)<line_sep>sp.check_output(cmd shell=<true>)<block_end><block_end><def_stmt>_grid_eval_cuda self sif_vector resolution extent<block_start>"""Evaluates a SIF/LDIF densely on a voxel grid."""<line_sep>log.verbose('Using custom CUDA kernel for evaluation.')<line_sep># First step: Get the path where the serialized occnet should be.
# The serialized occnet should be at whatever the checkpoint path is,
# but replace model.ckpt-[idx] with serialized-occnet-[idx].occnet
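# (For illustration: /ckpts/model.ckpt-500000 would map to
# /ckpts/serialized-occnet-500000.occnet; the index value here is made up.)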
checkpoint_path=self.ckpt.abspath<line_sep>log.info(f'Using checkpoint {checkpoint_path} to write OccNet file.')<assert_stmt>'model.ckpt-'<in>checkpoint_path<line_sep>occnet_path=checkpoint_path.replace('model.ckpt-' 'serialized-occnet-')<line_sep>occnet_path=occnet_path+'.occnet'<line_sep># Second step: If it isn't there, write it to disk.
<if_stmt><not>os.path.isfile(occnet_path)<block_start><assert_stmt>os.path.isdir(os.path.dirname(occnet_path))<if_stmt>self.job.model_config.hparams.ipe<eq>'t'<block_start>self.write_occnet_file(occnet_path)<block_end><else_stmt><block_start>occnet_path=path_util.get_path_to_ldif_root()+'/ldif2mesh/extracted.occnet'<block_end><block_end># Third step: open a temporary directory, and write the embedding.
# Make sure that the temp directories are deleted afterwards.
<with_stmt>py_util.py2_temporary_directory()<as>d<block_start>rep_path=f'{d}/ldif.txt'<line_sep>self.savetxt(sif_vector rep_path)<line_sep># Pick the path to the output grd file:
grd_path=f'{d}/grid.grd'<line_sep># Fourth step: Get the path to the kernel.
kernel_path=os.path.join(path_util.get_path_to_ldif_root() 'ldif2mesh/ldif2mesh')<if_stmt><not>os.path.isfile(kernel_path)<block_start><raise>ValueError(f'There is no compiled CUDA executable at {kernel_path}.')<block_end>cmd=(f'CUDA_VISIBLE_DEVICES=0 {kernel_path} {rep_path} {occnet_path} '<concat>f'{grd_path} -resolution {resolution}')<line_sep>log.verbose(f'Executing command {cmd}')<line_sep># TODO(kgenova) Support extent as a flag
<if_stmt>extent<ne>0.75<block_start><raise>ValueError('Currently only 0.75 extent is supported on the '<concat>'custom kernel. Please set use_inference_kernel to false for an'<concat>f' extent of {extent}.')<block_end># Fifth step: Invoke the kernel.
<try_stmt><block_start>cmd_result=sp.check_output(cmd shell=<true>)<line_sep>log.info(cmd_result.decode('utf-8').replace('\n' ''))<block_end><except_stmt>sp.CalledProcessError<as>e<block_start><if_stmt>'out of memory'<in>e.output.decode('utf-8')<block_start><raise>ValueError('The GPU does not have enough free memory left for the'<concat>' inference kernel. Please reduce the fraction'<concat>' reserved by tensorflow.')<block_end><elif_stmt>'no kernel image is available'<in>e.output.decode('utf-8')<block_start><raise>ValueError('It appears that the CUDA kernel was not built to your '<concat>'gpu\'s architecture. Hopefully this is an easy fix. '<concat>'Please go to developer.nvidia.com/cuda-gpus, and find '<concat>'your gpu from the list. Then, modify ./build_kernel.sh '<concat>'by adding compute_XX and sm_XX for whatever your GPU '<concat>'compute capability is according to the website. For '<concat>'example, a 2080 Ti would use compute_75 and sm_75. '<concat>'Note that if your card\'s compute capability is below '<concat>'35, it likely will fail to compile using this method. '<concat>'If you are seeing this error, please feel free to open '<concat>'up an issue and report it. We would like to support as '<concat>'many gpus as possible.')<block_end><else_stmt><block_start><raise>ValueError(f'Unrecognized error code {e.returncode} occurred'<concat>f' during inference kernel evaluation: {e.output}')<block_end><block_end># Sixth step: Read the grid file.
_,grd=file_util.read_grd(grd_path)<block_end># Seventh step: Verify the grid shape and return the grid.
log.verbose(f'The output CUDA grid has shape {grd.shape}.')<line_sep># gaps_util.grdview(grd)
<return>grd<block_end><def_stmt>_grid_eval self sif_vector resolution extent extract_parts world2local=<none><block_start>"""Evaluates the LDIF/SIF on a grid."""<line_sep>log.verbose('Evaluating SDF grid for mesh.')<if_stmt>self.use_inference_kernel<and><not>extract_parts<block_start><return>self._grid_eval_cuda(sif_vector resolution extent)<block_end><if_stmt>extract_parts<or>world2local<block_start>log.warning('Part extraction and world2local are not supported with the'<concat>' custom kernel.')<block_end>log.warning('Using pure tensorflow for grid evaluation; this will be slow.')<line_sep>t=time.time()<line_sep>sif_vector=np.reshape(sif_vector self.batched_vector_shape)<assert_stmt><not>resolution%self.block_res<line_sep>block_count=resolution<floordiv>self.block_res<line_sep>block_size=(2.0<times>extent)/block_count<line_sep>l_block=[]<line_sep>i=0<line_sep>dim_offset=1<if>extract_parts<else>0<line_sep>grid=self.local_decisions<if>extract_parts<else>self.predicted_alg_grid<for_stmt>li range(block_count)<block_start>l_min=-extent+(li)<times>block_size-0.5/resolution<line_sep>h_block=[]<for_stmt>hi range(block_count)<block_start>h_min=-extent+(hi)<times>block_size-0.5/resolution<line_sep>w_block=[]<for_stmt>wi range(block_count)<block_start>w_min=-extent+(wi)<times>block_size-0.5/resolution<line_sep>offset=np.reshape(np.array([w_min l_min h_min] dtype=np.float32) [1 1 1 3])<line_sep>sample_locations=block_size<times>self.base_grid+offset<if_stmt>world2local<is><not><none><block_start>sample_locations=geom_util_np.apply_4x4(sample_locations world2local are_points=<true>)<block_end>grid_out_np=self.session.run(grid feed_dict={self.sif_input:sif_vector self.sample_locations_ph:sample_locations})<line_sep>i<augadd>1<line_sep>w_block.append(grid_out_np)<block_end>h_block.append(np.concatenate(w_block axis=2+dim_offset))<block_end>l_block.append(np.concatenate(h_block axis=0+dim_offset))<block_end>grid_out=np.concatenate(l_block axis=1+dim_offset)<line_sep># log.verbose(f'Grid extent: {np.min(grid_out)}, {np.max(grid_out)}')
# grid_out -= 0.5
grid_out_time=time.time()<line_sep>log.verbose(f'Grid Eval Time: {grid_out_time-t}')<line_sep><return>grid_out<block_end><def_stmt>extract_mesh self sif_vectors resolution=128 extent=0.75 return_success=<false> world2local=<none><block_start>"""Extracts a mesh that is the sum of one or more SIF meshes."""<line_sep>extract_start_time=time.time()<if_stmt>isinstance(sif_vectors list)<block_start>volumes=[]<if_stmt>world2local<is><not><none><block_start><assert_stmt>isinstance(world2local list)<block_end><for_stmt>i,v enumerate(sif_vectors)<block_start>volumes.append(self._grid_eval(v resolution extent extract_parts=<false> world2local=world2local[i]<if>world2local<is><not><none><else><none>))<block_end>volume=np.sum(volumes axis=0)<block_end><else_stmt><block_start>volume=self._grid_eval(sif_vectors resolution extent extract_parts=<false> world2local=world2local)<block_end>grid_out_time=time.time()<line_sep>log.verbose(f'Grid eval time: {grid_out_time-extract_start_time}')<line_sep>had_crossing,mesh=extract_mesh.marching_cubes(volume extent)<if_stmt><not>had_crossing<block_start>log.warning('Warning: Marching Cubes found no surface.')<block_end>mesh.marching_cubes_successful=had_crossing<line_sep>done_time=time.time()<line_sep>log.verbose(f'MCubes Time: {done_time-grid_out_time}')<if_stmt>return_success<block_start><return>mesh had_crossing<block_end><return>mesh<block_end><def_stmt>extract_part_meshes self sif_vector resolution extent=0.75<block_start>elt_volume=self._grid_eval(sif_vector resolution extent extract_parts=<true> world2local=<none>)<line_sep>local_meshes=[]<for_stmt>i range(self.job.model_config.hparams.sc)<block_start>had_crossing,mesh_i=extract_mesh.marching_cubes(elt_volume[i <ellipsis>] extent)<line_sep>mesh_i.marching_cubes_successful=had_crossing<line_sep>local_meshes.append(mesh_i)<block_end><return>local_meshes<block_end><def_stmt>_chunk_sample_eval self samples query_fun chunk_size<block_start>"""Evaluates a set of query locations chunk by chunk to avoid OOM issues."""<line_sep># Note- this code will have strange behavior if there is randomness during
# decoding, because it chunks the decoding up into multiple calls.
<assert_stmt>len(samples.shape)<eq>2<line_sep>point_count=samples.shape[0]<if_stmt>point_count<eq>chunk_size<block_start>chunks=[samples]<block_end><else_stmt><block_start>pad_len=chunk_size-(point_count%chunk_size)<if_stmt>pad_len<block_start>samples=np.pad(samples ((0 pad_len) (0 0)) 'constant')<block_end><assert_stmt><not>(point_count+pad_len)%chunk_size<line_sep>chunk_count=(point_count+pad_len)<floordiv>chunk_size<line_sep>chunks=np.split(samples chunk_count axis=0)<block_end>out=[]<for_stmt>chunk chunks<block_start>out_i=query_fun(chunk)<assert_stmt>len(out_i.shape)<eq>2<assert_stmt>out_i.shape[0]<eq>chunk_size<line_sep>out.append(out_i)<block_end><return>np.concatenate(out axis=0)[:point_count :]<block_end><def_stmt>iou self sif_vector example<block_start>samps=example.uniform_samples[: :3]<line_sep>gt_is_inside=example.uniform_samples[: 3:4]<l>0.0<line_sep>pred_is_inside=self.class_at_samples(sif_vector samps)<l>0.5<line_sep>result=metrics.point_iou(pred_is_inside gt_is_inside)<line_sep><return>result<block_end><def_stmt>class_at_samples self sif_vector samples<block_start>"""Determines whether input xyz locations are inside or outside the shape.
Args:
sif_vector: A numpy array containing the LDIF/SIF to evaluate. Has shape
(element_count, element_length).
samples: A numpy array containing samples in the LDIF/SIF frame. Has shape
(sample_count, 3).
Returns:
A numpy array with shape (sample_count, 1). A float that is greater
than 0.5 outside the LDIF/SIF, and less than 0.5 inside (the same 0.5
threshold applied by iou() above).
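Example (an illustrative sketch only; assumes a constructed decoder `d`,
a SIF/LDIF vector `vec`, and an ndarray `xyz` of shape (sample_count, 3)):
pred = d.class_at_samples(vec, xyz)
is_inside = pred < 0.5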
"""<line_sep>sif_vector=np.reshape(sif_vector self.batched_vector_shape)<def_stmt>query sample_chunk<block_start>chunk_grid=sample_chunk.reshape([self.block_res self.block_res self.block_res 3])<line_sep>classes=self.session.run(self.predicted_class_grid feed_dict={self.sif_input:sif_vector self.sample_locations_ph:chunk_grid})<line_sep>classes=classes.reshape([self.block_res<power>3 1])<line_sep><return>classes<block_end><return>self._chunk_sample_eval(samples query self.block_res<power>3)<block_end><def_stmt>rbf_influence_at_samples self sif_vector samples<block_start>"""Evalutes the influence of each RBF in the SIF/LDIF at each sample.
Args:
sif_vector: A numpy array containing the LDIF to evaluate. Has shape
(element_count, element_length).
samples: A numpy array containing the samples in the LDIF frame. Has shape
(sample_count, 3).
Returns:
A numpy array with shape (sample_count, effective_element_count). The
RBF weight of each effective element at each sample point. The 'effective'
element count may be higher than the element count, depending on the
symmetry settings of the LDIF. In the case where an LDIF is partially
symmetric, some elements have multiple RBF weights: their main weight
(given first) and the weight(s) associated with the shadow element(s)
transformed by their symmetry matrix. See structured_implicit_function.py
for a mapping from element indices to equivalence classes. Regardless of
additional 'effective' elements, the first RBF weights correspond to the
'real' elements with no symmetry transforms applied, in order.
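Example (an illustrative sketch; `d` is a constructed decoder, `vec` a
SIF/LDIF vector, and `xyz` an ndarray of shape (sample_count, 3)):
weights = d.rbf_influence_at_samples(vec, xyz)
# weights.shape == (sample_count, effective_element_count)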
"""<line_sep># TODO(kgenova) It's a bit clunky to make the user refer to a different
# Python file to get symmetry equivalence classes. Maybe that mapping should
# be returned as needed.
sif_vector=np.reshape(sif_vector self.batched_vector_shape)<def_stmt>query sample_chunk<block_start>chunk_in=sample_chunk.reshape([self.true_sample_count 3])<line_sep>influences=self.session.run(self.predicted_influences feed_dict={self.generic_sample_ph:chunk_in self.sif_input:sif_vector})<line_sep><return>np.squeeze(influences)<block_end><return>self._chunk_sample_eval(samples query self.true_sample_count)<block_end><def_stmt>write_occnet_file self path<block_start>"""Serializes an occnet network and writes it to disk."""<line_sep>f=file_util.open_file(path 'wb')<line_sep># Get the weight tensors associated with the occnet:
<with_stmt>self.graph.as_default()<block_start>all_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)<line_sep>occnet_vars=contrib_framework.filter_variables(all_vars include_patterns=['eval_implicit_parameters'])<block_end># Extract all the model weights as numpy values:
model={}<for_stmt>v occnet_vars<block_start>value=self.session.run(v)<line_sep>log.verbose(f'{v.name}: {value.shape}')<assert_stmt>v.name<not><in>model<line_sep>model[v.name]=value<block_end># Serialize them into a single file:
<def_stmt>write_header base_scope# Write the shape so the number of occnet resnet layers and their sizes
# are known.
<block_start>num_resnet_layers=1<line_sep># Writes all arrays in row-major order.
dim=model[base_scope+'sample_resize_fc/fully_connected/weights:0'].shape[1]<line_sep>log.verbose(f'Dimensionality is {dim}')<line_sep>f.write(struct.pack('ii' num_resnet_layers dim))<block_end><def_stmt>write_fc_layer layer_scope<block_start>weights=model[layer_scope+'/fully_connected/weights:0']<line_sep>biases=model[layer_scope+'/fully_connected/biases:0']<line_sep>log.verbose(f'FC layer shapes: {weights.shape}, {biases.shape}')<line_sep>f.write(weights.astype('f').tostring())<line_sep>f.write(biases.astype('f').tostring())<block_end><def_stmt>write_cbn_layer layer_scope<block_start>write_fc_layer(layer_scope+'/beta_fc')<line_sep>write_fc_layer(layer_scope+'/gamma_fc')<line_sep>running_mean=float(model[layer_scope+'/running_mean:0'])<line_sep>running_var=float(model[layer_scope+'/running_variance:0'])<line_sep>log.verbose(f'Running mean, variance: {running_mean}, {running_var}')<line_sep>f.write(struct.pack('ff' running_mean running_var))<block_end><def_stmt>write_input_layer layer_scope<block_start>weights=model[layer_scope+'/fully_connected/weights:0']<line_sep>biases=model[layer_scope+'/fully_connected/biases:0']<line_sep>log.verbose(f'Input FC layer shapes: {weights.shape}, {biases.shape}')<line_sep>f.write(weights.astype('f').tostring())<line_sep>f.write(biases.astype('f').tostring())<block_end><def_stmt>write_activation_layer layer_scope<block_start>weights=model[layer_scope+'/fully_connected/weights:0']<line_sep>bias=float(model[layer_scope+'/fully_connected/biases:0'])<line_sep>log.verbose(f'Final FC layer shape and bias: {weights.shape}, {bias}')<line_sep>f.write(weights.astype('f').tostring())<line_sep>f.write(struct.pack('f' bias))<block_end>base='imp_net/eval_implicit_parameters/all_elements/OccNet/'<line_sep>write_header(base)<line_sep>write_input_layer(base+'sample_resize_fc')<line_sep>write_cbn_layer(base+'fc_resnet_layer_0/cbn_1')<line_sep>write_fc_layer(base+'fc_resnet_layer_0/fc_1')<line_sep>write_cbn_layer(base+'fc_resnet_layer_0/cbn_2')<line_sep>write_fc_layer(base+'fc_resnet_layer_0/fc_2')<line_sep>write_cbn_layer(base+'final_cbn')<line_sep>write_activation_layer(base+'final_activation')<line_sep>f.close()<block_end><block_end> |
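# A hedged sketch of how the binary layout produced by write_occnet_file
# above could be read back. The file stores no array shapes, so a reader
# must already know the layer dimensions; the helper below is hypothetical
# and only illustrates the byte layout: an 'ii' header
# (num_resnet_layers, dim) followed by row-major float32 weight/bias arrays
# in the exact order the writer emits them.
import struct
import numpy as np

def read_occnet_header_and_input_layer(path, sample_dim=3):
  """Reads the header and the first (input) FC layer; shapes are assumed."""
  with open(path, 'rb') as f:
    num_resnet_layers, dim = struct.unpack('ii', f.read(8))
    def read_array(shape):
      # Row-major float32, matching weights.astype('f').tostring() above.
      count = int(np.prod(shape))
      return np.frombuffer(f.read(4 * count), dtype=np.float32).reshape(shape)
    input_weights = read_array((sample_dim, dim))  # assumed (in_dim, dim)
    input_biases = read_array((dim,))
  return num_resnet_layers, dim, input_weights, input_biases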
<import_stmt>sys<line_sep>sys.path.insert(0 "../..")<import_stmt>pprint<import_stmt>pytest<import_stmt>logging<line_sep>logging.basicConfig(level=logging.DEBUG)<import_from_stmt>ttp ttp<def_stmt>test_answer_1 <block_start>"""https://stackoverflow.com/questions/63522291/parsing-blocks-of-text-within-a-file-into-objects"""<line_sep>data="""
#*Approximate Distance Oracles with Improved Query Time.
#@<NAME>
#t2015
#cEncyclopedia of Algorithms
#index555036b37cea80f954149ffc
#*Subset Sum Algorithm for Bin Packing.
#@<NAME>
#t2015
#cEncyclopedia of Algorithms
#index555036b37cea80f954149ffd
"""<line_sep>template="""
#*{{ info | ORPHRASE }}
#@{{ author | ORPHRASE }}
#t{{ year }}
#c{{ title | ORPHRASE }}
#index{{ index }}
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result(structure="flat_list")<line_sep>pprint.pprint(res)<assert_stmt>res<eq>[{"author":"<NAME>" "index":"555036b37cea80f954149ffc" "info":"Approximate Distance Oracles with Improved Query Time." "title":"Encyclopedia of Algorithms" "year":"2015" } {"author":"<NAME>" "index":"555036b37cea80f954149ffd" "info":"Subset Sum Algorithm for Bin Packing." "title":"Encyclopedia of Algorithms" "year":"2015" } ]<block_end># test_answer_1()
<def_stmt>test_answer_2 <block_start>"""https://stackoverflow.com/questions/63499479/extract-value-from-text-string-using-format-string-in-python"""<line_sep>data="""
name=username1, age=1001
name=username2, age=1002
name=username3, age=1003
"""<line_sep>template="name={{ name }}, age={{ age }}"<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result(structure="flat_list")<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[{"age":"1001" "name":"username1"} {"age":"1002" "name":"username2"} {"age":"1003" "name":"username3"} ]<block_end># test_answer_2()
<def_stmt>test_issue_20_answer <block_start>data_to_parse="""
(*, 172.16.58.3)
LISP0.4200, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
LISP0.4201, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(172.16.17.32, 172.16.58.3), 6d20h/00:02:23, flags: FT
Incoming interface: Vlan1029, RPF nbr 0.0.0.0
Outgoing interface list:
LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
"""<line_sep>show_mcast1="""
<template name="mcast" results="per_template">
<group name="mcast_entries.{{ overlay_src }}">
({{ overlay_src | _start_ | replace("*", "'*'")}}, {{ overlay_grp | IP }})
({{ overlay_src | _start_ | IP }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, flags: {{ entry_flags }}
Incoming interface: {{ incoming_intf }}, RPF nbr {{ rpf_neighbor }}
<group name="oil_entries*">
{{ outgoing_intf }}, ({{ underlay_src | IP }}, {{ underlay_grp | IP }}), Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
</group>
</group>
</template>
"""<line_sep>parser=ttp(template=show_mcast1)<line_sep>parser.add_input(data_to_parse template_name="mcast")<line_sep>parser.parse()<line_sep>res=parser.result(structure="dictionary")<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>{"mcast":{"mcast_entries":{"'*'":{"oil_entries":[{"oil_state_or_timer":"stopped" "oil_uptime":"1d18h" "outgoing_intf":"LISP0.4200" "underlay_grp":"172.16.31.10" "underlay_src":"172.16.58.3" } {"oil_state_or_timer":"stopped" "oil_uptime":"2d05h" "outgoing_intf":"LISP0.4201" "underlay_grp":"172.16.31.10" "underlay_src":"172.16.58.3" } ] "overlay_grp":"172.16.58.3" } "172.16.17.32":{"entry_flags":"FT" "entry_state_or_timer":"00:02:23" "entry_uptime":"6d20h" "incoming_intf":"Vlan1029" "oil_entries":[{"oil_state_or_timer":"stopped" "oil_uptime":"1d18h" "outgoing_intf":"LISP0.4100" "underlay_grp":"172.16.31.10" "underlay_src":"172.16.58.3" }] "overlay_grp":"172.16.58.3" "rpf_neighbor":"0.0.0.0" } }}}<block_end># test_issue_20_answer()
<def_stmt>test_answer_3 <block_start>"""
Fixed a bug with results forming - when there are two _start_ matches but
one of them is False, TTP was selecting the first match without checking
whether it was False; updated the decision logic to do that check.
"""<line_sep>data="""
/c/slb/virt 12
dis
ipver v4
vip 1.1.1.1
rtsrcmac ena
vname "my name"
/c/slb/virt 12/service 443 https
group 15
rport 443
pbind clientip
dbind forceproxy
/c/slb/virt 12/service 443 https/http
xforward ena
httpmod hsts_insert
/c/slb/virt 12/service 443 https/ssl
srvrcert cert certname
sslpol ssl-Policy
/c/slb/virt 12/service 80 http
group 15
rport 80
pbind clientip
dbind forceproxy
/c/slb/virt 12/service 80 http/http
xforward ena
/c/slb/virt 14
dis
ipver v4
vip 1.1.4.4
rtsrcmac ena
vname "my name2"
"""<line_sep>template="""
<template name="VIP_cfg" results="per_template">
<group name="{{ vip }}">
/c/slb/virt {{ virt_seq | DIGIT }}
dis {{ config_state | set("dis") }}
ipver {{ ipver}}
vip {{ vip }}
rtsrcmac {{ rtsrcmac }}
vname "{{ vip_name | ORPHRASE }}"
<group name="services.{{ port }}.{{ proto }}">
/c/slb/virt 12/service {{ port | DIGIT }} {{ proto | exclude(ssl) }}
group {{group_seq }}
rport {{ real_port }}
pbind {{ pbind }}
dbind {{ dbind }}
xforward {{ xforward }}
httpmod {{ httpmod }}
</group>
<group name="ssl_profile">
/c/slb/virt {{ virt_seq }}/service 443 https/ssl
srvrcert cert {{ ssl_server_cert }}
sslpol {{ ssl_profile }}
{{ ssl | set("https/ssl") }}
</group>
</group>
</template>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result(structure="dictionary")<line_sep># pprint.pprint(res, width=50)
<assert_stmt>res<eq>{"VIP_cfg":{"1.1.1.1":{"config_state":"dis" "ipver":"v4" "rtsrcmac":"ena" "services":{"443":{"https":{"dbind":"forceproxy" "group_seq":"15" "pbind":"clientip" "real_port":"443" } "https/http":{"httpmod":"hsts_insert" "xforward":"ena"} } "80":{"http":{"dbind":"forceproxy" "group_seq":"15" "pbind":"clientip" "real_port":"80" } "http/http":{"xforward":"ena"} } } "ssl_profile":{"ssl":"https/ssl" "ssl_profile":"ssl-Policy" "ssl_server_cert":"certname" "virt_seq":"12" } "vip_name":"my name" "virt_seq":"12" } "1.1.4.4":{"config_state":"dis" "ipver":"v4" "rtsrcmac":"ena" "vip_name":"my name2" "virt_seq":"14" } }}<block_end># test_answer_3()
<def_stmt>test_answer_4 <block_start>data="""
/c/slb/virt 12
dis
ipver v4
vip 1.1.1.1
rtsrcmac ena
vname "my name"
/c/slb/virt 12/service 443 https
group 15
rport 443
pbind clientip
dbind forceproxy
/c/slb/virt 12/service 443 https/http
xforward ena
httpmod hsts_insert
/c/slb/virt 12/service 443 https/ssl
srvrcert cert certname
sslpol ssl-Policy
/c/slb/virt 12/service 80 http
group 15
rport 80
pbind clientip
dbind forceproxy
/c/slb/virt 12/service 80 http/http
xforward ena
/c/slb/virt 14
dis
ipver v4
vip 1.1.4.4
rtsrcmac ena
vname "my name2"
"""<line_sep>template="""
<template name="VIP_cfg" results="per_template">
<group name="{{ vip }}">
/c/slb/virt {{ virt_seq | DIGIT }}
dis {{ config_state | set("dis") }}
ipver {{ ipver}}
vip {{ vip }}
rtsrcmac {{ rtsrcmac }}
vname "{{ vip_name | ORPHRASE }}"
<group name="services.{{ port }}" contains="dbind, pbind">
/c/slb/virt 12/service {{ port | DIGIT }} {{ proto | exclude(ssl) }}
group {{group_seq }}
rport {{ real_port }}
pbind {{ pbind }}
dbind {{ dbind }}
xforward {{ xforward }}
httpmod {{ httpmod }}
</group>
<group name="ssl_profile">
/c/slb/virt {{ virt_seq }}/service 443 https/ssl
srvrcert cert {{ ssl_server_cert }}
sslpol {{ ssl_profile }}
{{ ssl | set("https/ssl") }}
</group>
</group>
</template>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result(structure="dictionary")<line_sep># pprint.pprint(res, width=50)
<assert_stmt>res<eq>{"VIP_cfg":{"1.1.1.1":{"config_state":"dis" "ipver":"v4" "rtsrcmac":"ena" "services":{"443":{"dbind":"forceproxy" "group_seq":"15" "pbind":"clientip" "proto":"https" "real_port":"443" } "80":{"dbind":"forceproxy" "group_seq":"15" "pbind":"clientip" "proto":"http" "real_port":"80" } } "ssl_profile":{"ssl":"https/ssl" "ssl_profile":"ssl-Policy" "ssl_server_cert":"certname" "virt_seq":"12" } "vip_name":"my name" "virt_seq":"12" } "1.1.4.4":{"config_state":"dis" "ipver":"v4" "rtsrcmac":"ena" "vip_name":"my name2" "virt_seq":"14" } }}<block_end># test_answer_4()
<def_stmt>test_issue_20_answer_2 <block_start>data_to_parse="""
(*, 192.168.3.11)
LISP0.4200, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
LISP0.4201, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(172.16.17.32, 239.100.100.100), 2d05h/00:01:19, flags: FT
Incoming interface: Vlan1029, RPF nbr 0.0.0.0
Outgoing interface list:
LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
LISP0.4101, (172.16.58.3, 172.16.31.10), Forward/Sparse, 2d05h/stopped
(*, 172.16.58.3), 6d20h/00:03:28, RP 172.16.17.32, flags: S
Incoming interface: Null, RPF nbr 0.0.0.0
Outgoing interface list:
Vlan3014, Forward/Sparse, 1d18h/00:03:28
LISP0.4100, (172.16.58.3, 172.16.31.10), Forward/Sparse, 1d18h/stopped
"""<line_sep>show_mcast1="""
<template name="mcast" results="per_template">
<group name="mcast_entries.{{ overlay_src }}">
({{ overlay_src | _start_ | replace("*", "'*'") }}, {{ overlay_grp | IP }})
({{ overlay_src | _start_ | IP }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, flags: {{ entry_flags }}
({{ overlay_src | _start_ | replace("*", "'*'") }}, {{ overlay_grp | IP }}), {{ entry_uptime }}/{{ entry_state_or_timer }}, RP {{ rp }}, flags: {{ entry_flags }}
Incoming interface: {{ incoming_intf }}, RPF nbr {{ rpf_neighbor }}
<group name="oil_entries*">
{{ outgoing_intf }}, Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
{{ outgoing_intf }}, ({{ underlay_src | IP }}, {{ underlay_grp | IP }}), Forward/Sparse, {{ oil_uptime }}/{{ oil_state_or_timer}}
</group>
</group>
</template>
"""<line_sep>parser=ttp(template=show_mcast1)<line_sep>parser.add_input(data_to_parse template_name="mcast")<line_sep>parser.parse()<line_sep>res=parser.result(structure="dictionary")<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>{"mcast":{"mcast_entries":{"'*'":[{"overlay_grp":"192.168.3.11"} {"entry_flags":"S" "entry_state_or_timer":"00:03:28" "entry_uptime":"6d20h" "incoming_intf":"Null" "oil_entries":[{"oil_state_or_timer":"00:03:28" "oil_uptime":"1d18h" "outgoing_intf":"Vlan3014" "underlay_grp":"172.16.31.10" "underlay_src":"172.16.58.3" }] "overlay_grp":"172.16.58.3" "rp":"172.16.17.32" "rpf_neighbor":"0.0.0.0" } ] "172.16.17.32":{"entry_flags":"FT" "entry_state_or_timer":"00:01:19" "entry_uptime":"2d05h" "incoming_intf":"Vlan1029" "overlay_grp":"172.16.58.3" "rpf_neighbor":"0.0.0.0" } }}}<block_end># test_issue_20_answer_2()
<def_stmt>test_docs_ttp_dictionary_usage_example <block_start>template="""
<input load="text">
interface Lo0
ip address 172.16.17.32/29
!
interface Lo1
ip address 1.1.1.1/30
</input>
<group macro="add_last_host">
interface {{ interface }}
ip address {{ ip }}
</group>
<macro>
def add_last_host(data):
ip_obj, _ = _ttp_["match"]["to_ip"](data["ip"])
all_ips = list(ip_obj.network.hosts())
data["last_host"] = str(all_ips[-1])
return data
</macro>
"""<line_sep>parser=ttp(template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[[{"interface":"Lo0" "ip":"172.16.17.32/29" "last_host":"172.16.58.3" } {"interface":"Lo1" "ip":"1.1.1.1/30" "last_host":"1.1.1.2"} ]]]<block_end># test_docs_ttp_dictionary_usage_example()
<def_stmt>test_github_issue_21_answer <block_start>data_to_parse="""
R1#sh ip nbar protocol-discovery protocol
GigabitEthernet1
Last clearing of "show ip nbar protocol-discovery" counters 00:13:45
Input Output
----- ------
Protocol Packet Count Packet Count
Byte Count Byte Count
5min Bit Rate (bps) 5min Bit Rate (bps)
5min Max Bit Rate (bps) 5min Max Bit Rate (bps)
---------------------------- ------------------------ ------------------------
ssh 191 134
24805 22072
2000 1000
1999 1001
unknown 172 503
39713 31378
0 0
3000 0
ping 144 144
14592 14592
0 0
1000 1000
dns 107 0
21149 0
0 0
2000 0
vrrp 0 738
0 39852
0 0
0 0
ldp 174 175
13224 13300
0 0
0 0
ospf 86 87
9460 9570
0 0
0 0
Total 874 1781
122943 130764
2000 1000
8000 2000
"""<line_sep>show_nbar="""
<template name="nbar" results="per_template">
<vars>C1 = "DIGIT | to_int | to_list | joinmatches"</vars>
<group name="{{ interface }}">
{{ interface | re('Gig.+') | re('Ten.+') }}
<group name="{{ protocol }}" macro="map_to_keys">
{{ protocol }} {{ in | chain(C1) }} {{ out | chain(C1) }}
{{ ignore(r"\\s+") }} {{ in | chain(C1) }} {{ out | chain(C1) }}
</group>
</group>
<macro>
def map_to_keys(data):
# uncomment to see data
# print(data)
inp_values = data.pop("in")
out_values = data.pop("out")
inp_keys = ["IN Packet Count", "IN Byte Count", "IN 5min Bit Rate (bps)", "IN 5min Max Bit Rate (bps)"]
out_keys = ["OUT Packet Count", "OUT Byte Count", "OUT 5min Bit Rate (bps)", "OUT 5min Max Bit Rate (bps)"]
data.update(dict(zip(inp_keys, inp_values)))
data.update(dict(zip(out_keys, out_values)))
return data
</macro>
</template>
"""<line_sep>parser=ttp(template=show_nbar)<line_sep>parser.add_input(data_to_parse template_name="nbar")<line_sep>parser.parse()<line_sep>res=parser.result(structure="dictionary")<line_sep>pprint.pprint(res width=100)<assert_stmt>res<eq>{"nbar":{"GigabitEthernet1 ":{"Total":{"IN 5min Bit Rate (bps)":2000 "IN 5min Max Bit Rate (bps)":8000 "IN Byte Count":122943 "IN Packet Count":874 "OUT 5min Bit Rate (bps)":1000 "OUT 5min Max Bit Rate (bps)":2000 "OUT Byte Count":130764 "OUT Packet Count":1781 } "dns":{"IN 5min Bit Rate (bps)":0 "IN 5min Max Bit Rate (bps)":2000 "IN Byte Count":21149 "IN Packet Count":107 "OUT 5min Bit Rate (bps)":0 "OUT 5min Max Bit Rate (bps)":0 "OUT Byte Count":0 "OUT Packet Count":0 } "ldp":{"IN 5min Bit Rate (bps)":0 "IN 5min Max Bit Rate (bps)":0 "IN Byte Count":13224 "IN Packet Count":174 "OUT 5min Bit Rate (bps)":0 "OUT 5min Max Bit Rate (bps)":0 "OUT Byte Count":13300 "OUT Packet Count":175 } "ospf":{"IN 5min Bit Rate (bps)":0 "IN 5min Max Bit Rate (bps)":0 "IN Byte Count":9460 "IN Packet Count":86 "OUT 5min Bit Rate (bps)":0 "OUT 5min Max Bit Rate (bps)":0 "OUT Byte Count":9570 "OUT Packet Count":87 } "ping":{"IN 5min Bit Rate (bps)":0 "IN 5min Max Bit Rate (bps)":1000 "IN Byte Count":14592 "IN Packet Count":144 "OUT 5min Bit Rate (bps)":0 "OUT 5min Max Bit Rate (bps)":1000 "OUT Byte Count":14592 "OUT Packet Count":144 } "ssh":{"IN 5min Bit Rate (bps)":2000 "IN 5min Max Bit Rate (bps)":1999 "IN Byte Count":24805 "IN Packet Count":191 "OUT 5min Bit Rate (bps)":1000 "OUT 5min Max Bit Rate (bps)":1001 "OUT Byte Count":22072 "OUT Packet Count":134 } "unknown":{"IN 5min Bit Rate (bps)":0 "IN 5min Max Bit Rate (bps)":3000 "IN Byte Count":39713 "IN Packet Count":172 "OUT 5min Bit Rate (bps)":0 "OUT 5min Max Bit Rate (bps)":0 "OUT Byte Count":31378 "OUT Packet Count":503 } "vrrp":{"IN 5min Bit Rate (bps)":0 "IN 5min Max Bit Rate (bps)":0 "IN Byte Count":0 "IN Packet Count":0 "OUT 5min Bit Rate (bps)":0 "OUT 5min Max Bit Rate (bps)":0 "OUT Byte Count":39852 "OUT Packet Count":738 } }}}<block_end># test_github_issue_21_answer()
<def_stmt>test_github_issue_22 <block_start>data="""
interface Loopback0
description Fabric Node Router ID
ip address 172.16.58.3 255.255.255.255
ip pim sparse-mode
ip router isis
clns mtu 1400
end
interface Loopback0
description Fabric Node Router ID
ip address 172.16.17.32 255.255.255.255
ip pim sparse-mode
ip router isis
clns mtu 1400
end
"""<line_sep>template="""{{ ignore(r"\\s+") }}ip address {{ ip_address }} 255.255.255.255"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>[[[{"ip_address":"172.16.58.3"} {"ip_address":"172.16.17.32"}]]]<block_end># test_github_issue_22()
<def_stmt>test_github_issue_24 <block_start>data="""
19: IP4 1.1.1.1, 00:03:b2:78:04:13, vname portal, NO SERVICES UP
Virtual Services:
http: rport http, group 11, health http (HTTP), pbind clientip
Real Servers:
22: 10.10.10.10, web1, group ena, health (runtime HTTP), 0 ms, FAILED
Reason: N/A
23: 10.11.11.11, web2, group ena, health (runtime HTTP), 0 ms, FAILED
Reason: N/A
https: rport https, group 12, health tcp (TCP), pbind clientip
Real Servers:
22: 10.10.10.10, web1, group ena, health (runtime TCP), 0 ms, FAILED
Reason: N/A
23: 10.11.11.11, web2, group ena, health (runtime TCP), 0 ms, FAILED
Reason: N/A
"""<line_sep>template="""
<template name="VIP_cfg" results="per_template">
<group name="{{ vs_instance }}" default="">
{{ vs_instance }}: IP4 {{ vs_ip }},{{ ignore(".+") }}
<group name="services*" default="">
{{ vs_service }}: rport {{ rport }},{{ ignore(".+") }}
<group name="pool*" default="">
{{ node_id }}: {{ node_ip }},{{ ignore(".+") }}
Reason: {{ reason }}
</group>
</group>
</group>
</template>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result(structure="dictionary")<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>{"VIP_cfg":{"19":{"services":[{"pool":[{"node_id":"22" "node_ip":"10.10.10.10" "reason":"N/A" } {"node_id":"23" "node_ip":"10.11.11.11" "reason":"N/A" } ] "rport":"http" "vs_service":"http" } {"pool":[{"node_id":"22" "node_ip":"10.10.10.10" "reason":"N/A" } {"node_id":"23" "node_ip":"10.11.11.11" "reason":"N/A" } ] "rport":"https" "vs_service":"https" } ] "vs_ip":"1.1.1.1" }}}<block_end># test_github_issue_24()
<def_stmt>test_reddit_answer_1 <block_start>"""
https://www.reddit.com/r/networking/comments/j106ot/export_custom_lists_from_the_config_aruba_switch/
Hit a bug while working on this template - the join action was overridden by the ignore indicator's add action.
"""<line_sep>data="""
SWITCH# show vlan port 2/11 detail
Status and Counters - VLAN Information - for ports 2/11
Port name:
VLAN ID Name | Status Voice Jumbo Mode
------- -------------------- + ---------- ----- ----- --------
60 ABC | Port-based No No Tagged
70 DEF | Port-based No No Tagged
101 GHIJ | Port-based No No Untagged
105 KLMNO | Port-based No No Tagged
116 PQRS | Port-based No No Tagged
117 TVU | Port-based No No Tagged
SWITCH# show vlan port 2/12 detail
Status and Counters - VLAN Information - for ports 2/12
Port name:
VLAN ID Name | Status Voice Jumbo Mode
------- -------------------- + ---------- ----- ----- --------
61 ABC | Port-based No No Tagged
71 DEF | Port-based No No Tagged
103 GHI | Port-based No No Untagged
"""<line_sep>template="""
<vars>
hostname="gethostname"
</vars>
<group name="vlans*">
Status and Counters - VLAN Information - for ports {{ Port_Number }}
{{ Tagged_VLAN | joinmatches(" ") }} {{ ignore }} | {{ ignore }} {{ ignore }} {{ ignore }} Tagged
{{ Untagged_VLAN }} {{ ignore }} | {{ ignore }} {{ ignore }} {{ ignore }} Untagged
{{ Hostname | set(hostname) }}
</group>
<output>
format = "csv"
path = "vlans"
headers = "Hostname, Port_Number, Untagged_VLAN, Tagged_VLAN"
</output>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># print(res)
<assert_stmt>res<eq>['"Hostname","Port_Number","Untagged_VLAN","Tagged_VLAN"\n"SWITCH","2/11","101","60 70 105 116 117"\n"SWITCH","2/12","103","61 71"']<block_end># test_reddit_answer_1()
<def_stmt>test_reddit_answer_2 <block_start>data="""
config router ospf
set abr-type standard
set auto-cost-ref-bandwidth 1000
set distance-external 110
set distance-inter-area 110
set distance-intra-area 110
set database-overflow disable
set database-overflow-max-lsas 10000
set database-overflow-time-to-recover 300
set default-information-originate disable
set default-information-metric 10
set default-information-metric-type 2
set default-information-route-map ''
set default-metric 10
set distance 110
set rfc1583-compatible disable
set router-id 10.1.1.1
set spf-timers 5 10
set bfd disable
set log-neighbour-changes enable
set distribute-list-in "OSPF_IMPORT_PREFIX"
set distribute-route-map-in ''
set restart-mode none
set restart-period 120
config area
edit 0.0.0.1
set shortcut disable
set authentication none
set default-cost 10
set nssa-translator-role candidate
set stub-type summary
set type nssa
set nssa-default-information-originate disable
set nssa-default-information-originate-metric 10
set nssa-default-information-originate-metric-type 2
set nssa-redistribution enable
next
end
config ospf-interface
edit "vlan1-int"
set interface "Vlan1"
set ip 0.0.0.0
set authentication text
set authentication-key netconanRemoved13
set prefix-length 0
set retransmit-interval 5
set transmit-delay 1
set cost 0
set priority 1
set dead-interval 40
set hello-interval 10
set hello-multiplier 0
set database-filter-out disable
set mtu 0
set mtu-ignore disable
set network-type point-to-point
set bfd global
set status enable
set resync-timeout 40
next
edit "vlan2-int"
set interface "vlan2"
set ip 0.0.0.0
set authentication text
set authentication-key netconanRemoved14
set prefix-length 0
set retransmit-interval 5
set transmit-delay 1
set cost 0
set priority 1
set dead-interval 40
set hello-interval 10
set hello-multiplier 0
set database-filter-out disable
set mtu 0
set mtu-ignore disable
set network-type point-to-point
set bfd global
set status enable
set resync-timeout 40
next
end
config network
edit 1
set prefix 10.1.1.1 255.255.255.252
set area 0.0.0.1
next
edit 2
set prefix 10.1.1.3 255.255.255.252
set area 0.0.0.1
next
end
config redistribute "connected"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "static"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "rip"
set status disable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "bgp"
set status enable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
config redistribute "isis"
set status disable
set metric 0
set routemap ''
set metric-type 2
set tag 0
end
end
"""<line_sep>template="""
<vars>
clean_phrase = [
'ORPHRASE',
'macro(\"clean_str\")'
]
clean_list = [
'ORPHRASE',
'macro(\"build_list\")'
]
</vars>
<macro>
def build_list(data):
if "\\" \\"" in data:
t = data.split("\\" \\"")
for i in range(0, len(t)):
t[i] = t[i].strip("\\"").replace(" ", "_")
i+=1
return t
else:
return [data.strip("\\"").replace(" ", "_")]
def clean_str(data):
return data.replace("\\"","").replace(" ", "_")
def match_ip_or_any(data):
import ipaddress
if data == \"any\":
return data
elif "/" in data:
return str(data)
else:
t = data.replace(" ", "/")
return str(ipaddress.IPv4Network(t, strict=False))
def ignore_empty(data):
if data == "\'\'":
return bool(False)
else:
return data
</macro>
<macro>
def skip_empty(data):
if data == {}:
return False
return data
</macro>
<group name="ospf">
config router ospf {{ _start_ }}
set auto-cost-ref-bandwidth {{ ref_bw }}
set default-information-originate {{ default_originate | contains("enable") }}
set default-information-metric {{ default_originate_metric }}
set default-information-metric-type {{ default_originate_metric_type }}
set default-information-route-map {{ default_originate_routemap | chain("clean_phrase") | macro("ignore_empty") }}
set default-metric {{ default_rt_metric }}
set rfc1583-compatible {{ rfc1583_compat | contains("enable") }}
set router-id {{ router_id }}
set distribute-list-in {{ dist_list_in | chain("clean_phrase") | macro("ignore_empty") }}
set distribute-route-map-in {{ dist_routemap_in | chain("clean_phrase") | macro("ignore_empty") }}
<group name="areas*" macro="skip_empty">
config area {{ _start_ }}
<group>
edit {{ area | _start_ }}
set stub-type {{ stub_type }}
set type {{ area_type }}
set nssa-default-information-originate {{ nssa_default_originate | contains("enable") }}
set nssa-default-information-originate-metric {{ nssa_default_metric }}
set nssa-default-information-originate-metric-type {{ nssa_default_metric_type }}
set nssa-redistribution {{ nssa_redis }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="interfaces*" macro="skip_empty">
config ospf-interface {{ _start_ }}
<group contains="status">
edit {{ name | chain("clean_phrase") | _start_ }}
set interface {{ interface | chain("clean_phrase")}}
set ip {{ ip | exclude("0.0.0.0") }}
set cost {{ cost | exclude("0") }}
set priority {{ priority }}
set mtu {{ mtu | exclude("0") }}
set network-type {{ network }}
set status {{ status | contains("enable") }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="networks*" macro="skip_empty">
config network {{ _start_ }}
<group>
edit {{ id | _start_ }}
set prefix {{ prefix | ORPHRASE | to_ip | with_prefixlen }}
set area {{ area }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
<group name="redistribute*" contains="status">
config redistribute {{ protocol | chain("clean_phrase") | _start_ }}
set status {{ status | contains('enable') }}
set route-map {{ route_map | chain("clean_phrase") | macro("ignore_empty") }}
set metric-type {{ metric-type }}
set metric {{ metric | exclude("0") }}
set tag {{ tag | exclude("0")}}
end {{ _end_ }}
</group>
end {{ _end_ }}
</group>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"ospf":{"areas":[{"area":"0.0.0.1" "area_type":"nssa" "nssa_default_metric":"10" "nssa_default_metric_type":"2" "nssa_redis":"enable" "stub_type":"summary" }] "default_originate_metric":"10" "default_originate_metric_type":"2" "default_rt_metric":"10" "dist_list_in":"OSPF_IMPORT_PREFIX" "interfaces":[{"interface":"Vlan1" "name":"vlan1-int" "network":"point-to-point" "priority":"1" "status":"enable" } {"interface":"vlan2" "name":"vlan2-int" "network":"point-to-point" "priority":"1" "status":"enable" } ] "networks":[{"area":"0.0.0.1" "id":"1" "prefix":"10.1.1.1/30"} {"area":"0.0.0.1" "id":"2" "prefix":"10.1.1.3/30"} ] "redistribute":[{"metric-type":"2" "protocol":"connected" "status":"enable" } {"metric-type":"2" "protocol":"static" "status":"enable"} {"metric-type":"2" "protocol":"bgp" "status":"enable"} ] "ref_bw":"1000" "router_id":"10.1.1.1" }}]]<block_end># test_reddit_answer_2()
<def_stmt>test_github_issue_32 <block_start>data="""
.id=*c;export-route-targets=65001:48;65001:0;import-route-targets=65001:48;interfaces=lo-ext;vlan56;route-distinguisher=65001:48;routing-mark=VRF_EXT
.id=*10;comment=;export-route-targets=65001:80;import-route-targets=65001:80;65001:0;interfaces=lo-private;route-distinguisher=65001:80;routing-mark=VRF_PRIVATE
"""<line_sep>template="""
<group method="table">
.id={{ id | exclude(";") }};export-route-targets={{ export-route-targets }};import-route-targets={{ import-route-targets }};interfaces={{ interfaces }};route-distinguisher={{ route-distinguisher }};routing-mark={{ routing-mark }}
.id={{ id }};comment{{ comment }};export-route-targets={{ export-route-targets }};import-route-targets={{ import-route-targets }};interfaces={{ interfaces }};route-distinguisher={{ route-distinguisher }};routing-mark={{ routing-mark }}
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result(structure="flat_list")<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[{"export-route-targets":"65001:48;65001:0" "id":"*c" "import-route-targets":"65001:48" "interfaces":"lo-ext;vlan56" "route-distinguisher":"65001:48" "routing-mark":"VRF_EXT" } {"comment":"=" "export-route-targets":"65001:80" "id":"*10" "import-route-targets":"65001:80;65001:0" "interfaces":"lo-private" "route-distinguisher":"65001:80" "routing-mark":"VRF_PRIVATE" } ]<block_end># test_github_issue_32()
<def_stmt>test_slack_answer_1 <block_start>data="""
Firmware
Version
----------------
02.1.1 Build 002
Hardware
Version
----------------
V2R4
"""<line_sep>template="""
<group name="versions">
Hardware {{ _start_ }}
Firmware {{ _start_ }}
{{ version | PHRASE | let("type", "firmware") }}
{{ version | exclude("---") | exclude("Vers") | let("type", "hardware") }}
{{ _end_ }}
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result(structure="flat_list")<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[{"versions":[{"type":"firmware" "version":"02.1.1 Build 002"} {"type":"hardware" "version":"V2R4"} ]}]<block_end># test_slack_answer_1()
<def_stmt>test_group_default_docs <block_start>template="""
<input load="text">
device-hostame uptime is 27 weeks, 3 days, 10 hours, 46 minutes, 10 seconds
</input>
<group name="uptime**">
device-hostame uptime is {{ uptime | PHRASE }}
<group name="software">
software version {{ version | default("uncknown") }}
</group>
</group>
<group name="domain" default="Uncknown">
Default domain is {{ fqdn }}
</group>
"""<line_sep>parser=ttp(template=template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"domain":{"fqdn":"Uncknown"} "uptime":{"software":{"version":"uncknown"} "uptime":"27 weeks, 3 days, 10 hours, 46 minutes, 10 seconds" } }]]<block_end># test_group_default_docs()
<def_stmt>test_github_issue_34_answer <block_start>template="""
<input load="text">
Hi World
</input>
<group name='demo'>
<group name='audiences*'>
Hello {{ audience | default([]) }}
</group>
</group>
"""<line_sep>parser=ttp(template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"demo":{"audiences":[{"audience":[]}]}}]]<block_end># test_github_issue_34_answer()
<def_stmt>test_github_issue_33_answer_1 <block_start>template="""
<input load="text">
server 1.1.1.1
server 172.16.31.10 172.16.31.10
server 172.16.17.32 172.16.17.32 172.16.31.10
</input>
<group name="servers" method="table">
server {{ server | re(r"\\S+") | let("servers_number", 1 ) }}
server {{ server | re(r"\\S+ \\S+") | let("servers_number", 2) }}
server {{ server | re(r"\\S+ \\S+ \\S+") | let("servers_number", 3) }}
</group>
"""<line_sep>parser=ttp(template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"servers":[{"server":"1.1.1.1" "servers_number":1} {"server":"172.16.31.10 172.16.31.10" "servers_number":2} {"server":"172.16.17.32 172.16.17.32 172.16.31.10" "servers_number":3} ]}]]<block_end># test_github_issue_33_answer_1()
<def_stmt>test_issue_36 <block_start>template="""
<input load="text">
ip access-list standard 42
10 remark machine_A
10 permit 192.168.200.162
20 remark machine_B
20 permit 192.168.200.149
30 deny any log
ip access-list standard 98
10 permit 10.10.10.1
20 remark toto
20 permit 30.30.30.1
30 permit 30.30.30.0 0.0.0.255
ip access-list standard 99
10 permit 10.20.30.40 log
20 permit 20.30.40.1 log
30 remark DEVICE - SNMP RW
30 permit 172.16.58.3 0.0.0.127
40 permit 172.16.17.32 0.0.0.63
ip access-list extended 199
10 remark COLLECTOR - SNMP
10 permit ip 172.16.17.32 0.0.0.255 any
20 remark RETURN - Back
20 permit ip 172.16.31.10 0.0.0.127 any
30 remark VISUALIZE
30 permit ip host 172.16.58.3 any
</input>
<group name="ip.{{ acl_type }}.{{ acl_name }}">
ip access-list {{ acl_type }} {{ acl_name }}
<group name="{{ entry_id }}*" method="table">
{{ entry_id }} remark {{ remark_name | re(".+") | let("action", "remark") }}
{{ entry_id }} {{ action }} {{ src_host }}
{{ entry_id }} {{ action }} {{ src_host | let("log", "log") }} log
{{ entry_id }} {{ action }} {{ protocol }} host {{ src_host | let("dest_any", "any") }} any
{{ entry_id }} {{ action }} {{ protocol }} {{ src_ntw | let("dest_any", "any") }} {{ src_wildcard | IP }} any
{{ entry_id }} {{ action }} {{ src_ntw }} {{ src_wildcard | IP }}
</group>
</group>
"""<line_sep>parser=ttp(template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res)<assert_stmt>res<eq>[[{"ip":{"extended":{"199":{"10":[{"action":"remark" "remark_name":"COLLECTOR - SNMP"} {"action":"permit" "dest_any":"any" "protocol":"ip" "src_ntw":"172.16.17.32" "src_wildcard":"0.0.0.255" } ] "20":[{"action":"remark" "remark_name":"RETURN - Back"} {"action":"permit" "dest_any":"any" "protocol":"ip" "src_ntw":"172.16.31.10" "src_wildcard":"0.0.0.127" } ] "30":[{"action":"remark" "remark_name":"VISUALIZE"} {"action":"permit" "dest_any":"any" "protocol":"ip" "src_host":"172.16.58.3" } ] }} "standard":{"42":{"10":[{"action":"remark" "remark_name":"machine_A"} {"action":"permit" "src_host":"192.168.200.162"} ] "20":[{"action":"remark" "remark_name":"machine_B"} {"action":"permit" "src_host":"192.168.200.149"} ] "30":[{"action":"deny" "log":"log" "src_host":"any"}] } "98":{"10":[{"action":"permit" "src_host":"10.10.10.1"}] "20":[{"action":"remark" "remark_name":"toto"} {"action":"permit" "src_host":"30.30.30.1"} ] "30":[{"action":"permit" "src_ntw":"30.30.30.0" "src_wildcard":"0.0.0.255" }] } "99":{"10":[{"action":"permit" "log":"log" "src_host":"10.20.30.40" }] "20":[{"action":"permit" "log":"log" "src_host":"20.30.40.1" }] "30":[{"action":"remark" "remark_name":"DEVICE - SNMP RW"} {"action":"permit" "src_ntw":"172.16.58.3" "src_wildcard":"0.0.0.127" } ] "40":[{"action":"permit" "src_ntw":"60.60.60.64" "src_wildcard":"0.0.0.63" }] } } }}]]<block_end># test_issue_36()
<def_stmt>test_github_issue_37_original_data_template <block_start>template="""
<macro>
import re
def qinq(data):
data = re.sub(r"\\*", r"qinq", data)
return data
</macro>
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id | _start_ }} customer {{ customer_id }} create
description "{{ description | ORPHRASE | default("none") }}"
service-mtu {{ service_mtu | default("none") }}
service-name "{{ service_name | ORPHRASE | default("none") }}"
<group name="endpoint" default="none">
endpoint {{ endpoint | _start_ }} create
revert-time {{ revert_time | default("none") }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | macro("qinq") | _start_ | ORPHRASE }} create
description "{{ description | ORPHRASE | default("none")}}"
multi-service-site "{{ mss_name | default("none") }}"
<group name="ingress" default="default_ingress" >
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress" default="default_egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id | default("none")}}:{{vc_id | _start_ | default("none") }} endpoint {{ endpoint | default("none") }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id | _start_ }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""<line_sep>data="""
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (192.168.3.11)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"service":{"epipe":{"103076":{"customer_id":"160" "description":"vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:" "regular_sdp":{"8051":{"state":"enabled" "vc_id":"103076"}} "sap":{"1/2/12:20.qinq":{"description":"vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:" "egress":{"sap_egress":"1)" "scheduler_policy":"none" } "ingress":{"sap_ingress":"1" "scheduler_policy":"none" } "mss_name":"TATA_VSNL_STRAT_A206_LAN10" "state":"enabled" }} "service_mtu":"1588" "service_name":"EPIPE service-103076 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "103206":{"customer_id":"1904" "description":"vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "<concat>"UK PLC Stepney Green E1 "<concat>"3DG'" "regular_sdp":{"8035":{"state":"enabled" "vc_id":"103206"}} "sap":{"2/2/3:401.100":{"description":"vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "<concat>"UK "<concat>"PLC "<concat>"Stepney "<concat>"Green "<concat>"E1 "<concat>"3DG'" "egress":{"sap_egress":"11010" "scheduler_policy":"none" } "ingress":{"sap_ingress":"11010" "scheduler_policy":"none" } "mss_name":"SKANSKA_E13DG_A825_LAN1" "state":"enabled" }} "service_mtu":"1988" "service_name":"EPIPE service-103206 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "103256":{"customer_id":"160" "description":"vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:" "regular_sdp":{"8139":{"state":"enabled" "vc_id":"103256"}} "sap":{"1/2/12:15.qinq":{"description":"vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:" "egress":{"sap_egress":"11000" "scheduler_policy":"none" } "ingress":{"sap_ingress":"11000" "scheduler_policy":"none" } "mss_name":"TATA_VSNL_STRAT_A206_LAN5" "state":"enabled" }} "service_mtu":"1988" "service_name":"EPIPE service-103256 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "103742":{"customer_id":"160" "description":"vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:" "regular_sdp":{"8061":{"state":"enabled" "vc_id":"103742"}} "sap":{"5/2/50:20.qinq":{"description":"vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:" "egress":{"sap_egress":"11000" "scheduler_policy":"none" } "ingress":{"sap_ingress":"11000" "scheduler_policy":"none" } "mss_name":"TATA_STRAT_LON_A206_LANA" "state":"enabled" }} "service_mtu":"1588" "service_name":"EPIPE service-103742 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "55517673":{"customer_id":"4" "description":"vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "<concat>"EPIPE#BAACTQ#VLAN 901" "endpoint":{"endpoint":'"SDP"' "revert_time":"infinite" } "pwr_sdp":{"8243":{"endpoint":'"SDP"' "precedence":"1" "state":"enabled" "vc_id":"55517673" } "8245":{"endpoint":'"SDP"' "precedence":"primary" "state":"enabled" "vc_id":"55517673" } } "sap":{"2/2/3:901.qinq":{"description":"2_2_3,H0505824A,Bulldog,VLAN "<concat>"901" "egress":{"sap_egress":"20010" "scheduler_policy":'"NGA-LLU-300M"' } "ingress":{"sap_ingress":"20010" "scheduler_policy":'"NGA-LLU-300M"' } "mss_name":"none" "state":"enabled" }} "service_mtu":"1526" "service_name":"epipe service-64585 "<concat>"DKTN08a-D0105 "<concat>"(6172.16.17.321)" "state":"enabled" } }}}]]<block_end># test_github_issue_37_original_data_template()
<def_stmt>test_github_issue_37_cleaned_up_data <block_start>"""
    Without the bug fix, the problem with the template below was that
    the 'no shutdown' statement belonging to the sap group was also
    matched by the spoke-sdp group and added to the results, causing
    a false match. To fix it, tracking of previously started groups
    was added to the results object: before adding match results to
    the overall results when the PATH differs, we now check that the
    group for this particular item has actually been started; the
    previous logic did not check for that. No issues were noticed
    with the other 200+ tests, and no performance degradation for
    single- or multi-process parsing.
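
    Schematically (a simplified, hypothetical fragment rather than the
    exact data below), the false match looked like this:

        sap 1/1/1:10 create
         no shutdown          <- belongs to the sap group
        exit
        spoke-sdp 8051:103076 create
         no shutdown
        exit

    where the spoke-sdp group's "no shutdown" regex also matched the
    line inside the sap block, even though the spoke-sdp group had not
    been started at that point.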
"""<line_sep>template="""
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}">
epipe {{ service_id }} customer {{ customer_id }} create
<group name="regular_sdp.{{r_spoke_sdp_id}}**">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") }}
</group>
</group>
</group>
"""<line_sep>data="""
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res)<assert_stmt>res<eq>[[{"service":{"epipe":{"103076":{"customer_id":"160" "regular_sdp":{"8051":{"state":"enabled" "vc_id":"103076"}} } "103206":{"customer_id":"1904" "regular_sdp":{"8035":{"state":"enabled" "vc_id":"103206"}} } }}}]]<block_end># test_github_issue_37_cleaned_up_data()
<def_stmt>test_github_issue_37_cleaned_data_template <block_start>template="""
<group name="service">
service {{ ignore }}
<group name="epipe.{{ service_id }}" default="none">
epipe {{ service_id }} customer {{ customer_id }} create
description "{{ description | ORPHRASE }}"
service-mtu {{ service_mtu }}
service-name "{{ service_name | ORPHRASE }}"
<group name="endpoint" default="none">
endpoint {{ endpoint }} create
revert-time {{ revert_time }}
exit {{ _end_ }}
</group>
<group name="sap.{{ sap_id }}" default="none">
sap {{ sap_id | resub(r"\\*", "qinq") | ORPHRASE }} create
description "{{ description | ORPHRASE }}"
multi-service-site "{{ mss_name }}"
<group name="ingress">
ingress {{ _start_ }}
qos {{ sap_ingress | default("1") }}
scheduler-policy {{ scheduler_policy | default("none")}}
exit {{ _end_ }}
</group>
<group name="egress">
egress {{ _start_ }}
scheduler-policy {{ scheduler_policy | default("none") }}
qos {{ sap_egress | default("1)") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="pwr_sdp.{{pwr_spoke_sdp_id}}**" default="none">
spoke-sdp {{ pwr_spoke_sdp_id }}:{{vc_id }} endpoint {{ endpoint }} create
precedence {{ precedence | default("default_precedence") }}
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
<group name="regular_sdp.{{r_spoke_sdp_id}}**" default="none">
spoke-sdp {{ r_spoke_sdp_id }}:{{vc_id }} create
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
no shutdown {{ state | set("enabled") | default("disabled") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
"""<line_sep>data="""
service foo
epipe 103076 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:"
service-mtu 1588
service-name "EPIPE service-103076 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN10"
ingress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
egress
queue-override
queue 1 create
cbs default
mbs 40 kilobytes
rate 10000 cir 10000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8051:103076 create
no shutdown
exit
no shutdown
exit
epipe 103206 customer 1904 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
service-mtu 1988
service-name "EPIPE service-103206 DKTN08a-D0105 (192.168.3.11)"
sap 2/2/3:401.100 create
description "vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA UK PLC Stepney Green E1 3DG'"
multi-service-site "SKANSKA_E13DG_A825_LAN1"
ingress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
egress
qos 11010
queue-override
queue 1 create
cbs default
mbs 1188 kilobytes
rate max cir 47500
exit
queue 3 create
cbs default
mbs 63 kilobytes
rate max cir 2500
exit
exit
exit
collect-stats
accounting-policy 4
no shutdown
exit
spoke-sdp 8035:103206 create
no shutdown
exit
no shutdown
exit
epipe 103256 customer 160 create
description "vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:"
service-mtu 1988
service-name "EPIPE service-103256 DKTN08a-D0105 (192.168.3.11)"
sap 1/2/12:15.* create
description "vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:"
multi-service-site "TATA_VSNL_STRAT_A206_LAN5"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 391 kilobytes
rate 100000 cir 100000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8139:103256 create
no shutdown
exit
no shutdown
exit
epipe 103742 customer 160 create
description "vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:"
service-mtu 1588
service-name "EPIPE service-103742 DKTN08a-D0105 (192.168.3.11)"
sap 5/2/50:20.* create
description "vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:"
multi-service-site "TATA_STRAT_LON_A206_LANA"
ingress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
egress
qos 11000
queue-override
queue 1 create
cbs default
mbs 32 kilobytes
rate 8000 cir 8000
exit
exit
exit
accounting-policy 4
no shutdown
exit
spoke-sdp 8061:103742 create
no shutdown
exit
no shutdown
exit
epipe 55513386 customer 4 vc-switching create
description "vf=EAGG:cn=Bulldog:tl=VF"
service-mtu 1526
spoke-sdp 78:55513386 create
control-word
no shutdown
exit
spoke-sdp 8245:55513386 create
control-word
no shutdown
exit
no shutdown
exit
epipe 55517673 customer 4 create
description "vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA EPIPE#BAACTQ#VLAN 901"
service-mtu 1526
service-name "epipe service-64585 DKTN08a-D0105 (63.130.108.41)"
endpoint "SDP" create
revert-time infinite
exit
sap 2/2/3:901.* create
description "2_2_3,H0505824A,Bulldog,VLAN 901"
ingress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
egress
scheduler-policy "NGA-LLU-300M"
qos 20010
exit
no shutdown
exit
spoke-sdp 8243:55517673 endpoint "SDP" create
collect-stats
precedence 1
no shutdown
exit
spoke-sdp 8245:55517673 endpoint "SDP" create
collect-stats
precedence primary
no shutdown
exit
no shutdown
exit
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"service":{"epipe":{"103076":{"customer_id":"160" "description":"vf=EWL:cn=TATA_COM:tl=2C02495918:st=act:" "regular_sdp":{"8051":{"state":"enabled" "vc_id":"103076"}} "sap":{"1/2/12:20.qinq":{"description":"vf=EWL:cn=TATA_COM:tl=2C02495890:st=act:" "egress":{"sap_egress":"1)" "scheduler_policy":"none" } "ingress":{"sap_ingress":"1" "scheduler_policy":"none" } "mss_name":"TATA_VSNL_STRAT_A206_LAN10" "state":"enabled" }} "service_mtu":"1588" "service_name":"EPIPE service-103076 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "103206":{"customer_id":"1904" "description":"vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "<concat>"UK PLC Stepney Green E1 "<concat>"3DG'" "regular_sdp":{"8035":{"state":"enabled" "vc_id":"103206"}} "sap":{"2/2/3:401.100":{"description":"vf=1273:cn=skanska:tl=3C02407455:st=act:no='SKANSKA "<concat>"UK "<concat>"PLC "<concat>"Stepney "<concat>"Green "<concat>"E1 "<concat>"3DG'" "egress":{"sap_egress":"11010" "scheduler_policy":"none" } "ingress":{"sap_ingress":"11010" "scheduler_policy":"none" } "mss_name":"SKANSKA_E13DG_A825_LAN1" "state":"enabled" }} "service_mtu":"1988" "service_name":"EPIPE service-103206 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "103256":{"customer_id":"160" "description":"vf=EWL:cn=TATA_COMM:tl=2C02490189:st=act:" "regular_sdp":{"8139":{"state":"enabled" "vc_id":"103256"}} "sap":{"1/2/12:15.qinq":{"description":"vf=EWL:cn=TATA_COMM:tl=2C02490171:st=act:" "egress":{"sap_egress":"11000" "scheduler_policy":"none" } "ingress":{"sap_ingress":"11000" "scheduler_policy":"none" } "mss_name":"TATA_VSNL_STRAT_A206_LAN5" "state":"enabled" }} "service_mtu":"1988" "service_name":"EPIPE service-103256 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "103742":{"customer_id":"160" "description":"vf=EWL:cn=TATA_COM:tl=2C02410363:st=act:" "regular_sdp":{"8061":{"state":"enabled" "vc_id":"103742"}} "sap":{"5/2/50:20.qinq":{"description":"vf=EWL:cn=TATA_COM:tl=2C02410338:st=act:" "egress":{"sap_egress":"11000" "scheduler_policy":"none" } "ingress":{"sap_ingress":"11000" "scheduler_policy":"none" } "mss_name":"TATA_STRAT_LON_A206_LANA" "state":"enabled" }} "service_mtu":"1588" "service_name":"EPIPE service-103742 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } "55517673":{"customer_id":"4" "description":"vf=EAGG:cn=Bulldog:tl=2C01291821:st=act:no=NGA "<concat>"EPIPE#BAACTQ#VLAN 901" "endpoint":{"endpoint":'"SDP"' "revert_time":"infinite" } "pwr_sdp":{"8243":{"endpoint":'"SDP"' "precedence":"1" "state":"enabled" "vc_id":"55517673" } "8245":{"endpoint":'"SDP"' "precedence":"primary" "state":"enabled" "vc_id":"55517673" } } "sap":{"2/2/3:901.qinq":{"description":"2_2_3,H0505824A,Bulldog,VLAN "<concat>"901" "egress":{"sap_egress":"20010" "scheduler_policy":'"NGA-LLU-300M"' } "ingress":{"sap_ingress":"20010" "scheduler_policy":'"NGA-LLU-300M"' } "mss_name":"none" "state":"enabled" }} "service_mtu":"1526" "service_name":"epipe service-64585 "<concat>"DKTN08a-D0105 "<concat>"(192.168.3.11)" "state":"enabled" } }}}]]<block_end># test_github_issue_37_cleaned_data_template()
<def_stmt>test_github_issue_42 <block_start>data="""
vrf xyz
address-family ipv4 unicast
import route-target
65000:3507
65000:3511
65000:5453
65000:5535
!
export route-target
65000:5453
65000:5535
!
!
!
"""<line_sep>template="""
<group name="vrfs">
vrf {{name}}
<group name="route-targets">
import route-target {{ _start_ }}
{{ import | to_list | joinmatches }}
</group>
!
<group name="route-targets">
export route-target {{ _start_ }}
{{ export | to_list | joinmatches }}
</group>
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"vrfs":{"name":"xyz" "route-targets":[{"import":["65000:3507" "65000:3511" "65000:5453" "65000:5535" ]} {"export":["65000:5453" "65000:5535"]} ] }}]]<block_end># test_github_issue_42()
<def_stmt>test_github_issue_42_answer <block_start>data="""
vrf xyz
address-family ipv4 unicast
import route-target
65000:3507
65000:3511
65000:5453
65000:5535
!
export route-target
65000:5453
65000:5535
!
!
!
"""<line_sep>template="""
<group name="vrfs">
vrf {{name}}
<group name="import_rts">
import route-target {{ _start_ }}
{{ import_rt | _start_ }}
</group>
!
<group name="export_rts">
export route-target {{ _start_ }}
{{ export_rt | _start_ }}
</group>
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"vrfs":{"export_rts":[{"export_rt":"65000:5453"} {"export_rt":"65000:5535"} ] "import_rts":[{"import_rt":"65000:3507"} {"import_rt":"65000:3511"} {"import_rt":"65000:5453"} {"import_rt":"65000:5535"} ] "name":"xyz" }}]]<block_end># test_github_issue_42_answer()
<def_stmt>test_issue_45 <block_start>data="""
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
DHCP-NGN-SIG {
10.154.6.147;
}
}
group group2 {
active-server-group IN_MEDIA_SIGNALING;
overrides {
trust-option-82;
}
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""<line_sep>template="""
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name1 | _start_ }} {
<group name="helper_addresses*">
{{ helper_address | IP }};
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
active-server-group {{server_group_name2}};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res)<line_sep># assert res == [
# [
# {
# "vrfs": [
# {
# "forwarding_options": {
# "dhcp_relay": {
# "groups": [
# {
# "group_name": "group2",
# "server_group_name2": "IN_MEDIA_SIGNALING",
# },
# {
# "group_name": "NGN-SIG",
# "server_group_name2": "DHCP-NGN-SIG",
# },
# ],
# "server_group": {
# "dhcp": [
# {
# "helper_addresses": [
# {"helper_address": "10.154.6.147"}
# ],
# "server_group_name1": "IN_MEDIA_SIGNALING",
# },
# {
# "helper_addresses": [
# {"helper_address": "10.154.6.147"}
# ],
# "server_group_name1": "DHCP-NGN-SIG",
# },
# {"server_group_name1": "overrides"},
# {"server_group_name1": "overrides"},
# ]
# },
# }
# },
# "name": "vrf2",
# }
# ]
# }
# ]
# ]
    # the issue was fixed by introducing ended_groups tracking in results
    # processing while working on a fix for issue 57
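    # conceptually (a simplified sketch, not the actual implementation),
    # ended_groups is a set of group identifiers recorded once a group's
    # {{ _end_ }} line is seen, so that later stray matches can be
    # rejected for groups that have already been closed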
<assert_stmt>res<eq>[[{"vrfs":[{"forwarding_options":{"dhcp_relay":{"groups":[{"group_name":"group2" "server_group_name2":"IN_MEDIA_SIGNALING" } {"group_name":"NGN-SIG" "server_group_name2":"DHCP-NGN-SIG" } ] "server_group":{"dhcp":[{"helper_addresses":[{"helper_address":"10.154.6.147"}] "server_group_name1":"IN_MEDIA_SIGNALING" } {"helper_addresses":[{"helper_address":"10.154.6.147"}] "server_group_name1":"DHCP-NGN-SIG" } ]} }} "name":"vrf2" }]}]]<block_end># test_issue_45()
<def_stmt>test_issue_45_1 <block_start>data="""
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""<line_sep>template="""
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name | _start_ }} {
</group>
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
</group>
</group>
</group>
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"vrfs":[{"forwarding_options":{"dhcp_relay":{"groups":[{"group_name":"NGN-SIG"}] "server_group":{"dhcp":[{"server_group_name":"IN_MEDIA_SIGNALING"} {"server_group_name":"overrides"} ]} }} "name":"vrf2" }]}]]<block_end># test_issue_45_1()
<def_stmt>test_issue_45_filtering_fix <block_start>data="""
vrf2 {
forwarding-options {
dhcp-relay {
server-group {
IN_MEDIA_SIGNALING {
10.154.6.147;
}
DHCP-NGN-SIG {
10.154.6.147;
}
}
group group2 {
active-server-group IN_MEDIA_SIGNALING;
overrides {
trust-option-82;
}
}
group NGN-SIG {
active-server-group DHCP-NGN-SIG;
overrides {
trust-option-82;
}
}
}
}
}
"""<line_sep>template="""
<group name="vrfs*">
{{ name | _start_ }} {
<group name="forwarding_options">
forwarding-options { {{ _start_ }}
<group name="dhcp_relay">
dhcp-relay { {{ _start_ }}
<group name="server_group">
server-group { {{ _start_ }}
<group name="dhcp*">
{{ server_group_name1 | _start_ | exclude("overrides") }} {
<group name="helper_addresses*">
{{ helper_address | IP }};
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="groups*">
group {{ group_name | _start_ }} {
active-server-group {{server_group_name2}};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"vrfs":[{"forwarding_options":{"dhcp_relay":{"groups":[{"group_name":"group2" "server_group_name2":"IN_MEDIA_SIGNALING" } {"group_name":"NGN-SIG" "server_group_name2":"DHCP-NGN-SIG" } ] "server_group":{"dhcp":[{"helper_addresses":[{"helper_address":"10.154.6.147"}] "server_group_name1":"IN_MEDIA_SIGNALING" } {"helper_addresses":[{"helper_address":"10.154.6.147"}] "server_group_name1":"DHCP-NGN-SIG" } ]} }} "name":"vrf2" }]}]]<block_end># test_issue_45_filtering_fix()
<def_stmt>test_issue_47_answer <block_start>data="""
Some text which indicates that below block should be included in results ABC
interface Loopback0
description Router-id-loopback
ip address 192.168.0.113/24
!
Some text which indicates that below block should be included in results DEF
interface Loopback2
description Router-id-loopback 2
ip address 192.168.0.114/24
!
Some text which indicates that below block should NOT be included in results
interface Vlan778
description CPE_Acces_Vlan
ip address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/124
ip vrf CPE1
!
Some text which indicates that below block should be included in results GKL
interface Loopback3
description Router-id-loopback 3
ip address 192.168.0.115/24
!
"""<line_sep>template="""
Some text which indicates that below block should be included in results ABC {{ _start_ }}
Some text which indicates that below block should be included in results DEF {{ _start_ }}
Some text which indicates that below block should be included in results GKL {{ _start_ }}
interface {{ interface }}
ip address {{ ip }}/{{ mask }}
description {{ description | re(".+") }}
ip vrf {{ vrf }}
! {{ _end_ }}
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=150)
<assert_stmt>res<eq>[[[{"description":"Router-id-loopback" "interface":"Loopback0" "ip":"192.168.0.113" "mask":"24" } {"description":"Router-id-loopback 2" "interface":"Loopback2" "ip":"192.168.0.114" "mask":"24" } {"description":"Router-id-loopback 3" "interface":"Loopback3" "ip":"192.168.0.115" "mask":"24" } ]]]<block_end># test_issue_47_answer()
<def_stmt>test_issue_48_answer <block_start>data="""
ECON*3400 The Economics of Personnel Management U (3-0) [0.50]
In this course, we examine the economics of personnel management in organizations.
Using mainstream microeconomic and behavioural economic theory, we will consider
such issues as recruitment, promotion, financial and non-financial incentives,
compensation, job performance, performance evaluation, and investment in personnel.
The interplay between theoretical models and empirical evidence will be emphasized in
considering different approaches to the management of personnel.
Prerequisite(s): ECON*2310 or ECON*2200
Department(s): Department of Economics and Finance
ECON*4400 The Economics of Personnel Management U (7-1) [0.90]
In this course, we examine the economics of personnel management in organizations.
Using mainstream microeconomic and behavioural economic theory, we will consider
such issues as recruitment, promotion, financial and non-financial incentives,
compensation, job performance, performance evaluation, and investment in personnel.
Prerequisite(s): ECON*2310
Department(s): Department of Economics
"""<line_sep>template="""
<vars>
descr_chain = [
"PHRASE",
"exclude('Prerequisite(s)')",
"exclude('Department(s)')",
"joinmatches"
]
</vars>
<group>
{{ course }}*{{ code }} {{ name | PHRASE }} {{ semester }} ({{ lecture_lab_time }}) [{{ weight }}]
{{ description | chain(descr_chain) }}
Prerequisite(s): {{ prereqs | ORPHRASE }}
Department(s): {{ department | ORPHRASE }}
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=150)
<assert_stmt>res<eq>[[[{"code":"3400" "course":"ECON" "department":"Department of Economics and Finance" "description":"In this course, we examine the economics of personnel management in organizations.\n"<concat>"Using mainstream microeconomic and behavioural economic theory, we will consider\n"<concat>"such issues as recruitment, promotion, financial and non-financial incentives,\n"<concat>"compensation, job performance, performance evaluation, and investment in personnel.\n"<concat>"The interplay between theoretical models and empirical evidence will be emphasized in\n"<concat>"considering different approaches to the management of personnel." "lecture_lab_time":"3-0" "name":"The Economics of Personnel Management" "prereqs":"ECON*2310 or ECON*2200" "semester":"U" "weight":"0.50" } {"code":"4400" "course":"ECON" "department":"Department of Economics" "description":"In this course, we examine the economics of personnel management in organizations.\n"<concat>"Using mainstream microeconomic and behavioural economic theory, we will consider\n"<concat>"such issues as recruitment, promotion, financial and non-financial incentives,\n"<concat>"compensation, job performance, performance evaluation, and investment in personnel." "lecture_lab_time":"7-1" "name":"The Economics of Personnel Management" "prereqs":"ECON*2310" "semester":"U" "weight":"0.90" } ]]]<block_end># test_issue_48_answer()
<def_stmt>test_issue_48_answer_more <block_start>data="""
IBIO*4521 Thesis in Integrative Biology F (0-12) [1.00]
This course is the first part of the two-semester course IBIO*4521/2. This course is
a two-semester (F,W) undergraduate project in which students conduct a comprehensive,
independent research project in organismal biology under the supervision of a faculty
member in the Department of Integrative Biology. Projects involve a thorough literature
review, a research proposal, original research communicated in oral and poster
presentations, and in a written, publication quality document. This two-semester course
offers students the opportunity to pursue research questions and experimental designs
that cannot be completed in the single semester research courses. Students must make
arrangements with both a faculty supervisor and the course coordinator at least one
semester in advance. A departmental registration form must be obtained from the course
coordinator and submitted no later than the second class day of the fall semester. This is
a twosemester course offered over consecutive semesters F-W. When you select this
course, you must select IBIO*4521 in the Fall semester and IBIO*4522 in the Winter
semester.A grade will not be assigned to IBIO*4521 until IBIO*4522 has been completed.
Prerequisite(s): 12.00 credits
Restriction(s): Normally a minimum cumulative average of 70%. Permission of course
coordinator.
Department(s): Department of Integrative Biology
IBIO*4533 Thesis in Integrative Biology F (0-14) [2.00]
This course is the first part of the two-semester course IBIO*4521/2. This course is
a two-semester (F,W) undergraduate project in which students conduct a comprehensive,
independent research project in organismal biology under the supervision of a faculty
member in the Department of Integrative Biology.
Restriction(s): Normally a minimum cumulative average of 80%. Permission of course
coordinator. Normally a minimum cumulative average of 90%. Permission of course
coordinator.
Department(s): Department of Integrative Biology
"""<line_sep>template="""
<vars>
chain_1 = [
"ORPHRASE",
"exclude('Prerequisite(s)')",
"exclude('Department(s)')",
"exclude('Restriction(s)')",
"joinmatches"
]
</vars>
<group>
{{ course }}*{{ code }} {{ name | PHRASE }} {{ semester }} ({{ lecture_lab_time }}) [{{ weight }}]
{{ description | chain(chain_1) }}
Prerequisite(s): {{ prereqs | ORPHRASE }}
Department(s): {{ department | ORPHRASE }}
<group name="_">
Restriction(s): {{ restrictions | PHRASE | joinmatches }}
{{ restrictions | chain(chain_1) }}
</group>
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=150)<assert_stmt>res<eq>[[[{"code":"4521" "course":"IBIO" "department":"Department of Integrative Biology" "description":"This course is the first part of the two-semester course IBIO*4521/2. This course is\n"<concat>"a two-semester (F,W) undergraduate project in which students conduct a comprehensive,\n"<concat>"independent research project in organismal biology under the supervision of a faculty\n"<concat>"member in the Department of Integrative Biology. Projects involve a thorough literature\n"<concat>"review, a research proposal, original research communicated in oral and poster\n"<concat>"presentations, and in a written, publication quality document. This two-semester course\n"<concat>"offers students the opportunity to pursue research questions and experimental designs\n"<concat>"that cannot be completed in the single semester research courses. Students must make\n"<concat>"arrangements with both a faculty supervisor and the course coordinator at least one\n"<concat>"semester in advance. A departmental registration form must be obtained from the course\n"<concat>"coordinator and submitted no later than the second class day of the fall semester. This is\n"<concat>"a twosemester course offered over consecutive semesters F-W. When you select this\n"<concat>"course, you must select IBIO*4521 in the Fall semester and IBIO*4522 in the Winter\n"<concat>"semester.A grade will not be assigned to IBIO*4521 until IBIO*4522 has been completed." "lecture_lab_time":"0-12" "name":"Thesis in Integrative Biology" "prereqs":"12.00 credits" "restrictions":"Normally a minimum cumulative average of 70%. Permission of course\ncoordinator." "semester":"F" "weight":"1.00" } {"code":"4533" "course":"IBIO" "department":"Department of Integrative Biology" "description":"This course is the first part of the two-semester course IBIO*4521/2. This course is\n"<concat>"a two-semester (F,W) undergraduate project in which students conduct a comprehensive,\n"<concat>"independent research project in organismal biology under the supervision of a faculty\n"<concat>"member in the Department of Integrative Biology." "lecture_lab_time":"0-14" "name":"Thesis in Integrative Biology" "restrictions":"Normally a minimum cumulative average of 80%. Permission of course\n"<concat>"coordinator. Normally a minimum cumulative average of 90%. Permission of course\n"<concat>"coordinator." "semester":"F" "weight":"2.00" } ]]]<block_end># test_issue_48_answer_more()
<def_stmt>test_slack_channel_answer_for_Noif <block_start>data="""
# not disabled and no comment
/ip address add address=10.4.1.245 interface=lo0 network=10.4.1.245
/ip address add address=10.4.1.246 interface=lo1 network=10.4.1.246
# not disabled and comment with no quotes
/ip address add address=10.9.48.241/29 comment=SITEMON interface=ether2 network=10.9.48.240
/ip address add address=10.9.48.233/29 comment=Camera interface=vlan205@bond1 network=10.9.48.232
/ip address add address=10.9.49.1/24 comment=SM-Management interface=vlan200@bond1 network=10.9.49.0
# not disabled and comment with quotes
/ip address add address=10.4.1.130/30 comment="to core01" interface=vlan996@bond4 network=10.4.1.128
/ip address add address=10.4.250.28/29 comment="BH 01" interface=vlan210@bond1 network=10.4.250.24
/ip address add address=10.9.50.13/30 comment="Cust: site01-PE" interface=vlan11@bond1 network=10.9.50.12
# disabled no comment
/ip address add address=10.0.0.2/30 disabled=yes interface=bridge:customer99 network=10.0.0.0
# disabled with comment
/ip address add address=169.254.1.100/24 comment=Cambium disabled=yes interface=vlan200@bond1 network=169.254.1.0
# disabled with comment with quotes
/ip address add address=10.4.248.20/29 comment="Backhaul to AGR (Test Segment)" disabled=yes interface=vlan209@bond1 network=10.4.248.16
"""<line_sep>template="""
<vars>
default_values = {
"comment": "",
"disabled": False
}
</vars>
<group default="default_values">
## not disabled and no comment
/ip address add address={{ ip | _start_ }} interface={{ interface }} network={{ network }}
## not disabled and comment with/without quotes
/ip address add address={{ ip | _start_ }}/{{ mask }} comment={{ comment | ORPHRASE | exclude("disabled=") | strip('"')}} interface={{ interface }} network={{ network }}
## disabled no comment
/ip address add address={{ ip | _start_ }}/{{ mask }} disabled={{ disabled }} interface={{ interface }} network={{ network }}
## disabled with comment with/without quotes
/ip address add address={{ ip | _start_ }}/{{ mask }} comment={{ comment | ORPHRASE | exclude("disabled=") | strip('"') }} disabled={{ disabled }} interface={{ interface }} network={{ network }}
</group>
"""<line_sep>parser=ttp(data=data template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result(structure="flat_list")<line_sep># pprint.pprint(res, width=200)
<assert_stmt>res<eq>[{"comment":"" "disabled":<false> "interface":"lo0" "ip":"10.4.1.245" "network":"10.4.1.245" } {"comment":"" "disabled":<false> "interface":"lo1" "ip":"10.4.1.246" "network":"10.4.1.246" } {"comment":"SITEMON" "disabled":<false> "interface":"ether2" "ip":"10.9.48.241" "mask":"29" "network":"10.9.48.240" } {"comment":"Camera" "disabled":<false> "interface":"vlan205@bond1" "ip":"10.9.48.233" "mask":"29" "network":"10.9.48.232" } {"comment":"SM-Management" "disabled":<false> "interface":"vlan200@bond1" "ip":"10.9.49.1" "mask":"24" "network":"10.9.49.0" } {"comment":"to core01" "disabled":<false> "interface":"vlan996@bond4" "ip":"10.4.1.130" "mask":"30" "network":"10.4.1.128" } {"comment":"BH 01" "disabled":<false> "interface":"vlan210@bond1" "ip":"10.4.250.28" "mask":"29" "network":"10.4.250.24" } {"comment":"Cust: site01-PE" "disabled":<false> "interface":"vlan11@bond1" "ip":"10.9.50.13" "mask":"30" "network":"10.9.50.12" } {"comment":"" "disabled":"yes" "interface":"bridge:customer99" "ip":"10.0.0.2" "mask":"30" "network":"10.0.0.0" } {"comment":"Cambium" "disabled":"yes" "interface":"vlan200@bond1" "ip":"169.254.1.100" "mask":"24" "network":"169.254.1.0" } {"comment":"Backhaul to AGR (Test Segment)" "disabled":"yes" "interface":"vlan209@bond1" "ip":"10.4.248.20" "mask":"29" "network":"10.4.248.16" } ]<block_end># test_slack_channel_answer_for_Noif()
<def_stmt>test_slack_answer_2 <block_start>data_to_parse="""
port 1/1/1
description "port 1 description"
ethernet
mode hybrid
encap-type dot1q
crc-monitor
sd-threshold 5 multiplier 5
sf-threshold 3 multiplier 5
window-size 60
exit
network
queue-policy "ncq-only"
accounting-policy 12
collect-stats
egress
queue-group "qos-policy-for-router1" instance 1 create
accounting-policy 1
collect-stats
agg-rate
rate 50000
exit
exit
exit
exit
access
egress
queue-group "policer-output-queues" instance 1 create
accounting-policy 1
collect-stats
exit
exit
exit
lldp
dest-mac nearest-bridge
admin-status tx-rx
notification
tx-tlvs port-desc sys-name sys-desc sys-cap
tx-mgmt-address system
exit
exit
down-on-internal-error
exit
no shutdown
exit
port 1/1/2
description "another port to a another router"
ethernet
mode hybrid
encap-type dot1q
egress-scheduler-policy "qos-port-scheduler"
crc-monitor
sd-threshold 5 multiplier 5
sf-threshold 3 multiplier 5
window-size 60
exit
access
egress
queue-group "policer-output-queues" instance 1 create
accounting-policy 1
collect-stats
exit
exit
exit
down-on-internal-error
exit
no shutdown
exit
port 1/1/3
description "port 3 to some third router"
ethernet
mode access
encap-type dot1q
mtu 2000
egress-scheduler-policy "strict-scheduler"
network
queue-policy "ncq-only"
accounting-policy 12
collect-stats
egress
queue-group "some-shaping-policy" instance 1 create
accounting-policy 1
collect-stats
agg-rate
rate 50000
exit
exit
queue-group "another-shaping-policy" instance 1 create
accounting-policy 1
collect-stats
agg-rate
rate 50000
exit
exit
queue-group "this-shaper-is-cool" instance 1 create
agg-rate
rate 1000000
exit
exit
exit
exit
exit
no shutdown
exit
"""<line_sep>template="""
<group name="system.ports">
port {{ id }}
shutdown {{ admin_enabled | set(false) }}
description "{{ description | ORPHRASE | strip('"') }}"
<group name="ethernet">
ethernet {{ _start_ }}
mode {{ mode }}
encap-type {{ encap_type }}
mtu {{ mtu | DIGIT }}
egress-scheduler-policy {{ egress_sched_policy | strip('"') }}
loopback internal persistent {{ loop_internal | set(true) }}
<group name="network">
network {{ _start_ }}
queue-policy {{ queue_policy | ORPHRASE | strip('"') }}
accounting-policy {{ accounting_policy | DIGIT }}
collect-stats {{ collect_stats | set(true) }}
<group name="egress">
egress {{ _start_ }}
<group name="queuegroups*">
queue-group {{ name | strip('"') }} instance 1 create
rate {{ agg_rate | DIGIT }}
exit {{_end_}}
</group>
## this "exit {{ _end_ }}" had wrong indentation level, leading to
## group name="egress" finishing too early
exit {{_end_}}
</group>
exit {{_end_}}
</group>
lldp {{ lldp_enabled | set(true) }}
exit {{_end_}}
</group>
no shutdown {{admin_enabled | set(true)}}
exit {{_end_}}
</group>
"""<line_sep>parser=ttp(data=data_to_parse template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=150)<assert_stmt>res<eq>[[{"system":{"ports":[{"admin_enabled":<true> "description":"port 1 description" "ethernet":{"encap_type":"dot1q" "lldp_enabled":<true> "mode":"hybrid" "network":{"accounting_policy":"12" "collect_stats":<true> "egress":{"queuegroups":[{"agg_rate":"50000" "name":"qos-policy-for-router1" }]} "queue_policy":"ncq-only" } } "id":"1/1/1" } {"admin_enabled":<true> "description":"another port to a another router" "ethernet":{"egress_sched_policy":"qos-port-scheduler" "encap_type":"dot1q" "mode":"hybrid" } "id":"1/1/2" } {"admin_enabled":<true> "description":"port 3 to some third router" "ethernet":{"egress_sched_policy":"strict-scheduler" "encap_type":"dot1q" "mode":"access" "mtu":"2000" "network":{"accounting_policy":"12" "collect_stats":<true> "egress":{"queuegroups":[{"agg_rate":"50000" "name":"some-shaping-policy" } {"agg_rate":"50000" "name":"another-shaping-policy" } {"agg_rate":"1000000" "name":"this-shaper-is-cool" } ]} "queue_policy":"ncq-only" } } "id":"1/1/3" } ]}}]]<block_end># test_slack_answer_2()
<def_stmt>test_slack_answer_3 <block_start>"""
    The problem was that interfaces were matched by the regexes of both the
    ospf and the ospf3 groups, and the decision logic was not able to work
    out to which group a result should belong. The behavior was changed to
    check whether a match is a child of the current record group and to use
    the match only if so. The group id encoding also had to change from a
    string to a tuple of two elements: ("group path", "group index",)
    Here is some debug output captured before the problem was fixed:
self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf3**::1
re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf**.interfaces*::0
re_idex: 0
self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf3**::1
re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf3**.interfaces*::0
re_idex: 1
    # the problem occurred because the logic could not decide that it needed to use this match
self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf**::0
re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf**.interfaces*::0
re_idex: 0
    # the problem occurred because the logic was picking up this match
self.record["GRP_ID"]: service.vprns*.{{id}}**.ospf**::0
re_["GROUP"].group_id: service.vprns*.{{id}}**.ospf3**.interfaces*::0
re_idex: 1
Wrong results:
[[{'service': {'vprns': [{'4': {'name': 'ospf_version3_vprn',
'ospf': {'area': '0.0.0.0', 'interfaces': [{'name': 'interface-one'}]},
'ospf3': {'area': '0.0.0.0', 'interfaces': [{'name': 'interface-two'}]}},
'5': {'name': 'vprn5', 'ospf': {'area': '0.0.0.0'},
'ospf3': {'interfaces': [{'name': 'interface-three'}]}}}]}}]]
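
    After the fix the group id is a tuple of two elements, so the debug
    entries above would read roughly as follows (hypothetical rendering):
        self.record["GRP_ID"]: ("service.vprns*.{{id}}**.ospf**", 0)
    which allows the decision logic to verify that a match's group is a
    child of the current record group before accepting the match.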
"""<line_sep>data="""
service
vprn 4 name "ospf_version3_vprn" customer 40 create
ospf
area 0.0.0.0
interface "interface-one"
ospf3 0
area 0.0.0.0
interface "interface-two"
vprn 5 name "vprn5" customer 50 create
ospf
area 0.0.0.0
interface "interface-three"
"""<line_sep>template="""
<group name="service.vprns*.{{id}}**">
vprn {{ id }} name {{ name | ORPHRASE | strip('"') }} customer {{ ignore }} create
<group name="ospf**">
ospf {{ _start_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') }}
</group>
</group>
<group name="ospf3**">
ospf3 0 {{ _start_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') }}
</group>
</group>
</group>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>[[{"service":{"vprns":[{"4":{"name":"ospf_version3_vprn" "ospf":{"area":"0.0.0.0" "interfaces":[{"name":"interface-one"}] } "ospf3":{"area":"0.0.0.0" "interfaces":[{"name":"interface-two"}] } } "5":{"name":"vprn5" "ospf":{"area":"0.0.0.0" "interfaces":[{"name":"interface-three"}] } } }]}}]]<block_end># test_slack_answer_3()
<def_stmt>test_slack_answer_3_full <block_start>data="""
service
vprn 1 name "vprn1" customer 10 create
interface "loopback" create
exit
interface "interface-one" create
exit
interface "interface-two" create
exit
interface "bgp-interface" create
exit
exit
vprn 2 name "vprn2" customer 20 create
interface "loopback" create
exit
interface "interface-two" create
exit
interface "bgp-interface" create
exit
exit
vprn 3 name "vprn3" customer 30 create
interface "loopback" create
exit
interface "interface-two" create
exit
exit
vprn 4 name "ospf_version3_vprn" customer 40 create
interface "loopback" create
exit
interface "interface-two" create
exit
exit
vprn 5 name "vprn5" customer 50 create
interface "loopback" create
exit
interface "interface-two" create
exit
interface "bgp-interface" create
exit
exit
vprn 1 name "vprn1" customer 10 create
interface "loopback" create
address 10.10.10.1/32
loopback
exit
interface "interface-one" create
address 10.10.10.10/30
sap 1/1/1:10 create
exit
exit
interface "interface-two" create
address 10.10.10.100/31
sap lag-5:80 create
exit
exit
interface "bgp-interface" create
address 10.10.10.200/31
sap lag-4:100 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 2 name "vprn2" customer 20 create
interface "interface-two" create
address 10.11.11.10/31
sap lag-1:50 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 3 name "vprn3" customer 30 create
interface "loopback" create
address 10.12.12.12/32
loopback
exit
interface "interface-two" create
address 10.12.12.100/31
sap lag-5:33 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 4 name "ospf_version3_vprn" customer 40 create
interface "loopback" create
address 10.40.40.10/32
ipv6
address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:ae46/128
exit
loopback
exit
interface "interface-two" create
address 10.40.40.100/31
ipv6
address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1111/64
exit
sap lag-5:800 create
exit
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
ospf3 0
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
vprn 5 name "vprn5" customer 50 create
interface "loopback" create
address 10.50.50.50/32
loopback
exit
interface "interface-two" create
address 10.50.50.100/31
sap lag-5:5 create
exit
exit
interface "bgp-interface" create
address 10.50.50.200/31
sap lag-1:602 create
exit
exit
bgp
group "eBGP"
peer-as 4444
neighbor 10.50.50.201
exit
exit
no shutdown
exit
ospf
area 0.0.0.0
interface "interface-two"
passive
no shutdown
exit
exit
no shutdown
exit
no shutdown
exit
exit
"""<line_sep>template="""
#-------------------------------------------------- {{ ignore }}
echo "Service Configuration" {{ ignore }}
#-------------------------------------------------- {{ ignore }}
service {{ ignore }}
<group name="service.vprns*.{{id}}**">
vprn {{ id }} name {{ name | ORPHRASE | strip('"') }} customer {{ ignore }} create
shutdown {{ admin_enabled | set("False") }}
description {{ description | ORPHRASE | strip('"') }}
vrf-import {{ import_policy | ORPHRASE | strip('"') }}
router-id {{ router_id }}
autonomous-system {{ local_as }}
route-distinguisher {{ loopback_ip }}:{{ vrf_routedist }}
vrf-target target:{{ ignore }}:{{ vrf_routetarget }}
vrf-target {{ vrf_export }} target:{{ ignore }}:{{ vrf_routetarget }}
<group name="interfaces*.{{name}}**">
interface {{ name | ORPHRASE | strip('"') }} create
shutdown {{ admin_enabled | set("False") }}
description {{ description | ORPHRASE | strip('"') }}
address {{ address | IP }}/{{ mask | DIGIT }}
ip-mtu {{ mtu }}
bfd {{ bfd_timers }} receive {{ ignore }} multiplier {{ bfd_interval }}
<group name="vrrp">
vrrp {{ instance }}
backup {{ backup }}
priority {{ priority }}
policy {{ policy }}
ping-reply {{ pingreply | set("True") }}
traceroute-reply {{ traceroute_reply | set("True") }}
init-delay {{ initdelay }}
message-interval {{ message_int_seconds }}
message-interval milliseconds {{ message_int_milliseconds }}
bfd-enable 1 interface {{ bfd_interface | ORPHRASE | strip('"')}} dst-ip {{ bfd_dst_ip }}
exit {{ _end_ }}
</group>
<group name="ipv6">
ipv6 {{ _start_ }}
address {{ address | IPV6 }}/{{ mask | DIGIT }}
address {{ address | _start_ | IPV6 }}/{{ mask | DIGIT }} dad-disable
link-local-address {{ linklocal_address | IPV6 }} dad-disable
<group name="vrrp">
vrrp {{ instance | _start_ }}
<group name="backup*">
backup {{ ip }}
</group>
priority {{ priority }}
policy {{ policy }}
ping-reply {{ pingreplay | set("True") }}
traceroute-reply {{ traceroute_reply | set("True") }}
init-delay {{ initdelay }}
message-interval milliseconds {{ message_int_milliseconds }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
<group name="vpls">
vpls {{ vpls_name | ORPHRASE | strip('"') | _start_ }}
exit {{ _end_ }}
</group>
<group name="sap**">
sap {{ port | _start_ }}:{{ vlan | DIGIT }} create
ingress {{ _exact_ }}
qos {{ qos_sap_ingress }}
<group name="_">
egress {{ _start_ }}
qos {{ qos_sap_egress }}
</group>
collect-stats {{ collect_stats | set("True") }}
accounting-policy {{ accounting_policy }}
exit {{ _end_}}
</group>
exit {{ _end_}}
</group>
<group name="staticroutes*">
static-route-entry {{ prefix | PREFIX | _start_ }}
black-hole {{ blackhole | set("True") }}
next-hop {{ nexthop | IP }}
shutdown {{ admin_enabled | set("False") }}
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
<group name="aggregates">
aggregate {{ agg_block | PREFIX | _start_ }} summary-only
</group>
<group name="router_advertisement">
router-advertisement {{ _start_ }}
interface {{ interface | ORPHRASE | strip('"') }}
use-virtual-mac {{ use_virtualmac | set("True") }}
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
<group name="bgp**">
bgp {{ _start_ }}
min-route-advertisement {{ min_route_advertisement | DIGIT }}
<group name="peergroups*">
group {{ name | ORPHRASE | strip('"') }}
family {{ family | ORPHRASE | split(" ") }}
type {{ peer_type | ORPHRASE }}
import {{ importpolicy | ORPHRASE | strip('"') }}
export {{ exportpolicy | ORPHRASE | strip('"') }}
peer-as {{ remote_as }}
bfd-enable {{ bfd_enabled | set("True") }}
<group name="neighbors*">
neighbor {{ address | IP | _start_ }}
neighbor {{ address | IPV6 | _start_ }}
shutdown {{ admin_enabled | set("False") }}
keepalive {{ keepalive }}
hold-time {{ holdtime }}
bfd-enable {{ bfd_enabled | set("True") }}
as-override {{ as_override | set("True") }}
exit {{ _end_ }}
</group>
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") | _start_ }}
exit {{ _end_ }}
</group>
<group name="ospf**">
ospf {{ _start_ }}{{ _exact_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') | _start_ }}
passive {{ passive | set("True") }}
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
<group name="ospf3**">
ospf3 0 {{ _start_ }}{{ _exact_ }}
area {{ area }}
<group name="interfaces*">
interface {{ name | ORPHRASE | strip('"') | _start_ }}
passive {{ passive | set("True") }}
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
no shutdown {{ admin_enabled | set("True") }}
exit {{ _end_ }}
</group>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=100)<assert_stmt>res<eq>[[{"service":{"vprns":[{"1":{"admin_enabled":"True" "interfaces":[{"bgp-interface":{"address":"10.10.10.200" "mask":"31" "sap":{"port":"lag-4" "vlan":"100"} } "interface-one":{"address":"10.10.10.10" "mask":"30" "sap":{"port":"1/1/1" "vlan":"10"} } "interface-two":{"address":"10.10.10.100" "mask":"31" "sap":{"port":"lag-5" "vlan":"80"} } "loopback":{"address":"10.10.10.1" "mask":"32" } }] "name":"vprn1" "ospf":{"admin_enabled":"True" "area":"0.0.0.0" "interfaces":[{"name":"interface-two" "passive":"True"}] } } "2":{"admin_enabled":"True" "interfaces":[{"bgp-interface":{} "interface-two":{"address":"10.11.11.10" "mask":"31" "sap":{"port":"lag-1" "vlan":"50"} } "loopback":{} }] "name":"vprn2" "ospf":{"admin_enabled":"True" "area":"0.0.0.0" "interfaces":[{"name":"interface-two" "passive":"True"}] } } "3":{"admin_enabled":"True" "interfaces":[{"interface-two":{"address":"10.12.12.100" "mask":"31" "sap":{"port":"lag-5" "vlan":"33"} } "loopback":{"address":"10.12.12.12" "mask":"32" } }] "name":"vprn3" "ospf":{"admin_enabled":"True" "area":"0.0.0.0" "interfaces":[{"name":"interface-two" "passive":"True"}] } } "4":{"admin_enabled":"True" "interfaces":[{"interface-two":{"address":"10.40.40.100" "ipv6":{"address":"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:1111" "mask":"64" } "mask":"31" "sap":{"port":"lag-5" "vlan":"800"} } "loopback":{"address":"10.40.40.10" "ipv6":{"address":"fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:ae46" "mask":"128" } "mask":"32" } }] "name":"ospf_version3_vprn" "ospf":{"admin_enabled":"True" "area":"0.0.0.0" "interfaces":[{"name":"interface-two" "passive":"True"}] } "ospf3":{"admin_enabled":"True" "area":"0.0.0.0" "interfaces":[{"name":"interface-two" "passive":"True"}] } } "5":{"admin_enabled":"True" "bgp":{"admin_enabled":"True" "peergroups":[{"name":"eBGP" "neighbors":[{"address":"10.50.50.201"}] "remote_as":"4444" }] } "interfaces":[{"bgp-interface":{"address":"10.50.50.200" "mask":"31" "sap":{"port":"lag-1" "vlan":"602"} } "interface-two":{"address":"10.50.50.100" "mask":"31" "sap":{"port":"lag-5" "vlan":"5"} } "loopback":{"address":"10.50.50.50" "mask":"32" } }] "name":"vprn5" "ospf":{"area":"0.0.0.0" "interfaces":[{"name":"interface-two" "passive":"True"}] } } }]}}]]<block_end># test_slack_answer_3_full()
<def_stmt>test_issue_45_for_junos_cfg <block_start>data="""
system {
host-name LAB-MX-1;
time-zone some/time;
default-address-selection;
no-redirects;
no-ping-record-route;
no-ping-time-stamp;
tacplus-server {
1.1.1.1 {
port 49;
secret "<SECRET_HASH>"; ## SECRET-DATA
            source-address 5.5.5.5;
        }
        2.2.2.2 {
            port 49;
            secret "<SECRET_HASH>"; ## SECRET-DATA
            source-address 5.5.5.5;
        }
        4.4.4.4 {
            port 49;
            secret "<SECRET_HASH>"; ## SECRET-DATA
            source-address 5.5.5.5;
}
}
services {
ssh {
root-login deny;
no-tcp-forwarding;
protocol-version v2;
max-sessions-per-connection 32;
client-alive-count-max 3;
client-alive-interval 10;
connection-limit 10;
rate-limit 5;
}
netconf {
ssh {
connection-limit 10;
rate-limit 4;
}
}
}
}
"""<line_sep>template="""
<group name="system_level">
system { {{ _start_ }}
host-name {{ HOSTNAME }};
time-zone {{ TZ }};
default-address-selection; {{ default_address_selection | set(True) }}
no-redirects; {{ no_redirects | set(True) }}
no-ping-record-route; {{ no_ping_record_route | set(True) }}
no-ping-time-stamp; {{ no_ping_time_stamp | set(True) }}
<group name="services">
services { {{ _start_ }}
<group name="{{ service }}">
{{ service }} {
http; {{ http | set(true) }}
https; {{ https | set(true) }}
no-tcp-forwarding; {{ no-tcp-fwding | set(true) }}
protocol-version {{ ssh-proto }};
connection-limit {{ connection-limit | DIGIT }};
rate-limit {{rate-limit | DIGIT }};
root-login deny; {{ root-login | set(false) }}
max-sessions-per-connection {{ max-sessions | DIGIT }};
client-alive-count-max {{ client-alive-count-max | DIGIT }};
client-alive-interval {{ client-alive-interval | DIGIT }};
<group name="ssh">
ssh; {{ ssh | set(true) }}
</group>
<group name="ssh">
ssh { {{ _start_ }}
connection-limit {{ connection-limit | DIGIT }};
rate-limit {{ rate-limit | DIGIT }};
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
} {{ _end_ }}
</group>
<group name="internet-options">
internet-options { {{ _start_ }}
icmpv4-rate-limit packet-rate {{ packet-rate| DIGIT }};
icmpv6-rate-limit packet-rate {{ packet-rate| DIGIT }};
no-source-quench; {{ no-source-quench | set(true) }}
tcp-drop-synfin-set; {{ tcp-drop-synfin-set | set(true) }}
no-tcp-reset {{ no-tcp-reset }};
} {{ _end_ }}
</group>
authentication-order [{{ authentication-order }}];
<group name="ports">
ports { {{ _start_ }}
auxiliary disable; {{ auxiliary | set(false) }}
} {{ _end_ }}
</group>
<group name="root-authentication">
root-authentication { {{ _start_ }}
encrypted-password "{{ <PASSWORD>-password }}"; ## SECRET-DATA
} {{ _end_ }}
</group>
<group name="dns" itemize="name_server">
name-server { {{ _start_ }}
{{ name_server | IP | _line_ | to_list }};
} {{ _end_ }}
</group>
<group name="commit">
commit { {{ _start_ }}
synchronize; {{ commit_sync | set(true) }}
persist-groups-inheritance; {{ commit_persist-groups-inherit | set(true) }}
} {{ _end_ }}
</group>
<group name="tacacs">
tacplus-server { {{ _start_ }}
<group name="tacacs-servers.{{ tac_server }}">
{{ tac_server | IP }} {
port {{ tac_port }};
secret "{{ tac_secret }}"; ## SECRET-DATA
source-address {{ tac_source | IP }};
} {{ end }}
</group>
} {{ end }}
</group>
} {{ end }}
</group>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>[[{"system_level":{"HOSTNAME":"LAB-MX-1" "TZ":"some/time" "default_address_selection":<true> "no_ping_record_route":<true> "no_ping_time_stamp":<true> "no_redirects":<true> "services":{"netconf":{"ssh":{"connection-limit":"10" "rate-limit":"4"}} "ssh":{"client-alive-count-max":"3" "client-alive-interval":"10" "connection-limit":"10" "max-sessions":"32" "no-tcp-fwding":<true> "rate-limit":"5" "root-login":<false> "ssh-proto":"v2" } } "tacacs":{"tacacs-servers":{"1.1.1.1":{"tac_port":"49" "tac_secret":"<SECRET_HASH>" "tac_source":"5.5.5.5" } "2.2.2.2":{"tac_port":"49" "tac_secret":"<SECRET_HASH>" "tac_source":"5.5.5.5" } "4.4.4.4":{"tac_port":"49" "tac_secret":"<SECRET_HASH>" "tac_source":"5.5.5.5" } }} }}]]<block_end># test_issue_45_for_junos_cfg()
<def_stmt>test_faq_multiline_output_matching <block_start>data="""
Local Intf: Te2/1/23
System Name: r1.lab.local
System Description:
Cisco IOS Software, Catalyst 1234 L3 Switch Software (cat1234e-ENTSERVICESK9-M), Version 1534.1(1)SG, RELEASE SOFTWARE (fc3)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2012 by Cisco Systems, Inc.
Compiled Sun 15-Apr-12 02:35 by p
Time remaining: 92 seconds
"""<line_sep>template="""
<group>
Local Intf: {{ local_intf }}
System Name: {{ peer_name }}
<group name="peer_system_description">
System Description: {{ _start_ }}
{{ sys_description | _line_ | joinmatches(" ") }}
Time remaining: {{ ignore }} seconds {{ _end_ }}
</group>
</group>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>[[[{"local_intf":"Te2/1/23" "peer_name":"r1.lab.local" "peer_system_description":{"sys_description":"Cisco IOS Software, Catalyst 1234 L3 Switch "<concat>"Software (cat1234e-ENTSERVICESK9-M), Version "<concat>"1534.1(1)SG, RELEASE SOFTWARE (fc3) Technical "<concat>"Support: http://www.cisco.com/techsupport "<concat>"Copyright (c) 1986-2012 by Cisco Systems, Inc. "<concat>"Compiled Sun 15-Apr-12 02:35 by p"} }]]]<block_end># test_faq_multiline_output_matching()
<def_stmt>test_issue_52_answer <block_start>data="""
Origin:
Some random name
Example Address, example number, example city
Origin:
Some random name 2
Example Address, example number, example city 2
Origin:
Some random name 3
Example Address, example number, example city 3
One more string
"""<line_sep>template="""
<macro>
def process(data):
lines = data["match"].splitlines()
name = lines[0]
address = lines[1]
return {"name": name, "address": address}
</macro>
<group name="origin*" macro="process">
Origin: {{ _start_ }}
{{ match | _line_ | joinmatches }}
</group>
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>[[{"origin":[{"address":"Example Address, example number, example city" "name":"Some random name" } {"address":"Example Address, example number, example city 2" "name":"Some random name 2" } {"address":"Example Address, example number, example city 3" "name":"Some random name 3" } ]}]]<block_end># test_issue_52_answer()
<def_stmt>test_issue_51_answer <block_start>""" test workaround for removing <> chars from input data """<line_sep>data="""
Name:Jane<br>
Name:Michael<br>
Name:July<br>
"""<line_sep>template="""
<group name="people">
Name:{{ name }}<br>
</group>
"""<line_sep># this works as well
# template = "Name:{{ name }}br"
# data = data.replace("<", "").replace(">", "")
# this did not work. fails with xml parsing error
# template = "Name:{{ name }}<br>"
# data = data.replace("<", "<").replace(">", ">")
parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=100)
<assert_stmt>res<eq>[[{"people":[{"name":"Jane"} {"name":"Michael"} {"name":"July"}]}]]<block_end># test_issue_51_answer()
<def_stmt>test_issue_50 <block_start>template="""
<input load="text">
interface "BNG-RH201-CORE"
address 11.11.11.11/31
description "BNG-RH201-CORE"
ldp-sync-timer 10
port lag-107:709
ipv6
address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64
exit
bfd 150 receive 150 multiplier 3
no shutdown
exit
interface "BNG-RH202-CORE"
address 22.22.22.22/31
description "BNG-RH201-CORE"
ldp-sync-timer 10
port lag-108:809
ipv6
address fdf8:f53e:61e4::18/64
exit
bfd 150 receive 150 multiplier 3
no shutdown
exit
interface "system"
address 33.33.33.33/32
ipv6
address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128
exit
no shutdown
exit
ies 97 name "OTDR-MGT" customer 1 create
description "OTDR-MGT"
interface "OTDR-MGT" create
address 172.16.31.10/25
vrrp 97
backup 10.20.30.1
priority 200
exit
vpls "OTDR-MGT-VPLS"
exit
exit
no shutdown
exit
ies 99 name "OLT-MGT" customer 1 create
description "OLT-INBAND-MGT"
interface "OLT-MGT" create
address 192.168.3.11/25
vrrp 1
backup 10.20.40.1
priority 200
exit
vpls "OLT-MGT-VPLS"
exit
exit
no shutdown
exit
ies 100 name "100" customer 1 create
description "IES 100 for subscribers"
redundant-interface "shunt" create
address 66.66.66.66/31
spoke-sdp 1:100 create
no shutdown
exit
exit
subscriber-interface "s100" create
description " Subscriber interface for subscribers"
allow-unmatching-subnets
address 172.16.58.3/22 gw-ip-address 192.168.3.11
address 172.16.31.10/20 gw-ip-address 192.168.3.11
group-interface "s100-lag210-vlan101" create
tos-marking-state trusted
ipv6
router-advertisements
managed-configuration
no shutdown
exit
dhcp6
proxy-server
no shutdown
exit
exit
exit
exit
exit
</input>
<group name="ifaces.{{ name }}" contains="ipv4,ipv6">
## group to match top level interfaces
interface "{{ name }}"
description {{ description | re(".+") | strip('"') }}
address {{ ipv4 | joinmatches('; ') }}
address {{ ipv6 | contains(":") | joinmatches('; ') }}
exit {{ _end_ }}
</group>
<group name="ifaces.{{ name }}" contains="ipv4,ipv6">
## group to match lower level interfaces
interface "{{ name | _start_ }}" create
{{ iftype }}-interface "{{ name | _start_ }}" create
description {{ description | re(".+") | strip('"') | strip }}
address {{ ipv4 | contains(".") | joinmatches('; ') }}
address {{ ipv4 | contains(".") | joinmatches('; ') }} gw-ip-address {{ ignore }}
exit {{ _end_ }}
</group>
"""<line_sep>parser=ttp(template=template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"ifaces":{"BNG-RH201-CORE":{"description":"BNG-RH201-CORE" "ipv4":"11.11.11.11/31" "ipv6":"fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64" } "BNG-RH202-CORE":{"description":"BNG-RH201-CORE" "ipv4":"172.16.17.32/31" "ipv6":"fdf8:f53e:61e4::18/64" } "OLT-MGT":{"ipv4":"192.168.3.11/25"} "OTDR-MGT":{"ipv4":"172.16.31.10/25"} "s100":{"description":"Subscriber interface for subscribers" "iftype":"subscriber" "ipv4":"172.16.58.3/22; 172.16.31.10/20" } "shunt":{"iftype":"redundant" "ipv4":"66.66.66.66/31"} "system":{"ipv4":"192.168.127.12/32" "ipv6":"fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/128" } }}]]<block_end># test_issue_50()
<def_stmt>test_start_with_set <block_start>data="""
authentication {
inactive: authentication {
"""<line_sep>template="""
authentication { {{ inactive | set(False) | _start_ }}
inactive: authentication { {{ inactive | set(True) | _start_ }}
"""<line_sep>parser=ttp(data template log_level="ERROR")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[[{"inactive":<false>} {"inactive":<true>}]]]<block_end># test_start_with_set()
<def_stmt>test_ios_bgp_pers_pars <block_start>template="""
<vars>
defaults_bgp_peers = {
"description": "",
"remote-as": "",
"shutdown": "no",
"inherit_peer-session": "",
"update-source": "",
"password": ""
}
</vars>
<group name="bgp_peers">
<group name="{{ ASN }}">
router bgp {{ ASN }}
<group name="{{ PeerIP }}" default="defaults_bgp_peers">
neighbor {{ PeerIP }} remote-as {{ remote-as }}
neighbor {{ PeerIP }} description {{ description | ORPHRASE }}
neighbor {{ PeerIP | let("shutdown", "yes") }} shutdown
neighbor {{ PeerIP }} inherit peer-session {{ inherit_peer-session }}
neighbor {{ PeerIP }} password {{ password | ORPHRASE }}
neighbor {{ PeerIP }} update-source {{ update-source }}
</group>
</group>
</group>
"""<line_sep>data="""
router bgp 65100
neighbor 1.1.1.1 remote-as 1234
neighbor 1.1.1.1 description Some Description here
neighbor 1.1.1.1 shutdown
neighbor 1.1.1.1 inherit peer-session session_1
neighbor 1.1.1.1 password <PASSWORD>
neighbor 1.1.1.1 update-source Loopback 1
neighbor 1.1.1.2 remote-as 1234
neighbor 1.1.1.2 inherit peer-session session_1
neighbor 1.1.1.2 update-source Loopback 1
"""<line_sep>parser=ttp(data template log_level="DEBUG")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"bgp_peers":{"65100":{"1.1.1.1":{"description":"Some Description here" "inherit_peer-session":"session_1" "password":"<PASSWORD>" "remote-as":"1234" "shutdown":"yes" "update-source":"" } "1.1.1.2":{"description":"" "inherit_peer-session":"session_1" "password":"" "remote-as":"1234" "shutdown":"no" "update-source":"" } }}}]]<block_end># test_ios_bgp_pers_pars()
<def_stmt>test_ip_address_parsing <block_start>data="""
interface Vlan99
description vlan99_interface
ip address 192.168.127.12 255.255.255.0 secondary
ip address 192.168.3.11 255.255.255.0 secondary
ip address 10.99.10.1 255.255.255.0
load-interval 60
bandwidth 10000000
!
interface Vlan100
description vlan100_interface
ip address 10.100.10.1 255.255.255.0
load-interval 60
bandwidth 10000000
!
"""<line_sep>template="""
<group name="interface">
interface {{ interface }}
description {{ description }}
ip address {{ ipv4_addr | PHRASE | exclude("secondary") | to_ip | with_prefixlen }}
load-interval {{ load-interval }}
bandwidth {{ bandwidth }}
<group name="ipv4_secondary*">
ip address {{ ipv4_addr | PHRASE | let("is_secondary", True) | to_ip | with_prefixlen }} secondary
</group>
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{"interface":[{"bandwidth":"10000000" "description":"vlan99_interface" "interface":"Vlan99" "ipv4_addr":"10.99.10.1/24" "ipv4_secondary":[{"ipv4_addr":"192.168.127.12/24" "is_secondary":<true>} {"ipv4_addr":"192.168.3.11/24" "is_secondary":<true>} ] "load-interval":"60" } {"bandwidth":"10000000" "description":"vlan100_interface" "interface":"Vlan100" "ipv4_addr":"10.100.10.1/24" "load-interval":"60" } ]}]]<block_end># test_ip_address_parsing()
<def_stmt>test_vlans_parsing <block_start>template="""
<group name="ports_summary*">
{{ port }} {{ mode }} {{ encap }} {{ satus }} {{ native_vlan | DIGIT }}
</group>
<group name="vlans_allowed">
Port Vlans allowed on trunk {{ _start_ }}
<group name="interfaces*">
{{ port }} {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
<group name="vlans_active">
Port Vlans allowed and active in management domain {{ _start_ }}
<group name="interfaces*">
{{ port }} {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
<group name="vlans_forwarding">
Port Vlans in spanning tree forwarding state and not pruned {{ _start_ }}
<group name="interfaces*">
{{ port }} {{ vlans | unrange('-', ',') | split(",") }}
</group>
{{ _end_ }}
</group>
"""<line_sep>data="""
Port Mode Encapsulation Status Native vlan
Gi0 on 802.1q trunking 1
Gi7 on 802.1q trunking 1
Port Vlans allowed on trunk
Gi0 1,8,999,1002-1005
Gi7 1,100,120,1000,1002-1005
Port Vlans allowed and active in management domain
Gi0 1,8,999
Gi7 1,100,120,1000
Port Vlans in spanning tree forwarding state and not pruned
Gi0 1,8,999
Gi7 1,100,120,1000
"""<line_sep>parser=ttp(data template log_level="DEBUG")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=120)
<assert_stmt>res<eq>[[{"ports_summary":[{"encap":"802.1q" "mode":"on" "native_vlan":"1" "port":"Gi0" "satus":"trunking" } {"encap":"802.1q" "mode":"on" "native_vlan":"1" "port":"Gi7" "satus":"trunking" } ] "vlans_active":{"interfaces":[{"port":"Gi0" "vlans":["1" "8" "999"]} {"port":"Gi7" "vlans":["1" "100" "120" "1000"]} ]} "vlans_allowed":{"interfaces":[{"port":"Gi0" "vlans":["1" "8" "999" "1002" "1003" "1004" "1005"] } {"port":"Gi7" "vlans":["1" "100" "120" "1000" "1002" "1003" "1004" "1005" ] } ]} "vlans_forwarding":{"interfaces":[{"port":"Gi0" "vlans":["1" "8" "999"]} {"port":"Gi7" "vlans":["1" "100" "120" "1000"]} ]} }]]<block_end># test_vlans_parsing()
<def_stmt>test_asa_acls_issue_55_uses_itemize_with_dynamic_path <block_start>data="""
object-group service gokuhead
service-object tcp-udp destination eq gokurpc
service-object tcp destination eq 902
service-object tcp destination eq https
service-object tcp destination eq nfs
service-object tcp destination eq 10025
object-group network gohan
network-object object gohan-01
network-object object gohan-02
network-object object vlan_944
network-object object gohan-03
network-object object gohan-05
network-object object gohan-06
object-group service sql tcp
port-object eq 1433
object-group network vegeta
group-object trunks
network-object object vegeta-01
object-group network Space-Users
network-object object ab
network-object object ac
network-object object ad
network-object object ae
network-object object af
network-object object ag
network-object object ah
network-object object ai
network-object object aj
object-group network dalmatians
network-object object dog-01
group-object trunks
network-object object vlan_950
group-object Space-Users
network-object object Darts-Summary
"""<line_sep>template="""
<vars>
SVC_PORTS = "tcp-udp|tcp|udp"
</vars>
<group name="object-{{ object_type }}-groups**.{{ object_name }}**">
object-group {{ object_type }} {{ object_name | _start_ }}
object-group {{ object_type }} {{ object_name | _start_ }} {{ protocol | re("SVC_PORTS")}}
description {{ description | re(".*") }}
<group name="{{ type }}-objects" itemize="obj_name" method="table">
network-object object {{ obj_name | let("type", "network") }}
network-object host {{ obj_name | IP | let("type", "network") }}
group-object {{ obj_name | let("type", "group") }}
service-object object {{ obj_name | let("type", "service") }}
service-object {{ obj_name | let("type", "service") }}
</group>
<group name="service-object-ports*">
service-object {{ protocol | re("SVC_PORTS") }} destination eq {{port}}
</group>
<group name="service-object-port-ranges*">
service-object {{ protocol | re("SVC_PORTS") }} destination range {{port_begin}} {{port_end}}
</group>
<group name="service-port-objects" itemize="port_obj">
port-object eq {{ port_obj }}
</group>
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=80)
<assert_stmt>res<eq>[[{"object-network-groups":{"Space-Users":{"network-objects":["ab" "ac" "ad" "ae" "af" "ag" "ah" "ai" "aj" ]} "dalmatians":{"group-objects":["trunks" "Space-Users"] "network-objects":["dog-01" "vlan_950" "Darts-Summary"] } "gohan":{"network-objects":["gohan-01" "gohan-02" "vlan_944" "gohan-03" "gohan-05" "gohan-06" ]} "vegeta":{"group-objects":["trunks"] "network-objects":["vegeta-01"] } } "object-service-groups":{"gokuhead":{"service-object-ports":[{"port":"gokurpc" "protocol":"tcp-udp"} {"port":"902" "protocol":"tcp"} {"port":"https" "protocol":"tcp"} {"port":"nfs" "protocol":"tcp"} {"port":"10025" "protocol":"tcp"} ]} "sql":{"protocol":"tcp" "service-port-objects":["1433"]} } }]]<block_end># test_asa_acls_issue_55()
<def_stmt>test_asa_acls_issue_55 <block_start>data="""
object-group service gokuhead
service-object tcp-udp destination eq gokurpc
service-object tcp destination eq 902
service-object tcp destination eq https
service-object tcp destination eq nfs
service-object tcp destination eq 10025
object-group network gohan
network-object object gohan-01
network-object object gohan-02
network-object object vlan_944
network-object object gohan-03
network-object object gohan-05
network-object object gohan-06
object-group service sql tcp
port-object eq 1433
object-group network vegeta
group-object trunks
network-object object vegeta-01
object-group network Space-Users
network-object object ab
network-object object ac
network-object object ad
network-object object ae
network-object object af
network-object object ag
network-object object ah
network-object object ai
network-object object aj
object-group network dalmatians
network-object object dog-01
group-object trunks
network-object object vlan_950
group-object Space-Users
network-object object Darts-Summary
"""<line_sep>template="""
<vars>
SVC_PORTS = "tcp-udp|tcp|udp"
</vars>
<group name="object-{{ object_type }}-groups**.{{ object_name }}**">
object-group {{ object_type }} {{ object_name | _start_ }}
object-group {{ object_type }} {{ object_name | _start_ }} {{ protocol | re("SVC_PORTS")}}
description {{ description | re(".*") }}
<group name="network-objects" itemize="obj_name" method="table">
network-object object {{ obj_name | }}
network-object host {{ obj_name | IP }}
</group>
<group name="group-objects" itemize="obj_name" method="table">
group-object {{ obj_name }}
</group>
<group name="group-objects" itemize="obj_name" method="table">
service-object object {{ obj_name }}
service-object {{ obj_name }}
</group>
<group name="service-object-ports*">
service-object {{ protocol | re("SVC_PORTS") }} destination eq {{port}}
</group>
<group name="service-object-port-ranges*">
service-object {{ protocol | re("SVC_PORTS") }} destination range {{port_begin}} {{port_end}}
</group>
<group name="service-port-objects" itemize="port_obj">
port-object eq {{ port_obj }}
</group>
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=80)
<assert_stmt>res<eq>[[{"object-network-groups":{"Space-Users":{"network-objects":["ab" "ac" "ad" "ae" "af" "ag" "ah" "ai" "aj" ]} "dalmatians":{"group-objects":["trunks" "Space-Users"] "network-objects":["dog-01" "vlan_950" "Darts-Summary"] } "gohan":{"network-objects":["gohan-01" "gohan-02" "vlan_944" "gohan-03" "gohan-05" "gohan-06" ]} "vegeta":{"group-objects":["trunks"] "network-objects":["vegeta-01"] } } "object-service-groups":{"gokuhead":{"service-object-ports":[{"port":"gokurpc" "protocol":"tcp-udp"} {"port":"902" "protocol":"tcp"} {"port":"https" "protocol":"tcp"} {"port":"nfs" "protocol":"tcp"} {"port":"10025" "protocol":"tcp"} ]} "sql":{"protocol":"tcp" "service-port-objects":["1433"]} } }]]<block_end># test_asa_acls_issue_55()
<def_stmt>test_issue_57_headers_parsing <block_start>"""
The first issue was with the startempty match not being selected in favour
of the start match produced by headers:
Interface Link Protocol Primary_IP Description {{ _headers_ }}
that was fixed by adding this code to the TTP selection logic for multiple
matches:
# startempty RE always more preferred
if startempty_re:
for index in startempty_re:
re_ = result[index][0]
result_data = result[index][1]
# skip results that did not pass validation check
if result_data == False:
continue
# prefer result with same path as current record
elif re_["GROUP"].group_id == self.record["GRP_ID"]:
break
# prefer children of current record group
elif self.record["GRP_ID"] and re_["GROUP"].group_id[
0
].startswith(self.record["GRP_ID"][0]):
break
# start RE preferred next
elif start_re:
Another problem was with
Interface Link Protocol Primary_IP Description {{ _headers_ }}
matching on "Duplex: (a)/A - auto; H - half; F - full" line, that was fixed
by chaning _end_ logic by introducing self.ended_groups set to _results_class
and replacing self.GRPLOCL with logic to use self.ended_groups instead.
All in all it resulted in better _end_ handling behavior and allowed to fix issue
45 as well where before this one had to use filtering instead, but now _end_ also
helps.
"""<line_sep>data="""
Brief information on interfaces in route mode:
Link: ADM - administratively down; Stby - standby
Protocol: (s) - spoofing
Interface Link Protocol Primary IP Description
InLoop0 UP UP(s) --
REG0 UP -- --
Vlan401 UP UP 10.251.147.36 HSSBC_to_inband_mgmt_r4
Brief information on interfaces in bridge mode:
Link: ADM - administratively down; Stby - standby
Speed: (a) - auto
Duplex: (a)/A - auto; H - half; F - full
Type: A - access; T - trunk; H - hybrid
Interface Link Speed Duplex Type PVID Description
BAGG1 UP 20G(a) F(a) T 1 to-KDC-R4.10-Core-1
BAGG14 UP 10G(a) F(a) T 1 KDC-R429-E1 BackUp Chassis
BAGG22 UP 20G(a) F(a) T 1 HSSBC-NS-01
FGE1/0/49 DOWN auto A A 1
XGE1/0/1 UP 10G(a) F(a) T 1 KDC-R402-E1 Backup Chassis
"""<line_sep>template="""
<group name = "interfaces">
<group name="routed">
Brief information on interfaces in route mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Protocol Primary_IP Description {{ _headers_ }}
</group>
{{ _end_ }}
</group>
<group name="bridged">
Brief information on interfaces in bridge mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Speed Duplex Type PVID Description {{ _headers_ }}
</group>
{{ _end_ }}
</group>
</group>
"""<line_sep>parser=ttp(data template log_level="error")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=80)<assert_stmt>res<eq>[[{"interfaces":{"bridged":{"BAGG1":{"Description":"to-KDC-R4.10-Core-1" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"20G(a)" "Type":"T" } "BAGG14":{"Description":"KDC-R429-E1 BackUp "<concat>"Chassis" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"10G(a)" "Type":"T" } "BAGG22":{"Description":"HSSBC-NS-01" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"20G(a)" "Type":"T" } "FGE1/0/49":{"Description":"" "Duplex":"A" "Link":"DOWN" "PVID":"1" "Speed":"auto" "Type":"A" } "Link: ADM - administr":{"Description":"" "Duplex":"Stby -" "Link":"ative" "PVID":"dby" "Speed":"ly down;" "Type":"stan" } "XGE1/0/1":{"Description":"KDC-R402-E1 Backup "<concat>"Chassis" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"10G(a)" "Type":"T" } } "routed":{"InLoop0":{"Description":"" "Link":"UP" "Primary_IP":"--" "Protocol":"UP(s)" } "Link: ADM - administr":{"Description":"" "Link":"ative" "Primary_IP":"Stby - "<concat>"standby" "Protocol":"ly down;" } "REG0":{"Description":"" "Link":"UP" "Primary_IP":"--" "Protocol":"--" } "Vlan401":{"Description":"HSSBC_to_inband_mgmt_r4" "Link":"UP" "Primary_IP":"10.251.147.36" "Protocol":"UP" } } }}]]<block_end># test_issue_57_headers_parsing()
<def_stmt>test_issue_57_headers_parsing_using_columns <block_start>"""
Added columns support for headers; the expected number of header columns can
now be adjusted as required to filter unwanted results.
"""<line_sep>data="""
Brief information on interfaces in route mode:
Link: ADM - administratively down; Stby - standby
Protocol: (s) - spoofing
Interface Link Protocol Primary IP Description
InLoop0 UP UP(s) --
REG0 UP -- --
Vlan401 UP UP 10.251.147.36 HSSBC_to_inband_mgmt_r4
Brief information on interfaces in bridge mode:
Link: ADM - administratively down; Stby - standby
Speed: (a) - auto
Duplex: (a)/A - auto; H - half; F - full
Type: A - access; T - trunk; H - hybrid
Interface Link Speed Duplex Type PVID Description
BAGG1 UP 20G(a) F(a) T 1 to-KDC-R4.10-Core-1
BAGG14 UP 10G(a) F(a) T 1 KDC-R429-E1 BackUp Chassis
BAGG22 UP 20G(a) F(a) T 1 HSSBC-NS-01
FGE1/0/49 DOWN auto A A 1
XGE1/0/1 UP 10G(a) F(a) T 1 KDC-R402-E1 Backup Chassis
"""<line_sep>template="""
<group name = "interfaces">
<group name="routed">
Brief information on interfaces in route mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Protocol Primary_IP Description {{ _headers_ | columns(5)}}
</group>
{{ _end_ }}
</group>
<group name="bridged">
Brief information on interfaces in bridge mode: {{ _start_ }}
<group name = "{{Interface}}">
Interface Link Speed Duplex Type PVID Description {{ _headers_ | columns(7) }}
</group>
{{ _end_ }}
</group>
</group>
"""<line_sep>parser=ttp(data template log_level="error")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=80)
<assert_stmt>res<eq>[[{"interfaces":{"bridged":{"BAGG1":{"Description":"to-KDC-R4.10-Core-1" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"20G(a)" "Type":"T" } "BAGG14":{"Description":"KDC-R429-E1 BackUp "<concat>"Chassis" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"10G(a)" "Type":"T" } "BAGG22":{"Description":"HSSBC-NS-01" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"20G(a)" "Type":"T" } "FGE1/0/49":{"Description":"" "Duplex":"A" "Link":"DOWN" "PVID":"1" "Speed":"auto" "Type":"A" } "XGE1/0/1":{"Description":"KDC-R402-E1 Backup "<concat>"Chassis" "Duplex":"F(a)" "Link":"UP" "PVID":"1" "Speed":"10G(a)" "Type":"T" } } "routed":{"InLoop0":{"Description":"" "Link":"UP" "Primary_IP":"--" "Protocol":"UP(s)" } "REG0":{"Description":"" "Link":"UP" "Primary_IP":"--" "Protocol":"--" } "Vlan401":{"Description":"HSSBC_to_inband_mgmt_r4" "Link":"UP" "Primary_IP":"10.251.147.36" "Protocol":"UP" } } }}]]<block_end># test_issue_57_headers_parsing_using_columns()
<def_stmt>test_interface_template_not_collecting_all_data_solution <block_start>data="""
interface Bundle-Ether10
description Bundle-Ether10
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.7
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.6 255.255.255.254
ipv6 address fc00::1:5/127
load-interval 30
!
interface Bundle-Ether51
description Bundle-Ether51
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.2
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.3 255.255.255.254
ipv6 address fc00::1:3/127
load-interval 30
!
interface Loopback0
description Loopback0
ipv4 address 10.1.1.1 255.255.255.255
ipv4 address 10.2.2.2 255.255.255.255 secondary
ipv6 address fc00::1/128
ipv6 address fc00::101/128
!
interface Loopback1
description Loopback1
ipv4 address 10.100.0.1 255.255.255.0
ipv4 address 10.100.1.1 255.255.255.0 secondary
ipv4 address 10.100.2.1 255.255.255.0 secondary
ipv6 address fc00:100::1/64
ipv6 address fc00:100::101/64
ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
description MgmtEth0/RP0/CPU0/0
cdp
vrf VRF-MGMT
ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
description GigabitEthernet0/0/0/12
mtu 9018
lldp
receive disable
transmit disable
!
negotiation auto
load-interval 30
l2transport
!
!
interface TenGigE0/0/0/4
description TenGigE0/0/0/4
bundle id 51 mode active
cdp
load-interval 30
!
interface TenGigE0/0/0/5
shutdown
!
interface TenGigE0/0/0/5.100 l2transport
description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
description TenGigE0/0/0/47
shutdown
mac-address 201.b19.1234
!
interface BVI101
cdp
description BVI101
ipv4 address 192.168.101.1 255.255.255.0
load-interval 30
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
description HundredGigE0/0/1/0
bundle id 10 mode active
cdp
load-interval 30
mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
description GigabitEthernet0/0/0/11
shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
description GigabitEthernet0/0/0/16
shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
description GigabitEthernet0/0/0/17
shutdown
!
"""<line_sep>template_original="""
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to handle different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<vars>
intf_defaults = {
"description": None,
"speed": None,
"negotiation": None,
"disabled": False,
"mode": None,
}
</vars>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
data_list = list(data.split(" "))
addr = str(data_list[0])
mask = str(data_list[1])
mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
ipv4 = addr+"/"+mask
if 'secondary' in data:
is_secondary = True
else:
is_secondary = False
result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
return result
</macro>
<group name="interfaces" default="intf_defaults">
interface {{ interface | _start_}}
interface {{ interface | let("mode", "l2transport") | _start_ }} l2transport
interface preconfigure {{ interface | let("mode", "preconfigure") | _start_ }}
description {{ description | re(".+") }}
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | ORPHRASE | _exact_ }}
</group>
! {{ _end_ }}
</group>
"""<line_sep>parser=ttp(data template_original log_level="error")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=80)<assert_stmt>res<eq>[[{"interfaces":[{"description":"Bundle-Ether10" "disabled":<false> "interface":"Bundle-Ether10" "ipv4":[{"ipv4":{"ipv4":"192.168.1.6/31" "is_secondary":<false>}}] "ipv6":[{"ipv6":"fc00::1:5/127"}] "mode":<none> "negotiation":<none> "speed":<none> } {"description":"Bundle-Ether51" "disabled":<false> "interface":"Bundle-Ether51" "ipv4":[{"ipv4":{"ipv4":"192.168.1.3/31" "is_secondary":<false>}}] "ipv6":[{"ipv6":"fc00::1:3/127"}] "mode":<none> "negotiation":<none> "speed":<none> } {"description":"Loopback0" "disabled":<false> "interface":"Loopback0" "ipv4":[{"ipv4":{"ipv4":"10.1.1.1/32" "is_secondary":<false>}} {"ipv4":{"ipv4":"10.2.2.2/32" "is_secondary":<true>}} ] "ipv6":[{"ipv6":"fc00::1/128"} {"ipv6":"fc00::101/128"}] "mode":<none> "negotiation":<none> "speed":<none> } {"description":"Loopback1" "disabled":<false> "interface":"Loopback1" "ipv4":[{"ipv4":{"ipv4":"10.100.0.1/24" "is_secondary":<false>}} {"ipv4":{"ipv4":"10.100.1.1/24" "is_secondary":<true>}} {"ipv4":{"ipv4":"10.100.2.1/24" "is_secondary":<true>}} ] "ipv6":[{"ipv6":"fc00:100::1/64"} {"ipv6":"fc00:100::101/64"} {"ipv6":"fc00:100::201/64"} ] "mode":<none> "negotiation":<none> "speed":<none> } {"description":"MgmtEth0/RP0/CPU0/0" "disabled":<false> "interface":"MgmtEth0/RP0/CPU0/0" "ipv4":[{"ipv4":{"ipv4":"172.23.136.21/22" "is_secondary":<false> }}] "mode":<none> "negotiation":<none> "speed":<none> } {"description":"GigabitEthernet0/0/0/12" "disabled":<false> "interface":"GigabitEthernet0/0/0/12" "mode":<none> "negotiation":"auto" "speed":<none> } {"description":"TenGigE0/0/0/4" "disabled":<false> "interface":"TenGigE0/0/0/4" "mode":<none> "negotiation":<none> "speed":<none> } {"description":<none> "disabled":<true> "interface":"TenGigE0/0/0/5" "mode":<none> "negotiation":<none> "speed":<none> } {"description":"TenGigE0/0/0/5.100" "disabled":<false> "interface":"TenGigE0/0/0/5.100" "mode":"l2transport" "negotiation":<none> "speed":<none> } {"description":"TenGigE0/0/0/47" "disabled":<true> "interface":"TenGigE0/0/0/47" "mac_address":"201.b19.1234" "mode":<none> "negotiation":<none> "speed":<none> } {"description":"BVI101" "disabled":<false> "interface":"BVI101" "ipv4":[{"ipv4":{"ipv4":"192.168.101.1/24" "is_secondary":<false> }}] "mac_address":"200.b19.4321" "mode":<none> "negotiation":<none> "speed":<none> } {"description":"HundredGigE0/0/1/0" "disabled":<false> "interface":"HundredGigE0/0/1/0" "mac_address":"200.b19.5678" "mode":<none> "negotiation":<none> "speed":<none> } {"description":"GigabitEthernet0/0/0/11" "disabled":<true> "interface":"GigabitEthernet0/0/0/11" "mode":"preconfigure" "negotiation":<none> "speed":<none> } {"description":"GigabitEthernet0/0/0/16" "disabled":<true> "interface":"GigabitEthernet0/0/0/16" "mode":"preconfigure" "negotiation":<none> "speed":<none> } {"description":"GigabitEthernet0/0/0/17" "disabled":<true> "interface":"GigabitEthernet0/0/0/17" "mode":"preconfigure" "negotiation":<none> "speed":<none> } ]}]]<block_end># test_interface_template_not_collecting_all_data_solution()
@pytest.mark.skipif(<true> reason="Need to fix this one")<def_stmt>test_interface_template_not_collecting_all_data <block_start>"""
For interface BVI101 the mac-address is not collected
"""<line_sep>data="""
interface Bundle-Ether10
description Bundle-Ether10
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.7
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.6 255.255.255.254
ipv6 address fc00::1:5/127
load-interval 30
!
interface Bundle-Ether51
description Bundle-Ether51
bfd mode ietf
bfd address-family ipv4 multiplier 3
bfd address-family ipv4 destination 192.168.1.2
bfd address-family ipv4 fast-detect
bfd address-family ipv4 minimum-interval 100
mtu 9114
ipv4 address 192.168.1.3 255.255.255.254
ipv6 address fc00::1:3/127
load-interval 30
!
interface Loopback0
description Loopback0
ipv4 address 10.1.1.1 255.255.255.255
ipv4 address 10.2.2.2 255.255.255.255 secondary
ipv6 address fc00::1/128
ipv6 address fc00::101/128
!
interface Loopback1
description Loopback1
ipv4 address 10.100.0.1 255.255.255.0
ipv4 address 10.100.1.1 255.255.255.0 secondary
ipv4 address 10.100.2.1 255.255.255.0 secondary
ipv6 address fc00:100::1/64
ipv6 address fc00:100::101/64
ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
description MgmtEth0/RP0/CPU0/0
cdp
vrf VRF-MGMT
ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
description GigabitEthernet0/0/0/12
mtu 9018
lldp
receive disable
transmit disable
!
negotiation auto
load-interval 30
l2transport
!
!
interface TenGigE0/0/0/4
description TenGigE0/0/0/4
bundle id 51 mode active
cdp
load-interval 30
!
interface TenGigE0/0/0/5
shutdown
!
interface TenGigE0/0/0/5.100 l2transport
description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
description TenGigE0/0/0/47
shutdown
mac-address 201.b19.1234
!
interface BVI101
cdp
description BVI101
ipv4 address 192.168.101.1 255.255.255.0
load-interval 30
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
description HundredGigE0/0/1/0
bundle id 10 mode active
cdp
load-interval 30
mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
description GigabitEthernet0/0/0/11
shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
description GigabitEthernet0/0/0/16
shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
description GigabitEthernet0/0/0/17
shutdown
!
"""<line_sep>template_original="""
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to handle different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
data_list = list(data.split(" "))
addr = str(data_list[0])
mask = str(data_list[1])
mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
ipv4 = addr+"/"+mask
if 'secondary' in data:
is_secondary = True
else:
is_secondary = False
result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
return result
</macro>
## parent group for all interface groups
<group name="interfaces">
## matches primary interfaces
<group>
{{ mode | set(None) }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }}
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
## matches pre-configured interfaces
<group>
{{ mode | set('preconfigure') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface preconfigure {{ interface }}
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
{{ mode | set('l2transport') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }} l2transport
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
</group>
"""<line_sep>parser=ttp(data template_original log_level="error")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=80)<block_end># test_interface_template_not_collecting_all_data()
<def_stmt>test_interface_template_not_collecting_all_data_reduced <block_start>"""
Below template and data were producing this result:
[[{'interfaces': [{'interface': 'TenGigE0/0/0/5.100'},
{'interface': 'BVI101',
'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]}]}]]
i.e. TTP was not collecting the mac-address for BVI101
"""<line_sep>data="""
interface TenGigE0/0/0/5.100 l2transport
!
interface BVI101
ipv4 address 192.168.101.1 255.255.255.0
mac-address 200.b19.4321
!
"""<line_sep>template="""
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | _line_ | _exact_ }}
</group>
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
mac-address {{ mac_address }}
</group>
</group>
"""<line_sep>parser=ttp(data template log_level="error")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res, width=80)
<assert_stmt>res<eq>[[{"interfaces":[{"interface":"TenGigE0/0/0/5.100"} {"interface":"BVI101" "ipv4":[{"ipv4":"192.168.101.1 255.255.255.0"}] "mac_address":"200.b19.4321" } ]}]]<block_end># test_interface_template_not_collecting_all_data_reduced()
@pytest.mark.skipif(<true> reason="Need to fix this one")<def_stmt>test_interface_template_not_collecting_all_data_reduced_2 <block_start>"""
Below template and data produce this result:
[[{'interfaces': [{'interface': 'TenGigE0/0/0/5'},
{'interface': 'TenGigE0/0/0/5.100',
'mac_address': '200.b19.1234'},
{'interface': 'BVI101',
'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]},
{'interface': 'HundredGigE0/0/1/0',
'mac_address': '200.b19.5678'}]}]]
Interface BVI101 should not have an IPv4 address matched, but
should have its mac-address matched. The problem is that the
l2transport group starts and it has a child group for IPv4 addresses;
the next match after the IPv4 address is the mac-address, but its parent
is a different group, so the IPv4 address is saved under the wrong group
and the mac-address is not saved at all.
IDEA: try to implement automatic end-of-group tracking, adding previous
groups to self.ended_groups when a different group starts next.
The current solution to this problem is to use _end_ to explicitly
indicate the end of a group.
"""<line_sep>data="""
interface TenGigE0/0/0/5
!
interface TenGigE0/0/0/5.100 l2transport
mac-address 200.b19.1234
!
interface BVI101
ipv4 address 192.168.101.1 255.255.255.0
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
mac-address 200.b19.5678
!
"""<line_sep>template_original="""
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | _line_ | _exact_ }}
</group>
</group>
</group>
"""<line_sep>parser=ttp(data template_original log_level="error")<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res width=80)<block_end># test_interface_template_not_collecting_all_data_reduced_2()
<def_stmt>test_issue_61 <block_start>data="""
banner motd &
BANNER MESSAGE line 1
BANNER MESSAGE line 2
BANNER MESSAGE line 3
&
some
other staff
"""<line_sep>template_to_match_marker="banner motd {{ marker }}"<line_sep>template_to_parse_banner="""
<group name="motd">
banner motd {{ ignore(banner_marker) }} {{ _start_ }}
{{ banner_mesage | _line_ | joinmatches("\\n") }}
{{ ignore(banner_marker) }} {{ _end_ }}
</group>
"""<line_sep># extract marker value
parser=ttp(data template_to_match_marker)<line_sep>parser.parse()<line_sep>marker=parser.result()[0][0]["marker"]<line_sep># parse banner
parser=ttp(data template_to_parse_banner vars={"banner_marker":marker})<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res)<assert_stmt>res<eq>[[{'motd':{'banner_mesage':'BANNER MESSAGE line 1\n'<concat>'BANNER MESSAGE line 2\n'<concat>'BANNER MESSAGE line 3'}}]]<block_end># test_issue_61()
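# The two-pass approach generalizes to any delimiter character; a hedged,
# self-contained re-run of the marker-extraction pass with "^" (sample data):
def example_banner_with_caret_marker():
    data = "banner motd ^\nhello there\n^\n"
    parser = ttp(data, "banner motd {{ marker }}")
    parser.parse()
    assert parser.result()[0][0]["marker"] == "^"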
<def_stmt>test_fortigate_intf_parsing <block_start>template="""
<group name="interfaces">
config system interface {{ _start_ }}
<group name="/interfaces*">
edit "{{ interface }}"
set allowaccess {{ allowaccess }}
set description "{{ description }}"
set interface "{{ phy_interface }}"
set snmp-index {{ snmp_index }}
set type {{ fgt_int_type }}
set vdom "{{ vdom }}"
set vlanid {{ vlan }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
"""<line_sep>data="""
config system np6
edit "np6_0"
next
end
config system interface
edit "mgmt1"
set vdom "root"
set ip 10.10.10.1 255.255.255.248
set allowaccess ping
set type physical
set description "mgmt1"
set snmp-index 1
next
edit "port1"
set vdom "internal"
set ip 20.20.20.1 255.255.255.248
set allowaccess ping
set type physical
set snmp-index 2
next
end
config system custom-language
edit "en"
set filename "en"
next
edit "fr"
set filename "fr"
next
end
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res)<assert_stmt>res<eq>[[{'interfaces':[{'allowaccess':'ping' 'description':'mgmt1' 'fgt_int_type':'physical' 'interface':'mgmt1' 'snmp_index':'1' 'vdom':'root'} {'allowaccess':'ping' 'fgt_int_type':'physical' 'interface':'port1' 'snmp_index':'2' 'vdom':'internal'}]}]]<block_end># test_fortigate_intf_parsing()
<def_stmt>test_issue_57_one_more <block_start>"""
Without the _anonymous_ group id formation bug fix,
the template/data below were producing this result:
[[{'portchannel': {'1': {'local_members': [{}],
'remote_members': [{'flag': '{EF}',
'interface': 'GE6/0/1',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'},
{'flag': '{EF}',
'interface': 'GE6/0/2',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'}]},
'2': {'local_members': [{}],
'remote_members': [{'flag': '{EF}',
'interface': 'GE6/0/3',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'},
{'flag': '{EF}',
'interface': 'GE6/0/4',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'}]}}}]]
Further debugging revealed a flaw in the results selection logic:
due to the exclude("Port") statement the group was invalidated and the anonymous group_id
was the same as the parent group_id, so new anonymous group matches were not
able to restart the group; fixed by changing the way the anonymous group id is formed.
Before fix:
self.ended_groups: set()
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
After fix:
self.ended_groups: set()
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: {('portchannel.{{channel_number}}.local_members*._anonymous_', 0)}
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: set()
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
"""<line_sep>data="""
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
"""<line_sep>template="""
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*" void="">
Local: {{_start_}}
<group>
{{interface | exclude("Port") }} {{status}} {{priority}} {{oper_key }} {{flag}}
</group>
</group>
<group name = "remote_members*">
{{interface }} {{status}} {{priority}} {{oper_key}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep>pprint.pprint(res)<assert_stmt>res<eq>[[{'portchannel':{'1':{'local_members':[{'flag':'{ACG}' 'interface':'GE6/0/1' 'oper_key':'1' 'priority':'32768' 'status':'U'} {'flag':'{ACG}' 'interface':'GE6/0/2' 'oper_key':'1' 'priority':'32768' 'status':'U'}] 'remote_members':[{'flag':'{EF}' 'interface':'GE6/0/1' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'} {'flag':'{EF}' 'interface':'GE6/0/2' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'}]} '2':{'local_members':[{'flag':'{ACG}' 'interface':'GE6/0/3' 'oper_key':'2' 'priority':'32768' 'status':'U'} {'flag':'{ACG}' 'interface':'GE6/0/4' 'oper_key':'2' 'priority':'32768' 'status':'U'}] 'remote_members':[{'flag':'{EF}' 'interface':'GE6/0/3' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'} {'flag':'{EF}' 'interface':'GE6/0/4' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'}]}}}]]<block_end># test_issue_57_one_more()
<def_stmt>test_issue_57_one_more_answer <block_start>data="""
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
"""<line_sep>template="""
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*">
{{interface}} {{status}} {{priority | DIGIT}} {{oper_key | DIGIT}} {{flag}}
</group>
<group name = "remote_members*">
{{interface}} {{status}} {{priority | DIGIT}} {{oper_key | DIGIT}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{'portchannel':{'1':{'local_members':[{'flag':'{ACG}' 'interface':'GE6/0/1' 'oper_key':'1' 'priority':'32768' 'status':'U'} {'flag':'{ACG}' 'interface':'GE6/0/2' 'oper_key':'1' 'priority':'32768' 'status':'U'}] 'remote_members':[{'flag':'{EF}' 'interface':'GE6/0/1' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'} {'flag':'{EF}' 'interface':'GE6/0/2' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'}]} '2':{'local_members':[{'flag':'{ACG}' 'interface':'GE6/0/3' 'oper_key':'2' 'priority':'32768' 'status':'U'} {'flag':'{ACG}' 'interface':'GE6/0/4' 'oper_key':'2' 'priority':'32768' 'status':'U'}] 'remote_members':[{'flag':'{EF}' 'interface':'GE6/0/3' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'} {'flag':'{EF}' 'interface':'GE6/0/4' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'}]}}}]]<block_end># test_issue_57_one_more_answer()
<def_stmt>test_issue_57_one_more_empty_dict_in_res <block_start>"""
Without the fix, this result was produced:
[[{'portchannel': {'1': {'local_members': [{},
{'flag': '{ACG}',
'interface': 'GE6/0/1',
'oper_key': '1',
'priority': '32768',
'status': 'U'},
{'flag': '{ACG}',
'interface': 'GE6/0/2',
'oper_key': '1',
'priority': '32768',
'status': 'U'}],
'remote_members': [{},
{'flag': '{EF}',
'interface': 'GE6/0/1',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'},
{'flag': '{EF}',
'interface': 'GE6/0/2',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'}]},
'2': {'local_members': [{},
{'flag': '{ACG}',
'interface': 'GE6/0/3',
'oper_key': '2',
'priority': '32768',
'status': 'U'},
{'flag': '{ACG}',
'interface': 'GE6/0/4',
'oper_key': '2',
'priority': '32768',
'status': 'U'}],
'remote_members': [{},
{'flag': '{EF}',
'interface': 'GE6/0/3',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'},
{'flag': '{EF}',
'interface': 'GE6/0/4',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'}]}}}]]
The above results contain an empty dictionary list item. This is because
local_members* and remote_members* use * to indicate a list item;
as a result self.dict_by_path was returning E as a list element
and results were appended to that element, even though the results were an
empty dictionary. The saving logic was updated to check if results are empty
and to skip appending them if so.
"""<line_sep>data="""
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E -- Collecting, F -- Distributing,
G -- Defaulted, H -- Expired
Aggregate Interface: Bridge-Aggregation1
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/1 U 32768 1 {ACG}
GE6/0/2 U 32768 1 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/1 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/2 0 32768 0 0x8000, 0000-0000-0000 {EF}
Aggregate Interface: Bridge-Aggregation2
Aggregation Mode: Dynamic
Loadsharing Type: Shar
Management VLAN : None
System ID: 0x8000, d07e-28b5-a200
Local:
Port Status Priority Oper-Key Flag
--------------------------------------------------------------------------------
GE6/0/3 U 32768 2 {ACG}
GE6/0/4 U 32768 2 {ACG}
Remote:
Actor Partner Priority Oper-Key SystemID Flag
--------------------------------------------------------------------------------
GE6/0/3 0 32768 0 0x8000, 0000-0000-0000 {EF}
GE6/0/4 0 32768 0 0x8000, 0000-0000-0000 {EF}
"""<line_sep>template="""
<group name = "portchannel.{{channel_number}}">
Aggregate Interface: Bridge-Aggregation{{ channel_number}}
<group name = "local_members*">
Local: {{_start_}}
<group>
{{interface }} {{status}} {{priority}} {{oper_key | DIGIT }} {{flag}}
</group>
</group>
<group name = "remote_members*">
Remote: {{_start_}}
<group>
{{interface }} {{status}} {{priority}} {{oper_key}} {{sys_id}}, {{ mac | MAC }} {{flag}}
</group>
</group>
</group>
"""<line_sep>parser=ttp(data template)<line_sep>parser.parse()<line_sep>res=parser.result()<line_sep># pprint.pprint(res)
<assert_stmt>res<eq>[[{'portchannel':{'1':{'local_members':[{'flag':'{ACG}' 'interface':'GE6/0/1' 'oper_key':'1' 'priority':'32768' 'status':'U'} {'flag':'{ACG}' 'interface':'GE6/0/2' 'oper_key':'1' 'priority':'32768' 'status':'U'}] 'remote_members':[{'flag':'{EF}' 'interface':'GE6/0/1' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'} {'flag':'{EF}' 'interface':'GE6/0/2' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'}]} '2':{'local_members':[{'flag':'{ACG}' 'interface':'GE6/0/3' 'oper_key':'2' 'priority':'32768' 'status':'U'} {'flag':'{ACG}' 'interface':'GE6/0/4' 'oper_key':'2' 'priority':'32768' 'status':'U'}] 'remote_members':[{'flag':'{EF}' 'interface':'GE6/0/3' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'} {'flag':'{EF}' 'interface':'GE6/0/4' 'mac':'0000-0000-0000' 'oper_key':'0' 'priority':'32768' 'status':'0' 'sys_id':'0x8000'}]}}}]]<block_end># test_issue_57_one_more_empty_dict_in_res()
|
"""
Tests for constructing Pin universes
"""<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>openmc<import_from_stmt>openmc.model pin<def_stmt>get_pin_radii pin_univ<block_start>"""Return a sorted list of all radii from pin"""<line_sep>rads=set()<for_stmt>cell pin_univ.get_all_cells().values()<block_start>surfs=cell.region.get_surfaces().values()<line_sep>rads.update(set(s.r<for>s surfs))<block_end><return>list(sorted(rads))<block_end>@pytest.fixture<def_stmt>pin_mats <block_start>fuel=openmc.Material(name="UO2")<line_sep>fuel.volume=100<line_sep>clad=openmc.Material(name="zirc")<line_sep>clad.volume=100<line_sep>water=openmc.Material(name="water")<line_sep><return>fuel clad water<block_end>@pytest.fixture<def_stmt>good_radii <block_start><return>(0.4 0.42)<block_end><def_stmt>test_failure pin_mats good_radii<block_start>"""Check for various failure modes"""<line_sep>good_surfaces=[openmc.ZCylinder(r=r)<for>r good_radii]<line_sep># Bad material type
<with_stmt>pytest.raises(TypeError)<block_start>pin(good_surfaces [mat.name<for>mat pin_mats])<block_end># Incorrect lengths
<with_stmt>pytest.raises(ValueError match="length")<block_start>pin(good_surfaces[:len(pin_mats)-2] pin_mats)<block_end># Non-positive radii
rad=[openmc.ZCylinder(r=-0.1)]+good_surfaces[1:]<with_stmt>pytest.raises(ValueError match="index 0")<block_start>pin(rad pin_mats)<block_end># Non-increasing radii
surfs=tuple(reversed(good_surfaces))<with_stmt>pytest.raises(ValueError match="index 1")<block_start>pin(surfs pin_mats)<block_end># Bad orientation
surfs=[openmc.XCylinder(r=good_surfaces[0].r)]+good_surfaces[1:]<with_stmt>pytest.raises(TypeError match="surfaces")<block_start>pin(surfs pin_mats)<block_end># Passing cells argument
<with_stmt>pytest.raises(ValueError match="Cells")<block_start>pin(surfs pin_mats cells=[])<block_end><block_end><def_stmt>test_pins_of_universes pin_mats good_radii<block_start>"""Build a pin with a Universe in one ring"""<line_sep>u1=openmc.Universe(cells=[openmc.Cell(fill=pin_mats[1])])<line_sep>new_items=pin_mats[:1]+(u1 )+pin_mats[2:]<line_sep>new_pin=pin([openmc.ZCylinder(r=r)<for>r good_radii] new_items subdivisions={0:2} divide_vols=<true>)<assert_stmt>len(new_pin.cells)<eq>len(pin_mats)+1<block_end>@pytest.mark.parametrize("surf_type" [openmc.ZCylinder openmc.XCylinder openmc.YCylinder])<def_stmt>test_subdivide pin_mats good_radii surf_type<block_start>"""Test the subdivision with various orientations"""<line_sep>surfs=[surf_type(r=r)<for>r good_radii]<line_sep>fresh=pin(surfs pin_mats name="fresh pin")<assert_stmt>len(fresh.cells)<eq>len(pin_mats)<assert_stmt>fresh.name<eq>"fresh pin"<line_sep># subdivide inner region
N=5<line_sep>div0=pin(surfs pin_mats {0:N})<assert_stmt>len(div0.cells)<eq>len(pin_mats)+N-1<line_sep># Check volume of fuel material
<for_stmt>mid,mat div0.get_all_materials().items()<block_start><if_stmt>mat.name<eq>"UO2"<block_start><assert_stmt>mat.volume<eq>pytest.approx(100/N)<block_end><block_end># check volumes of new rings
radii=get_pin_radii(div0)<line_sep>bounds=[0]+radii[:N]<line_sep>sqrs=np.square(bounds)<assert_stmt>np.all(sqrs[1:]-sqrs[:-1]<eq>pytest.approx(good_radii[0]<power>2/N))<line_sep># subdivide non-inner most region
new_pin=pin(surfs pin_mats {1:N})<assert_stmt>len(new_pin.cells)<eq>len(pin_mats)+N-1<line_sep># Check volume of clad material
<for_stmt>mid,mat new_pin.get_all_materials().items()<block_start><if_stmt>mat.name<eq>"zirc"<block_start><assert_stmt>mat.volume<eq>pytest.approx(100/N)<block_end><block_end># check volumes of new rings
radii=get_pin_radii(new_pin)<line_sep>sqrs=np.square(radii[:N+1])<assert_stmt>np.all(sqrs[1:]-sqrs[:-1]<eq>pytest.approx((good_radii[1]<power>2-good_radii[0]<power>2)/N))<block_end> |
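# A minimal sketch (not OpenMC code) of the ring arithmetic that the two
# volume checks above verify: splitting a disc of radius R into N rings of
# equal area puts the boundaries at r_i = R * sqrt(i / N), so consecutive
# squared radii differ by exactly R**2 / N.
import numpy as np

R, N = 0.4, 5
radii = R * np.sqrt(np.arange(1, N + 1) / N)      # equal-area boundaries
sqrs = np.square(np.concatenate(([0.0], radii)))
assert np.allclose(np.diff(sqrs), R**2 / N)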
<import_from_future_stmt> unicode_literals<class_stmt>BaseCommonException(Exception)<block_start>"""
Base exception for the common app
"""<line_sep><pass><block_end><class_stmt>NotLatestVersion(BaseCommonException)<block_start>"""
The installed version is not the latest available version
"""<def_stmt>__init__ self upstream_version<block_start>self.upstream_version=upstream_version<block_end><block_end> |
"""
observer module
Typical usage is as follows:
from __future__ import with_statement
from observer import consumer, observation
@consumer
def do_something_with_notification():
while True:
key, old, new = (yield)
print "%s: %s -> %s" % (key, old, new)
container = {}
# Any modification to `container`, now called `observed` in the
# body of the with statement, is sent to the coroutine
# do_something_with_notification()
with observation(observe=container,
notify=[do_something_with_notification()]) as observed:
modify_observed(observed)
Requires Python 2.5
Author: <NAME> (<EMAIL>)
"""<import_from_future_stmt> with_statement<import_from_stmt>contextlib contextmanager<import_stmt>unittest<line_sep>@contextmanager<def_stmt>observation observe notify<block_start>"""Simple boilerplate to link to the 'with' statement.
Contextlib's contextmanager decorator is a very convenient way to
create simple context managers, specifically the __enter__ and
__exit__ special methods.
"""<line_sep>proxy=Observation(observe notify)<try_stmt><block_start><yield>proxy<block_end><finally_stmt><block_start>proxy.close()<block_end><block_end><class_stmt>NoneSuch(object)<block_start>"""A useful alternative to None in the case of a key being deleted or inserted."""<def_stmt>__new__ cls *args **kwargs<block_start><if_stmt>'_inst'<not><in>vars(cls)<block_start>cls._inst=object.__new__(cls *args **kwargs)<block_end><return>cls._inst<block_end><def_stmt>__init__ self *args **kwargs<block_start><pass><block_end><def_stmt>__repr__ self<block_start><return>"NoneSuch()"<block_end><def_stmt>__call__ self *args **kwargs<block_start><return>self<block_end><def_stmt>__nonzero__ self<block_start><return><false><block_end><block_end>NoneSuch=NoneSuch()<class_stmt>Observation(object)<block_start>"""Enables observation of dictionaries.
Proxies the `observe` dictionary such that any modifications to
it are sent via `send()` to the notifiers in the `notify`
sequence. The sent value is a triple (key, old, new).
Notifications are sent AFTER the change.
Other mutable containers, such as sets and lists or your custom
container, can be readily added by supporting their interface.
"""<def_stmt>__init__ self observe notify<block_start>self._obj=observe<line_sep>self.notify=notify<block_end><def_stmt>close self<block_start>self._obj=<none><line_sep>self.notify=<none><block_end><def_stmt>__iter__ self<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><return>iter(self._obj)<block_end># all mutating methods go here, this list should be comprehensive as of 2.5
<def_stmt>__delitem__ self K<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end>old=self._obj[K]<del_stmt>self._obj[K]<for_stmt>notify self.notify<block_start>notify.send((K old NoneSuch))<block_end><block_end><def_stmt>__setitem__ self K V<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end>old=self._obj.get(K NoneSuch)<line_sep>self._obj[K]=V<for_stmt>notify self.notify<block_start>notify.send((K old V))<block_end><block_end><def_stmt>setdefault self K default<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><try_stmt><block_start><return>self._obj[K]<block_end><except_stmt>KeyError<block_start>self._obj[K]=default<for_stmt>notify self.notify<block_start>notify.send((K NoneSuch default))<block_end><block_end><block_end><def_stmt>clear self<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end>items=self._obj.items()<line_sep>self._obj.clear()<for_stmt>K,old items<block_start><for_stmt>notify self.notify<block_start>notify.send((K old NoneSuch))<block_end><block_end><block_end><def_stmt>update self *seq_or_map **kw<block_start><import_from_stmt>itertools chain<if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><try_stmt><block_start>seq=seq_or_map[0].iteritems()<block_end><except_stmt>IndexError<block_start>seq=((K <none>)<for>K seq_or_map)<block_end><for_stmt>K,V chain(seq kw.iteritems())<block_start>old=self._obj.get(K NoneSuch)<line_sep>self._obj[K]=V<for_stmt>notify self.notify<block_start>notify.send((K old V))<block_end><block_end><block_end><def_stmt>pop self K *default<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end># this may be unexpected to have old be the default
# value. what do you think?
<if_stmt>default<block_start>old=self._obj.pop(K default[0])<block_end><else_stmt><block_start>old=self._obj.pop(K)<block_end><for_stmt>notify self.notify<block_start>notify.send((K old NoneSuch))<block_end><return>old<block_end><def_stmt>popitem self<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end>K,old=self._obj.popitem()<for_stmt>notify self.notify<block_start>notify.send((K old NoneSuch))<block_end><return>old<block_end><def_stmt>__contains__ self K<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><return>K<in>self._obj<block_end><def_stmt>__getitem__ self K<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><return>self._obj[K]<block_end><def_stmt>__len__ self<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><return>len(self._obj)<block_end># otherwise, just pass through
<def_stmt>__getattr__ self attrib<block_start><if_stmt>self._obj<is><none><block_start><raise>ValueError("Operation on closed observation")<block_end><return>getattr(self._obj attrib)<block_end><block_end><def_stmt>consumer func<block_start>"""A decorator, advances func to its first yield point when called.
Modified this original example code from PEP 342 to use the new
functools.wraps decorator. This convenience function makes it look
like the original function, which is almost always what we want,
especially if we designed the original function to be wrapped in
the first place!
Maybe `consumer` should go into functools too!
"""<import_from_stmt>functools wraps<line_sep>@wraps(func)<def_stmt>wrapper *args **kw<block_start>gen=func(*args **kw)<line_sep>gen.next()<line_sep><return>gen<block_end><return>wrapper<block_end><class_stmt>ObserverTestCase(unittest.TestCase)<block_start>"""Tests observer module, special emphasis on dictionary protocol.
We keep the tests monolithic, just runTest(), to keep the scope of
the with statement visible and simple.
"""<def_stmt>runTest self<block_start><import_from_stmt>collections deque<line_sep>changes=deque()<def_stmt>consume X<block_start><def_stmt>_consume X<block_start><while_stmt>X<block_start><yield>X.popleft()<block_end><block_end><return>list(_consume(X))<block_end>@consumer<def_stmt>observe_changes <block_start><while_stmt><true><block_start>change=(<yield>)<line_sep>changes.append(change)<block_end><block_end>fruits=dict(apple=1 banana=2 cherry=3)<with_stmt>observation(observe=fruits notify=[observe_changes()])<as>observed_fruits# typical mutations
<block_start>observed_fruits['cherry']<augmul>2<del_stmt>observed_fruits['apple']<line_sep>self.assertEquals(consume(changes) [('cherry' 3 6) ('apple' 1 NoneSuch)])<line_sep># .update with keyword args
observed_fruits.update(durian=4 figs=5)<line_sep>self.assertEquals(fruits['durian'] 4)<line_sep># .clear
observed_fruits.clear()<line_sep>self.assertEquals(len(observed_fruits) 0)<line_sep>consume(changes)# keep it simple, just throw away
# .update with map and keyword args, kw should override
observed_fruits.update({'grapefruit':6 'jackfruit':7} jackfruit=8)<line_sep>self.assertEquals(observed_fruits['jackfruit'] 8)<line_sep>self.assertEquals(consume(changes) [('jackfruit' NoneSuch 7) ('grapefruit' NoneSuch 6) ('jackfruit' 7 8)])<line_sep># .pop, default here may be controversial
observed_fruits.pop('durian' <none>)<line_sep>self.assertEquals(consume(changes) [('durian' <none> NoneSuch)])<line_sep># .setdefault
observed_fruits.setdefault('jackfruit' -1)<line_sep>observed_fruits.setdefault('kiwi' 9)<line_sep>self.assertEquals(consume(changes) [('kiwi' NoneSuch 9)])<line_sep># .popitem
<while_stmt>observed_fruits<block_start>observed_fruits.popitem()<block_end>self.assertEquals(fruits dict())<block_end># verify that outside of with statement scope, the observation
# is closed
self.assertRaises(ValueError <lambda>:observed_fruits.update(foo=0 fum=1))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
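# A small usage sketch in the spirit of the docstring above (Python 2, like
# the module itself; assumes the module is saved as observer.py): inserts
# report old=NoneSuch, deletions report new=NoneSuch.
from observer import consumer, observation, NoneSuch

seen = []

@consumer
def recorder():
    while True:
        seen.append((yield))

with observation(observe={}, notify=[recorder()]) as obs:
    obs["a"] = 1   # appends ('a', NoneSuch, 1)
    del obs["a"]   # appends ('a', 1, NoneSuch)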
<import_from_stmt>functools partial<import_from_stmt>typing Sequence<import_stmt>pytest<import_from_stmt>torch Tensor tensor<import_from_stmt>tests.text.helpers TextTester<import_from_stmt>tests.text.inputs _inputs_multiple_references _inputs_single_sentence_multiple_references<import_from_stmt>torchmetrics.functional.text.chrf chrf_score<import_from_stmt>torchmetrics.text.chrf CHRFScore<import_from_stmt>torchmetrics.utilities.imports _SACREBLEU_AVAILABLE<if_stmt>_SACREBLEU_AVAILABLE<block_start><import_from_stmt>sacrebleu.metrics CHRF<block_end><def_stmt>sacrebleu_chrf_fn preds:Sequence[str] targets:Sequence[Sequence[str]] char_order:int word_order:int lowercase:bool whitespace:bool <arrow>Tensor<block_start>sacrebleu_chrf=CHRF(char_order=char_order word_order=word_order lowercase=lowercase whitespace=whitespace eps_smoothing=<true>)<line_sep># Sacrebleu CHRF expects different format of input
targets=[[target[i]<for>target targets]<for>i range(len(targets[0]))]<line_sep>sacrebleu_chrf=sacrebleu_chrf.corpus_score(preds targets).score/100<line_sep><return>tensor(sacrebleu_chrf)<block_end>@pytest.mark.parametrize(["char_order" "word_order" "lowercase" "whitespace"] [(6 2 <false> <false>) (6 2 <false> <true>) (4 2 <true> <false>) (6 0 <true> <false>) (6 0 <true> <true>) (4 0 <false> <true>) ] )@pytest.mark.parametrize(["preds" "targets"] [(_inputs_multiple_references.preds _inputs_multiple_references.targets)] )@pytest.mark.skipif(<not>_SACREBLEU_AVAILABLE reason="test requires sacrebleu")<class_stmt>TestCHRFScore(TextTester)<block_start>@pytest.mark.parametrize("ddp" [<false> <true>])@pytest.mark.parametrize("dist_sync_on_step" [<false> <true>])<def_stmt>test_chrf_score_class self ddp dist_sync_on_step preds targets char_order word_order lowercase whitespace<block_start>metric_args={"n_char_order":char_order "n_word_order":word_order "lowercase":lowercase "whitespace":whitespace }<line_sep>nltk_metric=partial(sacrebleu_chrf_fn char_order=char_order word_order=word_order lowercase=lowercase whitespace=whitespace)<line_sep>self.run_class_metric_test(ddp=ddp preds=preds targets=targets metric_class=CHRFScore sk_metric=nltk_metric dist_sync_on_step=dist_sync_on_step metric_args=metric_args )<block_end><def_stmt>test_chrf_score_functional self preds targets char_order word_order lowercase whitespace<block_start>metric_args={"n_char_order":char_order "n_word_order":word_order "lowercase":lowercase "whitespace":whitespace }<line_sep>nltk_metric=partial(sacrebleu_chrf_fn char_order=char_order word_order=word_order lowercase=lowercase whitespace=whitespace)<line_sep>self.run_functional_metric_test(preds targets metric_functional=chrf_score sk_metric=nltk_metric metric_args=metric_args )<block_end><def_stmt>test_chrf_score_differentiability self preds targets char_order word_order lowercase whitespace<block_start>metric_args={"n_char_order":char_order "n_word_order":word_order "lowercase":lowercase "whitespace":whitespace }<line_sep>self.run_differentiability_test(preds=preds targets=targets metric_module=CHRFScore metric_functional=chrf_score metric_args=metric_args )<block_end><block_end><def_stmt>test_chrf_empty_functional <block_start>hyp=[]<line_sep>ref=[[]]<assert_stmt>chrf_score(hyp ref)<eq>tensor(0.0)<block_end><def_stmt>test_chrf_empty_class <block_start>chrf=CHRFScore()<line_sep>hyp=[]<line_sep>ref=[[]]<assert_stmt>chrf(hyp ref)<eq>tensor(0.0)<block_end><def_stmt>test_chrf_return_sentence_level_score_functional <block_start>hyp=_inputs_single_sentence_multiple_references.preds<line_sep>ref=_inputs_single_sentence_multiple_references.targets<line_sep>_,chrf_sentence_score=chrf_score(hyp ref return_sentence_level_score=<true>)<line_sep>isinstance(chrf_sentence_score Tensor)<block_end><def_stmt>test_chrf_return_sentence_level_class <block_start>chrf=CHRFScore(return_sentence_level_score=<true>)<line_sep>hyp=_inputs_single_sentence_multiple_references.preds<line_sep>ref=_inputs_single_sentence_multiple_references.targets<line_sep>_,chrf_sentence_score=chrf(hyp ref)<line_sep>isinstance(chrf_sentence_score Tensor)<block_end> |
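# A tiny worked example of the reference transposition inside
# sacrebleu_chrf_fn above: torchmetrics groups references per sample, while
# sacrebleu's corpus_score expects one list per reference position.
targets = [["s1-ref-a", "s1-ref-b"], ["s2-ref-a", "s2-ref-b"]]
transposed = [[t[i] for t in targets] for i in range(len(targets[0]))]
assert transposed == [["s1-ref-a", "s2-ref-a"], ["s1-ref-b", "s2-ref-b"]]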
<import_from_stmt>django.db connection<import_from_stmt>usaspending_api.common.etl ETLQuery ETLTable<import_from_stmt>usaspending_api.common.etl.operations delete_obsolete_rows insert_missing_rows update_changed_rows<line_sep># This is basically the desired final state of the federal_account table. We can diff this against the
# actual federal_account table and make corrections as appropriate to bring the federal_account table
# into line. Since the treasury_appropriation_account and federal_account tables are fairly small, we
# can perform full diffs with no noticeable performance impact. This sort order is dictated by DEV-3495.
FEDERAL_ACCOUNTS_FROM_TREASURY_ACCOUNTS_SQL="""
select
distinct on (agency_id, main_account_code)
agency_id as agency_identifier,
main_account_code,
concat(agency_id, '-', main_account_code) as federal_account_code,
account_title
from
treasury_appropriation_account
order by
agency_id,
main_account_code,
beginning_period_of_availability desc nulls last,
ending_period_of_availability desc nulls last,
sub_account_code,
allocation_transfer_agency_id,
treasury_account_identifier desc
"""<line_sep>source_federal_account_query=ETLQuery(FEDERAL_ACCOUNTS_FROM_TREASURY_ACCOUNTS_SQL)<line_sep>destination_federal_account_table=ETLTable("federal_account" key_overrides=["agency_identifier" "main_account_code"])<def_stmt>remove_empty_federal_accounts <block_start>"""
Removes federal accounts that are no longer attached to a TAS.
Returns:
Number of rows updated
"""<line_sep><return>delete_obsolete_rows(source_federal_account_query destination_federal_account_table)<block_end><def_stmt>update_federal_accounts <block_start>"""
Update existing federal account records based on the latest information
from the TreasuryAppropriationAccount (TAS) table. The account title
for each federal account should reflect the account title of the
a related TAS with the most recent beginning period of availability.
Returns:
Number of rows updated
"""<line_sep><return>update_changed_rows(source_federal_account_query destination_federal_account_table)<block_end><def_stmt>insert_federal_accounts <block_start>"""
Insert new federal accounts records based on the TreasuryAppropriationAccount
(TAS) table. Each TAS maps to a higher-level federal account, defined
by a unique combination of TAS agency_id (AID) and TAS main account
code (MAC).
"""<line_sep><return>insert_missing_rows(source_federal_account_query destination_federal_account_table)<block_end><def_stmt>link_treasury_accounts_to_federal_accounts <block_start>"""
Federal accounts are derived from AID (agency identifier) + MAIN (main account code) in treasury accounts.
Using this information, we can link treasury accounts to their corresponding federal account and correct
any accounts that may be mis-linked. Since these tables are relatively small, we can simply perform full
updates with little to no noticeable performance impact.
"""<with_stmt>connection.cursor()<as>cursor<block_start>cursor.execute("""
update treasury_appropriation_account as tu
set federal_account_id = fa.id
from treasury_appropriation_account as t
left outer join federal_account as fa on
t.agency_id = fa.agency_identifier and
t.main_account_code = fa.main_account_code
where tu.treasury_account_identifier = t.treasury_account_identifier and
tu.federal_account_id is distinct from fa.id;
""")<block_end><return>cursor.rowcount<block_end> |
# -*- coding: utf-8 -*-
"""
Curve
~~~~
db package
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""<import_stmt>flask_sqlalchemy<line_sep>db=flask_sqlalchemy.SQLAlchemy()<line_sep> |
<import_stmt>os<line_sep>os.environ["HOME"]=os.path.expanduser('~')<import_from_stmt>akdl.models.tf.easytransfer easytransfer_main<import_from_stmt>akdl.runner.config TrainTaskConfig<def_stmt>main task_config:TrainTaskConfig<block_start>easytransfer_main.main(task_config)<block_end> |
<import_from_stmt>nose.tools istest assert_equal<import_from_stmt>mammoth.lists unique<line_sep>@istest<def_stmt>unique_of_empty_list_is_empty_list <block_start>assert_equal([] unique([]))<block_end>@istest<def_stmt>unique_removes_duplicates_while_preserving_order <block_start>assert_equal(["apple" "banana"] unique(["apple" "banana" "apple"]))<block_end> |
# coding: utf-8
# STANDARD LIB
<import_stmt>os<line_sep># THIRD PARTY
<import_stmt>requests<import_from_stmt>django.core.files.base ContentFile File <import_from_stmt>django.db models<import_from_stmt>django.test.utils override_settings<line_sep># DJANGAE
<import_from_stmt>djangae.contrib sleuth<import_from_stmt>djangae.storage CloudStorage _get_storage_client <import_from_stmt>djangae.test TestCase<class_stmt>ModelWithTextFile(models.Model)<block_start><class_stmt>Meta<block_start>app_label="djangae"<block_end>text_file=models.FileField()<block_end><class_stmt>ModelWithUploadTo(models.Model)<block_start><class_stmt>Meta<block_start>app_label="djangae"<block_end>text_file=models.FileField(upload_to="nested/document/")<block_end><class_stmt>CloudStorageTests(TestCase)<block_start><def_stmt>setUp self<block_start>requests.get('{}/wipe'.format(os.environ["STORAGE_EMULATOR_HOST"]))<line_sep>client=_get_storage_client()<line_sep>client.create_bucket('test_bucket')<line_sep><return>super().setUp()<block_end><def_stmt>test_no_config_raises self<block_start><import_from_stmt>django.core.exceptions ImproperlyConfigured<with_stmt>sleuth.fake("djangae.storage.project_id" return_value=<none>)<block_start><with_stmt>self.assertRaises(ImproperlyConfigured)<block_start>CloudStorage()<block_end><block_end><block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_basic_actions self<block_start>content=b'content'<line_sep>storage=CloudStorage()<line_sep>name=u'tmp.ąćęłńóśźż.马铃薯.zip'<line_sep>f=ContentFile(content name='my_file')<line_sep>filename=storage.save(name f)<line_sep>self.assertIsInstance(filename str)<line_sep>self.assertTrue(filename.endswith(name))<line_sep>self.assertTrue(storage.exists(filename))<line_sep>self.assertEqual(storage.size(filename) len(content))<line_sep>url=storage.url(filename)<line_sep>self.assertIsInstance(url str)<line_sep>self.assertNotEqual(url '')<line_sep>response=requests.get(url)<line_sep>self.assertEqual(response.status_code 200)<line_sep>self.assertEqual(response.content content)<line_sep>f=storage.open(filename)<line_sep>self.assertIsInstance(f File)<line_sep>self.assertEqual(f.read() content)<line_sep># Delete it
storage.delete(filename)<line_sep>self.assertFalse(storage.exists(filename))<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_dotslash_prefix self<block_start>storage=CloudStorage()<line_sep>name='./my_file'<line_sep>f=ContentFile(b'content')<line_sep>filename=storage.save(name f)<line_sep>self.assertEqual(filename name.lstrip("./"))<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_different_bucket self<block_start><import_from_stmt>google.cloud.exceptions NotFound<line_sep>storage=CloudStorage(bucket_name='different_test_bucket')<line_sep>name='./my_file'<line_sep>f=ContentFile(b'content')<with_stmt>self.assertRaises(NotFound)<as>cm<block_start>storage.save(name f)<block_end>self.assertIn('different_test_bucket' cm.exception.message)<block_end>@override_settings(CLOUD_STORAGE_BUCKET='different_test_bucket')<def_stmt>test_different_bucket_config self<block_start><import_from_stmt>google.cloud.exceptions NotFound<line_sep>storage=CloudStorage()<line_sep>name='./my_file'<line_sep>f=ContentFile(b'content')<with_stmt>self.assertRaises(NotFound)<as>cm<block_start>storage.save(name f)<block_end>self.assertIn('different_test_bucket' cm.exception.message)<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_supports_nameless_files self<block_start>storage=CloudStorage()<line_sep>f2=ContentFile(b'nameless-content')<line_sep>storage.save('tmp2' f2)<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_new_objects_get_the_default_acl self<block_start>storage=CloudStorage()<line_sep>filename='example.txt'<line_sep>fileobj=ContentFile(b'content')<with_stmt>sleuth.watch('google.cloud.storage.blob.Blob.upload_from_file')<as>upload_func<block_start>storage.save(filename fileobj)<block_end>self.assertTrue(storage.exists(filename))<line_sep>self.assertIsNone(upload_func.calls[0].kwargs['predefined_acl'])<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_new_objects_with_an_explicit_acl self<block_start>storage=CloudStorage(google_acl='publicRead')<line_sep>filename='example.txt'<line_sep>fileobj=ContentFile(b'content' name=filename)<with_stmt>sleuth.watch('google.cloud.storage.blob.Blob.upload_from_file')<as>upload_func<block_start>storage.save(filename fileobj)<block_end>self.assertTrue(storage.exists(filename))<line_sep>self.assertEqual(upload_func.calls[0].kwargs['predefined_acl'] 'publicRead' )<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket' DEFAULT_FILE_STORAGE='djangae.storage.CloudStorage' )<def_stmt>test_works_with_text_file_fields self<block_start>content=b"content"<line_sep>instance=ModelWithTextFile(text_file=ContentFile(content name="my_file"))<line_sep>instance.save()<line_sep>fetched=ModelWithTextFile.objects.get()<line_sep>self.assertEqual(fetched.text_file.read() content)<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket' DEFAULT_FILE_STORAGE='djangae.storage.CloudStorage' )<def_stmt>test_works_with_upload_to self<block_start>content=b"content"<line_sep>instance=ModelWithUploadTo(text_file=ContentFile(content name="my_file"))<line_sep>instance.save()<line_sep>fetched=ModelWithUploadTo.objects.get()<line_sep>self.assertEqual(fetched.text_file.read() content)<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_open_uses_correct_bucket self<block_start>storage=CloudStorage()<line_sep>filename=storage.save('file1' ContentFile(b'content' name='file1'))<line_sep>storage=CloudStorage()# new instance
storage._open(filename)<block_end>@override_settings(CLOUD_STORAGE_BUCKET='test_bucket')<def_stmt>test_delete_uses_correct_bucket self<block_start>storage=CloudStorage()<line_sep>filename=storage.save('file1' ContentFile(b'content' name='file1'))<line_sep>storage=CloudStorage()# new instance
storage.delete(filename)<line_sep>self.assertFalse(storage.exists(filename))<block_end><block_end> |
<import_stmt>json<import_stmt>unittest<import_stmt>unittest.mock<import_stmt>sys<import_from_stmt>unittest mock<import_stmt>os<import_from_stmt>helpers utils<line_sep>sys.path.insert(0 os.path.abspath(os.path.join(os.getcwd() "configfinder/")))<line_sep>sys.modules['configfinder.builder']=unittest.mock.Mock()<line_sep># Mocking builder like so: https://stackoverflow.com/questions/8658043/how-to-mock-an-import
sys.modules['builder']=unittest.mock.Mock()<line_sep># Mocking builder like so: https://stackoverflow.com/questions/8658043/how-to-mock-an-import
sys.modules["config_settings.MAX_TIMEOUT_PER_PACKAGE"]=1  # unittest.mock.Mock(MAX_TIMEOUT_PER_PACKAGE=1)
<import_stmt>configfinder.fuzzer_wrapper<import_from_stmt>configfinder minimzer<import_stmt>sh<import_stmt>shutil<import_stmt>os<class_stmt>TestAflFuzzerWrapper(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>os.makedirs("test_data" exist_ok=<true>)<line_sep>self.volume_path="test_data/test_output_volume"<line_sep>os.makedirs(self.volume_path exist_ok=<true>)<line_sep>self.jpg_binary_path="test_data/jpg_binary_main"<line_sep>aflgcc=sh.Command("afl-gcc")<line_sep>aflgcc("test/mock_data/input_mock/jpg_binary/main.c" "-o" self.jpg_binary_path)<line_sep>self.timeout_binary_path="test_data/timeout_binary_main"<line_sep>aflgcc("test/mock_data/input_mock/timeout_binary/main.c" "-o" self.timeout_binary_path)<block_end><def_stmt>tearDown self<block_start>shutil.rmtree("test_data")<block_end><def_stmt>test_multi_core_fuzzing self<block_start>package_name="jpg_parser"<line_sep>binary_path=self.jpg_binary_path<line_sep>parameter="@@"<line_sep>fuzz_duration=30<line_sep>seeds_dir="test/mock_data/mock_seeds/jpg_samples"<with_stmt>mock.patch("uuid.uuid4")<as>uuidmock<block_start>uuidmock.return_value="mockuuid"<line_sep>fuzzer_wrapper=configfinder.fuzzer_wrapper.AflFuzzWrapper(volume_path=self.volume_path package=package_name binary_path=binary_path parameter=parameter fuzz_duration=fuzz_duration seeds_dir=seeds_dir afl_config_file_path=os.path.join(self.volume_path package_name os.path.basename(binary_path))+".afl_conf")<block_end>fuzzer_wrapper.start_fuzzer(cores=4)<line_sep>self.assertTrue(os.path.exists(os.path.join(fuzzer_wrapper.get_afl_multi_core_config_dict()["output"] fuzzer_wrapper.session_name+"000/fuzzer_stats")))<line_sep>self.assertGreater(int(utils.get_afl_stats_from_syncdir(fuzzer_wrapper.multicore_dict["output"])["execs_done"]) 0)<block_end><def_stmt>test_multi_core_fuzzing_timeout self<block_start>package_name="timeut_jpg_parser"<line_sep>binary_path=self.timeout_binary_path<line_sep>parameter="@@"<line_sep>fuzz_duration=20<line_sep>seeds_dir="test/mock_data/mock_seeds/jpg_samples"<line_sep>log_dict={}<with_stmt>mock.patch("uuid.uuid4")<as>uuidmock<block_start>uuidmock.return_value="mockuuid"<line_sep>fuzzer_wrapper=configfinder.fuzzer_wrapper.AflFuzzWrapper(volume_path=self.volume_path package=package_name binary_path=binary_path parameter=parameter fuzz_duration=fuzz_duration seeds_dir=seeds_dir log_dict=log_dict)<block_end>self.assertFalse(fuzzer_wrapper.start_fuzzer(cores=4))<line_sep>print(log_dict)<block_end><block_end>"""
class TestFuzzingWrapper(unittest.TestCase):
def test_wrong_qemu_invocation(self, ):
if os.path.exists("afl_out"):
shutil.rmtree("afl_out")
aflgcc = sh.Command("afl-gcc")
aflgcc("test/mock_data/input_mock/jpg_binary/main.c", "-o", "test/mock_data/input_mock/jpg_binary/main")
fuzzer_args = ["-Q", "-i", "test/mock_data/mock_seeds", "-o", "afl_out", "--",
"test/mock_data/input_mock/jpg_binary/main", "@@"]
self.assertEqual(
configfinder.fuzzer_wrapper.afl_fuzz_wrapper(fuzzer_args, "test/mock_data/input_mock/jpg_binary/main",
fuzz_duration=6), True)
self.assertEqual(os.path.exists("afl_out/fuzzer_stats"), True)
shutil.rmtree("afl_out")
def test_wrong_nonqemu_invocation(self, ):
if os.path.exists("afl_out"):
shutil.rmtree("afl_out")
gcc = sh.Command("gcc")
command = gcc(
["test/mock_data/input_mock/jpg_binary/main.c", "-o", "test/mock_data/input_mock/jpg_binary/main"],
_out=sys.stdout)
fuzzer_args = ["-i", "test/mock_data/mock_seeds", "-o", "afl_out", "--",
"test/mock_data/input_mock/jpg_binary/main", "@@"]
self.assertEqual(
configfinder.fuzzer_wrapper.afl_fuzz_wrapper(fuzzer_args, "test/mock_data/input_mock/jpg_binary/main",
fuzz_duration=6), True)
self.assertEqual(os.path.exists("afl_out/fuzzer_stats"), True)
shutil.rmtree("afl_out")
def test_fuzzer_normal(self):
volume_path = "test/test_output_volume"
name = "test_package"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter=None,
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=name, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
"test/test_output_volume/test_package/main/afl_fuzz_mockuuid")
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_minimized(self):
volume_path = "test/test_output_volume"
name = "main"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuidmin"
m = minimzer.minize(parameter="@@", seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main", package=None,
volume_path=volume_path, afl_config_file_name="main.afl_config", tmin_total_time=1000)
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter="@@",
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=None, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
os.path.join(volume_path, name, "main/afl_fuzz_mockuuid"))
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_resume(self):
volume_path = "test/test_output_volume"
name = "test_package"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter="@@",
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=name, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=15, timeout=1500.0)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
"test/test_output_volume/test_package/main/afl_fuzz_mockuuid")
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "resume"
configfinder.fuzzer_wrapper.resume_fuzzer("test/test_output_volume/test_package/main/afl_fuzz_mockuuid",
binary_path="test/mock_data/input_mock/jpg_binary/main",
parameter="@@", timeout=1500.0, fuzz_duration=10)
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_minimized_failed(self):
volume_path = "test/test_output_volume"
name = "main"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuidmin"
m = minimzer.minize(parameter="@@", seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main", package=None,
volume_path=volume_path, afl_config_file_name="main.afl_config", tmin_total_time=1000)
uuidmock.return_value = "mockuuid"
for file in os.listdir(os.path.join(volume_path, name, "main/afl_tmin_mockuuidmin/")):
with open(os.path.join(os.path.join(volume_path, name, "main/afl_tmin_mockuuidmin/", file)),
"w"):
pass
# shutil.rmtree(os.path.join(volume_path,name,"main/afl_tmin_mockuuidmin/"))
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter=None,
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=None, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
# with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
# aflconfigdict = json.load(testaflfp)
# self.assertEqual(aflconfigdict["afl_out_dir"],
# os.path.join(volume_path, name, "main/afl_fuzz_mockuuid"))
# self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
"""<line_sep> |
"""Functions related to usign feh as wallpaper setter."""<import_stmt>subprocess<import_from_stmt>pathlib Path<import_from_stmt>simber Logger<line_sep># Declare the logger
logger=Logger("feh")<class_stmt>feh<block_start><def_stmt>__init__ self<block_start>self.feh_config_path=Path('~/.fehbg').expanduser()<line_sep>self.current=self._find_current()<block_end><def_stmt>_find_current self<block_start>"""
Extract the current wall path.
"""<line_sep>logger.debug("{}".format(open(self.feh_config_path).read().split(' ')[-2]))<line_sep><return>open(self.feh_config_path).read().split(' ')[-2]<block_end><def_stmt>restore self<block_start>"""
Restore the wallpaper
"""<line_sep>command="feh --bg-fill {}".format(self.current)<line_sep>subprocess.Popen(command.split() stdout=subprocess.PIPE)<block_end><def_stmt>set self file_path<block_start>"""
Set the wallpaper temporarily.
"""<line_sep>command="feh --bg-fill {}".format(file_path)<line_sep>p=subprocess.Popen(command.split(' ') stdout=subprocess.PIPE)<line_sep>ret,err=p.communicate()<block_end><def_stmt>set_perm self file_path<block_start>"""
Set the wallpaper permanently.
"""<line_sep>self.set(file_path)<block_end><block_end> |
<import_from_stmt>pydantic BaseModel# pylint: disable=no-name-in-module
<import_from_stmt>...utils kubernetes<class_stmt>ServiceAccount(BaseModel)<block_start>create:bool<line_sep>name:str<line_sep>annotations:kubernetes.Annotations<block_end> |
<import_from_stmt>.report_server ReportServer<import_from_stmt>.report_client ReportClient<import_from_stmt>.record ReportRecord<import_from_stmt>.nsga_iii NonDominatedSorting SortAndSelectPopulation<line_sep> |
<import_from_future_stmt> absolute_import division print_function <line_sep>__metaclass__=type<line_sep>DOCUMENTATION='''
name: apt_keys
author: Manala (@manala)
short_description: returns a curated keys list
description:
- Takes a keys list and returns it curated.
'''<import_from_stmt>ansible.plugins.lookup LookupBase<import_from_stmt>ansible.errors AnsibleError<import_from_stmt>ansible.module_utils.six string_types<class_stmt>LookupModule(LookupBase)<block_start><def_stmt>run self terms variables=<none> **kwargs<block_start>results=[]<line_sep>keys=self._flatten(terms[0])<line_sep>keysPatterns=terms[1]<line_sep>repositories=terms[2]<line_sep>itemDefault={}<line_sep># Handle repositories defined as reversed preferences
<for_stmt>repository repositories[::-1]<block_start><if_stmt>'key'<in>repository<block_start>keys.insert(0 repository.get('key'))<block_end><block_end><for_stmt>key keys<block_start>items=[]<line_sep>item=itemDefault.copy()<line_sep># Short syntax
<if_stmt>isinstance(key string_types)<block_start>item.update(keysPatterns.get(key))<block_end><else_stmt># Must be a dict
<block_start><if_stmt><not>isinstance(key dict)<block_start><raise>AnsibleError('Expected a dict but was a %s'%type(key))<block_end># Check id key
<if_stmt>'id'<not><in>key<block_start><raise>AnsibleError('Missing "id" key')<block_end>item.update(key)<block_end>items.append(item)<line_sep># Merge by index key
<for_stmt>item items<block_start>itemFound=<false><for_stmt>i,result enumerate(results)<block_start><if_stmt>result['id']<eq>item['id']<block_start>results[i]=item<line_sep>itemFound=<true><line_sep><break><block_end><block_end><if_stmt><not>itemFound<block_start>results.append(item)<block_end><block_end><block_end><return>results<block_end><block_end> |
<import_from_stmt>textwrap dedent<import_stmt>attack_flow.graphviz<def_stmt>test_convert_attack_flow_to_dot <block_start>flow={"actions":[{"id":"action1" "name":"action-one" } {"id":"action2" "name":"action-two" } ] "assets":[{"id":"asset1"} {"id":"asset2"} ] "relationships":[{"source":"action1" "target":"asset1" } {"source":"asset1" "target":"action2" } {"source":"action2" "target":"asset2" } ] }<line_sep>output=attack_flow.graphviz.convert(flow)<assert_stmt>output<eq>dedent('''\
digraph {
node [shape=box,style="rounded,filled,fixedsize=true,width=2,height=1"]
"action1" [fillcolor=pink,label="action-one"]
"action2" [fillcolor=pink,label="action-two"]
"asset1" [fillcolor=lightblue1]
"asset2" [fillcolor=lightblue1]
}''')<block_end><def_stmt>test_convert_complex_attack_flow_to_dot <block_start>flow={"flow":{"type":"attack-flow" "id":"flow-1" "name":"Attack Flow Export" "author":"Unspecified" "created":"2022-01-14T13:59:42-05:00"} "actions":[{"id":"flow-1/action-3" "type":"action" "name":"T1133: External Remote Services" "description":"Kubernetes Dashboard" "reference":"" "succeeded":1 "confidence":1 "logic_operator_language":"" "logic_operator":"AND"} {"id":"flow-1/action-11" "type":"action" "name":"T1610: Deploy Container" "description":"Deploy cryptomining container" "reference":"" "succeeded":1 "confidence":1 "logic_operator_language":"" "logic_operator":"AND"} {"id":"flow-1/action-12" "type":"action" "name":"T1552.001: Unsecured Credentials: Credentials In Files" "description":"Harvest AWS service credentials." "reference":"" "succeeded":1 "confidence":0 "logic_operator_language":"" "logic_operator":"AND"} {"id":"flow-1/action-17" "type":"action" "name":"T1496: Resource Highjacking" "description":"Run cryptomining software" "reference":"" "succeeded":1 "confidence":1 "logic_operator_language":"" "logic_operator":"AND"} {"id":"flow-1/action-18" "type":"action" "name":"T1078.004: Valid Accounts: Cloud Accounts" "description":"Use harvested AWS credentials" "reference":"" "succeeded":1 "confidence":0 "logic_operator_language":"" "logic_operator":"AND"} {"id":"flow-1/action-23" "type":"action" "name":"T1530: Data from Cloud Storage Object" "description":"Download data from storage bucket" "reference":"" "succeeded":1 "confidence":0 "logic_operator_language":"" "logic_operator":"AND"}] "assets":[{"id":"flow-1/asset-1" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-7" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-9" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-13" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-15" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-19" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-21" "type":"asset" "state":"compromised"} {"id":"flow-1/asset-24" "type":"asset" "state":"compromised"}] "relationships":[{"source":"flow-1/asset-1" "type":"flow-1#state" "target":"flow-1/action-3"} {"source":"flow-1/action-3" "type":"flow-1#state-change" "target":"flow-1/asset-7"} {"source":"flow-1/action-3" "type":"flow-1#state-change" "target":"flow-1/asset-9"} {"source":"flow-1/asset-7" "type":"flow-1#state" "target":"flow-1/action-11"} {"source":"flow-1/asset-9" "type":"flow-1#state" "target":"flow-1/action-12"} {"source":"flow-1/action-11" "type":"flow-1#state-change" "target":"flow-1/asset-13"} {"source":"flow-1/action-12" "type":"flow-1#state-change" "target":"flow-1/asset-15"} {"source":"flow-1/asset-13" "type":"flow-1#state" "target":"flow-1/action-17"} {"source":"flow-1/asset-15" "type":"flow-1#state" "target":"flow-1/action-18"} {"source":"flow-1/action-17" "type":"flow-1#state-change" "target":"flow-1/asset-19"} {"source":"flow-1/action-18" "type":"flow-1#state-change" "target":"flow-1/asset-21"} {"source":"flow-1/asset-21" "type":"flow-1#state" "target":"flow-1/action-23"} {"source":"flow-1/action-23" "type":"flow-1#state-change" "target":"flow-1/asset-24"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/action-3"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/action-11"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/action-12"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/action-17"} {"source":"flow-1" "type":"flow-1#flow-edge" 
"target":"flow-1/action-18"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/action-23"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-1"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-7"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-9"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-13"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-15"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-19"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-21"} {"source":"flow-1" "type":"flow-1#flow-edge" "target":"flow-1/asset-24"}] "object_properties":[] "data_properties":[{"source":"flow-1/asset-1" "type":"flow-1#description" "target":"Kubernetes Dashboard"} {"source":"flow-1/asset-1" "type":"flow-1#state" "target":"exposed"} {"source":"flow-1/asset-1" "type":"flow-1#state" "target":"unsecured"} {"source":"flow-1/asset-7" "type":"flow-1#description" "target":"Kubernetes Cluster"} {"source":"flow-1/asset-9" "type":"flow-1#description" "target":"Kubernetes Admin Priv"} {"source":"flow-1/asset-13" "type":"flow-1#description" "target":"Kubernetes Container"} {"source":"flow-1/asset-15" "type":"flow-1#description" "target":"AWS Credentials"} {"source":"flow-1/asset-19" "type":"flow-1#description" "target":"Cryptocurrency"} {"source":"flow-1/asset-21" "type":"flow-1#description" "target":"AWS Access"} {"source":"flow-1/asset-24" "type":"flow-1#description" "target":"Data"}]}<line_sep>output=attack_flow.graphviz.convert(flow)<assert_stmt>output<eq>dedent('''\
digraph {
node [shape=box,style="rounded,filled,fixedsize=true,width=2,height=1"]
"flow-1/action-3" [fillcolor=pink,label="T1133: External\\nRemote Services"]
"flow-1/action-11" [fillcolor=pink,label="T1610: Deploy\\nContainer"]
"flow-1/action-12" [fillcolor=pink,label="T1552.001: Unsecured\\nCredentials:\\nCredentials In Files"]
"flow-1/action-17" [fillcolor=pink,label="T1496: Resource\\nHighjacking"]
"flow-1/action-18" [fillcolor=pink,label="T1078.004: Valid\\nAccounts: Cloud\\nAccounts"]
"flow-1/action-23" [fillcolor=pink,label="T1530: Data from\\nCloud Storage Object"]
"flow-1/asset-1" [fillcolor=lightblue1,label="Kubernetes Dashboard"]
"flow-1/asset-7" [fillcolor=lightblue1,label="Kubernetes Cluster"]
"flow-1/asset-9" [fillcolor=lightblue1,label="Kubernetes Admin\\nPriv"]
"flow-1/asset-13" [fillcolor=lightblue1,label="Kubernetes Container"]
"flow-1/asset-15" [fillcolor=lightblue1,label="AWS Credentials"]
"flow-1/asset-19" [fillcolor=lightblue1,label="Cryptocurrency"]
"flow-1/asset-21" [fillcolor=lightblue1,label="AWS Access"]
"flow-1/asset-24" [fillcolor=lightblue1,label="Data"]
"flow-1/asset-1" -> "flow-1/action-3" [label="requires"]
"flow-1/action-3" -> "flow-1/asset-7" [label="provides"]
"flow-1/action-3" -> "flow-1/asset-9" [label="provides"]
"flow-1/asset-7" -> "flow-1/action-11" [label="requires"]
"flow-1/asset-9" -> "flow-1/action-12" [label="requires"]
"flow-1/action-11" -> "flow-1/asset-13" [label="provides"]
"flow-1/action-12" -> "flow-1/asset-15" [label="provides"]
"flow-1/asset-13" -> "flow-1/action-17" [label="requires"]
"flow-1/asset-15" -> "flow-1/action-18" [label="requires"]
"flow-1/action-17" -> "flow-1/asset-19" [label="provides"]
"flow-1/action-18" -> "flow-1/asset-21" [label="provides"]
"flow-1/asset-21" -> "flow-1/action-23" [label="requires"]
"flow-1/action-23" -> "flow-1/asset-24" [label="provides"]
"flow-1/asset-1-exposed-state" [fillcolor=lightgreen,label="exposed"]
"flow-1/asset-1-unsecured-state" [fillcolor=lightgreen,label="unsecured"]
"flow-1/asset-1-exposed-state" -> "flow-1/asset-1" [dir=none,style=dashed]
"flow-1/asset-1-unsecured-state" -> "flow-1/asset-1" [dir=none,style=dashed]
}''')<block_end># noqa: E501
<def_stmt>test_align_node_label_one_liner <block_start><assert_stmt>attack_flow.graphviz.align_node_label("one liner")<eq>"one liner"<block_end><def_stmt>test_align_node_label_multiline <block_start><assert_stmt>attack_flow.graphviz.align_node_label("multi liner label example" width=15)<eq>"multi liner\\nlabel example"<block_end><def_stmt>test_align_node_label_string_escaping <block_start><assert_stmt>attack_flow.graphviz.align_node_label("a \"tricky\" example")<eq>'a \\"tricky\\" example'<block_end> |
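# A minimal sketch (not the project's implementation) of a helper consistent
# with the three tests above: escape double quotes, fill to `width` columns,
# and join lines with a literal backslash-n that Graphviz renders as a break.
import textwrap

def align_node_label(label, width=20):
    escaped = label.replace('"', '\\"')
    return "\\n".join(textwrap.wrap(escaped, width))

assert align_node_label("one liner") == "one liner"
assert align_node_label("multi liner label example", width=15) == "multi liner\\nlabel example"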
<import_from_stmt>.decorators.base VueDecorator<import_from_stmt>.decorators.prop Prop<import_from_stmt>.decorators.data Data<import_from_stmt>.decorators.lifecycle_hook LifecycleHook<import_from_stmt>.decorators.method Method<import_from_stmt>.decorators.render Render<import_from_stmt>.decorators.mixins Mixins<import_from_stmt>.decorators.template Template<import_from_stmt>.decorators.directive DirectiveHook<import_from_stmt>.decorators.extends Extends<import_from_stmt>.decorators.components Components<import_from_stmt>.decorators.state State<import_from_stmt>.decorators.plugin Plugin<import_from_stmt>.decorators.routes Routes<def_stmt>merge_templates sub<block_start><def_stmt>get_template_slots cls<block_start>template_slots=getattr(cls "template_slots" {})<if_stmt>isinstance(template_slots str)<block_start>template_slots={"default":template_slots}<block_end><return>template_slots<block_end>base=sub.__base__<line_sep>template_merging=hasattr(base "template")<and>getattr(sub "template_slots" <false>)<if_stmt>template_merging<block_start>base_template=merge_templates(base)<line_sep>base_slots=get_template_slots(base)<line_sep>sub_slots=get_template_slots(sub)<line_sep>slots=dict(tuple(base_slots.items())+tuple(sub_slots.items()))<line_sep>default=slots.get("default")<line_sep><return>base_template.format(default **slots)<block_end><return>getattr(sub "template" "{}")<block_end><class_stmt>BrythonObjectWorkarounds(type)<block_start>"""
Fixes the following Brython bugs:
* https://github.com/brython-dev/brython/issues/904
"""<line_sep>@property<def_stmt>__base__ cls<block_start><return>cls.__bases__[0]<block_end><block_end><class_stmt>Wrapper(metaclass=BrythonObjectWorkarounds)<block_start><pass><block_end><class_stmt>AttributeDictFactory<block_start>@classmethod<def_stmt>get_item cls wrapper<block_start><if_stmt>isinstance(wrapper BrythonObjectWorkarounds)<block_start><return>cls(wrapper).generate_item()<block_end><return>wrapper<block_end>@classmethod<def_stmt>get_wrapper_base cls wrapper<block_start>base=wrapper.__base__<if_stmt>base<is>Wrapper<block_start><return>wrapper<block_end><return>cls.get_wrapper_base(base)<block_end><def_stmt>__init__ self wrapper<block_start>self.wrapper=wrapper<line_sep>self.base=self.get_wrapper_base(wrapper)<block_end><def_stmt>__attributes__ self<block_start>all_objects=set(dir(self.wrapper))<line_sep>all_objects.update(getattr(self.wrapper "__annotations__" {}).keys())<line_sep>own_objects=all_objects-set(dir(self.base))-{"__annotations__"}<for_stmt>obj_name own_objects<block_start><yield>obj_name getattr(self.wrapper obj_name <none>)<block_end><block_end><def_stmt>auto_decorate self obj_name obj<block_start><return>obj<block_end><def_stmt>generate_item self<block_start>object_map={}<for_stmt>obj_name,obj self.__attributes__()<block_start>obj=self.auto_decorate(obj_name obj)<if_stmt>isinstance(obj VueDecorator)<block_start>obj.update(object_map)<block_end><block_end><return>object_map<block_end><block_end><class_stmt>VueComponentFactory(AttributeDictFactory)<block_start><def_stmt>_property_mixin self prop_name<block_start><if_stmt>prop_name<not><in>dir(self.wrapper)<block_start><return>{"required":<true>}<block_end><else_stmt><block_start><return>{"default":getattr(self.wrapper prop_name)}<block_end><block_end><def_stmt>auto_decorate self obj_name obj<block_start><if_stmt>obj_name<in>LifecycleHook.mapping<block_start>obj=LifecycleHook(obj_name obj)<block_end><elif_stmt>obj_name<eq>"template"<block_start>obj=Template(merge_templates(self.wrapper))<block_end><elif_stmt>obj_name<eq>"extends"<block_start><if_stmt>obj<block_start>extends=self.wrapper.__base__<if>isinstance(obj bool)<else>obj<line_sep>obj=Extends(VueComponentFactory.get_item(extends))<block_end><block_end><elif_stmt>obj_name<eq>"mixins"<block_start>obj=Mixins(*(VueComponentFactory.get_item(m)<for>m obj))<block_end><elif_stmt>obj_name<eq>"components"<block_start>obj=Components(*(VueComponentFactory.get_item(m)<for>m obj))<block_end><elif_stmt>obj_name<eq>"render"<block_start>obj=Render(obj)<block_end><elif_stmt>callable(obj)<block_start>obj=Method(obj)<block_end><elif_stmt>obj_name<in>getattr(self.wrapper "__annotations__" {})<block_start>obj=Prop(obj_name self.wrapper.__annotations__[obj_name] self._property_mixin(obj_name) )<block_end><elif_stmt><not>isinstance(obj VueDecorator)<block_start>obj=Data(obj_name obj)<block_end><return>super().auto_decorate(obj_name obj)<block_end><def_stmt>generate_item self<block_start>init_dict=super().generate_item()<line_sep>_data=init_dict.get("data" <none>)<if_stmt><not>_data<block_start><return>init_dict<block_end><def_stmt>get_initialized_data this<block_start>initialized_data={}<for_stmt>name,date _data.items()<block_start>initialized_data[name]=date(this)<if>callable(date)<else>date<block_end><return>initialized_data<block_end>init_dict.update(data=get_initialized_data)<line_sep><return>init_dict<block_end><block_end><class_stmt>VueDirectiveFactory(AttributeDictFactory)<block_start><def_stmt>auto_decorate self obj_name 
obj<block_start><if_stmt>callable(obj)<block_start>obj=DirectiveHook(obj hooks=(obj_name ) name=self.wrapper.name)<block_end><return>super().auto_decorate(obj_name obj)<block_end>@classmethod<def_stmt>get_item cls wrapper<block_start>default={wrapper.name:{}}<line_sep>dct=super().get_item(wrapper)<line_sep><return>dct.get("directives" default).popitem()[1]<block_end><block_end><class_stmt>VueStoreFactory(AttributeDictFactory)<block_start><def_stmt>auto_decorate self obj_name obj<block_start><if_stmt>obj_name<eq>"plugins"<block_start>obj=Plugin(obj)<block_end><elif_stmt><not>isinstance(obj VueDecorator)<block_start>obj=State(obj_name obj)<block_end><return>super().auto_decorate(obj_name obj)<block_end><block_end><class_stmt>VueRouterFactory(AttributeDictFactory)<block_start><def_stmt>auto_decorate self obj_name obj<block_start><if_stmt>obj_name<eq>"routes"<block_start>obj=Routes(obj)<block_end><return>super().auto_decorate(obj_name obj)<block_end><block_end> |
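# A toy sketch (hypothetical templates) of the slot merging implemented by
# merge_templates above: a subclass's template_slots are substituted into its
# base class's template via str.format; a bare string counts as the default slot.
class Layout(Wrapper):
    template = "<div>{header}{default}</div>"

class Page(Layout):
    template_slots = {"header": "<h1>Hi</h1>", "default": "<p>body</p>"}

assert merge_templates(Page) == "<div><h1>Hi</h1><p>body</p></div>"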
"""
Ease-of-use context-manager classes & functions.
There isn't much (or any) additional functionality provided in this module,
most things are nicer-packaged combinations to already available methods from
`pytermgui.ansi_interface`.
"""<import_from_future_stmt> annotations<import_from_stmt>os name<import_from_stmt>contextlib contextmanager<import_from_stmt>typing Callable Generator Any Union List<import_from_stmt>.ansi_interface is_interactive save_cursor restore_cursor print_to show_cursor hide_cursor set_echo unset_echo set_alt_buffer unset_alt_buffer cursor_up report_mouse translate_mouse MouseEvent <line_sep># TODO: Move this absolute beast to a types submodule
MouseTranslator=Callable[[str] Union[List[Union[MouseEvent <none>]] <none>]]<line_sep>@contextmanager<def_stmt>cursor_at pos:tuple[int int]<arrow>Generator[Callable[<ellipsis> <none>] <none> <none>]<block_start>"""Get callable to print at `pos`, incrementing `y` on every print"""<line_sep>offset=0<line_sep>posx,posy=pos<def_stmt>printer *args:tuple[Any <ellipsis>]<arrow><none><block_start>"""Print to posx, current y"""<line_sep><nonlocal>offset<line_sep>print_to((posx posy+offset) *args)<line_sep>offset<augadd>1<block_end><try_stmt><block_start>save_cursor()<line_sep><yield>printer<block_end><finally_stmt><block_start>restore_cursor()<block_end><block_end>@contextmanager<def_stmt>alt_buffer echo:bool=<false> cursor:bool=<true><arrow>Generator[<none> <none> <none>]<block_start>"""Create non-scrollable alt-buffer
This is useful for retrieving original terminal state after program end."""<try_stmt><block_start>set_alt_buffer()<if_stmt><not>echo<and>name<eq>"posix"<and><not>is_interactive()<block_start>unset_echo()<block_end><if_stmt><not>cursor<block_start>hide_cursor()<block_end><yield><block_end><finally_stmt><block_start>unset_alt_buffer()<if_stmt><not>echo<and>name<eq>"posix"<and><not>is_interactive()<block_start>set_echo()<line_sep>cursor_up()<block_end><if_stmt><not>cursor<block_start>show_cursor()<line_sep>cursor_up()<block_end><block_end><block_end>@contextmanager<def_stmt>mouse_handler events:list[str] method:str="decimal_xterm"<arrow>Generator[MouseTranslator|<none> <none> <none>]<block_start>"""Return a mouse handler function
Note: This method only supports `decimal_urxvt` and `decimal_xterm`, as they are the most
universal.
See `help(report_mouse)` for help about all of the methods.
Example use:
```python3
import pytermgui as ptg
with ptg.mouse_handler(["press", "hover"]) as mouse:
while True:
event = mouse(ptg.getch())
print(type(event))
print(event.action)
print(event.position)
'pytermgui.ansi_interface.MouseEvent'
'pytermgui.ansi_interface.MouseAction.LEFT_CLICK'
(33, 55)
```
"""<line_sep>event=<none><try_stmt><block_start><for_stmt>event events<block_start>report_mouse(event method=method)<block_end><yield><lambda>code:translate_mouse(code method=method)<block_end><finally_stmt><block_start><if_stmt>event<is><not><none><block_start>report_mouse(event method=method stop=<true>)<block_end><block_end><block_end> |
<import_stmt>pytest<import_from_stmt>pywhat Distribution Filter pywhat_tags<import_from_stmt>pywhat.helper CaseInsensitiveSet InvalidTag load_regexes<line_sep>regexes=load_regexes()<line_sep>@pytest.mark.skip("Dist.get_regexes() returns the regex list with the default filter of 0.1:1. \
load_regexes() returns all regexes without that filter. \
This fails because one of them is filtered and the other is not.")<def_stmt>test_distribution <block_start>dist=Distribution()<assert_stmt>regexes<eq>dist.get_regexes()<block_end><def_stmt>test_distribution2 <block_start>filter={"MinRarity":0.3 "MaxRarity":0.8 "Tags":["Networking"] "ExcludeTags":["Identifiers"] }<line_sep>dist=Distribution(filter)<for_stmt>regex regexes<block_start><if_stmt>(0.3<le>regex["Rarity"]<le>0.8<and>"Networking"<in>regex["Tags"]<and>"Identifiers"<not><in>regex["Tags"])<block_start><assert_stmt>regex<in>dist.get_regexes()<block_end><block_end><block_end><def_stmt>test_distribution3 <block_start>filter1={"MinRarity":0.3 "Tags":["Networking"] "ExcludeTags":["Identifiers"]}<line_sep>filter2={"MinRarity":0.4 "MaxRarity":0.8 "ExcludeTags":["Media"]}<line_sep>dist=Distribution(filter1)&Distribution(filter2)<assert_stmt>dist._dict["MinRarity"]<eq>0.4<assert_stmt>dist._dict["MaxRarity"]<eq>0.8<assert_stmt>dist._dict["Tags"]<eq>CaseInsensitiveSet(["Networking"])<assert_stmt>dist._dict["ExcludeTags"]<eq>CaseInsensitiveSet()<for_stmt>regex regexes<block_start><if_stmt>0.4<le>regex["Rarity"]<le>0.8<and>"Networking"<in>regex["Tags"]<block_start><assert_stmt>regex<in>dist.get_regexes()<block_end><block_end><block_end><def_stmt>test_distribution4 <block_start>filter1={"MinRarity":0.3 "Tags":["Networking"] "ExcludeTags":["Identifiers"]}<line_sep>filter2={"MinRarity":0.4 "MaxRarity":0.8 "ExcludeTags":["Media"]}<line_sep>dist=Distribution(filter2)<line_sep>dist<augand>Distribution(filter1)<assert_stmt>dist._dict["MinRarity"]<eq>0.4<assert_stmt>dist._dict["MaxRarity"]<eq>0.8<assert_stmt>dist._dict["Tags"]<eq>CaseInsensitiveSet(["Networking"])<assert_stmt>dist._dict["ExcludeTags"]<eq>CaseInsensitiveSet()<for_stmt>regex regexes<block_start><if_stmt>0.4<le>regex["Rarity"]<le>0.8<and>"Networking"<in>regex["Tags"]<block_start><assert_stmt>regex<in>dist.get_regexes()<block_end><block_end><block_end><def_stmt>test_distribution5 <block_start>filter1={"MinRarity":0.3 "Tags":["Networking"] "ExcludeTags":["Identifiers"]}<line_sep>filter2={"MinRarity":0.4 "MaxRarity":0.8 "ExcludeTags":["Media"]}<line_sep>dist=Distribution(filter1)|Distribution(filter2)<assert_stmt>dist._dict["MinRarity"]<eq>0.3<assert_stmt>dist._dict["MaxRarity"]<eq>1<assert_stmt>dist._dict["Tags"]<eq>CaseInsensitiveSet(pywhat_tags)<assert_stmt>dist._dict["ExcludeTags"]<eq>CaseInsensitiveSet(["Identifiers" "Media"])<for_stmt>regex regexes<block_start><if_stmt>(0.3<le>regex["Rarity"]<le>1<and>"Identifiers"<not><in>regex["Tags"]<and>"Media"<not><in>regex["Tags"])<block_start><assert_stmt>regex<in>dist.get_regexes()<block_end><block_end><block_end><def_stmt>test_distribution6 <block_start>filter1={"MinRarity":0.3 "Tags":["Networking"] "ExcludeTags":["Identifiers"]}<line_sep>filter2={"MinRarity":0.4 "MaxRarity":0.8 "ExcludeTags":["Media"]}<line_sep>dist=Distribution(filter2)<line_sep>dist<augor>Distribution(filter1)<assert_stmt>dist._dict["MinRarity"]<eq>0.3<assert_stmt>dist._dict["MaxRarity"]<eq>1<assert_stmt>dist._dict["Tags"]<eq>CaseInsensitiveSet(pywhat_tags)<assert_stmt>dist._dict["ExcludeTags"]<eq>CaseInsensitiveSet(["Identifiers" "Media"])<for_stmt>regex regexes<block_start><if_stmt>(0.3<le>regex["Rarity"]<le>1<and>"Identifiers"<not><in>regex["Tags"]<and>"Media"<not><in>regex["Tags"])<block_start><assert_stmt>regex<in>dist.get_regexes()<block_end><block_end><block_end><def_stmt>test_distribution7 <block_start><with_stmt>pytest.raises(InvalidTag)<block_start>Distribution({"Tags":"Media" 
"MinRarity":0.7})<block_end><block_end><def_stmt>test_filter <block_start>filter={"MinRarity":0.3 "MaxRarity":0.8 "Tags":["Networking"] "ExcludeTags":["Identifiers"] }<line_sep>filt=Filter(filter)<assert_stmt>filt["MinRarity"]<eq>0.3<assert_stmt>filt["MaxRarity"]<eq>0.8<assert_stmt>filt["Tags"]<eq>CaseInsensitiveSet(["networking"])<assert_stmt>filt["ExcludeTags"]<eq>CaseInsensitiveSet(["identifiers"])<block_end><def_stmt>test_filter2 <block_start>filter1={"MinRarity":0.3 "MaxRarity":0.8 "Tags":["Networking"] "ExcludeTags":["Identifiers"] }<line_sep>filter2={"MinRarity":0.5 "Tags":["Networking" "Identifiers"]}<line_sep>filt=Filter(filter1)&Filter(filter2)<assert_stmt>filt["MinRarity"]<eq>0.5<assert_stmt>filt["MaxRarity"]<eq>0.8<assert_stmt>filt["Tags"]<eq>CaseInsensitiveSet(["networking"])<assert_stmt>filt["ExcludeTags"]<eq>CaseInsensitiveSet([])<block_end><def_stmt>test_filter3 <block_start>filter={"MinRarity":0.3 "MaxRarity":0.8 "Tags":["Networking"] "ExcludeTags":["Identifiers"] }<line_sep>filt=Filter(filter)<line_sep>dist=Distribution(filt)<for_stmt>regex regexes<block_start><if_stmt>(0.3<le>regex["Rarity"]<le>0.8<and>"Networking"<in>regex["Tags"]<and>"Identifiers"<not><in>regex["Tags"])<block_start><assert_stmt>regex<in>dist.get_regexes()<block_end><block_end><block_end> |
# ====================================================================
# Copyright (c) 2004-2010 Open Source Applications Foundation.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ====================================================================
#
<import_from_stmt>icu UnicodeString BreakIterator Locale<def_stmt>printTextRange iterator start end<block_start>s=iterator.getText().getText()<line_sep>print("%2d %2d %s|%s|%s"%(start end s[:start] s[start:end] s[end:]))<block_end>#def printTextRange(iterator, start, end):
#
# u = iterator.getText().getText(UnicodeString())
# print "%2d %2d %s|%s|%s" %(start, end,
# UnicodeString(u, 0, start),
# UnicodeString(u, start, end-start),
# UnicodeString(u, end))
<def_stmt>printEachForward boundary<block_start>start=boundary.first()<for_stmt>end boundary<block_start>printTextRange(boundary start end)<line_sep>start=end<block_end><block_end># Print each element in reverse order:
<def_stmt>printEachBackward boundary<block_start>end=boundary.last()<while_stmt><true><block_start>start=boundary.previous()<if_stmt>start<eq>BreakIterator.DONE<block_start><break><block_end>printTextRange(boundary start end)<line_sep>end=start<block_end><block_end># Print the first element
<def_stmt>printFirst boundary<block_start>start=boundary.first()<line_sep>end=boundary.next()<line_sep>printTextRange(boundary start end)<block_end># Print the last element
<def_stmt>printLast boundary<block_start>end=boundary.last()<line_sep>start=boundary.previous()<if_stmt>start<ne>BreakIterator.DONE<block_start>printTextRange(boundary start end)<block_end><block_end># Print the element at a specified position
<def_stmt>printAt boundary pos<block_start>end=boundary.following(pos)<line_sep>start=boundary.previous()<line_sep>printTextRange(boundary start end)<block_end><def_stmt>main <block_start>print("ICU Break Iterator Sample Program")<line_sep>print("C++ Break Iteration in Python")<line_sep>stringToExamine=u"Aaa bbb ccc. Ddd eee fff."<line_sep>print("Examining: " stringToExamine)<line_sep># print each sentence in forward and reverse order
boundary=BreakIterator.createSentenceInstance(Locale.getUS())<line_sep>boundary.setText(stringToExamine)<line_sep>print()<line_sep>print("Sentence Boundaries... ")<line_sep>print("----- forward: -----------")<line_sep>printEachForward(boundary)<line_sep>print("----- backward: ----------")<line_sep>printEachBackward(boundary)<line_sep># print each word in order
print()<line_sep>print("Word Boundaries...")<line_sep>boundary=BreakIterator.createWordInstance(Locale.getUS())<line_sep>boundary.setText(stringToExamine)<line_sep>print("----- forward: -----------")<line_sep>printEachForward(boundary)<line_sep># print first element
print("----- first: -------------")<line_sep>printFirst(boundary)<line_sep># print last element
print("----- last: --------------")<line_sep>printLast(boundary)<line_sep># print word at charpos 10
print("----- at pos 10: ---------")<line_sep>printAt(boundary 10)<line_sep>print()<line_sep>print("End C++ Break Iteration in Python")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_from_stmt>functools partial<import_from_stmt>fedot.core.composer.advisor PipelineChangeAdvisor<import_from_stmt>fedot.core.composer.gp_composer.gp_composer GPComposerRequirements<import_from_stmt>fedot.core.debug.metrics RandomMetric<import_from_stmt>fedot.core.optimisers.adapters PipelineAdapter<import_from_stmt>fedot.core.optimisers.gp_comp.gp_operators random_graph<import_from_stmt>fedot.core.optimisers.gp_comp.gp_optimiser GraphGenerationParams<import_from_stmt>fedot.core.optimisers.gp_comp.individual Individual<import_from_stmt>fedot.core.optimisers.gp_comp.operators.selection SelectionTypesEnum individuals_selection random_selection selection tournament_selection <def_stmt>rand_population_gener_and_eval pop_size=4<block_start>models_set=['knn' 'logit' 'rf']<line_sep>requirements=GPComposerRequirements(primary=models_set secondary=models_set max_depth=1)<line_sep>pipeline_gener_params=GraphGenerationParams(advisor=PipelineChangeAdvisor() adapter=PipelineAdapter())<line_sep>random_pipeline_function=partial(random_graph params=pipeline_gener_params requirements=requirements)<line_sep>population=[]<while_stmt>len(population)<ne>pop_size# to ensure uniqueness
<block_start>ind=Individual(random_pipeline_function())<if_stmt>ind<not><in>population<block_start>population.append(ind)<block_end><block_end># evaluation
<for_stmt>ind population<block_start>ind.fitness=obj_function()<block_end><return>population<block_end><def_stmt>obj_function <arrow>float<block_start>metric_function=RandomMetric.get_value<line_sep><return>metric_function()<block_end><def_stmt>test_tournament_selection <block_start>num_of_inds=2<line_sep>population=rand_population_gener_and_eval(pop_size=4)<line_sep>selected_individuals=tournament_selection(individuals=population pop_size=num_of_inds)<assert_stmt>(all([ind<in>population<for>ind selected_individuals])<and>len(selected_individuals)<eq>num_of_inds)<block_end><def_stmt>test_random_selection <block_start>num_of_inds=2<line_sep>population=rand_population_gener_and_eval(pop_size=4)<line_sep>selected_individuals=random_selection(individuals=population pop_size=num_of_inds)<assert_stmt>(all([ind<in>population<for>ind selected_individuals])<and>len(selected_individuals)<eq>num_of_inds)<block_end><def_stmt>test_selection <block_start>num_of_inds=2<line_sep>population=rand_population_gener_and_eval(pop_size=4)<line_sep>graph_params=GraphGenerationParams(advisor=PipelineChangeAdvisor() adapter=PipelineAdapter())<line_sep>selected_individuals=selection(types=[SelectionTypesEnum.tournament] population=population pop_size=num_of_inds params=graph_params)<assert_stmt>(all([ind<in>population<for>ind selected_individuals])<and>len(selected_individuals)<eq>num_of_inds)<block_end><def_stmt>test_individuals_selection_random_individuals <block_start>num_of_inds=2<line_sep>population=rand_population_gener_and_eval(pop_size=4)<line_sep>types=[SelectionTypesEnum.tournament]<line_sep>graph_params=GraphGenerationParams(advisor=PipelineChangeAdvisor() adapter=PipelineAdapter())<line_sep>selected_individuals=individuals_selection(types=types individuals=population pop_size=num_of_inds graph_params=graph_params)<line_sep>selected_individuals_ref=[str(ind)<for>ind selected_individuals]<assert_stmt>(len(set(selected_individuals_ref))<eq>len(selected_individuals)<and>len(selected_individuals)<eq>num_of_inds)<block_end><def_stmt>test_individuals_selection_equality_individuals <block_start>num_of_inds=4<line_sep>population=rand_population_gener_and_eval(pop_size=1)<line_sep>types=[SelectionTypesEnum.tournament]<line_sep>population=[population[0]<for>_ range(4)]<line_sep>graph_params=GraphGenerationParams(advisor=PipelineChangeAdvisor() adapter=PipelineAdapter())<line_sep>selected_individuals=individuals_selection(types=types individuals=population pop_size=num_of_inds graph_params=graph_params)<line_sep>selected_individuals_ref=[str(ind)<for>ind selected_individuals]<assert_stmt>(len(selected_individuals)<eq>num_of_inds<and>len(set(selected_individuals_ref))<eq>1)<block_end> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
<import_stmt>asyncio<import_stmt>threading<import_stmt>time<import_from_stmt>._repeated_timer AtomicCounter<import_from_stmt>._perf_stress_base _PerfTestBase<class_stmt>EventPerfTest(_PerfTestBase)<block_start><def_stmt>__init__ self arguments<block_start>super().__init__(arguments)<if_stmt>self.args.profile<block_start><raise>NotImplementedError("Profiler support for event tests pending.")<block_end><if_stmt>self.args.sync<block_start>self._condition=threading.Condition()<block_end><else_stmt><block_start>self._condition=asyncio.Condition()<block_end>self._start_time=time.time()<line_sep>self._error=<none><line_sep>self._processing=<none><line_sep>self._completed_operations=AtomicCounter()<block_end>@property<def_stmt>completed_operations self<arrow>int<block_start>"""
Total number of operations completed by run_all().
Reset after warmup.
"""<line_sep><return>self._completed_operations.value()<block_end>@property<def_stmt>last_completion_time self<arrow>float<block_start>"""
Elapsed time between start of warmup/run and last completed operation.
Reset after warmup.
"""<line_sep><return>self._last_completion_time-self._start_time<block_end><def_stmt>event_raised_sync self<block_start>self._completed_operations.increment()<line_sep>self._last_completion_time=time.time()<block_end><def_stmt>error_raised_sync self error<block_start><with_stmt>self._condition<block_start>self._error=error<line_sep>self._condition.notify_all()<block_end><block_end><async_keyword><def_stmt>event_raised_async self<block_start>self._completed_operations.increment()<line_sep>self._last_completion_time=time.time()<block_end><async_keyword><def_stmt>error_raised_async self error<block_start><async_keyword><with_stmt>self._condition<block_start>self._error=error<line_sep>self._condition.notify_all()<block_end><block_end><async_keyword><def_stmt>setup self<arrow><none><block_start>"""
Setup called once per parallel test instance.
Used to set up state specific to this test instance.
"""<if_stmt>self.args.sync<block_start>self._processing=threading.Thread(target=self.start_events_sync)<line_sep>self._processing.daemon=<true><line_sep>self._processing.start()<block_end><else_stmt><block_start>self._processing=asyncio.ensure_future(self.start_events_async())<block_end><block_end><async_keyword><def_stmt>cleanup self<arrow><none><block_start>"""
Cleanup called once per parallel test instance.
Used to clean up state specific to this test instance.
"""<if_stmt>self.args.sync<block_start>self.stop_events_sync()<line_sep>self._processing.join()<block_end><else_stmt><block_start><await>self.stop_events_async()<line_sep><await>self._processing<block_end><try_stmt><block_start><raise>self._error<block_end><except_stmt>TypeError<block_start><pass><block_end><block_end><def_stmt>run_all_sync self duration:int<arrow><none><block_start>"""
Run all sync tests, including both warmup and duration.
"""<with_stmt>self._condition<block_start>self._completed_operations.reset()<line_sep>self._last_completion_time=0.0<line_sep>self._start_time=time.time()<line_sep>self._condition.wait(timeout=duration)<block_end><block_end><async_keyword><def_stmt>run_all_async self duration:int<arrow><none><block_start>"""
Run all async tests, including both warmup and duration.
"""<async_keyword><with_stmt>self._condition<block_start>self._completed_operations.reset()<line_sep>self._last_completion_time=0.0<line_sep>self._start_time=time.time()<try_stmt><block_start><await>asyncio.wait_for(self._condition.wait() timeout=duration)<block_end><except_stmt>asyncio.TimeoutError<block_start><pass><block_end><block_end><block_end><def_stmt>start_events_sync self<arrow><none><block_start>"""
Start the process for receiving events.
"""<line_sep><raise>NotImplementedError("start_events_sync must be implemented for {}".format(self.__class__.__name__))<block_end><def_stmt>stop_events_sync self<arrow><none><block_start>"""
Stop the process for receiving events.
"""<line_sep><raise>NotImplementedError("stop_events_sync must be implemented for {}".format(self.__class__.__name__))<block_end><async_keyword><def_stmt>start_events_async self<arrow><none><block_start>"""
Start the process for receiving events.
"""<line_sep><raise>NotImplementedError("start_events_async must be implemented for {}".format(self.__class__.__name__))<block_end><async_keyword><def_stmt>stop_events_async self<arrow><none><block_start>"""
Stop the process for receiving events.
"""<line_sep><raise>NotImplementedError("stop_events_async must be implemented for {}".format(self.__class__.__name__))<block_end><block_end> |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for Numpy2Tensor."""<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>zeus.common ClassFactory ClassType<line_sep>@ClassFactory.register(ClassType.TRANSFORM)<class_stmt>Numpy2Tensor(object)<block_start>"""Transform a numpy to tensor."""<def_stmt>__call__ self *args<block_start>"""Call function of Numpy2Tensor."""<if_stmt>len(args)<eq>1<block_start><return>torch.from_numpy(args[0])<block_end><else_stmt><block_start><return>tuple([torch.from_numpy(np.array(array))<for>array args])<block_end><block_end><block_end> |
"""
Copyright 2015, Cisco Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: <NAME>, Cisco Systems, Inc.
"""<import_stmt>os<import_stmt>glob<import_stmt>lxml.etree<as>ET<import_stmt>logging<import_from_stmt>explorer.models Collection<as>Col<import_from_stmt>explorer.models User<import_from_stmt>explorer.utils.adapter Adapter<class_stmt>Collection(object)<block_start>""" This class implements utility routines to work with
collections """<line_sep>@staticmethod<def_stmt>add metadata payload<block_start>""" Add a collection entry """<if_stmt>metadata<in>[<none> '']<block_start>logging.error('Invalid metadata')<line_sep><return><false><block_end><if_stmt>payload<in>[<none> '' 'null']<block_start>logging.error('Invalid payload')<line_sep><return><false><block_end>metadata=ET.fromstring(metadata)<line_sep>payload=ET.fromstring(payload)<line_sep>logging.debug(ET.tostring(metadata))<line_sep>logging.debug(ET.tostring(payload))<line_sep>cname=metadata.find('collection').text<line_sep>author=metadata.find('author').text<line_sep>name=metadata.find('name').text<if_stmt><not>Col.objects.filter(name=cname).exists()<block_start><if_stmt><not>User.objects.filter(username=author).exists()<block_start>logging.error('User %s does not exist !!'%author)<line_sep><return><false><block_end>user=User.objects.filter(username=author)<line_sep>obj=Col(name=cname user=user[0])<line_sep>obj.save()<line_sep>logging.debug('Created new collection '+cname)<block_end>path=os.path.join('data' 'collections' cname)<if_stmt><not>os.path.exists(path)<block_start>logging.error('Path to collection does not exist : %s !!'%path)<line_sep><return><false><block_end><for_stmt>child payload<block_start><if_stmt>child.tag<eq>'metadata'<block_start><for_stmt>elem metadata<block_start>child.append(elem)<block_end><block_end><block_end>cfile=os.path.join(path name+'.xml')<with_stmt>open(cfile 'w')<as>f<block_start>f.write(ET.tostring(payload))<block_end>logging.debug('%s was saved successfully in collection %s'%(name cname))<line_sep><return><true><block_end>@staticmethod<def_stmt>remove metadata<block_start>""" Remove a entry from collection """<if_stmt>metadata<is><none><or>metadata<eq>''<block_start>logging.error('Invalid metadata')<line_sep><return><false><block_end>metadata=ET.fromstring(metadata)<line_sep>cname=metadata.find('collection').text<line_sep>name=metadata.find('name').text<if_stmt>name<is><none><or><not>name<block_start>logging.error('Invalid entry %s in argument!!'%name)<line_sep><return><false><block_end><if_stmt><not>Col.objects.filter(name=cname).exists()<block_start>logging.debug('Collection %s does not exists !!'%cname)<line_sep><return><true><block_end>path=os.path.join('data' 'collections' cname name+'.xml')<if_stmt><not>os.path.exists(path)<block_start>logging.debug('Path to collection does not exist : %s !!'%path)<line_sep><return><true><block_end>os.remove(path)<line_sep>logging.debug('%s was successfully removed from collection %s'%(name cname))<line_sep><return><true><block_end>@staticmethod<def_stmt>list <block_start>""" get list of all collection entries """<line_sep>cols_elem=ET.Element('collections')<for_stmt>col Col.objects.all()<block_start>path=os.path.join('data' 'collections' col.name)<if_stmt><not>os.path.exists(path)<block_start>logging.error('Collection has inconstancy : %s !!'%col.name)<line_sep><continue><block_end>files=glob.glob(os.path.join(path '*'))<for_stmt>_file files<block_start>payload=ET.parse(_file)<for_stmt>child payload.getroot()<block_start><if_stmt>child.tag<eq>'metadata'<block_start>cols_elem.append(child)<block_end><block_end><block_end><block_end><return>cols_elem<block_end>@staticmethod<def_stmt>load username metadata<block_start>""" Load a collection entry """<if_stmt>metadata<is><none><or>metadata<eq>''<block_start>logging.error('Invalid 
metadata')<line_sep><return><none><block_end>metadata=ET.fromstring(metadata)<line_sep>cname=metadata.find('collection').text<line_sep>name=metadata.find('name').text<if_stmt><not>Col.objects.filter(name=cname).exists()<block_start>logging.debug('Collection %s does not exists !!'%cname)<line_sep><return><none><block_end>_file=os.path.join('data' 'collections' cname name+'.xml')<if_stmt><not>os.path.exists(_file)<block_start>logging.error('Collection entry not found')<line_sep><return><none><block_end>data=<none><with_stmt>open(_file 'r')<as>f<block_start>data=f.read()<line_sep>data=data.replace('>' '>')<line_sep>data=data.replace('<' '<')<line_sep>payload=ET.fromstring(data)<block_end><if_stmt>data<is><none><block_start>logging.error('Collection entry is empty')<line_sep><return><none><block_end>fmt=payload.get('format' 'raw')<if_stmt>fmt<eq>'xpath'<block_start><return>Adapter.gen_rpc(username data)<block_end><return>payload<block_end><block_end> |
expected_output={'pvst':{'a':{'pvst_id':'a' 'vlans':{2:{'vlan_id':2 'designated_root_priority':32768 'designated_root_address':'0021.1bff.d973' 'designated_root_max_age':20 'designated_root_forward_delay':15 'bridge_priority':32768 'sys_id_ext':0 'bridge_address':'8cb6.4fff.6588' 'bridge_max_age':20 'bridge_forward_delay':15 'bridge_transmit_hold_count':6 'interface':{'GigabitEthernet0/7/0/0':{'name':'GigabitEthernet0/7/0/0' 'cost':20000 'role':'DSGN' 'port_priority':128 'port_num':1 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'8cb6.4fff.6588' 'designated_port_priority':128 'designated_port_num':1 } 'GigabitEthernet0/7/0/1':{'name':'GigabitEthernet0/7/0/1' 'cost':20000 'role':'DSGN' 'port_priority':128 'port_num':2 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'8cb6.4fff.6588' 'designated_port_priority':128 'designated_port_num':2 } 'GigabitEthernet0/7/0/10':{'name':'GigabitEthernet0/7/0/10' 'cost':20000 'role':'ROOT' 'port_priority':128 'port_num':3 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'0021.1bff.d973' 'designated_port_priority':128 'designated_port_num':3 } 'GigabitEthernet0/7/0/11':{'name':'GigabitEthernet0/7/0/11' 'cost':20000 'role':'ALT' 'port_priority':128 'port_num':4 'port_state':'BLK' 'designated_bridge_priority':32768 'designated_bridge_address':'0021.1bff.d973' 'designated_port_priority':128 'designated_port_num':4 } } } 3:{'vlan_id':3 'designated_root_priority':32768 'designated_root_address':'0021.1bff.d973' 'designated_root_max_age':20 'designated_root_forward_delay':15 'bridge_priority':32768 'sys_id_ext':0 'bridge_address':'8cb6.4fff.6588' 'bridge_max_age':20 'bridge_forward_delay':15 'bridge_transmit_hold_count':6 'interface':{'GigabitEthernet0/7/0/0':{'name':'GigabitEthernet0/7/0/0' 'cost':20000 'role':'DSGN' 'port_priority':128 'port_num':1 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'8cb6.4fff.6588' 'designated_port_priority':128 'designated_port_num':1 } 'GigabitEthernet0/7/0/1':{'name':'GigabitEthernet0/7/0/1' 'cost':20000 'role':'DSGN' 'port_priority':128 'port_num':2 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'8cb6.4fff.6588' 'designated_port_priority':128 'designated_port_num':2 } 'GigabitEthernet0/7/0/10':{'name':'GigabitEthernet0/7/0/10' 'cost':20000 'role':'ROOT' 'port_priority':128 'port_num':3 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'0021.1bff.d973' 'designated_port_priority':128 'designated_port_num':3 } 'GigabitEthernet0/7/0/11':{'name':'GigabitEthernet0/7/0/11' 'cost':20000 'role':'ALT' 'port_priority':128 'port_num':4 'port_state':'BLK' 'designated_bridge_priority':32768 'designated_bridge_address':'0021.1bff.d973' 'designated_port_priority':128 'designated_port_num':4 } } } 4:{'vlan_id':4 'designated_root_priority':32768 'designated_root_address':'0021.1bff.d973' 'designated_root_max_age':20 'designated_root_forward_delay':15 'bridge_priority':32768 'sys_id_ext':0 'bridge_address':'8cb6.4fff.6588' 'bridge_max_age':20 'bridge_forward_delay':15 'bridge_transmit_hold_count':6 'interface':{'GigabitEthernet0/7/0/0':{'name':'GigabitEthernet0/7/0/0' 'cost':20000 'role':'DSGN' 'port_priority':128 'port_num':1 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'8cb6.4fff.6588' 'designated_port_priority':128 'designated_port_num':1 } 'GigabitEthernet0/7/0/1':{'name':'GigabitEthernet0/7/0/1' 
'cost':20000 'role':'DSGN' 'port_priority':128 'port_num':2 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'8cb6.4fff.6588' 'designated_port_priority':128 'designated_port_num':2 } 'GigabitEthernet0/7/0/10':{'name':'GigabitEthernet0/7/0/10' 'cost':20000 'role':'ROOT' 'port_priority':128 'port_num':3 'port_state':'FWD' 'designated_bridge_priority':32768 'designated_bridge_address':'0021.1bff.d973' 'designated_port_priority':128 'designated_port_num':3 } 'GigabitEthernet0/7/0/11':{'name':'GigabitEthernet0/7/0/11' 'cost':20000 'role':'ALT' 'port_priority':128 'port_num':4 'port_state':'BLK' 'designated_bridge_priority':32768 'designated_bridge_address':'0021.1bff.d973' 'designated_port_priority':128 'designated_port_num':4 } } } } } } }<line_sep> |
<import_from_future_stmt> annotations<import_stmt>asyncio<import_stmt>json<import_stmt>logging<import_stmt>os<import_from_stmt>typing Any Mapping Optional Union<import_stmt>aiohttp<import_from_stmt>aiohttp hdrs<import_from_stmt>aiohttp.client _RequestContextManager<import_from_stmt>.auth Auth<import_from_stmt>.request ClientRequest<import_from_stmt>.typedefs WsBytesHandler WsJsonHandler WsStrHandler<import_from_stmt>.ws ClientWebSocketResponse ws_run_forever<line_sep>logger=logging.getLogger(__name__)<class_stmt>Client<block_start>"""
HTTP request client class.

.. note::
    The ``apis`` argument can be omitted.

:Example:

.. code-block:: python

    async def main():
        async with pybotters.Client(apis={'example': ['KEY', 'SECRET']}) as client:
            r = await client.get('https://...', params={'foo': 'bar'})
            print(await r.json())

.. code-block:: python

    async def main():
        async with pybotters.Client(apis={'example': ['KEY', 'SECRET']}) as client:
            wstask = await client.ws_connect(
                'wss://...',
                send_json={'foo': 'bar'},
                hdlr_json=pybotters.print_handler
            )
            await wstask
            # Ctrl+C to break

Basic API

HTTP request functions available at the top level of the package. These are synchronous functions that internally wrap pybotters.Client.

:Example:

.. code-block:: python

    r = pybotters.get(
        'https://...',
        params={'foo': 'bar'},
        apis={'example': ['KEY', 'SECRET']}
    )
    print(r.text())
    print(r.json())

.. code-block:: python

    pybotters.ws_connect(
        'wss://...',
        send_json={'foo': 'bar'},
        hdlr_json=pybotters.print_handler,
        apis={'example': ['KEY', 'SECRET']}
    )
    # Ctrl+C to break
"""<line_sep>_session:aiohttp.ClientSession<line_sep>_base_url:str<def_stmt>__init__ self apis:Optional[Union[dict[str list[str]] str]]=<none> base_url:str='' **kwargs:Any <arrow><none><block_start>"""
:param apis: API key/secret data (optional), e.g. {'exchange': ['key', 'secret']}
:param base_url: URL automatically prepended to the url argument of each request method (optional)
:param ``**kwargs``: Keyword arguments passed through to aiohttp.ClientSession (optional)
"""<line_sep>self._session=aiohttp.ClientSession(request_class=ClientRequest ws_response_class=ClientWebSocketResponse **kwargs )<line_sep>apis=self._load_apis(apis)<line_sep>self._session.__dict__['_apis']=self._encode_apis(apis)<line_sep>self._base_url=base_url<block_end><async_keyword><def_stmt>__aenter__ self<arrow>'Client'<block_start><return>self<block_end><async_keyword><def_stmt>__aexit__ self *args:Any<arrow><none><block_start><await>self.close()<block_end><async_keyword><def_stmt>close self<arrow><none><block_start><await>self._session.close()<block_end><def_stmt>_request self method:str url:str * params:Optional[Mapping[str Any]]=<none> data:Optional[dict[str Any]]=<none> auth:Optional[Auth]=Auth **kwargs:Any <arrow>_RequestContextManager<block_start><return>self._session.request(method=method url=self._base_url+url params=params data=data auth=auth **kwargs )<block_end><def_stmt>request self method:str url:str * params:Optional[Mapping[str str]]=<none> data:Any=<none> **kwargs:Any <arrow>_RequestContextManager<block_start>"""
:param method: HTTP method such as GET, POST, PUT, or DELETE
:param url: Request URL
:param params: URL query string (optional)
:param data: Request body (optional)
:param headers: Request headers (optional)
:param auth: Enable/disable automatic API authentication; enabled by default, pass auth=None to disable it (optional)
:param ``kwargs``: Keyword arguments passed through to aiohttp.ClientSession.request (optional)
"""<line_sep><return>self._request(method url params=params data=data **kwargs)<block_end><def_stmt>get self url:str * params:Optional[Mapping[str str]]=<none> **kwargs:Any <arrow>_RequestContextManager<block_start><return>self._request(hdrs.METH_GET url params=params **kwargs)<block_end><def_stmt>post self url:str * data:Any=<none> **kwargs:Any <arrow>_RequestContextManager<block_start><return>self._request(hdrs.METH_POST url data=data **kwargs)<block_end><def_stmt>put self url:str * data:Any=<none> **kwargs:Any <arrow>_RequestContextManager<block_start><return>self._request(hdrs.METH_PUT url data=data **kwargs)<block_end><def_stmt>delete self url:str * data:Any=<none> **kwargs:Any <arrow>_RequestContextManager<block_start><return>self._request(hdrs.METH_DELETE url data=data **kwargs)<block_end><async_keyword><def_stmt>ws_connect self url:str * send_str:Optional[Union[str list[str]]]=<none> send_bytes:Optional[Union[bytes list[bytes]]]=<none> send_json:Any=<none> hdlr_str:Optional[WsStrHandler]=<none> hdlr_bytes:Optional[WsBytesHandler]=<none> hdlr_json:Optional[WsJsonHandler]=<none> **kwargs:Any <arrow>asyncio.Task<block_start>"""
:param url: WebSocket URL
:param send_str: String(s) to send over the WebSocket; a single string or a list of strings (optional)
:param send_json: Dict object(s) to send over the WebSocket; a single dict or a list of dicts (optional)
:param hdlr_str: Function that handles data received over the WebSocket.
The first argument msg receives a *str*, and the second argument ws receives a ClientWebSocketResponse (optional)
:param hdlr_json: Function that handles data received over the WebSocket.
The first argument msg receives Any type (JSON-like) data, and the second argument ws receives a ClientWebSocketResponse
(optional)
:param headers: Request headers (optional)
:param auth: Enable/disable automatic API authentication; enabled by default, pass auth=None to disable it (optional)
:param ``**kwargs``: Keyword arguments passed through to aiohttp.ClientSession.ws_connect (optional)
"""<line_sep>event=asyncio.Event()<line_sep>task=asyncio.create_task(ws_run_forever(url self._session event send_str=send_str send_bytes=send_bytes send_json=send_json hdlr_str=hdlr_str hdlr_bytes=hdlr_bytes hdlr_json=hdlr_json **kwargs ))<line_sep><await>event.wait()<line_sep><return>task<block_end>@staticmethod<def_stmt>_load_apis apis:Optional[Union[dict[str list[str]] str]]<arrow>dict[str list[str]]<block_start><if_stmt>apis<is><none><block_start>apis={}<block_end><if_stmt>isinstance(apis dict)<block_start><if_stmt>apis<block_start><return>apis<block_end><else_stmt><block_start>current_apis=os.path.join(os.getcwd() 'apis.json')<if_stmt>os.path.isfile(current_apis)<block_start><with_stmt>open(current_apis)<as>fp<block_start><return>json.load(fp)<block_end><block_end><else_stmt><block_start>env_apis=os.getenv('PYBOTTERS_APIS')<if_stmt>env_apis<and>os.path.isfile(env_apis)<block_start><with_stmt>open(env_apis)<as>fp<block_start><return>json.load(fp)<block_end><block_end><else_stmt><block_start><return>apis<block_end><block_end><block_end><block_end><elif_stmt>isinstance(apis str)<block_start><with_stmt>open(apis)<as>fp<block_start><return>json.load(fp)<block_end><block_end><else_stmt><block_start>logger.warning(f'apis must be dict or str, not {apis.__class__.__name__}')<line_sep><return>{}<block_end><block_end>@staticmethod<def_stmt>_encode_apis apis:Optional[dict[str list[str]]]<arrow>dict[str tuple[str bytes]]<block_start><if_stmt>apis<is><none><block_start>apis={}<block_end>encoded={}<for_stmt>name apis<block_start><if_stmt>len(apis[name])<eq>2<block_start>encoded[name]=(apis[name][0] apis[name][1].encode())<block_end><block_end><return>encoded<block_end><block_end> |
"""
Halite II Python 3 starter kit
See MyBot.py for a basic usage example. In short, you should initialize() at
the start, then in a loop, call get_map() to get the current game state, then
build up a list of commands and send them with send_command_queue().
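A minimal sketch of that loop, using only the calls named above (treat it as
pseudocode; argument and return conventions vary by kit version, and MyBot.py
remains the authoritative example):

    initialize()                          # once, at program start
    while True:
        game_map = get_map()              # current game state
        command_queue = []                # build up commands for your ships here
        send_command_queue(command_queue)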
"""<import_from_stmt>. collision constants entity game_map networking<import_from_stmt>.networking Game<line_sep> |
"""
Losses are critical to training a neural network well. The training can only make
progress if you provide a meaningful measure of loss for each training step. What the loss looks like usually depends
on your application. Pytorch has a number of `loss functions <https://pytorch.org/docs/stable/nn.html#loss-functions/>`_ that
you can use out of the box. However, some more advanced and cutting-edge loss functions exist that are not (yet) part of
Pytorch. We include those below for your experimentation.\n
**Caution:** if you decide to use one of these, you will definitely want to peruse the source code first, as it has
many additional useful notes and references which will help you.
Keep in mind that losses are specific to the type of task. Classification losses are computed differently from segmentation losses.
Within the segmentation domain, make sure to use BCE (Binary Cross Entropy) for any work involving binary masks (e.g. num_classes = 1).
Make sure to read the documentation and notes (in the code) for each loss to understand how it is applied.
`Read this blog post <https://gombru.github.io/2018/05/23/cross_entropy_loss/>`_
Note:
Logit is the vector of raw (non-normalized) predictions that a classification model generates, which is ordinarily then passed to a normalization function.
If the model is solving a multi-class classification problem, logits typically become an input to the softmax function. The softmax function then generates
a vector of (normalized) probabilities with one value for each possible class.
For example, BCEWithLogitsLoss is a BCE that accepts R((-inf, inf)) and automatically applies torch.sigmoid to convert it to ([0,1]) space.
However, if you use one-hot encoding or similar methods where you need to convert a tensor to pytorch from another source (e.g. numpy), you will need to
make sure to apply the correct type to the resulting tensor. E.g. if y_hot is of type long and the BCE loss expects a Tensor of type float, then you
can try converting y_hot with y_hot = y_hot.type_as(output).
To convert predictions into the (0,1) range you will sometimes need to use either softmax or sigmoid:
softmax is used for multi-class classification (as in multinomial logistic regression), whereas sigmoid is used for binary classification (as in logistic regression).
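For example, here is a minimal sketch of the conversions described above (variable names are illustrative only)::

    import torch
    import torch.nn.functional as F

    logits = torch.randn(4, 1)          # raw scores in (-inf, inf)
    probs = torch.sigmoid(logits)       # probabilities in (0, 1) for a binary task
    y = torch.randint(0, 2, (4, 1))     # integer (long) labels
    y = y.type_as(logits)               # match the float dtype the BCE loss expects
    loss = F.binary_cross_entropy_with_logits(logits, y)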
"""<line_sep>## Various loss calculation functions ##
# Sources: https://github.com/bermanmaxim/jaccardSegment/blob/master/losses.py (?)
# https://github.com/doodledood/carvana-image-masking-challenge/blob/master/losses.py (MIT)
# https://github.com/atlab/attorch/blob/master/attorch/losses.py (MIT)
# https://github.com/EKami/carvana-challenge (MIT)
# https://github.com/DingKe/pytorch_workplace (MIT)
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>math<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.autograd Function<import_from_stmt>torch.autograd Variable<import_from_stmt>torch Tensor<import_from_stmt>typing Iterable Set<line_sep>__all__=['ActiveContourLoss' 'ActiveContourLossAlt' 'AngularPenaltySMLoss' 'AsymLoss' 'BCELoss2d' 'BCEDiceLoss' 'BCEWithLogitsViewLoss' 'BCEDiceTL1Loss' 'BCEDicePenalizeBorderLoss' 'BCEDiceFocalLoss' 'BinaryFocalLoss' 'ComboBCEDiceLoss' 'ComboSemsegLossWeighted' 'EncNetLoss' 'FocalLoss' 'FocalLoss2' 'HausdorffERLoss' 'HausdorffDTLoss' 'LovaszSoftmax' 'mIoULoss' 'MixSoftmaxCrossEntropyOHEMLoss' 'MSE3D' 'OhemCELoss' 'OhemCrossEntropy2d' 'OhemBCEDicePenalizeBorderLoss' 'PoissonLoss' 'PoissonLoss3d' 'RecallLoss' 'RMILoss' 'RMILossAlt' 'RMIBCEDicePenalizeBorderLoss' 'SoftInvDiceLoss' 'SoftDiceLoss' 'StableBCELoss' 'TverskyLoss' 'ThresholdedL1Loss' 'WeightedSoftDiceLoss' 'WeightedBCELoss2d' 'BDLoss' 'L1Loss3d' 'WingLoss' 'BoundaryLoss']<line_sep>VOID_LABEL=255<line_sep>N_CLASSES=1<class_stmt>StableBCELoss(nn.Module)<block_start><def_stmt>__init__ self **_<block_start>super(StableBCELoss self).__init__()<block_end>@staticmethod<def_stmt>forward input_ target **_<block_start>neg_abs=-input_.abs()<line_sep>loss=input_.clamp(min=0)-input_<times>target+(1+neg_abs.exp()).log()<line_sep><return>loss.mean()<block_end><block_end># WARN: Only applicable to Binary Segmentation!
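# Note on StableBCELoss (defined above): it evaluates x.clamp(min=0) - x*t + log(1 + exp(-|x|)),
# the numerically stable rewriting of binary cross-entropy on logits, the same trick used by nn.BCEWithLogitsLoss.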
<def_stmt>binaryXloss logits label<block_start>mask=(label.view(-1)<ne>VOID_LABEL)<line_sep>nonvoid=mask.long().sum()<if_stmt>nonvoid<eq>0# only void pixels, the gradients should be 0
<block_start><return>logits.sum()<times>0.<block_end># if nonvoid == mask.numel():
# # no void pixel, use builtin
# return F.cross_entropy(logits, label)
target=label.contiguous().view(-1)[mask]<line_sep>logits=logits.contiguous().view(-1)[mask]<line_sep># loss = F.binary_cross_entropy(logits, target.float())
loss=StableBCELoss()(logits target.float())<line_sep><return>loss<block_end><def_stmt>naive_single logit label# single images
<block_start>mask=(label.view(-1)<ne>255)<line_sep>num_preds=mask.long().sum()<if_stmt>num_preds<eq>0# only void pixels, the gradients should be 0
<block_start><return>logit.sum()<times>0.<block_end>target=label.contiguous().view(-1)[mask].float()<line_sep>logit=logit.contiguous().view(-1)[mask]<line_sep>prob=torch.sigmoid(logit)<line_sep>intersect=target<times>prob<line_sep>union=target+prob-intersect<line_sep>loss=(1.-intersect/union).sum()<line_sep><return>loss<block_end># WARN: Only applicable to Binary Segmentation!
<def_stmt>hingeloss logits label<block_start>mask=(label.view(-1)<ne>255)<line_sep>num_preds=mask.long().sum().item()<if_stmt>num_preds<eq>0# only void pixels, the gradients should be 0
<block_start><return>logits.sum().item()<times>0.<block_end>target=label.contiguous().view(-1)[mask]<line_sep>target=2.<times>target.float()-1.# [target == 0] = -1
logits=logits.contiguous().view(-1)[mask]<line_sep>hinge=1./num_preds<times>F.relu(1.-logits<times>target).sum().item()<line_sep><return>hinge<block_end><def_stmt>gamma_fast gt permutation<block_start>p=len(permutation)<line_sep>gt=gt.gather(0 permutation)<line_sep>gts=gt.sum()<line_sep>intersection=gts-gt.float().cumsum(0)<line_sep>union=gts+(1-gt).float().cumsum(0)<line_sep>jaccard=1.-intersection/union<line_sep>jaccard[1:p]=jaccard[1:p]-jaccard[0:-1]<line_sep><return>jaccard<block_end># WARN: Only applicable to Binary Segmentation right now (zip function needs to be replaced)!
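# Note: gamma_fast (defined above) returns the discrete gradient of the Jaccard loss along the
# sorted-error ordering; the Lovasz hinge below weights the sorted hinge margins by it (see lovasz_binary).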
<def_stmt>lovaszloss logits labels prox=<false> max_steps=20 debug=<none><block_start>"""
`The Lovasz-Softmax loss <https://arxiv.org/abs/1705.08790>`_
:param logits:
:param labels:
:param prox:
:param max_steps:
:param debug:
:return:
"""<if_stmt>debug<is><none><block_start>debug={}<block_end># image-level Lovasz hinge
<if_stmt>logits.size(0)<eq>1# single image case
<block_start>loss=lovasz_single(logits.squeeze(0) labels.squeeze(0) prox max_steps debug)<block_end><else_stmt><block_start>losses=[]<line_sep># assert len(logits[0]) == len(labels[0])
<for_stmt>logit,label zip(logits labels)<block_start>loss=lovasz_single(logit label prox max_steps debug)<line_sep>losses.append(loss)<block_end>loss=sum(losses)/len(losses)<block_end><return>loss<block_end><def_stmt>naiveloss logits labels# image-level Lovasz hinge
<block_start><if_stmt>logits.size(0)<eq>1# single image case
<block_start>loss=naive_single(logits.squeeze(0) labels.squeeze(0))<block_end><else_stmt><block_start>losses=[]<for_stmt>logit,label zip(logits labels)<block_start>loss=naive_single(logit label)<line_sep>losses.append(loss)<block_end>loss=sum(losses)/len(losses)<block_end><return>loss<block_end><def_stmt>iouloss pred gt# works for one binary pred and associated target
# make byte tensors
<block_start>pred=(pred<eq>1)<line_sep>mask=(gt<ne>255)<line_sep>gt=(gt<eq>1)<line_sep>union=(gt|pred)[mask].long().sum()<if_stmt><not>union<block_start><return>0.<block_end><else_stmt><block_start>intersection=(gt&pred)[mask].long().sum()<line_sep><return>1.-intersection/union<block_end><block_end><def_stmt>compute_step_length x grad active eps=1e-6# compute next intersection with an edge in the direction grad
# OR next intersection with a 0 - border
# returns: delta in ind such that:
# after a step delta in the direction grad, x[ind] and x[ind+1] will be equal
<block_start>delta=np.inf<line_sep>ind=-1<if_stmt>active<g>0<block_start>numerator=(x[:active]-x[1:active+1])# always positive (because x is sorted)
denominator=(grad[:active]-grad[1:active+1])<line_sep># indices corresponding to negative denominator won't intersect
# also, we are not interested in indices in x that are *already equal*
valid=(denominator<g>eps)&(numerator<g>eps)<line_sep>valid_indices=valid.nonzero()<line_sep>intersection_times=numerator[valid]/denominator[valid]<if_stmt>intersection_times.size()<block_start>delta,ind=intersection_times.min(0)<line_sep>ind=valid_indices[ind]<line_sep>delta,ind=delta[0] ind[0 0]<block_end><block_end><if_stmt>grad[active]<g>0<block_start>intersect_zero=x[active]/grad[active]<if_stmt>intersect_zero<g>0.<and>intersect_zero<l>delta<block_start><return>intersect_zero -1<block_end><block_end><return>delta ind<block_end><def_stmt>project gam active members<block_start>tovisit=set(range(active+1))<while_stmt>tovisit<block_start>v=tovisit.pop()<if_stmt>len(members[v])<g>1<block_start>avg=0.<for_stmt>k members[v]<block_start><if_stmt>k<ne>v<block_start>tovisit.remove(k)<block_end>avg<augadd>gam[k]/len(members[v])<block_end><for_stmt>k members[v]<block_start>gam[k]=avg<block_end><block_end><block_end><if_stmt>active+1<l>len(gam)<block_start>gam[active+1:]=0.<block_end><block_end><def_stmt>find_proximal x0 gam lam eps=1e-6 max_steps=20 debug=<none><block_start><if_stmt>debug<is><none><block_start>debug={}<block_end># x0: sorted margins data
# gam: initial gamma_fast(target, perm)
# regularisation parameter lam
x=x0.clone()<line_sep>act=(x<ge>eps).nonzero()<line_sep>finished=<false><if_stmt><not>act.size()<block_start>finished=<true><block_end><else_stmt><block_start>active=act[-1 0]<line_sep>members={i:{i}<for>i range(active+1)}<if_stmt>active<g>0<block_start>equal=(x[:active]-x[1:active+1])<l>eps<for_stmt>i,e enumerate(equal)<block_start><if_stmt>e<block_start>members[i].update(members[i+1])<line_sep>members[i+1]=members[i]<block_end><block_end>project(gam active members)<block_end><block_end>step=0<while_stmt><not>finished<and>step<l>max_steps<and>active<g>-1<block_start>step<augadd>1<line_sep>res=compute_step_length(x gam active eps)<line_sep>delta,ind=res<if_stmt>ind<eq>-1<block_start>active=active-len(members[active])<block_end>stop=torch.dot(x-x0 gam)/torch.dot(gam gam)+1./lam<if_stmt>0<le>stop<l>delta<block_start>delta=stop<line_sep>finished=<true><block_end>x=x-delta<times>gam<if_stmt><not>finished<block_start><if_stmt>ind<ge>0<block_start>repr=min(members[ind])<line_sep>members[repr].update(members[ind+1])<for_stmt>m members[ind]<block_start><if_stmt>m<ne>repr<block_start>members[m]=members[repr]<block_end><block_end><block_end>project(gam active members)<block_end><if_stmt>"path"<in>debug<block_start>debug["path"].append(x.numpy())<block_end><block_end><if_stmt>"step"<in>debug<block_start>debug["step"]=step<block_end><if_stmt>"finished"<in>debug<block_start>debug["finished"]=finished<block_end><return>x gam<block_end><def_stmt>lovasz_binary margins label prox=<false> max_steps=20 debug=<none><block_start><if_stmt>debug<is><none><block_start>debug={}<block_end># 1d vector inputs
# Workaround: can't sort Variable bug
# prox: False or lambda regularization value
_,perm=torch.sort(margins.detach() dim=0 descending=<true>)<line_sep>margins_sorted=margins[perm]<line_sep>grad=gamma_fast(label perm)<line_sep>loss=torch.dot(F.relu(margins_sorted) grad)<if_stmt>prox<is><not><false><block_start>xp,gam=find_proximal(margins_sorted.detach() grad prox max_steps=max_steps eps=1e-6 debug=debug)<line_sep>hook=margins_sorted.register_hook(<lambda>grad:(margins_sorted.detach()-xp))<line_sep><return>loss hook gam<block_end><else_stmt><block_start><return>loss<block_end><block_end><def_stmt>lovasz_single logit label prox=<false> max_steps=20 debug=<none><block_start><if_stmt>debug<is><none><block_start>debug={}<block_end># single images
mask=(label.view(-1)<ne>255)<line_sep>num_preds=mask.long().sum()<if_stmt>num_preds<eq>0# only void pixels, the gradients should be 0
<block_start><return>logit.sum()<times>0.<block_end>target=label.contiguous().view(-1)[mask]<line_sep>signs=2.<times>target.float()-1.<line_sep>logit=logit.contiguous().view(-1)[mask]<line_sep>margins=(1.-logit<times>signs)<line_sep>loss=lovasz_binary(margins target prox max_steps debug=debug)<line_sep><return>loss<block_end><def_stmt>dice_coefficient logit label isCuda=<true><block_start>'''
WARNING THIS IS VERY SLOW FOR SOME REASON!!
:param logit: calculated guess (expects torch.Tensor)
:param label: truth label (expects torch.Tensor)
:return: dice coefficient
'''<line_sep>A=label.view(-1)<line_sep>B=logit.view(-1)<line_sep>A=A.clone()<line_sep>B=B.clone()<if_stmt>len(A)<ne>len(B)<block_start><raise>AssertionError<block_end><for_stmt>i list(range(len(A)))<block_start><if_stmt>A[i]<g>0.5<block_start>A[i]=1.0<block_end><else_stmt><block_start>A[i]=0.0<block_end><if_stmt>B[i]<g>0.5<block_start>B[i]=1.0<block_end><else_stmt><block_start>B[i]=0.0<block_end><block_end><if_stmt>isCuda<block_start>A=A.type(torch.cuda.ByteTensor)<block_end><else_stmt><block_start>A=A.type(torch.ByteTensor)<block_end>dice=torch.masked_select(B A).sum()<times>2.0/(B.sum()+A.sum())<line_sep><return>dice<block_end># ==================================== #
# Source: https://github.com/EKami/carvana-challenge
<class_stmt>WeightedSoftDiceLoss(torch.nn.Module)<block_start><def_stmt>__init__ self **_<block_start>super(WeightedSoftDiceLoss self).__init__()<block_end>@staticmethod<def_stmt>forward logits labels weights **_<block_start>probs=torch.sigmoid(logits)<line_sep>num=labels.size(0)<line_sep>w=weights.view(num -1)<line_sep>w2=w<times>w<line_sep>m1=probs.view(num -1)<line_sep>m2=labels.view(num -1)<line_sep>intersection=(m1<times>m2)<line_sep>score=2.<times>((w2<times>intersection).sum(1)+1)/((w2<times>m1).sum(1)+(w2<times>m2).sum(1)+1)<line_sep>score=1-score.sum()/num<line_sep><return>score<block_end><block_end><def_stmt>dice_coeff pred target<block_start>smooth=1.<line_sep>num=pred.size(0)<line_sep>m1=pred.view(num -1)# Flatten
m2=target.view(num -1)# Flatten
intersection=(m1<times>m2).sum()<line_sep><return>(2.<times>intersection+smooth)/(m1.sum()+m2.sum()+smooth)<block_end><def_stmt>dice_coeff_hard_np y_true y_pred<block_start>smooth=1.<line_sep>y_true_f=np.flatten(y_true)<line_sep>y_pred_f=np.round(np.flatten(y_pred))<line_sep>intersection=np.sum(y_true_f<times>y_pred_f)<line_sep>score=(2.<times>intersection+smooth)/(np.sum(y_true_f)+np.sum(y_pred_f)+smooth)<line_sep><return>score<block_end># ==================================== #
# Source: https://github.com/doodledood/carvana-image-masking-challenge/blob/master/losses.py
# TODO Replace this with nn.BCEWithLogitsLoss??
<class_stmt>BCELoss2d(nn.Module)<block_start><def_stmt>__init__ self weight=<none> size_average=<true> **_<block_start>super(BCELoss2d self).__init__()<line_sep>self.bce_loss=nn.BCELoss(weight size_average)<block_end><def_stmt>forward self logits labels **_<block_start>probs=torch.sigmoid(logits)<line_sep>probs_flat=probs.view(-1)<line_sep>targets_flat=labels.view(-1)<line_sep><return>self.bce_loss(probs_flat targets_flat)<block_end><block_end><class_stmt>SoftDiceLoss(nn.Module)<block_start><def_stmt>__init__ self smooth=1.0 **_<block_start>super(SoftDiceLoss self).__init__()<line_sep>self.smooth=smooth<block_end><def_stmt>forward self logits labels **_<block_start>num=labels.size(0)<line_sep>probs=torch.sigmoid(logits)<line_sep>m1=probs.view(num -1)<line_sep>m2=labels.view(num -1)<line_sep>intersection=(m1<times>m2)<line_sep># smooth = 1.
score=2.<times>(intersection.sum(1)+self.smooth)/(m1.sum(1)+m2.sum(1)+self.smooth)<line_sep>score=1-score.sum()/num<line_sep><return>score<block_end><block_end><class_stmt>FocalLoss(nn.Module)<block_start>"""
Weighs the contribution of each sample to the loss based on the classification error.
If a sample is already classified correctly by the CNN, its contribution to the loss decreases.
:param l: focusing parameter (commonly called gamma); l=0 makes this loss equivalent to BCE loss
:param eps: small constant added inside the logs for numerical stability
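In formula form (a worked restatement of the forward pass below, with p = sigmoid(logit) and binary label y):
loss = -mean( y * (1-p)^l * log(p + eps) + (1-y) * p^l * log(1-p + eps) )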
"""<def_stmt>__init__ self l=0.5 eps=1e-6 **_<block_start>super(FocalLoss self).__init__()<line_sep>self.l=l<line_sep>self.eps=eps<block_end><def_stmt>forward self logits labels **_<block_start>labels=labels.view(-1)<line_sep>probs=torch.sigmoid(logits).view(-1)<line_sep>losses=-(labels<times>torch.pow((1.-probs) self.l)<times>torch.log(probs+self.eps)+(1.-labels)<times>torch.pow(probs self.l)<times>torch.log(1.-probs+self.eps))<line_sep>loss=torch.mean(losses)<line_sep><return>loss<block_end><block_end><class_stmt>ThresholdedL1Loss(nn.Module)<block_start><def_stmt>__init__ self threshold=0.5 **_<block_start>super(ThresholdedL1Loss self).__init__()<line_sep>self.threshold=threshold<block_end><def_stmt>forward self logits labels **_<block_start>labels=labels.view(-1)<line_sep>probs=torch.sigmoid(logits).view(-1)<line_sep>probs=(probs<g>self.threshold).float()<line_sep>losses=torch.abs(labels-probs)<line_sep>loss=torch.mean(losses)<line_sep><return>loss<block_end><block_end><class_stmt>BCEDiceTL1Loss(nn.Module)<block_start><def_stmt>__init__ self threshold=0.5 **_<block_start>super(BCEDiceTL1Loss self).__init__()<line_sep>self.bce=nn.BCEWithLogitsLoss(weight=<none> size_average=<none> reduce=<none> reduction='mean' pos_weight=<none>)<line_sep>self.dice=SoftDiceLoss()<line_sep>self.tl1=ThresholdedL1Loss(threshold=threshold)<block_end><def_stmt>forward self logits labels **_<block_start><return>self.bce(logits labels)+self.dice(logits labels)+self.tl1(logits labels)<block_end><block_end><class_stmt>BCEDiceFocalLoss(nn.Module)<block_start>'''
:param focal_param: (float,double) focusing parameter for the focal term; gamma > 0 reduces the relative loss for
well-classified examples (p>0.5), putting more focus on hard, misclassified examples
:param weights: (list(), default = [1,1,1]) Optional weighting (0.0-1.0) of the losses in order of [bce, dice, focal]
'''<def_stmt>__init__ self focal_param weights=<none> **kwargs<block_start><if_stmt>weights<is><none><block_start>weights=[1.0 1.0 1.0]<block_end>super(BCEDiceFocalLoss self).__init__()<line_sep>self.bce=BCEWithLogitsViewLoss(weight=<none> size_average=<true> **kwargs)<line_sep>self.dice=SoftDiceLoss(**kwargs)<line_sep>self.focal=FocalLoss(l=focal_param **kwargs)<line_sep>self.weights=weights<block_end><def_stmt>forward self logits labels **_<block_start><return>self.weights[0]<times>self.bce(logits labels)+self.weights[1]<times>self.dice(logits labels)+self.weights[2]<times>self.focal(logits.unsqueeze(1) labels.unsqueeze(1))<block_end><block_end><class_stmt>BCEDiceLoss(nn.Module)<block_start><def_stmt>__init__ self **_<block_start>super(BCEDiceLoss self).__init__()<line_sep>self.bce=BCELoss2d()<line_sep>self.dice=SoftDiceLoss()<block_end><def_stmt>forward self logits labels **_<block_start><return>self.bce(logits labels)+self.dice(logits labels)<block_end><block_end><class_stmt>WeightedBCELoss2d(nn.Module)<block_start><def_stmt>__init__ self **_<block_start>super(WeightedBCELoss2d self).__init__()<block_end>@staticmethod<def_stmt>forward logits labels weights **_<block_start>w=weights.view(-1)# (-1 operation flattens all the dimensions)
z=logits.view(-1)# (-1 operation flattens all the dimensions)
t=labels.view(-1)# (-1 operation flattens all the dimensions)
loss=w<times>z.clamp(min=0)-w<times>z<times>t+w<times>torch.log(1+torch.exp(-z.abs()))<line_sep>loss=loss.sum()/w.sum()<line_sep><return>loss<block_end><block_end><class_stmt>BCEDicePenalizeBorderLoss(nn.Module)<block_start><def_stmt>__init__ self kernel_size=55 **_<block_start>super(BCEDicePenalizeBorderLoss self).__init__()<line_sep>self.bce=WeightedBCELoss2d()<line_sep>self.dice=WeightedSoftDiceLoss()<line_sep>self.kernel_size=kernel_size<block_end><def_stmt>to self device<block_start>super().to(device=device)<line_sep>self.bce.to(device=device)<line_sep>self.dice.to(device=device)<block_end><def_stmt>forward self logits labels **_<block_start>a=F.avg_pool2d(labels kernel_size=self.kernel_size padding=self.kernel_size<floordiv>2 stride=1)<line_sep>ind=a.ge(0.01)<times>a.le(0.99)<line_sep>ind=ind.float()<line_sep>weights=torch.ones(a.size()).to(device=logits.device)<line_sep>w0=weights.sum()<line_sep>weights=weights+ind<times>2<line_sep>w1=weights.sum()<line_sep>weights=weights/w1<times>w0<line_sep>loss=self.bce(logits labels weights)+self.dice(logits labels weights)<line_sep><return>loss<block_end><block_end># ==== Focal Loss with extra parameters ==== #
# Source: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/FocalLoss/FocalLoss.py
# License: MIT
<class_stmt>FocalLoss2(nn.Module)<block_start>"""
This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in
'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'
Focal_Loss= -1*alpha*(1-pt)*log(pt)
Params:
:param num_class:
:param alpha: (tensor) 3D or 4D the scalar factor for this criterion
:param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p > 0.5), putting more
focus on hard, misclassified examples
:param smooth: (float,double) label-smoothing value applied to the one-hot targets when computing the cross entropy
:param balance_index: (int) balance class index, must be specified when alpha is a float
:param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch.
"""<def_stmt>__init__ self num_class alpha=<none> gamma=2 balance_index=-1 smooth=<none> size_average=<true> **_<block_start>super(FocalLoss2 self).__init__()<line_sep>self.num_class=num_class<line_sep>self.alpha=alpha<line_sep>self.gamma=gamma<line_sep>self.smooth=smooth<line_sep>self.size_average=size_average<if_stmt>self.alpha<is><none><block_start>self.alpha=torch.ones(self.num_class 1)<block_end><elif_stmt>isinstance(self.alpha (list np.ndarray))<block_start><if_stmt>len(self.alpha)<ne>self.num_class<block_start><raise>AssertionError<block_end>self.alpha=torch.FloatTensor(alpha).view(self.num_class 1)<line_sep>self.alpha=self.alpha/self.alpha.sum()<block_end><elif_stmt>isinstance(self.alpha float)<block_start>alpha=torch.ones(self.num_class 1)<line_sep>alpha=alpha<times>(1-self.alpha)<line_sep>alpha[balance_index]=self.alpha<line_sep>self.alpha=alpha<block_end><else_stmt><block_start><raise>TypeError('Not support alpha type')<block_end><if_stmt>self.smooth<is><not><none><block_start><if_stmt>self.smooth<l>0<or>self.smooth<g>1.0<block_start><raise>ValueError('smooth value should be in [0,1]')<block_end><block_end><block_end><def_stmt>forward self logits labels **_# logits = F.softmax(logits, dim=1)
<block_start><if_stmt>logits.dim()<g>2# N,C,d1,d2 -> N,C,m (m=d1*d2*...)
<block_start>logits=logits.view(logits.size(0) logits.size(1) -1)<line_sep>logits=logits.permute(0 2 1).contiguous()<line_sep>logits=logits.view(-1 logits.size(-1))<block_end>labels=labels.view(-1 1)<line_sep># N = input.size(0)
# alpha = torch.ones(N, self.num_class)
# alpha = alpha * (1 - self.alpha)
# alpha = alpha.scatter_(1, target.long(), self.alpha)
epsilon=1e-10<line_sep>alpha=self.alpha.to(logits.device)<line_sep>idx=labels.cpu().long()<line_sep>one_hot_key=torch.FloatTensor(labels.size(0) self.num_class).zero_()<line_sep>one_hot_key=one_hot_key.scatter_(1 idx 1)<line_sep>one_hot_key=one_hot_key.to(logits.device)<if_stmt>self.smooth<block_start>one_hot_key=torch.clamp(one_hot_key self.smooth/(self.num_class-1) 1.0-self.smooth)<block_end>pt=(one_hot_key<times>logits).sum(1)+epsilon<line_sep>logpt=pt.log()<line_sep>gamma=self.gamma<line_sep>alpha=alpha[idx]<line_sep>loss=-1<times>alpha<times>torch.pow((1-pt) gamma)<times>logpt<if_stmt>self.size_average<block_start>loss=loss.mean()<block_end><else_stmt><block_start>loss=loss.sum()<block_end><return>loss<block_end><block_end># -------- #
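# Usage sketch (not part of the original source; shapes and hyperparameters are illustrative assumptions).
# FocalLoss2.forward expects class probabilities, so apply softmax to raw logits first:
# probs = torch.softmax(torch.randn(4, 3, 8, 8), dim=1)   # (N, C, H, W) probabilities
# labels = torch.randint(0, 3, (4, 8, 8))                 # (N, H, W) integer class ids
# criterion = FocalLoss2(num_class=3, gamma=2, smooth=0.1)
# loss = criterion(probs, labels)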
# Source: https://github.com/huaifeng1993/DFANet/blob/master/loss.py
<class_stmt>FocalLoss3(nn.Module)<block_start>"""
This criterion is an implementation of Focal Loss, which is proposed in Focal Loss for Dense Object Detection.
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
The losses are averaged across observations for each minibatch.
Params:
:param alpha: (1D Tensor, Variable) - the scalar factor for this criterion
:param gamma: (float, double) - gamma > 0
:param size_average: (bool) - By default, the losses are averaged over observations for each minibatch.
However, if the field size_average is set to False, the losses are instead summed for each minibatch.
"""<def_stmt>__init__ self class_num alpha=<none> gamma=2 size_average=<true> **_<block_start>super(FocalLoss3 self).__init__()<if_stmt>alpha<is><none><block_start>self.alpha=Variable(torch.ones(class_num+1))<block_end><else_stmt><block_start><if_stmt>isinstance(alpha Variable)<block_start>self.alpha=alpha<block_end><else_stmt><block_start>self.alpha=Variable(alpha)<block_end><block_end>self.gamma=gamma<line_sep>self.class_num=class_num<line_sep>self.size_average=size_average<block_end><def_stmt>forward self inputs labels **_# variables
<block_start>P=F.softmax(inputs dim=1)<if_stmt>len(inputs.size())<eq>3<block_start>torch_out=torch.zeros(inputs.size())<block_end><else_stmt><block_start>b,c,h,w=inputs.size()<line_sep>torch_out=torch.zeros([b c+1 h w])<block_end><if_stmt>inputs.is_cuda<block_start>torch_out=torch_out.cuda()<block_end>class_mask=Variable(torch_out)<line_sep>class_mask.scatter_(1 labels.long() 1.)<line_sep>class_mask=class_mask[: :-1 : :]<if_stmt>inputs.is_cuda<and><not>self.alpha.is_cuda<block_start>self.alpha=self.alpha.cuda()<block_end># print('alpha',self.alpha.size())
alpha=self.alpha[labels.data.view(-1)].view_as(labels)<line_sep># print (alpha.size(),class_mask.size(),P.size())
probs=(P<times>class_mask).sum(1)# + 1e-6#.view(-1, 1)
log_p=probs.log()<line_sep>batch_loss=-alpha<times>(torch.pow((1-probs) self.gamma))<times>log_p<if_stmt>self.size_average<block_start>loss=batch_loss.mean()<block_end><else_stmt><block_start>loss=batch_loss.sum()<block_end><return>loss<block_end><block_end># -------- #
# -------- #
# Source: https://discuss.pytorch.org/t/is-this-a-correct-implementation-for-focal-loss-in-pytorch/43327/4
<class_stmt>BinaryFocalLoss(nn.Module)<block_start>'''
Implementation of binary focal loss. For multi-class focal loss use one of the other implementations.
gamma = 0 is equivalent to BinaryCrossEntropy Loss
'''<def_stmt>__init__ self gamma=1.333 eps=1e-6 alpha=1.0 **_<block_start>super().__init__()<line_sep>self.gamma=gamma<line_sep>self.eps=eps<line_sep>self.alpha=alpha<block_end><def_stmt>forward self inputs labels **_<block_start>BCE_loss=F.binary_cross_entropy_with_logits(inputs labels reduction='none')<line_sep>pt=torch.exp(-BCE_loss)# prevents nans when probability 0
F_loss=self.alpha<times>(1-pt)<power>self.gamma<times>BCE_loss<line_sep><return>F_loss.mean()<block_end><block_end># -------- #
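# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# BinaryFocalLoss takes raw logits and float targets of the same shape:
# logits = torch.randn(4, 1, 16, 16)
# labels = torch.randint(0, 2, (4, 1, 16, 16)).float()
# criterion = BinaryFocalLoss(gamma=2.0)
# loss = criterion(logits, labels)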
# ==== Additional Losses ==== #
# Source: https://github.com/atlab/attorch/blob/master/attorch/losses.py
# License: MIT
<class_stmt>PoissonLoss(nn.Module)<block_start><def_stmt>__init__ self bias=1e-12 **_<block_start>super().__init__()<line_sep>self.bias=bias<block_end><def_stmt>forward self output labels **_# _assert_no_grad(target)
<block_start><with_stmt>torch.no_grad()# Pytorch 0.4.0 replacement for _assert_no_grad; note that no_grad() also blocks gradients through the prediction, so this loss cannot be backpropagated as written
<block_start><return>(output-labels<times>torch.log(output+self.bias)).mean()<block_end><block_end><block_end><class_stmt>PoissonLoss3d(nn.Module)<block_start><def_stmt>__init__ self bias=1e-12 **_<block_start>super().__init__()<line_sep>self.bias=bias<block_end><def_stmt>forward self output target **_# _assert_no_grad(target)
<block_start><with_stmt>torch.no_grad()# Pytorch 0.4.0 replacement for _assert_no_grad; note that no_grad() also blocks gradients through the prediction, so this loss cannot be backpropagated as written
<block_start>lag=target.size(1)-output.size(1)<line_sep><return>(output-target[: lag: :]<times>torch.log(output+self.bias)).mean()<block_end><block_end><block_end><class_stmt>L1Loss3d(nn.Module)<block_start><def_stmt>__init__ self bias=1e-12 **_<block_start>super().__init__()<line_sep>self.bias=bias<block_end>@staticmethod<def_stmt>forward output target **_# _assert_no_grad(target)
<block_start><with_stmt>torch.no_grad()# Pytorch 0.4.0 replacement for _assert_no_grad; note that no_grad() also blocks gradients through the prediction, so this loss cannot be backpropagated as written
<block_start>lag=target.size(1)-output.size(1)<line_sep><return>(output-target[: lag: :]).abs().mean()<block_end><block_end><block_end><class_stmt>MSE3D(nn.Module)<block_start><def_stmt>__init__ self **_<block_start>super().__init__()<block_end>@staticmethod<def_stmt>forward output target **_# _assert_no_grad(target)
<block_start><with_stmt>torch.no_grad()# Pytorch 0.4.0 replacement for _assert_no_grad; note that no_grad() also blocks gradients through the prediction, so this loss cannot be backpropagated as written
<block_start>lag=target.size(1)-output.size(1)<line_sep><return>(output-target[: lag: :]).pow(2).mean()<block_end><block_end><block_end># ==== Custom ==== #
<class_stmt>BCEWithLogitsViewLoss(nn.BCEWithLogitsLoss)<block_start>'''
Silly wrapper around nn.BCEWithLogitsLoss that flattens its arguments, because BCEWithLogitsLoss only takes 1-D inputs
'''<def_stmt>__init__ self weight=<none> size_average=<true> **_<block_start>super().__init__(weight=weight size_average=size_average)<block_end><def_stmt>forward self input_ target **_<block_start>'''
:param input_: raw logits, any shape
:param target: targets with the same number of elements as input_
:return: the BCE-with-logits loss over the flattened tensors
Simply passes along input.view(-1), target.view(-1)
'''<line_sep><return>super().forward(input_.view(-1) target.view(-1))<block_end><block_end># ===================== #
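# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# criterion = BCEWithLogitsViewLoss()
# logits = torch.randn(4, 1, 16, 16)
# labels = torch.randint(0, 2, (4, 1, 16, 16)).float()
# loss = criterion(logits, labels)    # both tensors are flattened to 1-D internally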
# Source: https://discuss.pytorch.org/t/one-hot-encoding-with-autograd-dice-loss/9781/5
# For calculating dice loss on images where multiple classes are present at the same time
<def_stmt>multi_class_dice_loss output target weights=<none> ignore_index=<none># output : NxCxHxW float tensor
# target : NxHxW long tensor
# weights : C float tensor
# ignore_index : int value to ignore from loss
<block_start>smooth=1.<line_sep>loss=0.<line_sep>output=output.exp()<line_sep>encoded_target=output.detach().clone().zero_()<if_stmt>ignore_index<is><not><none><block_start>mask=target<eq>ignore_index<line_sep>target=target.clone()<line_sep>target[mask]=0<line_sep>encoded_target.scatter_(1 target.unsqueeze(1) 1)<line_sep>mask=mask.unsqueeze(1).expand_as(encoded_target)<line_sep>encoded_target[mask]=0<block_end><else_stmt><block_start>encoded_target.scatter_(1 target.unsqueeze(1) 1)<block_end><if_stmt>weights<is><none><block_start>weights=torch.ones(output.size(1)).type_as(output.detach())<block_end>intersection=output<times>encoded_target<line_sep>numerator=2<times>intersection.sum(3).sum(2).sum(0)+smooth<line_sep>denominator=(output+encoded_target).sum(3).sum(2).sum(0)+smooth<line_sep>loss_per_channel=weights<times>(1-(numerator/denominator))<line_sep><return>loss_per_channel.sum()/output.size(1)<block_end># ====================== #
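# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# The output.exp() call above implies the function expects log-probabilities, e.g. from F.log_softmax:
# output = F.log_softmax(torch.randn(2, 4, 8, 8), dim=1)  # (N, C, H, W) log-probabilities
# target = torch.randint(0, 4, (2, 8, 8))                 # (N, H, W) integer class ids
# loss = multi_class_dice_loss(output, target)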
# Source: https://discuss.pytorch.org/t/how-to-implement-soft-iou-loss/15152
# Calculation of soft-IOU loss
<def_stmt>to_one_hot tensor nClasses<block_start>n,h,w=tensor.size()<line_sep>one_hot=torch.zeros(n nClasses h w).scatter_(1 tensor.view(n 1 h w) 1)<line_sep><return>one_hot<block_end># ====================== #
# Source: https://gist.github.com/jeremyjordan/9ea3032a32909f71dd2ab35fe3bacc08
# Another calculation of dice loss over multiple classes. Inputs are numpy arrays.
<def_stmt>soft_multiclass_dice_loss y_true y_pred epsilon=1e-6<block_start>'''
Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.
Assumes the `channels_last` format.
# Arguments
y_true: b x X x Y( x Z...) x c One hot encoding of ground truth
y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax)
epsilon: Used for numerical stability to avoid divide by zero errors
# References
V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation
https://arxiv.org/abs/1606.04797
More details on Dice loss formulation
https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022
'''<line_sep># skip the batch and class axis for calculating Dice score
axes=tuple(range(1 len(y_pred.shape)-1))<line_sep>numerator=2.<times>np.sum(y_pred<times>y_true axes)<line_sep>denominator=np.sum(np.square(y_pred)+np.square(y_true) axes)<line_sep><return>1-np.mean(numerator/(denominator+epsilon))<block_end># average over classes and batch
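# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# Both arguments are numpy arrays in channels_last layout:
# y_true = np.eye(3)[np.random.randint(0, 3, (2, 8, 8))]  # (b, X, Y, c) one-hot ground truth
# p = np.random.rand(2, 8, 8, 3)
# y_pred = p / p.sum(axis=-1, keepdims=True)              # sums to 1 over the c channel
# loss = soft_multiclass_dice_loss(y_true, y_pred)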
<class_stmt>mIoULoss(nn.Module)<block_start><def_stmt>__init__ self weight=<none> size_average=<true> num_classes=2 **_<block_start>super(mIoULoss self).__init__()<line_sep>self.classes=num_classes<block_end><def_stmt>forward self inputs target_oneHot **_# inputs => N x Classes x H x W
# target_oneHot => N x Classes x H x W
<block_start>N=inputs.size()[0]<line_sep># predicted probabilities for each pixel along channel
inputs=F.softmax(inputs dim=1)<line_sep># Numerator Product
inter=inputs<times>target_oneHot<line_sep>## Sum over all pixels N x C x H x W => N x C
inter=inter.view(N self.classes -1).sum(2)<line_sep># Denominator
union=inputs+target_oneHot-(inputs<times>target_oneHot)<line_sep>## Sum over all pixels N x C x H x W => N x C
union=union.view(N self.classes -1).sum(2)<line_sep>loss=inter/union<line_sep>## Return average loss over classes and batch
<return>-loss.mean()<block_end><block_end># ====================== #
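# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# mIoULoss takes raw logits and a one-hot target (to_one_hot above can build one);
# it returns the negative mean IoU, so minimizing it maximizes IoU:
# criterion = mIoULoss(num_classes=3)
# logits = torch.randn(2, 3, 8, 8)
# target = to_one_hot(torch.randint(0, 3, (2, 8, 8)), 3)
# loss = criterion(logits, target)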
# Source: https://github.com/snakers4/mnasnet-pytorch/blob/master/src/models/semseg_loss.py
# Combination Loss from BCE and Dice
<class_stmt>ComboBCEDiceLoss(nn.Module)<block_start>"""
Combination BinaryCrossEntropy (BCE) and Dice Loss with an optional running mean and loss weighting.
"""<def_stmt>__init__ self use_running_mean=<false> bce_weight=1 dice_weight=1 eps=1e-6 gamma=0.9 combined_loss_only=<true> **_<block_start>"""
:param use_running_mean: - bool (default: False) Whether to keep exponential running means of the BCE and Dice losses (updated with factor (1-gamma)) and use them to rebalance the two loss weights
:param bce_weight: - float (default: 1.0) Weight multiplier for the BCE loss (relative to dice)
:param dice_weight: - float (default: 1.0) Weight multiplier for the Dice loss (relative to BCE)
:param eps: - float (default: 1e-6) small constant added to the dice denominator for numerical stability
:param gamma: - float (default: 0.9) decay factor for the running means
:param combined_loss_only: - bool (default: True) whether to return a single combined loss or three separate losses
"""<line_sep>super().__init__()<line_sep>'''
Note: BCEWithLogitsLoss already performs a torch.sigmoid(pred)
before applying BCE!
'''<line_sep>self.bce_logits_loss=nn.BCEWithLogitsLoss()<line_sep>self.dice_weight=dice_weight<line_sep>self.bce_weight=bce_weight<line_sep>self.eps=eps<line_sep>self.gamma=gamma<line_sep>self.combined_loss_only=combined_loss_only<line_sep>self.use_running_mean=use_running_mean<line_sep>self.bce_weight=bce_weight<line_sep>self.dice_weight=dice_weight<if_stmt>self.use_running_mean<is><true><block_start>self.register_buffer('running_bce_loss' torch.zeros(1))<line_sep>self.register_buffer('running_dice_loss' torch.zeros(1))<line_sep>self.reset_parameters()<block_end><block_end><def_stmt>to self device<block_start>super().to(device=device)<line_sep>self.bce_logits_loss.to(device=device)<block_end><def_stmt>reset_parameters self<block_start>self.running_bce_loss.zero_()<line_sep>self.running_dice_loss.zero_()<block_end><def_stmt>forward self outputs labels **_# inputs and targets are assumed to be BxCxWxH (batch, color, width, height)
<block_start>outputs=outputs.squeeze()# necessary in case we're dealing with binary segmentation (color dim of 1)
<if_stmt>len(outputs.shape)<ne>len(labels.shape)<block_start><raise>AssertionError<block_end># assert that B, W and H are the same
<if_stmt>outputs.size(-0)<ne>labels.size(-0)<block_start><raise>AssertionError<block_end><if_stmt>outputs.size(-1)<ne>labels.size(-1)<block_start><raise>AssertionError<block_end><if_stmt>outputs.size(-2)<ne>labels.size(-2)<block_start><raise>AssertionError<block_end>bce_loss=self.bce_logits_loss(outputs labels)<line_sep>dice_target=(labels<eq>1).float()<line_sep>dice_output=torch.sigmoid(outputs)<line_sep>intersection=(dice_output<times>dice_target).sum()<line_sep>union=dice_output.sum()+dice_target.sum()+self.eps<line_sep>dice_loss=(-torch.log(2<times>intersection/union))<if_stmt>self.use_running_mean<is><false><block_start>bmw=self.bce_weight<line_sep>dmw=self.dice_weight<line_sep># loss += torch.clamp(1 - torch.log(2 * intersection / union),0,100) * self.dice_weight
<block_end><else_stmt><block_start>self.running_bce_loss=self.running_bce_loss<times>self.gamma+bce_loss.data<times>(1-self.gamma)<line_sep>self.running_dice_loss=self.running_dice_loss<times>self.gamma+dice_loss.data<times>(1-self.gamma)<line_sep>bm=float(self.running_bce_loss)<line_sep>dm=float(self.running_dice_loss)<line_sep>bmw=1-bm/(bm+dm)<line_sep>dmw=1-dm/(bm+dm)<block_end>loss=bce_loss<times>bmw+dice_loss<times>dmw<if_stmt>self.combined_loss_only<block_start><return>loss<block_end><else_stmt><block_start><return>loss bce_loss dice_loss<block_end><block_end><block_end><class_stmt>ComboSemsegLossWeighted(nn.Module)<block_start><def_stmt>__init__ self use_running_mean=<false> bce_weight=1 dice_weight=1 eps=1e-6 gamma=0.9 use_weight_mask=<false> combined_loss_only=<false> **_<block_start>super().__init__()<line_sep>self.use_weight_mask=use_weight_mask<line_sep>self.nll_loss=nn.BCEWithLogitsLoss()<line_sep>self.dice_weight=dice_weight<line_sep>self.bce_weight=bce_weight<line_sep>self.eps=eps<line_sep>self.gamma=gamma<line_sep>self.combined_loss_only=combined_loss_only<line_sep>self.use_running_mean=use_running_mean<line_sep>self.bce_weight=bce_weight<line_sep>self.dice_weight=dice_weight<if_stmt>self.use_running_mean<is><true><block_start>self.register_buffer('running_bce_loss' torch.zeros(1))<line_sep>self.register_buffer('running_dice_loss' torch.zeros(1))<line_sep>self.reset_parameters()<block_end><block_end><def_stmt>to self device<block_start>super().to(device=device)<line_sep>self.nll_loss.to(device=device)<block_end><def_stmt>reset_parameters self<block_start>self.running_bce_loss.zero_()<line_sep>self.running_dice_loss.zero_()<block_end><def_stmt>forward self logits labels weights **_# logits and labels are assumed to be BxCxWxH
<block_start><if_stmt>len(logits.shape)<ne>len(labels.shape)<block_start><raise>AssertionError<block_end># assert that B, W and H are the same
<if_stmt>logits.size(0)<ne>labels.size(0)<block_start><raise>AssertionError<block_end><if_stmt>logits.size(2)<ne>labels.size(2)<block_start><raise>AssertionError<block_end><if_stmt>logits.size(3)<ne>labels.size(3)<block_start><raise>AssertionError<block_end># weights are assumed to be BxWxH
# assert that B, W and H are the same for target and mask
<if_stmt>logits.size(0)<ne>weights.size(0)<block_start><raise>AssertionError<block_end><if_stmt>logits.size(2)<ne>weights.size(1)<block_start><raise>AssertionError<block_end><if_stmt>logits.size(3)<ne>weights.size(2)<block_start><raise>AssertionError<block_end><if_stmt>self.use_weight_mask<block_start>bce_loss=F.binary_cross_entropy_with_logits(input=logits target=labels weight=weights)<block_end><else_stmt><block_start>bce_loss=self.nll_loss(input=logits target=labels)<block_end>dice_target=(labels<eq>1).float()<line_sep>dice_output=torch.sigmoid(logits)<line_sep>intersection=(dice_output<times>dice_target).sum()<line_sep>union=dice_output.sum()+dice_target.sum()+self.eps<line_sep>dice_loss=(-torch.log(2<times>intersection/union))<if_stmt>self.use_running_mean<is><false><block_start>bmw=self.bce_weight<line_sep>dmw=self.dice_weight<line_sep># loss += torch.clamp(1 - torch.log(2 * intersection / union),0,100) * self.dice_weight
<block_end><else_stmt><block_start>self.running_bce_loss=self.running_bce_loss<times>self.gamma+bce_loss.data<times>(1-self.gamma)<line_sep>self.running_dice_loss=self.running_dice_loss<times>self.gamma+dice_loss.data<times>(1-self.gamma)<line_sep>bm=float(self.running_bce_loss)<line_sep>dm=float(self.running_dice_loss)<line_sep>bmw=1-bm/(bm+dm)<line_sep>dmw=1-dm/(bm+dm)<block_end>loss=bce_loss<times>bmw+dice_loss<times>dmw<if_stmt>self.combined_loss_only<block_start><return>loss<block_end><else_stmt><block_start><return>loss bce_loss dice_loss<block_end><block_end><block_end># ====================== #
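# Usage sketch for ComboBCEDiceLoss above (not part of the original source; shapes are illustrative assumptions).
# criterion = ComboBCEDiceLoss()
# outputs = torch.randn(4, 1, 16, 16)                     # (B, 1, H, W) raw logits; squeezed internally
# labels = torch.randint(0, 2, (4, 16, 16)).float()       # (B, H, W) binary mask
# loss = criterion(outputs, labels)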
# Source: https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/utils/loss.py
# Description: http://www.erogol.com/online-hard-example-mining-pytorch/
# Online Hard Example Mining (OHEM) loss
<class_stmt>OhemCrossEntropy2d(nn.Module)<block_start><def_stmt>__init__ self thresh=0.6 min_kept=0 ignore_index=-100 is_binary=<true> **kwargs<block_start>super().__init__()<line_sep>self.ignore_label=ignore_index<line_sep>self.is_binary=is_binary<line_sep>self.thresh=float(thresh)<line_sep>self.min_kept=int(min_kept)<line_sep>self.criterion=BCEWithLogitsViewLoss(**kwargs)<block_end><def_stmt>forward self logits labels **_<block_start>"""
Args:
logits: (n, c, h, w)
labels: (n, h, w)
"""<if_stmt>self.is_binary<block_start>predict=torch.sigmoid(logits)<block_end><else_stmt><block_start>predict=F.softmax(logits dim=1)<block_end>n,c,h,w=predict.size()<line_sep>input_label=labels.detach().cpu().numpy().ravel().astype(np.int32)<line_sep>x=np.rollaxis(predict.detach().cpu().numpy() 1).reshape((c -1))<line_sep>input_prob=np.exp(x-x.max(axis=0).reshape((1 -1)))<line_sep>input_prob<augdiv>input_prob.sum(axis=0).reshape((1 -1))<line_sep>valid_flag=input_label<ne>self.ignore_label<line_sep>valid_inds=np.where(valid_flag)[0]<line_sep>label=input_label[valid_flag]<line_sep>num_valid=valid_flag.sum()<if_stmt>self.min_kept<ge>num_valid<block_start>print('Labels: {}'.format(num_valid))<block_end><elif_stmt>num_valid<g>0<block_start>prob=input_prob[: valid_flag]<line_sep>pred=prob[label-1 np.arange(len(label) dtype=np.int32)]<line_sep>threshold=self.thresh<if_stmt>self.min_kept<g>0<block_start>index=pred.argsort()<line_sep>threshold_index=index[min(len(index) self.min_kept)-1]<if_stmt>pred[threshold_index]<g>self.thresh<block_start>threshold=pred[threshold_index]<block_end><block_end>kept_flag=pred<le>threshold<line_sep>valid_inds=valid_inds[kept_flag]<line_sep>#print('hard ratio: {} = {} / {} '.format(round(len(valid_inds)/num_valid, 4), len(valid_inds), num_valid))
<block_end>label=input_label[valid_inds].copy()<line_sep>input_label.fill(self.ignore_label)<line_sep>input_label[valid_inds]=label<line_sep>#print(np.sum(input_label != self.ignore_label))
labels=torch.from_numpy(input_label.reshape(labels.size())).type_as(predict).to(labels.device)<line_sep>predict=predict.squeeze()# in case we're dealing with B/W images instead of RGB
<return>self.criterion(predict labels)<block_end><block_end># ====================== #
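# Usage sketch (not part of the original source; shapes and hyperparameters are illustrative assumptions).
# criterion = OhemCrossEntropy2d(thresh=0.6, min_kept=100, is_binary=True)
# logits = torch.randn(2, 1, 16, 16)                      # (n, c, h, w)
# labels = torch.randint(0, 2, (2, 16, 16)).float()       # (n, h, w)
# loss = criterion(logits, labels)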
# Source: https://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/utils/loss.py
# Loss used for EncNet
<class_stmt>EncNetLoss(nn.CrossEntropyLoss)<block_start>"""
2D Cross Entropy Loss with SE Loss
Specifically used for EncNet.
se_loss is the Semantic Encoding Loss from the paper `Context Encoding for Semantic Segmentation <https://arxiv.org/pdf/1803.08904v1>`_.
It computes probabilities of contexts appearing together.
Without SE_loss and Aux_loss this class simply forwards inputs to Torch's Cross Entropy Loss (nn.CrossEntropyLoss)
"""<def_stmt>__init__ self se_loss=<true> se_weight=0.2 nclass=19 aux=<false> aux_weight=0.4 weight=<none> ignore_index=-1 **_<block_start>super(EncNetLoss self).__init__(weight <none> ignore_index)<line_sep>self.se_loss=se_loss<line_sep>self.aux=aux<line_sep>self.nclass=nclass<line_sep>self.se_weight=se_weight<line_sep>self.aux_weight=aux_weight<line_sep>self.bceloss=nn.BCELoss(weight)<block_end><def_stmt>forward self *inputs **_<block_start>preds,target=tuple(inputs)<line_sep>inputs=tuple(list(preds)+[target])<if_stmt><not>self.se_loss<and><not>self.aux<block_start><return>super(EncNetLoss self).forward(*inputs)<block_end><elif_stmt><not>self.se_loss<block_start>pred1,pred2,target=tuple(inputs)<line_sep>loss1=super(EncNetLoss self).forward(pred1 target)<line_sep>loss2=super(EncNetLoss self).forward(pred2 target)<line_sep><return>dict(loss=loss1+self.aux_weight<times>loss2)<block_end><elif_stmt><not>self.aux<block_start>pred,se_pred,target=tuple(inputs)<line_sep>se_target=self._get_batch_label_vector(target nclass=self.nclass).type_as(pred)<line_sep>loss1=super(EncNetLoss self).forward(pred target)<line_sep>loss2=self.bceloss(torch.sigmoid(se_pred) se_target)<line_sep><return>dict(loss=loss1+self.se_weight<times>loss2)<block_end><else_stmt><block_start>pred1,se_pred,pred2,target=tuple(inputs)<line_sep>se_target=self._get_batch_label_vector(target nclass=self.nclass).type_as(pred1)<line_sep>loss1=super(EncNetLoss self).forward(pred1 target)<line_sep>loss2=super(EncNetLoss self).forward(pred2 target)<line_sep>loss3=self.bceloss(torch.sigmoid(se_pred) se_target)<line_sep><return>dict(loss=loss1+self.aux_weight<times>loss2+self.se_weight<times>loss3)<block_end><block_end>@staticmethod<def_stmt>_get_batch_label_vector target nclass# target is a 3D Variable BxHxW, output is 2D BxnClass
<block_start>batch=target.size(0)<line_sep>tvect=Variable(torch.zeros(batch nclass))<for_stmt>i range(batch)<block_start>hist=torch.histc(target[i].cpu().data.float() bins=nclass min=0 max=nclass-1)<line_sep>vect=hist<g>0<line_sep>tvect[i]=vect<block_end><return>tvect<block_end><block_end><class_stmt>MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d)<block_start>"""
Loss that takes class and segmentation targets into consideration together, using OHEM (online hard example mining)
"""<def_stmt>__init__ self aux=<false> aux_weight=0.4 weight=<none> ignore_index=-1 **kwargs<block_start>super(MixSoftmaxCrossEntropyOHEMLoss self).__init__(ignore_index=ignore_index)<line_sep>self.aux=aux<line_sep>self.aux_weight=aux_weight<line_sep>self.bceloss=nn.BCELoss(weight)<block_end><def_stmt>to self device<block_start>super().to(device=device)<line_sep>self.bceloss.to(device=device)<block_end><def_stmt>_aux_forward self *inputs **_<block_start>*preds,target=tuple(inputs)<line_sep>loss=super(MixSoftmaxCrossEntropyOHEMLoss self).forward(preds[0] target)<for_stmt>i range(1 len(preds))<block_start>aux_loss=super(MixSoftmaxCrossEntropyOHEMLoss self).forward(preds[i] target)<line_sep>loss<augadd>self.aux_weight<times>aux_loss<block_end><return>loss<block_end><def_stmt>forward self *inputs **_<block_start>preds,target=tuple(inputs)<line_sep>inputs=tuple(list(preds)+[target])<if_stmt>self.aux<block_start><return>dict(loss=self._aux_forward(*inputs))<block_end><else_stmt><block_start><return>dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss self).forward(preds target))<block_end><block_end><block_end># ====================== #
# Source: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/loss.py
# OHEM Segmentation Loss
<class_stmt>OHEMSegmentationLosses(OhemCrossEntropy2d)<block_start>"""
2D Cross Entropy Loss with Auxiliary Loss
"""<def_stmt>__init__ self se_loss=<false> se_weight=0.2 num_classes=1 aux=<false> aux_weight=0.4 weight=<none> ignore_index=-1<block_start>super(OHEMSegmentationLosses self).__init__(ignore_index)<line_sep>self.se_loss=se_loss<line_sep>self.aux=aux<line_sep>self.num_classes=num_classes<line_sep>self.se_weight=se_weight<line_sep>self.aux_weight=aux_weight<line_sep>self.bceloss=nn.BCELoss(weight)<block_end><def_stmt>to self device<block_start>super().to(device=device)<line_sep>self.bceloss.to(device=device)<block_end><def_stmt>forward self *inputs **_<block_start><if_stmt><not>self.se_loss<and><not>self.aux<block_start><return>super(OHEMSegmentationLosses self).forward(*inputs)<block_end><elif_stmt><not>self.se_loss<block_start>pred1,pred2,target=tuple(inputs)<line_sep>loss1=super(OHEMSegmentationLosses self).forward(pred1 target)<line_sep>loss2=super(OHEMSegmentationLosses self).forward(pred2 target)<line_sep><return>loss1+self.aux_weight<times>loss2<block_end><elif_stmt><not>self.aux<block_start>pred,se_pred,target=tuple(inputs)<line_sep>se_target=self._get_batch_label_vector(target nclass=self.num_classes).type_as(pred)<line_sep>loss1=super(OHEMSegmentationLosses self).forward(pred target)<line_sep>loss2=self.bceloss(torch.sigmoid(se_pred) se_target)<line_sep><return>loss1+self.se_weight<times>loss2<block_end><else_stmt><block_start>pred1,se_pred,pred2,target=tuple(inputs)<line_sep>se_target=self._get_batch_label_vector(target nclass=self.num_classes).type_as(pred1)<line_sep>loss1=super(OHEMSegmentationLosses self).forward(pred1 target)<line_sep>loss2=super(OHEMSegmentationLosses self).forward(pred2 target)<line_sep>loss3=self.bceloss(torch.sigmoid(se_pred) se_target)<line_sep><return>loss1+self.aux_weight<times>loss2+self.se_weight<times>loss3<block_end><block_end>@staticmethod<def_stmt>_get_batch_label_vector target nclass# target is a 3D Variable BxHxW, output is 2D BxnClass
<block_start>batch=target.size(0)<line_sep>tvect=Variable(torch.zeros(batch nclass))<for_stmt>i range(batch)<block_start>hist=torch.histc(target[i].cpu().data.float() bins=nclass min=0 max=nclass-1)<line_sep>vect=hist<g>0<line_sep>tvect[i]=vect<block_end><return>tvect<block_end><block_end># ====================== #
# Source: https://github.com/yinmh17/DNL-Semantic-Segmentation/blob/master/model/seg/loss/ohem_ce_loss.py
# OHEM CrossEntropy Loss
<class_stmt>OhemCELoss(nn.Module)<block_start><def_stmt>__init__ self configer is_binary=<false><block_start>super(OhemCELoss self).__init__()<line_sep>self.configer=configer<line_sep>weight=self.configer.get('loss.params.ohem_ce_loss.weight' default=<none>)<line_sep>self.weight=torch.FloatTensor(weight)<if>weight<is><not><none><else>weight<line_sep>self.reduction=self.configer.get('loss.params.ohem_ce_loss.reduction' default='mean')<line_sep>self.ignore_index=self.configer.get('loss.params.ohem_ce_loss.ignore_index' default=-100)<line_sep>self.thresh=self.configer.get('loss.params.ohem_ce_loss.thresh' default=0.7)<line_sep>self.min_kept=max(1 self.configer.get('loss.params.ohem_ce_loss.minkeep' default=5))<line_sep>self.is_binary=is_binary<block_end><def_stmt>forward self logits labels **_<block_start>"""
Args:
logits:(n, c, h, w)
labels:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""<line_sep>batch_kept=self.min_kept<times>labels.size(0)<line_sep>labels=self._scale_target(labels (logits.size(2) logits.size(3)))<if_stmt>self.is_binary<block_start>prob_out=torch.sigmoid(logits)<block_end><else_stmt><block_start>prob_out=F.softmax(logits dim=1)<block_end>tmp_target=labels.clone()<line_sep>tmp_target[tmp_target<eq>self.ignore_index]=0<line_sep>prob=prob_out.gather(1 tmp_target.unsqueeze(1))<line_sep>mask=labels.contiguous().view(-1 )<ne>self.ignore_index<line_sep>sort_prob,sort_indices=prob.contiguous().view(-1 )[mask].contiguous().sort()<line_sep>min_threshold=sort_prob[min(batch_kept sort_prob.numel()-1)]<if>sort_prob.numel()<g>0<else>0.0<line_sep>threshold=max(min_threshold self.thresh)<line_sep>loss_matrix=F.cross_entropy(logits labels weight=self.weight.to(logits.device)<if>self.weight<is><not><none><else><none> ignore_index=self.ignore_index reduction='none')<line_sep>loss_matrix=loss_matrix.contiguous().view(-1 )<line_sep>sort_loss_matirx=loss_matrix[mask][sort_indices]<line_sep>select_loss_matrix=sort_loss_matirx[sort_prob<l>threshold]<if_stmt>self.reduction<eq>'sum'<or>select_loss_matrix.numel()<eq>0<block_start><return>select_loss_matrix.sum()<block_end><elif_stmt>self.reduction<eq>'mean'<block_start><return>select_loss_matrix.mean()<block_end><else_stmt><block_start><raise>NotImplementedError('Reduction Error!')<block_end><block_end>@staticmethod<def_stmt>_scale_target targets_ scaled_size<block_start>targets=targets_.clone().unsqueeze(1).float()<line_sep>targets=F.interpolate(targets size=scaled_size mode='nearest')<line_sep><return>targets.squeeze(1).long()<block_end><block_end># ===================== #
# Source: https://github.com/Hsuxu/Loss_ToolBox-PyTorch/blob/master/LovaszSoftmax/lovasz_loss.py
<def_stmt>lovasz_grad gt_sorted<block_start>"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""<line_sep>p=len(gt_sorted)<line_sep>gts=gt_sorted.sum()<line_sep>intersection=gts-gt_sorted.float().cumsum(0)<line_sep>union=gts+(1-gt_sorted).float().cumsum(0)<line_sep>jaccard=1.-intersection/union<if_stmt>p<g>1# cover 1-pixel case
<block_start>jaccard[1:p]=jaccard[1:p]-jaccard[0:-1]<block_end><return>jaccard<block_end><class_stmt>LovaszSoftmax(nn.Module)<block_start><def_stmt>__init__ self reduction='mean' **_<block_start>super(LovaszSoftmax self).__init__()<line_sep>self.reduction=reduction<block_end>@staticmethod<def_stmt>prob_flatten input target<block_start><if_stmt>input.dim()<not><in>[4 5]<block_start><raise>AssertionError<block_end>num_class=input.size(1)<if_stmt>input.dim()<eq>4<block_start>input=input.permute(0 2 3 1).contiguous()<line_sep>input_flatten=input.view(-1 num_class)<block_end><elif_stmt>input.dim()<eq>5<block_start>input=input.permute(0 2 3 4 1).contiguous()<line_sep>input_flatten=input.view(-1 num_class)<block_end>target_flatten=target.view(-1)<line_sep><return>input_flatten target_flatten<block_end><def_stmt>lovasz_softmax_flat self inputs targets<block_start>num_classes=inputs.size(1)<line_sep>losses=[]<for_stmt>c range(num_classes)<block_start>target_c=(targets<eq>c).float()<if_stmt>num_classes<eq>1<block_start>input_c=inputs[: 0]<block_end><else_stmt><block_start>input_c=inputs[: c]<block_end>loss_c=(torch.autograd.Variable(target_c)-input_c).abs()<line_sep>loss_c_sorted,loss_index=torch.sort(loss_c 0 descending=<true>)<line_sep>target_c_sorted=target_c[loss_index]<line_sep>losses.append(torch.dot(loss_c_sorted torch.autograd.Variable(lovasz_grad(target_c_sorted))))<block_end>losses=torch.stack(losses)<if_stmt>self.reduction<eq>'none'<block_start>loss=losses<block_end><elif_stmt>self.reduction<eq>'sum'<block_start>loss=losses.sum()<block_end><else_stmt><block_start>loss=losses.mean()<block_end><return>loss<block_end><def_stmt>forward self inputs targets **_<block_start>inputs,targets=self.prob_flatten(inputs targets)<line_sep>losses=self.lovasz_softmax_flat(inputs targets)<line_sep><return>losses<block_end><block_end># ===================== #
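# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# LovaszSoftmax expects per-class probabilities (a 4-D tensor for 2D inputs or 5-D for 3D inputs):
# probs = torch.softmax(torch.randn(2, 3, 8, 8), dim=1)   # (N, C, H, W)
# targets = torch.randint(0, 3, (2, 8, 8))                # (N, H, W) integer class ids
# loss = LovaszSoftmax()(probs, targets)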
# Source: https://github.com/xuuuuuuchen/Active-Contour-Loss/blob/master/Active-Contour-Loss.py (MIT)
<class_stmt>ActiveContourLoss(nn.Module)<block_start>"""
`Learning Active Contour Models for Medical Image Segmentation <http://openaccess.thecvf.com/content_CVPR_2019/papers/Chen_Learning_Active_Contour_Models_for_Medical_Image_Segmentation_CVPR_2019_paper.pdf>`_
Note that this only works for B/W masks right now... which is kind of the point of this loss as contours in RGB should be cast to B/W
before computing the loss.
Params:
:param mu: (float, default=1.0) - Scales the inner region loss relative to outer region (less or more prominent)
:param lambdaP: (float, default=5.0) - Scales the combined region loss compared to the length loss (less or more prominent)
"""<def_stmt>__init__ self lambdaP=5. mu=1. is_binary:bool=<false> **_<block_start>super(ActiveContourLoss self).__init__()<line_sep>self.lambdaP=lambdaP<line_sep>self.mu=mu<line_sep>self.is_binary=is_binary<block_end><def_stmt>forward self logits labels **_<block_start><if_stmt>self.is_binary<block_start>logits=torch.sigmoid(logits)<block_end><else_stmt><block_start>logits=F.softmax(logits dim=1)<block_end><if_stmt>labels.shape<ne>logits.shape<block_start><if_stmt>logits.shape<g>labels.shape<block_start>labels.unsqueeze(dim=1)<block_end><else_stmt><block_start><raise>Exception(f'Non-matching shapes for logits ({logits.shape}) and labels ({labels.shape})')<block_end><block_end>"""
length term
"""<line_sep>x=logits[: : 1: :]-logits[: : :-1 :]# horizontal gradient (B, C, H-1, W)
y=logits[: : : 1:]-logits[: : : :-1]# vertical gradient (B, C, H, W-1)
delta_x=x[: : 1: :-2]<power>2# (B, C, H-2, W-2)
delta_y=y[: : :-2 1:]<power>2# (B, C, H-2, W-2)
delta_u=torch.abs(delta_x+delta_y)<line_sep>epsilon=1e-8# a small parameter to avoid taking the square root of zero in practice.
length=torch.mean(torch.sqrt(delta_u+epsilon))# eq.(11) in the paper, mean is used instead of sum.
"""
region term
"""<line_sep>C_in=torch.ones_like(logits)<line_sep>C_out=torch.zeros_like(labels)<line_sep>region_in=torch.abs(torch.mean(logits[: 0 : :]<times>((labels[: 0 : :]-C_in)<power>2)))# equ.(12) in the paper, mean is used instead of sum.
region_out=torch.abs(torch.mean((1-logits[: 0 : :])<times>((labels[: 0 : :]-C_out)<power>2)))# equ.(12) in the paper
<return>length+self.lambdaP<times>(self.mu<times>region_in+region_out)<block_end><block_end><class_stmt>ActiveContourLossAlt(nn.Module)<block_start>"""
`Learning Active Contour Models for Medical Image Segmentation <http://openaccess.thecvf.com/content_CVPR_2019/papers/Chen_Learning_Active_Contour_Models_for_Medical_Image_Segmentation_CVPR_2019_paper.pdf>`_
Note that this only works for B/W masks right now... which is kind of the point of this loss as contours in RGB should be cast to B/W
before computing the loss.
Params:
:param len_w: (float, default=1.0) - The multiplier to use when adding boundary loss.
:param reg_w: (float, default=1.0) - The multiplier to use when adding region loss.
:param apply_log: (bool, default=True) - Whether to compute the loss in log space
"""<def_stmt>__init__ self len_w=1. reg_w=1. apply_log=<true> is_binary:bool=<false> **_<block_start>super(ActiveContourLossAlt self).__init__()<line_sep>self.len_w=len_w<line_sep>self.reg_w=reg_w<line_sep>self.epsilon=1e-8# a parameter to avoid square root = zero issues
self.apply_log=apply_log<line_sep>self.is_binary=is_binary<block_end><def_stmt>forward self logits labels **_# must convert raw logits to predicted probabilities for each pixel along channel
<block_start><if_stmt>self.is_binary<block_start>probs=torch.sigmoid(logits)<block_end><else_stmt><block_start>probs=F.softmax(logits dim=1)<block_end><if_stmt>labels.shape<ne>logits.shape<block_start><if_stmt>logits.shape<g>labels.shape<block_start>labels=labels.unsqueeze(dim=1)<block_end><else_stmt><block_start><raise>Exception(f'Non-matching shapes for logits ({logits.shape}) and labels ({labels.shape})')<block_end><block_end>"""
length term:
- Subtract adjacent pixels from each other in X and Y directions
- Determine where they differ from the ground truth (targets)
- Calculate MSE
"""<line_sep># horizontal and vertical directions
x=probs[: : 1: :]-probs[: : :-1 :]# differences in horizontal direction
y=probs[: : : 1:]-probs[: : : :-1]# differences in vertical direction
target_x=labels[: : 1: :]-labels[: : :-1 :]<line_sep>target_y=labels[: : : 1:]-labels[: : : :-1]<line_sep># find difference between values of probs and targets
delta_x=(target_x-x).abs()# do we need to subtract absolute values or relative?
delta_y=(target_y-y).abs()<line_sep># get MSE of the differences per pixel
# importantly because deltas are mostly < 1, a simple square of the error will actually yield LOWER results
# so we select 0.5 as the middle ground where small error will be further minimized while large error will
# be highlighted (pushed to be > 1 and up to 2.5 for maximum error).
# len_error_sq = ((delta_x + 0.5) ** 2) + ((delta_y + 0.5) ** 2)
# length = torch.sqrt(len_error_sq.sum() + self.epsilon)
# the length loss here is the Euclidean norm of the summed x and y deltas (not a true per-pixel MSE)
length_loss=torch.sqrt(delta_x.sum()<power>2+delta_y.sum()<power>2+self.epsilon)<line_sep>"""
region term (should this be done in log space to avoid instabilities?)
- compute the error produced by all pixels that are not equal to 0 outside of the ground truth mask
- compute error produced by all pixels that are not equal to 1 inside the mask
"""<line_sep># reference code for selecting masked values from a tensor
# t_m_bool = t_mask.type(torch.ByteTensor)
# t_result = t_in.masked_select(t_m_bool)
# C_1 = torch.ones((image_size, image_size), device=target.device)
# C_2 = torch.zeros((image_size, image_size), device=target.device)
# the sum of all pixel values that are not equal 0 outside of the ground truth mask
error_in=probs[: 0 : :]<times>((labels[: 0 : :]-1)<power>2)# invert the ground truth mask and multiply by probs
# the sum of all pixel values that are not equal 1 inside of the ground truth mask
probs_diff=(probs[: 0 : :]-labels[: 0 : :]).abs()# subtract mask from probs giving us the errors
error_out=(probs_diff<times>labels[: 0 : :])# multiply mask by error, giving us the error terms inside the mask.
<if_stmt>self.apply_log<block_start>loss=torch.log(length_loss)+torch.log(error_in.sum()+error_out.sum())<block_end><else_stmt># loss = self.len_w * length_loss
<block_start>loss=self.reg_w<times>(error_in.sum()+error_out.sum())<block_end><return>torch.clamp(loss min=0.0)# make sure we don't return negative values
<block_end><block_end># ===================== #
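# Usage sketch for the two active-contour losses above (not part of the original source; shapes are illustrative assumptions).
# criterion = ActiveContourLoss(is_binary=True)
# logits = torch.randn(2, 1, 16, 16)                      # (B, 1, H, W) raw logits
# labels = torch.randint(0, 2, (2, 1, 16, 16)).float()    # matching binary mask
# loss = criterion(logits, labels)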
# Sources: https://github.com/JunMa11/SegLoss
# https://github.com/MIC-DKFZ/nnUNet/tree/master/nnunet (Apache 2.0)
<def_stmt>uniq a:Tensor<arrow>Set<block_start><return>set(torch.unique(a.cpu()).numpy())<block_end><def_stmt>sset a:Tensor sub:Iterable<arrow>bool<block_start><return>uniq(a).issubset(sub)<block_end><def_stmt>simplex t:Tensor axis=1<arrow>bool<block_start>_sum=t.sum(axis).type(torch.float32)<line_sep>_ones=torch.ones_like(_sum dtype=torch.float32)<line_sep><return>torch.allclose(_sum _ones)<block_end><def_stmt>one_hot t:Tensor axis=1<arrow>bool<block_start><return>simplex(t axis)<and>sset(t [0 1])<block_end><def_stmt>numpy_haussdorf pred:np.ndarray target:np.ndarray<arrow>float<block_start><import_from_stmt>scipy.spatial.distance directed_hausdorff<if_stmt>len(pred.shape)<ne>2<block_start><raise>AssertionError<block_end><if_stmt>pred.shape<ne>target.shape<block_start><raise>AssertionError<block_end><return>max(directed_hausdorff(pred target)[0] directed_hausdorff(target pred)[0])<block_end><def_stmt>haussdorf preds:Tensor target:Tensor<arrow>Tensor<block_start><if_stmt>preds.shape<ne>target.shape<block_start><raise>AssertionError<block_end><if_stmt><not>one_hot(preds)<block_start><raise>AssertionError<block_end><if_stmt><not>one_hot(target)<block_start><raise>AssertionError<block_end>B,C,_,_=preds.shape<line_sep>res=torch.zeros((B C) dtype=torch.float32 device=preds.device)<line_sep>n_pred=preds.detach().cpu().numpy()<line_sep>n_target=target.detach().cpu().numpy()<for_stmt>b range(B)<block_start><if_stmt>C<eq>2<block_start>res[b :]=numpy_haussdorf(n_pred[b 0] n_target[b 0])<line_sep><continue><block_end><for_stmt>c range(C)<block_start>res[b c]=numpy_haussdorf(n_pred[b c] n_target[b c])<block_end><block_end><return>res<block_end><def_stmt>softmax_helper x<block_start>rpt=[1<for>_ range(len(x.size()))]<line_sep>rpt[1]=x.size(1)<line_sep>x_max=x.max(1 keepdim=<true>)[0].repeat(*rpt)<line_sep>e_x=torch.exp(x-x_max)<line_sep><return>e_x/e_x.sum(1 keepdim=<true>).repeat(*rpt)<block_end><def_stmt>sum_tensor inp axes keepdim=<false><block_start>axes=np.unique(axes).astype(int)<if_stmt>keepdim<block_start><for_stmt>ax axes<block_start>inp=inp.sum(int(ax) keepdim=<true>)<block_end><block_end><else_stmt><block_start><for_stmt>ax sorted(axes reverse=<true>)<block_start>inp=inp.sum(int(ax))<block_end><block_end><return>inp<block_end><def_stmt>get_tp_fp_fn net_output gt axes=<none> mask=<none> square=<false><block_start>"""
net_output must be (b, c, x, y(, z))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z))
:param net_output:
:param gt:
:param axes:
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""<if_stmt>axes<is><none><block_start>axes=tuple(range(2 len(net_output.size())))<block_end>shp_x=net_output.shape<line_sep>shp_y=gt.shape<with_stmt>torch.no_grad()<block_start><if_stmt>len(shp_x)<ne>len(shp_y)<block_start>gt=gt.view((shp_y[0] 1 *shp_y[1:]))<block_end><if_stmt>all([i<eq>j<for>i,j zip(net_output.shape gt.shape)])# if this is the case then gt is probably already a one hot encoding
<block_start>y_onehot=gt<block_end><else_stmt><block_start>gt=gt.long()<line_sep>y_onehot=torch.zeros(shp_x)<if_stmt>net_output.device.type<eq>"cuda"<block_start>y_onehot=y_onehot.cuda(net_output.device.index)<block_end>y_onehot.scatter_(1 gt 1)<block_end><block_end>tp=net_output<times>y_onehot<line_sep>fp=net_output<times>(1-y_onehot)<line_sep>fn=(1-net_output)<times>y_onehot<if_stmt>mask<is><not><none><block_start>tp=torch.stack(tuple(x_i<times>mask[: 0]<for>x_i torch.unbind(tp dim=1)) dim=1)<line_sep>fp=torch.stack(tuple(x_i<times>mask[: 0]<for>x_i torch.unbind(fp dim=1)) dim=1)<line_sep>fn=torch.stack(tuple(x_i<times>mask[: 0]<for>x_i torch.unbind(fn dim=1)) dim=1)<block_end><if_stmt>square<block_start>tp=tp<power>2<line_sep>fp=fp<power>2<line_sep>fn=fn<power>2<block_end>tp=sum_tensor(tp axes keepdim=<false>)<line_sep>fp=sum_tensor(fp axes keepdim=<false>)<line_sep>fn=sum_tensor(fn axes keepdim=<false>)<line_sep><return>tp fp fn<block_end># ===================== #
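# Usage sketch (not part of the original source; shapes are illustrative assumptions).
# net_output should hold per-class probabilities and gt can be an integer label map:
# probs = torch.softmax(torch.randn(2, 3, 8, 8), dim=1)   # (b, c, x, y)
# gt = torch.randint(0, 3, (2, 8, 8))                     # (b, x, y)
# tp, fp, fn = get_tp_fp_fn(probs, gt)                    # each of shape (b, c)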
# Boundary Loss
# Source: https://github.com/JunMa11/SegLoss/blob/71b14900e91ea9405d9705c95b451fc819f24c70/test/loss_functions/boundary_loss.py#L102
<def_stmt>compute_sdf img_gt out_shape<block_start>"""
compute the signed distance map of a binary mask
img_gt: segmentation, shape = (batch_size, x, y, z)
out_shape: shape of the returned Signed Distance Map (SDM)
sdf(x) = 0                          if x is on the segmentation boundary
         -min_{y in boundary}|x-y|  if x is inside the segmentation
         +min_{y in boundary}|x-y|  if x is outside the segmentation
"""<import_from_stmt>scipy.ndimage distance_transform_edt<import_from_stmt>skimage segmentation<as>skimage_seg<line_sep>img_gt=img_gt.astype(np.uint8)<line_sep>gt_sdf=np.zeros(out_shape)<for_stmt>b range(out_shape[0])# batch size
<block_start><for_stmt>c range(1 out_shape[1])# channel
<block_start>posmask=img_gt[b][c].astype(bool)<if_stmt>posmask.any()<block_start>negmask=~posmask<line_sep>posdis=distance_transform_edt(posmask)<line_sep>negdis=distance_transform_edt(negmask)<line_sep>boundary=skimage_seg.find_boundaries(posmask mode='inner').astype(np.uint8)<line_sep>sdf=negdis-posdis<line_sep>sdf[boundary<eq>1]=0<line_sep>gt_sdf[b][c]=sdf<block_end><block_end><block_end><return>gt_sdf<block_end><class_stmt>BDLoss(nn.Module)<block_start><def_stmt>__init__ self is_binary:bool=<false> **_<block_start>"""
compute the boundary loss
only computes the loss for the foreground
ref: https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L74
"""<line_sep>self.is_binary=is_binary<line_sep>super(BDLoss self).__init__()<line_sep># self.do_bg = do_bg
<block_end><def_stmt>forward self logits labels **_<block_start>"""
net_output: (batch_size, class, x,y,z)
target: ground truth, shape: (batch_size, 1, x,y,z)
bound: precomputed distance map, shape (batch_size, class, x,y,z)
"""<if_stmt>self.is_binary<block_start>logits=torch.sigmoid(logits)<block_end><else_stmt><block_start>logits=F.softmax(logits dim=1)<block_end><with_stmt>torch.no_grad()<block_start><if_stmt>len(logits.shape)<ne>len(labels.shape)<block_start>labels=labels.view((labels.shape[0] 1 *labels.shape[1:]))<block_end><if_stmt>all([i<eq>j<for>i,j zip(logits.shape labels.shape)])# if this is the case then gt is probably already a one hot encoding
<block_start>y_onehot=labels<block_end><else_stmt><block_start>labels=labels.long()<line_sep>y_onehot=torch.zeros(logits.shape)<if_stmt>logits.device.type<eq>"cuda"<block_start>y_onehot=y_onehot.cuda(logits.device.index)<block_end>y_onehot.scatter_(1 labels 1)<block_end>gt_sdf=compute_sdf(y_onehot.cpu().numpy() logits.shape)<block_end>phi=torch.from_numpy(gt_sdf)<if_stmt>phi.device<ne>logits.device<block_start>phi=phi.to(logits.device).type(torch.float32)<block_end># pred = net_output[:, 1:, ...].type(torch.float32)
# phi = phi[:,1:, ...].type(torch.float32)
multiplied=torch.einsum("bcxyz,bcxyz->bcxyz" logits[: 1: <ellipsis>] phi[: 1: <ellipsis>])<line_sep>bd_loss=multiplied.mean()<line_sep><return>bd_loss<block_end><block_end># ===================== #
# Source: https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
<class_stmt>TverskyLoss(nn.Module)<block_start>"""Computes the Tversky loss [1].
Args:
:param alpha: controls the penalty for false positives.
:param beta: controls the penalty for false negatives.
:param eps: added to the denominator for numerical stability.
Returns:
tversky_loss: the Tversky loss.
Notes:
alpha = beta = 0.5 => dice coeff
alpha = beta = 1 => tanimoto coeff
alpha + beta = 1 => F beta coeff
References:
[1]: https://arxiv.org/abs/1706.05721
"""<def_stmt>__init__ self alpha beta eps=1e-7 **_<block_start>super(TverskyLoss self).__init__()<line_sep>self.alpha=alpha<line_sep>self.beta=beta<line_sep>self.eps=eps<block_end><def_stmt>forward self logits labels **_<block_start>"""
Args:
:param logits: a tensor of shape [B, C, H, W]. Corresponds to the raw output or logits of the model.
:param labels: a tensor of shape [B, H, W] or [B, 1, H, W].
:return: loss
"""<line_sep>num_classes=logits.shape[1]<if_stmt>num_classes<eq>1<block_start>true_1_hot=torch.eye(num_classes+1)[labels.squeeze(1).long()]<line_sep>true_1_hot=true_1_hot.permute(0 3 1 2).float()<line_sep>true_1_hot_f=true_1_hot[: 0:1 : :]<line_sep>true_1_hot_s=true_1_hot[: 1:2 : :]<line_sep>true_1_hot=torch.cat([true_1_hot_s true_1_hot_f] dim=1)<line_sep>pos_prob=torch.sigmoid(logits)<line_sep>neg_prob=1-pos_prob<line_sep>probas=torch.cat([pos_prob neg_prob] dim=1)<block_end><else_stmt><block_start>true_1_hot=torch.eye(num_classes)[labels.squeeze(1)]<line_sep>true_1_hot=true_1_hot.permute(0 3 1 2).float()<line_sep>probas=F.softmax(logits dim=1)<block_end>true_1_hot=true_1_hot.type(logits.type())<line_sep>dims=(0 )+tuple(range(2 logits.ndimension()))<line_sep>intersection=torch.sum(probas<times>true_1_hot dims)<line_sep>fps=torch.sum(probas<times>(1-true_1_hot) dims)<line_sep>fns=torch.sum((1-probas)<times>true_1_hot dims)<line_sep>num=intersection<line_sep>denom=intersection+(self.alpha<times>fps)+(self.beta<times>fns)<line_sep>tversky_loss=(num/(denom+self.eps)).mean()<line_sep><return>1-tversky_loss<block_end><block_end># ===================== #
# Source: https://github.com/cvqluu/Angular-Penalty-Softmax-Losses-Pytorch
<class_stmt>AngularPenaltySMLoss(nn.Module)<block_start><def_stmt>__init__ self in_features out_features loss_type='arcface' eps=1e-7 s=<none> m=<none> **_<block_start>'''
Angular Penalty Softmax Loss
Three 'loss_types' available: ['arcface', 'sphereface', 'cosface']
These losses are described in the following papers:
ArcFace: https://arxiv.org/abs/1801.07698
SphereFace: https://arxiv.org/abs/1704.08063
CosFace/Ad Margin: https://arxiv.org/abs/1801.05599
- Example -
criterion = AngularPenaltySMLoss(in_features, out_features, loss_type='arcface') # loss_type in ['arcface', 'sphereface', 'cosface']
'''<line_sep>super(AngularPenaltySMLoss self).__init__()<line_sep>loss_type=loss_type.lower()<if_stmt>loss_type<not><in>['arcface' 'sphereface' 'cosface']<block_start><raise>AssertionError<block_end><if_stmt>loss_type<eq>'arcface'<block_start>self.s=64.0<if><not>s<else>s<line_sep>self.m=0.5<if><not>m<else>m<block_end><if_stmt>loss_type<eq>'sphereface'<block_start>self.s=64.0<if><not>s<else>s<line_sep>self.m=1.35<if><not>m<else>m<block_end><if_stmt>loss_type<eq>'cosface'<block_start>self.s=30.0<if><not>s<else>s<line_sep>self.m=0.4<if><not>m<else>m<block_end>self.loss_type=loss_type<line_sep>self.in_features=in_features<line_sep>self.out_features=out_features<line_sep>self.fc=nn.Linear(in_features out_features bias=<false>)<line_sep>self.eps=eps<block_end><def_stmt>forward self x labels **_<block_start>'''
input shape (N, in_features)
'''<if_stmt>len(x)<ne>len(labels)<block_start><raise>AssertionError<block_end><if_stmt>torch.min(labels)<l>0<block_start><raise>AssertionError<block_end><if_stmt>torch.max(labels)<ge>self.out_features<block_start><raise>AssertionError<block_end><with_stmt>torch.no_grad()<block_start>self.fc.weight.copy_(F.normalize(self.fc.weight p=2 dim=1))<block_end>x=F.normalize(x p=2 dim=1)<line_sep>wf=self.fc(x)<if_stmt>self.loss_type<eq>'cosface'<block_start>numerator=self.s<times>(torch.diagonal(wf.transpose(0 1)[labels])-self.m)<block_end><if_stmt>self.loss_type<eq>'arcface'<block_start>numerator=self.s<times>torch.cos(torch.acos(torch.clamp(torch.diagonal(wf.transpose(0 1)[labels]) -1.+self.eps 1-self.eps))+self.m)<block_end><if_stmt>self.loss_type<eq>'sphereface'<block_start>numerator=self.s<times>torch.cos(self.m<times>torch.acos(torch.clamp(torch.diagonal(wf.transpose(0 1)[labels]) -1.+self.eps 1-self.eps)))<block_end>excl=torch.cat([torch.cat((wf[i :y] wf[i y+1:])).unsqueeze(0)<for>i,y enumerate(labels)] dim=0)<line_sep>denominator=torch.exp(numerator)+torch.sum(torch.exp(self.s<times>excl) dim=1)<line_sep>L=numerator-torch.log(denominator)<line_sep><return>-torch.mean(L)<block_end><block_end># ===================== #
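# Usage sketch (not part of the original source; dimensions are illustrative assumptions).
# The weight rows are normalized in place under no_grad(); rebinding the loop variable, as the
# original code did, would have been a no-op.
# criterion = AngularPenaltySMLoss(in_features=128, out_features=10, loss_type='arcface')
# embeddings = torch.randn(32, 128)                       # (N, in_features)
# labels = torch.randint(0, 10, (32,))                    # (N,) integer class ids
# loss = criterion(embeddings, labels)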
# Source: https://github.com/JunMa11/SegLoss/blob/master/losses_pytorch/dice_loss.py
<class_stmt>AsymLoss(nn.Module)<block_start><def_stmt>__init__ self apply_nonlin=<none> batch_dice=<false> do_bg=<true> smooth=1. square=<false> **_<block_start>"""
paper: https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8573779
"""<line_sep>super(AsymLoss self).__init__()<line_sep>self.square=square<line_sep>self.do_bg=do_bg<line_sep>self.batch_dice=batch_dice<line_sep>self.apply_nonlin=apply_nonlin<line_sep>self.smooth=smooth<line_sep>self.beta=1.5<block_end><def_stmt>forward self logits labels loss_mask=<none> **_<block_start>shp_x=logits.shape<if_stmt>self.batch_dice<block_start>axes=[0]+list(range(2 len(shp_x)))<block_end><else_stmt><block_start>axes=list(range(2 len(shp_x)))<block_end><if_stmt>self.apply_nonlin<is><not><none><block_start>logits=self.apply_nonlin(logits)<block_end>tp,fp,fn=get_tp_fp_fn(logits labels axes loss_mask self.square)# shape: (batch size, class num)
weight=(self.beta<power>2)/(1+self.beta<power>2)<line_sep>asym=(tp+self.smooth)/(tp+weight<times>fn+(1-weight)<times>fp+self.smooth)<if_stmt><not>self.do_bg<block_start><if_stmt>self.batch_dice<block_start>asym=asym[1:]<block_end><else_stmt><block_start>asym=asym[: 1:]<block_end><block_end>asym=asym.mean()<line_sep><return>-asym<block_end><block_end># ===================== #
# Source: https://github.com/BloodAxe/pytorch-toolbelt
# Used to enhance facial segmentation
<def_stmt>wing_loss output:torch.Tensor target:torch.Tensor width=5 curvature=0.5 reduction="mean"<block_start>"""
https://arxiv.org/pdf/1711.06753.pdf
:param output: predicted values
:param target: ground truth values
:param width: half-width of the non-linear (logarithmic) region around zero error
:param curvature: curvature of the logarithmic region
:param reduction: 'mean' (default) or 'sum'; any other value returns the unreduced loss
:return:
"""<line_sep>diff_abs=(target-output).abs()<line_sep>loss=diff_abs.clone()<line_sep>idx_smaller=diff_abs<l>width<line_sep>idx_bigger=diff_abs<ge>width<line_sep>loss[idx_smaller]=width<times>torch.log(1+diff_abs[idx_smaller]/curvature)<line_sep>C=width-width<times>math.log(1+width/curvature)<line_sep>loss[idx_bigger]=loss[idx_bigger]-C<if_stmt>reduction<eq>"sum"<block_start>loss=loss.sum()<block_end><if_stmt>reduction<eq>"mean"<block_start>loss=loss.mean()<block_end><return>loss<block_end><class_stmt>WingLoss(nn.modules.loss._Loss)<block_start>"""
Used to enhance facial segmentation
"""<def_stmt>__init__ self width=5 curvature=0.5 reduction="mean" **_<block_start>super(WingLoss self).__init__(reduction=reduction)<line_sep>self.width=width<line_sep>self.curvature=curvature<block_end><def_stmt>forward self prediction target **_<block_start><return>wing_loss(prediction target self.width self.curvature self.reduction)<block_end><block_end># ===================== #
# Source: https://github.com/JUNHAOYAN/FPN/tree/master/RMI
# ..which is adapted from: https://github.com/ZJULearning/RMI (MIT License)
# Segmentation loss (memory intensive)
<class_stmt>RMILoss(nn.Module)<block_start>"""
region mutual information
I(A, B) = H(A) + H(B) - H(A, B)
This version needs a lot of memory if it does not downsample.
"""<def_stmt>__init__ self num_classes=1 rmi_radius=3 rmi_pool_way=0 rmi_pool_size=3 rmi_pool_stride=3 loss_weight_lambda=0.5 lambda_way=1 device="cuda" **_<block_start>super(RMILoss self).__init__()<line_sep>self._CLIP_MIN=1e-6# min clip value after softmax or sigmoid operations
self._CLIP_MAX=1.0# max clip value after softmax or sigmoid operations
self._POS_ALPHA=5e-4# add this factor to ensure the AA^T is positive definite
self._IS_SUM=1# sum the loss per channel
self.num_classes=num_classes<line_sep># radius choices
<if_stmt>rmi_radius<not><in>[1 2 3 4 5 6 7 8 9 10]<block_start><raise>AssertionError<block_end>self.rmi_radius=rmi_radius<if_stmt>rmi_pool_way<not><in>[0 1 2 3]<block_start><raise>AssertionError<block_end>self.rmi_pool_way=rmi_pool_way<line_sep># set the pool_size = rmi_pool_stride
<if_stmt>rmi_pool_size<ne>rmi_pool_stride<block_start><raise>AssertionError<block_end>self.rmi_pool_size=rmi_pool_size<line_sep>self.rmi_pool_stride=rmi_pool_stride<line_sep>self.weight_lambda=loss_weight_lambda<line_sep>self.lambda_way=lambda_way<line_sep># dimension of the distribution
self.half_d=self.rmi_radius<times>self.rmi_radius<line_sep>self.d=2<times>self.half_d<line_sep>self.kernel_padding=self.rmi_pool_size<floordiv>2<line_sep># ignore class
self.ignore_index=255<line_sep>self.device=device<block_end><def_stmt>forward self logits labels **_<block_start><if_stmt>self.num_classes<eq>1<block_start>loss=self.forward_sigmoid(logits labels)<block_end><else_stmt><block_start>loss=self.forward_softmax_sigmoid(logits labels)<block_end><return>loss<block_end><def_stmt>forward_softmax_sigmoid self inputs targets<block_start>"""
Using both softmax and sigmoid operations.
Args:
inputs : [N, C, H, W], dtype=float32
targets : [N, H, W], dtype=long
"""<line_sep># PART I -- get the normal cross entropy loss
normal_loss=F.cross_entropy(input=inputs target=targets.long() ignore_index=self.ignore_index reduction='mean')<line_sep># PART II -- get the lower bound of the region mutual information
# get the valid label and logits
# valid label, [N, C, H, W]
label_mask_3D=targets<l>self.num_classes<line_sep>valid_onehot_labels_4D=F.one_hot(targets.long()<times>label_mask_3D.long() num_classes=self.num_classes).float()<line_sep>label_mask_3D=label_mask_3D.float()<line_sep>valid_onehot_labels_4D=valid_onehot_labels_4D<times>label_mask_3D.unsqueeze(dim=3)<line_sep>valid_onehot_labels_4D=valid_onehot_labels_4D.permute(0 3 1 2).requires_grad_(<false>)<line_sep># valid probs
probs_4D=torch.sigmoid(inputs)<times>label_mask_3D.unsqueeze(dim=1)<line_sep>probs_4D=probs_4D.clamp(min=self._CLIP_MIN max=self._CLIP_MAX)<line_sep># get region mutual information
rmi_loss=self.rmi_lower_bound(valid_onehot_labels_4D probs_4D)<line_sep># add together
final_loss=(self.weight_lambda<times>normal_loss+rmi_loss<times>(1-self.weight_lambda)<if>self.lambda_way<else>normal_loss+rmi_loss<times>self.weight_lambda)<line_sep><return>final_loss<block_end><def_stmt>forward_sigmoid self logits_4D labels_4D<block_start>"""
Using the sigmoid operation for both parts.
Args:
logits_4D : [N, C, H, W], dtype=float32
labels_4D : [N, H, W], dtype=long
"""<line_sep># label mask -- [N, H, W, 1]
label_mask_3D=labels_4D<l>self.num_classes<line_sep># valid label
valid_onehot_labels_4D=F.one_hot(labels_4D.long()<times>label_mask_3D.long() num_classes=self.num_classes).float()<line_sep>label_mask_3D=label_mask_3D.float()<line_sep>label_mask_flat=label_mask_3D.view([-1 ])<line_sep>valid_onehot_labels_4D=valid_onehot_labels_4D<times>label_mask_3D.unsqueeze(dim=3)<line_sep>valid_onehot_labels_4D.requires_grad_(<false>)<line_sep># PART I -- calculate the sigmoid binary cross entropy loss
valid_onehot_label_flat=valid_onehot_labels_4D.view([-1 self.num_classes]).requires_grad_(<false>)<line_sep>logits_flat=logits_4D.permute(0 2 3 1).contiguous().view([-1 self.num_classes])<line_sep># binary loss, multiplied by the not_ignore_mask
valid_pixels=torch.sum(label_mask_flat)<line_sep>binary_loss=F.binary_cross_entropy_with_logits(logits_flat target=valid_onehot_label_flat weight=label_mask_flat.unsqueeze(dim=1) reduction='sum')<line_sep>bce_loss=torch.div(binary_loss valid_pixels+1.0)<line_sep># PART II -- get rmi loss
# onehot_labels_4D -- [N, C, H, W]
probs_4D=logits_4D.sigmoid()<times>label_mask_3D.unsqueeze(dim=1)+self._CLIP_MIN<line_sep>valid_onehot_labels_4D=valid_onehot_labels_4D.permute(0 3 1 2).requires_grad_(<false>)<line_sep># get region mutual information
rmi_loss=self.rmi_lower_bound(valid_onehot_labels_4D probs_4D)<line_sep># add together
final_loss=(self.weight_lambda<times>bce_loss+rmi_loss<times>(1-self.weight_lambda)<if>self.lambda_way<else>bce_loss+rmi_loss<times>self.weight_lambda)<line_sep><return>final_loss<block_end><def_stmt>rmi_lower_bound self labels_4D probs_4D<block_start>"""
calculate the lower bound of the region mutual information.
Args:
labels_4D : [N, C, H, W], dtype=float32
probs_4D : [N, C, H, W], dtype=float32
"""<if_stmt>labels_4D.size()<ne>probs_4D.size()<block_start><raise>AssertionError<block_end>p,s=self.rmi_pool_size self.rmi_pool_stride<if_stmt>self.rmi_pool_stride<g>1<block_start><if_stmt>self.rmi_pool_way<eq>0<block_start>labels_4D=F.max_pool2d(labels_4D kernel_size=p stride=s padding=self.kernel_padding)<line_sep>probs_4D=F.max_pool2d(probs_4D kernel_size=p stride=s padding=self.kernel_padding)<block_end><elif_stmt>self.rmi_pool_way<eq>1<block_start>labels_4D=F.avg_pool2d(labels_4D kernel_size=p stride=s padding=self.kernel_padding)<line_sep>probs_4D=F.avg_pool2d(probs_4D kernel_size=p stride=s padding=self.kernel_padding)<block_end><elif_stmt>self.rmi_pool_way<eq>2# interpolation
<block_start>shape=labels_4D.size()<line_sep>new_h,new_w=shape[2]<floordiv>s shape[3]<floordiv>s<line_sep>labels_4D=F.interpolate(labels_4D size=(new_h new_w) mode='nearest')<line_sep>probs_4D=F.interpolate(probs_4D size=(new_h new_w) mode='bilinear' align_corners=<true>)<block_end><else_stmt><block_start><raise>NotImplementedError("Pool way of RMI is not defined!")<block_end><block_end># we do not need the gradient of label.
label_shape=labels_4D.size()<line_sep>n,c=label_shape[0] label_shape[1]<line_sep># combine the high dimension points from label and probability map. new shape [N, C, radius * radius, H, W]
la_vectors,pr_vectors=self.map_get_pairs(labels_4D probs_4D radius=self.rmi_radius is_combine=0)<line_sep>la_vectors=la_vectors.view([n c self.half_d -1]).type(torch.double).to(self.device).requires_grad_(<false>)<line_sep>pr_vectors=pr_vectors.view([n c self.half_d -1]).type(torch.double).to(self.device)<line_sep># small diagonal matrix, shape = [1, 1, radius * radius, radius * radius]
diag_matrix=torch.eye(self.half_d).unsqueeze(dim=0).unsqueeze(dim=0)<line_sep># the mean and covariance of these high dimension points
# Var(X) = E(X^2) - E(X) E(X), N * Var(X) = X^2 - X E(X)
la_vectors=la_vectors-la_vectors.mean(dim=3 keepdim=<true>)<line_sep>la_cov=torch.matmul(la_vectors la_vectors.transpose(2 3))<line_sep>pr_vectors=pr_vectors-pr_vectors.mean(dim=3 keepdim=<true>)<line_sep>pr_cov=torch.matmul(pr_vectors pr_vectors.transpose(2 3))<line_sep># https://github.com/pytorch/pytorch/issues/7500
# waiting for batched torch.cholesky_inverse()
pr_cov_inv=torch.inverse(pr_cov+diag_matrix.type_as(pr_cov)<times>self._POS_ALPHA)<line_sep># if the dimension of the point is less than 9, you can use the function below
# to accelerate the computation:
# pr_cov_inv = utils.batch_cholesky_inverse(pr_cov + diag_matrix.type_as(pr_cov) * _POS_ALPHA)
la_pr_cov=torch.matmul(la_vectors pr_vectors.transpose(2 3))<line_sep># the approximation of the variance; det(c A) = c^n det(A) for an n x n matrix A,
# so log det(c A) = n log(c) + log det(A).
# appro_var = appro_var / n_points: we do not divide appro_var by the number of points here
# in order to avoid underflow issues.
# If A = A^T, A^-1 = (A^-1)^T.
appro_var=la_cov-torch.matmul(la_pr_cov.matmul(pr_cov_inv) la_pr_cov.transpose(-2 -1))<line_sep># appro_var = la_cov - torch.chain_matmul(la_pr_cov, pr_cov_inv, la_pr_cov.transpose(-2, -1))
# appro_var = torch.div(appro_var, n_points.type_as(appro_var)) + diag_matrix.type_as(appro_var) * 1e-6
# The lower bound. If A is nonsingular, ln( det(A) ) = Tr( ln(A) ).
rmi_now=0.5<times>self.log_det_by_cholesky(appro_var+diag_matrix.type_as(appro_var)<times>self._POS_ALPHA)<line_sep># rmi_now = 0.5 * torch.logdet(appro_var + diag_matrix.type_as(appro_var) * _POS_ALPHA)
# mean over N samples. sum over classes.
rmi_per_class=rmi_now.view([-1 self.num_classes]).mean(dim=0).float()<line_sep># is_half = False
# if is_half:
# rmi_per_class = torch.div(rmi_per_class, float(self.half_d / 2.0))
# else:
rmi_per_class=torch.div(rmi_per_class float(self.half_d))<line_sep>rmi_loss=torch.sum(rmi_per_class)<if>self._IS_SUM<else>torch.mean(rmi_per_class)<line_sep><return>rmi_loss<block_end>@staticmethod<def_stmt>map_get_pairs labels_4D probs_4D radius=3 is_combine=<true><block_start>"""get map pairs
Args:
labels_4D : labels, shape [N, C, H, W]
probs_4D : probabilities, shape [N, C, H, W]
radius : the square radius
Return:
tensor with shape [N, C, radius * radius, H - (radius - 1), W - (radius - 1)]
"""<line_sep># pad to ensure the following slice operation is valid
# pad_beg = int(radius // 2)
# pad_end = radius - pad_beg
# the original height and width
label_shape=labels_4D.size()<line_sep>h,w=label_shape[2] label_shape[3]<line_sep>new_h,new_w=h-(radius-1) w-(radius-1)<line_sep># https://pytorch.org/docs/stable/nn.html?highlight=f%20pad#torch.nn.functional.pad
# padding = (pad_beg, pad_end, pad_beg, pad_end)
# labels_4D, probs_4D = F.pad(labels_4D, padding), F.pad(probs_4D, padding)
# get the neighbors
la_ns=[]<line_sep>pr_ns=[]<line_sep># for x in range(0, radius, 1):
<for_stmt>y range(0 radius 1)<block_start><for_stmt>x range(0 radius 1)<block_start>la_now=labels_4D[: : y:y+new_h x:x+new_w]<line_sep>pr_now=probs_4D[: : y:y+new_h x:x+new_w]<line_sep>la_ns.append(la_now)<line_sep>pr_ns.append(pr_now)<block_end><block_end><if_stmt>is_combine# for calculating RMI
<block_start>pair_ns=la_ns+pr_ns<line_sep>p_vectors=torch.stack(pair_ns dim=2)<line_sep><return>p_vectors<block_end><else_stmt># for other purposes
<block_start>la_vectors=torch.stack(la_ns dim=2)<line_sep>pr_vectors=torch.stack(pr_ns dim=2)<line_sep><return>la_vectors pr_vectors<block_end><block_end>@staticmethod<def_stmt>log_det_by_cholesky matrix<block_start>"""
Args:
matrix: must be a positive definite matrix.
shape [N, C, D, D].
Ref:
https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/ops/linalg/linalg_impl.py
"""<line_sep># This uses the property that the log det(A) = 2 * sum(log(real(diag(C))))
# where C is the cholesky decomposition of A.
chol=torch.cholesky(matrix)<line_sep># return 2.0 * torch.sum(torch.log(torch.diagonal(chol, dim1=-2, dim2=-1) + 1e-6), dim=-1)
<return>2.0<times>torch.sum(torch.log(torch.diagonal(chol dim1=-2 dim2=-1)+1e-8) dim=-1)<block_end><block_end># ===================== #
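# Minimal usage sketch for RMILoss on random data (values are illustrative);
# shapes follow the forward_softmax_sigmoid() docstring. log_det_by_cholesky()
# calls torch.cholesky, which newer PyTorch releases expose as
# torch.linalg.cholesky instead, so this sketch assumes an older torch.
if __name__ == "__main__":
    criterion = RMILoss(num_classes=2, device="cpu")
    logits = torch.randn(2, 2, 96, 96, requires_grad=True)  # [N, C, H, W]
    labels = torch.randint(0, 2, (2, 96, 96))               # [N, H, W]
    loss = criterion(logits, labels)
    loss.backward()
    print(float(loss))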
# Source: https://github.com/RElbers/region-mutual-information-pytorch
# Segmentation loss (memory intensive)
<class_stmt>RMILossAlt(nn.Module)<block_start>"""
PyTorch Module which calculates the Region Mutual Information loss (https://arxiv.org/abs/1910.12037).
"""<def_stmt>__init__ self with_logits radius=3 bce_weight=0.5 downsampling_method='max' stride=3 use_log_trace=<true> use_double_precision=<true> epsilon=0.0005 **_<block_start>"""
:param with_logits:
If True, apply the sigmoid function to the prediction before calculating loss.
:param radius:
RMI radius.
:param bce_weight:
Weight of the binary cross entropy. Must be between 0 and 1.
:param downsampling_method:
Downsampling method used before calculating RMI. Must be one of ['avg', 'max', 'region-extraction'].
If 'region-extraction', downscaling is done during the region extraction phase, meaning that the stride is the spacing between consecutive regions.
:param stride:
Stride used for downsampling.
:param use_log_trace:
Whether to calculate the log of the trace, instead of the log of the determinant. See equation (15).
:param use_double_precision:
Calculate the RMI using doubles in order to fix potential numerical issues.
:param epsilon:
Magnitude of the entries added to the diagonal of M in order to fix potential numerical issues.
"""<line_sep>super().__init__()<line_sep>self.use_double_precision=use_double_precision<line_sep>self.with_logits=with_logits<line_sep>self.bce_weight=bce_weight<line_sep>self.stride=stride<line_sep>self.downsampling_method=downsampling_method<line_sep>self.radius=radius<line_sep>self.use_log_trace=use_log_trace<line_sep>self.epsilon=epsilon<block_end><def_stmt>forward self logits labels **_<block_start>labels=labels.unsqueeze(1)<line_sep># Calculate BCE if needed
<if_stmt>self.bce_weight<ne>0<block_start><if_stmt>self.with_logits<block_start>bce=F.binary_cross_entropy_with_logits(logits target=labels)<block_end><else_stmt><block_start>bce=F.binary_cross_entropy(logits target=labels)<block_end>bce=bce.mean()<times>self.bce_weight<block_end><else_stmt><block_start>bce=0.0<block_end># Apply sigmoid to get probabilities. See final paragraph of section 4.
<if_stmt>self.with_logits<block_start>logits=torch.sigmoid(logits)<block_end># Calculate RMI loss
rmi=self.rmi_loss(input_=logits target=labels)<line_sep>rmi=rmi.mean()<times>(1.0-self.bce_weight)<line_sep><return>rmi+bce<block_end><def_stmt>rmi_loss self input_ target<block_start>"""
Calculates the RMI loss between the prediction and target.
:return:
RMI loss
"""<if_stmt>input_.shape<ne>target.shape<block_start><raise>AssertionError<block_end>vector_size=self.radius<times>self.radius<line_sep># Get region vectors
y=self.extract_region_vector(target)<line_sep>p=self.extract_region_vector(input_)<line_sep># Convert to doubles for better precision
<if_stmt>self.use_double_precision<block_start>y=y.double()<line_sep>p=p.double()<block_end># Small diagonal matrix to fix numerical issues
eps=torch.eye(vector_size dtype=y.dtype device=y.device)<times>self.epsilon<line_sep>eps=eps.unsqueeze(dim=0).unsqueeze(dim=0)<line_sep># Subtract mean
y=y-y.mean(dim=3 keepdim=<true>)<line_sep>p=p-p.mean(dim=3 keepdim=<true>)<line_sep># Covariances
y_cov=y@transpose(y)<line_sep>p_cov=p@transpose(p)<line_sep>y_p_cov=y@transpose(p)<line_sep># Approximated posterior covariance matrix of Y given P
m=y_cov-y_p_cov@transpose(inverse(p_cov+eps))@transpose(y_p_cov)<line_sep># Lower bound of RMI
<if_stmt>self.use_log_trace<block_start>rmi=0.5<times>log_trace(m+eps)<block_end><else_stmt><block_start>rmi=0.5<times>log_det(m+eps)<block_end># Normalize
rmi=rmi/float(vector_size)<line_sep># Sum over classes, mean over samples.
<return>rmi.sum(dim=1).mean(dim=0)<block_end><def_stmt>extract_region_vector self x<block_start>"""
Downsamples and extracts square regions from x.
Returns the flattened vectors of length radius*radius.
"""<line_sep>x=self.downsample(x)<line_sep>stride=self.stride<if>self.downsampling_method<eq>'region-extraction'<else>1<line_sep>x_regions=F.unfold(x kernel_size=self.radius stride=stride)<line_sep>x_regions=x_regions.view((*x.shape[:2] self.radius<power>2 -1))<line_sep><return>x_regions<block_end><def_stmt>downsample self x# Skip if stride is 1
<block_start><if_stmt>self.stride<eq>1<block_start><return>x<block_end># Skip if we pool during region extraction.
<if_stmt>self.downsampling_method<eq>'region-extraction'<block_start><return>x<block_end>padding=self.stride<floordiv>2<if_stmt>self.downsampling_method<eq>'max'<block_start><return>F.max_pool2d(x kernel_size=self.stride stride=self.stride padding=padding)<block_end><if_stmt>self.downsampling_method<eq>'avg'<block_start><return>F.avg_pool2d(x kernel_size=self.stride stride=self.stride padding=padding)<block_end><raise>ValueError(self.downsampling_method)<block_end><block_end><def_stmt>transpose x<block_start><return>x.transpose(-2 -1)<block_end><def_stmt>inverse x<block_start><return>torch.inverse(x)<block_end><def_stmt>log_trace x<block_start>x=torch.cholesky(x)<line_sep>diag=torch.diagonal(x dim1=-2 dim2=-1)<line_sep><return>2<times>torch.sum(torch.log(diag+1e-8) dim=-1)<block_end><def_stmt>log_det x<block_start><return>torch.logdet(x)<block_end># ====================== #
# Source: https://github.com/NRCan/geo-deep-learning/blob/develop/losses/boundary_loss.py
<class_stmt>BoundaryLoss(nn.Module)<block_start>"""Boundary Loss proposed in:
<NAME> et al., Boundary Loss for Remote Sensing Imagery Semantic Segmentation
https://arxiv.org/abs/1905.07852
"""<line_sep># in previous implementations theta0=3, theta=5
<def_stmt>__init__ self theta0=19 theta=19 ignore_index=<none> weight=<none> is_binary:bool=<false> **_<block_start>super().__init__()<line_sep>self.theta0=theta0<line_sep>self.theta=theta<line_sep>self.ignore_index=ignore_index<line_sep>self.weight=weight<line_sep>self.is_binary=is_binary<block_end><def_stmt>forward self logits labels **_<block_start>"""
Input:
- logits: the output from the model (before softmax)
shape (N, C, H, W)
- labels: ground truth map
shape (N, H, W)
Return:
- boundary loss, averaged over mini-batch
"""<line_sep>n,c,_,_=logits.shape<line_sep># sigmoid / softmax so that predicted map can be distributed in [0, 1]
<if_stmt>self.is_binary<block_start>logits=torch.sigmoid(logits)<block_end><else_stmt><block_start>logits=torch.softmax(logits dim=1)<block_end># one-hot vector of ground truth
# print(gt.shape)
# zo = F.one_hot(gt, c)
# print(zo.shape)
<if_stmt>self.is_binary<block_start>one_hot_gt=labels<block_end><else_stmt><block_start>one_hot_gt=F.one_hot(labels.long()).permute(0 3 1 2).squeeze(dim=-1).contiguous().float()<block_end># boundary map
gt_b=F.max_pool2d(1-one_hot_gt kernel_size=self.theta0 stride=1 padding=(self.theta0-1)<floordiv>2)<line_sep>gt_b<augsub>1-one_hot_gt<line_sep>pred_b=F.max_pool2d(1-logits kernel_size=self.theta0 stride=1 padding=(self.theta0-1)<floordiv>2)<line_sep>pred_b<augsub>1-logits<line_sep># extended boundary map
gt_b_ext=F.max_pool2d(gt_b kernel_size=self.theta stride=1 padding=(self.theta-1)<floordiv>2)<line_sep>pred_b_ext=F.max_pool2d(pred_b kernel_size=self.theta stride=1 padding=(self.theta-1)<floordiv>2)<line_sep># reshape
gt_b=gt_b.view(n c -1)<line_sep>pred_b=pred_b.view(n c -1)<line_sep>gt_b_ext=gt_b_ext.view(n c -1)<line_sep>pred_b_ext=pred_b_ext.view(n c -1)<line_sep># Precision, Recall
eps=1e-7<line_sep>P=(torch.sum(pred_b<times>gt_b_ext dim=2)+eps)/(torch.sum(pred_b dim=2)+eps)<line_sep>R=(torch.sum(pred_b_ext<times>gt_b dim=2)+eps)/(torch.sum(gt_b dim=2)+eps)<line_sep># Boundary F1 Score
BF1=(2<times>P<times>R+eps)/(P+R+eps)<line_sep># summing BF1 Score for each class and average over mini-batch
loss=torch.mean(1-BF1)<line_sep><return>loss<block_end><block_end># ====================== #
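# Minimal usage sketch for BoundaryLoss on random data (values are
# illustrative). In the multi-class branch F.one_hot() infers the class count
# from the labels, so every class should appear in the batch; theta0/theta set
# the widths of the boundary and extended-boundary bands the BF1 score uses.
if __name__ == "__main__":
    criterion = BoundaryLoss(theta0=3, theta=5)  # widths from previous implementations
    logits = torch.randn(2, 4, 64, 64, requires_grad=True)
    labels = torch.randint(0, 4, (2, 64, 64))
    loss = criterion(logits, labels)
    loss.backward()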
"""
Hausdorff loss implementation based on paper:
https://arxiv.org/pdf/1904.10030.pdf
copy pasted from - all credit goes to original authors:
https://github.com/SilmarilBearer/HausdorffLoss
"""<import_from_stmt>scipy.ndimage.morphology distance_transform_edt<as>edt<import_from_stmt>scipy.ndimage convolve<import_stmt>cv2<class_stmt>HausdorffDTLoss(nn.Module)<block_start>"""Binary Hausdorff loss based on distance transform"""<def_stmt>__init__ self alpha=2.0 **_<block_start>super(HausdorffDTLoss self).__init__()<line_sep>self.alpha=alpha<block_end>@torch.no_grad()@staticmethod<def_stmt>distance_field img:np.ndarray<arrow>np.ndarray<block_start>field=np.zeros_like(img)<for_stmt>batch range(len(img))<block_start>fg_mask=img[batch]<g>0.5<if_stmt>fg_mask.any()<block_start>bg_mask=~fg_mask<line_sep>fg_dist=edt(fg_mask)<line_sep>bg_dist=edt(bg_mask)<line_sep>field[batch]=fg_dist+bg_dist<block_end><block_end><return>field<block_end><def_stmt>forward self logits:torch.Tensor labels:torch.Tensor debug=<false> **_<arrow>torch.Tensor<block_start>"""
Uses one binary channel: 1 - fg, 0 - bg
pred: (b, 1, x, y, z) or (b, 1, x, y)
target: (b, 1, x, y, z) or (b, 1, x, y)
"""<line_sep>labels=labels.unsqueeze(1)<if_stmt>logits.dim()<not><in>(4 5)<block_start><raise>AssertionError("Only 2D and 3D supported")<block_end><if_stmt>(logits.dim()<ne>labels.dim())<block_start><raise>AssertionError("Prediction and target need to be of same dimension")<block_end># this is necessary for binary loss
logits=torch.sigmoid(logits)<line_sep>pred_dt=torch.from_numpy(self.distance_field(logits.detach().cpu().numpy())).float()<line_sep>target_dt=torch.from_numpy(self.distance_field(labels.detach().cpu().numpy())).float()<line_sep>pred_error=(logits-labels)<power>2<line_sep>distance=pred_dt.to(logits.device)<power>self.alpha+target_dt.to(logits.device)<power>self.alpha<line_sep>dt_field=pred_error<times>distance<line_sep>loss=dt_field.mean()<if_stmt>debug<block_start><return>(loss.detach().cpu().numpy() (dt_field.detach().cpu().numpy()[0 0] pred_error.detach().cpu().numpy()[0 0] distance.detach().cpu().numpy()[0 0] pred_dt.detach().cpu().numpy()[0 0] target_dt.detach().cpu().numpy()[0 0] ) )<block_end><else_stmt><block_start><return>loss<block_end><block_end><block_end><class_stmt>HausdorffERLoss(nn.Module)<block_start>"""Binary Hausdorff loss based on morphological erosion"""<def_stmt>__init__ self alpha=2.0 erosions=10 **kwargs<block_start>super(HausdorffERLoss self).__init__()<line_sep>self.alpha=alpha<line_sep>self.erosions=erosions<line_sep>self.prepare_kernels()<block_end><def_stmt>prepare_kernels self<block_start>cross=np.array([cv2.getStructuringElement(cv2.MORPH_CROSS (3 3))])<line_sep>bound=np.array([[[0 0 0] [0 1 0] [0 0 0]]])<line_sep>self.kernel2D=cross<times>0.2<line_sep>self.kernel3D=np.array([bound cross bound])<times>(1/7)<block_end>@torch.no_grad()<def_stmt>perform_erosion self pred:np.ndarray target:np.ndarray debug<arrow>np.ndarray<block_start>bound=(pred-target)<power>2<if_stmt>bound.ndim<eq>5<block_start>kernel=self.kernel3D<block_end><elif_stmt>bound.ndim<eq>4<block_start>kernel=self.kernel2D<block_end><else_stmt><block_start><raise>ValueError(f"Dimension {bound.ndim} is not supported.")<block_end>eroted=np.zeros_like(bound)<line_sep>erosions=[]<for_stmt>batch range(len(bound))# debug
<block_start>erosions.append(np.copy(bound[batch][0]))<for_stmt>k range(self.erosions)# compute convolution with kernel
<block_start>dilation=convolve(bound[batch] kernel mode="constant" cval=0.0)<line_sep># apply soft thresholding at 0.5 and normalize
erosion=dilation-0.5<line_sep>erosion[erosion<l>0]=0<if_stmt>erosion.ptp()<ne>0<block_start>erosion=(erosion-erosion.min())/erosion.ptp()<block_end># save erosion and add to loss
bound[batch]=erosion<line_sep>eroted[batch]<augadd>erosion<times>(k+1)<power>self.alpha<if_stmt>debug<block_start>erosions.append(np.copy(erosion[0]))<block_end><block_end><block_end># image visualization in debug mode
<if_stmt>debug<block_start><return>eroted erosions<block_end><else_stmt><block_start><return>eroted<block_end><block_end><def_stmt>forward self pred:torch.Tensor target:torch.Tensor debug=<false><arrow>torch.Tensor<block_start>"""
Uses one binary channel: 1 - fg, 0 - bg
pred: (b, 1, x, y, z) or (b, 1, x, y)
target: (b, 1, x, y, z) or (b, 1, x, y)
"""<line_sep>target=target.unsqueeze(1)<if_stmt>pred.dim()<not><in>(4 5)<block_start><raise>AssertionError("Only 2D and 3D supported")<block_end><if_stmt>(pred.dim()<ne>target.dim())<block_start><raise>AssertionError("Prediction and target need to be of same dimension")<block_end>pred=torch.sigmoid(pred)<if_stmt>debug<block_start>eroted,erosions=self.perform_erosion(pred.detach().cpu().numpy() target.detach().cpu().numpy() debug)<line_sep><return>eroted.mean() erosions<block_end><else_stmt><block_start>eroted=torch.from_numpy(self.perform_erosion(pred.detach().cpu().numpy() target.detach().cpu().numpy() debug)).float()<line_sep>loss=eroted.mean()<line_sep><return>loss<block_end><block_end><block_end># ====================== #
"""
Recall Loss
copy pasted from - all credit goes to original authors:
https://github.com/shuaizzZ/Recall-Loss-PyTorch/blob/master/recall_loss.py
"""<class_stmt>RecallLoss(nn.Module)<block_start>""" An unofficial implementation of
<Recall Loss for Imbalanced Image Classification and Semantic Segmentation>
Created by: <NAME>
Email: <EMAIL>
recall = TP / (TP + FN)
Args:
weight: An array of shape [C,]
predict: A float32 tensor of shape [N, C, *]; for semantic segmentation this is [N, C, H, W]
target: An int64 tensor of shape [N, *]; for semantic segmentation this is [N, H, W]
Return:
recall loss
"""<def_stmt>__init__ self weight=<none> **_<block_start>super(RecallLoss self).__init__()<if_stmt>weight<is><not><none><block_start>weight=torch.Tensor(weight)<line_sep>self.weight=weight/torch.sum(weight)# Normalized weight
<block_end>self.smooth=1e-5<block_end><def_stmt>forward self logits labels **_<block_start>N,C=logits.size()[:2]<line_sep>_,predict=torch.max(logits 1)# # (N, C, *) ==> (N, 1, *)
predict=predict.view(N 1 -1)# (N, 1, *)
labels=labels.view(N 1 -1)# (N, 1, *)
last_size=labels.size(-1)<line_sep>## convert predict & target (N, 1, *) into one hot vector (N, C, *)
predict_onehot=torch.zeros((N C last_size)).cuda()# (N, 1, *) ==> (N, C, *)
predict_onehot.scatter_(1 predict 1)# (N, C, *)
target_onehot=torch.zeros((N C last_size)).cuda()# (N, 1, *) ==> (N, C, *)
target_onehot.scatter_(1 labels 1)# (N, C, *)
true_positive=torch.sum(predict_onehot<times>target_onehot dim=2)# (N, C)
total_target=torch.sum(target_onehot dim=2)# (N, C)
## Recall = TP / (TP + FN)
recall=(true_positive+self.smooth)/(total_target+self.smooth)# (N, C)
<if_stmt>hasattr(self 'weight')<block_start><if_stmt>self.weight.type()<ne>logits.type()<block_start>self.weight=self.weight.type_as(logits)<block_end>recall=recall<times>self.weight<times>C# (N, C); apply the class weights on every call, not only when dtypes differ
<block_end>recall_loss=1-torch.mean(recall)# scalar
<return>recall_loss<block_end><block_end># ====================== #
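# Minimal usage sketch for RecallLoss on random data (values are
# illustrative). forward() allocates its one-hot buffers with .cuda(), so as
# written it requires a CUDA device; the optional per-class weights are
# normalized in __init__.
if __name__ == "__main__" and torch.cuda.is_available():
    criterion = RecallLoss(weight=[1.0, 2.0, 1.0])
    logits = torch.randn(2, 3, 16, 16).cuda()
    labels = torch.randint(0, 3, (2, 16, 16)).cuda()
    print(float(criterion(logits, labels)))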
<class_stmt>SoftInvDiceLoss(torch.nn.Module)<block_start>"""
Well-performing loss for binary segmentation
"""<def_stmt>__init__ self smooth=1. is_binary=<true> **_<block_start>super(SoftInvDiceLoss self).__init__()<line_sep>self.smooth=smooth<line_sep>self.is_binary=is_binary<block_end><def_stmt>forward self logits labels **_# sigmoid / softmax so that predicted map can be distributed in [0, 1]
<block_start><if_stmt>self.is_binary<block_start>logits=torch.sigmoid(logits)<block_end><else_stmt><block_start>logits=torch.softmax(logits dim=1)<block_end>iflat=1-logits.view(-1)<line_sep>tflat=1-labels.view(-1)<line_sep>intersection=(iflat<times>tflat).sum()<line_sep><return>1-((2.<times>intersection+self.smooth)/(iflat.sum()+tflat.sum()+self.smooth))<block_end><block_end># ======================= #
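# Illustration of the inverted soft Dice above (values are ours): it scores
# the overlap of the backgrounds (1 - p vs 1 - t), so a confidently empty
# prediction against an empty mask yields a loss close to 0.
if __name__ == "__main__":
    criterion = SoftInvDiceLoss(smooth=1.)
    logits = torch.full((1, 1, 8, 8), -4.0)  # sigmoid(-4) ~= 0.018, i.e. background
    labels = torch.zeros(1, 1, 8, 8)
    print(float(criterion(logits, labels)))  # ~0.009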
# --- COMBINED LOSSES --- #
<class_stmt>OhemBCEDicePenalizeBorderLoss(OhemCrossEntropy2d)<block_start>"""
Combined OHEM (Online Hard Example Mining) process with BCE-Dice penalized loss
"""<def_stmt>__init__ self thresh=0.6 min_kept=0 ignore_index=-100 kernel_size=21 **_<block_start>super().__init__()<line_sep>self.ignore_label=ignore_index<line_sep>self.thresh=float(thresh)<line_sep>self.min_kept=int(min_kept)<line_sep>self.criterion=BCEDicePenalizeBorderLoss(kernel_size=kernel_size)<block_end><block_end><class_stmt>RMIBCEDicePenalizeBorderLoss(RMILossAlt)<block_start>"""
Combined RMI and BCEDicePenalized Loss
<def_stmt>__init__ self kernel_size=21 rmi_weight=1.0 bce_weight=1.0 **kwargs<block_start>super().__init__(**kwargs)<line_sep>self.bce=BCEDicePenalizeBorderLoss(kernel_size=kernel_size)<line_sep>self.bce_weight=bce_weight<line_sep>self.rmi_weight=rmi_weight<block_end><def_stmt>to self device<block_start>super().to(device=device)<line_sep>self.bce.to(device=device)<block_end><def_stmt>forward self logits labels **_<block_start><if_stmt>labels.shape<ne>logits.shape<block_start><if_stmt>logits.shape<g>labels.shape<block_start>labels=labels.unsqueeze(dim=1)# unsqueeze is not in-place; assign the result
<block_end><else_stmt><block_start><raise>Exception(f'Non-matching shapes for logits ({logits.shape}) and labels ({labels.shape})')<block_end><block_end># Calculate RMI loss
rmi=self.rmi_loss(input_=torch.sigmoid(logits) target=labels)<line_sep>bce=self.bce(logits labels)<line_sep># rmi = rmi.mean() * (1.0 - self.bce_weight)
<return>self.rmi_weight<times>rmi+self.bce_weight<times>bce<block_end><block_end> |
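# Minimal usage sketch for the combined RMI + BCE-Dice loss on random data
# (values are illustrative), assuming the BCEDicePenalizeBorderLoss defined
# earlier in this module behaves like the other binary losses here (logits
# in, same-shaped float targets). with_logits is forwarded to RMILossAlt via
# **kwargs.
if __name__ == "__main__":
    criterion = RMIBCEDicePenalizeBorderLoss(kernel_size=21, rmi_weight=1.0,
                                             bce_weight=1.0, with_logits=True)
    logits = torch.randn(2, 1, 96, 96, requires_grad=True)
    labels = torch.randint(0, 2, (2, 1, 96, 96)).float()
    loss = criterion(logits, labels)
    loss.backward()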
<import_from_stmt>visit_utils *<import_stmt>math<def_stmt>setup_plot <block_start>DeleteAllPlots()<line_sep>OpenDatabase(silo_data_path("rect3d.silo"))<line_sep>exprs.define("coords" "coord(quadmesh3d)" etype="vector")<line_sep>exprs.define("mesh_x_zonal" "recenter(coords[0])")<line_sep>exprs.define("mesh_y_zonal" "recenter(coords[1])")<line_sep>exprs.define("mass" "d * volume(quadmesh3d)")<line_sep>AddPlot("Pseudocolor" "mass")<line_sep>DrawPlots()<block_end><def_stmt>ddf opts# work around quirks related to the ddf pipeline expecting
# vars to already exist
<block_start>predraw_vars=[opts["codomain"]]<line_sep>predraw_vars.extend(opts["varnames"])<for_stmt>v predraw_vars<block_start>ChangeActivePlotsVar(v)<block_end>atts=visit.ConstructDDFAttributes()<line_sep>ddf_op_map={"avg":atts.Average "min":atts.Minimum "max":atts.Maximum "stddev":atts.StandardDeviation "var":atts.Variance "sum":atts.Sum "count":atts.Count "rms":atts.RMS "pdf":atts.PDF}<line_sep>atts.ddfName=opts["name"]<line_sep>atts.codomainName=opts["codomain"]<line_sep>atts.varnames=opts["varnames"]<line_sep>atts.ranges=opts["ranges"]<line_sep>atts.numSamples=opts["samples"]<line_sep>atts.statisticalOperator=ddf_op_map[opts["op"]]<line_sep>visit.ConstructDDF(atts)<line_sep>ndims=len(atts.numSamples)<line_sep>ddf_varname="%s_%s_%dd"%(opts["codomain"] opts["op"] ndims)<if_stmt>len(atts.numSamples)<eq>1<block_start>src_fname="%s.ultra"%atts.ddfName<line_sep>des_fname="%s.ult"%(atts.ddfName)<line_sep>common.sexe("mv %s %s"%(src_fname des_fname))<line_sep>lines=open(des_fname).readlines()<line_sep>f=open(des_fname "w")<line_sep>f.write("# %s\n"%(ddf_varname))<for_stmt>l lines[1:]<block_start>f.write(l)<block_end>f.close()<block_end><else_stmt><block_start>ofname="%s.vtk"%atts.ddfName<line_sep>orig_vtk_var="SCALARS %s float"%opts["codomain"]<line_sep>ddf_vtk_var="SCALARS %s float"%ddf_varname<line_sep>data=open(ofname).read()<line_sep>f=open(ofname "w")<line_sep>data=data.replace(orig_vtk_var ddf_vtk_var)<line_sep>f.write(data)<block_end>print("[ddf output: %s]"%ofname)<line_sep><return>ofname<block_end><def_stmt>test_orig_mass <block_start>setup_plot()<line_sep>Test("ddf_vs_dbinning_input_plot")<line_sep>res=query("Variable Sum")<line_sep>DeleteAllPlots()<line_sep><return>res<block_end><def_stmt>test_dbinning_using_coords <block_start>setup_plot()<line_sep>AddOperator("DataBinning")<line_sep>datts=DataBinningAttributes()<line_sep>datts.numDimensions=datts.Two<line_sep>datts.dim1BinBasedOn=datts.X<line_sep>datts.dim1SpecifyRange=0<line_sep>datts.dim1NumBins=10<line_sep>datts.dim2BinBasedOn=datts.Y<line_sep>datts.dim2SpecifyRange=0<line_sep>datts.dim2NumBins=10<line_sep>datts.outOfBoundsBehavior=datts.Clamp<line_sep>datts.reductionOperator=datts.Sum<line_sep>datts.varForReduction="mass"<line_sep>datts.emptyVal=0<line_sep>datts.outputType=datts.OutputOnBins<line_sep>SetOperatorOptions(datts)<line_sep>DrawPlots()<line_sep># we have to export b/c we can't query the
# result of the operator-created expr ...
ofname="dbin_mass_sum_using_coords"<line_sep>eatts=ExportDBAttributes()<line_sep>eatts.db_type="VTK"<line_sep>eatts.filename=ofname<line_sep>ExportDatabase(eatts)<line_sep>DeleteAllPlots()<line_sep>dbin_varname="%s_%s_%dd"%("mass" "sum" 2)<line_sep>ofname<augadd>".vtk"<line_sep>orig_vtk_var="SCALARS %s float"%"operators/DataBinning"<line_sep>ddf_vtk_var="SCALARS %s float"%dbin_varname<line_sep>data=open(ofname).read()<line_sep>f=open(ofname "w")<line_sep>data=data.replace(orig_vtk_var ddf_vtk_var)<line_sep>f.write(data)<line_sep>f.close()<line_sep>OpenDatabase(ofname)<line_sep>AddPlot("Pseudocolor" "mass_sum_2d")<line_sep>DrawPlots()<line_sep>Test("ddf_vs_dbinning_dbin_coords_result")<line_sep>res=query("Variable Sum")<line_sep>DeleteAllPlots()<line_sep>CloseDatabase(ofname)<line_sep><return>res<block_end><def_stmt>test_dbinning_using_coords_exprs <block_start>setup_plot()<line_sep>AddOperator("DataBinning")<line_sep>datts=DataBinningAttributes()<line_sep>datts.numDimensions=datts.Two<line_sep>datts.dim1BinBasedOn=datts.Variable<line_sep>datts.dim1Var="mesh_x_zonal"<line_sep>datts.dim1SpecifyRange=0<line_sep>datts.dim1NumBins=10<line_sep>datts.dim2BinBasedOn=datts.Variable<line_sep>datts.dim2Var="mesh_y_zonal"<line_sep>datts.dim2SpecifyRange=0<line_sep>datts.dim2NumBins=10<line_sep>datts.outOfBoundsBehavior=datts.Clamp<line_sep>datts.reductionOperator=datts.Sum<line_sep>datts.varForReduction="mass"<line_sep>datts.emptyVal=0<line_sep>datts.outputType=datts.OutputOnBins<line_sep>SetOperatorOptions(datts)<line_sep>DrawPlots()<line_sep># we have to export b/c we can't query the
# result of the operator-created expr ...
ofname="dbin_mass_sum_using_coords_exprs"<line_sep>eatts=ExportDBAttributes()<line_sep>eatts.db_type="VTK"<line_sep>eatts.filename=ofname<line_sep>ExportDatabase(eatts)<line_sep>DeleteAllPlots()<line_sep>dbin_varname="%s_%s_%dd"%("mass" "sum" 2)<line_sep>ofname<augadd>".vtk"<line_sep>orig_vtk_var="SCALARS %s float"%"operators/DataBinning"<line_sep>ddf_vtk_var="SCALARS %s float"%dbin_varname<line_sep>data=open(ofname).read()<line_sep>f=open(ofname "w")<line_sep>data=data.replace(orig_vtk_var ddf_vtk_var)<line_sep>f.write(data)<line_sep>f.close()<line_sep>OpenDatabase(ofname)<line_sep>AddPlot("Pseudocolor" "mass_sum_2d")<line_sep>DrawPlots()<line_sep>Test("ddf_vs_dbinning_dbin_coords_exprs_result")<line_sep>res=query("Variable Sum")<line_sep>DeleteAllPlots()<line_sep>CloseDatabase(ofname)<line_sep><return>res<block_end><def_stmt>test_ddf <block_start>setup_plot()<line_sep>ddf_opts={"name":"ddf_mass_sum" "op":"sum" "codomain":"mass" "varnames":("mesh_x_zonal" "mesh_y_zonal") "ranges":(0 1 0 1) "samples":(10 10)}<line_sep>ddf(ddf_opts)<line_sep>DeleteAllPlots()<line_sep>OpenDatabase("ddf_mass_sum.vtk")<line_sep>AddPlot("Pseudocolor" "mass_sum_2d")<line_sep>DrawPlots()<line_sep>Test("ddf_vs_dbinning_ddf_result")<line_sep>res=query("Variable Sum")<line_sep>DeleteAllPlots()<line_sep>CloseDatabase("ddf_mass_sum.vtk")<line_sep><return>res<block_end>orig_val=test_orig_mass()<line_sep>ddf_val=test_ddf()<line_sep>dbin_coords_val=test_dbinning_using_coords()<line_sep>dbin_cexprs_val=test_dbinning_using_coords_exprs()<line_sep>TestText("Orig" "Mass Sum = %s"%orig_val)<line_sep>TestText("DDF" "Mass Sum = %s"%ddf_val)<line_sep>TestText("DBIN with Coords" "Mass Sum = %s"%dbin_coords_val)<line_sep>TestText("DBIN with Coords Exprs" "Mass Sum = %s"%dbin_cexprs_val)<line_sep>TestValueLT("Orig Equals DDF" abs(orig_val-ddf_val) 1e-4)<line_sep>TestValueLT("Orig Equals DBIN with Coords" abs(orig_val-dbin_coords_val) 1e-4)<line_sep>TestValueLT("Orig Equals DBIN with Coords Exprs" abs(orig_val-dbin_cexprs_val) 1e-4)<line_sep>Exit()<line_sep> |
# Author - Abhinand --> https://github.com/abhinand5
# =====================================================================
# IMPORTS
# ======================================================================
<import_stmt>torch<import_stmt>torchvision<import_from_stmt>torchvision.models.detection.faster_rcnn FastRCNNPredictor<import_stmt>pkbar<line_sep># =====================================================================
# PyTorch class for the model
# ======================================================================
<class_stmt>FaceMaskDetector(object)<block_start><def_stmt>__init__ self data_loader device pretrained=<true><block_start>self.pretrained=pretrained<line_sep>self.data_loader=data_loader<line_sep>self.device=device<block_end><def_stmt>build_model self n_classes<block_start>model=torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=self.pretrained)<line_sep>in_features=model.roi_heads.box_predictor.cls_score.in_features<line_sep>model.roi_heads.box_predictor=FastRCNNPredictor(in_features n_classes+1)<line_sep>self.model=model<block_end><def_stmt>train self n_epochs learning_rate<block_start>dl_len=len(self.data_loader)<line_sep>self.model.to(self.device)<line_sep>params=[p<for>p self.model.parameters()<if>p.requires_grad]<line_sep>optimizer=torch.optim.SGD(params lr=learning_rate momentum=0.9 weight_decay=0.0005)<line_sep>losses_per_ep=[]<for_stmt>epoch range(n_epochs)<block_start>self.model.train()<line_sep>ep_loss=0<line_sep>kbar=pkbar.Kbar(target=dl_len epoch=epoch num_epochs=n_epochs width=20 always_stateful=<true> )<for_stmt>i,(images annotations) enumerate(self.data_loader)<block_start>images=list(image.to(self.device)<for>image images)<line_sep>annotations=[{k:v.to(self.device)<for>k,v t.items()}<for>t annotations]<line_sep>losses=self.model([images[0]] [annotations[0]])<line_sep>loss=sum(loss<for>loss losses.values())<line_sep>optimizer.zero_grad()<line_sep>loss.backward()<line_sep>optimizer.step()<line_sep>ep_loss<augadd>loss.item()<line_sep>kbar.update(i values=[("loss" ep_loss)])<block_end>losses_per_ep.append(ep_loss)<line_sep>kbar.add(1)<block_end><return>losses_per_ep<block_end><def_stmt>predict self images<block_start>self.model.to(self.device)<line_sep>self.model.eval()<line_sep>preds=self.model(images)<line_sep><return>preds<block_end><def_stmt>save_model self path<block_start>torch.save(self.model.state_dict() path)<block_end><def_stmt>load_model self path<block_start>self.model.load_state_dict(torch.load(path))<block_end><block_end> |
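# =====================================================================
# Minimal usage sketch (assumptions: a detection-style Dataset named
# face_mask_dataset exists elsewhere, yielding (image, target) pairs whose
# targets carry "boxes" and "labels"). n_classes counts foreground classes;
# build_model() adds one more for the background.
# =====================================================================
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # data_loader = DataLoader(face_mask_dataset, batch_size=4, shuffle=True,
    #                          collate_fn=lambda batch: tuple(zip(*batch)))
    # detector = FaceMaskDetector(data_loader, device, pretrained=True)
    # detector.build_model(n_classes=2)  # e.g. with_mask / without_mask
    # detector.train(n_epochs=5, learning_rate=0.005)
    # detector.save_model("face_mask_detector.pth")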
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>threading<import_from_stmt>mesos.interface mesos_pb2<import_from_stmt>twitter.common.metrics LambdaGauge<import_from_stmt>apache.aurora.executor.common.status_checker StatusChecker StatusCheckerProvider StatusResult <import_from_stmt>apache.aurora.executor.common.task_info mesos_task_instance_from_assigned_task<import_from_stmt>apache.thermos.monitoring.monitor TaskMonitor<import_from_stmt>apache.thermos.monitoring.resource TaskResourceMonitor<class_stmt>ResourceManager(StatusChecker)<block_start>""" Manage resources consumed by a Task """<def_stmt>__init__ self resources resource_monitor<block_start>"""
resources: Resources object specifying cpu, ram, disk limits for the task
resource_monitor: The ResourceMonitor to monitor resources
"""<line_sep>self._resource_monitor=resource_monitor<line_sep># TODO(wickman) Remove cpu/ram reporting if MESOS-1458 is resolved.
self._max_cpu=resources.cpu().get()<line_sep>self._max_ram=resources.ram().get()<line_sep>self._max_disk=resources.disk().get()<line_sep>self._kill_reason=<none><line_sep>self._kill_event=threading.Event()<block_end>@property<def_stmt>_num_procs self<block_start>""" Total number of processes the task consists of (including child processes) """<line_sep><return>self._resource_monitor.sample()[1].num_procs<block_end>@property<def_stmt>_ps_sample self<block_start>""" ProcessSample representing the aggregate resource consumption of the Task's processes """<line_sep><return>self._resource_monitor.sample()[1].process_sample<block_end>@property<def_stmt>_disk_sample self<block_start>""" Integer in bytes representing the disk consumption in the Task's sandbox """<line_sep><return>self._resource_monitor.sample()[1].disk_usage<block_end>@property<def_stmt>status self<block_start>sample=self._disk_sample<if_stmt>sample<g>self._max_disk<block_start>self._kill_event.set()<line_sep><return>StatusResult('Disk limit exceeded. Reserved %s bytes vs used %s bytes.'%(self._max_disk sample) mesos_pb2.TASK_FAILED)<block_end><block_end><def_stmt>name self<block_start><return>'resource_manager'<block_end><def_stmt>register_metrics self<block_start>self.metrics.register(LambdaGauge('disk_used' <lambda>:self._disk_sample))<line_sep>self.metrics.register(LambdaGauge('disk_reserved' <lambda>:self._max_disk))<line_sep>self.metrics.register(LambdaGauge('disk_percent' <lambda>:1.0<times>self._disk_sample/self._max_disk))<line_sep>self.metrics.register(LambdaGauge('cpu_used' <lambda>:self._ps_sample.rate))<line_sep>self.metrics.register(LambdaGauge('cpu_reserved' <lambda>:self._max_cpu))<line_sep>self.metrics.register(LambdaGauge('cpu_percent' <lambda>:1.0<times>self._ps_sample.rate/self._max_cpu))<line_sep>self.metrics.register(LambdaGauge('ram_used' <lambda>:self._ps_sample.rss))<line_sep>self.metrics.register(LambdaGauge('ram_reserved' <lambda>:self._max_ram))<line_sep>self.metrics.register(LambdaGauge('ram_percent' <lambda>:1.0<times>self._ps_sample.rss/self._max_ram))<block_end><def_stmt>start self<block_start>super(ResourceManager self).start()<line_sep>self.register_metrics()<line_sep>self._resource_monitor.start()<block_end><block_end><class_stmt>ResourceManagerProvider(StatusCheckerProvider)<block_start><def_stmt>__init__ self checkpoint_root **resource_monitor_options<block_start>self._checkpoint_root=checkpoint_root<line_sep>self._resource_monitor_options=resource_monitor_options<block_end><def_stmt>from_assigned_task self assigned_task sandbox<block_start>task_id=assigned_task.taskId<line_sep>resources=mesos_task_instance_from_assigned_task(assigned_task).task().resources()<line_sep>task_monitor=TaskMonitor(self._checkpoint_root task_id)<line_sep>resource_monitor=TaskResourceMonitor(task_id task_monitor **self._resource_monitor_options)<line_sep><return>ResourceManager(resources resource_monitor)<block_end><block_end> |
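# Illustrative sketch of how the status property surfaces a disk violation.
# The stub classes below are ours, standing in for the Thermos Resources and
# ResourceMonitor objects; all values are examples only.
if __name__ == '__main__':
  class _Stub(object):
    def __init__(self, value):
      self._value = value

    def get(self):
      return self._value

  class _FakeResources(object):
    def cpu(self):
      return _Stub(1.0)

    def ram(self):
      return _Stub(128 * 1024 * 1024)

    def disk(self):
      return _Stub(64 * 1024 * 1024)

  class _FakeSample(object):
    disk_usage = 65 * 1024 * 1024  # 1 MiB over the 64 MiB reservation
    num_procs = 1
    process_sample = None

  class _FakeMonitor(object):
    def sample(self):
      return (0, _FakeSample())

  manager = ResourceManager(_FakeResources(), _FakeMonitor())
  print(manager.status)  # StatusResult reporting the exceeded disk limit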
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-MediaFoundation-Performance-Core
GUID : b20e65ac-c905-4014-8f78-1b6a508142eb
"""<import_from_stmt>construct Int8sl Int8ul Int16ul Int16sl Int32sl Int32ul Int64sl Int64ul Bytes Double Float32l Struct<import_from_stmt>etl.utils WString CString SystemTime Guid<import_from_stmt>etl.dtyp Sid<import_from_stmt>etl.parsers.etw.core Etw declare guid<line_sep>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=1 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_1_0(Etw)<block_start>pattern=Struct("object"/Int64ul "WorkQueueId"/Int64ul "IsMultithread"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=2 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_2_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_u32StreamingPeriodMS"/Int32ul "m_u32RenderBufferSizeInFrames"/Int32ul "m_ui64ClockTicksPerSecond"/Int64ul "m_u32AudioClientType"/Int32ul "IsEventDriven"/Int8ul "FillSilenceWhenStarving"/Int8ul "FillCompressedSilenceWhenStarving"/Int8ul "DropLateData"/Int8ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=3 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_3_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=4 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_4_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=5 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_5_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=6 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_6_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=7 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_7_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=8 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_8_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=9 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_9_0(Etw)<block_start>pattern=Struct("object"/Int64ul "SystemTime"/Int64sl "mfsState"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=10 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_10_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=11 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_11_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=12 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_12_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul "pullPlayPosition"/Int64sl "pullRawPlayPosition"/Int64sl "pullRawWritePosition"/Int64sl "pullDevicePosition"/Int64sl "phnsCorrelatedTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=13 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_13_0(Etw)<block_start>pattern=Struct("object"/Int64ul 
"dwBytesWanted"/Int32ul "u32FramesToRender"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=14 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_14_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=15 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_15_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwBytesWanted"/Int32ul "m_bEOSReceived"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=16 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_16_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwBytesWanted"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=17 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_17_0(Etw)<block_start>pattern=Struct("object"/Int64ul "ullEOSPosition"/Int64sl "m_bIsEventDriven"/Int8ul "IsOffloadedStream"/Int8ul "IsOffloadedCompressedStream"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=18 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_18_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=19 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_19_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=20 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_20_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=21 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_21_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=22 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_22_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=23 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_23_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bFirstFill"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=24 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_24_0(Etw)<block_start>pattern=Struct("object"/Int64ul "u32CurrentPadding"/Int32ul "u32FramesToRender"/Int32ul "u32TimeLeft"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=25 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_25_0(Etw)<block_start>pattern=Struct("object"/Int64ul "FrameCount"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=26 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_26_0(Etw)<block_start>pattern=Struct("object"/Int64ul "BytesInUse"/Int32ul "dwBytesWanted"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=27 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_27_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwBytesStillWanted"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=28 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_28_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=29 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_29_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bEOS"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=30 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_30_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=31 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_31_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hr"/Int32sl "fInserted"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=32 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_32_0(Etw)<block_start>pattern=Struct("object"/Int64ul "fFlushed"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=33 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_33_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bEngineStarted"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=34 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_34_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=35 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_35_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bReset"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=36 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_36_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=37 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_37_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=38 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_38_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=39 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_39_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bEngineStarted"/Int8ul "bIsEventDriven"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=40 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_40_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=41 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_41_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=42 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_42_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=43 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_43_0(Etw)<block_start>pattern=Struct("object"/Int64ul 
"hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=44 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_44_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=45 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_45_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=46 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_46_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=47 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_47_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=48 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_48_0(Etw)<block_start>pattern=Struct("object"/Int64ul "fFillBuffer"/Int8ul "bIsEventDriven"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=49 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_49_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=50 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_50_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=51 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_51_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=52 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_52_0(Etw)<block_start>pattern=Struct("object"/Int64ul "DisconnectReason"/Int32ul "bReacquire"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=53 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_53_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwFlags"/Int32ul "ui32EndpointRole"/Int32ul "eCategory"/Int32ul "bIsLowLatency"/Int8ul "bBufferDurationSpecified"/Int8ul "hnsBufferDuration"/Int64sl "bOnlyAudio"/Int8ul "bDisableOffload"/Int8ul "bNonSeekableStream"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=54 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_54_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=55 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_55_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=56 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_56_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=57 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_57_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=58 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_58_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSampleTime"/Int64sl "hnsSampleDuration"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=59 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_59_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=60 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_60_0(Etw)<block_start>pattern=Struct("object"/Int64ul "mfRenderTime"/Int64sl "fDiscontinuity"/Int8ul "mfAudioState"/Int32ul "IsRateZero"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=61 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_61_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSampleTime"/Int64sl "hnsSampleDuration"/Int64sl "bPrerollSample"/Int8ul "bDelayedSample"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=62 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_62_0(Etw)<block_start>pattern=Struct("object"/Int64ul "scenario"/Int32ul "fSignalPrerolled"/Int8ul "m_cSamplesPrerolled"/Int32ul "m_hnsPrerollDuration"/Int64sl "m_u32CurrentPrerolledBytes"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=63 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_63_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSampleDuration"/Int64sl "m_hnsShortSampleTolerance"/Int64sl "m_cMaxPendingRequestSample"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=64 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_64_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=65 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_65_0(Etw)<block_start>pattern=Struct("object"/Int64ul "MarkerType"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=66 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_66_0(Etw)<block_start>pattern=Struct("object"/Int64ul "ControlPoint"/Int32sl "Type"/Int32sl "Value"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=67 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_67_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=68 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_68_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bFlushPreroll"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=69 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_69_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "fDiscontinuity"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=70 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_70_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=71 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_71_0(Etw)<block_start>pattern=Struct("object"/Int64ul 
"hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=72 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_72_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=73 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_73_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=74 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_74_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=75 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_75_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=76 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_76_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=77 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_77_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=78 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_78_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=79 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_79_0(Etw)<block_start>pattern=Struct("object"/Int64ul "IsUninitialized"/Int8ul "bInvalidatingStream"/Int8ul "mfaOriginalState"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=80 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_80_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=81 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_81_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=82 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_82_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "phnsTimeNow"/Int64sl "phnsCorrelatedTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=83 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_83_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=84 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_84_0(Etw)<block_start>pattern=Struct("object"/Int64ul "phnsTimeNow"/Int64sl "m_mftMaxTimePriorToStreamSwitch"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=85 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_85_0(Etw)<block_start>pattern=Struct("object"/Int64ul "phnsTimeNow"/Int64sl "phnsCorrelatedTime"/Int64sl "m_bInvalidatingStream"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=86 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_86_0(Etw)<block_start>pattern=Struct("object"/Int64ul 
"hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=87 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_87_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSystemTime"/Int64sl "llClockStartOffset"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=88 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_88_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=89 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_89_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=90 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_90_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=91 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_91_0(Etw)<block_start>pattern=Struct("object"/Int64ul "StartOffset"/Int64sl "mftStartOffset"/Int64sl "bResetGapAndStallHandling"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=92 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_92_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "StartOffset"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=93 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_93_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=94 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_94_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=95 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_95_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSystemTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=96 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_96_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=97 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_97_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSystemTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=98 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_98_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=99 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_99_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsSystemTime"/Int64sl "IsRateZero"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=100 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_100_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=101 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_101_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=102 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_102_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=103 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_103_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwFlags"/Int32sl "bUseResampler"/Int8ul "bClockRateMatchEnabled"/Int8ul "bUseLightWeightConverters"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=104 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_104_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "m_bIsOffloadStream"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=105 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_105_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=106 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_106_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "pullBytePosition"/Int64sl "phnsCorrelatedTime"/Int64sl "m_bIsCompressedStream"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=107 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_107_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwSamples"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=108 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_108_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=109 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_109_0(Etw)<block_start>pattern=Struct("object"/Int64ul "MarkerType"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=110 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_110_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=111 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_111_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=112 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_112_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=113 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_113_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=114 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_114_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=115 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_115_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=116 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_116_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=117 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_117_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=118 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_118_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=119 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_119_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=120 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_120_0(Etw)<block_start>pattern=Struct("object"/Int64ul "mfRenderTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=121 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_121_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "pfDiscontinuity"/Int8ul "pullRenderBytePosition"/Int64sl "pdwBytesToStall"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=122 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_122_0(Etw)<block_start>pattern=Struct("object"/Int64ul "pfDiscontinuity"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=123 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_123_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "pullRenderBytePosition"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=124 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_124_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=125 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_125_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=126 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_126_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bConvertToMFPos"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=127 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_127_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32sl "pullPlayPosition"/Int64sl "pullWritePosition"/Int64sl "pullDevicePlayPosition"/Int64sl "phnsCorrelatedTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=128 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_128_0(Etw)<block_start>pattern=Struct("object"/Int64ul "mftTrimAmount"/Int64sl "mftCutoff"/Int64sl "bTrimFromFront"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=129 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_129_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=130 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_130_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bEnable"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=131 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_131_0(Etw)<block_start>pattern=Struct("object"/Int64ul "cPendingRequestSample"/Int32sl "cMaxPendingRequestSample"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=132 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_132_0(Etw)<block_start>pattern=Struct("object"/Int64ul "cPendingRequestSample"/Int32sl "cMaxPendingRequestSample"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=133 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_133_0(Etw)<block_start>pattern=Struct("object"/Int64ul "cPendingRequestSample"/Int32sl "cMaxPendingRequestSample"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=134 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_134_0(Etw)<block_start>pattern=Struct("object"/Int64ul "cPendingRequestSample"/Int32sl "cMaxPendingRequestSample"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=135 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_135_0(Etw)<block_start>pattern=Struct("object"/Int64ul "cMaxPendingRequestSample"/Int32sl "NumContainers"/Int32sl "BytesInUse"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=136 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_136_0(Etw)<block_start>pattern=Struct("object"/Int64ul "MaxPendingRequestSample"/Int32sl "NumContainers"/Int32sl "DurationInUse"/Int64sl "hnsMinAllocation"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=137 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_137_0(Etw)<block_start>pattern=Struct("object"/Int64ul "cMaxPendingRequestSample"/Int32sl "NumContainers"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=138 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_138_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=139 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_139_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=140 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_140_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=141 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_141_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=142 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_142_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=143 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_143_0(Etw)<block_start>pattern=Struct("object"/Int64ul "TimeOutinMm"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=144 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_144_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=145 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_145_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=146 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_146_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=147 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_147_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bNeedFormatNegotiation"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=148 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_148_0(Etw)<block_start>pattern=Struct("object"/Int64ul "mfaOriginalStreamState"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=149 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_149_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=150 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_150_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=151 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_151_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hnsStreamInvalidationEventTime"/Int64sl "hnsLastCorrelatedTime"/Int64sl "hnsTimeElapsed"/Int64sl "hnsLastTime"/Int64sl "hnsNewLastTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=152 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_152_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=153 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_153_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bNeedFormatNegotiation"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=154 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_154_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=155 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_155_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=156 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_156_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bDeviceChange"/Int8ul "hnsNewStreamStartTime"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=157 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_157_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bStopped"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=158 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_158_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=159 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_159_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=160 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_160_0(Etw)<block_start>pattern=Struct("object"/Int64ul "eventType"/Int32sl "bReacquireDevice"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=161 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_161_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=162 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_162_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=163 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_163_0(Etw)<block_start>pattern=Struct("object"/Int64ul "eventType"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=164 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_164_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=165 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_165_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=166 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_166_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=167 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_167_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=168 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_168_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=169 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_169_0(Etw)<block_start>pattern=Struct("object"/Int64ul "dwNewState"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=170 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_170_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=171 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_171_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=172 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_172_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=173 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_173_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=174 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_174_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=175 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_175_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=176 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_176_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=177 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_177_0(Etw)<block_start>pattern=Struct("object"/Int64ul "MFTimeOfLastRenderSample"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=178 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_178_0(Etw)<block_start>pattern=Struct("object"/Int64ul "u32FramesToRender"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=179 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_179_0(Etw)<block_start>pattern=Struct("object"/Int64ul "ClockTime"/Int64sl "CorrelatedTime"/Int64sl "IsStreamInvalidating"/Int8ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=180 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_180_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=181 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_181_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_pCurrentMediaType"/Int64sl "IsStreamInvalidating"/Int8ul "hrAEFormatQuery"/Int32ul "hFormatResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=182 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_182_0(Etw)<block_start>pattern=Struct("object"/Int64ul "wFormatTag"/Int32ul "nChannels"/Int16ul "nSamplesPerSec"/Int32ul "nAvgBytesPerSec"/Int32ul "nBlockAlign"/Int16ul "wBitsPerSample"/Int16ul "cbSize"/Int16ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=183 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_183_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=184 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_184_0(Etw)<block_start>pattern=Struct("object"/Int64ul "clientType"/Int32ul "hFormatResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=185 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_185_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=186 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_186_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=187 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_187_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=188 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_188_0(Etw)<block_start>pattern=Struct("object"/Int64ul "clientType"/Int32ul "hFormatResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=189 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_189_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=190 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_190_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bufferDuration"/Int64sl "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=191 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_191_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=192 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_192_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=193 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_193_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=194 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_194_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=195 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_195_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=196 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_196_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=197 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_197_0(Etw)<block_start>pattern=Struct("object"/Int64ul "Key"/Int64ul "Delay"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=198 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_198_0(Etw)<block_start>pattern=Struct("object"/Int64ul "Key"/Int64ul "Delay"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=199 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_199_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=200 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_200_0(Etw)<block_start>pattern=Struct("object"/Int64ul "ClockTime0_us"/Int64sl "QPC0_us"/Int64sl "SmoothedQPC0_us"/Int64sl "QPCDelta_us"/Int64sl "WindowCount"/Int32sl "WindowWidth_us"/Int64sl "Accepted"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=500 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_500_0(Etw)<block_start>pattern=Struct("object"/Int64ul "u32FramesRead"/Int32ul "m_u64LastSampleTime"/Int64ul "u64Duration"/Int64ul "dwFlagsForSample"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=501 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_501_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_pParent"/Int64ul "eventType"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=502 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_502_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_bAudioProcessingRaw"/Int8ul 
"m_bIsEventDriven"/Int8ul "m_bIsLowLatency"/Int8ul "m_hnsBufferDuration"/Int64sl "m_uiAudioCategory"/Int32ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=503 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_503_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_spAudioSessionControl"/Int64ul "m_spAudioSessionEvents"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=504 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_504_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=505 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_505_0(Etw)<block_start>pattern=Struct("object"/Int64ul "wstrEndpointId"/WString "role"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=506 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_506_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=507 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_507_0(Etw)<block_start>pattern=Struct("object"/Int64ul "wstrEndpointId"/WString "role"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=508 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_508_0(Etw)<block_start>pattern=Struct("object"/Int64ul "uFailedLineNumber"/Int32ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=509 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_509_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=510 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_510_0(Etw)<block_start>pattern=Struct("object"/Int64ul "AudioClientProperties_bIsOffload"/Int8ul "AudioClientProperties_eCategory"/Int32ul "AudioClientProperties_Options"/Int32ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=511 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_511_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_spAudioClientForStreaming"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=512 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_512_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_u32BytesPerFrame"/Int32ul "m_u32FramesPerSecond"/Int32ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=513 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_513_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bFirstRead"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=514 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_514_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=515 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_515_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=516 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_516_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=517 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_517_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_bEngineStarted"/Int8ul "m_bIsEventDriven"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=518 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_518_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_bEngineStarted"/Int8ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=519 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_519_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bReset"/Int8ul "m_bEngineStarted"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=520 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_520_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=521 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_521_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=522 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_522_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=523 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_523_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=524 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_524_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=525 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_525_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_bEngineStarted"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=526 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_526_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=527 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_527_0(Etw)<block_start>pattern=Struct("object"/Int64ul "DisconnectReason"/Int32sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=528 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_528_0(Etw)<block_start>pattern=Struct("object"/Int64ul "u32FramesRead"/Int32ul "u32ActualFramesInCurrentPacket"/Int32ul "dwFlags"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=529 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_529_0(Etw)<block_start>pattern=Struct("object"/Int64ul "fLevel"/Float32l)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=530 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_530_0(Etw)<block_start>pattern=Struct("object"/Int64ul "bMute"/Int8ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") 
event_id=531 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_531_0(Etw)<block_start>pattern=Struct("object"/Int64ul "pParentObj"/Int64ul "dwWorkQueueId"/Int32ul "lWorkQueuePriority"/Int32sl "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=532 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_532_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=533 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_533_0(Etw)<block_start>pattern=Struct("object"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=534 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_534_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=535 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_535_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_u32BufferFrameCount"/Int32ul "m_u32BytesPerFrame"/Int32ul "m_u32FramesPerSecond"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=536 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_536_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=537 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_537_0(Etw)<block_start>pattern=Struct("object"/Int64ul "wFormatTag"/Int32ul "nChannels"/Int16ul "nSamplesPerSec"/Int32ul "nAvgBytesPerSec"/Int32ul "nBlockAlign"/Int16ul "wBitsPerSample"/Int16ul "cbSize"/Int16ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=538 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_538_0(Etw)<block_start>pattern=Struct("object"/Int64ul "wFormatTag"/Int32ul "nChannels"/Int16ul "nSamplesPerSec"/Int32ul "nAvgBytesPerSec"/Int32ul "nBlockAlign"/Int16ul "wBitsPerSample"/Int16ul "cbSize"/Int16ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=539 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_539_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=540 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_540_0(Etw)<block_start>pattern=Struct("object"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=541 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_541_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_ReadySampleCount"/Int32ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=542 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_542_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_ReadySampleCount"/Int32ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=543 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_543_0(Etw)<block_start>pattern=Struct("object"/Int64ul "pMediaType"/Int64ul "m_spCurrentMediaType"/Int64ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=544 
version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_544_0(Etw)<block_start>pattern=Struct("object"/Int64ul "m_spCurrentMediaType"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=545 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_545_0(Etw)<block_start>pattern=Struct("object"/Int64ul "wFormatTag"/Int32ul "nChannels"/Int16ul "nSamplesPerSec"/Int32ul "nAvgBytesPerSec"/Int32ul "nBlockAlign"/Int16ul "wBitsPerSample"/Int16ul "cbSize"/Int16ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=546 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_546_0(Etw)<block_start>pattern=Struct("object"/Int64ul "guidService"/Guid "riid"/Guid "pvObject"/Int64ul "hResult"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=547 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_547_0(Etw)<block_start>pattern=Struct("object"/Int64ul "u32CurrentPadding"/Int32ul "ulSamples"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=600 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_600_0(Etw)<block_start>pattern=Struct("object"/Int64ul "State"/Int32sl "ClockOffset"/Int64sl "QPC"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=650 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_650_0(Etw)<block_start>pattern=Struct("object"/Int64ul "SrcObject"/Int64ul "SamplesReceived"/Int32sl "LateSamples"/Int64sl "TotalLateTime_ms"/Int64sl "SampleLatency_hns"/Int64sl "SampleTime_hns"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=651 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_651_0(Etw)<block_start>pattern=Struct("object"/Int64ul "Node"/Int64ul "OutputIndex"/Int32sl "WorkQueueID"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=652 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_652_0(Etw)<block_start>pattern=Struct("object"/Int64ul "Node"/Int64ul "OutputIndex"/Int32sl "WorkQueueID"/Int32ul)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=700 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_700_0(Etw)<block_start>pattern=Struct("object"/Int64ul "Stream"/Int32sl "SamplePtr"/Int64ul "TimeStamp"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=701 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_701_0(Etw)<block_start>pattern=Struct("object"/Int64ul "Stream"/Int32sl "ES_Stream"/Int32sl "TimeStamp"/Int64sl "PackSize"/Int32sl "LastPCR"/Int64sl)<block_end>@declare(guid=guid("b20e65ac-c905-4014-8f78-1b6a508142eb") event_id=702 version=0)<class_stmt>Microsoft_Windows_MediaFoundation_Performance_Core_702_0(Etw)<block_start>pattern=Struct("object"/Int64ul "PCR"/Int64sl)<block_end> |
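# ---------------------------------------------------------------------------
# Usage sketch (not part of the original provider definitions). Each `pattern`
# above is an ordinary `construct` Struct, so a raw event payload can be
# decoded on its own, without the surrounding ETW plumbing. The payload bytes
# below are fabricated purely for illustration; the field layout is copied
# from the many "object + hResult" events above, e.g.
# Microsoft_Windows_MediaFoundation_Performance_Core_45_0. The same approach
# works for richer layouts such as the WAVEFORMATEX-shaped events
# (182, 537, 538, 545); only the field list changes.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from construct import Struct, Int64ul, Int32ul

    # An 8-byte object pointer followed by a 4-byte HRESULT, little-endian.
    demo_pattern = Struct("object" / Int64ul, "hResult" / Int32ul)

    # Fabricated 12-byte payload: object pointer 0x7ff6a1b20000,
    # hResult 0x80004005 (E_FAIL).
    payload = (0x00007FF6A1B20000).to_bytes(8, "little") + (0x80004005).to_bytes(4, "little")

    event = demo_pattern.parse(payload)
    print(hex(event.object), hex(event.hResult))  # 0x7ff6a1b20000 0x80004005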